From 6bd61705ca0419a2ffd55bcd59d037f0ca5f50a7 Mon Sep 17 00:00:00 2001
From: IAvecilla
Date: Wed, 10 Jan 2024 18:34:08 -0300
Subject: [PATCH 001/139] Add dockerfile for executable node

---
 node/Dockerfile | 15 +++++++++++++++
 1 file changed, 15 insertions(+)
 create mode 100644 node/Dockerfile

diff --git a/node/Dockerfile b/node/Dockerfile
new file mode 100644
index 00000000..0ca910d2
--- /dev/null
+++ b/node/Dockerfile
@@ -0,0 +1,15 @@
+# Build Stage
+FROM rust:1.72.1 as build
+COPY . /node/
+WORKDIR /node
+RUN apt-get update && apt-get install -y libclang-dev
+RUN cargo build --release
+
+# Runtime Stage
+FROM debian:stable-slim as runtime
+
+COPY --from=build /node/target/release/localnet_config /node/
+COPY --from=build /node/target/release/executor /node/
+COPY --from=build /node/tools/addresses.txt /node/
+
+EXPOSE 3054

From b2fb9ab78e33f4d850f8bdaa1832bf04e5f74811 Mon Sep 17 00:00:00 2001
From: IAvecilla
Date: Wed, 10 Jan 2024 18:34:21 -0300
Subject: [PATCH 002/139] Add compose file for testing purposes

---
 node/compose.yaml | 21 +++++++++++++++++++++
 1 file changed, 21 insertions(+)
 create mode 100644 node/compose.yaml

diff --git a/node/compose.yaml b/node/compose.yaml
new file mode 100644
index 00000000..f3d67050
--- /dev/null
+++ b/node/compose.yaml
@@ -0,0 +1,21 @@
+version: "3.9"
+
+services:
+  node-1:
+    build: .
+    networks:
+      node_net:
+        ipv4_address: 172.12.0.10
+  node-2:
+    build: .
+    networks:
+      node_net:
+        ipv4_address: 172.12.0.11
+
+networks:
+  node_net:
+    name: node_net
+    ipam:
+      config:
+        - subnet: "172.12.0.0/24"
+          gateway: "172.12.0.1"
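The compose file above pins each node to a fixed address through the `ipam` block, which is what later lets the config generator write every peer's address up front. A quick way to confirm the wiring is to inspect the network after bringing the stack up. This is a minimal sketch, assuming the v1-style `docker-compose` CLI used elsewhere in this series and that the file lives in `node/`:

    # Start both services defined in compose.yaml.
    cd node
    docker-compose up -d

    # Verify the containers received the pinned addresses (172.12.0.10 and .11).
    docker network inspect node_net \
      --format '{{range .Containers}}{{.Name}} {{.IPv4Address}}{{println}}{{end}}'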
From e768bc6a2000bb2beef0178bee9eaff5f9cbc465 Mon Sep 17 00:00:00 2001
From: IAvecilla
Date: Fri, 12 Jan 2024 03:11:10 -0300
Subject: [PATCH 003/139] Add entrypoint for node consensus dockerfile

---
 node/Dockerfile    | 8 ++++++++
 node/entrypoint.sh | 5 +++++
 2 files changed, 13 insertions(+)
 create mode 100755 node/entrypoint.sh

diff --git a/node/Dockerfile b/node/Dockerfile
index 0ca910d2..34fef184 100644
--- a/node/Dockerfile
+++ b/node/Dockerfile
@@ -4,6 +4,7 @@ COPY . /node/
 WORKDIR /node
 RUN apt-get update && apt-get install -y libclang-dev
 RUN cargo build --release
+RUN cd tools && make node_configs
 
 # Runtime Stage
 FROM debian:stable-slim as runtime
@@ -11,5 +12,12 @@ FROM debian:stable-slim as runtime
 COPY --from=build /node/target/release/localnet_config /node/
 COPY --from=build /node/target/release/executor /node/
 COPY --from=build /node/tools/addresses.txt /node/
+COPY --from=build /node/tools/node-configs /node/
+COPY --from=build /node/entrypoint.sh /node/
+
+WORKDIR /node
+RUN chmod +x entrypoint.sh
+
+ENTRYPOINT ["./entrypoint.sh"]
 
 EXPOSE 3054
diff --git a/node/entrypoint.sh b/node/entrypoint.sh
new file mode 100755
index 00000000..b96a4835
--- /dev/null
+++ b/node/entrypoint.sh
@@ -0,0 +1,5 @@
+#!/bin/bash
+
+cd $(hostname -i):3054
+export RUST_LOG=INFO
+../executor

From e6b64879f7bc0e54d042d365c21c2b9ecc96c513 Mon Sep 17 00:00:00 2001
From: IAvecilla
Date: Fri, 12 Jan 2024 03:11:45 -0300
Subject: [PATCH 004/139] Add some comments and improve compose test file

---
 node/compose.yaml | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

diff --git a/node/compose.yaml b/node/compose.yaml
index f3d67050..10d2032e 100644
--- a/node/compose.yaml
+++ b/node/compose.yaml
@@ -3,18 +3,24 @@ version: "3.9"
 services:
   node-1:
     build: .
+    image: test-node
     networks:
      node_net:
+        # This allows us to know the IP of the node-1 container to fill in the address in the config file
+        # Only for test purposes, may be removed in the future
         ipv4_address: 172.12.0.10
   node-2:
-    build: .
+    image: test-node
+    depends_on: [node-1]
     networks:
       node_net:
+        # This allows us to know the IP of the node-2 container to fill in the address in the config file
+        # Only for test purposes, may be removed in the future
         ipv4_address: 172.12.0.11
 
 networks:
   node_net:
-    name: node_net
+    name: node-net
     ipam:
       config:
         - subnet: "172.12.0.0/24"
           gateway: "172.12.0.1"

From 4ca7fdc649231cbb31f07b6a748722b21ce0ac36 Mon Sep 17 00:00:00 2001
From: IAvecilla
Date: Fri, 12 Jan 2024 03:12:03 -0300
Subject: [PATCH 005/139] Add new makefile commands to run dockerized consensus
 node

---
 node/tools/Makefile | 28 +++++++++++++++++++++++++---
 1 file changed, 25 insertions(+), 3 deletions(-)

diff --git a/node/tools/Makefile b/node/tools/Makefile
index 668c04c8..0941b131 100644
--- a/node/tools/Makefile
+++ b/node/tools/Makefile
@@ -1,12 +1,34 @@
 .PHONY: run-node node-configs clean
 IP?=127.0.0.1:3054
+DOCKER_IP=172.12.0.10
 
-run-node:
+node:
	export RUST_LOG=INFO && cd node-configs/${IP} && cargo run -- --database ./database/${IP}
 
-node-configs:
+node_configs:
	cargo run --bin localnet_config -- --input-addrs addresses.txt --output-dir node-configs
 
-clean:
+node_docker:
+	rm -rf addresses.txt
+	touch addresses.txt
+	echo ${DOCKER_IP}:3054 >> addresses.txt
+	docker build -t test-node ../
+	docker network create --subnet=172.12.0.0/16 node-net
+	docker run --name test-node-consensus --net node-net --ip ${DOCKER_IP} -itd test-node
+
+consenus_docker_example:
+	rm -rf addresses.txt
+	echo 172.12.0.10:3054 >> addresses.txt
+	echo 172.12.0.11:3054 >> addresses.txt
+	cd .. && docker-compose up -d
+
+clean: clean_docker
	rm -rf node-configs
	rm -rf database
+
+clean_docker:
+	docker rm test-consensus-node
+	docker rm node-node-1-1
+	docker rm node-node-2-1
+	docker network rm node-net
+	docker image rm test-node

From 28df58b02636a24d870807a20067f2084768aaec Mon Sep 17 00:00:00 2001
From: IAvecilla
Date: Fri, 12 Jan 2024 03:12:32 -0300
Subject: [PATCH 006/139] Update readme with docs to run consensus node in
 docker

---
 node/tools/README.md | 28 +++++++++++++++++++++++++---
 1 file changed, 25 insertions(+), 3 deletions(-)

diff --git a/node/tools/README.md b/node/tools/README.md
index ff01923b..2bdf636e 100644
--- a/node/tools/README.md
+++ b/node/tools/README.md
@@ -1,5 +1,27 @@
-# Running a test node
+# Running a test consensus node
+
+## Local
 
 1. Generate a file named `addresses.txt` in the root directory of the tools crate, containing node addresses in the format `IP:PORT`, with each address on a separate line.
-2. Run `make node-configs`. This command will establish a directory named `node-configs` and create a folder for each address listed in the `.txt` file, providing the necessary configuration files for the respective node.
-3. Execute `make run-node IP=`. The default value for this command would be `127.0.0.1:3054`. Note that running this command will take control of the terminal.
+2. Run `make node_configs`. This command will establish a directory named `node-configs` and create a folder for each address listed in the `.txt` file, providing the necessary configuration files for the respective node.
+3. Execute `make node IP=`. The default value for this command would be `127.0.0.1:3054`. Note that running this command will take control of the terminal.
+
+## Dockerized
+
+To get a standalone consensus node up and running in a docker container, just run the following command inside the tools crate:
+
+`make node_docker`
+
+This will create a container running a single node advancing views and finalizing blocks.
+
+To set up a simple example with two different nodes communicating with each other, running in different containers, run the following command:
+
+`make consenus_docker_example`
+
+This will set up two distinct containers, each hosting a consensus node. These nodes will be interlinked, progressing through views and finalizing blocks, achieving consensus between them.
+
+To clean all the state after running these commands, use:
+
+`make clean_docker`
+
+> This will delete the generated images and containers, requiring them to be regenerated.
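As a concrete illustration of the workflow the README above describes, a hypothetical session might look as follows. This is a sketch, not part of any patch: note that at this point in the series the example target is still spelled `consenus_docker_example` (the typo is only fixed in a later patch), and the container name below is the compose default that the `clean_docker` target expects.

    cd node/tools

    # Single dockerized node.
    make node_docker

    # Or: two nodes reaching consensus across containers.
    make consenus_docker_example

    # Follow one node's output to watch views advance.
    docker logs -f node-node-1-1

    # Remove the generated containers, network, and image.
    make clean_docker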
From 3e4b881b1895bd11710867dc83c2f5bfd2952e8d Mon Sep 17 00:00:00 2001
From: IAvecilla
Date: Mon, 15 Jan 2024 10:05:29 -0300
Subject: [PATCH 007/139] Delete unnecessary building dependency in compose
 file

---
 node/compose.yaml | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/node/compose.yaml b/node/compose.yaml
index 10d2032e..d14d1ab5 100644
--- a/node/compose.yaml
+++ b/node/compose.yaml
@@ -3,15 +3,14 @@ services:
   node-1:
     build: .
-    image: test-node
+    image: consensus-node
     networks:
       node_net:
         # This allows us to know the IP of the node-1 container to fill in the address in the config file
         # Only for test purposes, may be removed in the future
         ipv4_address: 172.12.0.10
   node-2:
-    image: test-node
-    depends_on: [node-1]
+    image: consensus-node
     networks:
       node_net:
         # This allows us to know the IP of the node-2 container to fill in the address in the config file
         # Only for test purposes, may be removed in the future
         ipv4_address: 172.12.0.11

From febcafcf3c583ceef561c6731e5afc58676389a7 Mon Sep 17 00:00:00 2001
From: IAvecilla
Date: Mon, 15 Jan 2024 10:06:31 -0300
Subject: [PATCH 008/139] Rename docker image

---
 node/tools/Makefile | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/node/tools/Makefile b/node/tools/Makefile
index 0941b131..41c1eb30 100644
--- a/node/tools/Makefile
+++ b/node/tools/Makefile
@@ -12,9 +12,9 @@ node_docker:
	rm -rf addresses.txt
	touch addresses.txt
	echo ${DOCKER_IP}:3054 >> addresses.txt
-	docker build -t test-node ../
+	docker build -t consensus-node ../
	docker network create --subnet=172.12.0.0/16 node-net
-	docker run --name test-node-consensus --net node-net --ip ${DOCKER_IP} -itd test-node
+	docker run --name test-node-consensus --net node-net --ip ${DOCKER_IP} -itd consensus-node
 
 consenus_docker_example:
	rm -rf addresses.txt
@@ -31,4 +31,4 @@ clean_docker:
	docker rm node-node-1-1
	docker rm node-node-2-1
	docker network rm node-net
-	docker image rm test-node
+	docker image rm consensus-node

From b2e7e9cc66a4888f78c84ece7fe7a463de57c2c9 Mon Sep 17 00:00:00 2001
From: IAvecilla
Date: Mon, 15 Jan 2024 10:13:15 -0300
Subject: [PATCH 009/139] Set container names manually in compose file

---
 node/compose.yaml   | 2 ++
 node/tools/Makefile | 6 +++---
 2 files changed, 5 insertions(+), 3 deletions(-)

diff --git a/node/compose.yaml b/node/compose.yaml
index d14d1ab5..19924690 100644
--- a/node/compose.yaml
+++ b/node/compose.yaml
@@ -4,6 +4,7 @@ services:
   node-1:
     build: .
     image: consensus-node
+    container_name: consensus-node-1
     networks:
       node_net:
         # This allows us to know the IP of the node-1 container to fill in the address in the config file
         # Only for test purposes, may be removed in the future
         ipv4_address: 172.12.0.10
   node-2:
     image: consensus-node
+    container_name: consensus-node-2
     networks:
       node_net:
         # This allows us to know the IP of the node-2 container to fill in the address in the config file
         # Only for test purposes, may be removed in the future
         ipv4_address: 172.12.0.11
diff --git a/node/tools/Makefile b/node/tools/Makefile
index 41c1eb30..334b13a2 100644
--- a/node/tools/Makefile
+++ b/node/tools/Makefile
@@ -14,7 +14,7 @@ node_docker:
	echo ${DOCKER_IP}:3054 >> addresses.txt
	docker build -t consensus-node ../
	docker network create --subnet=172.12.0.0/16 node-net
-	docker run --name test-node-consensus --net node-net --ip ${DOCKER_IP} -itd consensus-node
+	docker run --name test-consensus-node --net node-net --ip ${DOCKER_IP} -itd consensus-node
 
 consenus_docker_example:
	rm -rf addresses.txt
@@ -28,7 +28,7 @@ clean: clean_docker
 
 clean_docker:
	docker rm test-consensus-node
-	docker rm node-node-1-1
-	docker rm node-node-2-1
+	docker rm consensus-node-1
+	docker rm consensus-node-2
	docker network rm node-net
	docker image rm consensus-node

From 5e27e6ee20a8069a5b9fae18476b3be9f836540e Mon Sep 17 00:00:00 2001
From: IAvecilla
Date: Mon, 15 Jan 2024 12:11:13 -0300
Subject: [PATCH 010/139] Separate config directory for nodes running in
 docker

---
 node/Dockerfile     |  4 ++--
 node/tools/Makefile | 16 +++++++++-------
 2 files changed, 11 insertions(+), 9 deletions(-)

diff --git a/node/Dockerfile b/node/Dockerfile
index 34fef184..86769116 100644
--- a/node/Dockerfile
+++ b/node/Dockerfile
@@ -4,7 +4,7 @@ COPY . /node/
 WORKDIR /node
 RUN apt-get update && apt-get install -y libclang-dev
 RUN cargo build --release
-RUN cd tools && make node_configs
+RUN cd tools && make docker_node_configs
 
 # Runtime Stage
 FROM debian:stable-slim as runtime
@@ -12,7 +12,7 @@ FROM debian:stable-slim as runtime
 COPY --from=build /node/target/release/localnet_config /node/
 COPY --from=build /node/target/release/executor /node/
 COPY --from=build /node/tools/addresses.txt /node/
-COPY --from=build /node/tools/node-configs /node/
+COPY --from=build /node/tools/docker-config/node-configs /node/
 COPY --from=build /node/entrypoint.sh /node/
 
 WORKDIR /node
diff --git a/node/tools/Makefile b/node/tools/Makefile
index 334b13a2..2abde3a7 100644
--- a/node/tools/Makefile
+++ b/node/tools/Makefile
@@ -8,18 +8,19 @@ node:
 node_configs:
	cargo run --bin localnet_config -- --input-addrs addresses.txt --output-dir node-configs
 
+docker_node_configs:
+	cargo run --bin localnet_config -- --input-addrs docker-config/addresses.txt --output-dir docker-config/node-configs
+
 node_docker:
-	rm -rf addresses.txt
-	touch addresses.txt
-	echo ${DOCKER_IP}:3054 >> addresses.txt
-	docker build -t consensus-node ../
+	mkdir -p docker-config
+	cd docker-config && rm -rf addresses.txt && touch addresses.txt && echo ${DOCKER_IP}:3054 >> addresses.txt
+	docker build -t consensus-node ..
	docker network create --subnet=172.12.0.0/16 node-net
	docker run --name test-consensus-node --net node-net --ip ${DOCKER_IP} -itd consensus-node
 
 consenus_docker_example:
-	rm -rf addresses.txt
-	echo 172.12.0.10:3054 >> addresses.txt
-	echo 172.12.0.11:3054 >> addresses.txt
+	mkdir -p docker-config
+	cd docker-config && touch addresses.txt && echo 172.12.0.10:3054 >> addresses.txt && echo 172.12.0.11:3054 >> addresses.txt
	cd .. && docker-compose up -d
 
 clean: clean_docker
	rm -rf node-configs
	rm -rf database
 
 clean_docker:
+	rm -rf docker-config
	docker rm test-consensus-node
	docker rm consensus-node-1
	docker rm consensus-node-2

From 6e2236a50fb91c12f13826094fb96ca2c3111e27 Mon Sep 17 00:00:00 2001
From: IAvecilla
Date: Mon, 15 Jan 2024 12:31:48 -0300
Subject: [PATCH 011/139] Fix node configuration for docker consensus example

---
 node/tools/Makefile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/node/tools/Makefile b/node/tools/Makefile
index 2abde3a7..d7f285fd 100644
--- a/node/tools/Makefile
+++ b/node/tools/Makefile
@@ -20,7 +20,7 @@ node_docker:
 
 consenus_docker_example:
	mkdir -p docker-config
-	cd docker-config && touch addresses.txt && echo 172.12.0.10:3054 >> addresses.txt && echo 172.12.0.11:3054 >> addresses.txt
+	cd docker-config && rm -rf addresses.txt && touch addresses.txt && echo 172.12.0.10:3054 >> addresses.txt && echo 172.12.0.11:3054 >> addresses.txt
	cd .. && docker-compose up -d
 
 clean: clean_docker

From 810a4b52c85981f5abb169c14d83b32dab25c53f Mon Sep 17 00:00:00 2001
From: IAvecilla
Date: Mon, 15 Jan 2024 14:16:35 -0300
Subject: [PATCH 012/139] Improve command to run a node in a container

---
 node/tools/Makefile | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/node/tools/Makefile b/node/tools/Makefile
index d7f285fd..224558c9 100644
--- a/node/tools/Makefile
+++ b/node/tools/Makefile
@@ -13,10 +13,8 @@ docker_node_configs:
 
 node_docker:
	mkdir -p docker-config
-	cd docker-config && rm -rf addresses.txt && touch addresses.txt && echo ${DOCKER_IP}:3054 >> addresses.txt
-	docker build -t consensus-node ..
-	docker network create --subnet=172.12.0.0/16 node-net
-	docker run --name test-consensus-node --net node-net --ip ${DOCKER_IP} -itd consensus-node
+	cd docker-config && rm -rf addresses.txt && echo ${DOCKER_IP}:3054 >> addresses.txt
+	cd .. && docker-compose up -d node-1
 
 consenus_docker_example:
	mkdir -p docker-config

From 4088c0a2306f7e2df217276834d13e2394d80250 Mon Sep 17 00:00:00 2001
From: IAvecilla
Date: Mon, 15 Jan 2024 14:37:43 -0300
Subject: [PATCH 013/139] Generate the node configs in release mode

---
 node/tools/Makefile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/node/tools/Makefile b/node/tools/Makefile
index 224558c9..39050947 100644
--- a/node/tools/Makefile
+++ b/node/tools/Makefile
@@ -9,7 +9,7 @@ node_configs:
	cargo run --bin localnet_config -- --input-addrs addresses.txt --output-dir node-configs
 
 docker_node_configs:
-	cargo run --bin localnet_config -- --input-addrs docker-config/addresses.txt --output-dir docker-config/node-configs
+	cargo run --release --bin localnet_config -- --input-addrs docker-config/addresses.txt --output-dir docker-config/node-configs
 
 node_docker:
	mkdir -p docker-config

From 4475489e1c0e55fc85fa4bf84b058867fe1e983c Mon Sep 17 00:00:00 2001
From: IAvecilla
Date: Mon, 15 Jan 2024 14:38:02 -0300
Subject: [PATCH 014/139] Fix docker cleanup to force deletion

---
 node/tools/Makefile | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/node/tools/Makefile b/node/tools/Makefile
index 39050947..aae1619f 100644
--- a/node/tools/Makefile
+++ b/node/tools/Makefile
@@ -27,8 +27,8 @@ clean: clean_docker
 
 clean_docker:
	rm -rf docker-config
-	docker rm test-consensus-node
-	docker rm consensus-node-1
-	docker rm consensus-node-2
-	docker network rm node-net
-	docker image rm consensus-node
+	docker rm -f consensus-node
+	docker rm -f consensus-node-1
+	docker rm -f consensus-node-2
+	docker network rm -f node-net
+	docker image rm -f consensus-node

From 689b6d56b273c160ecb4be810f276f6261e6170b Mon Sep 17 00:00:00 2001
From: IAvecilla
Date: Mon, 15 Jan 2024 14:38:18 -0300
Subject: [PATCH 015/139] Remove unnecessary copies to container

---
 node/Dockerfile | 2 --
 1 file changed, 2 deletions(-)

diff --git a/node/Dockerfile b/node/Dockerfile
index 86769116..88a81d99 100644
--- a/node/Dockerfile
+++ b/node/Dockerfile
@@ -9,9 +9,7 @@ RUN cd tools && make docker_node_configs
 # Runtime Stage
 FROM debian:stable-slim as runtime
 
-COPY --from=build /node/target/release/localnet_config /node/
 COPY --from=build /node/target/release/executor /node/
-COPY --from=build /node/tools/addresses.txt /node/
 COPY --from=build /node/tools/docker-config/node-configs /node/
 COPY --from=build /node/entrypoint.sh /node/

From 99dc69131420690ad1babdbbaf7323bc28e08132 Mon Sep 17 00:00:00 2001
From: IAvecilla
Date: Tue, 16 Jan 2024 15:16:49 -0300
Subject: [PATCH 016/139] Add target dir to docker ignore

---
 .dockerignore | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 .dockerignore

diff --git a/.dockerignore b/.dockerignore
new file mode 100644
index 00000000..034b8445
--- /dev/null
+++ b/.dockerignore
@@ -0,0 +1 @@
+/node/target

From a3b270de5468035c0466923660959acb2fe4f7c5 Mon Sep 17 00:00:00 2001
From: IAvecilla
Date: Tue, 16 Jan 2024 15:17:29 -0300
Subject: [PATCH 017/139] Move every docker related config file to the project
 root

---
 node/Dockerfile => Dockerfile       |  6 ++---
 Makefile                            | 41 +++++++++++++++++++++++++++++
 node/compose.yaml => compose.yaml   |  0
 node/entrypoint.sh => entrypoint.sh |  0
 node/tools/Makefile                 | 34 ------------------------
 5 files changed, 44 insertions(+), 37 deletions(-)
 rename node/Dockerfile => Dockerfile (79%)
 create mode 100644 Makefile
 rename node/compose.yaml => compose.yaml (100%)
 rename node/entrypoint.sh => entrypoint.sh (100%)
 delete mode 100644 node/tools/Makefile

diff --git a/node/Dockerfile b/Dockerfile
similarity index 79%
rename from node/Dockerfile
rename to Dockerfile
index 88a81d99..4676cb3d 100644
--- a/node/Dockerfile
+++ b/Dockerfile
@@ -1,17 +1,17 @@
 # Build Stage
 FROM rust:1.72.1 as build
-COPY . /node/
+COPY /node/ Makefile /node/
 WORKDIR /node
 RUN apt-get update && apt-get install -y libclang-dev
 RUN cargo build --release
-RUN cd tools && make docker_node_configs
+RUN make docker_node_configs
 
 # Runtime Stage
 FROM debian:stable-slim as runtime
 
 COPY --from=build /node/target/release/executor /node/
 COPY --from=build /node/tools/docker-config/node-configs /node/
-COPY --from=build /node/entrypoint.sh /node/
+COPY entrypoint.sh /node/
 
 WORKDIR /node
 RUN chmod +x entrypoint.sh
diff --git a/Makefile b/Makefile
new file mode 100644
index 00000000..f4f28292
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,41 @@
+.PHONY: node node_configs docker_node_configs node_docker consenus_docker_example clean clean_docker
+IP?=127.0.0.1:3054
+DOCKER_IP=172.12.0.10
+EXECUTABLE_NODE_DIR=node/tools
+
+# Locally run commands
+
+node:
+	export RUST_LOG=INFO && cd ${EXECUTABLE_NODE_DIR}/node-configs/${IP} && cargo run -- --database ../../database/${IP}
+
+node_configs:
+	cd ${EXECUTABLE_NODE_DIR} && cargo run --bin localnet_config -- --input-addrs addresses.txt --output-dir node-configs
+
+# Docker commands
+
+docker_node_configs:
+	cd tools && cargo run --release --bin localnet_config -- --input-addrs docker-config/addresses.txt --output-dir docker-config/node-configs
+
+node_docker:
+	mkdir -p ${EXECUTABLE_NODE_DIR}/docker-config
+	cd ${EXECUTABLE_NODE_DIR}/docker-config && rm -rf addresses.txt && echo ${DOCKER_IP}:3054 >> addresses.txt
+	docker-compose up -d node-1
+
+consenus_docker_example:
+	mkdir -p ${EXECUTABLE_NODE_DIR}/docker-config
+	cd ${EXECUTABLE_NODE_DIR}/docker-config && rm -rf addresses.txt && touch addresses.txt && echo 172.12.0.10:3054 >> addresses.txt && echo 172.12.0.11:3054 >> addresses.txt
+	docker-compose up -d
+
+# Clean commands
+
+clean: clean_docker
+	rm -rf ${EXECUTABLE_NODE_DIR}/node-configs
+	rm -rf ${EXECUTABLE_NODE_DIR}/database
+
+clean_docker:
+	rm -rf ${EXECUTABLE_NODE_DIR}/docker-config
+	docker rm -f consensus-node
+	docker rm -f consensus-node-1
+	docker rm -f consensus-node-2
+	docker network rm -f node-net
+	docker image rm -f consensus-node
diff --git a/node/compose.yaml b/compose.yaml
similarity index 100%
rename from node/compose.yaml
rename to compose.yaml
diff --git a/node/entrypoint.sh b/entrypoint.sh
similarity index 100%
rename from node/entrypoint.sh
rename to entrypoint.sh
diff --git a/node/tools/Makefile b/node/tools/Makefile
deleted file mode 100644
index aae1619f..00000000
--- a/node/tools/Makefile
+++ /dev/null
@@ -1,34 +0,0 @@
-.PHONY: run-node node-configs clean
-IP?=127.0.0.1:3054
-DOCKER_IP=172.12.0.10
-
-node:
-	export RUST_LOG=INFO && cd node-configs/${IP} && cargo run -- --database ./database/${IP}
-
-node_configs:
-	cargo run --bin localnet_config -- --input-addrs addresses.txt --output-dir node-configs
-
-docker_node_configs:
-	cargo run --release --bin localnet_config -- --input-addrs docker-config/addresses.txt --output-dir docker-config/node-configs
-
-node_docker:
-	mkdir -p docker-config
-	cd docker-config && rm -rf addresses.txt && echo ${DOCKER_IP}:3054 >> addresses.txt
-	cd .. && docker-compose up -d node-1
-
-consenus_docker_example:
-	mkdir -p docker-config
-	cd docker-config && rm -rf addresses.txt && touch addresses.txt && echo 172.12.0.10:3054 >> addresses.txt && echo 172.12.0.11:3054 >> addresses.txt
-	cd .. && docker-compose up -d
-
-clean: clean_docker
-	rm -rf node-configs
-	rm -rf database
-
-clean_docker:
-	rm -rf docker-config
-	docker rm -f consensus-node
-	docker rm -f consensus-node-1
-	docker rm -f consensus-node-2
-	docker network rm -f node-net
-	docker image rm -f consensus-node

From f93890f8f4b37840ad72470fc047df05243b4281 Mon Sep 17 00:00:00 2001
From: Nacho Avecilla
Date: Wed, 17 Jan 2024 15:31:08 +0300
Subject: [PATCH 018/139] Fix typo in README
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Co-authored-by: Bruno França
---
 node/tools/README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/node/tools/README.md b/node/tools/README.md
index 2bdf636e..d049acb5 100644
--- a/node/tools/README.md
+++ b/node/tools/README.md
@@ -16,7 +16,7 @@ This will create a container running a single node advancing views and finalizin
 
 To set up a simple example with two different nodes communicating with each other, running in different containers, run the following command:
 
-`make consenus_docker_example`
+`make consensus_docker_example`
 
 This will set up two distinct containers, each hosting a consensus node. These nodes will be interlinked, progressing through views and finalizing blocks, achieving consensus between them.

From f3893cf8c8828819b66c9486c36e1512dd077e68 Mon Sep 17 00:00:00 2001
From: IAvecilla
Date: Wed, 17 Jan 2024 09:53:56 -0300
Subject: [PATCH 019/139] Fix typo in makefile command

---
 Makefile | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/Makefile b/Makefile
index f4f28292..5d99c0b1 100644
--- a/Makefile
+++ b/Makefile
@@ -1,4 +1,4 @@
-.PHONY: node node_configs docker_node_configs node_docker consenus_docker_example clean clean_docker
+.PHONY: node node_configs docker_node_configs node_docker consensus_docker_example clean clean_docker
 IP?=127.0.0.1:3054
 DOCKER_IP=172.12.0.10
 EXECUTABLE_NODE_DIR=node/tools
@@ -21,7 +21,7 @@ node_docker:
	cd ${EXECUTABLE_NODE_DIR}/docker-config && rm -rf addresses.txt && echo ${DOCKER_IP}:3054 >> addresses.txt
	docker-compose up -d node-1
 
-consenus_docker_example:
+consensus_docker_example:
	mkdir -p ${EXECUTABLE_NODE_DIR}/docker-config
	cd ${EXECUTABLE_NODE_DIR}/docker-config && rm -rf addresses.txt && touch addresses.txt && echo 172.12.0.10:3054 >> addresses.txt && echo 172.12.0.11:3054 >> addresses.txt
	docker-compose up -d

From 1c721c8f968ef36b733662472121b1d7ee3ba942 Mon Sep 17 00:00:00 2001
From: IAvecilla
Date: Wed, 17 Jan 2024 10:03:20 -0300
Subject: [PATCH 020/139] Make the path to makefile be the same for local and
 docker

---
 Dockerfile | 5 +++--
 Makefile   | 2 +-
 2 files changed, 4 insertions(+), 3 deletions(-)

diff --git a/Dockerfile b/Dockerfile
index 4676cb3d..dcdf1e63 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,10 +1,11 @@
 # Build Stage
 FROM rust:1.72.1 as build
-COPY /node/ Makefile /node/
+COPY /node/ /node/
+COPY Makefile .
 WORKDIR /node
 RUN apt-get update && apt-get install -y libclang-dev
 RUN cargo build --release
-RUN make docker_node_configs
+RUN cd .. && make docker_node_configs
 
 # Runtime Stage
 FROM debian:stable-slim as runtime
diff --git a/Makefile b/Makefile
index 5d99c0b1..eb662cf9 100644
--- a/Makefile
+++ b/Makefile
@@ -14,7 +14,7 @@ node_configs:
 # Docker commands
 
 docker_node_configs:
-	cd tools && cargo run --release --bin localnet_config -- --input-addrs docker-config/addresses.txt --output-dir docker-config/node-configs
+	cd ${EXECUTABLE_NODE_DIR} && cargo run --release --bin localnet_config -- --input-addrs docker-config/addresses.txt --output-dir docker-config/node-configs
 
 node_docker:
	mkdir -p ${EXECUTABLE_NODE_DIR}/docker-config
From 8cac863839eea49a41e3fa6cd218d3157db59549 Mon Sep 17 00:00:00 2001
From: Grzegorz Prusak
Date: Wed, 17 Jan 2024 15:16:52 +0100
Subject: [PATCH 021/139] disabled clippy lint

---
 node/Cargo.toml | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/node/Cargo.toml b/node/Cargo.toml
index 81f70c23..bf07a0f6 100644
--- a/node/Cargo.toml
+++ b/node/Cargo.toml
@@ -151,3 +151,5 @@ wildcard_dependencies = "warn"
 redundant_locals = "allow"
 needless_pass_by_ref_mut = "allow"
 box_default = "allow"
+# remove once fix to https://github.com/rust-lang/rust-clippy/issues/11764 is available on CI.
+map_identity = "allow"

From d24c8bcc66b6d1600d740a98e2617eedec36cf5a Mon Sep 17 00:00:00 2001
From: Grzegorz Prusak
Date: Wed, 17 Jan 2024 15:19:52 +0100
Subject: [PATCH 022/139] fixed lint, updated deps

---
 node/Cargo.lock                           | 451 ++++++++++++----------
 node/deny.toml                            |   3 -
 node/libs/roles/src/validator/testonly.rs |   2 +-
 3 files changed, 246 insertions(+), 210 deletions(-)

diff --git a/node/Cargo.lock b/node/Cargo.lock
index d632c84f..412466e5 100644
--- a/node/Cargo.lock
+++ b/node/Cargo.lock
@@ -69,9 +69,9 @@ checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299"
 
 [[package]]
 name = "anstream"
-version = "0.6.4"
+version = "0.6.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2ab91ebe16eb252986481c5b62f6098f3b698a45e34b5b98200cf20dd2484a44"
+checksum = "628a8f9bd1e24b4e0db2b4bc2d000b001e7dd032d54afa60a68836aeec5aa54a"
 dependencies = [
  "anstyle",
  "anstyle-parse",
@@ -89,37 +89,37 @@ checksum = "7079075b41f533b8c61d2a4d073c4676e1f8b249ff94a393b0595db304e0dd87"
 
 [[package]]
 name = "anstyle-parse"
-version = "0.2.2"
+version = "0.2.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "317b9a89c1868f5ea6ff1d9539a69f45dffc21ce321ac1fd1160dfa48c8e2140"
+checksum = "c75ac65da39e5fe5ab759307499ddad880d724eed2f6ce5b5e8a26f4f387928c"
 dependencies = [
  "utf8parse",
 ]
 
 [[package]]
 name = "anstyle-query"
-version = "1.0.0"
+version = "1.0.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5ca11d4be1bab0c8bc8734a9aa7bf4ee8316d462a08c6ac5052f888fef5b494b"
+checksum = "e28923312444cdd728e4738b3f9c9cac739500909bb3d3c94b43551b16517648"
 dependencies = [
- "windows-sys",
+ "windows-sys 0.52.0",
 ]
 
 [[package]]
 name = "anstyle-wincon"
-version = "3.0.1"
+version = "3.0.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f0699d10d2f4d628a98ee7b57b289abbc98ff3bad977cb3152709d4bf2330628"
+checksum = "1cd54b81ec8d6180e24654d0b371ad22fc3dd083b6ff8ba325b72e00c87660a7"
 dependencies = [
  "anstyle",
- "windows-sys",
+ "windows-sys 0.52.0",
 ]
 
 [[package]]
 name = "anyhow"
-version = "1.0.75"
+version = "1.0.79"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a4668cab20f66d8d020e1fbc0ebe47217433c1b6c8f2040faf858554e394ace6"
+checksum = "080e9890a082662b09c1ad45f567faeeb47f22b5fb23895fbe1e651e718e25ca"
 
 [[package]]
 name = "assert_matches"
@@ -129,13 +129,13 @@ checksum = "9b34d609dfbaf33d6889b2b7106d3ca345eacad44200913df5ba02bfd31d2ba9"
 
 [[package]]
 name = "async-trait"
-version = "0.1.74"
+version = "0.1.77"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a66537f1bb974b254c98ed142ff995236e81b9d0fe4db0575f46612cb15eb0f9"
+checksum = "c980ee35e870bd1a4d2c8294d4c04d0499e67bca1e4b5cefcc693c2fa00caea9"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.39",
+ "syn 2.0.48",
 ]
 
@@ -161,9 +161,9 @@ dependencies = [
 
 [[package]]
 name = "base64"
-version = "0.21.5"
+version = "0.21.7"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "35636a1494ede3b646cc98f74f8e62c773a38a659ebc777a2cf26b9b74171df9"
+checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567"
 
 [[package]]
 name = "base64ct"
@@ -195,7 +195,7 @@ dependencies = [
  "regex",
  "rustc-hash",
  "shlex",
- "syn 2.0.39",
+ "syn 2.0.48",
 ]
 
@@ -212,9 +212,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
 
 [[package]]
 name = "bitflags"
-version = "2.4.1"
+version = "2.4.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "327762f6e5a765692301e5bb513e0d9fef63be86bbc14528052b1cd3e6f03e07"
+checksum = "ed570934406eb16438a4e976b1b4500774099c13b8cb96eec99f620f05090ddf"
 
 [[package]]
 name = "bitmaps"
@@ -379,9 +379,9 @@ dependencies = [
 
 [[package]]
 name = "clang-sys"
-version = "1.6.1"
+version = "1.7.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c688fc74432808e3eb684cae8830a86be1d66a2bd58e1f248ed0960a590baf6f"
+checksum = "67523a3b4be3ce1989d607a828d036249522dd9c1c8de7f4dd2dae43a37369d1"
 dependencies = [
  "glob",
  "libc",
@@ -390,9 +390,9 @@ dependencies = [
 
 [[package]]
 name = "clap"
-version = "4.4.8"
+version = "4.4.18"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2275f18819641850fa26c89acc84d465c1bf91ce57bc2748b28c420473352f64"
+checksum = "1e578d6ec4194633722ccf9544794b71b1385c3c027efe0c55db226fc880865c"
 dependencies = [
  "clap_builder",
  "clap_derive",
@@ -400,9 +400,9 @@ dependencies = [
 
 [[package]]
 name = "clap_builder"
-version = "4.4.8"
+version = "4.4.18"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "07cdf1b148b25c1e1f7a42225e30a0d99a615cd4637eae7365548dd4529b95bc"
+checksum = "4df4df40ec50c46000231c914968278b1eb05098cf8f1b3a518a95030e71d1c7"
 dependencies = [
  "anstream",
  "anstyle",
@@ -419,7 +419,7 @@ dependencies = [
  "heck",
  "proc-macro2",
  "quote",
- "syn 2.0.39",
+ "syn 2.0.48",
 ]
 
@@ -441,15 +441,15 @@ source = "git+https://github.com/slowli/compile-fmt.git?rev=c6a41c846c9a6f70cdba
 
 [[package]]
 name = "const-oid"
-version = "0.9.5"
+version = "0.9.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "28c122c3980598d243d63d9a704629a2d748d101f278052ff068be5a4423ab6f"
+checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8"
 
 [[package]]
 name = "cpufeatures"
-version = "0.2.11"
+version = "0.2.12"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ce420fe07aecd3e67c5f910618fe65e94158f6dcc0adf44e00d69ce2bdfe0fd0"
+checksum = "53fe5e26ff1b7aef8bca9c6080520cfb8d9333c7568e1829cef191a9723e5504"
 dependencies = [
  "libc",
 ]
 
@@ -492,36 +492,28 @@ dependencies = [
 
 [[package]]
 name = "crossbeam-deque"
-version = "0.8.3"
+version = "0.8.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ce6fd6f855243022dcecf8702fef0c297d4338e226845fe067f6341ad9fa0cef"
+checksum = "613f8cc01fe9cf1a3eb3d7f488fd2fa8388403e97039e2f73692932e291a770d"
 dependencies = [
- "cfg-if",
  "crossbeam-epoch",
  "crossbeam-utils",
 ]
 
 [[package]]
 name = "crossbeam-epoch"
-version = "0.9.15"
+version = "0.9.18"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ae211234986c545741a7dc064309f67ee1e5ad243d0e48335adc0484d960bcc7"
+checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e"
 dependencies = [
- "autocfg",
- "cfg-if",
  "crossbeam-utils",
- "memoffset",
- "scopeguard",
 ]
 
 [[package]]
 name = "crossbeam-utils"
-version = "0.8.16"
+version = "0.8.19"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5a22b2d63d4d1dc0b7f1b6b2747dd0088008a9be28b6ddf0b1e7d335e3037294"
-dependencies = [
- "cfg-if",
-]
+checksum = "248e3bacc7dc6baa3b21e405ee045c3047101a49145e7e9eca583ab4c2ca5345"
 
 [[package]]
 name = "crypto-common"
@@ -568,7 +560,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.39",
+ "syn 2.0.48",
 ]
 
@@ -583,9 +575,9 @@ dependencies = [
 
 [[package]]
 name = "deranged"
-version = "0.3.9"
+version = "0.3.11"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0f32d04922c60427da6f9fef14d042d9edddef64cb9d4ce0d64d0685fbeb1fd3"
+checksum = "b42b6fa04a440b495c8b04d0e71b707c585f83cb9cb28cf8cd0d976c315e31b4"
 dependencies = [
  "powerfmt",
 ]
 
@@ -646,9 +638,9 @@ checksum = "a26ae43d7bcc3b814de94796a5e736d4029efb0ee900c12e2d54c993ad1a1e07"
 
 [[package]]
 name = "elsa"
-version = "1.9.0"
+version = "1.10.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "714f766f3556b44e7e4776ad133fcc3445a489517c25c704ace411bb14790194"
+checksum = "d98e71ae4df57d214182a2e5cb90230c0192c6ddfcaa05c36453d46a54713e10"
 dependencies = [
  "stable_deref_trait",
 ]
 
@@ -661,12 +653,12 @@ checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5"
 
 [[package]]
 name = "errno"
-version = "0.3.7"
+version = "0.3.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f258a7194e7f7c2a7837a8913aeab7fd8c383457034fa20ce4dd3dcb813e8eb8"
+checksum = "a258e46cdc063eb8519c00b9fc845fc47bcfca4130e2f08e88665ceda8474245"
 dependencies = [
  "libc",
- "windows-sys",
+ "windows-sys 0.52.0",
 ]
 
@@ -729,30 +721,30 @@ checksum = "a06f77d526c1a601b7c4cdd98f54b5eaabffc14d5f2f0296febdc7f357c6d3ba"
 
 [[package]]
 name = "futures-channel"
-version = "0.3.29"
+version = "0.3.30"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ff4dd66668b557604244583e3e1e1eada8c5c2e96a6d0d6653ede395b78bbacb"
+checksum = "eac8f7d7865dcb88bd4373ab671c8cf4508703796caa2b1985a9ca867b3fcb78"
 dependencies = [
  "futures-core",
 ]
 
 [[package]]
 name = "futures-core"
-version = "0.3.29"
+version = "0.3.30"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "eb1d22c66e66d9d72e1758f0bd7d4fd0bee04cad842ee34587d68c07e45d088c"
+checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d"
 
 [[package]]
 name = "futures-task"
-version = "0.3.29"
+version = "0.3.30"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "efd193069b0ddadc69c46389b740bbccdd97203899b48d09c5f7969591d6bae2"
+checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004"
 
 [[package]]
 name = "futures-util"
-version = "0.3.29"
+version = "0.3.30"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a19526d624e703a3179b3d322efec918b6246ea0fa51d41124525f00f1cc8104"
+checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48"
 dependencies = [
  "futures-core",
  "futures-task",
@@ -772,9 +764,9 @@ dependencies = [
 
 [[package]]
 name = "getrandom"
-version = "0.2.11"
+version = "0.2.12"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fe9006bed769170c11f845cf00c7c1e9092aeb3f268e007c3e760ac68008070f"
+checksum = "190092ea657667030ac6a35e305e62fc4dd69fd98ac98631e5d3a2b1575a12b5"
 dependencies = [
  "cfg-if",
  "libc",
@@ -835,11 +827,11 @@ checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70"
 
 [[package]]
 name = "home"
-version = "0.5.5"
+version = "0.5.9"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5444c27eef6923071f7ebcc33e3444508466a76f7a2b93da00ed6e19f30c1ddb"
+checksum = "e3d1354bf6b7235cb4a0576c2619fd4ed18183f689b12b006a0ee7329eeff9a5"
 dependencies = [
- "windows-sys",
+ "windows-sys 0.52.0",
 ]
 
@@ -855,9 +847,9 @@ dependencies = [
 
 [[package]]
 name = "http-body"
-version = "0.4.5"
+version = "0.4.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1"
+checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2"
 dependencies = [
  "bytes",
  "http",
@@ -878,9 +870,9 @@ checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9"
 
 [[package]]
 name = "hyper"
-version = "0.14.27"
+version = "0.14.28"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ffb1cfd654a8219eaef89881fdb3bb3b1cdc5fa75ded05d6933b2b382e395468"
+checksum = "bf96e135eb83a2a8ddf766e426a841d8ddd7449d5f00d34ea02b41d2f19eef80"
 dependencies = [
  "bytes",
  "futures-channel",
@@ -892,7 +884,7 @@ dependencies = [
  "httpdate",
  "itoa",
  "pin-project-lite",
- "socket2 0.4.10",
+ "socket2",
  "tokio",
  "tower-service",
  "tracing",
@@ -934,13 +926,13 @@ dependencies = [
 
 [[package]]
 name = "is-terminal"
-version = "0.4.9"
+version = "0.4.10"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b"
+checksum = "0bad00257d07be169d870ab665980b06cdb366d792ad690bf2e76876dc503455"
 dependencies = [
  "hermit-abi",
  "rustix",
- "windows-sys",
+ "windows-sys 0.52.0",
 ]
 
@@ -963,9 +955,9 @@ dependencies = [
 
 [[package]]
 name = "itoa"
-version = "1.0.9"
+version = "1.0.10"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "af150ab688ff2122fcef229be89cb50dd66af9e01a4ff320cc137eecc9bacc38"
+checksum = "b1a46d1a171d865aa5f83f92695765caa047a9b4cbae2cbf37dbd613a793fd4c"
 
 [[package]]
 name = "jobserver"
 version = "0.1.27"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "8c37f63953c4c63420ed5fd3d6d398c719489b9f872b9fa683262f8edd363c7d"
 dependencies = [
  "libc",
 ]
 
@@ -978,18 +970,18 @@ dependencies = [
 [[package]]
 name = "js-sys"
-version = "0.3.65"
+version = "0.3.67"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "54c0c35952f67de54bb584e9fd912b3023117cbafc0a77d8f3dee1fb5f572fe8"
+checksum = "9a1d36f1235bc969acba30b7f5990b864423a6068a10f7c90ae8f0112e3a59d1"
 dependencies = [
  "wasm-bindgen",
 ]
 
 [[package]]
 name = "keccak"
-version = "0.1.4"
+version = "0.1.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8f6d5ed8676d904364de097082f4e7d240b571b67989ced0240f08b7f966f940"
+checksum = "ecc2af9a1119c51f12a14607e783cb977bde58bc069ff0c3da1095e635d70654"
 dependencies = [
  "cpufeatures",
 ]
 
@@ -1008,18 +1000,18 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55"
 
 [[package]]
 name = "libc"
-version = "0.2.150"
+version = "0.2.152"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "89d92a4743f9a61002fae18374ed11e7973f530cb3a3255fb354818118b2203c"
+checksum = "13e3bf6590cbc649f4d1a3eefc9d5d6eb746f5200ffb04e5e142700b8faa56e7"
 
 [[package]]
 name = "libloading"
-version = "0.7.4"
+version = "0.8.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b67380fd3b2fbe7527a606e18729d21c6f3951633d0500574c4dc22d2d638b9f"
+checksum = "c571b676ddfc9a8c12f1f3d3085a7b163966a8fd8098a90640953ce5f6170161"
 dependencies = [
  "cfg-if",
- "winapi",
+ "windows-sys 0.48.0",
 ]
 
@@ -1040,9 +1032,9 @@ dependencies = [
 
 [[package]]
 name = "libz-sys"
-version = "1.1.12"
+version = "1.1.14"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d97137b25e321a73eef1418d1d5d2eda4d77e12813f8e6dead84bc52c5870a7b"
+checksum = "295c17e837573c8c821dbaeb3cceb3d745ad082f7572191409e69cbc1b3fd050"
 dependencies = [
  "cc",
  "pkg-config",
@@ -1051,29 +1043,29 @@ dependencies = [
 
 [[package]]
 name = "linkme"
-version = "0.3.17"
+version = "0.3.22"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "91ed2ee9464ff9707af8e9ad834cffa4802f072caad90639c583dd3c62e6e608"
+checksum = "8b53ad6a33de58864705954edb5ad5d571a010f9e296865ed43dc72a5621b430"
 dependencies = [
  "linkme-impl",
 ]
 
 [[package]]
 name = "linkme-impl"
-version = "0.3.17"
+version = "0.3.22"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ba125974b109d512fccbc6c0244e7580143e460895dfd6ea7f8bbb692fd94396"
+checksum = "04e542a18c94a9b6fcc7adb090fa3ba6b79ee220a16404f325672729f32a66ff"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.39",
+ "syn 2.0.48",
 ]
 
 [[package]]
 name = "linux-raw-sys"
-version = "0.4.11"
+version = "0.4.13"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "969488b55f8ac402214f3f5fd243ebb7206cf82de60d3172994707a4bcc2b829"
+checksum = "01cda141df6706de531b6c46c3a33ecca755538219bd484262fa09410c13539c"
 
 [[package]]
 name = "lock_api"
@@ -1111,7 +1103,7 @@ dependencies = [
  "proc-macro2",
  "quote",
  "regex-syntax 0.6.29",
- "syn 2.0.39",
+ "syn 2.0.48",
 ]
 
@@ -1144,18 +1136,9 @@ dependencies = [
 
 [[package]]
 name = "memchr"
-version = "2.6.4"
+version = "2.7.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f665ee40bc4a3c5590afb1e9677db74a508659dfd71e126420da8274909a0167"
-
-[[package]]
-name = "memoffset"
-version = "0.9.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5a634b1c61a95585bd15607c6ab0c4e5b226e695ff2800ba0cdccddf208c406c"
-dependencies = [
- "autocfg",
-]
+checksum = "523dc4f511e55ab87b694dc30d0f820d60906ef06413f93d4d7a1385599cc149"
 
 [[package]]
 name = "miette"
@@ -1177,7 +1160,7 @@ checksum = "49e7bc1560b95a3c4a25d03de42fe76ca718ab92d1a22a55b9b4cf67b3ae635c"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.39",
+ "syn 2.0.48",
 ]
 
@@ -1197,13 +1180,13 @@ dependencies = [
 
 [[package]]
 name = "mio"
-version = "0.8.9"
+version = "0.8.10"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3dce281c5e46beae905d4de1870d8b1509a9142b62eedf18b443b011ca8343d0"
+checksum = "8f3d0b296e374a4e6f3c7b0a1f5a51d748a0d34c85e7dc48fc3fa9a87657fe09"
 dependencies = [
  "libc",
  "wasi",
- "windows-sys",
+ "windows-sys 0.48.0",
 ]
 
@@ -1274,18 +1257,18 @@ dependencies = [
 
 [[package]]
 name = "object"
-version = "0.32.1"
+version = "0.32.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9cf5f9dd3933bd50a9e1f149ec995f39ae2c496d31fd772c1fd45ebc27e902b0"
+checksum = "a6a622008b6e321afc04970976f62ee297fdbaa6f95318ca343e3eebb9648441"
 dependencies = [
  "memchr",
 ]
 
 [[package]]
 name = "once_cell"
-version = "1.18.0"
+version = "1.19.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d"
+checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92"
 
 [[package]]
 name = "oorandom"
@@ -1346,7 +1329,7 @@ dependencies = [
  "libc",
  "redox_syscall",
  "smallvec",
- "windows-targets",
+ "windows-targets 0.48.5",
 ]
 
@@ -1382,7 +1365,7 @@ checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.39",
+ "syn 2.0.48",
 ]
 
@@ -1409,15 +1392,15 @@ dependencies = [
 
 [[package]]
 name = "pkg-config"
-version = "0.3.27"
+version = "0.3.29"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "26072860ba924cbfa98ea39c8c19b4dd6a4a25423dbdf219c1eca91aa0cf6964"
+checksum = "2900ede94e305130c13ddd391e0ab7cbaeb783945ae07a279c268cb05109c6cb"
 
 [[package]]
 name = "platforms"
-version = "3.2.0"
+version = "3.3.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "14e6ab3f592e6fb464fc9712d8d6e6912de6473954635fd76a589d832cffcbb0"
+checksum = "626dec3cac7cc0e1577a2ec3fc496277ec2baa084bebad95bb6fdbfae235f84c"
 
 [[package]]
 name = "plotters"
@@ -1494,19 +1477,19 @@ dependencies = [
 
 [[package]]
 name = "prettyplease"
-version = "0.2.15"
+version = "0.2.16"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ae005bd773ab59b4725093fd7df83fd7892f7d8eafb48dbd7de6e024e4215f9d"
+checksum = "a41cf62165e97c7f814d2221421dbb9afcbcdb0a88068e5ea206e19951c2cbb5"
 dependencies = [
  "proc-macro2",
- "syn 2.0.39",
+ "syn 2.0.48",
 ]
 
 [[package]]
 name = "proc-macro2"
-version = "1.0.70"
+version = "1.0.76"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "39278fbbf5fb4f646ce651690877f89d1c5811a3d4acb27700c1cb3cdb78fd3b"
+checksum = "95fc56cda0b5c3325f5fbbd7ff9fda9e02bb00bb3dac51252d2f1bfa1cb8cc8c"
 dependencies = [
  "unicode-ident",
 ]
 
@@ -1531,7 +1514,7 @@ checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.39",
+ "syn 2.0.48",
 ]
 
@@ -1561,7 +1544,7 @@ dependencies = [
  "prost",
  "prost-types",
  "regex",
- "syn 2.0.39",
+ "syn 2.0.48",
  "tempfile",
  "which",
 ]
 
@@ -1576,7 +1559,7 @@ dependencies = [
  "itertools 0.11.0",
  "proc-macro2",
  "quote",
- "syn 2.0.39",
+ "syn 2.0.48",
 ]
 
@@ -1642,9 +1625,9 @@ dependencies = [
 
 [[package]]
 name = "quote"
-version = "1.0.33"
+version = "1.0.35"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5267fca4496028628a95160fc423a33e8b2e6af8a5302579e322e4b520293cae"
+checksum = "291ec9ab5efd934aaf503a6466c5d5251535d108ee747472c3977cc5acc868ef"
 dependencies = [
  "proc-macro2",
 ]
 
@@ -1831,22 +1814,22 @@ dependencies = [
 
 [[package]]
 name = "rustix"
-version = "0.38.25"
+version = "0.38.30"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dc99bc2d4f1fed22595588a013687477aedf3cdcfb26558c559edb67b4d9b22e"
+checksum = "322394588aaf33c24007e8bb3238ee3e4c5c09c084ab32bc73890b99ff326bca"
 dependencies = [
- "bitflags 2.4.1",
+ "bitflags 2.4.2",
  "errno",
  "libc",
  "linux-raw-sys",
- "windows-sys",
+ "windows-sys 0.52.0",
 ]
 
 [[package]]
 name = "ryu"
-version = "1.0.15"
+version = "1.0.16"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1ad4cc8da4ef723ed60bced201181d83791ad433213d8c24efffda1eec85d741"
+checksum = "f98d2aa92eebf49b69786be48e4477826b256916e84a57ff2a4f21923b48eb4c"
 
 [[package]]
 name = "same-file"
@@ -1865,15 +1848,15 @@ checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49"
 
 [[package]]
 name = "semver"
-version = "1.0.20"
+version = "1.0.21"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "836fa6a3e1e547f9a2c4040802ec865b5d85f4014efe00555d7090a3dcaa1090"
+checksum = "b97ed7a9823b74f99c7742f5336af7be5ecd3eeafcb1507d1fa93347b1d589b0"
 
 [[package]]
 name = "serde"
-version = "1.0.193"
+version = "1.0.195"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "25dd9975e68d0cb5aa1120c288333fc98731bd1dd12f561e468ea4728c042b89"
+checksum = "63261df402c67811e9ac6def069e4786148c4563f4b50fd4bf30aa370d626b02"
 dependencies = [
  "serde_derive",
 ]
 
@@ -1890,20 +1873,20 @@ dependencies = [
 
 [[package]]
 name = "serde_derive"
-version = "1.0.193"
+version = "1.0.195"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "43576ca501357b9b071ac53cdc7da8ef0cbd9493d8df094cd821777ea6e894d3"
+checksum = "46fe8f8603d81ba86327b23a2e9cdf49e1255fb94a4c5f297f6ee0547178ea2c"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.39",
+ "syn 2.0.48",
 ]
 
 [[package]]
 name = "serde_json"
-version = "1.0.108"
+version = "1.0.111"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3d1c7e3eac408d115102c4c24ad393e0821bb3a5df4d506a80f85f7a742a526b"
+checksum = "176e46fa42316f18edd598015a5166857fc835ec732f5215eac6b7bdbf0a84f4"
 dependencies = [
  "itoa",
  "ryu",
@@ -1976,9 +1959,9 @@ dependencies = [
 
 [[package]]
 name = "smallvec"
-version = "1.11.2"
+version = "1.12.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4dccd0940a2dcdf68d092b8cbab7dc0ad8fa938bf95787e1b916b0e3d0e8e970"
+checksum = "2593d31f82ead8df961d8bd23a64c2ccf2eb5dd34b0a34bfb4dd54011c72009e"
 
 [[package]]
 name = "snow"
@@ -1996,16 +1979,6 @@ dependencies = [
  "subtle",
 ]
 
-[[package]]
-name = "socket2"
-version = "0.4.10"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9f7916fc008ca5542385b89a3d3ce689953c143e9304a9bf8beec1de48994c0d"
-dependencies = [
- "libc",
- "winapi",
-]
-
 [[package]]
 name = "socket2"
 version = "0.5.5"
@@ -2013,14 +1986,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "7b5fac59a5cb5dd637972e5fca70daf0523c9067fcdc4842f053dae04a18f8e9"
 dependencies = [
  "libc",
- "windows-sys",
+ "windows-sys 0.48.0",
 ]
 
 [[package]]
 name = "spki"
-version = "0.7.2"
+version = "0.7.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9d1e996ef02c474957d681f1b05213dfb0abab947b446a62d37770b23500184a"
+checksum = "d91ed6c858b01f942cd56b37a94b3e0a1798290327d1236e4d9cf4eaca44d29d"
 dependencies = [
  "base64ct",
  "der",
@@ -2057,9 +2030,9 @@ dependencies = [
 
 [[package]]
 name = "syn"
-version = "2.0.39"
+version = "2.0.48"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "23e78b90f2fcf45d3e842032ce32e3f2d1545ba6636271dcbf24fa306d87be7a"
+checksum = "0f3531638e407dfc0814761abb7c00a5b54992b849452a0646b7f65c9f770f3f"
 dependencies = [
  "proc-macro2",
  "quote",
@@ -2068,15 +2041,15 @@ dependencies = [
 
 [[package]]
 name = "tempfile"
-version = "3.8.1"
+version = "3.9.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7ef1adac450ad7f4b3c28589471ade84f25f731a7a0fe30d71dfa9f60fd808e5"
+checksum = "01ce4141aa927a6d1bd34a041795abd0db1cccba5d5f24b009f694bdf3a1f3fa"
 dependencies = [
  "cfg-if",
  "fastrand",
  "redox_syscall",
  "rustix",
- "windows-sys",
+ "windows-sys 0.52.0",
 ]
 
@@ -2096,27 +2069,27 @@ checksum = "2cfbe7811249c4c914b06141b8ac0f2cee2733fb883d05eb19668a45fc60c3d5"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.39",
+ "syn 2.0.48",
 ]
 
 [[package]]
 name = "thiserror"
-version = "1.0.50"
+version = "1.0.56"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f9a7210f5c9a7156bb50aa36aed4c95afb51df0df00713949448cf9e97d382d2"
+checksum = "d54378c645627613241d077a3a79db965db602882668f9136ac42af9ecb730ad"
 dependencies = [
  "thiserror-impl",
 ]
 
 [[package]]
 name = "thiserror-impl"
-version = "1.0.50"
+version = "1.0.56"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "266b2e40bc00e5a6c09c3584011e08b06f123c00362c92b975ba9843aaaa14b8"
+checksum = "fa0faa943b50f3db30a20aa7e265dbc66076993efed8463e8de414e5d06d3471"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.39",
+ "syn 2.0.48",
 ]
 
@@ -2140,9 +2113,9 @@ dependencies = [
 
 [[package]]
 name = "time"
-version = "0.3.30"
+version = "0.3.31"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c4a34ab300f2dee6e562c10a046fc05e358b29f9bf92277f30c3c8d82275f6f5"
+checksum = "f657ba42c3f86e7680e53c8cd3af8abbe56b5491790b46e22e19c0d57463583e"
 dependencies = [
  "deranged",
  "powerfmt",
@@ -2168,9 +2141,9 @@ dependencies = [
 
 [[package]]
 name = "tokio"
-version = "1.34.0"
+version = "1.35.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d0c014766411e834f7af5b8f4cf46257aab4036ca95e9d2c144a10f59ad6f5b9"
+checksum = "c89b4efa943be685f629b149f53829423f8f5531ea21249408e8e2f8671ec104"
 dependencies = [
  "backtrace",
  "bytes",
@@ -2180,9 +2153,9 @@ dependencies = [
  "parking_lot",
  "pin-project-lite",
  "signal-hook-registry",
- "socket2 0.5.5",
+ "socket2",
  "tokio-macros",
- "windows-sys",
+ "windows-sys 0.48.0",
 ]
 
@@ -2193,7 +2166,7 @@ checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.39",
+ "syn 2.0.48",
 ]
 
@@ -2221,7 +2194,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.39",
+ "syn 2.0.48",
 ]
 
@@ -2265,9 +2238,9 @@ dependencies = [
 
 [[package]]
 name = "try-lock"
-version = "0.2.4"
+version = "0.2.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3528ecfd12c466c6f163363caf2d02a71161dd5e1cc6ae7b34207ea2d42d81ed"
+checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b"
 
 [[package]]
 name = "typenum"
@@ -2353,7 +2326,7 @@ source = "git+https://github.com/matter-labs/vise.git?rev=1c9cc500e92cf9ea052b23
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.39",
+ "syn 2.0.48",
 ]
 
@@ -2383,9 +2356,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423"
 
 [[package]]
 name = "wasm-bindgen"
-version = "0.2.88"
+version = "0.2.90"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7daec296f25a1bae309c0cd5c29c4b260e510e6d813c286b19eaadf409d40fce"
+checksum = "b1223296a201415c7fad14792dbefaace9bd52b62d33453ade1c5b5f07555406"
 dependencies = [
  "cfg-if",
  "wasm-bindgen-macro",
@@ -2393,24 +2366,24 @@ dependencies = [
 
 [[package]]
 name = "wasm-bindgen-backend"
-version = "0.2.88"
+version = "0.2.90"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e397f4664c0e4e428e8313a469aaa58310d302159845980fd23b0f22a847f217"
+checksum = "fcdc935b63408d58a32f8cc9738a0bffd8f05cc7c002086c6ef20b7312ad9dcd"
 dependencies = [
  "bumpalo",
  "log",
  "once_cell",
  "proc-macro2",
  "quote",
- "syn 2.0.39",
+ "syn 2.0.48",
  "wasm-bindgen-shared",
 ]
 
 [[package]]
 name = "wasm-bindgen-macro"
-version = "0.2.88"
+version = "0.2.90"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5961017b3b08ad5f3fe39f1e79877f8ee7c23c5e5fd5eb80de95abc41f1f16b2"
+checksum = "3e4c238561b2d428924c49815533a8b9121c664599558a5d9ec51f8a1740a999"
 dependencies = [
  "quote",
  "wasm-bindgen-macro-support",
@@ -2418,28 +2391,28 @@ dependencies = [
 
 [[package]]
 name = "wasm-bindgen-macro-support"
-version = "0.2.88"
+version = "0.2.90"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c5353b8dab669f5e10f5bd76df26a9360c748f054f862ff5f3f8aae0c7fb3907"
+checksum = "bae1abb6806dc1ad9e560ed242107c0f6c84335f1749dd4e8ddb012ebd5e25a7"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.39",
+ "syn 2.0.48",
  "wasm-bindgen-backend",
  "wasm-bindgen-shared",
 ]
 
 [[package]]
 name = "wasm-bindgen-shared"
-version = "0.2.88"
+version = "0.2.90"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0d046c5d029ba91a1ed14da14dca44b68bf2f124cfbaf741c54151fdb3e0750b"
+checksum = "4d91413b1c31d7539ba5ef2451af3f0b833a005eb27a631cec32bc0635a8602b"
 
 [[package]]
 name = "web-sys"
-version = "0.3.65"
+version = "0.3.67"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5db499c5f66323272151db0e666cd34f78617522fb0c1604d31a27c50c206a85"
+checksum = "58cd2333b6e0be7a39605f0e255892fd7418a682d8da8fe042fe25128794d2ed"
 dependencies = [
  "js-sys",
  "wasm-bindgen",
@@ -2494,7 +2467,16 @@ version = "0.48.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9"
 dependencies = [
- "windows-targets",
+ "windows-targets 0.48.5",
 ]
+
+[[package]]
+name = "windows-sys"
+version = "0.52.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d"
+dependencies = [
+ "windows-targets 0.52.0",
+]
 
@@ -2503,13 +2485,28 @@ version = "0.48.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c"
 dependencies = [
- "windows_aarch64_gnullvm",
- "windows_aarch64_msvc",
- "windows_i686_gnu",
- "windows_i686_msvc",
- "windows_x86_64_gnu",
- "windows_x86_64_gnullvm",
- "windows_x86_64_msvc",
+ "windows_aarch64_gnullvm 0.48.5",
+ "windows_aarch64_msvc 0.48.5",
+ "windows_i686_gnu 0.48.5",
+ "windows_i686_msvc 0.48.5",
+ "windows_x86_64_gnu 0.48.5",
+ "windows_x86_64_gnullvm 0.48.5",
+ "windows_x86_64_msvc 0.48.5",
+]
+
+[[package]]
+name = "windows-targets"
+version = "0.52.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8a18201040b24831fbb9e4eb208f8892e1f50a37feb53cc7ff887feb8f50e7cd"
+dependencies = [
+ "windows_aarch64_gnullvm 0.52.0",
+ "windows_aarch64_msvc 0.52.0",
+ "windows_i686_gnu 0.52.0",
+ "windows_i686_msvc 0.52.0",
+ "windows_x86_64_gnu 0.52.0",
+ "windows_x86_64_gnullvm 0.52.0",
+ "windows_x86_64_msvc 0.52.0",
 ]
 
@@ -2518,42 +2515,84 @@ version = "0.48.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8"
 
+[[package]]
+name = "windows_aarch64_gnullvm"
+version = "0.52.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cb7764e35d4db8a7921e09562a0304bf2f93e0a51bfccee0bd0bb0b666b015ea"
+
 [[package]]
 name = "windows_aarch64_msvc"
 version = "0.48.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc"
 
+[[package]]
+name = "windows_aarch64_msvc"
+version = "0.52.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bbaa0368d4f1d2aaefc55b6fcfee13f41544ddf36801e793edbbfd7d7df075ef"
+
 [[package]]
 name = "windows_i686_gnu"
 version = "0.48.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e"
 
+[[package]]
+name = "windows_i686_gnu"
+version = "0.52.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a28637cb1fa3560a16915793afb20081aba2c92ee8af57b4d5f28e4b3e7df313"
+
 [[package]]
 name = "windows_i686_msvc"
 version = "0.48.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406"
 
+[[package]]
+name = "windows_i686_msvc"
+version = "0.52.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ffe5e8e31046ce6230cc7215707b816e339ff4d4d67c65dffa206fd0f7aa7b9a"
+
 [[package]]
 name = "windows_x86_64_gnu"
 version = "0.48.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e"
 
+[[package]]
+name = "windows_x86_64_gnu"
+version = "0.52.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3d6fa32db2bc4a2f5abeacf2b69f7992cd09dca97498da74a151a3132c26befd"
+
 [[package]]
 name = "windows_x86_64_gnullvm"
 version = "0.48.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc"
 
+[[package]]
+name = "windows_x86_64_gnullvm"
+version = "0.52.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1a657e1e9d3f514745a572a6846d3c7aa7dbe1658c056ed9c3344c4109a6949e"
+
 [[package]]
 name = "windows_x86_64_msvc"
 version = "0.48.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538"
 
+[[package]]
+name = "windows_x86_64_msvc"
+version = "0.52.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04"
+
 [[package]]
 name = "yansi"
 version = "0.5.1"
@@ -2577,7 +2616,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.39",
+ "syn 2.0.48",
 ]
 
@@ -2810,7 +2849,7 @@ dependencies = [
  "prost-reflect",
  "protox",
  "quote",
- "syn 2.0.39",
+ "syn 2.0.48",
 ]
 
diff --git a/node/deny.toml b/node/deny.toml
index fd63501a..f8535b33 100644
--- a/node/deny.toml
+++ b/node/deny.toml
@@ -52,9 +52,6 @@ skip = [
     { name = "regex-automata", version = "0.1.10" },
     { name = "regex-syntax", version = "0.6.29" },
 
-    # Old versions required by vise-exporter.
-    { name = "socket2", version = "0.4.10" },
-
     # Old versions required by pairing_ce & ff_ce.
     { name = "rand", version = "0.4" },
     { name = "syn", version = "1.0" },
diff --git a/node/libs/roles/src/validator/testonly.rs b/node/libs/roles/src/validator/testonly.rs
index 3f5a6832..fb62e749 100644
--- a/node/libs/roles/src/validator/testonly.rs
+++ b/node/libs/roles/src/validator/testonly.rs
@@ -80,7 +80,7 @@ impl PrepareQC {
     ) -> anyhow::Result {
         // Get the view number from the messages, they must all be equal.
         let view = signed_messages
-            .get(0)
+            .first()
             .context("Empty signed messages vector")?
             .msg
             .view;

From 9520c4c2affdaa61402c02681a75d665e9a05626 Mon Sep 17 00:00:00 2001
From: IAvecilla
Date: Thu, 18 Jan 2024 14:48:54 -0300
Subject: [PATCH 023/139] Change file name for the docker entrypoint and add
 comment to the script

---
 Dockerfile           | 6 +++---
 docker-entrypoint.sh | 6 ++++++
 entrypoint.sh        | 0
 3 files changed, 9 insertions(+), 3 deletions(-)
 create mode 100755 docker-entrypoint.sh
 mode change 100755 => 100644 entrypoint.sh

diff --git a/Dockerfile b/Dockerfile
index dcdf1e63..df341efc 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -12,11 +12,11 @@ FROM debian:stable-slim as runtime
 
 COPY --from=build /node/target/release/executor /node/
 COPY --from=build /node/tools/docker-config/node-configs /node/
-COPY entrypoint.sh /node/
+COPY docker-entrypoint.sh /node/
 
 WORKDIR /node
-RUN chmod +x entrypoint.sh
+RUN chmod +x docker-entrypoint.sh
 
-ENTRYPOINT ["./entrypoint.sh"]
+ENTRYPOINT ["./docker-entrypoint.sh"]
 
 EXPOSE 3054
diff --git a/docker-entrypoint.sh b/docker-entrypoint.sh
new file mode 100755
index 00000000..ee53e262
--- /dev/null
+++ b/docker-entrypoint.sh
@@ -0,0 +1,6 @@
+#!/bin/bash
+# This file works as the entrypoint of the docker container, running the node binary copied inside of it.
+
+cd $(hostname -i):3054
+export RUST_LOG=INFO
+../executor
diff --git a/entrypoint.sh b/entrypoint.sh
old mode 100755
new mode 100644
"regex-syntax", version = "0.6.29" }, - # Old versions required by vise-exporter. - { name = "socket2", version = "0.4.10" }, - # Old versions required by pairing_ce & ff_ce. { name = "rand", version = "0.4" }, { name = "syn", version = "1.0" }, diff --git a/node/libs/roles/src/validator/testonly.rs b/node/libs/roles/src/validator/testonly.rs index 3f5a6832..fb62e749 100644 --- a/node/libs/roles/src/validator/testonly.rs +++ b/node/libs/roles/src/validator/testonly.rs @@ -80,7 +80,7 @@ impl PrepareQC { ) -> anyhow::Result { // Get the view number from the messages, they must all be equal. let view = signed_messages - .get(0) + .first() .context("Empty signed messages vector")? .msg .view; From 9520c4c2affdaa61402c02681a75d665e9a05626 Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Thu, 18 Jan 2024 14:48:54 -0300 Subject: [PATCH 023/139] Change file name for the docker entrypoint and add comment to the script --- Dockerfile | 6 +++--- docker-entrypoint.sh | 6 ++++++ entrypoint.sh | 0 3 files changed, 9 insertions(+), 3 deletions(-) create mode 100755 docker-entrypoint.sh mode change 100755 => 100644 entrypoint.sh diff --git a/Dockerfile b/Dockerfile index dcdf1e63..df341efc 100644 --- a/Dockerfile +++ b/Dockerfile @@ -12,11 +12,11 @@ FROM debian:stable-slim as runtime COPY --from=build /node/target/release/executor /node/ COPY --from=build /node/tools/docker-config/node-configs /node/ -COPY entrypoint.sh /node/ +COPY docker-entrypoint.sh /node/ WORKDIR /node -RUN chmod +x entrypoint.sh +RUN chmod +x docker-entrypoint.sh -ENTRYPOINT ["./entrypoint.sh"] +ENTRYPOINT ["./docker-entrypoint.sh"] EXPOSE 3054 diff --git a/docker-entrypoint.sh b/docker-entrypoint.sh new file mode 100755 index 00000000..ee53e262 --- /dev/null +++ b/docker-entrypoint.sh @@ -0,0 +1,6 @@ +#!/bin/bash +# This file works as an entrypoint of the docker container running the node binary copied inside of it. + +cd $(hostname -i):3054 +export RUST_LOG=INFO +../executor diff --git a/entrypoint.sh b/entrypoint.sh old mode 100755 new mode 100644 From 54c72ce148674007f81194840d4c7d40ccea8319 Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Thu, 18 Jan 2024 14:49:42 -0300 Subject: [PATCH 024/139] Move version to latest for rust image --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index df341efc..00e3ba3f 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,5 @@ # Build Stage -FROM rust:1.72.1 as build +FROM rust:latest as build COPY /node/ /node/ COPY Makefile . WORKDIR /node From 13e61e10a1deabde474d5f0b7e1c443fbb068002 Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Thu, 18 Jan 2024 14:51:20 -0300 Subject: [PATCH 025/139] Change name of command and dir generation for node config --- Dockerfile | 2 +- Makefile | 14 +++++++------- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/Dockerfile b/Dockerfile index 00e3ba3f..3182b42e 100644 --- a/Dockerfile +++ b/Dockerfile @@ -11,7 +11,7 @@ RUN cd .. 
&& make docker_node_configs FROM debian:stable-slim as runtime COPY --from=build /node/target/release/executor /node/ -COPY --from=build /node/tools/docker-config/node-configs /node/ +COPY --from=build /node/tools/docker-config/nodes-config /node/ COPY docker-entrypoint.sh /node/ WORKDIR /node diff --git a/Makefile b/Makefile index eb662cf9..484b3ad8 100644 --- a/Makefile +++ b/Makefile @@ -1,4 +1,4 @@ -.PHONY: node node_configs docker_node_configs node_docker consensus_docker_example clean clean_docker +.PHONY: node nodes_config docker_node_configs node_docker consensus_docker_example clean clean_docker IP?=127.0.0.1:3054 DOCKER_IP=172.12.0.10 EXECUTABLE_NODE_DIR=node/tools @@ -6,15 +6,16 @@ EXECUTABLE_NODE_DIR=node/tools # Locally run commands node: - export RUST_LOG=INFO && cd ${EXECUTABLE_NODE_DIR}/node-configs/${IP} && cargo run -- --database ../../database/${IP} + export RUST_LOG=INFO && cd ${EXECUTABLE_NODE_DIR}/nodes-config/${IP} && cargo run -- --database ../../database/${IP} -node_configs: - cd ${EXECUTABLE_NODE_DIR} && cargo run --bin localnet_config -- --input-addrs addresses.txt --output-dir node-configs +nodes_config: + cd ${EXECUTABLE_NODE_DIR} && cargo run --bin localnet_config -- --input-addrs addresses.txt --output-dir nodes-config # Docker commands +# This command will run inside the Dockerfile and it's not necessary to use it outside there. docker_node_configs: - cd ${EXECUTABLE_NODE_DIR} && cargo run --release --bin localnet_config -- --input-addrs docker-config/addresses.txt --output-dir docker-config/node-configs + cd ${EXECUTABLE_NODE_DIR} && cargo run --release --bin localnet_config -- --input-addrs docker-config/addresses.txt --output-dir docker-config/nodes-config node_docker: mkdir -p ${EXECUTABLE_NODE_DIR}/docker-config @@ -29,12 +30,11 @@ consensus_docker_example: # Clean commands clean: clean_docker - rm -rf ${EXECUTABLE_NODE_DIR}/node-configs + rm -rf ${EXECUTABLE_NODE_DIR}/nodes-config rm -rf ${EXECUTABLE_NODE_DIR}/database clean_docker: rm -rf ${EXECUTABLE_NODE_DIR}/docker-config - docker rm -f consensus-node docker rm -f consensus-node-1 docker rm -f consensus-node-2 docker network rm -f node-net From 3b8d2992b85cac51486baa602921b311754f92d6 Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Thu, 18 Jan 2024 14:51:42 -0300 Subject: [PATCH 026/139] Add command to stop dockerized nodes --- Makefile | 3 +++ 1 file changed, 3 insertions(+) diff --git a/Makefile b/Makefile index 484b3ad8..9943dc2d 100644 --- a/Makefile +++ b/Makefile @@ -27,6 +27,9 @@ consensus_docker_example: cd ${EXECUTABLE_NODE_DIR}/docker-config && rm -rf addresses.txt && touch addresses.txt && echo 172.12.0.10:3054 >> addresses.txt && echo 172.12.0.11:3054 >> addresses.txt docker-compose up -d +stop_docker_nodes: + docker stop consensus-node-1 consensus-node-2 + # Clean commands clean: clean_docker From 2d09c1d7c444a0638862fddc0e91258da17d8f98 Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Thu, 18 Jan 2024 14:52:19 -0300 Subject: [PATCH 027/139] Add example file with local address for node configuration --- node/tools/addresses.txt | 1 + 1 file changed, 1 insertion(+) create mode 100644 node/tools/addresses.txt diff --git a/node/tools/addresses.txt b/node/tools/addresses.txt new file mode 100644 index 00000000..550f7a82 --- /dev/null +++ b/node/tools/addresses.txt @@ -0,0 +1 @@ +127.0.0.1:3054 From c287c56be0f3875428151d77e92298938db5d1fb Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Thu, 18 Jan 2024 14:52:54 -0300 Subject: [PATCH 028/139] Update README with new updates --- 
 node/tools/README.md | 56 ++++++++++++++++++++++++++++++++------------
 1 file changed, 41 insertions(+), 15 deletions(-)

diff --git a/node/tools/README.md b/node/tools/README.md
index d049acb5..64ce9407 100644
--- a/node/tools/README.md
+++ b/node/tools/README.md
@@ -1,27 +1,53 @@
-# Running a test consensus node
+# Running Test Consensus Nodes
 
-## Local
+## Local Setup
 
-1. Generate a file named `addresses.txt` in the root directory of the tools crate, containing node addresses in the format `IP:PORT`, with each address on a separate line.
-2. Run `make node_configs`. This command will establish a directory named `node-configs` and create a folder for each address listed in the `.txt` file, providing the necessary configuration files for the respective node.
-3. Execute `make node IP=<NODE_IP>`. The default value for this command would be `127.0.0.1:3054`. Note that running this command will take control of the terminal.
+1. Edit the `addresses.txt` file located in the root directory of the tools crate. This file contains node addresses in the format `IP:PORT`. For a single node, use the example file. To run multiple nodes communicating with each other, write each node address on a separate line. This will run one node per address.
+
+2. Move to the project root (era-consensus) and execute the following commands:
 
-## Dockerized
+   ```bash
+   make nodes_config
+   ```
 
-To get up a standalone consensus node running in a docker container just run the following command inside the tools crate:
+   This command establishes a directory named `nodes-config` and creates a folder for each address listed in the `.txt` file, providing the necessary configuration files for the respective node.
+
+   ```bash
+   make node IP=<NODE_IP>
+   ```
 
-`make node_docker`
+   The default value for this command is `127.0.0.1:3054`. Note that running this command will take control of the terminal.
 
-This will create a container running a single node advancing views and finalizing blocks.
+## Dockerized Setup
 
-To set up a simple example with two different nodes communicating with each other in running in different containers run the following command:
+To launch a standalone consensus node in a Docker container, run the following command in the project root (era-consensus):
 
-`make consensus_docker_example`
+```bash
+make node_docker
+```
 
-This will set up two distinct containers, each hosting a consensus node. These nodes will be interlinked, progressing through views and finalizing blocks achieving consensus between them.
+This command creates a container running a single node that advances views and finalizes blocks.
 
-To clean all the state after running these commands use:
+For a simple example with two nodes communicating in different containers, use:
 
-`make clean_docker`
+```bash
+make consensus_docker_example
+```
 
-> This will delete the generated images and containers, requiring them to be regenerated.
+This sets up two containers, each hosting a consensus node, interlinked and progressing through views to finalize blocks, achieving consensus between them.
+
+To stop the node containers, use:
+
+```bash
+make stop_docker_nodes
+```
+
+When started again, a node resumes from the last block it viewed in the previous session.
+
+To clean up all state after running these commands, use:
+
+```bash
+make clean_docker
+```
+
+> This deletes the generated images and containers, requiring regeneration.
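As a quick illustration of the local multi-node setup described in the README above, an `addresses.txt` for three nodes on one machine could look like this (the extra ports are arbitrary examples, not taken from the patches; any free ports work):

```
127.0.0.1:3054
127.0.0.1:3055
127.0.0.1:3056
```

Running `make nodes_config` against such a file generates one configuration folder per line under `nodes-config/`, and `make node IP=127.0.0.1:3055` would then start the second node.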
From e66cec9af7d87a9bf6af53edcb8b2e45e5ac341c Mon Sep 17 00:00:00 2001
From: IAvecilla
Date: Thu, 18 Jan 2024 15:00:23 -0300
Subject: [PATCH 029/139] Write a general overview in the README

---
 node/tools/README.md | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/node/tools/README.md b/node/tools/README.md
index 64ce9407..833ce1ba 100644
--- a/node/tools/README.md
+++ b/node/tools/README.md
@@ -1,5 +1,7 @@
 # Running Test Consensus Nodes
 
+These instructions guide you through the process of setting up and running a test consensus node in both local and Dockerized environments. Additionally, examples are provided to demonstrate how to run two or more nodes, communicating and reaching consensus between them.
+
 ## Local Setup

From eb7077ef516e5e56c7257f157a338ef863dda07e Mon Sep 17 00:00:00 2001
From: IAvecilla
Date: Fri, 19 Jan 2024 19:36:43 -0300
Subject: [PATCH 030/139] Add tonic as dependency

---
 node/Cargo.lock       | 267 +++++++++++++++++++++++++++++++++++++++++-
 node/Cargo.toml       |   2 +
 node/tools/Cargo.toml |   6 +
 3 files changed, 273 insertions(+), 2 deletions(-)

diff --git a/node/Cargo.lock b/node/Cargo.lock
index 412466e5..2000b635 100644
--- a/node/Cargo.lock
+++ b/node/Cargo.lock
@@ -127,6 +127,28 @@ version = "1.5.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "9b34d609dfbaf33d6889b2b7106d3ca345eacad44200913df5ba02bfd31d2ba9"
 
+[[package]]
+name = "async-stream"
+version = "0.3.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cd56dd203fef61ac097dd65721a419ddccb106b2d2b70ba60a6b529f03961a51"
+dependencies = [
+ "async-stream-impl",
+ "futures-core",
+ "pin-project-lite",
+]
+
+[[package]]
+name = "async-stream-impl"
+version = "0.3.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 2.0.48",
+]
+
 [[package]]
 name = "async-trait"
 version = "0.1.77"
@@ -144,6 +166,51 @@ version = "1.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"
 
+[[package]]
+name = "axum"
+version = "0.6.20"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3b829e4e32b91e643de6eafe82b1d90675f5874230191a4ffbc1b336dec4d6bf"
+dependencies = [
+ "async-trait",
+ "axum-core",
+ "bitflags 1.3.2",
+ "bytes",
+ "futures-util",
+ "http",
+ "http-body",
+ "hyper",
+ "itoa",
+ "matchit",
+ "memchr",
+ "mime",
+ "percent-encoding",
+ "pin-project-lite",
+ "rustversion",
+ "serde",
+ "sync_wrapper",
+ "tower",
+ "tower-layer",
+ "tower-service",
+]
+
+[[package]]
+name = "axum-core"
+version = "0.3.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "759fa577a247914fd3f7f76d62972792636412fbfd634cd452f6a385a74d2d2c"
+dependencies = [
+ "async-trait",
+ "bytes",
+ "futures-util",
+ "http",
+ "http-body",
+ "mime",
+ "rustversion",
+ "tower-layer",
+ "tower-service",
+]
+
 [[package]]
 name = "backtrace"
 version = "0.3.69"
@@ -734,6 +801,12 @@ version = "0.3.30"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = 
"dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d" +[[package]] +name = "futures-sink" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5" + [[package]] name = "futures-task" version = "0.3.30" @@ -795,12 +868,37 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" +[[package]] +name = "h2" +version = "0.3.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb2c4422095b67ee78da96fbb51a4cc413b3b25883c7717ff7ca1ab31022c9c9" +dependencies = [ + "bytes", + "fnv", + "futures-core", + "futures-sink", + "futures-util", + "http", + "indexmap 2.1.0", + "slab", + "tokio", + "tokio-util", + "tracing", +] + [[package]] name = "half" version = "1.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eabb4a44450da02c90444cf74558da904edde8fb4e9035a9a6a4e15445af0bd7" +[[package]] +name = "hashbrown" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" + [[package]] name = "hashbrown" version = "0.14.3" @@ -878,6 +976,7 @@ dependencies = [ "futures-channel", "futures-core", "futures-util", + "h2", "http", "http-body", "httparse", @@ -891,6 +990,18 @@ dependencies = [ "want", ] +[[package]] +name = "hyper-timeout" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbb958482e8c7be4bc3cf272a766a2b0bf1a6755e7a6ae777f017a31d11b13b1" +dependencies = [ + "hyper", + "pin-project-lite", + "tokio", + "tokio-io-timeout", +] + [[package]] name = "im" version = "15.1.0" @@ -905,6 +1016,16 @@ dependencies = [ "version_check", ] +[[package]] +name = "indexmap" +version = "1.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" +dependencies = [ + "autocfg", + "hashbrown 0.12.3", +] + [[package]] name = "indexmap" version = "2.1.0" @@ -912,7 +1033,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d530e1a18b1cb4c484e6e34556a0d948706958449fca0cab753d649f2bce3d1f" dependencies = [ "equivalent", - "hashbrown", + "hashbrown 0.14.3", ] [[package]] @@ -1134,6 +1255,12 @@ dependencies = [ "regex-automata 0.1.10", ] +[[package]] +name = "matchit" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94" + [[package]] name = "memchr" version = "2.7.1" @@ -1163,6 +1290,12 @@ dependencies = [ "syn 2.0.48", ] +[[package]] +name = "mime" +version = "0.3.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" + [[package]] name = "minimal-lexical" version = "0.2.1" @@ -1338,6 +1471,12 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" +[[package]] +name = "percent-encoding" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" + [[package]] name = "petgraph" version = "0.6.4" @@ -1345,7 +1484,7 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "e1d3afd2628e69da2be385eb6f2fd57c8ac7977ceeff6dc166ff1657b0e386a9" dependencies = [ "fixedbitset", - "indexmap", + "indexmap 2.1.0", ] [[package]] @@ -1825,6 +1964,12 @@ dependencies = [ "windows-sys 0.52.0", ] +[[package]] +name = "rustversion" +version = "1.0.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ffc183a10b4478d04cbbbfc96d0873219d962dd5accaff2ffbd4ceb7df837f4" + [[package]] name = "ryu" version = "1.0.16" @@ -1957,6 +2102,15 @@ dependencies = [ "typenum", ] +[[package]] +name = "slab" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" +dependencies = [ + "autocfg", +] + [[package]] name = "smallvec" version = "1.12.0" @@ -2039,6 +2193,12 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "sync_wrapper" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" + [[package]] name = "tempfile" version = "3.9.0" @@ -2158,6 +2318,16 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "tokio-io-timeout" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "30b74022ada614a1b4834de765f9bb43877f910cc8ce4be40e89042c9223a8bf" +dependencies = [ + "pin-project-lite", + "tokio", +] + [[package]] name = "tokio-macros" version = "2.2.0" @@ -2169,6 +2339,97 @@ dependencies = [ "syn 2.0.48", ] +[[package]] +name = "tokio-stream" +version = "0.1.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "397c988d37662c7dda6d2208364a706264bf3d6138b11d436cbac0ad38832842" +dependencies = [ + "futures-core", + "pin-project-lite", + "tokio", +] + +[[package]] +name = "tokio-util" +version = "0.7.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5419f34732d9eb6ee4c3578b7989078579b7f039cbbb9ca2c4da015749371e15" +dependencies = [ + "bytes", + "futures-core", + "futures-sink", + "pin-project-lite", + "tokio", + "tracing", +] + +[[package]] +name = "tonic" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d560933a0de61cf715926b9cac824d4c883c2c43142f787595e48280c40a1d0e" +dependencies = [ + "async-stream", + "async-trait", + "axum", + "base64", + "bytes", + "h2", + "http", + "http-body", + "hyper", + "hyper-timeout", + "percent-encoding", + "pin-project", + "prost", + "tokio", + "tokio-stream", + "tower", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tonic-build" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d021fc044c18582b9a2408cd0dd05b1596e3ecdb5c4df822bb0183545683889" +dependencies = [ + "prettyplease", + "proc-macro2", + "prost-build", + "quote", + "syn 2.0.48", +] + +[[package]] +name = "tower" +version = "0.4.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" +dependencies = [ + "futures-core", + "futures-util", + "indexmap 1.9.3", + "pin-project", + "pin-project-lite", + "rand 0.8.5", + "slab", + "tokio", + "tokio-util", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tower-layer" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"c20c8dbed6283a09604c3e69b4b7eeb54e298b8a600d4d5ecb5ad39de609f1d0" + [[package]] name = "tower-service" version = "0.3.2" @@ -2795,6 +3056,8 @@ dependencies = [ "serde_json", "tempfile", "tokio", + "tonic", + "tonic-build", "tracing", "tracing-subscriber", "vise-exporter", diff --git a/node/Cargo.toml b/node/Cargo.toml index bf07a0f6..a938ca91 100644 --- a/node/Cargo.toml +++ b/node/Cargo.toml @@ -82,6 +82,8 @@ time = "0.3.23" tokio = { version = "1.34.0", features = ["full"] } tracing = { version = "0.1.37", features = ["attributes"] } tracing-subscriber = { version = "0.3.16", features = ["env-filter", "fmt"] } +tonic = "0.10.2" +tonic-build = "0.10" # Note that "bench" profile inherits from "release" profile and # "test" profile inherits from "dev" profile. diff --git a/node/tools/Cargo.toml b/node/tools/Cargo.toml index c2a67f0a..566ab4f4 100644 --- a/node/tools/Cargo.toml +++ b/node/tools/Cargo.toml @@ -28,12 +28,14 @@ tokio.workspace = true tracing.workspace = true tracing-subscriber.workspace = true vise-exporter.workspace = true +tonic.workspace = true [dev-dependencies] tempfile.workspace = true [build-dependencies] zksync_protobuf_build.workspace = true +tonic-build.workspace = true [lints] workspace = true @@ -41,3 +43,7 @@ workspace = true [[bin]] name = "executor" path = "src/main.rs" + +[[bin]] +name = "client" +path = "src/rpc/client.rs" From 058418ef82bc2c3edafb8848a99d37b63b0e0558 Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Fri, 19 Jan 2024 19:40:58 -0300 Subject: [PATCH 031/139] Create test proto file with helathcheck endpoint --- node/tools/build.rs | 5 ++++- node/tools/src/proto_test/node.proto | 13 +++++++++++++ 2 files changed, 17 insertions(+), 1 deletion(-) create mode 100644 node/tools/src/proto_test/node.proto diff --git a/node/tools/build.rs b/node/tools/build.rs index e4bba2bd..7767627c 100644 --- a/node/tools/build.rs +++ b/node/tools/build.rs @@ -1,5 +1,5 @@ //! Generates rust code from protobufs. 
-fn main() {
+fn main() -> Result<(), Box<dyn std::error::Error>> {
     zksync_protobuf_build::Config {
         input_root: "src/proto".into(),
         proto_root: "zksync/tools".into(),
@@ -9,4 +9,7 @@ fn main() {
     }
     .generate()
     .unwrap();
+
+    tonic_build::compile_protos("src/proto_test/node.proto")?;
+    Ok(())
 }

diff --git a/node/tools/src/proto_test/node.proto b/node/tools/src/proto_test/node.proto
new file mode 100644
index 00000000..54f76a94
--- /dev/null
+++ b/node/tools/src/proto_test/node.proto
@@ -0,0 +1,13 @@
+syntax = "proto3";
+package node;
+
+service Node {
+  rpc HealthCheck (HealthCheckRequest) returns (HealthCheckResponse);
+}
+
+message HealthCheckRequest {
+}
+
+message HealthCheckResponse {
+  string message = 1;
+}

From f3238735b304c1a253e2a37f3ab4be6d59274081 Mon Sep 17 00:00:00 2001
From: IAvecilla
Date: Fri, 19 Jan 2024 19:56:39 -0300
Subject: [PATCH 032/139] Add basic rpc server implementation

---
 node/tools/src/rpc/server.rs | 47 ++++++++++++++++++++++++++++++++++++
 1 file changed, 47 insertions(+)
 create mode 100644 node/tools/src/rpc/server.rs

diff --git a/node/tools/src/rpc/server.rs b/node/tools/src/rpc/server.rs
new file mode 100644
index 00000000..cc90ef2f
--- /dev/null
+++ b/node/tools/src/rpc/server.rs
@@ -0,0 +1,47 @@
+use tonic::{transport::Server, Request, Response, Status};
+
+use node::node_server::{Node, NodeServer};
+use node::{HealthCheckRequest, HealthCheckResponse};
+
+pub mod node {
+    tonic::include_proto!("node");
+}
+
+#[derive(Debug, Default)]
+pub struct MyNode {}
+
+#[tonic::async_trait]
+impl Node for MyNode {
+    async fn health_check(
+        &self,
+        _request: Request<HealthCheckRequest>,
+    ) -> Result<Response<HealthCheckResponse>, Status> {
+        let reply = HealthCheckResponse {
+            message: "Live".to_string(),
+        };
+
+        Ok(Response::new(reply))
+    }
+}
+
+pub struct NodeRpcServer {
+    ip_address: String,
+}
+
+impl NodeRpcServer {
+    pub fn new(ip_address: String) -> Self {
+        Self { ip_address }
+    }
+
+    pub async fn run(&self) -> anyhow::Result<()> {
+        let addr = self.ip_address.parse()?;
+        let node = MyNode::default();
+
+        Server::builder()
+            .add_service(NodeServer::new(node))
+            .serve(addr)
+            .await?;
+
+        Ok(())
+    }
+}

From d0d9b13767944cbc02ebad7a9dea20c9fdc9afdb Mon Sep 17 00:00:00 2001
From: IAvecilla
Date: Fri, 19 Jan 2024 19:57:00 -0300
Subject: [PATCH 033/139] Add basic rpc client implementation to use with CLI

---
 node/tools/src/rpc/client.rs | 47 ++++++++++++++++++++++++++++++++++++
 1 file changed, 47 insertions(+)
 create mode 100644 node/tools/src/rpc/client.rs

diff --git a/node/tools/src/rpc/client.rs b/node/tools/src/rpc/client.rs
new file mode 100644
index 00000000..2df1b744
--- /dev/null
+++ b/node/tools/src/rpc/client.rs
@@ -0,0 +1,47 @@
+use clap::{Args, Parser, Subcommand};
+use node::node_client::NodeClient;
+use node::HealthCheckRequest;
+
+pub mod node {
+    tonic::include_proto!("node");
+}
+
+pub const VERSION_STRING: &str = env!("CARGO_PKG_VERSION");
+
+#[derive(Args)]
+struct ClientConfig {
+    #[arg(long)]
+    server_address: String,
+}
+
+#[derive(Parser)]
+#[command(name="client", author, version=VERSION_STRING, about, long_about = None)]
+struct ClientCli {
+    #[command(subcommand)]
+    command: ClientCommands,
+    #[clap(flatten)]
+    config: ClientConfig,
+}
+
+#[derive(Subcommand)]
+enum ClientCommands {
+    #[command(name = "health_check")]
+    HealthCheck,
+}
+
+#[tokio::main]
+async fn main() -> Result<(), Box<dyn std::error::Error>> {
+    let ClientCli { command, config } = ClientCli::parse();
+    let mut client = NodeClient::connect(config.server_address).await?;
+    let response = match command {
+        ClientCommands::HealthCheck => {
+            let request = tonic::Request::new(HealthCheckRequest {});
+            client.health_check(request).await?
+        }
+    };
+    let res_message = response.into_inner().message;
+
+    println!("RESPONSE={:?}", res_message);
+
+    Ok(())
+}

From a3323271cf2c5d3d8d1b9fba40a90ed8dba46237 Mon Sep 17 00:00:00 2001
From: IAvecilla
Date: Fri, 19 Jan 2024 19:57:21 -0300
Subject: [PATCH 034/139] Run RPC server for the standalone node

---
 node/tools/src/lib.rs     | 2 ++
 node/tools/src/main.rs    | 8 +++++++-
 node/tools/src/rpc/mod.rs | 1 +
 3 files changed, 10 insertions(+), 1 deletion(-)
 create mode 100644 node/tools/src/rpc/mod.rs

diff --git a/node/tools/src/lib.rs b/node/tools/src/lib.rs
index 62ee3cc1..ede64ac0 100644
--- a/node/tools/src/lib.rs
+++ b/node/tools/src/lib.rs
@@ -2,9 +2,11 @@
 #![allow(missing_docs)]
 mod config;
 mod proto;
+mod rpc;
 mod store;
 
 #[cfg(test)]
 mod tests;
 
 pub use config::{AppConfig, ConfigPaths};
+pub use rpc::server;

diff --git a/node/tools/src/main.rs b/node/tools/src/main.rs
index 8b9f4f69..4ead0b24 100644
--- a/node/tools/src/main.rs
+++ b/node/tools/src/main.rs
@@ -7,7 +7,7 @@ use tracing::metadata::LevelFilter;
 use tracing_subscriber::{prelude::*, Registry};
 use vise_exporter::MetricsExporter;
 use zksync_concurrency::{ctx, scope};
-use zksync_consensus_tools::ConfigPaths;
+use zksync_consensus_tools::{server, ConfigPaths};
 use zksync_consensus_utils::no_copy::NoCopy;
 
 /// Command-line application launching a node executor.
@@ -89,6 +89,11 @@ async fn main() -> anyhow::Result<()> {
         .await
         .context("configs.into_executor()")?;
 
+    // Config for the RPC server.
+    let mut rpc_addr = configs.app.public_addr.to_string();
+    rpc_addr.replace_range(rpc_addr.find(':').unwrap().., ":3051");
+    let rpc_server = server::NodeRpcServer::new(rpc_addr.clone());
+
     // Initialize the storage.
     scope::run!(ctx, |ctx, s| async {
         if let Some(addr) = configs.app.metrics_server_addr {
@@ -104,6 +109,7 @@ async fn main() -> anyhow::Result<()> {
         }
         s.spawn_bg(runner.run(ctx));
         s.spawn(executor.run(ctx));
+        s.spawn(rpc_server.run());
         Ok(())
     })
     .await

diff --git a/node/tools/src/rpc/mod.rs b/node/tools/src/rpc/mod.rs
new file mode 100644
index 00000000..74f47ad3
--- /dev/null
+++ b/node/tools/src/rpc/mod.rs
@@ -0,0 +1 @@
+pub mod server;

From 8e2d83ce8ecd51c44d89a151ac433b13e08164e0 Mon Sep 17 00:00:00 2001
From: IAvecilla
Date: Fri, 19 Jan 2024 19:57:44 -0300
Subject: [PATCH 035/139] Update docker config files to expose rpc server

---
 Dockerfile   | 3 ++-
 compose.yaml | 4 ++++
 2 files changed, 6 insertions(+), 1 deletion(-)

diff --git a/Dockerfile b/Dockerfile
index 3182b42e..6412e1c2 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -3,7 +3,7 @@ FROM rust:latest as build
 COPY /node/ /node/
 COPY Makefile .
 WORKDIR /node
-RUN apt-get update && apt-get install -y libclang-dev
+RUN apt-get update && apt-get install -y libclang-dev protobuf-compiler libprotobuf-dev
 RUN cargo build --release
 RUN cd .. && make docker_node_configs
 
@@ -20,3 +20,4 @@ RUN chmod +x docker-entrypoint.sh
 
 ENTRYPOINT ["./docker-entrypoint.sh"]
 
 EXPOSE 3054
+EXPOSE 3051

diff --git a/compose.yaml b/compose.yaml
index 19924690..dc904b3a 100644
--- a/compose.yaml
+++ b/compose.yaml
@@ -5,6 +5,8 @@ services:
   node-1:
     build: .
    image: consensus-node
     container_name: consensus-node-1
+    ports:
+      - "3051:3051"
     networks:
       node_net:
         # This allows us to know the IP of the node-1 container to fill in the address in the config file
@@ -13,6 +15,8 @@
   node-2:
     image: consensus-node
     container_name: consensus-node-2
+    ports:
+      - "3052:3051"
     networks:
       node_net:
         # This allows us to know the IP of the node-2 container to fill in the address in the config file

From 96e18b79b613bebf684b595fbded608d83468884 Mon Sep 17 00:00:00 2001
From: IAvecilla
Date: Tue, 23 Jan 2024 12:09:00 -0300
Subject: [PATCH 036/139] Replace the tonic crate with jsonrpsee

---
 node/Cargo.lock       | 754 ++++++++++++++++++++++++++++++++++--------
 node/Cargo.toml       |   4 +-
 node/tools/Cargo.toml |   8 +-
 3 files changed, 621 insertions(+), 145 deletions(-)

diff --git a/node/Cargo.lock b/node/Cargo.lock
index 2000b635..ce3a4d0c 100644
--- a/node/Cargo.lock
+++ b/node/Cargo.lock
@@ -128,27 +128,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "9b34d609dfbaf33d6889b2b7106d3ca345eacad44200913df5ba02bfd31d2ba9"
 
 [[package]]
-name = "async-stream"
-version = "0.3.5"
+name = "async-lock"
+version = "3.2.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cd56dd203fef61ac097dd65721a419ddccb106b2d2b70ba60a6b529f03961a51"
+checksum = "7125e42787d53db9dd54261812ef17e937c95a51e4d291373b670342fa44310c"
 dependencies = [
- "async-stream-impl",
- "futures-core",
+ "event-listener",
+ "event-listener-strategy",
 "pin-project-lite",
]
 
-[[package]]
-name = "async-stream-impl"
-version = "0.3.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193"
-dependencies = [
- "proc-macro2",
- "quote",
- "syn 2.0.48",
-]
-
 [[package]]
 name = "async-trait"
 version = "0.1.77"
@@ -166,51 +155,6 @@ version = "1.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"
 
-[[package]]
-name = "axum"
-version = "0.6.20"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3b829e4e32b91e643de6eafe82b1d90675f5874230191a4ffbc1b336dec4d6bf"
-dependencies = [
- "async-trait",
- "axum-core",
- "bitflags 1.3.2",
- "bytes",
- "futures-util",
- "http",
- "http-body",
- "hyper",
- "itoa",
- "matchit",
- "memchr",
- "mime",
- "percent-encoding",
- "pin-project-lite",
- "rustversion",
- "serde",
- "sync_wrapper",
- "tower",
- "tower-layer",
- "tower-service",
-]
-
-[[package]]
-name = "axum-core"
-version = "0.3.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "759fa577a247914fd3f7f76d62972792636412fbfd634cd452f6a385a74d2d2c"
-dependencies = [
- "async-trait",
- "bytes",
- "futures-util",
- "http",
- "http-body",
- "mime",
- "rustversion",
- "tower-layer",
- "tower-service",
-]
-
 [[package]]
 name = "backtrace"
 version = "0.3.69"
@@ -226,6 +170,12 @@ dependencies = [
 "rustc-demangle",
]
 
+[[package]]
+name = "base64"
+version = "0.13.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8"
+
 [[package]]
 name = "base64"
 version = "0.21.7"
@@ -243,6 +193,9 @@ name = "beef"
 version = "0.5.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "3a8241f3ebb85c056b509d4327ad0358fbbba6ffb340bf388f26350aeda225b1"
+dependencies = [
+ "serde",
+]
 
 [[package]]
 name = "bindgen"
@@ -298,7 +251,16 @@ version = "0.10.6"
 source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "46502ad458c9a52b69d4d4d32775c788b7a1b85e8bc9d482d92250fc0e3f8efe" dependencies = [ - "digest", + "digest 0.10.7", +] + +[[package]] +name = "block-buffer" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" +dependencies = [ + "generic-array", ] [[package]] @@ -506,12 +468,37 @@ name = "compile-fmt" version = "0.1.0" source = "git+https://github.com/slowli/compile-fmt.git?rev=c6a41c846c9a6f70cdba4b44c9f3922242ffcf12#c6a41c846c9a6f70cdba4b44c9f3922242ffcf12" +[[package]] +name = "concurrent-queue" +version = "2.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d16048cd947b08fa32c24458a22f5dc5e835264f689f4f5653210c69fd107363" +dependencies = [ + "crossbeam-utils", +] + [[package]] name = "const-oid" version = "0.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" +[[package]] +name = "core-foundation" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91e195e091a93c46f7102ec7818a2aa394e1e1771c3ab4825963fa03e45afb8f" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "core-foundation-sys" +version = "0.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06ea2b9bc92be3c2baa9334a323ebca2d6f074ff852cd1d7b11064035cd3868f" + [[package]] name = "cpufeatures" version = "0.2.12" @@ -611,7 +598,7 @@ dependencies = [ "cfg-if", "cpufeatures", "curve25519-dalek-derive", - "digest", + "digest 0.10.7", "fiat-crypto", "platforms", "rustc_version", @@ -655,13 +642,22 @@ version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "56254986775e3233ffa9c4d7d3faaf6d36a2c09d30b20687e9f88bc8bafc16c8" +[[package]] +name = "digest" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" +dependencies = [ + "generic-array", +] + [[package]] name = "digest" version = "0.10.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" dependencies = [ - "block-buffer", + "block-buffer 0.10.4", "crypto-common", "subtle", ] @@ -728,6 +724,27 @@ dependencies = [ "windows-sys 0.52.0", ] +[[package]] +name = "event-listener" +version = "4.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "84f2cdcf274580f2d63697192d744727b3198894b1bf02923643bf59e2c26712" +dependencies = [ + "concurrent-queue", + "parking", + "pin-project-lite", +] + +[[package]] +name = "event-listener-strategy" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "958e4d70b6d5e81971bebec42271ec641e7ff4e170a6fa605f2b8a8b65cb97d3" +dependencies = [ + "event-listener", + "pin-project-lite", +] + [[package]] name = "fastrand" version = "2.0.1" @@ -780,12 +797,35 @@ version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" +[[package]] +name = "form_urlencoded" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456" +dependencies = [ + 
"percent-encoding", +] + [[package]] name = "fuchsia-cprng" version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a06f77d526c1a601b7c4cdd98f54b5eaabffc14d5f2f0296febdc7f357c6d3ba" +[[package]] +name = "futures" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "645c6916888f6cb6350d2550b80fb63e734897a8498abe35cfb732b6487804b0" +dependencies = [ + "futures-channel", + "futures-core", + "futures-io", + "futures-sink", + "futures-task", + "futures-util", +] + [[package]] name = "futures-channel" version = "0.3.30" @@ -793,6 +833,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eac8f7d7865dcb88bd4373ab671c8cf4508703796caa2b1985a9ca867b3fcb78" dependencies = [ "futures-core", + "futures-sink", ] [[package]] @@ -801,6 +842,23 @@ version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d" +[[package]] +name = "futures-io" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1" + +[[package]] +name = "futures-macro" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.48", +] + [[package]] name = "futures-sink" version = "0.3.30" @@ -813,16 +871,28 @@ version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" +[[package]] +name = "futures-timer" +version = "3.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e64b03909df88034c26dc1547e8970b91f98bdb65165d6a4e9110d94263dbb2c" + [[package]] name = "futures-util" version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48" dependencies = [ + "futures-channel", "futures-core", + "futures-io", + "futures-macro", + "futures-sink", "futures-task", + "memchr", "pin-project-lite", "pin-utils", + "slab", ] [[package]] @@ -905,6 +975,16 @@ version = "0.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604" +[[package]] +name = "hdrhistogram" +version = "7.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "765c9198f173dd59ce26ff9f95ef0aafd0a0fe01fb9d72841bc5066a4c06511d" +dependencies = [ + "byteorder", + "num-traits", +] + [[package]] name = "heck" version = "0.4.1" @@ -991,15 +1071,29 @@ dependencies = [ ] [[package]] -name = "hyper-timeout" -version = "0.4.1" +name = "hyper-rustls" +version = "0.24.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbb958482e8c7be4bc3cf272a766a2b0bf1a6755e7a6ae777f017a31d11b13b1" +checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590" dependencies = [ + "futures-util", + "http", "hyper", - "pin-project-lite", + "log", + "rustls 0.21.10", + "rustls-native-certs 0.6.3", "tokio", - "tokio-io-timeout", + "tokio-rustls 0.24.1", +] + +[[package]] +name = "idna" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "634d9b1461af396cad843f47fdba5597a4f9e6ddd4bfb6ff5d85028c25cb12f6" 
+dependencies = [ + "unicode-bidi", + "unicode-normalization", ] [[package]] @@ -1098,6 +1192,153 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "jsonrpsee" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9579d0ca9fb30da026bac2f0f7d9576ec93489aeb7cd4971dd5b4617d82c79b2" +dependencies = [ + "jsonrpsee-client-transport", + "jsonrpsee-core", + "jsonrpsee-http-client", + "jsonrpsee-proc-macros", + "jsonrpsee-server", + "jsonrpsee-types", + "jsonrpsee-ws-client", + "tokio", + "tracing", +] + +[[package]] +name = "jsonrpsee-client-transport" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f9f9ed46590a8d5681975f126e22531698211b926129a40a2db47cbca429220" +dependencies = [ + "futures-util", + "http", + "jsonrpsee-core", + "pin-project", + "rustls-native-certs 0.7.0", + "rustls-pki-types", + "soketto", + "thiserror", + "tokio", + "tokio-rustls 0.25.0", + "tokio-util", + "tracing", + "url", +] + +[[package]] +name = "jsonrpsee-core" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "776d009e2f591b78c038e0d053a796f94575d66ca4e77dd84bfc5e81419e436c" +dependencies = [ + "anyhow", + "async-lock", + "async-trait", + "beef", + "futures-timer", + "futures-util", + "hyper", + "jsonrpsee-types", + "parking_lot", + "pin-project", + "rand 0.8.5", + "rustc-hash", + "serde", + "serde_json", + "thiserror", + "tokio", + "tokio-stream", + "tracing", +] + +[[package]] +name = "jsonrpsee-http-client" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78b7de9f3219d95985eb77fd03194d7c1b56c19bce1abfcc9d07462574b15572" +dependencies = [ + "async-trait", + "hyper", + "hyper-rustls", + "jsonrpsee-core", + "jsonrpsee-types", + "serde", + "serde_json", + "thiserror", + "tokio", + "tower", + "tracing", + "url", +] + +[[package]] +name = "jsonrpsee-proc-macros" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d94b7505034e2737e688e1153bf81e6f93ad296695c43958d6da2e4321f0a990" +dependencies = [ + "heck", + "proc-macro-crate", + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "jsonrpsee-server" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5cc7c6d1a2c58f6135810284a390d9f823d0f508db74cd914d8237802de80f98" +dependencies = [ + "futures-util", + "http", + "hyper", + "jsonrpsee-core", + "jsonrpsee-types", + "pin-project", + "route-recognizer", + "serde", + "serde_json", + "soketto", + "thiserror", + "tokio", + "tokio-stream", + "tokio-util", + "tower", + "tracing", +] + +[[package]] +name = "jsonrpsee-types" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3266dfb045c9174b24c77c2dfe0084914bb23a6b2597d70c9dc6018392e1cd1b" +dependencies = [ + "anyhow", + "beef", + "serde", + "serde_json", + "thiserror", +] + +[[package]] +name = "jsonrpsee-ws-client" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "073c077471e89c4b511fa88b3df9a0f0abdf4a0a2e6683dd2ab36893af87bb2d" +dependencies = [ + "http", + "jsonrpsee-client-transport", + "jsonrpsee-core", + "jsonrpsee-types", + "url", +] + [[package]] name = "keccak" version = "0.1.5" @@ -1255,12 +1496,6 @@ dependencies = [ "regex-automata 0.1.10", ] -[[package]] -name = "matchit" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94" - [[package]] name = "memchr" version = "2.7.1" @@ -1290,12 +1525,6 @@ dependencies = [ "syn 2.0.48", ] -[[package]] -name = "mime" -version = "0.3.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" - [[package]] name = "minimal-lexical" version = "0.2.1" @@ -1415,6 +1644,12 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" +[[package]] +name = "openssl-probe" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" + [[package]] name = "ordered-float" version = "2.10.1" @@ -1442,6 +1677,12 @@ dependencies = [ "serde", ] +[[package]] +name = "parking" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb813b8af86854136c6922af0598d719255ecb2179515e6e7730d468f05c9cae" + [[package]] name = "parking_lot" version = "0.12.1" @@ -1624,6 +1865,16 @@ dependencies = [ "syn 2.0.48", ] +[[package]] +name = "proc-macro-crate" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97dc5fea232fc28d2f597b37c4876b348a40e33f3b02cc975c8d006d78d94b1a" +dependencies = [ + "toml_datetime", + "toml_edit", +] + [[package]] name = "proc-macro2" version = "1.0.76" @@ -1707,7 +1958,7 @@ version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "057237efdb71cf4b3f9396302a3d6599a92fa94063ba537b66130980ea9909f3" dependencies = [ - "base64", + "base64 0.21.7", "logos", "miette", "once_cell", @@ -1920,6 +2171,20 @@ version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" +[[package]] +name = "ring" +version = "0.17.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "688c63d65483050968b2a8937f7995f443e27041a0f7700aa59b0822aedebb74" +dependencies = [ + "cc", + "getrandom", + "libc", + "spin", + "untrusted", + "windows-sys 0.48.0", +] + [[package]] name = "rocksdb" version = "0.21.0" @@ -1930,6 +2195,12 @@ dependencies = [ "librocksdb-sys", ] +[[package]] +name = "route-recognizer" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "afab94fb28594581f62d981211a9a4d53cc8130bbcbbb89a0440d9b8e81a7746" + [[package]] name = "rustc-demangle" version = "0.1.23" @@ -1965,10 +2236,101 @@ dependencies = [ ] [[package]] -name = "rustversion" -version = "1.0.14" +name = "rustls" +version = "0.21.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f9d5a6813c0759e4609cd494e8e725babae6a2ca7b62a5536a13daaec6fcb7ba" +dependencies = [ + "log", + "ring", + "rustls-webpki 0.101.7", + "sct", +] + +[[package]] +name = "rustls" +version = "0.22.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ffc183a10b4478d04cbbbfc96d0873219d962dd5accaff2ffbd4ceb7df837f4" +checksum = "e87c9956bd9807afa1f77e0f7594af32566e830e088a5576d27c5b6f30f49d41" +dependencies = [ + "log", + "ring", + "rustls-pki-types", + "rustls-webpki 0.102.1", + "subtle", + "zeroize", +] + +[[package]] +name = "rustls-native-certs" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"a9aace74cb666635c918e9c12bc0d348266037aa8eb599b5cba565709a8dff00" +dependencies = [ + "openssl-probe", + "rustls-pemfile 1.0.4", + "schannel", + "security-framework", +] + +[[package]] +name = "rustls-native-certs" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f1fb85efa936c42c6d5fc28d2629bb51e4b2f4b8a5211e297d599cc5a093792" +dependencies = [ + "openssl-probe", + "rustls-pemfile 2.0.0", + "rustls-pki-types", + "schannel", + "security-framework", +] + +[[package]] +name = "rustls-pemfile" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1c74cae0a4cf6ccbbf5f359f08efdf8ee7e1dc532573bf0db71968cb56b1448c" +dependencies = [ + "base64 0.21.7", +] + +[[package]] +name = "rustls-pemfile" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "35e4980fa29e4c4b212ffb3db068a564cbf560e51d3944b7c88bd8bf5bec64f4" +dependencies = [ + "base64 0.21.7", + "rustls-pki-types", +] + +[[package]] +name = "rustls-pki-types" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7673e0aa20ee4937c6aacfc12bb8341cfbf054cdd21df6bec5fd0629fe9339b" + +[[package]] +name = "rustls-webpki" +version = "0.101.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765" +dependencies = [ + "ring", + "untrusted", +] + +[[package]] +name = "rustls-webpki" +version = "0.102.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef4ca26037c909dedb327b48c3327d0ba91d3dd3c4e05dad328f210ffb68e95b" +dependencies = [ + "ring", + "rustls-pki-types", + "untrusted", +] [[package]] name = "ryu" @@ -1985,12 +2347,54 @@ dependencies = [ "winapi-util", ] +[[package]] +name = "schannel" +version = "0.1.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fbc91545643bcf3a0bbb6569265615222618bdf33ce4ffbbd13c4bbd4c093534" +dependencies = [ + "windows-sys 0.52.0", +] + [[package]] name = "scopeguard" version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" +[[package]] +name = "sct" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414" +dependencies = [ + "ring", + "untrusted", +] + +[[package]] +name = "security-framework" +version = "2.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05b64fb303737d99b81884b2c63433e9ae28abebe5eb5045dcdd175dc2ecf4de" +dependencies = [ + "bitflags 1.3.2", + "core-foundation", + "core-foundation-sys", + "libc", + "security-framework-sys", +] + +[[package]] +name = "security-framework-sys" +version = "2.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e932934257d3b408ed8f30db49d85ea163bfe74961f017f405b025af298f0c7a" +dependencies = [ + "core-foundation-sys", + "libc", +] + [[package]] name = "semver" version = "1.0.21" @@ -2038,6 +2442,19 @@ dependencies = [ "serde", ] +[[package]] +name = "sha-1" +version = "0.9.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "99cd6713db3cf16b6c84e06321e049a9b9f699826e16096d23bbcc44d15d51a6" +dependencies = [ + "block-buffer 0.9.0", + "cfg-if", + "cpufeatures", + "digest 0.9.0", + "opaque-debug", +] + [[package]] name = "sha2" version = "0.10.8" 
@@ -2046,7 +2463,7 @@ checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8" dependencies = [ "cfg-if", "cpufeatures", - "digest", + "digest 0.10.7", ] [[package]] @@ -2055,7 +2472,7 @@ version = "0.10.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "75872d278a8f37ef87fa0ddbda7802605cb18344497949862c0d4dcb291eba60" dependencies = [ - "digest", + "digest 0.10.7", "keccak", ] @@ -2143,6 +2560,28 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "soketto" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41d1c5305e39e09653383c2c7244f2f78b3bcae37cf50c64cb4789c9f5096ec2" +dependencies = [ + "base64 0.13.1", + "bytes", + "futures", + "http", + "httparse", + "log", + "rand 0.8.5", + "sha-1", +] + +[[package]] +name = "spin" +version = "0.9.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" + [[package]] name = "spki" version = "0.7.3" @@ -2193,12 +2632,6 @@ dependencies = [ "unicode-ident", ] -[[package]] -name = "sync_wrapper" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" - [[package]] name = "tempfile" version = "3.9.0" @@ -2299,6 +2732,21 @@ dependencies = [ "serde_json", ] +[[package]] +name = "tinyvec" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87cc5ceb3875bb20c2890005a4e226a4651264a5c75edb2421b52861a0a0cb50" +dependencies = [ + "tinyvec_macros", +] + +[[package]] +name = "tinyvec_macros" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" + [[package]] name = "tokio" version = "1.35.1" @@ -2318,16 +2766,6 @@ dependencies = [ "windows-sys 0.48.0", ] -[[package]] -name = "tokio-io-timeout" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30b74022ada614a1b4834de765f9bb43877f910cc8ce4be40e89042c9223a8bf" -dependencies = [ - "pin-project-lite", - "tokio", -] - [[package]] name = "tokio-macros" version = "2.2.0" @@ -2339,6 +2777,27 @@ dependencies = [ "syn 2.0.48", ] +[[package]] +name = "tokio-rustls" +version = "0.24.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" +dependencies = [ + "rustls 0.21.10", + "tokio", +] + +[[package]] +name = "tokio-rustls" +version = "0.25.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "775e0c0f0adb3a2f22a00c4745d728b479985fc15ee7ca6a2608388c5569860f" +dependencies = [ + "rustls 0.22.2", + "rustls-pki-types", + "tokio", +] + [[package]] name = "tokio-stream" version = "0.1.14" @@ -2358,6 +2817,7 @@ checksum = "5419f34732d9eb6ee4c3578b7989078579b7f039cbbb9ca2c4da015749371e15" dependencies = [ "bytes", "futures-core", + "futures-io", "futures-sink", "pin-project-lite", "tokio", @@ -2365,43 +2825,20 @@ dependencies = [ ] [[package]] -name = "tonic" -version = "0.10.2" +name = "toml_datetime" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d560933a0de61cf715926b9cac824d4c883c2c43142f787595e48280c40a1d0e" -dependencies = [ - "async-stream", - "async-trait", - "axum", - "base64", - "bytes", - "h2", - "http", - "http-body", - "hyper", - "hyper-timeout", - 
"percent-encoding", - "pin-project", - "prost", - "tokio", - "tokio-stream", - "tower", - "tower-layer", - "tower-service", - "tracing", -] +checksum = "7cda73e2f1397b1262d6dfdcef8aafae14d1de7748d66822d3bfeeb6d03e5e4b" [[package]] -name = "tonic-build" -version = "0.10.2" +name = "toml_edit" +version = "0.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d021fc044c18582b9a2408cd0dd05b1596e3ecdb5c4df822bb0183545683889" +checksum = "396e4d48bbb2b7554c944bde63101b5ae446cff6ec4a24227428f15eb72ef338" dependencies = [ - "prettyplease", - "proc-macro2", - "prost-build", - "quote", - "syn 2.0.48", + "indexmap 2.1.0", + "toml_datetime", + "winnow", ] [[package]] @@ -2412,6 +2849,7 @@ checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" dependencies = [ "futures-core", "futures-util", + "hdrhistogram", "indexmap 1.9.3", "pin-project", "pin-project-lite", @@ -2442,6 +2880,7 @@ version = "0.1.40" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef" dependencies = [ + "log", "pin-project-lite", "tracing-attributes", "tracing-core", @@ -2509,12 +2948,27 @@ version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" +[[package]] +name = "unicode-bidi" +version = "0.3.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08f95100a766bf4f8f28f90d77e0a5461bbdb219042e7679bebe79004fed8d75" + [[package]] name = "unicode-ident" version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" +[[package]] +name = "unicode-normalization" +version = "0.1.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c5713f0fc4b5db668a2ac63cdb7bb4469d8c9fed047b1d0292cc7b0ce2ba921" +dependencies = [ + "tinyvec", +] + [[package]] name = "unicode-width" version = "0.1.11" @@ -2531,6 +2985,23 @@ dependencies = [ "subtle", ] +[[package]] +name = "untrusted" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" + +[[package]] +name = "url" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "31e6302e3bb753d46e83516cae55ae196fc0c309407cf11ab35cc51a4c2a4633" +dependencies = [ + "form_urlencoded", + "idna", + "percent-encoding", +] + [[package]] name = "utf8parse" version = "0.2.1" @@ -2854,6 +3325,15 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04" +[[package]] +name = "winnow" +version = "0.5.34" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b7cf47b659b318dccbd69cc4797a39ae128f533dce7902a1096044d1967b9c16" +dependencies = [ + "memchr", +] + [[package]] name = "yansi" version = "0.5.1" @@ -3050,14 +3530,14 @@ dependencies = [ "anyhow", "async-trait", "clap", + "jsonrpsee", "prost", "rand 0.8.5", "rocksdb", "serde_json", "tempfile", "tokio", - "tonic", - "tonic-build", + "tower", "tracing", "tracing-subscriber", "vise-exporter", diff --git a/node/Cargo.toml b/node/Cargo.toml index a938ca91..9503a5bc 100644 --- a/node/Cargo.toml +++ b/node/Cargo.toml @@ -82,8 +82,8 @@ time = "0.3.23" tokio = { version = "1.34.0", features = ["full"] } 
 tracing = { version = "0.1.37", features = ["attributes"] }
 tracing-subscriber = { version = "0.3.16", features = ["env-filter", "fmt"] }
-tonic = "0.10.2"
-tonic-build = "0.10"
+jsonrpsee = { version = "0.21.0", features = ["server", "http-client", "ws-client", "macros", "client-ws-transport-native-tls"] }
+tower = { version = "0.4.13", features = ["full"] }
 
 # Note that "bench" profile inherits from "release" profile and
 # "test" profile inherits from "dev" profile.
diff --git a/node/tools/Cargo.toml b/node/tools/Cargo.toml
index 566ab4f4..25c007ac 100644
--- a/node/tools/Cargo.toml
+++ b/node/tools/Cargo.toml
@@ -28,14 +28,14 @@ tokio.workspace = true
 tracing.workspace = true
 tracing-subscriber.workspace = true
 vise-exporter.workspace = true
-tonic.workspace = true
+jsonrpsee.workspace = true
+tower.workspace = true
 
 [dev-dependencies]
 tempfile.workspace = true
 
 [build-dependencies]
 zksync_protobuf_build.workspace = true
-tonic-build.workspace = true
 
 [lints]
 workspace = true
@@ -43,7 +43,3 @@ workspace = true
 [[bin]]
 name = "executor"
 path = "src/main.rs"
-
-[[bin]]
-name = "client"
-path = "src/rpc/client.rs"

From 6b57f2a26a18827722215aeb782092aa7482d93b Mon Sep 17 00:00:00 2001
From: IAvecilla
Date: Tue, 23 Jan 2024 12:09:46 -0300
Subject: [PATCH 037/139] Delete protobuf-related files

---
 node/tools/build.rs                  |  5 +----
 node/tools/src/proto_test/node.proto | 13 -------------
 2 files changed, 1 insertion(+), 17 deletions(-)
 delete mode 100644 node/tools/src/proto_test/node.proto

diff --git a/node/tools/build.rs b/node/tools/build.rs
index 7767627c..e4bba2bd 100644
--- a/node/tools/build.rs
+++ b/node/tools/build.rs
@@ -1,5 +1,5 @@
 //! Generates rust code from protobufs.
-fn main() -> Result<(), Box<dyn std::error::Error>> {
+fn main() {
     zksync_protobuf_build::Config {
         input_root: "src/proto".into(),
         proto_root: "zksync/tools".into(),
@@ -9,7 +9,4 @@
     }
     .generate()
     .unwrap();
-
-    tonic_build::compile_protos("src/proto_test/node.proto")?;
-    Ok(())
 }
diff --git a/node/tools/src/proto_test/node.proto b/node/tools/src/proto_test/node.proto
deleted file mode 100644
index 54f76a94..00000000
--- a/node/tools/src/proto_test/node.proto
+++ /dev/null
@@ -1,13 +0,0 @@
-syntax = "proto3";
-package node;
-
-service Node {
-  rpc HealthCheck (HealthCheckRequest) returns (HealthCheckResponse);
-}
-
-message HealthCheckRequest {
-}
-
-message HealthCheckResponse {
-  string message = 1;
-}

From c0aaa9b4e6baa591572dbe65733cd0f5290be68c Mon Sep 17 00:00:00 2001
From: IAvecilla
Date: Tue, 23 Jan 2024 12:10:05 -0300
Subject: [PATCH 038/139] Delete unnecessary RPC client

---
 node/tools/src/rpc/client.rs | 47 ------------------------------------
 1 file changed, 47 deletions(-)
 delete mode 100644 node/tools/src/rpc/client.rs

diff --git a/node/tools/src/rpc/client.rs b/node/tools/src/rpc/client.rs
deleted file mode 100644
index 2df1b744..00000000
--- a/node/tools/src/rpc/client.rs
+++ /dev/null
@@ -1,47 +0,0 @@
-use clap::{Args, Parser, Subcommand};
-use node::node_client::NodeClient;
-use node::HealthCheckRequest;
-
-pub mod node {
-    tonic::include_proto!("node");
-}
-
-pub const VERSION_STRING: &str = env!("CARGO_PKG_VERSION");
-
-#[derive(Args)]
-struct ClientConfig {
-    #[arg(long)]
-    server_address: String,
-}
-
-#[derive(Parser)]
-#[command(name="client", author, version=VERSION_STRING, about, long_about = None)]
-struct ClientCli {
-    #[command(subcommand)]
-    command: ClientCommands,
-    #[clap(flatten)]
-    config: ClientConfig,
-}
-
-#[derive(Subcommand)]
-enum ClientCommands {
-    #[command(name = "health_check")]
-    HealthCheck,
-}
-
-#[tokio::main]
-async fn main() -> Result<(), Box<dyn std::error::Error>> {
-    let ClientCli { command, config } = ClientCli::parse();
-    let mut client = NodeClient::connect(config.server_address).await?;
-    let response = match command {
-        ClientCommands::HealthCheck => {
-            let request = tonic::Request::new(HealthCheckRequest {});
-            client.health_check(request).await?
-        }
-    };
-    let res_message = response.into_inner().message;
-
-    println!("RESPONSE={:?}", res_message);
-
-    Ok(())
-}

From 88a5b236e7996418d8de3cb2d9f8e211377411b7 Mon Sep 17 00:00:00 2001
From: IAvecilla
Date: Tue, 23 Jan 2024 12:10:28 -0300
Subject: [PATCH 039/139] Add new jsonrpsee server and health endpoint

---
 node/tools/src/main.rs                     |  3 +-
 node/tools/src/rpc/methods/health_check.rs |  9 +++
 node/tools/src/rpc/methods/mod.rs          |  1 +
 node/tools/src/rpc/mod.rs                  |  1 +
 node/tools/src/rpc/server.rs               | 77 +++++++++------------
 5 files changed, 43 insertions(+), 48 deletions(-)
 create mode 100644 node/tools/src/rpc/methods/health_check.rs
 create mode 100644 node/tools/src/rpc/methods/mod.rs

diff --git a/node/tools/src/main.rs b/node/tools/src/main.rs
index 4ead0b24..76c54bfa 100644
--- a/node/tools/src/main.rs
+++ b/node/tools/src/main.rs
@@ -92,7 +92,6 @@ async fn main() -> anyhow::Result<()> {
     // Config for the RPC server.
     let mut rpc_addr = configs.app.public_addr.to_string();
     rpc_addr.replace_range(rpc_addr.find(":").unwrap().., ":3051");
-    let rpc_server = server::NodeRpcServer::new(rpc_addr.clone());
 
     // Initialize the storage.
     scope::run!(ctx, |ctx, s| async {
@@ -109,7 +108,7 @@ async fn main() -> anyhow::Result<()> {
         }
         s.spawn_bg(runner.run(ctx));
         s.spawn(executor.run(ctx));
-        s.spawn(rpc_server.run());
+        s.spawn(server::run_server(rpc_addr));
         Ok(())
     })
     .await
diff --git a/node/tools/src/rpc/methods/health_check.rs b/node/tools/src/rpc/methods/health_check.rs
new file mode 100644
index 00000000..4a7d3abc
--- /dev/null
+++ b/node/tools/src/rpc/methods/health_check.rs
@@ -0,0 +1,9 @@
+use jsonrpsee::types::Params;
+
+pub fn callback(_params: Params) -> serde_json::Value {
+    serde_json::json!({"health": true})
+}
+
+pub fn method() -> &'static str {
+    "health_check"
+}
diff --git a/node/tools/src/rpc/methods/mod.rs b/node/tools/src/rpc/methods/mod.rs
new file mode 100644
index 00000000..4bd7bd48
--- /dev/null
+++ b/node/tools/src/rpc/methods/mod.rs
@@ -0,0 +1 @@
+pub(crate) mod health_check;
diff --git a/node/tools/src/rpc/mod.rs b/node/tools/src/rpc/mod.rs
index 74f47ad3..9e05b75e 100644
--- a/node/tools/src/rpc/mod.rs
+++ b/node/tools/src/rpc/mod.rs
@@ -1 +1,2 @@
+mod methods;
 pub mod server;
diff --git a/node/tools/src/rpc/server.rs b/node/tools/src/rpc/server.rs
index cc90ef2f..274a64a9 100644
--- a/node/tools/src/rpc/server.rs
+++ b/node/tools/src/rpc/server.rs
@@ -1,47 +1,32 @@
-use tonic::{transport::Server, Request, Response, Status};
-
-use node::node_server::{Node, NodeServer};
-use node::{HealthCheckRequest, HealthCheckResponse};
-
-pub mod node {
-    tonic::include_proto!("node");
-}
-
-#[derive(Debug, Default)]
-pub struct MyNode {}
-
-#[tonic::async_trait]
-impl Node for MyNode {
-    async fn health_check(
-        &self,
-        _request: Request<HealthCheckRequest>,
-    ) -> Result<Response<HealthCheckResponse>, Status> {
-        let reply = HealthCheckResponse {
-            message: format!("Live").into(),
-        };
-
-        Ok(Response::new(reply))
-    }
-}
-
-pub struct NodeRpcServer {
-    ip_address: String,
-}
-
-impl NodeRpcServer {
-    pub fn new(ip_address: String) -> Self {
-        Self { ip_address }
-    }
-
-    pub async fn run(&self) -> anyhow::Result<()> {
-        let addr = self.ip_address.parse()?;
-        let node = MyNode::default();
-
-        Server::builder()
-            .add_service(NodeServer::new(node))
-            .serve(addr)
-            .await?;
-
-        Ok(())
-    }
+use std::net::SocketAddr;
+
+use jsonrpsee::server::{middleware::http::ProxyGetRequestLayer, RpcModule, Server};
+use super::methods::health_check;
+
+pub async fn run_server(ip_address: String) -> anyhow::Result<()> {
+    let ip_address: SocketAddr = ip_address.parse()?;
+    // Custom tower service to handle the RPC requests
+    let service_builder = tower::ServiceBuilder::new()
+        // Proxy `GET /health` requests to internal `system_health` method.
+        .layer(ProxyGetRequestLayer::new(
+            "/health",
+            health_check::method(),
+        )?);
+
+    let server = Server::builder()
+        .set_http_middleware(service_builder)
+        .build(ip_address)
+        .await?;
+    let mut module = RpcModule::new(());
+    module.register_method(health_check::method(), |params, _| {
+        health_check::callback(params)
+    })?;
+
+    let handle = server.start(module);
+
+    // In this example we don't care about shutting down, so we let the server run forever.
+    // You may use the `ServerHandle` to shut it down or manage it yourself.
+    tokio::spawn(handle.stopped());
+
+    Ok(())
 }

From 890bdeed41ccb1fd3b502680c59008937e265980 Mon Sep 17 00:00:00 2001
From: IAvecilla
Date: Tue, 23 Jan 2024 13:54:08 -0300
Subject: [PATCH 040/139] Add inner docs for new rpc module

---
 node/tools/src/main.rs                     | 2 +-
 node/tools/src/rpc/methods/health_check.rs | 7 +++++--
 node/tools/src/rpc/mod.rs                  | 1 +
 node/tools/src/rpc/server.rs               | 2 +-
 4 files changed, 8 insertions(+), 4 deletions(-)

diff --git a/node/tools/src/main.rs b/node/tools/src/main.rs
index 76c54bfa..d9e064ce 100644
--- a/node/tools/src/main.rs
+++ b/node/tools/src/main.rs
@@ -91,7 +91,7 @@ async fn main() -> anyhow::Result<()> {
 
     // Config for the RPC server.
     let mut rpc_addr = configs.app.public_addr.to_string();
-    rpc_addr.replace_range(rpc_addr.find(":").unwrap().., ":3051");
+    rpc_addr.replace_range(rpc_addr.find(':').unwrap().., ":3051");
 
     // Initialize the storage.
     scope::run!(ctx, |ctx, s| async {
diff --git a/node/tools/src/rpc/methods/health_check.rs b/node/tools/src/rpc/methods/health_check.rs
index 4a7d3abc..8b826991 100644
--- a/node/tools/src/rpc/methods/health_check.rs
+++ b/node/tools/src/rpc/methods/health_check.rs
@@ -1,9 +1,12 @@
+//! Health check method for RPC server.
 use jsonrpsee::types::Params;
 
-pub fn callback(_params: Params) -> serde_json::Value {
+/// Health check response for /health endpoint.
+pub(crate) fn callback(_params: Params) -> serde_json::Value {
     serde_json::json!({"health": true})
 }
 
-pub fn method() -> &'static str {
+/// Health check method name.
+pub(crate) fn method() -> &'static str {
     "health_check"
 }
diff --git a/node/tools/src/rpc/mod.rs b/node/tools/src/rpc/mod.rs
index 9e05b75e..c256bad3 100644
--- a/node/tools/src/rpc/mod.rs
+++ b/node/tools/src/rpc/mod.rs
@@ -1,2 +1,3 @@
+//! RPC server for testing purposes.
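+//! Plain `GET /health` requests are proxied to the `health_check` RPC method.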
mod methods; pub mod server; diff --git a/node/tools/src/rpc/server.rs b/node/tools/src/rpc/server.rs index 274a64a9..6348dd2a 100644 --- a/node/tools/src/rpc/server.rs +++ b/node/tools/src/rpc/server.rs @@ -1,7 +1,7 @@ use std::net::SocketAddr; -use jsonrpsee::server::{middleware::http::ProxyGetRequestLayer, RpcModule, Server}; use super::methods::health_check; +use jsonrpsee::server::{middleware::http::ProxyGetRequestLayer, RpcModule, Server}; pub async fn run_server(ip_address: String) -> anyhow::Result<()> { let ip_address: SocketAddr = ip_address.parse()?; From 8f806bc9a6124c679bede29132febe89515ec86c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Esteban=20Dimitroff=20H=C3=B3di?= Date: Tue, 23 Jan 2024 14:59:28 -0300 Subject: [PATCH 041/139] quick and dirty k8s deployment scripts for test framework --- node/tools/k8s/k8s-deployment.yml | 29 +++++++++++++++++++++++++++++ node/tools/k8s/k8s-namespace.yml | 6 ++++++ node/tools/k8s/k8s-service.yml | 12 ++++++++++++ 3 files changed, 47 insertions(+) create mode 100644 node/tools/k8s/k8s-deployment.yml create mode 100644 node/tools/k8s/k8s-namespace.yml create mode 100644 node/tools/k8s/k8s-service.yml diff --git a/node/tools/k8s/k8s-deployment.yml b/node/tools/k8s/k8s-deployment.yml new file mode 100644 index 00000000..5676a15d --- /dev/null +++ b/node/tools/k8s/k8s-deployment.yml @@ -0,0 +1,29 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: consensus-node-01 + namespace: consensus +spec: + selector: + matchLabels: + app: consensus-node-01 + replicas: 1 + template: + metadata: + labels: + app: consensus-node-01 + spec: + containers: + - name: consensus-node-01 + image: consensus-node-01 + imagePullPolicy: Never + ports: + - containerPort: 3054 + livenessProbe: + httpGet: + path: /greeting + port: 3054 + readinessProbe: + httpGet: + path: /greeting + port: 3054 \ No newline at end of file diff --git a/node/tools/k8s/k8s-namespace.yml b/node/tools/k8s/k8s-namespace.yml new file mode 100644 index 00000000..2d977ff5 --- /dev/null +++ b/node/tools/k8s/k8s-namespace.yml @@ -0,0 +1,6 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: consensus + labels: + name: consensus \ No newline at end of file diff --git a/node/tools/k8s/k8s-service.yml b/node/tools/k8s/k8s-service.yml new file mode 100644 index 00000000..fff121ff --- /dev/null +++ b/node/tools/k8s/k8s-service.yml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: Service +metadata: + name: consensus-node-01 + namespace: consensus +spec: + type: NodePort + selector: + app: consensus-node-01 + ports: + - port: 3054 + nodePort: 30088 \ No newline at end of file From 438a2b8bb0b2af96771c5f212a695a6c4026e9c9 Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Wed, 24 Jan 2024 12:17:00 -0300 Subject: [PATCH 042/139] Change name for generated config folders --- Makefile | 4 ++-- compose.yaml | 6 +++++- docker-entrypoint.sh | 2 +- node/tools/README.md | 6 +++--- 4 files changed, 11 insertions(+), 7 deletions(-) diff --git a/Makefile b/Makefile index 9943dc2d..8a8fe423 100644 --- a/Makefile +++ b/Makefile @@ -1,12 +1,12 @@ .PHONY: node nodes_config docker_node_configs node_docker consensus_docker_example clean clean_docker -IP?=127.0.0.1:3054 +NODE?=0 DOCKER_IP=172.12.0.10 EXECUTABLE_NODE_DIR=node/tools # Locally run commands node: - export RUST_LOG=INFO && cd ${EXECUTABLE_NODE_DIR}/nodes-config/${IP} && cargo run -- --database ../../database/${IP} + export RUST_LOG=INFO && cd ${EXECUTABLE_NODE_DIR}/nodes-config/node_${NODE} && cargo run -- --database ../../database/node_${NODE} nodes_config: cd 
${EXECUTABLE_NODE_DIR} && cargo run --bin localnet_config -- --input-addrs addresses.txt --output-dir nodes-config diff --git a/compose.yaml b/compose.yaml index dc904b3a..3e567c9c 100644 --- a/compose.yaml +++ b/compose.yaml @@ -5,6 +5,8 @@ services: build: . image: consensus-node container_name: consensus-node-1 + environment: + - NODE_ID=node_0 ports: - "3051:3051" networks: @@ -15,8 +17,10 @@ services: node-2: image: consensus-node container_name: consensus-node-2 + environment: + - NODE_ID=node_1 ports: - - "3051:3051" + - "3052:3051" networks: node_net: # This allow us to know the ip of the node-2 container to fill the address in the config file diff --git a/docker-entrypoint.sh b/docker-entrypoint.sh index ee53e262..3286c01c 100755 --- a/docker-entrypoint.sh +++ b/docker-entrypoint.sh @@ -1,6 +1,6 @@ #!/bin/bash # This file works as an entrypoint of the docker container running the node binary copied inside of it. -cd $(hostname -i):3054 +cd ${NODE_ID} export RUST_LOG=INFO ../executor diff --git a/node/tools/README.md b/node/tools/README.md index 833ce1ba..0ee9d3b0 100644 --- a/node/tools/README.md +++ b/node/tools/README.md @@ -12,13 +12,13 @@ These instructions guide you through the process of setting up and running a tes make nodes_config ``` - This command establishes a directory named `nodes-config` and creates a folder for each address listed in the `.txt` file, providing necessary configuration files for the respective node. + This command creates a directory named `nodes-config` and generates a folder for each address listed in the `.txt` file with the name `node_{NODE_NUMBER}`, providing essential configuration files for the corresponding node. The `NODE_NUMBER` is simply a numerical identifier for each node. ```bash - make node IP= + make node NODE= ``` - The default value for this command is `127.0.0.1:3054`. Note that running this command will take control of the terminal. + The default value for this command is set to `0` for launching the initial node, and you can increment the number for subsequent nodes. Note that running this command will take control of the terminal. ## Dockerized Setup From 19d7fcc68c98acc0cf00b8031db63acb17c6ccf7 Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Wed, 24 Jan 2024 12:17:30 -0300 Subject: [PATCH 043/139] Update localnet config bin --- node/tools/src/bin/localnet_config.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/node/tools/src/bin/localnet_config.rs b/node/tools/src/bin/localnet_config.rs index 839e727d..4ee1c4b4 100644 --- a/node/tools/src/bin/localnet_config.rs +++ b/node/tools/src/bin/localnet_config.rs @@ -105,7 +105,7 @@ fn main() -> anyhow::Result<()> { for (i, cfg) in cfgs.into_iter().enumerate() { // Recreate the directory for the node's config. 
- let root = args.output_dir.join(addrs[i].to_string()); + let root = args.output_dir.join(format!("node_{}", i)); let _ = fs::remove_dir_all(&root); fs::create_dir_all(&root).with_context(|| format!("create_dir_all({:?})", root))?; From b49af9635e95df86a5e15d11f54c14ecd38c7c89 Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Wed, 24 Jan 2024 13:13:18 -0300 Subject: [PATCH 044/139] Delete old docker entrypoint file --- entrypoint.sh | 5 ----- 1 file changed, 5 deletions(-) delete mode 100644 entrypoint.sh diff --git a/entrypoint.sh b/entrypoint.sh deleted file mode 100644 index b96a4835..00000000 --- a/entrypoint.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/bash - -cd $(hostname -i):3054 -export RUST_LOG=INFO -../executor From 2fd8061304b143c932285aedcd638650dca516d9 Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Wed, 24 Jan 2024 13:13:58 -0300 Subject: [PATCH 045/139] Update k8s deployment manifest adding node env var --- node/tools/k8s/k8s-deployment.yml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/node/tools/k8s/k8s-deployment.yml b/node/tools/k8s/k8s-deployment.yml index 5676a15d..a8ec145b 100644 --- a/node/tools/k8s/k8s-deployment.yml +++ b/node/tools/k8s/k8s-deployment.yml @@ -16,6 +16,9 @@ spec: containers: - name: consensus-node-01 image: consensus-node-01 + env: + - name: NODE_ID + value: "node_0" imagePullPolicy: Never ports: - containerPort: 3054 @@ -26,4 +29,4 @@ spec: readinessProbe: httpGet: path: /greeting - port: 3054 \ No newline at end of file + port: 3054 From 758ff9e3ddf9feec5ec58d9f695d03fd648911ff Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Thu, 25 Jan 2024 11:44:48 -0300 Subject: [PATCH 046/139] Add new stage in dockerfile to generate executor binary --- Dockerfile | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/Dockerfile b/Dockerfile index bedfa5ae..8d44e5e7 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,17 +1,19 @@ # Build Stage -FROM rust:latest as build +FROM rust:latest as builder COPY /node/ /node/ -COPY Makefile . WORKDIR /node RUN apt-get update && apt-get install -y libclang-dev RUN cargo build --release -RUN cd .. && make docker_node_configs + +# Binary copy stage +FROM scratch as binary +COPY --from=builder /node/target/release/executor . # Runtime Stage FROM debian:stable-slim as runtime -COPY --from=build /node/target/release/executor /node/ -COPY --from=build /node/tools/docker-config/nodes-config /node/ +COPY /node/tools/docker_binaries/executor /node/ +COPY /node/tools/docker-config/nodes-config /node/ COPY docker-entrypoint.sh /node/ WORKDIR /node From addacc8fadd05f941130c6f0db38a3793c64eaae Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Thu, 25 Jan 2024 11:46:04 -0300 Subject: [PATCH 047/139] Build correct target in compose file --- compose.yaml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/compose.yaml b/compose.yaml index 3e567c9c..1510f5df 100644 --- a/compose.yaml +++ b/compose.yaml @@ -2,7 +2,9 @@ version: "3.9" services: node-1: - build: . + build: + context: . 
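+      # Build the Dockerfile's `runtime` stage; the `binary` stage is only used to export the executor binary.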
+ target: runtime image: consensus-node container_name: consensus-node-1 environment: From e263eb60105e5c48cd1d709a73442626b488beb6 Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Thu, 25 Jan 2024 11:49:47 -0300 Subject: [PATCH 048/139] Update Makefile with all new targets --- Makefile | 23 +++++++++++++++-------- 1 file changed, 15 insertions(+), 8 deletions(-) diff --git a/Makefile b/Makefile index 8a8fe423..b1c5b1ce 100644 --- a/Makefile +++ b/Makefile @@ -1,4 +1,4 @@ -.PHONY: node nodes_config docker_node_configs node_docker consensus_docker_example clean clean_docker +.PHONY: node nodes_config docker_nodes_config docker_node consensus_docker_example clean clean_docker NODE?=0 DOCKER_IP=172.12.0.10 EXECUTABLE_NODE_DIR=node/tools @@ -13,18 +13,24 @@ nodes_config: # Docker commands -# This command will run inside the Dockerfile and it's not necessary to use it outside there. -docker_node_configs: +docker_build_executor: + docker build --output=node/tools/docker_binaries --target=binary . + +docker_node_image: + docker build -t consensus-node --target=runtime . + +docker_nodes_config: cd ${EXECUTABLE_NODE_DIR} && cargo run --release --bin localnet_config -- --input-addrs docker-config/addresses.txt --output-dir docker-config/nodes-config -node_docker: - mkdir -p ${EXECUTABLE_NODE_DIR}/docker-config - cd ${EXECUTABLE_NODE_DIR}/docker-config && rm -rf addresses.txt && echo ${DOCKER_IP}:3054 >> addresses.txt - docker-compose up -d node-1 +docker_node: + $(MAKE) docker_node_image + docker run -d --name consensus-node-${NODE} --env NODE_ID="node_${NODE}" consensus-node consensus_docker_example: mkdir -p ${EXECUTABLE_NODE_DIR}/docker-config - cd ${EXECUTABLE_NODE_DIR}/docker-config && rm -rf addresses.txt && touch addresses.txt && echo 172.12.0.10:3054 >> addresses.txt && echo 172.12.0.11:3054 >> addresses.txt + cd ${EXECUTABLE_NODE_DIR}/docker-config && rm -rf addresses.txt && echo 172.12.0.10:3054 >> addresses.txt && echo 172.12.0.11:3054 >> addresses.txt + $(MAKE) docker_nodes_config + $(MAKE) docker_node_image docker-compose up -d stop_docker_nodes: @@ -38,6 +44,7 @@ clean: clean_docker clean_docker: rm -rf ${EXECUTABLE_NODE_DIR}/docker-config + rm -rf ${EXECUTABLE_NODE_DIR}/docker_binaries docker rm -f consensus-node-1 docker rm -f consensus-node-2 docker network rm -f node-net From 9503f05bd1c4e6e8d188a8899e27112adad8a333 Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Thu, 25 Jan 2024 11:50:15 -0300 Subject: [PATCH 049/139] Add addresses example file for docker config --- node/tools/docker-config/addresses.txt | 1 + 1 file changed, 1 insertion(+) create mode 100644 node/tools/docker-config/addresses.txt diff --git a/node/tools/docker-config/addresses.txt b/node/tools/docker-config/addresses.txt new file mode 100644 index 00000000..7236c599 --- /dev/null +++ b/node/tools/docker-config/addresses.txt @@ -0,0 +1 @@ +0.0.0.0:3054 From 8bcdb881257c4d3d3591a1c940e1632f6425a047 Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Thu, 25 Jan 2024 11:52:09 -0300 Subject: [PATCH 050/139] Remove docker config dir from clean command --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index b1c5b1ce..31882b2b 100644 --- a/Makefile +++ b/Makefile @@ -43,7 +43,7 @@ clean: clean_docker rm -rf ${EXECUTABLE_NODE_DIR}/database clean_docker: - rm -rf ${EXECUTABLE_NODE_DIR}/docker-config + rm -rf ${EXECUTABLE_NODE_DIR}/docker-config/nodes-config rm -rf ${EXECUTABLE_NODE_DIR}/docker_binaries docker rm -f consensus-node-1 docker rm -f consensus-node-2 From 
2323211941c6addf8a12ec72ab712f32fc1e9cef Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Esteban=20Dimitroff=20H=C3=B3di?=
Date: Thu, 25 Jan 2024 15:23:04 -0300
Subject: [PATCH 051/139] Added some utility targets to Makefile

---
 Makefile | 17 ++++++++++++++++-
 1 file changed, 16 insertions(+), 1 deletion(-)

diff --git a/Makefile b/Makefile
index 31882b2b..ee5d04c5 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,8 @@
-.PHONY: node nodes_config docker_nodes_config docker_node consensus_docker_example clean clean_docker
+.PHONY: node nodes_config docker_node_configs node_docker consensus_docker_example clean clean_docker addresses_file blank_configs
 NODE?=0
 DOCKER_IP=172.12.0.10
 EXECUTABLE_NODE_DIR=node/tools
+NODES=4
 
 # Locally run commands
 
@@ -49,3 +50,17 @@ clean_docker:
	docker rm -f consensus-node-2
	docker network rm -f node-net
	docker image rm -f consensus-node
+
+addresses_file:
+	mkdir -p ${EXECUTABLE_NODE_DIR}/docker-config
+	cd ${EXECUTABLE_NODE_DIR}/docker-config && \
+	rm -rf addresses.txt && \
+	touch addresses.txt && \
+	for n in $$(seq 0 $$((${NODES} - 1))); do echo 0.0.0.$$n:3054 >> addresses.txt; done
+
+blank_configs: addresses_file docker_node_configs
+	for n in $$(seq 0 $$((${NODES} - 1))); do \
+	jq '.publicAddr = "0.0.0.0:3054"' node/tools/docker-config/nodes-config/node_$$n/config.json | \
+	jq '.gossipStaticOutbound = "[]"' > node/tools/docker-config/nodes-config/node_$$n/config.tmp && \
+	mv -f node/tools/docker-config/nodes-config/node_$$n/config.tmp node/tools/docker-config/nodes-config/node_$$n/config.json; \
+	done
\ No newline at end of file

From 2720e749f29530e0a305673d1d5ecaaa60aa9351 Mon Sep 17 00:00:00 2001
From: IAvecilla
Date: Thu, 25 Jan 2024 15:34:23 -0300
Subject: [PATCH 052/139] Make rpc port a CLI argument for the executor

---
 node/tools/src/main.rs | 12 +++++++++---
 1 file changed, 9 insertions(+), 3 deletions(-)

diff --git a/node/tools/src/main.rs b/node/tools/src/main.rs
index d9e064ce..3166b699 100644
--- a/node/tools/src/main.rs
+++ b/node/tools/src/main.rs
@@ -26,6 +26,9 @@ struct Args {
     /// Path to the rocksdb database of the node.
     #[arg(long, default_value = "./database")]
     database: PathBuf,
+    /// Port for the RPC server.
+    #[arg(long)]
+    rpc_port: Option<u16>,
 }
 
 impl Args {
@@ -89,9 +92,12 @@ async fn main() -> anyhow::Result<()> {
         .await
         .context("configs.into_executor()")?;
 
-    // Config for the RPC server.
-    let mut rpc_addr = configs.app.public_addr.to_string();
-    rpc_addr.replace_range(rpc_addr.find(':').unwrap().., ":3051");
+    let mut rpc_addr = configs.app.public_addr;
+    if let Some(port) = args.rpc_port {
+        rpc_addr.set_port(port);
+    } else {
+        rpc_addr.set_port(rpc_addr.port() + 100);
+    }
 
     // Initialize the storage.
 scope::run!(ctx, |ctx, s| async {

From af1caaa0cb133e59d7f5e50d6c872c1c89fae15d Mon Sep 17 00:00:00 2001
From: IAvecilla
Date: Thu, 25 Jan 2024 15:34:35 -0300
Subject: [PATCH 053/139] Change param type to run the server

---
 node/tools/src/rpc/server.rs | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/node/tools/src/rpc/server.rs b/node/tools/src/rpc/server.rs
index 6348dd2a..70a8468d 100644
--- a/node/tools/src/rpc/server.rs
+++ b/node/tools/src/rpc/server.rs
@@ -3,8 +3,7 @@
 use super::methods::health_check;
 use jsonrpsee::server::{middleware::http::ProxyGetRequestLayer, RpcModule, Server};
 
-pub async fn run_server(ip_address: String) -> anyhow::Result<()> {
-    let ip_address: SocketAddr = ip_address.parse()?;
+pub async fn run_server(ip_address: SocketAddr) -> anyhow::Result<()> {
     // Custom tower service to handle the RPC requests
     let service_builder = tower::ServiceBuilder::new()
         // Proxy `GET /health` requests to internal `system_health` method.

From 24cddea8298a07bbb10f4e2a16b611094684fb94 Mon Sep 17 00:00:00 2001
From: IAvecilla
Date: Thu, 25 Jan 2024 17:38:35 -0300
Subject: [PATCH 054/139] Add config generation for k8s node deployer

---
 node/tools/Cargo.toml          |  4 ++
 node/tools/src/bin/deployer.rs | 97 ++++++++++++++++++++++++++++++++++
 2 files changed, 101 insertions(+)
 create mode 100644 node/tools/src/bin/deployer.rs

diff --git a/node/tools/Cargo.toml b/node/tools/Cargo.toml
index 25c007ac..0cfe5c85 100644
--- a/node/tools/Cargo.toml
+++ b/node/tools/Cargo.toml
@@ -43,3 +43,7 @@ workspace = true
 [[bin]]
 name = "executor"
 path = "src/main.rs"
+
+[[bin]]
+name = "deployer"
+path = "src/bin/deployer.rs"
diff --git a/node/tools/src/bin/deployer.rs b/node/tools/src/bin/deployer.rs
new file mode 100644
index 00000000..4e3f53d6
--- /dev/null
+++ b/node/tools/src/bin/deployer.rs
@@ -0,0 +1,97 @@
+//! Deployer for the kubernetes cluster.
+use std::{fs, net::SocketAddr, path::PathBuf};
+
+use anyhow::Context;
+use clap::Parser;
+use rand::Rng;
+use zksync_consensus_bft::testonly;
+use zksync_consensus_crypto::TextFmt;
+use zksync_consensus_roles::{node, validator};
+use zksync_consensus_tools::AppConfig;
+
+/// Ports for the nodes to listen on kubernetes pod.
+const NODES_PORT: u16 = 3054;
+
+/// Command line arguments.
+#[derive(Debug, Parser)]
+struct Args {
+    /// Amount of nodes to deploy.
+    #[arg(long)]
+    nodes: usize,
+}
+
+/// Encodes a generated proto message to json for arbitrary ProtoFmt.
+fn encode_json<T: ProtoFmt>(x: &T) -> String {
+    let mut s = serde_json::Serializer::pretty(vec![]);
+    zksync_protobuf::serde::serialize(x, &mut s).unwrap();
+    String::from_utf8(s.into_inner()).unwrap()
+}
+
+fn main() -> anyhow::Result<()> {
+    let args = Args::parse();
+    let nodes = args.nodes;
+    assert!(nodes > 0, "at least 1 node has to be specified");
+
+    // Generate the keys for all the replicas.
+    let rng = &mut rand::thread_rng();
+    let validator_keys: Vec<validator::SecretKey> = (0..nodes).map(|_| rng.gen()).collect();
+    let node_keys: Vec<node::SecretKey> = (0..nodes).map(|_| rng.gen()).collect();
+
+    // Generate the genesis block.
+    // TODO: generating genesis block shouldn't require knowing the private keys.
+    let (genesis, validator_set) = testonly::make_genesis(
+        &validator_keys,
+        validator::Payload(vec![]),
+        validator::BlockNumber(0),
+    );
+
+    // Each node will have `gossip_peers` outbound peers.
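+    // With `peers = 2`, node i's key is added as a static inbound peer of
+    // nodes (2*i + 1) % n and (2*i + 2) % n, which keeps the gossip graph
+    // connected with a small diameter.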
+ let peers = 2; + + let mut cfgs: Vec<_> = (0..args.nodes) + .map(|_| AppConfig { + server_addr: SocketAddr::new(std::net::Ipv4Addr::UNSPECIFIED.into(), NODES_PORT), + public_addr: SocketAddr::new(std::net::Ipv4Addr::UNSPECIFIED.into(), NODES_PORT), + metrics_server_addr: None, + + validators: validator_set.clone(), + genesis_block: genesis.clone(), + + gossip_dynamic_inbound_limit: 2, + gossip_static_inbound: [].into(), + gossip_static_outbound: [].into(), + }) + .collect(); + + // Construct a gossip network with optimal diameter. + for (i, node_key) in node_keys.iter().enumerate() { + for j in 0..peers { + let next = (i * peers + j + 1) % args.nodes; + cfgs[next].gossip_static_inbound.insert(node_key.public()); + } + } + + let manifest_path = std::env::var("CARGO_MANIFEST_DIR").unwrap(); + let root = PathBuf::from(manifest_path).join("k8s_configs"); + let _ = fs::remove_dir_all(&root); + for (i, cfg) in cfgs.into_iter().enumerate() { + // Recreate the directory for the node's config. + let node_config_dir = root.join(format!("node_{}", i)); + fs::create_dir_all(&node_config_dir) + .with_context(|| format!("create_dir_all({:?})", node_config_dir))?; + + fs::write(node_config_dir.join("config.json"), encode_json(&cfg)).context("fs::write()")?; + fs::write( + node_config_dir.join("validator_key"), + &TextFmt::encode(&validator_keys[i]), + ) + .context("fs::write()")?; + fs::write( + node_config_dir.join("node_key"), + &TextFmt::encode(&node_keys[i]), + ) + .context("fs::write()")?; + } + + Ok(()) +} From 2cfaff49173d2150019480bf7b04f9ba0d2966e2 Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Thu, 25 Jan 2024 17:38:46 -0300 Subject: [PATCH 055/139] Add makefile target to clean k8s config --- Makefile | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index ee5d04c5..7a1e9548 100644 --- a/Makefile +++ b/Makefile @@ -39,10 +39,13 @@ stop_docker_nodes: # Clean commands -clean: clean_docker +clean: clean_docker clean_k8s rm -rf ${EXECUTABLE_NODE_DIR}/nodes-config rm -rf ${EXECUTABLE_NODE_DIR}/database +clean_k8s: + rm -rf ${EXECUTABLE_NODE_DIR}/k8s_configs + clean_docker: rm -rf ${EXECUTABLE_NODE_DIR}/docker-config/nodes-config rm -rf ${EXECUTABLE_NODE_DIR}/docker_binaries @@ -63,4 +66,4 @@ blank_configs: addresses_file docker_node_configs jq '.publicAddr = "0.0.0.0:3054"' node/tools/docker-config/nodes-config/node_$$n/config.json | \ jq '.gossipStaticOutbound = "[]"' > node/tools/docker-config/nodes-config/node_$$n/config.tmp && \ mv -f node/tools/docker-config/nodes-config/node_$$n/config.tmp node/tools/docker-config/nodes-config/node_$$n/config.json; \ - done \ No newline at end of file + done From 4ed83cc32b6c22a241e094160b780d012ba8ed9b Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Fri, 26 Jan 2024 19:18:16 -0300 Subject: [PATCH 056/139] Improve config generation struct --- node/tools/src/config.rs | 108 ++++++++++++++++++++++++++++++++++++--- 1 file changed, 100 insertions(+), 8 deletions(-) diff --git a/node/tools/src/config.rs b/node/tools/src/config.rs index d281e69e..30afddfd 100644 --- a/node/tools/src/config.rs +++ b/node/tools/src/config.rs @@ -1,19 +1,28 @@ //! Node configuration. 
 use crate::{proto, store};
 use anyhow::Context as _;
+use bft::testonly;
+use rand::Rng;
 use std::{
-    collections::{HashMap, HashSet},
+    collections::{hash_map::RandomState, HashMap, HashSet},
     fs,
+    net::SocketAddr,
     path::{Path, PathBuf},
 };
 use zksync_concurrency::ctx;
 use zksync_consensus_bft as bft;
 use zksync_consensus_crypto::{read_optional_text, read_required_text, Text, TextFmt};
 use zksync_consensus_executor as executor;
-use zksync_consensus_roles::{node, validator};
+use zksync_consensus_roles::{
+    node::{self, PublicKey},
+    validator,
+};
 use zksync_consensus_storage::{BlockStore, BlockStoreRunner, PersistentBlockStore};
 use zksync_protobuf::{required, ProtoFmt};
 
+/// Ports for the nodes to listen on kubernetes pod.
+const NODES_PORT: u16 = 3054;
+
 /// Decodes a proto message from json for arbitrary ProtoFmt.
 fn decode_json<T: ProtoFmt>(json: &str) -> anyhow::Result<T> {
     let mut d = serde_json::Deserializer::from_str(json);
@@ -22,20 +31,27 @@
     Ok(p)
 }
 
+/// Encodes a generated proto message to json for arbitrary ProtoFmt.
+fn encode_json<T: ProtoFmt>(x: &T) -> String {
+    let mut s = serde_json::Serializer::pretty(vec![]);
+    zksync_protobuf::serde::serialize(x, &mut s).unwrap();
+    String::from_utf8(s.into_inner()).unwrap()
+}
+
 /// Node configuration including executor configuration, optional validator configuration,
 /// and application-specific settings (e.g. metrics scraping).
-#[derive(Debug, PartialEq)]
+#[derive(Debug, PartialEq, Clone)]
 pub struct AppConfig {
-    pub server_addr: std::net::SocketAddr,
-    pub public_addr: std::net::SocketAddr,
-    pub metrics_server_addr: Option<std::net::SocketAddr>,
+    pub server_addr: SocketAddr,
+    pub public_addr: SocketAddr,
+    pub metrics_server_addr: Option<SocketAddr>,
 
     pub validators: validator::ValidatorSet,
     pub genesis_block: validator::FinalBlock,
 
     pub gossip_dynamic_inbound_limit: u64,
-    pub gossip_static_inbound: HashSet<node::PublicKey>,
-    pub gossip_static_outbound: HashMap<node::PublicKey, std::net::SocketAddr>,
+    pub gossip_static_inbound: HashSet<PublicKey>,
+    pub gossip_static_outbound: HashMap<PublicKey, SocketAddr>,
 }
 
 impl ProtoFmt for AppConfig {
@@ -163,6 +179,82 @@ impl<'a> ConfigPaths<'a> {
     }
 }
 
+impl AppConfig {
+    pub fn default_for(nodes_amount: u64) -> AppConfig {
+        // Generate the keys for all the replicas.
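+        // Note that each call generates a fresh random validator set and
+        // genesis block, so configs meant to share a network must come from
+        // the same generated set of keys.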
+        let rng = &mut rand::thread_rng();
+        let validator_keys: Vec<validator::SecretKey> =
+            (0..nodes_amount).map(|_| rng.gen()).collect();
+
+        let (genesis, validator_set) = testonly::make_genesis(
+            &validator_keys,
+            validator::Payload(vec![]),
+            validator::BlockNumber(0),
+        );
+
+        Self {
+            server_addr: SocketAddr::new(std::net::Ipv4Addr::UNSPECIFIED.into(), NODES_PORT),
+            public_addr: SocketAddr::new(std::net::Ipv4Addr::UNSPECIFIED.into(), NODES_PORT),
+            metrics_server_addr: None,
+
+            validators: validator_set.clone(),
+            genesis_block: genesis.clone(),
+
+            gossip_dynamic_inbound_limit: 2,
+            gossip_static_inbound: [].into(),
+            gossip_static_outbound: [].into(),
+        }
+    }
+
+    pub fn with_public_addr(&mut self, public_addr: SocketAddr) -> &mut Self {
+        self.public_addr = public_addr;
+        self
+    }
+
+    pub fn with_metrics_server_addr(&mut self, metrics_server_addr: SocketAddr) -> &mut Self {
+        self.metrics_server_addr = Some(metrics_server_addr);
+        self
+    }
+
+    pub fn with_gossip_dynamic_inbound_limit(
+        &mut self,
+        gossip_dynamic_inbound_limit: u64,
+    ) -> &mut Self {
+        self.gossip_dynamic_inbound_limit = gossip_dynamic_inbound_limit;
+        self
+    }
+
+    pub fn with_gossip_static_inbound(
+        &mut self,
+        gossip_static_inbound: HashSet<PublicKey>,
+    ) -> &mut Self {
+        self.gossip_static_inbound = gossip_static_inbound;
+        self
+    }
+
+    pub fn with_gossip_static_outbound(
+        &mut self,
+        gossip_static_outbound: HashMap<PublicKey, SocketAddr>,
+    ) -> &mut Self {
+        self.gossip_static_outbound = gossip_static_outbound;
+        self
+    }
+
+    pub fn add_gossip_static_outbound(&mut self, key: PublicKey, addr: SocketAddr) -> &mut Self {
+        self.gossip_static_outbound.insert(key, addr);
+        self
+    }
+
+    pub fn add_gossip_static_inbound(&mut self, key: PublicKey) -> &mut Self {
+        self.gossip_static_inbound.insert(key);
+        self
+    }
+
+    pub fn write_to_file(&self, path: &Path) -> anyhow::Result<()> {
+        fs::write(path.join("config.json"), encode_json(self)).context("fs::write()")
+    }
+}
+
 impl Configs {
     pub async fn make_executor(
         &self,

From 00a008a59c5fa065c38d9bed5aae3204a49913ae Mon Sep 17 00:00:00 2001
From: IAvecilla
Date: Fri, 26 Jan 2024 19:19:39 -0300
Subject: [PATCH 057/139] Use new config app functions to build localnet and k8s config

---
 node/tools/src/bin/deployer.rs        | 38 ++----------------
 node/tools/src/bin/localnet_config.rs | 57 +++++----------------------
 2 files changed, 14 insertions(+), 81 deletions(-)

diff --git a/node/tools/src/bin/deployer.rs b/node/tools/src/bin/deployer.rs
index 4e3f53d6..25cd7829 100644
--- a/node/tools/src/bin/deployer.rs
+++ b/node/tools/src/bin/deployer.rs
@@ -1,17 +1,13 @@
 //! Deployer for the kubernetes cluster.
-use std::{fs, net::SocketAddr, path::PathBuf};
+use std::{fs, path::PathBuf};
 
 use anyhow::Context;
 use clap::Parser;
 use rand::Rng;
-use zksync_consensus_bft::testonly;
 use zksync_consensus_crypto::TextFmt;
 use zksync_consensus_roles::{node, validator};
 use zksync_consensus_tools::AppConfig;
 
-/// Ports for the nodes to listen on kubernetes pod.
-const NODES_PORT: u16 = 3054;
-
 /// Command line arguments.
 #[derive(Debug, Parser)]
 struct Args {
@@ -20,13 +16,6 @@ struct Args {
     nodes: usize,
 }
 
-/// Encodes a generated proto message to json for arbitrary ProtoFmt.
-fn encode_json<T: ProtoFmt>(x: &T) -> String {
-    let mut s = serde_json::Serializer::pretty(vec![]);
-    zksync_protobuf::serde::serialize(x, &mut s).unwrap();
-    String::from_utf8(s.into_inner()).unwrap()
-}
-
 fn main() -> anyhow::Result<()> {
     let args = Args::parse();
     let nodes = args.nodes;
@@ -37,37 +26,18 @@ fn main() -> anyhow::Result<()> {
     let validator_keys: Vec<validator::SecretKey> = (0..nodes).map(|_| rng.gen()).collect();
     let node_keys: Vec<node::SecretKey> = (0..nodes).map(|_| rng.gen()).collect();
 
-    // Generate the genesis block.
-    // TODO: generating genesis block shouldn't require knowing the private keys.
-    let (genesis, validator_set) = testonly::make_genesis(
-        &validator_keys,
-        validator::Payload(vec![]),
-        validator::BlockNumber(0),
-    );
-
     // Each node will have `gossip_peers` outbound peers.
     let peers = 2;
 
     let mut cfgs: Vec<_> = (0..args.nodes)
-        .map(|_| AppConfig {
-            server_addr: SocketAddr::new(std::net::Ipv4Addr::UNSPECIFIED.into(), NODES_PORT),
-            public_addr: SocketAddr::new(std::net::Ipv4Addr::UNSPECIFIED.into(), NODES_PORT),
-            metrics_server_addr: None,
-
-            validators: validator_set.clone(),
-            genesis_block: genesis.clone(),
-
-            gossip_dynamic_inbound_limit: 2,
-            gossip_static_inbound: [].into(),
-            gossip_static_outbound: [].into(),
-        })
+        .map(|_| AppConfig::default_for(nodes as u64))
         .collect();
 
     // Construct a gossip network with optimal diameter.
     for (i, node_key) in node_keys.iter().enumerate() {
         for j in 0..peers {
             let next = (i * peers + j + 1) % args.nodes;
-            cfgs[next].gossip_static_inbound.insert(node_key.public());
+            cfgs[next].add_gossip_static_inbound(node_key.public());
         }
     }
 
     let manifest_path = std::env::var("CARGO_MANIFEST_DIR").unwrap();
     let root = PathBuf::from(manifest_path).join("k8s_configs");
     let _ = fs::remove_dir_all(&root);
     for (i, cfg) in cfgs.into_iter().enumerate() {
         // Recreate the directory for the node's config.
         let node_config_dir = root.join(format!("node_{}", i));
         fs::create_dir_all(&node_config_dir)
             .with_context(|| format!("create_dir_all({:?})", node_config_dir))?;
 
-        fs::write(node_config_dir.join("config.json"), encode_json(&cfg)).context("fs::write()")?;
+        cfg.write_to_file(&node_config_dir)?;
         fs::write(
             node_config_dir.join("validator_key"),
             &TextFmt::encode(&validator_keys[i]),
         )
diff --git a/node/tools/src/bin/localnet_config.rs b/node/tools/src/bin/localnet_config.rs
index 4ee1c4b4..68c64c9f 100644
--- a/node/tools/src/bin/localnet_config.rs
+++ b/node/tools/src/bin/localnet_config.rs
@@ -3,29 +3,10 @@
 use anyhow::Context as _;
 use clap::Parser;
 use rand::Rng;
 use std::{fs, net::SocketAddr, path::PathBuf};
-use zksync_consensus_bft::testonly;
 use zksync_consensus_crypto::TextFmt;
 use zksync_consensus_roles::{node, validator};
 use zksync_consensus_tools::AppConfig;
 
-/// Encodes a generated proto message to json for arbitrary ProtoFmt.
-fn encode_json<T: ProtoFmt>(x: &T) -> String {
-    let mut s = serde_json::Serializer::pretty(vec![]);
-    zksync_protobuf::serde::serialize(x, &mut s).unwrap();
-    String::from_utf8(s.into_inner()).unwrap()
-}
-
-/// Replaces IP of the address with UNSPECIFIED (aka INADDR_ANY) of the corresponding IP type.
-/// Opening a listener socket with an UNSPECIFIED IP, means that the new connections
-/// on any network interface of the VM will be accepted.
-fn with_unspecified_ip(addr: SocketAddr) -> SocketAddr {
-    let unspecified_ip = match addr {
-        SocketAddr::V4(_) => std::net::Ipv4Addr::UNSPECIFIED.into(),
-        SocketAddr::V6(_) => std::net::Ipv6Addr::UNSPECIFIED.into(),
-    };
-    SocketAddr::new(unspecified_ip, addr.port())
-}
-
 /// Command line arguments.
 #[derive(Debug, Parser)]
 struct Args {
@@ -54,6 +35,7 @@
         );
     }
     assert!(!addrs.is_empty(), "at least 1 address has to be specified");
+
     let metrics_server_addr = args
         .metrics_server_port
         .map(|port| SocketAddr::new(std::net::Ipv4Addr::UNSPECIFIED.into(), port));
@@ -63,43 +45,25 @@
     let validator_keys: Vec<validator::SecretKey> = (0..addrs.len()).map(|_| rng.gen()).collect();
     let node_keys: Vec<node::SecretKey> = (0..addrs.len()).map(|_| rng.gen()).collect();
 
-    // Generate the genesis block.
-    // TODO: generating genesis block shouldn't require knowing the private keys.
-    let (genesis, validator_set) = testonly::make_genesis(
-        &validator_keys,
-        validator::Payload(vec![]),
-        validator::BlockNumber(0),
-    );
-
     // Each node will have `gossip_peers` outbound peers.
     let nodes = addrs.len();
     let peers = 2;
 
-    let mut cfgs: Vec<_> = (0..nodes)
-        .map(|i| AppConfig {
-            server_addr: with_unspecified_ip(addrs[i]),
-            public_addr: addrs[i],
-            metrics_server_addr,
-
-            validators: validator_set.clone(),
-            genesis_block: genesis.clone(),
-
-            gossip_dynamic_inbound_limit: 0,
-            gossip_static_inbound: [].into(),
-            gossip_static_outbound: [].into(),
-        })
+    let mut default_config = AppConfig::default_for(nodes as u64);
+
+    if let Some(metrics_server_addr) = metrics_server_addr {
+        default_config.with_metrics_server_addr(metrics_server_addr);
+    }
+    let mut cfgs: Vec<_> = (0..nodes)
+        .map(|i| default_config.with_public_addr(addrs[i]).clone())
         .collect();
 
     // Construct a gossip network with optimal diameter.
     for i in 0..nodes {
         for j in 0..peers {
             let next = (i * peers + j + 1) % nodes;
-            cfgs[i]
-                .gossip_static_outbound
-                .insert(node_keys[next].public(), addrs[next]);
-            cfgs[next]
-                .gossip_static_inbound
-                .insert(node_keys[i].public());
+            cfgs[i].add_gossip_static_outbound(node_keys[next].public(), addrs[next]);
+            cfgs[next].add_gossip_static_inbound(node_keys[i].public());
         }
     }
 
@@ -108,8 +72,7 @@
         let root = args.output_dir.join(format!("node_{}", i));
         let _ = fs::remove_dir_all(&root);
         fs::create_dir_all(&root).with_context(|| format!("create_dir_all({:?})", root))?;
-
-        fs::write(root.join("config.json"), encode_json(&cfg)).context("fs::write()")?;
+        cfg.write_to_file(&root)?;
         fs::write(
             root.join("validator_key"),
             &TextFmt::encode(&validator_keys[i]),

From 4ce804b485b6c574982d738809ec60de759ac2b3 Mon Sep 17 00:00:00 2001
From: IAvecilla
Date: Fri, 26 Jan 2024 19:24:39 -0300
Subject: [PATCH 058/139] Add kube as dependency for tools crate

---
 node/Cargo.lock       | 528 ++++++++++++++++++++++++++++++++++++++++++
 node/Cargo.toml       |   1 +
 node/tools/Cargo.toml |   1 +
 3 files changed, 530 insertions(+)

diff --git a/node/Cargo.lock b/node/Cargo.lock
index ace1e3c9..023e3b8f 100644
--- a/node/Cargo.lock
+++ b/node/Cargo.lock
@@ -52,6 +52,19 @@ dependencies = [
  "subtle",
 ]
 
+[[package]]
+name = "ahash"
+version = "0.8.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "77c3a9648d43b9cd48db467b3f87fdd6e146bcc88ab0180006cef2179fe11d01"
+dependencies = [
+ "cfg-if",
+ "getrandom",
+ "once_cell",
+ "version_check",
+ "zerocopy",
+]
+
 [[package]]
 name = "aho-corasick"
 version = "1.1.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "b2969dcb958b36655471fc61f7e416fa76033bdd4bfed0678d8fee1e2d07a1f0"
 dependencies = [
  "memchr",
 ]
 
+[[package]]
+name = "allocator-api2"
+version = "0.2.16"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0942ffc6dcaadf03badf6e6a2d0228460359d5e34b57ccdc720b7382dfbd5ec5"
+
+[[package]]
+name = "android-tzdata"
+version = "0.1.1"
+source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0" + +[[package]] +name = "android_system_properties" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311" +dependencies = [ + "libc", +] + [[package]] name = "anes" version = "0.1.6" @@ -155,6 +189,17 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" +[[package]] +name = "backoff" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b62ddb9cb1ec0a098ad4bbf9344d0713fa193ae1a80af55febcff2627b6a00c1" +dependencies = [ + "getrandom", + "instant", + "rand 0.8.5", +] + [[package]] name = "backtrace" version = "0.3.69" @@ -368,6 +413,19 @@ dependencies = [ "zeroize", ] +[[package]] +name = "chrono" +version = "0.4.33" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9f13690e35a5e4ace198e7beea2895d29f3a9cc55015fcebe6336bd2010af9eb" +dependencies = [ + "android-tzdata", + "iana-time-zone", + "num-traits", + "serde", + "windows-targets 0.52.0", +] + [[package]] name = "ciborium" version = "0.2.1" @@ -617,6 +675,41 @@ dependencies = [ "syn 2.0.48", ] +[[package]] +name = "darling" +version = "0.20.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0209d94da627ab5605dcccf08bb18afa5009cfbef48d8a8b7d7bdbc79be25c5e" +dependencies = [ + "darling_core", + "darling_macro", +] + +[[package]] +name = "darling_core" +version = "0.20.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "177e3443818124b357d8e76f53be906d60937f0d3a90773a664fa63fa253e621" +dependencies = [ + "fnv", + "ident_case", + "proc-macro2", + "quote", + "strsim", + "syn 2.0.48", +] + +[[package]] +name = "darling_macro" +version = "0.20.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "836a9bbc7ad63342d6d6e7b815ccab164bc77a2d95d84bc3117a8c0d5c98e2d5" +dependencies = [ + "darling_core", + "quote", + "syn 2.0.48", +] + [[package]] name = "der" version = "0.7.8" @@ -636,6 +729,17 @@ dependencies = [ "powerfmt", ] +[[package]] +name = "derivative" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "diff" version = "0.1.13" @@ -668,6 +772,12 @@ version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dcbb2bf8e87535c23f7a8a321e364ce21462d0ff10cb6407820e8e96dfff6653" +[[package]] +name = "dyn-clone" +version = "1.0.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "545b22097d44f8a9581187cdf93de7a71e4722bf51200cfaba810865b49a495d" + [[package]] name = "ed25519" version = "2.2.3" @@ -820,6 +930,7 @@ checksum = "645c6916888f6cb6350d2550b80fb63e734897a8498abe35cfb732b6487804b0" dependencies = [ "futures-channel", "futures-core", + "futures-executor", "futures-io", "futures-sink", "futures-task", @@ -842,6 +953,17 @@ version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d" +[[package]] +name = "futures-executor" +version = "0.3.30" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "a576fc72ae164fca6b9db127eaa9a9dda0d61316034f33a0a0d4eda41f02b01d" +dependencies = [ + "futures-core", + "futures-task", + "futures-util", +] + [[package]] name = "futures-io" version = "0.3.30" @@ -974,6 +1096,10 @@ name = "hashbrown" version = "0.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604" +dependencies = [ + "ahash", + "allocator-api2", +] [[package]] name = "hdrhistogram" @@ -1034,6 +1160,12 @@ dependencies = [ "pin-project-lite", ] +[[package]] +name = "http-range-header" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "add0ab9360ddbd88cfeb3bd9574a1d85cfdfa14db10b3e21d3700dbc4328758f" + [[package]] name = "httparse" version = "1.8.0" @@ -1086,6 +1218,47 @@ dependencies = [ "tokio-rustls 0.24.1", ] +[[package]] +name = "hyper-timeout" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbb958482e8c7be4bc3cf272a766a2b0bf1a6755e7a6ae777f017a31d11b13b1" +dependencies = [ + "hyper", + "pin-project-lite", + "tokio", + "tokio-io-timeout", +] + +[[package]] +name = "iana-time-zone" +version = "0.1.59" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6a67363e2aa4443928ce15e57ebae94fd8949958fd1223c4cfc0cd473ad7539" +dependencies = [ + "android_system_properties", + "core-foundation-sys", + "iana-time-zone-haiku", + "js-sys", + "wasm-bindgen", + "windows-core", +] + +[[package]] +name = "iana-time-zone-haiku" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f" +dependencies = [ + "cc", +] + +[[package]] +name = "ident_case" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" + [[package]] name = "idna" version = "0.5.0" @@ -1139,6 +1312,15 @@ dependencies = [ "generic-array", ] +[[package]] +name = "instant" +version = "0.1.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" +dependencies = [ + "cfg-if", +] + [[package]] name = "is-terminal" version = "0.4.10" @@ -1192,6 +1374,31 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "json-patch" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "55ff1e1486799e3f64129f8ccad108b38290df9cd7015cd31bed17239f0789d6" +dependencies = [ + "serde", + "serde_json", + "thiserror", + "treediff", +] + +[[package]] +name = "jsonpath-rust" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96acbc6188d3bd83519d053efec756aa4419de62ec47be7f28dec297f7dc9eb0" +dependencies = [ + "pest", + "pest_derive", + "regex", + "serde_json", + "thiserror", +] + [[package]] name = "jsonrpsee" version = "0.21.0" @@ -1339,6 +1546,19 @@ dependencies = [ "url", ] +[[package]] +name = "k8s-openapi" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "301f367a36090b7dfdaac248ee3ed4f14a6a8292e7bec0f1c5e6e2e1f181cd33" +dependencies = [ + "base64 0.21.7", + "chrono", + "serde", + "serde-value", + "serde_json", +] + [[package]] name = "keccak" version = "0.1.5" @@ -1348,6 +1568,112 @@ dependencies = [ "cpufeatures", ] +[[package]] +name = 
"kube" +version = "0.88.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "462fe330a0617b276ec864c2255810adcdf519ecb6844253c54074b2086a97bc" +dependencies = [ + "k8s-openapi", + "kube-client", + "kube-core", + "kube-derive", + "kube-runtime", +] + +[[package]] +name = "kube-client" +version = "0.88.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7fe0d65dd6f3adba29cfb84f19dfe55449c7f6c35425f9d8294bec40313e0b64" +dependencies = [ + "base64 0.21.7", + "bytes", + "chrono", + "either", + "futures", + "home", + "http", + "http-body", + "hyper", + "hyper-rustls", + "hyper-timeout", + "jsonpath-rust", + "k8s-openapi", + "kube-core", + "pem", + "pin-project", + "rustls 0.21.10", + "rustls-pemfile 1.0.4", + "secrecy", + "serde", + "serde_json", + "serde_yaml", + "thiserror", + "tokio", + "tokio-util", + "tower", + "tower-http", + "tracing", +] + +[[package]] +name = "kube-core" +version = "0.88.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a6b42844e9172f631b8263ea9ce003b9251da13beb1401580937ad206dd82f4c" +dependencies = [ + "chrono", + "form_urlencoded", + "http", + "json-patch", + "k8s-openapi", + "once_cell", + "schemars", + "serde", + "serde_json", + "thiserror", +] + +[[package]] +name = "kube-derive" +version = "0.88.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f5b5a111ee287bd237b8190b8c39543ea9fd22f79e9c32a36c24e08234bcda22" +dependencies = [ + "darling", + "proc-macro2", + "quote", + "serde_json", + "syn 2.0.48", +] + +[[package]] +name = "kube-runtime" +version = "0.88.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2bc06275064c81056fbb28ea876b3fb339d970e8132282119359afca0835c0ea" +dependencies = [ + "ahash", + "async-trait", + "backoff", + "derivative", + "futures", + "hashbrown 0.14.3", + "json-patch", + "k8s-openapi", + "kube-client", + "parking_lot", + "pin-project", + "serde", + "serde_json", + "smallvec", + "thiserror", + "tokio", + "tokio-util", + "tracing", +] + [[package]] name = "lazy_static" version = "1.4.0" @@ -1525,6 +1851,12 @@ dependencies = [ "syn 2.0.48", ] +[[package]] +name = "mime" +version = "0.3.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" + [[package]] name = "minimal-lexical" version = "0.2.1" @@ -1712,12 +2044,67 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" +[[package]] +name = "pem" +version = "3.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b8fcc794035347fb64beda2d3b462595dd2753e3f268d89c5aae77e8cf2c310" +dependencies = [ + "base64 0.21.7", + "serde", +] + [[package]] name = "percent-encoding" version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" +[[package]] +name = "pest" +version = "2.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f200d8d83c44a45b21764d1916299752ca035d15ecd46faca3e9a2a2bf6ad06" +dependencies = [ + "memchr", + "thiserror", + "ucd-trie", +] + +[[package]] +name = "pest_derive" +version = "2.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bcd6ab1236bbdb3a49027e920e693192ebfe8913f6d60e294de57463a493cfde" +dependencies = [ + "pest", + 
"pest_generator", +] + +[[package]] +name = "pest_generator" +version = "2.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2a31940305ffc96863a735bef7c7994a00b325a7138fdbc5bda0f1a0476d3275" +dependencies = [ + "pest", + "pest_meta", + "proc-macro2", + "quote", + "syn 2.0.48", +] + +[[package]] +name = "pest_meta" +version = "2.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a7ff62f5259e53b78d1af898941cdcdccfae7385cf7d793a6e55de5d05bb4b7d" +dependencies = [ + "once_cell", + "pest", + "sha2", +] + [[package]] name = "petgraph" version = "0.6.4" @@ -2356,6 +2743,30 @@ dependencies = [ "windows-sys 0.52.0", ] +[[package]] +name = "schemars" +version = "0.8.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "45a28f4c49489add4ce10783f7911893516f15afe45d015608d41faca6bc4d29" +dependencies = [ + "dyn-clone", + "schemars_derive", + "serde", + "serde_json", +] + +[[package]] +name = "schemars_derive" +version = "0.8.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c767fd6fa65d9ccf9cf026122c1b555f2ef9a4f0cea69da4d7dbc3e258d30967" +dependencies = [ + "proc-macro2", + "quote", + "serde_derive_internals", + "syn 1.0.109", +] + [[package]] name = "scopeguard" version = "1.2.0" @@ -2372,6 +2783,16 @@ dependencies = [ "untrusted", ] +[[package]] +name = "secrecy" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9bd1c54ea06cfd2f6b63219704de0b9b4f72dcc2b8fdef820be6cd799780e91e" +dependencies = [ + "serde", + "zeroize", +] + [[package]] name = "security-framework" version = "2.9.2" @@ -2431,6 +2852,17 @@ dependencies = [ "syn 2.0.48", ] +[[package]] +name = "serde_derive_internals" +version = "0.26.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85bf8229e7920a9f636479437026331ce11aa132b4dde37d121944a44d6e5f3c" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "serde_json" version = "1.0.111" @@ -2442,6 +2874,19 @@ dependencies = [ "serde", ] +[[package]] +name = "serde_yaml" +version = "0.9.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1bf28c79a99f70ee1f1d83d10c875d2e70618417fda01ad1785e027579d9d38" +dependencies = [ + "indexmap 2.1.0", + "itoa", + "ryu", + "serde", + "unsafe-libyaml", +] + [[package]] name = "sha-1" version = "0.9.8" @@ -2766,6 +3211,16 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "tokio-io-timeout" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "30b74022ada614a1b4834de765f9bb43877f910cc8ce4be40e89042c9223a8bf" +dependencies = [ + "pin-project-lite", + "tokio", +] + [[package]] name = "tokio-macros" version = "2.2.0" @@ -2820,6 +3275,7 @@ dependencies = [ "futures-io", "futures-sink", "pin-project-lite", + "slab", "tokio", "tracing", ] @@ -2862,6 +3318,27 @@ dependencies = [ "tracing", ] +[[package]] +name = "tower-http" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "61c5bb1d698276a2443e5ecfabc1008bf15a36c12e6a7176e7bf089ea9131140" +dependencies = [ + "base64 0.21.7", + "bitflags 2.4.2", + "bytes", + "futures-core", + "futures-util", + "http", + "http-body", + "http-range-header", + "mime", + "pin-project-lite", + "tower-layer", + "tower-service", + "tracing", +] + [[package]] name = "tower-layer" version = "0.3.2" @@ -2936,6 +3413,15 @@ dependencies = [ "tracing-log", ] +[[package]] 
+name = "treediff" +version = "4.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "52984d277bdf2a751072b5df30ec0377febdb02f7696d64c2d7d54630bac4303" +dependencies = [ + "serde_json", +] + [[package]] name = "try-lock" version = "0.2.5" @@ -2948,6 +3434,12 @@ version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" +[[package]] +name = "ucd-trie" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed646292ffc8188ef8ea4d1e0e0150fb15a5c2e12ad9b8fc191ae7a8a7f3c4b9" + [[package]] name = "unicode-bidi" version = "0.3.15" @@ -2985,6 +3477,12 @@ dependencies = [ "subtle", ] +[[package]] +name = "unsafe-libyaml" +version = "0.2.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ab4c90930b95a82d00dc9e9ac071b4991924390d46cbd0dfe566148667605e4b" + [[package]] name = "untrusted" version = "0.9.0" @@ -3193,6 +3691,15 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" +[[package]] +name = "windows-core" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" +dependencies = [ + "windows-targets 0.52.0", +] + [[package]] name = "windows-sys" version = "0.48.0" @@ -3340,6 +3847,26 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09041cd90cf85f7f8b2df60c646f853b7f535ce68f85244eb6731cf89fa498ec" +[[package]] +name = "zerocopy" +version = "0.7.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "74d4d3961e53fa4c9a25a8637fc2bfaf2595b3d3ae34875568a5cf64787716be" +dependencies = [ + "zerocopy-derive", +] + +[[package]] +name = "zerocopy-derive" +version = "0.7.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.48", +] + [[package]] name = "zeroize" version = "1.7.0" @@ -3531,6 +4058,7 @@ dependencies = [ "async-trait", "clap", "jsonrpsee", + "kube", "prost", "rand 0.8.5", "rocksdb", diff --git a/node/Cargo.toml b/node/Cargo.toml index 9503a5bc..d15a10e7 100644 --- a/node/Cargo.toml +++ b/node/Cargo.toml @@ -84,6 +84,7 @@ tracing = { version = "0.1.37", features = ["attributes"] } tracing-subscriber = { version = "0.3.16", features = ["env-filter", "fmt"] } jsonrpsee = { version = "0.21.0", features = ["server", "http-client", "ws-client", "macros", "client-ws-transport-native-tls"] } tower = { version = "0.4.13", features = ["full"] } +kube = { version = "0.88.1", features = ["runtime", "derive"] } # Note that "bench" profile inherits from "release" profile and # "test" profile inherits from "dev" profile. 
diff --git a/node/tools/Cargo.toml b/node/tools/Cargo.toml index 0cfe5c85..fda4e6e4 100644 --- a/node/tools/Cargo.toml +++ b/node/tools/Cargo.toml @@ -30,6 +30,7 @@ tracing-subscriber.workspace = true vise-exporter.workspace = true jsonrpsee.workspace = true tower.workspace = true +kube.workspace = true [dev-dependencies] tempfile.workspace = true From b723715e1c8882b894bff3fc2a52b161a8725b50 Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Mon, 29 Jan 2024 15:18:15 -0300 Subject: [PATCH 059/139] Fix node config generation --- node/tools/src/bin/deployer.rs | 8 +++----- node/tools/src/bin/localnet_config.rs | 10 ++++------ node/tools/src/config.rs | 25 ++++++++++++++----------- 3 files changed, 21 insertions(+), 22 deletions(-) diff --git a/node/tools/src/bin/deployer.rs b/node/tools/src/bin/deployer.rs index 25cd7829..12c91839 100644 --- a/node/tools/src/bin/deployer.rs +++ b/node/tools/src/bin/deployer.rs @@ -16,22 +16,20 @@ struct Args { nodes: usize, } -fn main() -> anyhow::Result<()> { +fn generate_config() -> anyhow::Result> { let args = Args::parse(); let nodes = args.nodes; assert!(nodes > 0, "at least 1 node has to be specified"); // Generate the keys for all the replicas. let rng = &mut rand::thread_rng(); - let validator_keys: Vec = (0..nodes).map(|_| rng.gen()).collect(); let node_keys: Vec = (0..nodes).map(|_| rng.gen()).collect(); // Each node will have `gossip_peers` outbound peers. let peers = 2; - let mut cfgs: Vec<_> = (0..args.nodes) - .map(|_| AppConfig::default_for(nodes as u64)) - .collect(); + let (default_config, validator_keys) = AppConfig::default_for(nodes as u64); + let mut cfgs: Vec<_> = (0..args.nodes).map(|_| default_config.clone()).collect(); // Construct a gossip network with optimal diameter. for (i, node_key) in node_keys.iter().enumerate() { diff --git a/node/tools/src/bin/localnet_config.rs b/node/tools/src/bin/localnet_config.rs index 68c64c9f..fc794060 100644 --- a/node/tools/src/bin/localnet_config.rs +++ b/node/tools/src/bin/localnet_config.rs @@ -40,16 +40,14 @@ fn main() -> anyhow::Result<()> { .metrics_server_port .map(|port| SocketAddr::new(std::net::Ipv4Addr::UNSPECIFIED.into(), port)); - // Generate the keys for all the replicas. - let rng = &mut rand::thread_rng(); - let validator_keys: Vec = (0..addrs.len()).map(|_| rng.gen()).collect(); - let node_keys: Vec = (0..addrs.len()).map(|_| rng.gen()).collect(); - // Each node will have `gossip_peers` outbound peers. let nodes = addrs.len(); let peers = 2; - let mut default_config = AppConfig::default_for(nodes as u64); + let rng = &mut rand::thread_rng(); + let node_keys: Vec = (0..nodes).map(|_| rng.gen()).collect(); + + let (mut default_config, validator_keys) = AppConfig::default_for(nodes as u64); if let Some(metrics_server_addr) = metrics_server_addr { default_config.with_metrics_server_addr(metrics_server_addr); diff --git a/node/tools/src/config.rs b/node/tools/src/config.rs index 30afddfd..ffe0be59 100644 --- a/node/tools/src/config.rs +++ b/node/tools/src/config.rs @@ -180,7 +180,7 @@ impl<'a> ConfigPaths<'a> { } impl AppConfig { - pub fn default_for(nodes_amount: u64) -> AppConfig { + pub fn default_for(nodes_amount: u64) -> (AppConfig, Vec) { // Generate the keys for all the replicas. 
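        // The generated validator keys are returned to the caller alongside the
        // config, so localnet_config and the deployer no longer create their own.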
let rng = &mut rand::thread_rng(); let validator_keys: Vec = @@ -192,18 +192,21 @@ impl AppConfig { validator::BlockNumber(0), ); - Self { - server_addr: SocketAddr::new(std::net::Ipv4Addr::UNSPECIFIED.into(), NODES_PORT), - public_addr: SocketAddr::new(std::net::Ipv4Addr::UNSPECIFIED.into(), NODES_PORT), - metrics_server_addr: None, + ( + Self { + server_addr: SocketAddr::new(std::net::Ipv4Addr::UNSPECIFIED.into(), NODES_PORT), + public_addr: SocketAddr::new(std::net::Ipv4Addr::UNSPECIFIED.into(), NODES_PORT), + metrics_server_addr: None, - validators: validator_set.clone(), - genesis_block: genesis.clone(), + validators: validator_set.clone(), + genesis_block: genesis.clone(), - gossip_dynamic_inbound_limit: 2, - gossip_static_inbound: [].into(), - gossip_static_outbound: [].into(), - } + gossip_dynamic_inbound_limit: 2, + gossip_static_inbound: [].into(), + gossip_static_outbound: [].into(), + }, + validator_keys, + ) } pub fn with_public_addr(&mut self, public_addr: SocketAddr) -> &mut Self { From 83589a112cf57de94a540138e92abe1cc95cf9e0 Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Mon, 29 Jan 2024 17:45:21 -0300 Subject: [PATCH 060/139] Add k8s-openapi as dependency --- node/Cargo.lock | 1 + node/Cargo.toml | 1 + node/tools/Cargo.toml | 1 + 3 files changed, 3 insertions(+) diff --git a/node/Cargo.lock b/node/Cargo.lock index 023e3b8f..862c4f45 100644 --- a/node/Cargo.lock +++ b/node/Cargo.lock @@ -4058,6 +4058,7 @@ dependencies = [ "async-trait", "clap", "jsonrpsee", + "k8s-openapi", "kube", "prost", "rand 0.8.5", diff --git a/node/Cargo.toml b/node/Cargo.toml index d15a10e7..90d40efd 100644 --- a/node/Cargo.toml +++ b/node/Cargo.toml @@ -85,6 +85,7 @@ tracing-subscriber = { version = "0.3.16", features = ["env-filter", "fmt"] } jsonrpsee = { version = "0.21.0", features = ["server", "http-client", "ws-client", "macros", "client-ws-transport-native-tls"] } tower = { version = "0.4.13", features = ["full"] } kube = { version = "0.88.1", features = ["runtime", "derive"] } +k8s-openapi = { version = "0.21.0", features = ["latest"] } # Note that "bench" profile inherits from "release" profile and # "test" profile inherits from "dev" profile. diff --git a/node/tools/Cargo.toml b/node/tools/Cargo.toml index fda4e6e4..4525c137 100644 --- a/node/tools/Cargo.toml +++ b/node/tools/Cargo.toml @@ -31,6 +31,7 @@ vise-exporter.workspace = true jsonrpsee.workspace = true tower.workspace = true kube.workspace = true +k8s-openapi.workspace = true [dev-dependencies] tempfile.workspace = true From 6248277e5f34b13f8ca368ce99752b03f08c250e Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Mon, 29 Jan 2024 17:45:40 -0300 Subject: [PATCH 061/139] Add subcommands to generate config and deploy pods --- node/tools/src/bin/deployer.rs | 155 ++++++++++++++++++++++++++++++--- 1 file changed, 141 insertions(+), 14 deletions(-) diff --git a/node/tools/src/bin/deployer.rs b/node/tools/src/bin/deployer.rs index 12c91839..148330ff 100644 --- a/node/tools/src/bin/deployer.rs +++ b/node/tools/src/bin/deployer.rs @@ -2,40 +2,58 @@ use std::{fs, path::PathBuf}; use anyhow::Context; -use clap::Parser; +use clap::{Parser, Subcommand}; +use k8s_openapi::api::{ + apps::v1::Deployment, + core::v1::{Namespace, Pod}, +}; +use kube::{api::PostParams, Api, Client}; use rand::Rng; +use serde_json::json; use zksync_consensus_crypto::TextFmt; use zksync_consensus_roles::{node, validator}; use zksync_consensus_tools::AppConfig; /// Command line arguments. #[derive(Debug, Parser)] -struct Args { - /// Amount of nodes to deploy. 
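+// CLI shape: `deployer generate-config --nodes N` writes the per-node configs;
+// `deployer deploy --nodes N` creates the Kubernetes objects.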
+#[command(name = "deployer")] +struct DeployerCLI { + #[command(subcommand)] + command: DeployerCommands, +} + +#[derive(Debug, Parser)] +struct SubCommandArgs { #[arg(long)] nodes: usize, } -fn generate_config() -> anyhow::Result> { - let args = Args::parse(); - let nodes = args.nodes; +#[derive(Subcommand, Debug)] +enum DeployerCommands { + /// Generate configs for the nodes. + GenerateConfig(SubCommandArgs), + /// Deploy the nodes. + Deploy(SubCommandArgs), +} + +fn generate_config(nodes: usize) -> anyhow::Result<()> { assert!(nodes > 0, "at least 1 node has to be specified"); + // Each node will have `gossip_peers` outbound peers. + let peers = 2; + // Generate the keys for all the replicas. let rng = &mut rand::thread_rng(); let node_keys: Vec = (0..nodes).map(|_| rng.gen()).collect(); - // Each node will have `gossip_peers` outbound peers. - let peers = 2; - let (default_config, validator_keys) = AppConfig::default_for(nodes as u64); - let mut cfgs: Vec<_> = (0..args.nodes).map(|_| default_config.clone()).collect(); + let mut cfgs: Vec<_> = (0..nodes).map(|_| default_config.clone()).collect(); // Construct a gossip network with optimal diameter. - for (i, node_key) in node_keys.iter().enumerate() { + for i in 0..nodes { for j in 0..peers { - let next = (i * peers + j + 1) % args.nodes; - cfgs[next].add_gossip_static_inbound(node_key.public()); + let next = (i * peers + j + 1) % nodes; + cfgs[next].add_gossip_static_inbound(node_keys[i].public()); } } @@ -43,7 +61,6 @@ fn generate_config() -> anyhow::Result> { let root = PathBuf::from(manifest_path).join("k8s_configs"); let _ = fs::remove_dir_all(&root); for (i, cfg) in cfgs.into_iter().enumerate() { - // Recreate the directory for the node's config. let node_config_dir = root.join(format!("node_{}", i)); fs::create_dir_all(&node_config_dir) .with_context(|| format!("create_dir_all({:?})", node_config_dir))?; @@ -63,3 +80,113 @@ fn generate_config() -> anyhow::Result> { Ok(()) } + +async fn deploy(nodes: usize) -> anyhow::Result<()> { + // Create a Kubernetes client + let client = Client::try_default().await?; + + // Check if namespace consensus is already deployed + let namespaces: Api = Api::all(client.clone()); + let consensus_namespace = namespaces.get_opt("consensus").await?; + if consensus_namespace.is_none() { + // Create a new namespace object + let namespace: Namespace = serde_json::from_value(json!({ + "apiVersion": "v1", + "kind": "Namespace", + "metadata": { + "name": "consensus", + "labels": { + "name": "consensus" + } + } + }))?; + + // Deploy namespace + let namespaces: Api = Api::all(client.clone()); + let post_params = PostParams::default(); + let result = namespaces.create(&post_params, &namespace).await?; + + println!("Created namespace {:?}", result); + } + + for i in 0..nodes { + let node_container_name = format!("consensus-node-0{}", i); + // Create a new pod object + let deployment: Deployment = serde_json::from_value(json!({ + "apiVersion": "apps/v1", + "kind": "Deployment", + "metadata": { + "name": node_container_name, + "namespace": "consensus" + }, + "spec": { + "selector": { + "matchLabels": { + "app": node_container_name + } + }, + "replicas": 1, + "template": { + "metadata": { + "labels": { + "app": node_container_name + } + }, + "spec": { + "containers": [ + { + "name": node_container_name, + "image": "consensus-node", + "env": [ + { + "name": "NODE_ID", + "value": format!("node_{}", i) + } + ], + "imagePullPolicy": "Never", + "ports": [ + { + "containerPort": 3054 + } + ], + "livenessProbe": { + 
"httpGet": { + "path": "/health", + "port": 3054 + } + }, + "readinessProbe": { + "httpGet": { + "path": "/health", + "port": 3054 + } + } + } + ] + } + } + } + }))?; + + // Create a Kubernetes API object for the pods resource + let deployments: Api = Api::namespaced(client.clone(), "consensus"); + + // Create the pod + let post_params = PostParams::default(); + let result = deployments.create(&post_params, &deployment).await?; + + println!("Created deployment {:?}", result); + } + + Ok(()) +} + +#[tokio::main] +async fn main() -> anyhow::Result<()> { + let DeployerCLI { command } = DeployerCLI::parse(); + + match command { + DeployerCommands::GenerateConfig(args) => generate_config(args.nodes), + DeployerCommands::Deploy(args) => deploy(args.nodes).await, + } +} From 87d9defc06dc48b52464859e1480f178bcf487d1 Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Mon, 29 Jan 2024 18:53:27 -0300 Subject: [PATCH 062/139] Override entrypoint to run node in k8s pod --- Dockerfile | 5 ++++- Makefile | 9 +++++---- docker-entrypoint.sh | 4 ++-- k8s_entrypoint.sh | 6 ++++++ node/tools/src/bin/deployer.rs | 1 + 5 files changed, 18 insertions(+), 7 deletions(-) create mode 100644 k8s_entrypoint.sh diff --git a/Dockerfile b/Dockerfile index 8d44e5e7..5f274ec1 100644 --- a/Dockerfile +++ b/Dockerfile @@ -13,11 +13,14 @@ COPY --from=builder /node/target/release/executor . FROM debian:stable-slim as runtime COPY /node/tools/docker_binaries/executor /node/ -COPY /node/tools/docker-config/nodes-config /node/ +COPY /node/tools/k8s_configs/ /node/k8s_config +COPY /node/tools/docker-config/ /node/docker_config COPY docker-entrypoint.sh /node/ +COPY k8s_entrypoint.sh /node/ WORKDIR /node RUN chmod +x docker-entrypoint.sh +RUN chmod +x k8s_entrypoint.sh ENTRYPOINT ["./docker-entrypoint.sh"] diff --git a/Makefile b/Makefile index 7a1e9548..b7d6f981 100644 --- a/Makefile +++ b/Makefile @@ -1,4 +1,4 @@ -.PHONY: node nodes_config docker_node_configs node_docker consensus_docker_example clean clean_docker addresses_file blank_configs +.PHONY: node nodes_config docker_nodes_config node_docker consensus_docker_example clean clean_docker addresses_file blank_configs NODE?=0 DOCKER_IP=172.12.0.10 EXECUTABLE_NODE_DIR=node/tools @@ -21,7 +21,7 @@ docker_node_image: docker build -t consensus-node --target=runtime . docker_nodes_config: - cd ${EXECUTABLE_NODE_DIR} && cargo run --release --bin localnet_config -- --input-addrs docker-config/addresses.txt --output-dir docker-config/nodes-config + cd ${EXECUTABLE_NODE_DIR} && cargo run --release --bin localnet_config -- --input-addrs docker-config/addresses.txt --output-dir docker-config docker_node: $(MAKE) docker_node_image @@ -45,10 +45,11 @@ clean: clean_docker clean_k8s clean_k8s: rm -rf ${EXECUTABLE_NODE_DIR}/k8s_configs + kubectl delete deployments --all + kubectl delete pods --all clean_docker: - rm -rf ${EXECUTABLE_NODE_DIR}/docker-config/nodes-config - rm -rf ${EXECUTABLE_NODE_DIR}/docker_binaries + rm -rf ${EXECUTABLE_NODE_DIR}/docker-config docker rm -f consensus-node-1 docker rm -f consensus-node-2 docker network rm -f node-net diff --git a/docker-entrypoint.sh b/docker-entrypoint.sh index 3286c01c..07a3c781 100755 --- a/docker-entrypoint.sh +++ b/docker-entrypoint.sh @@ -1,6 +1,6 @@ #!/bin/bash # This file works as an entrypoint of the docker container running the node binary copied inside of it. 
-cd ${NODE_ID} +cd docker_config/${NODE_ID} export RUST_LOG=INFO -../executor +../../executor diff --git a/k8s_entrypoint.sh b/k8s_entrypoint.sh new file mode 100644 index 00000000..62aaf5fa --- /dev/null +++ b/k8s_entrypoint.sh @@ -0,0 +1,6 @@ +#!/bin/bash +# This file works as an entrypoint of the kubernetes cluster running the node binary copied inside of it. + +cd k8s_config/${NODE_ID} +export RUST_LOG=INFO +../../executor diff --git a/node/tools/src/bin/deployer.rs b/node/tools/src/bin/deployer.rs index 148330ff..90cf8da5 100644 --- a/node/tools/src/bin/deployer.rs +++ b/node/tools/src/bin/deployer.rs @@ -143,6 +143,7 @@ async fn deploy(nodes: usize) -> anyhow::Result<()> { "value": format!("node_{}", i) } ], + "command": ["./k8s_entrypoint.sh"], "imagePullPolicy": "Never", "ports": [ { From 251052f571fa5b7c0cb35f2db62823bf5f6383d9 Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Mon, 29 Jan 2024 18:59:10 -0300 Subject: [PATCH 063/139] Add makefile target to run k8s pods --- Makefile | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/Makefile b/Makefile index b7d6f981..2f2727f0 100644 --- a/Makefile +++ b/Makefile @@ -37,6 +37,12 @@ consensus_docker_example: stop_docker_nodes: docker stop consensus-node-1 consensus-node-2 +start_k8s_nodes: + cd ${EXECUTABLE_NODE_DIR} && cargo run --release --bin deployer generate-config --nodes ${NODES} + $(MAKE) docker_node_image + minikube image load consensus-node:latest + cd ${EXECUTABLE_NODE_DIR} && cargo run --release --bin deployer deploy --nodes ${NODES} + # Clean commands clean: clean_docker clean_k8s @@ -49,7 +55,6 @@ clean_k8s: kubectl delete pods --all clean_docker: - rm -rf ${EXECUTABLE_NODE_DIR}/docker-config docker rm -f consensus-node-1 docker rm -f consensus-node-2 docker network rm -f node-net From 10ba6d6556ec7ff9f67b79c70ac4d592a5610f66 Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Tue, 30 Jan 2024 12:07:18 -0300 Subject: [PATCH 064/139] Update nodes argument type to generate nodes config --- node/tools/src/bin/localnet_config.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/node/tools/src/bin/localnet_config.rs b/node/tools/src/bin/localnet_config.rs index 579e7b30..b2dd2e0f 100644 --- a/node/tools/src/bin/localnet_config.rs +++ b/node/tools/src/bin/localnet_config.rs @@ -50,7 +50,7 @@ fn main() -> anyhow::Result<()> { let rng = &mut rand::thread_rng(); let node_keys: Vec = (0..nodes).map(|_| rng.gen()).collect(); - let (mut default_config, validator_keys) = AppConfig::default_for(nodes as u64); + let (mut default_config, validator_keys) = AppConfig::default_for(nodes); if let Some(metrics_server_addr) = metrics_server_addr { default_config.with_metrics_server_addr(metrics_server_addr); From 991a8ac3290b6538c0aa5841da45fa5285864b67 Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Tue, 30 Jan 2024 12:08:21 -0300 Subject: [PATCH 065/139] Fix deployer config generation after merge --- node/tools/src/bin/deployer.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/node/tools/src/bin/deployer.rs b/node/tools/src/bin/deployer.rs index 90cf8da5..758237c3 100644 --- a/node/tools/src/bin/deployer.rs +++ b/node/tools/src/bin/deployer.rs @@ -46,7 +46,7 @@ fn generate_config(nodes: usize) -> anyhow::Result<()> { let rng = &mut rand::thread_rng(); let node_keys: Vec = (0..nodes).map(|_| rng.gen()).collect(); - let (default_config, validator_keys) = AppConfig::default_for(nodes as u64); + let (default_config, validator_keys) = AppConfig::default_for(nodes); let mut cfgs: Vec<_> = 
(0..nodes).map(|_| default_config.clone()).collect(); // Construct a gossip network with optimal diameter. From 926b387ab6001e9f56b516c09373246b16b54ea5 Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Tue, 30 Jan 2024 13:10:21 -0300 Subject: [PATCH 066/139] Fix clippy and format warnings --- node/tools/src/bin/deployer.rs | 40 +++++++++++++------------ node/tools/src/bin/localnet_config.rs | 2 +- node/tools/src/config.rs | 42 +++++++-------------------- node/tools/src/lib.rs | 2 +- node/tools/src/main.rs | 2 +- 5 files changed, 35 insertions(+), 53 deletions(-) diff --git a/node/tools/src/bin/deployer.rs b/node/tools/src/bin/deployer.rs index 758237c3..09b745d1 100644 --- a/node/tools/src/bin/deployer.rs +++ b/node/tools/src/bin/deployer.rs @@ -3,31 +3,33 @@ use std::{fs, path::PathBuf}; use anyhow::Context; use clap::{Parser, Subcommand}; -use k8s_openapi::api::{ - apps::v1::Deployment, - core::v1::{Namespace, Pod}, -}; +use k8s_openapi::api::{apps::v1::Deployment, core::v1::Namespace}; use kube::{api::PostParams, Api, Client}; use rand::Rng; use serde_json::json; +use tracing::log::info; use zksync_consensus_crypto::TextFmt; -use zksync_consensus_roles::{node, validator}; +use zksync_consensus_roles::node; use zksync_consensus_tools::AppConfig; /// Command line arguments. #[derive(Debug, Parser)] #[command(name = "deployer")] struct DeployerCLI { + /// Subcommand to run. #[command(subcommand)] command: DeployerCommands, } +/// Subcommand arguments. #[derive(Debug, Parser)] struct SubCommandArgs { + /// Number of nodes to deploy. #[arg(long)] nodes: usize, } +/// Subcommands. #[derive(Subcommand, Debug)] enum DeployerCommands { /// Generate configs for the nodes. @@ -36,13 +38,15 @@ enum DeployerCommands { Deploy(SubCommandArgs), } +/// Generates config for the nodes to run in the kubernetes cluster +/// Creates a directory for each node in the parent k8s_configs directory. fn generate_config(nodes: usize) -> anyhow::Result<()> { assert!(nodes > 0, "at least 1 node has to be specified"); - // Each node will have `gossip_peers` outbound peers. + // Each node will have `gossip_peers` inbound peers. let peers = 2; - // Generate the keys for all the replicas. + // Generate the node keys for all the replicas. let rng = &mut rand::thread_rng(); let node_keys: Vec = (0..nodes).map(|_| rng.gen()).collect(); @@ -50,10 +54,10 @@ fn generate_config(nodes: usize) -> anyhow::Result<()> { let mut cfgs: Vec<_> = (0..nodes).map(|_| default_config.clone()).collect(); // Construct a gossip network with optimal diameter. - for i in 0..nodes { + for (i, node) in node_keys.iter().enumerate() { for j in 0..peers { let next = (i * peers + j + 1) % nodes; - cfgs[next].add_gossip_static_inbound(node_keys[i].public()); + cfgs[next].add_gossip_static_inbound(node.public()); } } @@ -81,15 +85,13 @@ fn generate_config(nodes: usize) -> anyhow::Result<()> { Ok(()) } +/// Deploys the nodes to the kubernetes cluster. 
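+/// Creates the `consensus` namespace if it is missing, then one Deployment per node.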
async fn deploy(nodes: usize) -> anyhow::Result<()> { - // Create a Kubernetes client let client = Client::try_default().await?; - // Check if namespace consensus is already deployed let namespaces: Api = Api::all(client.clone()); let consensus_namespace = namespaces.get_opt("consensus").await?; if consensus_namespace.is_none() { - // Create a new namespace object let namespace: Namespace = serde_json::from_value(json!({ "apiVersion": "v1", "kind": "Namespace", @@ -101,17 +103,20 @@ async fn deploy(nodes: usize) -> anyhow::Result<()> { } }))?; - // Deploy namespace let namespaces: Api = Api::all(client.clone()); let post_params = PostParams::default(); let result = namespaces.create(&post_params, &namespace).await?; - println!("Created namespace {:?}", result); + info!("Namespace: {} ,created", result.metadata.name.unwrap()); + } else { + info!( + "Namespace: {} ,already exists", + consensus_namespace.unwrap().metadata.name.unwrap() + ); } for i in 0..nodes { let node_container_name = format!("consensus-node-0{}", i); - // Create a new pod object let deployment: Deployment = serde_json::from_value(json!({ "apiVersion": "apps/v1", "kind": "Deployment", @@ -169,14 +174,11 @@ async fn deploy(nodes: usize) -> anyhow::Result<()> { } }))?; - // Create a Kubernetes API object for the pods resource let deployments: Api = Api::namespaced(client.clone(), "consensus"); - - // Create the pod let post_params = PostParams::default(); let result = deployments.create(&post_params, &deployment).await?; - println!("Created deployment {:?}", result); + info!("Deployment: {} , created", result.metadata.name.unwrap()); } Ok(()) diff --git a/node/tools/src/bin/localnet_config.rs b/node/tools/src/bin/localnet_config.rs index b2dd2e0f..60a74749 100644 --- a/node/tools/src/bin/localnet_config.rs +++ b/node/tools/src/bin/localnet_config.rs @@ -4,7 +4,7 @@ use clap::Parser; use rand::Rng; use std::{fs, net::SocketAddr, path::PathBuf}; use zksync_consensus_crypto::TextFmt; -use zksync_consensus_roles::{node, validator}; +use zksync_consensus_roles::node; use zksync_consensus_tools::AppConfig; /// Command line arguments. diff --git a/node/tools/src/config.rs b/node/tools/src/config.rs index 0b53b644..57ae13eb 100644 --- a/node/tools/src/config.rs +++ b/node/tools/src/config.rs @@ -1,10 +1,8 @@ //! Node configuration. use crate::{proto, store}; use anyhow::Context as _; -use bft::testonly; -use rand::Rng; use std::{ - collections::{hash_map::RandomState, HashMap, HashSet}, + collections::{HashMap, HashSet}, fs, net::SocketAddr, path::{Path, PathBuf}, @@ -41,8 +39,8 @@ fn encode_json(x: &T) -> String { /// Pair of (public key, ip address) for a gossip network node. #[derive(Debug, Clone)] pub struct NodeAddr { - pub key: node::PublicKey, - pub addr: std::net::SocketAddr, + pub key: PublicKey, + pub addr: SocketAddr, } impl ProtoFmt for NodeAddr { @@ -75,8 +73,8 @@ pub struct AppConfig { pub max_payload_size: usize, pub gossip_dynamic_inbound_limit: usize, - pub gossip_static_inbound: HashSet, - pub gossip_static_outbound: HashMap, + pub gossip_static_inbound: HashSet, + pub gossip_static_outbound: HashMap, } impl ProtoFmt for AppConfig { @@ -214,15 +212,13 @@ impl AppConfig { pub fn default_for(nodes_amount: usize) -> (AppConfig, Vec) { // Generate the keys for all the replicas. 
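        // GenesisSetup::empty(rng, n) below generates one validator key per node;
        // genesis.keys is what gets handed back to callers.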
let rng = &mut rand::thread_rng(); - let validator_keys: Vec = - (0..nodes_amount).map(|_| rng.gen()).collect(); - let mut genesis = validator::GenesisSetup::empty(rng, nodes_amount); - genesis - .next_block() - .payload(validator::Payload(vec![])) - .push(); - let validator_keys = genesis.keys.clone(); + let mut genesis = validator::GenesisSetup::empty(rng, nodes_amount); + genesis + .next_block() + .payload(validator::Payload(vec![])) + .push(); + let validator_keys = genesis.keys.clone(); ( Self { @@ -260,22 +256,6 @@ impl AppConfig { self } - pub fn with_gossip_static_inbound( - &mut self, - gossip_static_inbound: HashSet, - ) -> &mut Self { - self.gossip_static_inbound = gossip_static_inbound; - self - } - - pub fn with_gossip_static_outbound( - &mut self, - gossip_static_outbound: HashMap, - ) -> &mut Self { - self.gossip_static_outbound = gossip_static_outbound; - self - } - pub fn add_gossip_static_outbound(&mut self, key: PublicKey, addr: SocketAddr) -> &mut Self { self.gossip_static_outbound.insert(key, addr); self diff --git a/node/tools/src/lib.rs b/node/tools/src/lib.rs index 28f40e3e..b728c38b 100644 --- a/node/tools/src/lib.rs +++ b/node/tools/src/lib.rs @@ -8,5 +8,5 @@ mod store; #[cfg(test)] mod tests; -pub use rpc::server; pub use config::{decode_json, AppConfig, ConfigPaths, NodeAddr}; +pub use rpc::server; diff --git a/node/tools/src/main.rs b/node/tools/src/main.rs index 98512df4..265fddd8 100644 --- a/node/tools/src/main.rs +++ b/node/tools/src/main.rs @@ -7,7 +7,7 @@ use tracing::metadata::LevelFilter; use tracing_subscriber::{prelude::*, Registry}; use vise_exporter::MetricsExporter; use zksync_concurrency::{ctx, scope}; -use zksync_consensus_tools::{server, decode_json, ConfigPaths, NodeAddr}; +use zksync_consensus_tools::{decode_json, server, ConfigPaths, NodeAddr}; use zksync_consensus_utils::no_copy::NoCopy; use zksync_protobuf::serde::Serde; From 5fca6b8a16b3286ff4400f1f655997d06ce94f80 Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Tue, 30 Jan 2024 18:44:02 -0300 Subject: [PATCH 067/139] Improve RPC server API to scale it easier --- node/tools/src/lib.rs | 2 +- node/tools/src/main.rs | 21 ++++--- node/tools/src/rpc/methods/health_check.rs | 24 +++++--- node/tools/src/rpc/methods/mod.rs | 8 +++ node/tools/src/rpc/server.rs | 66 +++++++++++++--------- 5 files changed, 78 insertions(+), 43 deletions(-) diff --git a/node/tools/src/lib.rs b/node/tools/src/lib.rs index 28f40e3e..f84a00f8 100644 --- a/node/tools/src/lib.rs +++ b/node/tools/src/lib.rs @@ -8,5 +8,5 @@ mod store; #[cfg(test)] mod tests; -pub use rpc::server; pub use config::{decode_json, AppConfig, ConfigPaths, NodeAddr}; +pub use rpc::server::RPCServer; diff --git a/node/tools/src/main.rs b/node/tools/src/main.rs index 98512df4..0fa6e09b 100644 --- a/node/tools/src/main.rs +++ b/node/tools/src/main.rs @@ -7,7 +7,7 @@ use tracing::metadata::LevelFilter; use tracing_subscriber::{prelude::*, Registry}; use vise_exporter::MetricsExporter; use zksync_concurrency::{ctx, scope}; -use zksync_consensus_tools::{server, decode_json, ConfigPaths, NodeAddr}; +use zksync_consensus_tools::{decode_json, ConfigPaths, NodeAddr, RPCServer}; use zksync_consensus_utils::no_copy::NoCopy; use zksync_protobuf::serde::Serde; @@ -43,7 +43,7 @@ struct Args { rpc_port: Option, /// IP address and key of the seed peers. 
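    /// These pairs are appended to the config's gossip_static_outbound map
    /// after the config file is loaded.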
#[arg(long)] - add_gossip_static_outbound: NodeAddrs, + add_gossip_static_outbound: Option, } impl Args { @@ -103,12 +103,14 @@ async fn main() -> anyhow::Result<()> { .context("config_paths().load()")?; // Add gossipStaticOutbound pairs from cli to config - configs.app.gossip_static_outbound.extend( - args.add_gossip_static_outbound - .0 - .into_iter() - .map(|e| (e.0.key, e.0.addr)), - ); + if let Some(gossip_static_outbound) = args.add_gossip_static_outbound { + configs.app.gossip_static_outbound.extend( + gossip_static_outbound + .0 + .into_iter() + .map(|e| (e.0.key, e.0.addr)), + ); + } let (executor, runner) = configs .make_executor(ctx) @@ -121,6 +123,7 @@ async fn main() -> anyhow::Result<()> { } else { rpc_addr.set_port(rpc_addr.port() + 100); } + let rpc_server = RPCServer::new(rpc_addr); // Initialize the storage. scope::run!(ctx, |ctx, s| async { @@ -137,7 +140,7 @@ async fn main() -> anyhow::Result<()> { } s.spawn_bg(runner.run(ctx)); s.spawn(executor.run(ctx)); - s.spawn(server::run_server(rpc_addr)); + s.spawn(rpc_server.run()); Ok(()) }) .await diff --git a/node/tools/src/rpc/methods/health_check.rs b/node/tools/src/rpc/methods/health_check.rs index 8b826991..27a85a56 100644 --- a/node/tools/src/rpc/methods/health_check.rs +++ b/node/tools/src/rpc/methods/health_check.rs @@ -1,12 +1,22 @@ //! Health check method for RPC server. use jsonrpsee::types::Params; -/// Health check response for /health endpoint. -pub(crate) fn callback(_params: Params) -> serde_json::Value { - serde_json::json!({"health": true}) -} +use super::RPCMethod; + +pub(crate) struct HealthCheck; + +impl RPCMethod for HealthCheck { + /// Health check response for /health endpoint. + fn callback(_params: Params) -> serde_json::Value { + serde_json::json!({"health": true}) + } + + /// Health check method name. + fn method() -> &'static str { + "health_check" + } -/// Health check method name. -pub(crate) fn method() -> &'static str { - "health_check" + fn path() -> &'static str { + "/health" + } } diff --git a/node/tools/src/rpc/methods/mod.rs b/node/tools/src/rpc/methods/mod.rs index 4bd7bd48..0b5d1b8c 100644 --- a/node/tools/src/rpc/methods/mod.rs +++ b/node/tools/src/rpc/methods/mod.rs @@ -1 +1,9 @@ +use jsonrpsee::types::Params; + +pub(crate) trait RPCMethod { + fn callback(params: Params) -> serde_json::Value; + fn method() -> &'static str; + fn path() -> &'static str; +} + pub(crate) mod health_check; diff --git a/node/tools/src/rpc/server.rs b/node/tools/src/rpc/server.rs index 70a8468d..bdb8831a 100644 --- a/node/tools/src/rpc/server.rs +++ b/node/tools/src/rpc/server.rs @@ -1,31 +1,45 @@ use std::net::SocketAddr; -use super::methods::health_check; +use super::methods::{health_check::HealthCheck, RPCMethod}; use jsonrpsee::server::{middleware::http::ProxyGetRequestLayer, RpcModule, Server}; +use zksync_concurrency::ctx::Ctx; -pub async fn run_server(ip_address: SocketAddr) -> anyhow::Result<()> { - // Custom tower service to handle the RPC requests - let service_builder = tower::ServiceBuilder::new() - // Proxy `GET /health` requests to internal `system_health` method. 
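            // (The method actually registered under this proxy is "health_check",
            // not "system_health"; a later commit in this series rewords the comment.)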
- .layer(ProxyGetRequestLayer::new( - "/health", - health_check::method(), - )?); - - let server = Server::builder() - .set_http_middleware(service_builder) - .build(ip_address) - .await?; - let mut module = RpcModule::new(()); - module.register_method(health_check::method(), |params, _| { - health_check::callback(params) - })?; - - let handle = server.start(module); - - // In this example we don't care about doing shutdown so let's it run forever. - // You may use the `ServerHandle` to shut it down or manage it yourself. - tokio::spawn(handle.stopped()); - - Ok(()) +/// RPC server. +pub struct RPCServer { + /// IP address to bind to. + ip_address: SocketAddr, +} + +impl RPCServer { + pub fn new(ip_address: SocketAddr) -> Self { + Self { ip_address } + } + + /// Runs the RPC server. + pub async fn run(&self) -> anyhow::Result<()> { + // Custom tower service to handle the RPC requests + let service_builder = tower::ServiceBuilder::new() + // Proxy `GET /health` requests to internal `system_health` method. + .layer(ProxyGetRequestLayer::new( + HealthCheck::path(), + HealthCheck::method(), + )?); + + let server = Server::builder() + .set_http_middleware(service_builder) + .build(self.ip_address) + .await?; + + let mut module = RpcModule::new(()); + module.register_method(HealthCheck::method(), |params, _| { + HealthCheck::callback(params) + })?; + + let handle = server.start(module); + + // In this example we don't care about doing shutdown so let's it run forever. + // You may use the `ServerHandle` to shut it down or manage it yourself. + tokio::spawn(handle.stopped()); + Ok(()) + } } From 0726affea42fc2265c11daa8c4097329e6e168e0 Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Tue, 30 Jan 2024 18:44:56 -0300 Subject: [PATCH 068/139] Fix concurrency management for rpc server --- node/tools/src/main.rs | 2 +- node/tools/src/rpc/server.rs | 6 ++---- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/node/tools/src/main.rs b/node/tools/src/main.rs index 0fa6e09b..22e3390d 100644 --- a/node/tools/src/main.rs +++ b/node/tools/src/main.rs @@ -140,7 +140,7 @@ async fn main() -> anyhow::Result<()> { } s.spawn_bg(runner.run(ctx)); s.spawn(executor.run(ctx)); - s.spawn(rpc_server.run()); + s.spawn(rpc_server.run(ctx)); Ok(()) }) .await diff --git a/node/tools/src/rpc/server.rs b/node/tools/src/rpc/server.rs index bdb8831a..253bebe9 100644 --- a/node/tools/src/rpc/server.rs +++ b/node/tools/src/rpc/server.rs @@ -16,7 +16,7 @@ impl RPCServer { } /// Runs the RPC server. - pub async fn run(&self) -> anyhow::Result<()> { + pub async fn run(&self, ctx: &Ctx) -> anyhow::Result<()> { // Custom tower service to handle the RPC requests let service_builder = tower::ServiceBuilder::new() // Proxy `GET /health` requests to internal `system_health` method. @@ -37,9 +37,7 @@ impl RPCServer { let handle = server.start(module); - // In this example we don't care about doing shutdown so let's it run forever. - // You may use the `ServerHandle` to shut it down or manage it yourself. 
- tokio::spawn(handle.stopped()); + ctx.wait(handle.stopped()).await?; Ok(()) } } From 21fb1c6b4f1c0da282a587d3b0d89ac558ea0380 Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Tue, 30 Jan 2024 18:50:22 -0300 Subject: [PATCH 069/139] Add missing docs to solve clippy warnings --- node/tools/src/rpc/methods/health_check.rs | 1 + node/tools/src/rpc/methods/mod.rs | 4 ++++ node/tools/src/rpc/mod.rs | 3 ++- 3 files changed, 7 insertions(+), 1 deletion(-) diff --git a/node/tools/src/rpc/methods/health_check.rs b/node/tools/src/rpc/methods/health_check.rs index 27a85a56..63f27c4e 100644 --- a/node/tools/src/rpc/methods/health_check.rs +++ b/node/tools/src/rpc/methods/health_check.rs @@ -3,6 +3,7 @@ use jsonrpsee::types::Params; use super::RPCMethod; +/// Health check method for RPC server. pub(crate) struct HealthCheck; impl RPCMethod for HealthCheck { diff --git a/node/tools/src/rpc/methods/mod.rs b/node/tools/src/rpc/methods/mod.rs index 0b5d1b8c..b18a254f 100644 --- a/node/tools/src/rpc/methods/mod.rs +++ b/node/tools/src/rpc/methods/mod.rs @@ -1,8 +1,12 @@ use jsonrpsee::types::Params; +/// Trait to implement for new RPC methods. pub(crate) trait RPCMethod { + /// Method response logic when called. fn callback(params: Params) -> serde_json::Value; + /// Method name. fn method() -> &'static str; + /// Method path for GET requests. fn path() -> &'static str; } diff --git a/node/tools/src/rpc/mod.rs b/node/tools/src/rpc/mod.rs index c256bad3..89bd9b90 100644 --- a/node/tools/src/rpc/mod.rs +++ b/node/tools/src/rpc/mod.rs @@ -1,3 +1,4 @@ //! RPC server for testing purposes. mod methods; -pub mod server; +/// Module for the RPC server implementation. +pub(crate) mod server; From 41ec845a524a4ce366b30e6a3463ae5de03f3069 Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Tue, 30 Jan 2024 18:51:41 -0300 Subject: [PATCH 070/139] Revert change for executor new flag --- node/tools/src/main.rs | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/node/tools/src/main.rs b/node/tools/src/main.rs index 22e3390d..675932ac 100644 --- a/node/tools/src/main.rs +++ b/node/tools/src/main.rs @@ -43,7 +43,7 @@ struct Args { rpc_port: Option, /// IP address and key of the seed peers. #[arg(long)] - add_gossip_static_outbound: Option, + add_gossip_static_outbound: NodeAddrs, } impl Args { @@ -103,14 +103,12 @@ async fn main() -> anyhow::Result<()> { .context("config_paths().load()")?; // Add gossipStaticOutbound pairs from cli to config - if let Some(gossip_static_outbound) = args.add_gossip_static_outbound { - configs.app.gossip_static_outbound.extend( - gossip_static_outbound - .0 - .into_iter() - .map(|e| (e.0.key, e.0.addr)), - ); - } + configs.app.gossip_static_outbound.extend( + args.add_gossip_static_outbound + .0 + .into_iter() + .map(|e| (e.0.key, e.0.addr)), + ); let (executor, runner) = configs .make_executor(ctx) From 7ba8cb3b202d8c0e67463e812322a1adb33dbf7f Mon Sep 17 00:00:00 2001 From: Grzegorz Prusak Date: Wed, 31 Jan 2024 19:28:39 +0100 Subject: [PATCH 071/139] shutting down server when context is cancelled --- node/tools/src/rpc/methods/health_check.rs | 3 +-- node/tools/src/rpc/server.rs | 21 ++++++++++++++------- 2 files changed, 15 insertions(+), 9 deletions(-) diff --git a/node/tools/src/rpc/methods/health_check.rs b/node/tools/src/rpc/methods/health_check.rs index 63f27c4e..172285cf 100644 --- a/node/tools/src/rpc/methods/health_check.rs +++ b/node/tools/src/rpc/methods/health_check.rs @@ -1,7 +1,6 @@ //! Health check method for RPC server. 
-use jsonrpsee::types::Params; - use super::RPCMethod; +use jsonrpsee::types::Params; /// Health check method for RPC server. pub(crate) struct HealthCheck; diff --git a/node/tools/src/rpc/server.rs b/node/tools/src/rpc/server.rs index 253bebe9..125cffc3 100644 --- a/node/tools/src/rpc/server.rs +++ b/node/tools/src/rpc/server.rs @@ -1,8 +1,7 @@ -use std::net::SocketAddr; - use super::methods::{health_check::HealthCheck, RPCMethod}; use jsonrpsee::server::{middleware::http::ProxyGetRequestLayer, RpcModule, Server}; -use zksync_concurrency::ctx::Ctx; +use std::net::SocketAddr; +use zksync_concurrency::{ctx, scope}; /// RPC server. pub struct RPCServer { @@ -16,7 +15,7 @@ impl RPCServer { } /// Runs the RPC server. - pub async fn run(&self, ctx: &Ctx) -> anyhow::Result<()> { + pub async fn run(&self, ctx: &ctx::Ctx) -> anyhow::Result<()> { // Custom tower service to handle the RPC requests let service_builder = tower::ServiceBuilder::new() // Proxy `GET /health` requests to internal `system_health` method. @@ -36,8 +35,16 @@ impl RPCServer { })?; let handle = server.start(module); - - ctx.wait(handle.stopped()).await?; - Ok(()) + scope::run!(ctx, |ctx, s| async { + s.spawn_bg(async { + ctx.canceled().await; + // Ignore `AlreadyStoppedError`. + let _ = handle.stop(); + Ok(()) + }); + handle.clone().stopped().await; + Ok(()) + }) + .await } } From d859f391eb73f8d620cd49581aebe6a2cf674542 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Esteban=20Dimitroff=20H=C3=B3di?= Date: Thu, 1 Feb 2024 10:00:34 -0300 Subject: [PATCH 072/139] Removed k8s yml files and modularized kube.rs access --- node/Cargo.lock | 1 + node/Cargo.toml | 1 + node/tools/Cargo.toml | 1 + node/tools/k8s/k8s-deployment.yml | 32 ---------- node/tools/k8s/k8s-namespace.yml | 6 -- node/tools/k8s/k8s-service.yml | 12 ---- node/tools/src/bin/deployer.rs | 102 +++--------------------------- node/tools/src/lib.rs | 1 + 8 files changed, 12 insertions(+), 144 deletions(-) delete mode 100644 node/tools/k8s/k8s-deployment.yml delete mode 100644 node/tools/k8s/k8s-namespace.yml delete mode 100644 node/tools/k8s/k8s-service.yml diff --git a/node/Cargo.lock b/node/Cargo.lock index 871a8a40..69d76062 100644 --- a/node/Cargo.lock +++ b/node/Cargo.lock @@ -4060,6 +4060,7 @@ dependencies = [ "anyhow", "async-trait", "clap", + "futures", "jsonrpsee", "k8s-openapi", "kube", diff --git a/node/Cargo.toml b/node/Cargo.toml index 90d40efd..9e9775d7 100644 --- a/node/Cargo.toml +++ b/node/Cargo.toml @@ -86,6 +86,7 @@ jsonrpsee = { version = "0.21.0", features = ["server", "http-client", "ws-clien tower = { version = "0.4.13", features = ["full"] } kube = { version = "0.88.1", features = ["runtime", "derive"] } k8s-openapi = { version = "0.21.0", features = ["latest"] } +futures = "0.3.1" # Note that "bench" profile inherits from "release" profile and # "test" profile inherits from "dev" profile. 
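The `k8s` module this commit starts calling (`get_client`, `create_or_reuse_namespace`, `create_deployment`) is not shown in this series. A minimal sketch of the shape those helpers likely take, inferred from the call sites in deployer.rs — the exact signatures and error handling are assumptions:

use k8s_openapi::api::core::v1::Namespace;
use kube::{api::PostParams, Api, Client};
use serde_json::json;

// Assumed async; deploy() would await it.
pub async fn get_client() -> anyhow::Result<Client> {
    Ok(Client::try_default().await?)
}

/// Creates the namespace if it does not exist yet, otherwise reuses it.
pub async fn create_or_reuse_namespace(client: &Client, name: &str) -> anyhow::Result<()> {
    let namespaces: Api<Namespace> = Api::all(client.clone());
    if namespaces.get_opt(name).await?.is_none() {
        let namespace: Namespace = serde_json::from_value(json!({
            "apiVersion": "v1",
            "kind": "Namespace",
            "metadata": { "name": name, "labels": { "name": name } }
        }))?;
        namespaces.create(&PostParams::default(), &namespace).await?;
    }
    Ok(())
}

// create_deployment would follow the same pattern, moving the Deployment JSON
// out of the old deploy() body.
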
diff --git a/node/tools/Cargo.toml b/node/tools/Cargo.toml index 3546e97c..49f3b039 100644 --- a/node/tools/Cargo.toml +++ b/node/tools/Cargo.toml @@ -33,6 +33,7 @@ jsonrpsee.workspace = true tower.workspace = true kube.workspace = true k8s-openapi.workspace = true +futures.workspace = true [dev-dependencies] tempfile.workspace = true diff --git a/node/tools/k8s/k8s-deployment.yml b/node/tools/k8s/k8s-deployment.yml deleted file mode 100644 index a8ec145b..00000000 --- a/node/tools/k8s/k8s-deployment.yml +++ /dev/null @@ -1,32 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: consensus-node-01 - namespace: consensus -spec: - selector: - matchLabels: - app: consensus-node-01 - replicas: 1 - template: - metadata: - labels: - app: consensus-node-01 - spec: - containers: - - name: consensus-node-01 - image: consensus-node-01 - env: - - name: NODE_ID - value: "node_0" - imagePullPolicy: Never - ports: - - containerPort: 3054 - livenessProbe: - httpGet: - path: /greeting - port: 3054 - readinessProbe: - httpGet: - path: /greeting - port: 3054 diff --git a/node/tools/k8s/k8s-namespace.yml b/node/tools/k8s/k8s-namespace.yml deleted file mode 100644 index 2d977ff5..00000000 --- a/node/tools/k8s/k8s-namespace.yml +++ /dev/null @@ -1,6 +0,0 @@ -apiVersion: v1 -kind: Namespace -metadata: - name: consensus - labels: - name: consensus \ No newline at end of file diff --git a/node/tools/k8s/k8s-service.yml b/node/tools/k8s/k8s-service.yml deleted file mode 100644 index fff121ff..00000000 --- a/node/tools/k8s/k8s-service.yml +++ /dev/null @@ -1,12 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: consensus-node-01 - namespace: consensus -spec: - type: NodePort - selector: - app: consensus-node-01 - ports: - - port: 3054 - nodePort: 30088 \ No newline at end of file diff --git a/node/tools/src/bin/deployer.rs b/node/tools/src/bin/deployer.rs index 09b745d1..a01e0ddb 100644 --- a/node/tools/src/bin/deployer.rs +++ b/node/tools/src/bin/deployer.rs @@ -3,13 +3,10 @@ use std::{fs, path::PathBuf}; use anyhow::Context; use clap::{Parser, Subcommand}; -use k8s_openapi::api::{apps::v1::Deployment, core::v1::Namespace}; -use kube::{api::PostParams, Api, Client}; use rand::Rng; -use serde_json::json; -use tracing::log::info; use zksync_consensus_crypto::TextFmt; use zksync_consensus_roles::node; +use zksync_consensus_tools::k8s; use zksync_consensus_tools::AppConfig; /// Command line arguments. @@ -87,98 +84,15 @@ fn generate_config(nodes: usize) -> anyhow::Result<()> { /// Deploys the nodes to the kubernetes cluster. 
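/// The kube API plumbing now lives in the tools' `k8s` module; this function
/// only sequences namespace and deployment creation.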
async fn deploy(nodes: usize) -> anyhow::Result<()> { - let client = Client::try_default().await?; - - let namespaces: Api = Api::all(client.clone()); - let consensus_namespace = namespaces.get_opt("consensus").await?; - if consensus_namespace.is_none() { - let namespace: Namespace = serde_json::from_value(json!({ - "apiVersion": "v1", - "kind": "Namespace", - "metadata": { - "name": "consensus", - "labels": { - "name": "consensus" - } - } - }))?; - - let namespaces: Api = Api::all(client.clone()); - let post_params = PostParams::default(); - let result = namespaces.create(&post_params, &namespace).await?; - - info!("Namespace: {} ,created", result.metadata.name.unwrap()); - } else { - info!( - "Namespace: {} ,already exists", - consensus_namespace.unwrap().metadata.name.unwrap() - ); - } + let client = k8s::get_client()?; + k8s::create_or_reuse_namespace(&client, "consensus")?; for i in 0..nodes { - let node_container_name = format!("consensus-node-0{}", i); - let deployment: Deployment = serde_json::from_value(json!({ - "apiVersion": "apps/v1", - "kind": "Deployment", - "metadata": { - "name": node_container_name, - "namespace": "consensus" - }, - "spec": { - "selector": { - "matchLabels": { - "app": node_container_name - } - }, - "replicas": 1, - "template": { - "metadata": { - "labels": { - "app": node_container_name - } - }, - "spec": { - "containers": [ - { - "name": node_container_name, - "image": "consensus-node", - "env": [ - { - "name": "NODE_ID", - "value": format!("node_{}", i) - } - ], - "command": ["./k8s_entrypoint.sh"], - "imagePullPolicy": "Never", - "ports": [ - { - "containerPort": 3054 - } - ], - "livenessProbe": { - "httpGet": { - "path": "/health", - "port": 3054 - } - }, - "readinessProbe": { - "httpGet": { - "path": "/health", - "port": 3054 - } - } - } - ] - } - } - } - }))?; - - let deployments: Api = Api::namespaced(client.clone(), "consensus"); - let post_params = PostParams::default(); - let result = deployments.create(&post_params, &deployment).await?; - - info!("Deployment: {} , created", result.metadata.name.unwrap()); + k8s::create_deployment( + &client, + &format!("consensus-node-0{i}"), + &format!("node_{i}"), + )?; } Ok(()) diff --git a/node/tools/src/lib.rs b/node/tools/src/lib.rs index b728c38b..ec248f5d 100644 --- a/node/tools/src/lib.rs +++ b/node/tools/src/lib.rs @@ -1,6 +1,7 @@ //! CLI tools for the consensus node. 
#![allow(missing_docs)] mod config; +pub mod k8s; mod proto; mod rpc; mod store; From 95ec2b7664397c745efdf89d5621acff4ceeb817 Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Thu, 1 Feb 2024 17:55:20 -0300 Subject: [PATCH 073/139] Update consensus docker example with new rpc ports --- compose.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/compose.yaml b/compose.yaml index dc904b3a..c62357cf 100644 --- a/compose.yaml +++ b/compose.yaml @@ -6,7 +6,7 @@ services: image: consensus-node container_name: consensus-node-1 ports: - - "3051:3051" + - "3154:3154" networks: node_net: # This allow us to know the ip of the node-1 container to fill the address in the config file @@ -16,7 +16,7 @@ services: image: consensus-node container_name: consensus-node-2 ports: - - "3051:3051" + - "3155:3154" networks: node_net: # This allow us to know the ip of the node-2 container to fill the address in the config file From d697cc169cdfc3c1e624a80e86696466b2fd1ec3 Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Thu, 1 Feb 2024 17:55:48 -0300 Subject: [PATCH 074/139] Add new endpoint to get peers of the node --- node/tools/src/rpc/methods/health_check.rs | 7 ++-- node/tools/src/rpc/methods/mod.rs | 6 ++- node/tools/src/rpc/methods/peers.rs | 45 ++++++++++++++++++++++ node/tools/src/rpc/server.rs | 15 ++++++-- 4 files changed, 65 insertions(+), 8 deletions(-) create mode 100644 node/tools/src/rpc/methods/peers.rs diff --git a/node/tools/src/rpc/methods/health_check.rs b/node/tools/src/rpc/methods/health_check.rs index 172285cf..b7e6756c 100644 --- a/node/tools/src/rpc/methods/health_check.rs +++ b/node/tools/src/rpc/methods/health_check.rs @@ -1,14 +1,14 @@ //! Health check method for RPC server. use super::RPCMethod; -use jsonrpsee::types::Params; +use jsonrpsee::types::{error::ErrorCode, ErrorObjectOwned, Params}; /// Health check method for RPC server. pub(crate) struct HealthCheck; impl RPCMethod for HealthCheck { /// Health check response for /health endpoint. - fn callback(_params: Params) -> serde_json::Value { - serde_json::json!({"health": true}) + fn callback(_params: Params) -> Result { + Ok(serde_json::json!({"health": true})) } /// Health check method name. @@ -16,6 +16,7 @@ impl RPCMethod for HealthCheck { "health_check" } + /// Method path for GET requests. fn path() -> &'static str { "/health" } diff --git a/node/tools/src/rpc/methods/mod.rs b/node/tools/src/rpc/methods/mod.rs index b18a254f..60fab2ee 100644 --- a/node/tools/src/rpc/methods/mod.rs +++ b/node/tools/src/rpc/methods/mod.rs @@ -1,9 +1,10 @@ -use jsonrpsee::types::Params; +use anyhow::Error; +use jsonrpsee::types::{error::ErrorCode, ErrorObject, ErrorObjectOwned, Params}; /// Trait to implement for new RPC methods. pub(crate) trait RPCMethod { /// Method response logic when called. - fn callback(params: Params) -> serde_json::Value; + fn callback(params: Params) -> Result; /// Method name. fn method() -> &'static str; /// Method path for GET requests. @@ -11,3 +12,4 @@ pub(crate) trait RPCMethod { } pub(crate) mod health_check; +pub(crate) mod peers; diff --git a/node/tools/src/rpc/methods/peers.rs b/node/tools/src/rpc/methods/peers.rs new file mode 100644 index 00000000..6054ef68 --- /dev/null +++ b/node/tools/src/rpc/methods/peers.rs @@ -0,0 +1,45 @@ +//! Peers method for RPC server. 
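+//! Reports the node's `gossip_static_inbound` set by re-reading `config.json`
+//! from the working directory.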
+use crate::{decode_json, AppConfig}; + +use super::RPCMethod; +use anyhow::Error; +use jsonrpsee::{ + types::{error::ErrorCode, ErrorObjectOwned, Params}, + MethodResponse, +}; +use std::fs::{self}; +use zksync_consensus_crypto::TextFmt; +use zksync_protobuf::serde::Serde; + +/// Peers method for RPC server. +pub(crate) struct PeersInfo; + +impl RPCMethod for PeersInfo { + /// Peers response for /peers endpoint. + fn callback(_params: Params) -> Result { + // This may change in the future since we are assuming that the executor binary is being run inside the config directory. + let node_config = + fs::read_to_string("config.json").map_err(|_e| ErrorCode::InternalError)?; + let node_config = decode_json::>(&node_config) + .map_err(|_e| ErrorCode::InternalError)? + .0; + let peers: Vec = node_config + .gossip_static_inbound + .iter() + .map(|x| x.encode()) + .collect(); + Ok(serde_json::json!({ + "peers": peers + })) + } + + /// Peers method name. + fn method() -> &'static str { + "peers" + } + + /// Method path for GET requests. + fn path() -> &'static str { + "/peers" + } +} diff --git a/node/tools/src/rpc/server.rs b/node/tools/src/rpc/server.rs index 125cffc3..0183b692 100644 --- a/node/tools/src/rpc/server.rs +++ b/node/tools/src/rpc/server.rs @@ -1,5 +1,9 @@ -use super::methods::{health_check::HealthCheck, RPCMethod}; -use jsonrpsee::server::{middleware::http::ProxyGetRequestLayer, RpcModule, Server}; +use super::methods::{health_check::HealthCheck, peers::PeersInfo, RPCMethod}; +use jsonrpsee::{ + server::{middleware::http::ProxyGetRequestLayer, RpcModule, Server}, + types::{error::ErrorCode, ErrorObject}, + MethodResponse, +}; use std::net::SocketAddr; use zksync_concurrency::{ctx, scope}; @@ -18,10 +22,14 @@ impl RPCServer { pub async fn run(&self, ctx: &ctx::Ctx) -> anyhow::Result<()> { // Custom tower service to handle the RPC requests let service_builder = tower::ServiceBuilder::new() - // Proxy `GET /health` requests to internal `system_health` method. + // Proxy `GET /` requests to internal methods. .layer(ProxyGetRequestLayer::new( HealthCheck::path(), HealthCheck::method(), + )?) + .layer(ProxyGetRequestLayer::new( + PeersInfo::path(), + PeersInfo::method(), )?); let server = Server::builder() @@ -33,6 +41,7 @@ impl RPCServer { module.register_method(HealthCheck::method(), |params, _| { HealthCheck::callback(params) })?; + module.register_method(PeersInfo::method(), |params, _| PeersInfo::callback(params))?; let handle = server.start(module); scope::run!(ctx, |ctx, s| async { From 7fe6ed9693df578dd7a8803b8b6599fe58d913f6 Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Thu, 1 Feb 2024 18:07:13 -0300 Subject: [PATCH 075/139] Fix format and linter --- node/tools/src/rpc/methods/health_check.rs | 2 +- node/tools/src/rpc/methods/mod.rs | 3 +-- node/tools/src/rpc/methods/peers.rs | 6 +----- node/tools/src/rpc/server.rs | 6 +----- 4 files changed, 4 insertions(+), 13 deletions(-) diff --git a/node/tools/src/rpc/methods/health_check.rs b/node/tools/src/rpc/methods/health_check.rs index b7e6756c..8d32107a 100644 --- a/node/tools/src/rpc/methods/health_check.rs +++ b/node/tools/src/rpc/methods/health_check.rs @@ -1,6 +1,6 @@ //! Health check method for RPC server. use super::RPCMethod; -use jsonrpsee::types::{error::ErrorCode, ErrorObjectOwned, Params}; +use jsonrpsee::types::{error::ErrorCode, Params}; /// Health check method for RPC server. 
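/// Answers {"health": true}; exposed both as the "health_check" RPC method and
/// as plain GET /health via the proxy layer.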
pub(crate) struct HealthCheck; diff --git a/node/tools/src/rpc/methods/mod.rs b/node/tools/src/rpc/methods/mod.rs index 60fab2ee..2cac6a52 100644 --- a/node/tools/src/rpc/methods/mod.rs +++ b/node/tools/src/rpc/methods/mod.rs @@ -1,5 +1,4 @@ -use anyhow::Error; -use jsonrpsee::types::{error::ErrorCode, ErrorObject, ErrorObjectOwned, Params}; +use jsonrpsee::types::{error::ErrorCode, Params}; /// Trait to implement for new RPC methods. pub(crate) trait RPCMethod { diff --git a/node/tools/src/rpc/methods/peers.rs b/node/tools/src/rpc/methods/peers.rs index 6054ef68..35f974d4 100644 --- a/node/tools/src/rpc/methods/peers.rs +++ b/node/tools/src/rpc/methods/peers.rs @@ -2,11 +2,7 @@ use crate::{decode_json, AppConfig}; use super::RPCMethod; -use anyhow::Error; -use jsonrpsee::{ - types::{error::ErrorCode, ErrorObjectOwned, Params}, - MethodResponse, -}; +use jsonrpsee::types::{error::ErrorCode, Params}; use std::fs::{self}; use zksync_consensus_crypto::TextFmt; use zksync_protobuf::serde::Serde; diff --git a/node/tools/src/rpc/server.rs b/node/tools/src/rpc/server.rs index 0183b692..6d2d5131 100644 --- a/node/tools/src/rpc/server.rs +++ b/node/tools/src/rpc/server.rs @@ -1,9 +1,5 @@ use super::methods::{health_check::HealthCheck, peers::PeersInfo, RPCMethod}; -use jsonrpsee::{ - server::{middleware::http::ProxyGetRequestLayer, RpcModule, Server}, - types::{error::ErrorCode, ErrorObject}, - MethodResponse, -}; +use jsonrpsee::server::{middleware::http::ProxyGetRequestLayer, RpcModule, Server}; use std::net::SocketAddr; use zksync_concurrency::{ctx, scope}; From 57f600a8e6faf7a1cbc388b1ac2a1af9970e4776 Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Thu, 1 Feb 2024 18:39:25 -0300 Subject: [PATCH 076/139] Update cargo.lock --- node/Cargo.lock | 101 ++++++++++++++++++++++++++++-------------------- 1 file changed, 59 insertions(+), 42 deletions(-) diff --git a/node/Cargo.lock b/node/Cargo.lock index 11d61029..928033b5 100644 --- a/node/Cargo.lock +++ b/node/Cargo.lock @@ -83,9 +83,9 @@ dependencies = [ [[package]] name = "anstyle" -version = "1.0.4" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7079075b41f533b8c61d2a4d073c4676e1f8b249ff94a393b0595db304e0dd87" +checksum = "2faccea4cc4ab4a667ce676a30e8ec13922a692c99bb8f5b11f1502c72e04220" [[package]] name = "anstyle-parse" @@ -370,9 +370,9 @@ dependencies = [ [[package]] name = "ciborium" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "effd91f6c78e5a4ace8a5d3c0b6bfaec9e2baaef55f3efc00e45fb2e477ee926" +checksum = "42e69ffd6f0917f5c029256a24d0161db17cea3997d185db0d35926308770f0e" dependencies = [ "ciborium-io", "ciborium-ll", @@ -381,15 +381,15 @@ dependencies = [ [[package]] name = "ciborium-io" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cdf919175532b369853f5d5e20b26b43112613fd6fe7aee757e35f7a44642656" +checksum = "05afea1e0a06c9be33d539b876f1ce3692f4afea2cb41f740e7743225ed1c757" [[package]] name = "ciborium-ll" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "defaa24ecc093c77630e6c15e17c51f5e187bf35ee514f4e2d67baaa96dae22b" +checksum = "57663b653d948a338bfb3eeba9bb2fd5fcfaecb9e199e87e1eda4d9e8b240fd9" dependencies = [ "ciborium-io", "half", @@ -569,6 +569,12 @@ version = "0.8.19" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"248e3bacc7dc6baa3b21e405ee045c3047101a49145e7e9eca583ab4c2ca5345" +[[package]] +name = "crunchy" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" + [[package]] name = "crypto-common" version = "0.1.6" @@ -950,7 +956,7 @@ dependencies = [ "futures-sink", "futures-util", "http", - "indexmap 2.1.0", + "indexmap 2.2.2", "slab", "tokio", "tokio-util", @@ -959,9 +965,13 @@ dependencies = [ [[package]] name = "half" -version = "1.8.2" +version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eabb4a44450da02c90444cf74558da904edde8fb4e9035a9a6a4e15445af0bd7" +checksum = "bc52e53916c08643f1b56ec082790d1e86a32e58dc5268f897f313fbae7b4872" +dependencies = [ + "cfg-if", + "crunchy", +] [[package]] name = "hashbrown" @@ -1122,9 +1132,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.1.0" +version = "2.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d530e1a18b1cb4c484e6e34556a0d948706958449fca0cab753d649f2bce3d1f" +checksum = "824b2ae422412366ba479e8111fd301f7b5faece8149317bb81925979a53f520" dependencies = [ "equivalent", "hashbrown 0.14.3", @@ -1362,9 +1372,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.152" +version = "0.2.153" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13e3bf6590cbc649f4d1a3eefc9d5d6eb746f5200ffb04e5e142700b8faa56e7" +checksum = "9c198f91728a82281a64e1f4f9eeb25d82cb32a5de251c6bd1b5154d63a8e7bd" [[package]] name = "libloading" @@ -1394,9 +1404,9 @@ dependencies = [ [[package]] name = "libz-sys" -version = "1.1.14" +version = "1.1.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "295c17e837573c8c821dbaeb3cceb3d745ad082f7572191409e69cbc1b3fd050" +checksum = "037731f5d3aaa87a5675e895b63ddff1a87624bc29f77004ea829809654e48f6" dependencies = [ "cc", "pkg-config", @@ -1588,6 +1598,12 @@ dependencies = [ "num-traits", ] +[[package]] +name = "num-conv" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9" + [[package]] name = "num-integer" version = "0.1.45" @@ -1725,23 +1741,23 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e1d3afd2628e69da2be385eb6f2fd57c8ac7977ceeff6dc166ff1657b0e386a9" dependencies = [ "fixedbitset", - "indexmap 2.1.0", + "indexmap 2.2.2", ] [[package]] name = "pin-project" -version = "1.1.3" +version = "1.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fda4ed1c6c173e3fc7a83629421152e01d7b1f9b7f65fb301e490e8cfc656422" +checksum = "0302c4a0442c456bd56f841aee5c3bfd17967563f6fadc9ceb9f9c23cf3807e0" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.3" +version = "1.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" +checksum = "266c042b60c9c76b8d53061e52b2e0d1116abc57cefc8c5cd671619a56ac3690" dependencies = [ "proc-macro2", "quote", @@ -1867,9 +1883,9 @@ dependencies = [ [[package]] name = "proc-macro-crate" -version = "2.0.1" +version = "2.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97dc5fea232fc28d2f597b37c4876b348a40e33f3b02cc975c8d006d78d94b1a" +checksum = 
"b00f26d3400549137f92511a46ac1cd8ce37cb5598a96d382381458b992a5d24" dependencies = [ "toml_datetime", "toml_edit", @@ -2135,7 +2151,7 @@ checksum = "b62dbe01f0b06f9d8dc7d49e05a0785f153b00b2c227856282f671e0318c9b15" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.4.4", + "regex-automata 0.4.5", "regex-syntax 0.8.2", ] @@ -2150,9 +2166,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.4" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b7fa1134405e2ec9353fd416b17f8dacd46c473d7d3fd1cf202706a14eb792a" +checksum = "5bb987efffd3c6d0d8f5f89510bb458559eab11e4f869acb20bf845e016259cd" dependencies = [ "aho-corasick", "memchr", @@ -2224,9 +2240,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.30" +version = "0.38.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "322394588aaf33c24007e8bb3238ee3e4c5c09c084ab32bc73890b99ff326bca" +checksum = "6ea3e1a662af26cd7a3ba09c0297a31af215563ecf42817c98df621387f4e949" dependencies = [ "bitflags 2.4.2", "errno", @@ -2403,9 +2419,9 @@ checksum = "b97ed7a9823b74f99c7742f5336af7be5ecd3eeafcb1507d1fa93347b1d589b0" [[package]] name = "serde" -version = "1.0.195" +version = "1.0.196" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "63261df402c67811e9ac6def069e4786148c4563f4b50fd4bf30aa370d626b02" +checksum = "870026e60fa08c69f064aa766c10f10b1d62db9ccd4d0abb206472bee0ce3b32" dependencies = [ "serde_derive", ] @@ -2422,9 +2438,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.195" +version = "1.0.196" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46fe8f8603d81ba86327b23a2e9cdf49e1255fb94a4c5f297f6ee0547178ea2c" +checksum = "33c85360c95e7d137454dc81d9a4ed2b8efd8fbe19cee57357b32b9771fccb67" dependencies = [ "proc-macro2", "quote", @@ -2433,9 +2449,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.111" +version = "1.0.113" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "176e46fa42316f18edd598015a5166857fc835ec732f5215eac6b7bdbf0a84f4" +checksum = "69801b70b1c3dac963ecb03a364ba0ceda9cf60c71cfe475e99864759c8b8a79" dependencies = [ "itoa", "ryu", @@ -2536,9 +2552,9 @@ checksum = "e6ecd384b10a64542d77071bd64bd7b231f4ed5940fba55e98c3de13824cf3d7" [[package]] name = "snow" -version = "0.9.4" +version = "0.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58021967fd0a5eeeb23b08df6cc244a4d4a5b4aec1d27c9e02fad1a58b4cd74e" +checksum = "850948bee068e713b8ab860fe1adc4d109676ab4c3b621fd8147f06b261f2f85" dependencies = [ "aes-gcm", "blake2", @@ -2706,11 +2722,12 @@ dependencies = [ [[package]] name = "time" -version = "0.3.31" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f657ba42c3f86e7680e53c8cd3af8abbe56b5491790b46e22e19c0d57463583e" +checksum = "fe80ced77cbfb4cb91a94bf72b378b4b6791a0d9b7f09d0be747d1bdff4e68bd" dependencies = [ "deranged", + "num-conv", "powerfmt", "serde", "time-core", @@ -2836,7 +2853,7 @@ version = "0.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "396e4d48bbb2b7554c944bde63101b5ae446cff6ec4a24227428f15eb72ef338" dependencies = [ - "indexmap 2.1.0", + "indexmap 2.2.2", "toml_datetime", "winnow", ] @@ -3327,9 +3344,9 @@ checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04" [[package]] name = "winnow" -version = "0.5.34" +version = "0.5.36" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7cf47b659b318dccbd69cc4797a39ae128f533dce7902a1096044d1967b9c16" +checksum = "818ce546a11a9986bc24f93d0cdf38a8a1a400f1473ea8c82e59f6e0ffab9249" dependencies = [ "memchr", ] From 8ffa0c0899f69da0ff1df4f745704e116d4ef75a Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Thu, 1 Feb 2024 18:59:02 -0300 Subject: [PATCH 077/139] Delete unnecesary features in added crates --- node/Cargo.lock | 497 +----------------------------------------------- node/Cargo.toml | 4 +- 2 files changed, 5 insertions(+), 496 deletions(-) diff --git a/node/Cargo.lock b/node/Cargo.lock index 928033b5..e0ca823a 100644 --- a/node/Cargo.lock +++ b/node/Cargo.lock @@ -127,17 +127,6 @@ version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b34d609dfbaf33d6889b2b7106d3ca345eacad44200913df5ba02bfd31d2ba9" -[[package]] -name = "async-lock" -version = "3.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d034b430882f8381900d3fe6f0aaa3ad94f2cb4ac519b429692a1bc2dda4ae7b" -dependencies = [ - "event-listener", - "event-listener-strategy", - "pin-project-lite", -] - [[package]] name = "async-trait" version = "0.1.77" @@ -468,37 +457,12 @@ name = "compile-fmt" version = "0.1.0" source = "git+https://github.com/slowli/compile-fmt.git?rev=c6a41c846c9a6f70cdba4b44c9f3922242ffcf12#c6a41c846c9a6f70cdba4b44c9f3922242ffcf12" -[[package]] -name = "concurrent-queue" -version = "2.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d16048cd947b08fa32c24458a22f5dc5e835264f689f4f5653210c69fd107363" -dependencies = [ - "crossbeam-utils", -] - [[package]] name = "const-oid" version = "0.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" -[[package]] -name = "core-foundation" -version = "0.9.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91e195e091a93c46f7102ec7818a2aa394e1e1771c3ab4825963fa03e45afb8f" -dependencies = [ - "core-foundation-sys", - "libc", -] - -[[package]] -name = "core-foundation-sys" -version = "0.8.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06ea2b9bc92be3c2baa9334a323ebca2d6f074ff852cd1d7b11064035cd3868f" - [[package]] name = "cpufeatures" version = "0.2.12" @@ -730,27 +694,6 @@ dependencies = [ "windows-sys 0.52.0", ] -[[package]] -name = "event-listener" -version = "4.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67b215c49b2b248c855fb73579eb1f4f26c38ffdc12973e20e07b91d78d5646e" -dependencies = [ - "concurrent-queue", - "parking", - "pin-project-lite", -] - -[[package]] -name = "event-listener-strategy" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "958e4d70b6d5e81971bebec42271ec641e7ff4e170a6fa605f2b8a8b65cb97d3" -dependencies = [ - "event-listener", - "pin-project-lite", -] - [[package]] name = "fastrand" version = "2.0.1" @@ -803,15 +746,6 @@ version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" -[[package]] -name = "form_urlencoded" -version = "1.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456" -dependencies = [ - "percent-encoding", -] - [[package]] name = "fuchsia-cprng" version = "0.1.1" @@ 
-877,12 +811,6 @@ version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" -[[package]] -name = "futures-timer" -version = "3.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e64b03909df88034c26dc1547e8970b91f98bdb65165d6a4e9110d94263dbb2c" - [[package]] name = "futures-util" version = "0.3.30" @@ -956,7 +884,7 @@ dependencies = [ "futures-sink", "futures-util", "http", - "indexmap 2.2.2", + "indexmap", "slab", "tokio", "tokio-util", @@ -973,28 +901,12 @@ dependencies = [ "crunchy", ] -[[package]] -name = "hashbrown" -version = "0.12.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" - [[package]] name = "hashbrown" version = "0.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604" -[[package]] -name = "hdrhistogram" -version = "7.5.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "765c9198f173dd59ce26ff9f95ef0aafd0a0fe01fb9d72841bc5066a4c06511d" -dependencies = [ - "byteorder", - "num-traits", -] - [[package]] name = "heck" version = "0.4.1" @@ -1080,32 +992,6 @@ dependencies = [ "want", ] -[[package]] -name = "hyper-rustls" -version = "0.24.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590" -dependencies = [ - "futures-util", - "http", - "hyper", - "log", - "rustls 0.21.10", - "rustls-native-certs 0.6.3", - "tokio", - "tokio-rustls 0.24.1", -] - -[[package]] -name = "idna" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "634d9b1461af396cad843f47fdba5597a4f9e6ddd4bfb6ff5d85028c25cb12f6" -dependencies = [ - "unicode-bidi", - "unicode-normalization", -] - [[package]] name = "im" version = "15.1.0" @@ -1120,16 +1006,6 @@ dependencies = [ "version_check", ] -[[package]] -name = "indexmap" -version = "1.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" -dependencies = [ - "autocfg", - "hashbrown 0.12.3", -] - [[package]] name = "indexmap" version = "2.2.2" @@ -1137,7 +1013,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "824b2ae422412366ba479e8111fd301f7b5faece8149317bb81925979a53f520" dependencies = [ "equivalent", - "hashbrown 0.14.3", + "hashbrown", ] [[package]] @@ -1208,36 +1084,10 @@ version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9579d0ca9fb30da026bac2f0f7d9576ec93489aeb7cd4971dd5b4617d82c79b2" dependencies = [ - "jsonrpsee-client-transport", "jsonrpsee-core", - "jsonrpsee-http-client", - "jsonrpsee-proc-macros", "jsonrpsee-server", "jsonrpsee-types", - "jsonrpsee-ws-client", - "tokio", - "tracing", -] - -[[package]] -name = "jsonrpsee-client-transport" -version = "0.21.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f9f9ed46590a8d5681975f126e22531698211b926129a40a2db47cbca429220" -dependencies = [ - "futures-util", - "http", - "jsonrpsee-core", - "pin-project", - "rustls-native-certs 0.7.0", - "rustls-pki-types", - "soketto", - "thiserror", "tokio", - "tokio-rustls 0.25.0", - "tokio-util", - "tracing", - "url", ] [[package]] @@ -1247,56 +1097,19 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "776d009e2f591b78c038e0d053a796f94575d66ca4e77dd84bfc5e81419e436c" dependencies = [ "anyhow", - "async-lock", "async-trait", "beef", - "futures-timer", "futures-util", "hyper", "jsonrpsee-types", "parking_lot", - "pin-project", "rand 0.8.5", "rustc-hash", "serde", "serde_json", "thiserror", "tokio", - "tokio-stream", - "tracing", -] - -[[package]] -name = "jsonrpsee-http-client" -version = "0.21.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78b7de9f3219d95985eb77fd03194d7c1b56c19bce1abfcc9d07462574b15572" -dependencies = [ - "async-trait", - "hyper", - "hyper-rustls", - "jsonrpsee-core", - "jsonrpsee-types", - "serde", - "serde_json", - "thiserror", - "tokio", - "tower", "tracing", - "url", -] - -[[package]] -name = "jsonrpsee-proc-macros" -version = "0.21.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d94b7505034e2737e688e1153bf81e6f93ad296695c43958d6da2e4321f0a990" -dependencies = [ - "heck", - "proc-macro-crate", - "proc-macro2", - "quote", - "syn 1.0.109", ] [[package]] @@ -1336,19 +1149,6 @@ dependencies = [ "thiserror", ] -[[package]] -name = "jsonrpsee-ws-client" -version = "0.21.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "073c077471e89c4b511fa88b3df9a0f0abdf4a0a2e6683dd2ab36893af87bb2d" -dependencies = [ - "http", - "jsonrpsee-client-transport", - "jsonrpsee-core", - "jsonrpsee-types", - "url", -] - [[package]] name = "keccak" version = "0.1.5" @@ -1660,12 +1460,6 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" -[[package]] -name = "openssl-probe" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" - [[package]] name = "ordered-float" version = "2.10.1" @@ -1693,12 +1487,6 @@ dependencies = [ "serde", ] -[[package]] -name = "parking" -version = "2.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb813b8af86854136c6922af0598d719255ecb2179515e6e7730d468f05c9cae" - [[package]] name = "parking_lot" version = "0.12.1" @@ -1728,12 +1516,6 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" -[[package]] -name = "percent-encoding" -version = "2.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" - [[package]] name = "petgraph" version = "0.6.4" @@ -1741,7 +1523,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e1d3afd2628e69da2be385eb6f2fd57c8ac7977ceeff6dc166ff1657b0e386a9" dependencies = [ "fixedbitset", - "indexmap 2.2.2", + "indexmap", ] [[package]] @@ -1881,16 +1663,6 @@ dependencies = [ "syn 2.0.48", ] -[[package]] -name = "proc-macro-crate" -version = "2.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b00f26d3400549137f92511a46ac1cd8ce37cb5598a96d382381458b992a5d24" -dependencies = [ - "toml_datetime", - "toml_edit", -] - [[package]] name = "proc-macro2" version = "1.0.78" @@ -2187,20 +1959,6 @@ version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" -[[package]] -name = 
"ring" -version = "0.17.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "688c63d65483050968b2a8937f7995f443e27041a0f7700aa59b0822aedebb74" -dependencies = [ - "cc", - "getrandom", - "libc", - "spin", - "untrusted", - "windows-sys 0.48.0", -] - [[package]] name = "rocksdb" version = "0.21.0" @@ -2251,103 +2009,6 @@ dependencies = [ "windows-sys 0.52.0", ] -[[package]] -name = "rustls" -version = "0.21.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9d5a6813c0759e4609cd494e8e725babae6a2ca7b62a5536a13daaec6fcb7ba" -dependencies = [ - "log", - "ring", - "rustls-webpki 0.101.7", - "sct", -] - -[[package]] -name = "rustls" -version = "0.22.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e87c9956bd9807afa1f77e0f7594af32566e830e088a5576d27c5b6f30f49d41" -dependencies = [ - "log", - "ring", - "rustls-pki-types", - "rustls-webpki 0.102.1", - "subtle", - "zeroize", -] - -[[package]] -name = "rustls-native-certs" -version = "0.6.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9aace74cb666635c918e9c12bc0d348266037aa8eb599b5cba565709a8dff00" -dependencies = [ - "openssl-probe", - "rustls-pemfile 1.0.4", - "schannel", - "security-framework", -] - -[[package]] -name = "rustls-native-certs" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f1fb85efa936c42c6d5fc28d2629bb51e4b2f4b8a5211e297d599cc5a093792" -dependencies = [ - "openssl-probe", - "rustls-pemfile 2.0.0", - "rustls-pki-types", - "schannel", - "security-framework", -] - -[[package]] -name = "rustls-pemfile" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c74cae0a4cf6ccbbf5f359f08efdf8ee7e1dc532573bf0db71968cb56b1448c" -dependencies = [ - "base64 0.21.7", -] - -[[package]] -name = "rustls-pemfile" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35e4980fa29e4c4b212ffb3db068a564cbf560e51d3944b7c88bd8bf5bec64f4" -dependencies = [ - "base64 0.21.7", - "rustls-pki-types", -] - -[[package]] -name = "rustls-pki-types" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e9d979b3ce68192e42760c7810125eb6cf2ea10efae545a156063e61f314e2a" - -[[package]] -name = "rustls-webpki" -version = "0.101.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765" -dependencies = [ - "ring", - "untrusted", -] - -[[package]] -name = "rustls-webpki" -version = "0.102.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef4ca26037c909dedb327b48c3327d0ba91d3dd3c4e05dad328f210ffb68e95b" -dependencies = [ - "ring", - "rustls-pki-types", - "untrusted", -] - [[package]] name = "ryu" version = "1.0.16" @@ -2363,54 +2024,12 @@ dependencies = [ "winapi-util", ] -[[package]] -name = "schannel" -version = "0.1.23" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbc91545643bcf3a0bbb6569265615222618bdf33ce4ffbbd13c4bbd4c093534" -dependencies = [ - "windows-sys 0.52.0", -] - [[package]] name = "scopeguard" version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" -[[package]] -name = "sct" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414" -dependencies = [ - "ring", - "untrusted", -] - -[[package]] -name = "security-framework" -version = "2.9.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05b64fb303737d99b81884b2c63433e9ae28abebe5eb5045dcdd175dc2ecf4de" -dependencies = [ - "bitflags 1.3.2", - "core-foundation", - "core-foundation-sys", - "libc", - "security-framework-sys", -] - -[[package]] -name = "security-framework-sys" -version = "2.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e932934257d3b408ed8f30db49d85ea163bfe74961f017f405b025af298f0c7a" -dependencies = [ - "core-foundation-sys", - "libc", -] - [[package]] name = "semver" version = "1.0.21" @@ -2592,12 +2211,6 @@ dependencies = [ "sha-1", ] -[[package]] -name = "spin" -version = "0.9.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" - [[package]] name = "spki" version = "0.7.3" @@ -2749,21 +2362,6 @@ dependencies = [ "serde_json", ] -[[package]] -name = "tinyvec" -version = "1.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87cc5ceb3875bb20c2890005a4e226a4651264a5c75edb2421b52861a0a0cb50" -dependencies = [ - "tinyvec_macros", -] - -[[package]] -name = "tinyvec_macros" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" - [[package]] name = "tokio" version = "1.35.1" @@ -2794,27 +2392,6 @@ dependencies = [ "syn 2.0.48", ] -[[package]] -name = "tokio-rustls" -version = "0.24.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" -dependencies = [ - "rustls 0.21.10", - "tokio", -] - -[[package]] -name = "tokio-rustls" -version = "0.25.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "775e0c0f0adb3a2f22a00c4745d728b479985fc15ee7ca6a2608388c5569860f" -dependencies = [ - "rustls 0.22.2", - "rustls-pki-types", - "tokio", -] - [[package]] name = "tokio-stream" version = "0.1.14" @@ -2841,39 +2418,12 @@ dependencies = [ "tracing", ] -[[package]] -name = "toml_datetime" -version = "0.6.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7cda73e2f1397b1262d6dfdcef8aafae14d1de7748d66822d3bfeeb6d03e5e4b" - -[[package]] -name = "toml_edit" -version = "0.20.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "396e4d48bbb2b7554c944bde63101b5ae446cff6ec4a24227428f15eb72ef338" -dependencies = [ - "indexmap 2.2.2", - "toml_datetime", - "winnow", -] - [[package]] name = "tower" version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" dependencies = [ - "futures-core", - "futures-util", - "hdrhistogram", - "indexmap 1.9.3", - "pin-project", - "pin-project-lite", - "rand 0.8.5", - "slab", - "tokio", - "tokio-util", "tower-layer", "tower-service", "tracing", @@ -2965,27 +2515,12 @@ version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" -[[package]] -name = "unicode-bidi" -version = "0.3.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"08f95100a766bf4f8f28f90d77e0a5461bbdb219042e7679bebe79004fed8d75" - [[package]] name = "unicode-ident" version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" -[[package]] -name = "unicode-normalization" -version = "0.1.22" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c5713f0fc4b5db668a2ac63cdb7bb4469d8c9fed047b1d0292cc7b0ce2ba921" -dependencies = [ - "tinyvec", -] - [[package]] name = "unicode-width" version = "0.1.11" @@ -3002,23 +2537,6 @@ dependencies = [ "subtle", ] -[[package]] -name = "untrusted" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" - -[[package]] -name = "url" -version = "2.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31e6302e3bb753d46e83516cae55ae196fc0c309407cf11ab35cc51a4c2a4633" -dependencies = [ - "form_urlencoded", - "idna", - "percent-encoding", -] - [[package]] name = "utf8parse" version = "0.2.1" @@ -3342,15 +2860,6 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04" -[[package]] -name = "winnow" -version = "0.5.36" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "818ce546a11a9986bc24f93d0cdf38a8a1a400f1473ea8c82e59f6e0ffab9249" -dependencies = [ - "memchr", -] - [[package]] name = "yansi" version = "0.5.1" diff --git a/node/Cargo.toml b/node/Cargo.toml index 9503a5bc..df0a7b33 100644 --- a/node/Cargo.toml +++ b/node/Cargo.toml @@ -82,8 +82,8 @@ time = "0.3.23" tokio = { version = "1.34.0", features = ["full"] } tracing = { version = "0.1.37", features = ["attributes"] } tracing-subscriber = { version = "0.3.16", features = ["env-filter", "fmt"] } -jsonrpsee = { version = "0.21.0", features = ["server", "http-client", "ws-client", "macros", "client-ws-transport-native-tls"] } -tower = { version = "0.4.13", features = ["full"] } +jsonrpsee = { version = "0.21.0", features = ["server"] } +tower = { version = "0.4.13" } # Note that "bench" profile inherits from "release" profile and # "test" profile inherits from "dev" profile. From 44c979eb5d7b5c7d51ac01471e73263393aba34a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Esteban=20Dimitroff=20H=C3=B3di?= Date: Thu, 1 Feb 2024 19:52:57 -0300 Subject: [PATCH 078/139] Added k8s module --- node/tools/src/bin/deployer.rs | 15 +++-- node/tools/src/k8s.rs | 107 +++++++++++++++++++++++++++++++++ 2 files changed, 116 insertions(+), 6 deletions(-) create mode 100644 node/tools/src/k8s.rs diff --git a/node/tools/src/bin/deployer.rs b/node/tools/src/bin/deployer.rs index a01e0ddb..cc644f8b 100644 --- a/node/tools/src/bin/deployer.rs +++ b/node/tools/src/bin/deployer.rs @@ -9,6 +9,8 @@ use zksync_consensus_roles::node; use zksync_consensus_tools::k8s; use zksync_consensus_tools::AppConfig; +const NAMESPACE: &str = "consensus"; + /// Command line arguments. 
#[derive(Debug, Parser)] #[command(name = "deployer")] @@ -62,7 +64,7 @@ fn generate_config(nodes: usize) -> anyhow::Result<()> { let root = PathBuf::from(manifest_path).join("k8s_configs"); let _ = fs::remove_dir_all(&root); for (i, cfg) in cfgs.into_iter().enumerate() { - let node_config_dir = root.join(format!("node_{}", i)); + let node_config_dir = root.join(format!("node_{i:0>2}")); fs::create_dir_all(&node_config_dir) .with_context(|| format!("create_dir_all({:?})", node_config_dir))?; @@ -83,15 +85,16 @@ fn generate_config(nodes: usize) -> anyhow::Result<()> { } /// Deploys the nodes to the kubernetes cluster. -async fn deploy(nodes: usize) -> anyhow::Result<()> { +fn deploy(nodes: usize) -> anyhow::Result<()> { let client = k8s::get_client()?; - k8s::create_or_reuse_namespace(&client, "consensus")?; + k8s::create_or_reuse_namespace(&client, NAMESPACE)?; for i in 0..nodes { k8s::create_deployment( &client, - &format!("consensus-node-0{i}"), - &format!("node_{i}"), + &format!("consensus_node_{i:0>2}"), + &format!("node_{i:0>2}"), + NAMESPACE, )?; } @@ -104,6 +107,6 @@ async fn main() -> anyhow::Result<()> { match command { DeployerCommands::GenerateConfig(args) => generate_config(args.nodes), - DeployerCommands::Deploy(args) => deploy(args.nodes).await, + DeployerCommands::Deploy(args) => deploy(args.nodes), } } diff --git a/node/tools/src/k8s.rs b/node/tools/src/k8s.rs new file mode 100644 index 00000000..5a77dd2a --- /dev/null +++ b/node/tools/src/k8s.rs @@ -0,0 +1,107 @@ +use futures::executor; +use k8s_openapi::api::{apps::v1::Deployment, core::v1::Namespace}; +use kube::{api::PostParams, Api, Client}; +use serde_json::json; +use tracing::log::info; + +/// Get a kube client +pub fn get_client() -> anyhow::Result { + Ok(executor::block_on(Client::try_default())?) 
+} + +/// Creates a namespace in k8s cluster +pub fn create_or_reuse_namespace(client: &Client, name: &str) -> anyhow::Result<()> { + let namespaces: Api = Api::all(client.clone()); + let consensus_namespace = executor::block_on(namespaces.get_opt(name))?; + if consensus_namespace.is_none() { + let namespace: Namespace = serde_json::from_value(json!({ + "apiVersion": "v1", + "kind": "Namespace", + "metadata": { + "name": "consensus", + "labels": { + "name": "consensus" + } + } + }))?; + + let namespaces: Api = Api::all(client.clone()); + let post_params = PostParams::default(); + let result = executor::block_on(namespaces.create(&post_params, &namespace))?; + + info!("Namespace: {} ,created", result.metadata.name.unwrap()); + Ok(()) + } else { + info!( + "Namespace: {} ,already exists", + consensus_namespace.unwrap().metadata.name.unwrap() + ); + Ok(()) + } +} + +pub fn create_deployment(client: &Client, node_name: &str, node_id: &str, namespace: &str) -> anyhow::Result<()> { + let deployment: Deployment = serde_json::from_value(json!({ + "apiVersion": "apps/v1", + "kind": "Deployment", + "metadata": { + "name": node_name, + "namespace": "consensus" + }, + "spec": { + "selector": { + "matchLabels": { + "app": node_name + } + }, + "replicas": 1, + "template": { + "metadata": { + "labels": { + "app": node_name + } + }, + "spec": { + "containers": [ + { + "name": node_name, + "image": "consensus-node", + "env": [ + { + "name": "NODE_ID", + "value": node_id + } + ], + "command": ["./k8s_entrypoint.sh"], + "imagePullPolicy": "Never", + "ports": [ + { + "containerPort": 3054 + } + ], + "livenessProbe": { + "httpGet": { + "path": "/health", + "port": 3054 + } + }, + "readinessProbe": { + "httpGet": { + "path": "/health", + "port": 3054 + } + } + } + ] + } + } + } + }))?; + + let deployments: Api = Api::namespaced(client.clone(), namespace); + let post_params = PostParams::default(); + let result = executor::block_on(deployments.create(&post_params, &deployment))?; + + info!("Deployment: {} , created", result.metadata.name.unwrap()); + Ok(()) +} From 52575926204b5137289af53a0ffdcda7629ef8b3 Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Fri, 2 Feb 2024 10:35:42 -0300 Subject: [PATCH 079/139] Add old crate versions to deny.toml file --- node/deny.toml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/node/deny.toml b/node/deny.toml index f8535b33..37f6ede5 100644 --- a/node/deny.toml +++ b/node/deny.toml @@ -56,8 +56,10 @@ skip = [ { name = "rand", version = "0.4" }, { name = "syn", version = "1.0" }, - # Old versions required by criterion. - { name = "itertools", version = "0.10.5" }, + # Old versions required by jsonrpsee. 
+ { name = "base64", version = "0.13.1" }, + { name = "block-buffer", version = "0.9.0" }, + { name = "digest", version = "0.10.7" }, ] [sources] From 75cd2cfbd2d1069e1ae85f441a8f2ec3f1830edd Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Fri, 2 Feb 2024 16:23:52 -0300 Subject: [PATCH 080/139] Use new app config api for test in tools crate --- node/libs/concurrency/Cargo.toml | 2 +- node/tools/src/config.rs | 14 ++++++++++++-- node/tools/src/k8s.rs | 7 ++++++- node/tools/src/tests.rs | 32 +++++++++++++++----------------- 4 files changed, 34 insertions(+), 21 deletions(-) diff --git a/node/libs/concurrency/Cargo.toml b/node/libs/concurrency/Cargo.toml index 2597d7fa..e305f684 100644 --- a/node/libs/concurrency/Cargo.toml +++ b/node/libs/concurrency/Cargo.toml @@ -23,4 +23,4 @@ vise.workspace = true workspace = true [dev-dependencies] -assert_matches.workspace = true \ No newline at end of file +assert_matches.workspace = true diff --git a/node/tools/src/config.rs b/node/tools/src/config.rs index 57ae13eb..0377e60c 100644 --- a/node/tools/src/config.rs +++ b/node/tools/src/config.rs @@ -209,11 +209,11 @@ impl<'a> ConfigPaths<'a> { } impl AppConfig { - pub fn default_for(nodes_amount: usize) -> (AppConfig, Vec) { + pub fn default_for(validators_amount: usize) -> (AppConfig, Vec) { // Generate the keys for all the replicas. let rng = &mut rand::thread_rng(); - let mut genesis = validator::GenesisSetup::empty(rng, nodes_amount); + let mut genesis = validator::GenesisSetup::empty(rng, validators_amount); genesis .next_block() .payload(validator::Payload(vec![])) @@ -238,6 +238,11 @@ impl AppConfig { ) } + pub fn with_server_addr(&mut self, server_addr: SocketAddr) -> &mut Self { + self.server_addr = server_addr; + self + } + pub fn with_public_addr(&mut self, public_addr: SocketAddr) -> &mut Self { self.public_addr = public_addr; self @@ -269,6 +274,11 @@ impl AppConfig { pub fn write_to_file(&self, path: &Path) -> anyhow::Result<()> { fs::write(path.join("config.json"), encode_json(self)).context("fs::write()") } + + pub fn with_max_payload_size(&mut self, max_payload_size: usize) -> &mut Self { + self.max_payload_size = max_payload_size; + self + } } impl Configs { diff --git a/node/tools/src/k8s.rs b/node/tools/src/k8s.rs index 5a77dd2a..45b6eb58 100644 --- a/node/tools/src/k8s.rs +++ b/node/tools/src/k8s.rs @@ -40,7 +40,12 @@ pub fn create_or_reuse_namespace(client: &Client, name: &str) -> anyhow::Result< } } -pub fn create_deployment(client: &Client, node_name: &str, node_id: &str, namespace: &str) -> anyhow::Result<()> { +pub fn create_deployment( + client: &Client, + node_name: &str, + node_id: &str, + namespace: &str, +) -> anyhow::Result<()> { let deployment: Deployment = serde_json::from_value(json!({ "apiVersion": "apps/v1", "kind": "Deployment", diff --git a/node/tools/src/tests.rs b/node/tools/src/tests.rs index 581f1ca2..39d3a4d3 100644 --- a/node/tools/src/tests.rs +++ b/node/tools/src/tests.rs @@ -15,23 +15,21 @@ fn make_addr(rng: &mut R) -> std::net::SocketAddr { impl Distribution for Standard { fn sample(&self, rng: &mut R) -> AppConfig { - AppConfig { - server_addr: make_addr(rng), - public_addr: make_addr(rng), - metrics_server_addr: Some(make_addr(rng)), - - validators: rng.gen(), - genesis_block: rng.gen(), - - gossip_dynamic_inbound_limit: rng.gen(), - gossip_static_inbound: (0..5) - .map(|_| rng.gen::().public()) - .collect(), - gossip_static_outbound: (0..6) - .map(|_| (rng.gen::().public(), make_addr(rng))) - .collect(), - max_payload_size: rng.gen(), - } + let (mut 
config, _) = AppConfig::default_for(1); + config + .with_server_addr(make_addr(rng)) + .with_public_addr(make_addr(rng)) + .with_metrics_server_addr(make_addr(rng)) + .with_gossip_dynamic_inbound_limit(rng.gen()) + .with_gossip_dynamic_inbound_limit(rng.gen()) + .with_max_payload_size(rng.gen()); + (0..5).into_iter().for_each(|_| { + let _ = config.add_gossip_static_inbound(rng.gen::().public()); + }); + (0..6).into_iter().for_each(|_| { + let _ = config.add_gossip_static_outbound(rng.gen::().public(), make_addr(rng)); + }); + config } } From b0803dc1ec7c76478c531aaff5b1993a4fba7d98 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Esteban=20Dimitroff=20H=C3=B3di?= Date: Mon, 5 Feb 2024 09:58:25 -0300 Subject: [PATCH 081/139] Deploying seed peers in a different phase --- node/Cargo.lock | 1 - node/Cargo.toml | 1 - node/tools/Cargo.toml | 1 - node/tools/src/bin/deployer.rs | 45 +++++++++++++++++---- node/tools/src/config.rs | 13 ++++-- node/tools/src/k8s.rs | 73 +++++++++++++++++++++++++++++----- 6 files changed, 109 insertions(+), 25 deletions(-) diff --git a/node/Cargo.lock b/node/Cargo.lock index 1b21650e..5f42b1c4 100644 --- a/node/Cargo.lock +++ b/node/Cargo.lock @@ -3766,7 +3766,6 @@ dependencies = [ "anyhow", "async-trait", "clap", - "futures", "jsonrpsee", "k8s-openapi", "kube", diff --git a/node/Cargo.toml b/node/Cargo.toml index d1e653b5..5791d77b 100644 --- a/node/Cargo.toml +++ b/node/Cargo.toml @@ -84,7 +84,6 @@ tracing = { version = "0.1.37", features = ["attributes"] } tracing-subscriber = { version = "0.3.16", features = ["env-filter", "fmt"] } kube = { version = "0.88.1", features = ["runtime", "derive"] } k8s-openapi = { version = "0.21.0", features = ["latest"] } -futures = "0.3.1" jsonrpsee = { version = "0.21.0", features = ["server"] } tower = { version = "0.4.13" } diff --git a/node/tools/Cargo.toml b/node/tools/Cargo.toml index 49f3b039..3546e97c 100644 --- a/node/tools/Cargo.toml +++ b/node/tools/Cargo.toml @@ -33,7 +33,6 @@ jsonrpsee.workspace = true tower.workspace = true kube.workspace = true k8s-openapi.workspace = true -futures.workspace = true [dev-dependencies] tempfile.workspace = true diff --git a/node/tools/src/bin/deployer.rs b/node/tools/src/bin/deployer.rs index cc644f8b..d04bde4f 100644 --- a/node/tools/src/bin/deployer.rs +++ b/node/tools/src/bin/deployer.rs @@ -6,8 +6,12 @@ use clap::{Parser, Subcommand}; use rand::Rng; use zksync_consensus_crypto::TextFmt; use zksync_consensus_roles::node; +use zksync_consensus_roles::node::PublicKey; +use zksync_consensus_tools::decode_json; use zksync_consensus_tools::k8s; use zksync_consensus_tools::AppConfig; +use zksync_consensus_tools::NodeAddr; +use zksync_protobuf::serde::Serde; const NAMESPACE: &str = "consensus"; @@ -85,17 +89,44 @@ fn generate_config(nodes: usize) -> anyhow::Result<()> { } /// Deploys the nodes to the kubernetes cluster. 
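Patch 080 above moves `AppConfig` construction from struct literals to a builder-style API, which also gives tests outside `Distribution::sample` a compact way to assemble bespoke configs. A sketch (the addresses and sizes are arbitrary, and the tools crate is assumed as a dependency):

```rust
use zksync_consensus_tools::AppConfig;

fn test_config() -> AppConfig {
    // Start from the one-validator default, then override selectively.
    let (mut config, _validator_keys) = AppConfig::default_for(1);
    config
        .with_server_addr("127.0.0.1:3054".parse().unwrap())
        .with_public_addr("127.0.0.1:3054".parse().unwrap())
        .with_gossip_dynamic_inbound_limit(10)
        .with_max_payload_size(1_000_000);
    config
}
```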
-fn deploy(nodes: usize) -> anyhow::Result<()> { - let client = k8s::get_client()?; - k8s::create_or_reuse_namespace(&client, NAMESPACE)?; +async fn deploy(nodes: usize) -> anyhow::Result<()> { + let client = k8s::get_client().await?; + k8s::create_or_reuse_namespace(&client, NAMESPACE).await?; - for i in 0..nodes { + // 20% of the nodes will be seed nodes + let seed_nodes = (nodes as f32 * 0.2).ceil() as usize; + + // deploy seed peer(s) + for i in 0..seed_nodes { k8s::create_deployment( &client, - &format!("consensus_node_{i:0>2}"), + &format!("consensus-node-{i:0>2}"), &format!("node_{i:0>2}"), + true, + vec![], // Seed peers don't have other peer information NAMESPACE, - )?; + ) + .await?; + } + + // obtain seed peer(s) IP(s) + let peers = k8s::get_seed_node_addrs(&client).await; + + // TODO recover public keys + // Using hardcoded data to test + let peers = [decode_json::>(&"{\"key\": \"node:public:ed25519:3059a9ae1de665f2e5dfcb64d5d3a044b2b3617436ddd56c2baf896878b13961\",\"addr\": \"10.13.33.0:3045\"}")?.0].to_vec(); + + // deploy the rest of nodes + for i in seed_nodes..nodes { + k8s::create_deployment( + &client, + &format!("consensus-node-{i:0>2}"), + &format!("node_{i:0>2}"), + false, + peers.clone(), + NAMESPACE, + ) + .await?; } Ok(()) @@ -107,6 +138,6 @@ async fn main() -> anyhow::Result<()> { match command { DeployerCommands::GenerateConfig(args) => generate_config(args.nodes), - DeployerCommands::Deploy(args) => deploy(args.nodes), + DeployerCommands::Deploy(args) => deploy(args.nodes).await, } } diff --git a/node/tools/src/config.rs b/node/tools/src/config.rs index 0377e60c..81b18bed 100644 --- a/node/tools/src/config.rs +++ b/node/tools/src/config.rs @@ -30,11 +30,16 @@ pub fn decode_json(json: &str) -> anyhow::Result } /// Encodes a generated proto message to json for arbitrary ProtoFmt. -fn encode_json(x: &T) -> String { +pub fn encode_json(x: &T) -> String { let mut s = serde_json::Serializer::pretty(vec![]); - zksync_protobuf::serde::serialize(x, &mut s).unwrap(); + T::serialize(x, &mut s).unwrap(); String::from_utf8(s.into_inner()).unwrap() } +// pub fn encode_json(x: &T) -> String { +// let mut s = serde_json::Serializer::pretty(vec![]); +// zksync_protobuf::serde::serialize(x, &mut s).unwrap(); +// String::from_utf8(s.into_inner()).unwrap() +// } /// Pair of (public key, ip address) for a gossip network node. #[derive(Debug, Clone)] @@ -271,8 +276,8 @@ impl AppConfig { self } - pub fn write_to_file(&self, path: &Path) -> anyhow::Result<()> { - fs::write(path.join("config.json"), encode_json(self)).context("fs::write()") + pub fn write_to_file(self, path: &Path) -> anyhow::Result<()> { + fs::write(path.join("config.json"), encode_json(&Serde(self))).context("fs::write()") } pub fn with_max_payload_size(&mut self, max_payload_size: usize) -> &mut Self { diff --git a/node/tools/src/k8s.rs b/node/tools/src/k8s.rs index 45b6eb58..6398902f 100644 --- a/node/tools/src/k8s.rs +++ b/node/tools/src/k8s.rs @@ -1,18 +1,27 @@ -use futures::executor; -use k8s_openapi::api::{apps::v1::Deployment, core::v1::Namespace}; -use kube::{api::PostParams, Api, Client}; +use std::collections::HashMap; + +use crate::NodeAddr; +use k8s_openapi::api::{ + apps::v1::Deployment, + core::v1::{Namespace, Pod}, +}; +use kube::{ + api::{ListParams, PostParams}, + Api, Client, ResourceExt, +}; use serde_json::json; use tracing::log::info; +use zksync_protobuf::serde::Serde; /// Get a kube client -pub fn get_client() -> anyhow::Result { - Ok(executor::block_on(Client::try_default())?) 
+pub async fn get_client() -> anyhow::Result { + Ok(Client::try_default().await?) } /// Creates a namespace in k8s cluster -pub fn create_or_reuse_namespace(client: &Client, name: &str) -> anyhow::Result<()> { +pub async fn create_or_reuse_namespace(client: &Client, name: &str) -> anyhow::Result<()> { let namespaces: Api = Api::all(client.clone()); - let consensus_namespace = executor::block_on(namespaces.get_opt(name))?; + let consensus_namespace = namespaces.get_opt(name).await?; if consensus_namespace.is_none() { let namespace: Namespace = serde_json::from_value(json!({ "apiVersion": "v1", @@ -27,7 +36,7 @@ pub fn create_or_reuse_namespace(client: &Client, name: &str) -> anyhow::Result< let namespaces: Api = Api::all(client.clone()); let post_params = PostParams::default(); - let result = executor::block_on(namespaces.create(&post_params, &namespace))?; + let result = namespaces.create(&post_params, &namespace).await?; info!("Namespace: {} ,created", result.metadata.name.unwrap()); Ok(()) @@ -40,12 +49,15 @@ pub fn create_or_reuse_namespace(client: &Client, name: &str) -> anyhow::Result< } } -pub fn create_deployment( +pub async fn create_deployment( client: &Client, node_name: &str, node_id: &str, + is_seed: bool, + peers: Vec, namespace: &str, ) -> anyhow::Result<()> { + let cli_args = get_cli_args(peers); let deployment: Deployment = serde_json::from_value(json!({ "apiVersion": "apps/v1", "kind": "Deployment", @@ -63,7 +75,8 @@ pub fn create_deployment( "template": { "metadata": { "labels": { - "app": node_name + "app": node_name, + "seed": is_seed.to_string() } }, "spec": { @@ -78,6 +91,7 @@ pub fn create_deployment( } ], "command": ["./k8s_entrypoint.sh"], + "args": cli_args, "imagePullPolicy": "Never", "ports": [ { @@ -105,8 +119,45 @@ pub fn create_deployment( let deployments: Api = Api::namespaced(client.clone(), namespace); let post_params = PostParams::default(); - let result = executor::block_on(deployments.create(&post_params, &deployment))?; + let result = deployments.create(&post_params, &deployment).await?; info!("Deployment: {} , created", result.metadata.name.unwrap()); Ok(()) } + +/// Returns a HashMap with mapping: node_name -> IP address +pub async fn get_seed_node_addrs(client: &Client) -> HashMap { + let mut result = HashMap::new(); + let pods: Api = Api::namespaced(client.clone(), "consensus"); + + let lp = ListParams::default().labels("seed=true"); + for p in pods.list(&lp).await.unwrap() { + let node_name = p.labels()["app"].clone(); + result.insert(node_name, p.status.unwrap().pod_ip.unwrap()); + } + result +} + +fn get_cli_args(peers: Vec) -> Vec { + if peers.is_empty() { + [].to_vec() + } else { + [ + "--add-gossip-static-outbound".to_string(), + encode_json( + &peers + .iter() + .map(|e| Serde(e.clone())) + .collect::>>(), + ), + ] + .to_vec() + } +} + +/// Encodes a generated proto message to json for arbitrary ProtoFmt. 
+pub fn encode_json(x: &T) -> String { + let mut s = serde_json::Serializer::new(vec![]); + T::serialize(x, &mut s).unwrap(); + String::from_utf8(s.into_inner()).unwrap() +} From 3a249dc446c65d19d7c123cf7892556d96b84366 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Esteban=20Dimitroff=20H=C3=B3di?= Date: Mon, 5 Feb 2024 19:36:05 -0300 Subject: [PATCH 082/139] Retrieving seed peers IPs and using it in the rest of the peers --- node/tools/src/bin/deployer.rs | 51 +++++++++++++++++++++------------- node/tools/src/k8s.rs | 17 ++++++------ node/tools/src/tests.rs | 3 +- 3 files changed, 42 insertions(+), 29 deletions(-) diff --git a/node/tools/src/bin/deployer.rs b/node/tools/src/bin/deployer.rs index d04bde4f..b9d4d457 100644 --- a/node/tools/src/bin/deployer.rs +++ b/node/tools/src/bin/deployer.rs @@ -1,19 +1,19 @@ //! Deployer for the kubernetes cluster. +use std::net::SocketAddr; +use std::str::FromStr; use std::{fs, path::PathBuf}; use anyhow::Context; use clap::{Parser, Subcommand}; use rand::Rng; -use zksync_consensus_crypto::TextFmt; -use zksync_consensus_roles::node; -use zksync_consensus_roles::node::PublicKey; -use zksync_consensus_tools::decode_json; +use zksync_consensus_crypto::{Text, TextFmt}; +use zksync_consensus_roles::node::{self, SecretKey}; use zksync_consensus_tools::k8s; use zksync_consensus_tools::AppConfig; use zksync_consensus_tools::NodeAddr; -use zksync_protobuf::serde::Serde; const NAMESPACE: &str = "consensus"; +const NODES_PORT: u16 = 3054; /// Command line arguments. #[derive(Debug, Parser)] @@ -100,8 +100,7 @@ async fn deploy(nodes: usize) -> anyhow::Result<()> { for i in 0..seed_nodes { k8s::create_deployment( &client, - &format!("consensus-node-{i:0>2}"), - &format!("node_{i:0>2}"), + i, true, vec![], // Seed peers don't have other peer information NAMESPACE, @@ -109,29 +108,41 @@ async fn deploy(nodes: usize) -> anyhow::Result<()> { .await?; } + // Waiting 15 secs to allow the pods to start + // TODO: should replace with some safer method + tokio::time::sleep(tokio::time::Duration::from_secs(15)).await; + // obtain seed peer(s) IP(s) - let peers = k8s::get_seed_node_addrs(&client).await; + let peer_ips = k8s::get_seed_node_addrs(&client).await; + + let mut peers = vec![]; - // TODO recover public keys - // Using hardcoded data to test - let peers = [decode_json::>(&"{\"key\": \"node:public:ed25519:3059a9ae1de665f2e5dfcb64d5d3a044b2b3617436ddd56c2baf896878b13961\",\"addr\": \"10.13.33.0:3045\"}")?.0].to_vec(); + for i in 0..seed_nodes { + let node_id = &format!("node_{i:0>2}"); + let node_key = read_node_key_from_config(node_id)?; + let address = peer_ips.get(node_id).context("IP address not found")?; + peers.push(NodeAddr { + key: node_key.public(), + addr: SocketAddr::from_str(&format!("{address}:{NODES_PORT}"))?, + }); + } // deploy the rest of nodes for i in seed_nodes..nodes { - k8s::create_deployment( - &client, - &format!("consensus-node-{i:0>2}"), - &format!("node_{i:0>2}"), - false, - peers.clone(), - NAMESPACE, - ) - .await?; + k8s::create_deployment(&client, i, false, peers.clone(), NAMESPACE).await?; } Ok(()) } +fn read_node_key_from_config(node_id: &String) -> anyhow::Result { + let manifest_path = std::env::var("CARGO_MANIFEST_DIR")?; + let root = PathBuf::from(manifest_path).join("k8s_configs"); + let node_key_path = root.join(node_id).join("node_key"); + let key = fs::read_to_string(node_key_path).context("failed reading file")?; + Text::new(&key).decode().context("failed decoding key") +} + #[tokio::main] async fn main() -> anyhow::Result<()> { let 
DeployerCLI { command } = DeployerCLI::parse(); diff --git a/node/tools/src/k8s.rs b/node/tools/src/k8s.rs index 6398902f..5af29820 100644 --- a/node/tools/src/k8s.rs +++ b/node/tools/src/k8s.rs @@ -1,5 +1,3 @@ -use std::collections::HashMap; - use crate::NodeAddr; use k8s_openapi::api::{ apps::v1::Deployment, @@ -10,6 +8,7 @@ use kube::{ Api, Client, ResourceExt, }; use serde_json::json; +use std::collections::HashMap; use tracing::log::info; use zksync_protobuf::serde::Serde; @@ -51,13 +50,14 @@ pub async fn create_or_reuse_namespace(client: &Client, name: &str) -> anyhow::R pub async fn create_deployment( client: &Client, - node_name: &str, - node_id: &str, + node_number: usize, is_seed: bool, peers: Vec, namespace: &str, ) -> anyhow::Result<()> { let cli_args = get_cli_args(peers); + let node_name = format!("consensus-node-{node_number:0>2}"); + let node_id = format!("node_{node_number:0>2}"); let deployment: Deployment = serde_json::from_value(json!({ "apiVersion": "apps/v1", "kind": "Deployment", @@ -76,6 +76,7 @@ pub async fn create_deployment( "metadata": { "labels": { "app": node_name, + "id": node_id, "seed": is_seed.to_string() } }, @@ -127,15 +128,15 @@ pub async fn create_deployment( /// Returns a HashMap with mapping: node_name -> IP address pub async fn get_seed_node_addrs(client: &Client) -> HashMap { - let mut result = HashMap::new(); + let mut seed_nodes = HashMap::new(); let pods: Api = Api::namespaced(client.clone(), "consensus"); let lp = ListParams::default().labels("seed=true"); for p in pods.list(&lp).await.unwrap() { - let node_name = p.labels()["app"].clone(); - result.insert(node_name, p.status.unwrap().pod_ip.unwrap()); + let node_id = p.labels()["id"].clone(); + seed_nodes.insert(node_id, p.status.unwrap().pod_ip.unwrap()); } - result + seed_nodes } fn get_cli_args(peers: Vec) -> Vec { diff --git a/node/tools/src/tests.rs b/node/tools/src/tests.rs index 39d3a4d3..aa4e8fbb 100644 --- a/node/tools/src/tests.rs +++ b/node/tools/src/tests.rs @@ -27,7 +27,8 @@ impl Distribution for Standard { let _ = config.add_gossip_static_inbound(rng.gen::().public()); }); (0..6).into_iter().for_each(|_| { - let _ = config.add_gossip_static_outbound(rng.gen::().public(), make_addr(rng)); + let _ = config + .add_gossip_static_outbound(rng.gen::().public(), make_addr(rng)); }); config } From eb7512a7a5afbac1ce2d9bf9e33e350e3b7d1fe3 Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Tue, 6 Feb 2024 18:05:35 -0300 Subject: [PATCH 083/139] Add necessary features in kube and jsonrpsee dependencies --- node/Cargo.lock | 128 ++++++++++++++++++++++++++++++++++++++++++++++++ node/Cargo.toml | 4 +- 2 files changed, 130 insertions(+), 2 deletions(-) diff --git a/node/Cargo.lock b/node/Cargo.lock index 5f42b1c4..4687e19c 100644 --- a/node/Cargo.lock +++ b/node/Cargo.lock @@ -696,6 +696,12 @@ dependencies = [ "syn 2.0.48", ] +[[package]] +name = "data-encoding" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7e962a19be5cfc3f3bf6dd8f61eb50107f356ad6270fbb3ed41476571db78be5" + [[package]] name = "der" version = "0.7.8" @@ -1206,6 +1212,16 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" +[[package]] +name = "idna" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "634d9b1461af396cad843f47fdba5597a4f9e6ddd4bfb6ff5d85028c25cb12f6" +dependencies = [ + "unicode-bidi", + "unicode-normalization", 
+] + [[package]] name = "im" version = "15.1.0" @@ -1333,6 +1349,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9579d0ca9fb30da026bac2f0f7d9576ec93489aeb7cd4971dd5b4617d82c79b2" dependencies = [ "jsonrpsee-core", + "jsonrpsee-http-client", "jsonrpsee-server", "jsonrpsee-types", "tokio", @@ -1360,6 +1377,26 @@ dependencies = [ "tracing", ] +[[package]] +name = "jsonrpsee-http-client" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78b7de9f3219d95985eb77fd03194d7c1b56c19bce1abfcc9d07462574b15572" +dependencies = [ + "async-trait", + "hyper", + "hyper-rustls", + "jsonrpsee-core", + "jsonrpsee-types", + "serde", + "serde_json", + "thiserror", + "tokio", + "tower", + "tracing", + "url", +] + [[package]] name = "jsonrpsee-server" version = "0.21.0" @@ -1454,6 +1491,7 @@ dependencies = [ "kube-core", "pem", "pin-project", + "rand 0.8.5", "rustls", "rustls-pemfile", "secrecy", @@ -1462,6 +1500,7 @@ dependencies = [ "serde_yaml", "thiserror", "tokio", + "tokio-tungstenite", "tokio-util", "tower", "tower-http", @@ -2687,6 +2726,17 @@ dependencies = [ "opaque-debug", ] +[[package]] +name = "sha1" +version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3bf829a2d51ab4a5ddf1352d8470c140cadc8301b2ae1789db023f01cedd6ba" +dependencies = [ + "cfg-if", + "cpufeatures", + "digest 0.10.7", +] + [[package]] name = "sha2" version = "0.10.8" @@ -2965,6 +3015,21 @@ dependencies = [ "serde_json", ] +[[package]] +name = "tinyvec" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87cc5ceb3875bb20c2890005a4e226a4651264a5c75edb2421b52861a0a0cb50" +dependencies = [ + "tinyvec_macros", +] + +[[package]] +name = "tinyvec_macros" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" + [[package]] name = "tokio" version = "1.36.0" @@ -3026,6 +3091,18 @@ dependencies = [ "tokio", ] +[[package]] +name = "tokio-tungstenite" +version = "0.20.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "212d5dcb2a1ce06d81107c3d0ffa3121fe974b73f068c8282cb1c32328113b6c" +dependencies = [ + "futures-util", + "log", + "tokio", + "tungstenite", +] + [[package]] name = "tokio-util" version = "0.7.10" @@ -3169,6 +3246,25 @@ version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" +[[package]] +name = "tungstenite" +version = "0.20.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e3dac10fd62eaf6617d3a904ae222845979aec67c615d1c842b4002c7666fb9" +dependencies = [ + "byteorder", + "bytes", + "data-encoding", + "http", + "httparse", + "log", + "rand 0.8.5", + "sha1", + "thiserror", + "url", + "utf-8", +] + [[package]] name = "typenum" version = "1.17.0" @@ -3181,12 +3277,27 @@ version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ed646292ffc8188ef8ea4d1e0e0150fb15a5c2e12ad9b8fc191ae7a8a7f3c4b9" +[[package]] +name = "unicode-bidi" +version = "0.3.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08f95100a766bf4f8f28f90d77e0a5461bbdb219042e7679bebe79004fed8d75" + [[package]] name = "unicode-ident" version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" +[[package]] +name = "unicode-normalization" +version = "0.1.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c5713f0fc4b5db668a2ac63cdb7bb4469d8c9fed047b1d0292cc7b0ce2ba921" +dependencies = [ + "tinyvec", +] + [[package]] name = "unicode-width" version = "0.1.11" @@ -3215,6 +3326,23 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" +[[package]] +name = "url" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "31e6302e3bb753d46e83516cae55ae196fc0c309407cf11ab35cc51a4c2a4633" +dependencies = [ + "form_urlencoded", + "idna", + "percent-encoding", +] + +[[package]] +name = "utf-8" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" + [[package]] name = "utf8parse" version = "0.2.1" diff --git a/node/Cargo.toml b/node/Cargo.toml index 5791d77b..5229f531 100644 --- a/node/Cargo.toml +++ b/node/Cargo.toml @@ -82,9 +82,9 @@ time = "0.3.23" tokio = { version = "1.34.0", features = ["full"] } tracing = { version = "0.1.37", features = ["attributes"] } tracing-subscriber = { version = "0.3.16", features = ["env-filter", "fmt"] } -kube = { version = "0.88.1", features = ["runtime", "derive"] } +kube = { version = "0.88.1", features = ["runtime", "derive", "ws"] } k8s-openapi = { version = "0.21.0", features = ["latest"] } -jsonrpsee = { version = "0.21.0", features = ["server"] } +jsonrpsee = { version = "0.21.0", features = ["server", "http-client"] } tower = { version = "0.4.13" } # Note that "bench" profile inherits from "release" profile and From b587174ac9028419b01062ea1e84b8694285350a Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Tue, 6 Feb 2024 18:06:45 -0300 Subject: [PATCH 084/139] Deploy service to communicate with k8s pods --- node/tools/src/k8s.rs | 66 ++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 62 insertions(+), 4 deletions(-) diff --git a/node/tools/src/k8s.rs b/node/tools/src/k8s.rs index 5af29820..af789fd6 100644 --- a/node/tools/src/k8s.rs +++ b/node/tools/src/k8s.rs @@ -1,7 +1,7 @@ use crate::NodeAddr; use k8s_openapi::api::{ apps::v1::Deployment, - core::v1::{Namespace, Pod}, + core::v1::{Namespace, Pod, Service}, }; use kube::{ api::{ListParams, PostParams}, @@ -11,6 +11,10 @@ use serde_json::json; use std::collections::HashMap; use tracing::log::info; use zksync_protobuf::serde::Serde; +use kube::{ + api::{DeleteParams}, + runtime::wait::{await_condition, conditions::is_pod_running}, +}; /// Get a kube client pub async fn get_client() -> anyhow::Result { @@ -48,6 +52,51 @@ pub async fn create_or_reuse_namespace(client: &Client, name: &str) -> anyhow::R } } +/// Creates a namespace in k8s cluster +pub async fn create_or_reuse_service(client: &Client, name: &str, node_name: &str) -> anyhow::Result<()> { + let services: Api = Api::namespaced(client.clone(), "consensus"); + let example_service = services.get_opt(name).await?; + if example_service.is_none() { + let service: Service = serde_json::from_value(json!({ + "apiVersion": "v1", + "kind": "Service", + "metadata": { + "name": name, + "namespace": "consensus", + "labels": { + "app": node_name + } + }, + "spec": { + "type": "NodePort", + "ports": [ + { + "port": 80, + "targetPort": 3154, + "protocol": "TCP", + } + ], + "selector": { + "app": 
node_name + }, + } + }))?; + + let services: Api = Api::namespaced(client.clone(), "consensus"); + let post_params = PostParams::default(); + let result = services.create(&post_params, &service).await?; + + info!("Service: {} ,created", result.metadata.name.unwrap()); + Ok(()) + } else { + info!( + "Service: {} ,already exists", + example_service.unwrap().metadata.name.unwrap() + ); + Ok(()) + } +} + pub async fn create_deployment( client: &Client, node_number: usize, @@ -96,19 +145,20 @@ pub async fn create_deployment( "imagePullPolicy": "Never", "ports": [ { - "containerPort": 3054 + "containerPort": 3154, + "protocol": "TCP" } ], "livenessProbe": { "httpGet": { "path": "/health", - "port": 3054 + "port": 3154 } }, "readinessProbe": { "httpGet": { "path": "/health", - "port": 3054 + "port": 3154 } } } @@ -121,6 +171,14 @@ pub async fn create_deployment( let deployments: Api = Api::namespaced(client.clone(), namespace); let post_params = PostParams::default(); let result = deployments.create(&post_params, &deployment).await?; + // tokio::time::sleep(tokio::time::Duration::from_secs(15)).await; + + // let pods: Api = Api::namespaced(client.clone(), namespace); + // let lp = ListParams::default().labels(&format!("app={}", node_name)); + // let pod = pods.list(&lp).await?; + // let a = pod.into_iter().find(|pod| pod.clone().metadata.name.unwrap().starts_with(node_name)).unwrap(); + // let pf = pods.portforward(&a.metadata.name.unwrap(), &[3150, 3154]).await; + // println!("Portforward: {:?}", pf.is_ok()); info!("Deployment: {} , created", result.metadata.name.unwrap()); Ok(()) From 0f1e075eca59e3356aae1c5c72874ecad331fbd7 Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Tue, 6 Feb 2024 18:07:40 -0300 Subject: [PATCH 085/139] Add sanity test to check node is running in a pod --- node/tools/src/lib.rs | 2 +- node/tools/src/rpc/methods/health_check.rs | 2 +- node/tools/src/rpc/methods/mod.rs | 4 ++-- node/tools/src/rpc/mod.rs | 5 ++--- node/tools/tests/sanity_test.rs | 21 +++++++++++++++++++++ 5 files changed, 27 insertions(+), 7 deletions(-) create mode 100644 node/tools/tests/sanity_test.rs diff --git a/node/tools/src/lib.rs b/node/tools/src/lib.rs index 229d98c1..0c8a24a4 100644 --- a/node/tools/src/lib.rs +++ b/node/tools/src/lib.rs @@ -3,7 +3,7 @@ mod config; pub mod k8s; mod proto; -mod rpc; +pub mod rpc; mod store; #[cfg(test)] diff --git a/node/tools/src/rpc/methods/health_check.rs b/node/tools/src/rpc/methods/health_check.rs index 8d32107a..c92f6e90 100644 --- a/node/tools/src/rpc/methods/health_check.rs +++ b/node/tools/src/rpc/methods/health_check.rs @@ -3,7 +3,7 @@ use super::RPCMethod; use jsonrpsee::types::{error::ErrorCode, Params}; /// Health check method for RPC server. -pub(crate) struct HealthCheck; +pub struct HealthCheck; impl RPCMethod for HealthCheck { /// Health check response for /health endpoint. diff --git a/node/tools/src/rpc/methods/mod.rs b/node/tools/src/rpc/methods/mod.rs index 2cac6a52..7b5fdc5e 100644 --- a/node/tools/src/rpc/methods/mod.rs +++ b/node/tools/src/rpc/methods/mod.rs @@ -1,7 +1,7 @@ use jsonrpsee::types::{error::ErrorCode, Params}; /// Trait to implement for new RPC methods. -pub(crate) trait RPCMethod { +pub trait RPCMethod { /// Method response logic when called. fn callback(params: Params) -> Result; /// Method name. 
@@ -10,5 +10,5 @@ pub(crate) trait RPCMethod { fn path() -> &'static str; } -pub(crate) mod health_check; +pub mod health_check; pub(crate) mod peers; diff --git a/node/tools/src/rpc/mod.rs b/node/tools/src/rpc/mod.rs index 89bd9b90..ecd45008 100644 --- a/node/tools/src/rpc/mod.rs +++ b/node/tools/src/rpc/mod.rs @@ -1,4 +1,3 @@ //! RPC server for testing purposes. -mod methods; -/// Module for the RPC server implementation. -pub(crate) mod server; +pub mod methods; +pub mod server; diff --git a/node/tools/tests/sanity_test.rs b/node/tools/tests/sanity_test.rs new file mode 100644 index 00000000..ae59d77f --- /dev/null +++ b/node/tools/tests/sanity_test.rs @@ -0,0 +1,21 @@ +mod tests { + use jsonrpsee::{core::client::ClientT, http_client::HttpClientBuilder, rpc_params, types::Params}; + use kube::Api; + use zksync_consensus_tools::rpc::methods::{health_check::HealthCheck, RPCMethod}; + use zksync_consensus_tools::k8s; + use k8s_openapi::api::{ + }; + + + #[tokio::test] + async fn sanity_test() { + let url = "http://127.0.0.1:3154"; + //let k8s_client = k8s::get_client().await.unwrap(); + + let rpc_client = HttpClientBuilder::default().build(url).unwrap(); + let params = Params::new(None); + + let response : serde_json::Value = rpc_client.request("health", rpc_params!()).await.unwrap(); + assert_eq!(response, HealthCheck::callback(params).unwrap()); + } +} From 3ba75b1e466f286795b6d465b2a031f27aae6906 Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Tue, 6 Feb 2024 18:07:59 -0300 Subject: [PATCH 086/139] Add service deployment to deployer binary --- node/tools/src/bin/deployer.rs | 47 +++++++++++++++++----------------- 1 file changed, 24 insertions(+), 23 deletions(-) diff --git a/node/tools/src/bin/deployer.rs b/node/tools/src/bin/deployer.rs index b9d4d457..4a28fbb7 100644 --- a/node/tools/src/bin/deployer.rs +++ b/node/tools/src/bin/deployer.rs @@ -106,31 +106,32 @@ async fn deploy(nodes: usize) -> anyhow::Result<()> { NAMESPACE, ) .await?; + k8s::create_or_reuse_service(&client, &format!("consensus-node-{i:0>2}"), &format!("consensus-node-{i:0>2}")).await?; } - // Waiting 15 secs to allow the pods to start - // TODO: should replace with some safer method - tokio::time::sleep(tokio::time::Duration::from_secs(15)).await; - - // obtain seed peer(s) IP(s) - let peer_ips = k8s::get_seed_node_addrs(&client).await; - - let mut peers = vec![]; - - for i in 0..seed_nodes { - let node_id = &format!("node_{i:0>2}"); - let node_key = read_node_key_from_config(node_id)?; - let address = peer_ips.get(node_id).context("IP address not found")?; - peers.push(NodeAddr { - key: node_key.public(), - addr: SocketAddr::from_str(&format!("{address}:{NODES_PORT}"))?, - }); - } - - // deploy the rest of nodes - for i in seed_nodes..nodes { - k8s::create_deployment(&client, i, false, peers.clone(), NAMESPACE).await?; - } + // // Waiting 15 secs to allow the pods to start + // // TODO: should replace with some safer method + // tokio::time::sleep(tokio::time::Duration::from_secs(15)).await; + + // // obtain seed peer(s) IP(s) + // let peer_ips = k8s::get_seed_node_addrs(&client).await; + + // let mut peers = vec![]; + + // for i in 0..seed_nodes { + // let node_id = &format!("node_{i:0>2}"); + // let node_key = read_node_key_from_config(node_id)?; + // let address = peer_ips.get(node_id).context("IP address not found")?; + // peers.push(NodeAddr { + // key: node_key.public(), + // addr: SocketAddr::from_str(&format!("{address}:{NODES_PORT}"))?, + // }); + // } + + // // deploy the rest of nodes + // for i in 
seed_nodes..nodes { + // k8s::create_deployment(&client, i, false, peers.clone(), NAMESPACE).await?; + // } Ok(()) } From 8e49847055c5b1d8fbeec157d6f77bb0c63968ec Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Tue, 6 Feb 2024 18:08:22 -0300 Subject: [PATCH 087/139] Add bash script for setup and test run --- node/tools/tests/test.sh | 2 ++ 1 file changed, 2 insertions(+) create mode 100755 node/tools/tests/test.sh diff --git a/node/tools/tests/test.sh b/node/tools/tests/test.sh new file mode 100755 index 00000000..0484332d --- /dev/null +++ b/node/tools/tests/test.sh @@ -0,0 +1,2 @@ +cd ../../../ && make start_k8s_nodes NODES=1 +cd node && cargo test sanity_test From ea6c3b98f64d6e253a006c96b94fe558aa312ab0 Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Tue, 6 Feb 2024 18:08:40 -0300 Subject: [PATCH 088/139] Make executor argument optional --- node/tools/src/main.rs | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/node/tools/src/main.rs b/node/tools/src/main.rs index 675932ac..f6efb5f9 100644 --- a/node/tools/src/main.rs +++ b/node/tools/src/main.rs @@ -43,7 +43,7 @@ struct Args { rpc_port: Option, /// IP address and key of the seed peers. #[arg(long)] - add_gossip_static_outbound: NodeAddrs, + add_gossip_static_outbound: Option, } impl Args { @@ -103,12 +103,15 @@ async fn main() -> anyhow::Result<()> { .context("config_paths().load()")?; // Add gossipStaticOutbound pairs from cli to config - configs.app.gossip_static_outbound.extend( - args.add_gossip_static_outbound - .0 - .into_iter() - .map(|e| (e.0.key, e.0.addr)), - ); + if let Some(go) = args.add_gossip_static_outbound { + configs.app.gossip_static_outbound.extend( + go + .0 + .into_iter() + .map(|e| (e.0.key, e.0.addr)), + ); + } + let (executor, runner) = configs .make_executor(ctx) From d61288950085d97d87464f6a2b252689fbb9a382 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Esteban=20Dimitroff=20H=C3=B3di?= Date: Wed, 7 Feb 2024 18:43:22 -0300 Subject: [PATCH 089/139] Busy waiting for pods to start to obtain their IPs --- node/Cargo.lock | 12 ++++ node/Cargo.toml | 3 +- node/tools/Cargo.toml | 1 + node/tools/src/bin/deployer.rs | 8 +-- node/tools/src/k8s.rs | 122 +++++++++++++++++++++++---------- 5 files changed, 101 insertions(+), 45 deletions(-) diff --git a/node/Cargo.lock b/node/Cargo.lock index 5f42b1c4..c6c86e46 100644 --- a/node/Cargo.lock +++ b/node/Cargo.lock @@ -3005,6 +3005,17 @@ dependencies = [ "syn 2.0.48", ] +[[package]] +name = "tokio-retry" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f57eb36ecbe0fc510036adff84824dd3c24bb781e21bfa67b69d556aa85214f" +dependencies = [ + "pin-project", + "rand 0.8.5", + "tokio", +] + [[package]] name = "tokio-rustls" version = "0.24.1" @@ -3776,6 +3787,7 @@ dependencies = [ "serde_json", "tempfile", "tokio", + "tokio-retry", "tower", "tracing", "tracing-subscriber", diff --git a/node/Cargo.toml b/node/Cargo.toml index 5791d77b..493a4727 100644 --- a/node/Cargo.toml +++ b/node/Cargo.toml @@ -80,12 +80,13 @@ test-casing = "0.1.0" thiserror = "1.0.40" time = "0.3.23" tokio = { version = "1.34.0", features = ["full"] } +tokio-retry = "0.3.0" tracing = { version = "0.1.37", features = ["attributes"] } tracing-subscriber = { version = "0.3.16", features = ["env-filter", "fmt"] } kube = { version = "0.88.1", features = ["runtime", "derive"] } k8s-openapi = { version = "0.21.0", features = ["latest"] } jsonrpsee = { version = "0.21.0", features = ["server"] } -tower = { version = "0.4.13" } +tower = "0.4.13" 
# Note that "bench" profile inherits from "release" profile and # "test" profile inherits from "dev" profile. diff --git a/node/tools/Cargo.toml b/node/tools/Cargo.toml index 3546e97c..93e80fa4 100644 --- a/node/tools/Cargo.toml +++ b/node/tools/Cargo.toml @@ -26,6 +26,7 @@ rocksdb.workspace = true serde.workspace = true serde_json.workspace = true tokio.workspace = true +tokio-retry.workspace = true tracing.workspace = true tracing-subscriber.workspace = true vise-exporter.workspace = true diff --git a/node/tools/src/bin/deployer.rs b/node/tools/src/bin/deployer.rs index b9d4d457..16f9234b 100644 --- a/node/tools/src/bin/deployer.rs +++ b/node/tools/src/bin/deployer.rs @@ -64,7 +64,7 @@ fn generate_config(nodes: usize) -> anyhow::Result<()> { } } - let manifest_path = std::env::var("CARGO_MANIFEST_DIR").unwrap(); + let manifest_path = std::env::var("CARGO_MANIFEST_DIR")?; let root = PathBuf::from(manifest_path).join("k8s_configs"); let _ = fs::remove_dir_all(&root); for (i, cfg) in cfgs.into_iter().enumerate() { @@ -108,12 +108,8 @@ async fn deploy(nodes: usize) -> anyhow::Result<()> { .await?; } - // Waiting 15 secs to allow the pods to start - // TODO: should replace with some safer method - tokio::time::sleep(tokio::time::Duration::from_secs(15)).await; - // obtain seed peer(s) IP(s) - let peer_ips = k8s::get_seed_node_addrs(&client).await; + let peer_ips = k8s::get_seed_node_addrs(&client, seed_nodes).await?; let mut peers = vec![]; diff --git a/node/tools/src/k8s.rs b/node/tools/src/k8s.rs index 5af29820..e16bdf43 100644 --- a/node/tools/src/k8s.rs +++ b/node/tools/src/k8s.rs @@ -1,14 +1,18 @@ -use crate::NodeAddr; +use crate::{config, NodeAddr}; +use anyhow::{anyhow, Context}; use k8s_openapi::api::{ apps::v1::Deployment, core::v1::{Namespace, Pod}, }; use kube::{ api::{ListParams, PostParams}, + core::ObjectList, Api, Client, ResourceExt, }; use serde_json::json; use std::collections::HashMap; +use tokio_retry::strategy::FixedInterval; +use tokio_retry::Retry; use tracing::log::info; use zksync_protobuf::serde::Serde; @@ -20,34 +24,46 @@ pub async fn get_client() -> anyhow::Result { /// Creates a namespace in k8s cluster pub async fn create_or_reuse_namespace(client: &Client, name: &str) -> anyhow::Result<()> { let namespaces: Api = Api::all(client.clone()); - let consensus_namespace = namespaces.get_opt(name).await?; - if consensus_namespace.is_none() { - let namespace: Namespace = serde_json::from_value(json!({ - "apiVersion": "v1", - "kind": "Namespace", - "metadata": { - "name": "consensus", - "labels": { - "name": "consensus" + match namespaces.get_opt(name).await? { + None => { + let namespace: Namespace = serde_json::from_value(json!({ + "apiVersion": "v1", + "kind": "Namespace", + "metadata": { + "name": "consensus", + "labels": { + "name": "consensus" + } } - } - }))?; + }))?; - let namespaces: Api = Api::all(client.clone()); - let post_params = PostParams::default(); - let result = namespaces.create(&post_params, &namespace).await?; + let namespaces: Api = Api::all(client.clone()); + let post_params = PostParams::default(); + let result = namespaces.create(&post_params, &namespace).await?; - info!("Namespace: {} ,created", result.metadata.name.unwrap()); - Ok(()) - } else { - info!( - "Namespace: {} ,already exists", - consensus_namespace.unwrap().metadata.name.unwrap() - ); - Ok(()) + info!( + "Namespace: {} ,created", + result + .metadata + .name + .context("Name not defined in metadata")? 
+ ); + Ok(()) + } + Some(consensus_namespace) => { + info!( + "Namespace: {} ,already exists", + consensus_namespace + .metadata + .name + .context("Name not defined in metadata")? + ); + Ok(()) + } } } +/// Creates a deployment pub async fn create_deployment( client: &Client, node_number: usize, @@ -122,21 +138,58 @@ pub async fn create_deployment( let post_params = PostParams::default(); let result = deployments.create(&post_params, &deployment).await?; - info!("Deployment: {} , created", result.metadata.name.unwrap()); + info!( + "Deployment: {} , created", + result + .metadata + .name + .context("Name not defined in metadata")? + ); Ok(()) } -/// Returns a HashMap with mapping: node_name -> IP address -pub async fn get_seed_node_addrs(client: &Client) -> HashMap { +/// Returns a HashMap with mapping: node_id -> IP address +pub async fn get_seed_node_addrs( + client: &Client, + amount: usize, +) -> anyhow::Result> { let mut seed_nodes = HashMap::new(); let pods: Api = Api::namespaced(client.clone(), "consensus"); - let lp = ListParams::default().labels("seed=true"); - for p in pods.list(&lp).await.unwrap() { + // Will retry 15 times during 15 seconds to allow pods to start and obtain an IP + let retry_strategy = FixedInterval::from_millis(1000).take(15); + let pod_list = Retry::spawn(retry_strategy, || get_seed_pods(&pods, amount)).await?; + + for p in pod_list { let node_id = p.labels()["id"].clone(); - seed_nodes.insert(node_id, p.status.unwrap().pod_ip.unwrap()); + seed_nodes.insert( + node_id, + p.status + .context("Status not present")? + .pod_ip + .context("Pod IP address not present")?, + ); } - seed_nodes + Ok(seed_nodes) +} + +async fn get_seed_pods(pods: &Api, amount: usize) -> anyhow::Result> { + let lp = ListParams::default().labels("seed=true"); + let p = pods.list(&lp).await?; + if p.items.len() == amount && p.iter().all(is_pod_running) { + Ok(p) + } else { + Err(anyhow!("Pods are not ready")) + } +} + +fn is_pod_running(pod: &Pod) -> bool { + if let Some(status) = &pod.status { + if let Some(phase) = &status.phase { + return phase == "Running"; + } + } + false } fn get_cli_args(peers: Vec) -> Vec { @@ -145,7 +198,7 @@ fn get_cli_args(peers: Vec) -> Vec { } else { [ "--add-gossip-static-outbound".to_string(), - encode_json( + config::encode_json( &peers .iter() .map(|e| Serde(e.clone())) @@ -155,10 +208,3 @@ fn get_cli_args(peers: Vec) -> Vec { .to_vec() } } - -/// Encodes a generated proto message to json for arbitrary ProtoFmt. -pub fn encode_json(x: &T) -> String { - let mut s = serde_json::Serializer::new(vec![]); - T::serialize(x, &mut s).unwrap(); - String::from_utf8(s.into_inner()).unwrap() -} From 75b21edca36c40c66d99a47758841c27137185ef Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Thu, 8 Feb 2024 11:17:54 -0300 Subject: [PATCH 090/139] Add makefile targets to run tests inside kubernetes --- Makefile | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/Makefile b/Makefile index 2f2727f0..b05b88e9 100644 --- a/Makefile +++ b/Makefile @@ -17,11 +17,17 @@ nodes_config: docker_build_executor: docker build --output=node/tools/docker_binaries --target=binary . +docker_build_tester: + docker build --output=node/tools/docker_binaries --target=binary -f node/tests/Dockerfile node + docker_node_image: docker build -t consensus-node --target=runtime . 
+docker_test_image:
+	docker build -t test-suite -f node/tests/Dockerfile --target=runtime node
+
 docker_nodes_config:
-	cd ${EXECUTABLE_NODE_DIR} && cargo run --release --bin localnet_config -- --input-addrs docker-config/addresses.txt --output-dir docker-config
+	cd ${EXECUTABLE_NODE_DIR} && cargo run --bin localnet_config -- --input-addrs docker-config/addresses.txt --output-dir docker-config
 
 docker_node:
 	$(MAKE) docker_node_image
@@ -38,10 +44,15 @@ stop_docker_nodes:
 	docker stop consensus-node-1 consensus-node-2
 
 start_k8s_nodes:
-	cd ${EXECUTABLE_NODE_DIR} && cargo run --release --bin deployer generate-config --nodes ${NODES}
+	cd ${EXECUTABLE_NODE_DIR} && cargo run --bin deployer generate-config --nodes ${NODES}
 	$(MAKE) docker_node_image
 	minikube image load consensus-node:latest
-	cd ${EXECUTABLE_NODE_DIR} && cargo run --release --bin deployer deploy --nodes ${NODES}
+	cd ${EXECUTABLE_NODE_DIR} && cargo run --bin deployer deploy --nodes ${NODES}
+
+start_k8s_tests:
+	$(MAKE) docker_test_image
+	minikube image load test-suite:latest
+	kubectl apply -f node/tests/test_deployments.yaml
 
 # Clean commands
From 5d599a86622df9aac4eddade3fc2ade82073aa4a Mon Sep 17 00:00:00 2001
From: IAvecilla
Date: Thu, 8 Feb 2024 11:26:51 -0300
Subject: [PATCH 091/139] Add new binary to run test scenarios

---
 node/Cargo.lock                 | 10 ++++++++++
 node/Cargo.toml                 |  3 ++-
 node/tests/Cargo.toml           | 17 +++++++++++++++
 node/tests/src/main.rs          | 22 ++++++++++++++++++++++
 node/tools/tests/sanity_test.rs | 21 ---------------------
 node/tools/tests/test.sh        |  2 --
 6 files changed, 51 insertions(+), 24 deletions(-)
 create mode 100644 node/tests/Cargo.toml
 create mode 100644 node/tests/src/main.rs
 delete mode 100644 node/tools/tests/sanity_test.rs
 delete mode 100755 node/tools/tests/test.sh

diff --git a/node/Cargo.lock b/node/Cargo.lock
index 4687e19c..74082815 100644
--- a/node/Cargo.lock
+++ b/node/Cargo.lock
@@ -2947,6 +2947,16 @@ dependencies = [
  "syn 2.0.48",
 ]
 
+[[package]]
+name = "tester"
+version = "0.1.0"
+dependencies = [
+ "jsonrpsee",
+ "serde_json",
+ "tokio",
+ "zksync_consensus_tools",
+]
+
 [[package]]
 name = "thiserror"
 version = "1.0.56"
diff --git a/node/Cargo.toml b/node/Cargo.toml
index 5229f531..98ca4a02 100644
--- a/node/Cargo.toml
+++ b/node/Cargo.toml
@@ -10,7 +10,8 @@ members = [
     "libs/protobuf_build",
     "libs/roles",
     "libs/storage",
-    "libs/utils",
+    "libs/utils",
+    "tests",
     "tools",
 ]
 resolver = "2"
diff --git a/node/tests/Cargo.toml b/node/tests/Cargo.toml
new file mode 100644
index 00000000..fa1e9cd5
--- /dev/null
+++ b/node/tests/Cargo.toml
@@ -0,0 +1,17 @@
+[package]
+name = "tester"
+version = "0.1.0"
+edition.workspace = true
+authors.workspace = true
+homepage.workspace = true
+license.workspace = true
+
+[dependencies]
+zksync_consensus_tools.workspace = true
+
+serde_json.workspace = true
+tokio.workspace = true
+jsonrpsee.workspace = true
+
+[lints]
+workspace = true
diff --git a/node/tests/src/main.rs b/node/tests/src/main.rs
new file mode 100644
index 00000000..3c97a86e
--- /dev/null
+++ b/node/tests/src/main.rs
@@ -0,0 +1,22 @@
+//! This is a simple test for the RPC server. It checks that the server is running and can respond to requests.
+use jsonrpsee::{core::client::ClientT, http_client::HttpClientBuilder, rpc_params, types::Params};
+use zksync_consensus_tools::rpc::methods::{health_check::HealthCheck, RPCMethod};
+
+/// Sanity test for the RPC server.
+pub async fn sanity_test() { + let url = "http://127.0.0.1:3154"; + let rpc_client = HttpClientBuilder::default().build(url).unwrap(); + let params = Params::new(None); + let response: serde_json::Value = rpc_client + .request(HealthCheck::method(), rpc_params!()) + .await + .unwrap(); + assert_eq!(response, HealthCheck::callback(params).unwrap()); +} + +/// Main function for the test. +#[tokio::main] +async fn main() { + sanity_test().await; + println!("IT WORKS!"); +} diff --git a/node/tools/tests/sanity_test.rs b/node/tools/tests/sanity_test.rs deleted file mode 100644 index ae59d77f..00000000 --- a/node/tools/tests/sanity_test.rs +++ /dev/null @@ -1,21 +0,0 @@ -mod tests { - use jsonrpsee::{core::client::ClientT, http_client::HttpClientBuilder, rpc_params, types::Params}; - use kube::Api; - use zksync_consensus_tools::rpc::methods::{health_check::HealthCheck, RPCMethod}; - use zksync_consensus_tools::k8s; - use k8s_openapi::api::{ - }; - - - #[tokio::test] - async fn sanity_test() { - let url = "http://127.0.0.1:3154"; - //let k8s_client = k8s::get_client().await.unwrap(); - - let rpc_client = HttpClientBuilder::default().build(url).unwrap(); - let params = Params::new(None); - - let response : serde_json::Value = rpc_client.request("health", rpc_params!()).await.unwrap(); - assert_eq!(response, HealthCheck::callback(params).unwrap()); - } -} diff --git a/node/tools/tests/test.sh b/node/tools/tests/test.sh deleted file mode 100755 index 0484332d..00000000 --- a/node/tools/tests/test.sh +++ /dev/null @@ -1,2 +0,0 @@ -cd ../../../ && make start_k8s_nodes NODES=1 -cd node && cargo test sanity_test From cf48dfffb03786d95dd8dd6ce2e94713217d8425 Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Thu, 8 Feb 2024 11:32:13 -0300 Subject: [PATCH 092/139] Add Dockerfile and kubernetes config files --- node/tests/Dockerfile | 13 +++++++++++++ node/tests/test_deployments.yaml | 21 +++++++++++++++++++++ 2 files changed, 34 insertions(+) create mode 100644 node/tests/Dockerfile create mode 100644 node/tests/test_deployments.yaml diff --git a/node/tests/Dockerfile b/node/tests/Dockerfile new file mode 100644 index 00000000..4704ed1f --- /dev/null +++ b/node/tests/Dockerfile @@ -0,0 +1,13 @@ +FROM rust:latest as builder +COPY . /test/ +WORKDIR /test +RUN apt-get update && apt-get install -y libclang-dev +RUN cargo build --release + +FROM scratch as binary +COPY --from=builder test/target/release/tester . 
+ +FROM debian:stable-slim as runtime +COPY /tools/docker_binaries/tester /test/ + +WORKDIR /test diff --git a/node/tests/test_deployments.yaml b/node/tests/test_deployments.yaml new file mode 100644 index 00000000..ab66cc8b --- /dev/null +++ b/node/tests/test_deployments.yaml @@ -0,0 +1,21 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: tests-deployment + namespace: consensus + labels: + app: test-node +spec: + selector: + matchLabels: + app: test-node + template: + metadata: + labels: + app: test-node + spec: + containers: + - name: test-suite + image: test-suite:latest + imagePullPolicy: Never + command: ["./tester"] From 7b5d2649cdcf45a0002e2988008037556cd1f194 Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Thu, 8 Feb 2024 11:33:33 -0300 Subject: [PATCH 093/139] Fix format --- node/tools/src/bin/deployer.rs | 7 ++- node/tools/src/k8s.rs | 84 +++++++++++++++++----------------- node/tools/src/main.rs | 13 ++---- 3 files changed, 53 insertions(+), 51 deletions(-) diff --git a/node/tools/src/bin/deployer.rs b/node/tools/src/bin/deployer.rs index 4a28fbb7..5d5056fe 100644 --- a/node/tools/src/bin/deployer.rs +++ b/node/tools/src/bin/deployer.rs @@ -106,7 +106,12 @@ async fn deploy(nodes: usize) -> anyhow::Result<()> { NAMESPACE, ) .await?; - k8s::create_or_reuse_service(&client, &format!("consensus-node-{i:0>2}"), &format!("consensus-node-{i:0>2}")).await?; + k8s::create_or_reuse_service( + &client, + &format!("consensus-node-{i:0>2}"), + &format!("consensus-node-{i:0>2}"), + ) + .await?; } // // Waiting 15 secs to allow the pods to start diff --git a/node/tools/src/k8s.rs b/node/tools/src/k8s.rs index af789fd6..92e810f5 100644 --- a/node/tools/src/k8s.rs +++ b/node/tools/src/k8s.rs @@ -11,10 +11,6 @@ use serde_json::json; use std::collections::HashMap; use tracing::log::info; use zksync_protobuf::serde::Serde; -use kube::{ - api::{DeleteParams}, - runtime::wait::{await_condition, conditions::is_pod_running}, -}; /// Get a kube client pub async fn get_client() -> anyhow::Result { @@ -53,48 +49,52 @@ pub async fn create_or_reuse_namespace(client: &Client, name: &str) -> anyhow::R } /// Creates a namespace in k8s cluster -pub async fn create_or_reuse_service(client: &Client, name: &str, node_name: &str) -> anyhow::Result<()> { - let services: Api = Api::namespaced(client.clone(), "consensus"); - let example_service = services.get_opt(name).await?; - if example_service.is_none() { - let service: Service = serde_json::from_value(json!({ - "apiVersion": "v1", - "kind": "Service", - "metadata": { - "name": name, - "namespace": "consensus", - "labels": { - "app": node_name - } - }, - "spec": { - "type": "NodePort", - "ports": [ - { - "port": 80, - "targetPort": 3154, - "protocol": "TCP", +pub async fn create_or_reuse_service( + client: &Client, + name: &str, + node_name: &str, +) -> anyhow::Result<()> { + let services: Api = Api::namespaced(client.clone(), "consensus"); + let example_service = services.get_opt(name).await?; + if example_service.is_none() { + let service: Service = serde_json::from_value(json!({ + "apiVersion": "v1", + "kind": "Service", + "metadata": { + "name": name, + "namespace": "consensus", + "labels": { + "app": node_name } - ], - "selector": { - "app": node_name }, - } - }))?; + "spec": { + "type": "NodePort", + "ports": [ + { + "port": 80, + "targetPort": 3154, + "protocol": "TCP", + } + ], + "selector": { + "app": node_name + }, + } + }))?; - let services: Api = Api::namespaced(client.clone(), "consensus"); - let post_params = PostParams::default(); - let 
result = services.create(&post_params, &service).await?; + let services: Api = Api::namespaced(client.clone(), "consensus"); + let post_params = PostParams::default(); + let result = services.create(&post_params, &service).await?; - info!("Service: {} ,created", result.metadata.name.unwrap()); - Ok(()) - } else { - info!( - "Service: {} ,already exists", - example_service.unwrap().metadata.name.unwrap() - ); - Ok(()) - } + info!("Service: {} ,created", result.metadata.name.unwrap()); + Ok(()) + } else { + info!( + "Service: {} ,already exists", + example_service.unwrap().metadata.name.unwrap() + ); + Ok(()) + } } pub async fn create_deployment( diff --git a/node/tools/src/main.rs b/node/tools/src/main.rs index f6efb5f9..ff71e855 100644 --- a/node/tools/src/main.rs +++ b/node/tools/src/main.rs @@ -103,15 +103,12 @@ async fn main() -> anyhow::Result<()> { .context("config_paths().load()")?; // Add gossipStaticOutbound pairs from cli to config - if let Some(go) = args.add_gossip_static_outbound { - configs.app.gossip_static_outbound.extend( - go - .0 - .into_iter() - .map(|e| (e.0.key, e.0.addr)), - ); + if let Some(outbound) = args.add_gossip_static_outbound { + configs + .app + .gossip_static_outbound + .extend(outbound.0.into_iter().map(|e| (e.0.key, e.0.addr))); } - let (executor, runner) = configs .make_executor(ctx) From 6bd539480b5566d73f20de4ccd72b58e1ac21045 Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Thu, 8 Feb 2024 11:36:36 -0300 Subject: [PATCH 094/139] Revert change to have node ips as directory names for local config --- node/tools/src/bin/localnet_config.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/node/tools/src/bin/localnet_config.rs b/node/tools/src/bin/localnet_config.rs index 60a74749..ebe9ba1d 100644 --- a/node/tools/src/bin/localnet_config.rs +++ b/node/tools/src/bin/localnet_config.rs @@ -70,7 +70,7 @@ fn main() -> anyhow::Result<()> { for (i, cfg) in cfgs.into_iter().enumerate() { // Recreate the directory for the node's config. - let root = args.output_dir.join(format!("node_{}", i)); + let root = args.output_dir.join(cfg.public_addr.to_string()); let _ = fs::remove_dir_all(&root); fs::create_dir_all(&root).with_context(|| format!("create_dir_all({:?})", root))?; cfg.write_to_file(&root)?; From 1d584319b97f7ec55c8763fba841d19b3d078557 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Esteban=20Dimitroff=20H=C3=B3di?= Date: Fri, 9 Feb 2024 15:39:42 -0300 Subject: [PATCH 095/139] Making cli argument optional and forwarding arguments in entrypoint script --- docker-entrypoint.sh | 2 +- node/tools/src/config.rs | 15 ++++++++++++--- node/tools/src/k8s.rs | 10 +++++++--- node/tools/src/main.rs | 14 +++++++------- 4 files changed, 27 insertions(+), 14 deletions(-) diff --git a/docker-entrypoint.sh b/docker-entrypoint.sh index 07a3c781..6b8977c1 100755 --- a/docker-entrypoint.sh +++ b/docker-entrypoint.sh @@ -3,4 +3,4 @@ cd docker_config/${NODE_ID} export RUST_LOG=INFO -../../executor +../../executor $@ diff --git a/node/tools/src/config.rs b/node/tools/src/config.rs index 81b18bed..24e8f4cd 100644 --- a/node/tools/src/config.rs +++ b/node/tools/src/config.rs @@ -1,6 +1,7 @@ //! Node configuration. use crate::{proto, store}; use anyhow::Context as _; +use serde_json::{ser::Formatter, Serializer}; use std::{ collections::{HashMap, HashSet}, fs, @@ -31,10 +32,18 @@ pub fn decode_json(json: &str) -> anyhow::Result /// Encodes a generated proto message to json for arbitrary ProtoFmt. 
pub fn encode_json(x: &T) -> String { - let mut s = serde_json::Serializer::pretty(vec![]); - T::serialize(x, &mut s).unwrap(); - String::from_utf8(s.into_inner()).unwrap() + let s = serde_json::Serializer::pretty(vec![]); + encode_json_with_serializer(x, s) } + +pub fn encode_json_with_serializer( + x: &T, + mut serializer: Serializer, F>, +) -> String { + T::serialize(x, &mut serializer).unwrap(); + String::from_utf8(serializer.into_inner()).unwrap() +} + // pub fn encode_json(x: &T) -> String { // let mut s = serde_json::Serializer::pretty(vec![]); // zksync_protobuf::serde::serialize(x, &mut s).unwrap(); diff --git a/node/tools/src/k8s.rs b/node/tools/src/k8s.rs index e16bdf43..e03ee98f 100644 --- a/node/tools/src/k8s.rs +++ b/node/tools/src/k8s.rs @@ -113,18 +113,21 @@ pub async fn create_deployment( "ports": [ { "containerPort": 3054 + }, + { + "containerPort": 3154 } ], "livenessProbe": { "httpGet": { "path": "/health", - "port": 3054 + "port": 3154 } }, "readinessProbe": { "httpGet": { "path": "/health", - "port": 3054 + "port": 3154 } } } @@ -198,11 +201,12 @@ fn get_cli_args(peers: Vec) -> Vec { } else { [ "--add-gossip-static-outbound".to_string(), - config::encode_json( + config::encode_json_with_serializer( &peers .iter() .map(|e| Serde(e.clone())) .collect::>>(), + serde_json::Serializer::new(vec![]), ), ] .to_vec() diff --git a/node/tools/src/main.rs b/node/tools/src/main.rs index 675932ac..aad9a4cf 100644 --- a/node/tools/src/main.rs +++ b/node/tools/src/main.rs @@ -43,7 +43,7 @@ struct Args { rpc_port: Option, /// IP address and key of the seed peers. #[arg(long)] - add_gossip_static_outbound: NodeAddrs, + add_gossip_static_outbound: Option, } impl Args { @@ -103,12 +103,12 @@ async fn main() -> anyhow::Result<()> { .context("config_paths().load()")?; // Add gossipStaticOutbound pairs from cli to config - configs.app.gossip_static_outbound.extend( - args.add_gossip_static_outbound - .0 - .into_iter() - .map(|e| (e.0.key, e.0.addr)), - ); + if let Some(addrs) = args.add_gossip_static_outbound { + configs + .app + .gossip_static_outbound + .extend(addrs.0.into_iter().map(|e| (e.0.key, e.0.addr))); + } let (executor, runner) = configs .make_executor(ctx) From c864f51b60f01dd6019d5d61ac5ede4c8f446f74 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Esteban=20Dimitroff=20H=C3=B3di?= Date: Fri, 9 Feb 2024 18:36:48 -0300 Subject: [PATCH 096/139] Fixed compiler warnings --- node/tools/src/bin/deployer.rs | 4 ++-- node/tools/src/config.rs | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/node/tools/src/bin/deployer.rs b/node/tools/src/bin/deployer.rs index 16f9234b..fd1205cc 100644 --- a/node/tools/src/bin/deployer.rs +++ b/node/tools/src/bin/deployer.rs @@ -7,7 +7,7 @@ use anyhow::Context; use clap::{Parser, Subcommand}; use rand::Rng; use zksync_consensus_crypto::{Text, TextFmt}; -use zksync_consensus_roles::node::{self, SecretKey}; +use zksync_consensus_roles::node::SecretKey; use zksync_consensus_tools::k8s; use zksync_consensus_tools::AppConfig; use zksync_consensus_tools::NodeAddr; @@ -51,7 +51,7 @@ fn generate_config(nodes: usize) -> anyhow::Result<()> { // Generate the node keys for all the replicas. 
let rng = &mut rand::thread_rng();
-    let node_keys: Vec<node::SecretKey> = (0..nodes).map(|_| rng.gen()).collect();
+    let node_keys: Vec<SecretKey> = (0..nodes).map(|_| rng.gen()).collect();
 
     let (default_config, validator_keys) = AppConfig::default_for(nodes);
     let mut cfgs: Vec<_> = (0..nodes).map(|_| default_config.clone()).collect();
diff --git a/node/tools/src/config.rs b/node/tools/src/config.rs
index 24e8f4cd..5697febc 100644
--- a/node/tools/src/config.rs
+++ b/node/tools/src/config.rs
@@ -31,12 +31,12 @@ pub fn decode_json<T: serde::de::DeserializeOwned>(json: &str) -> anyhow::Result<T>
 }
 
 /// Encodes a generated proto message to json for arbitrary ProtoFmt.
-pub fn encode_json<T: serde::ser::Serialize>(x: &T) -> String {
+pub(crate) fn encode_json<T: serde::ser::Serialize>(x: &T) -> String {
     let s = serde_json::Serializer::pretty(vec![]);
     encode_json_with_serializer(x, s)
 }
 
-pub fn encode_json_with_serializer<T: serde::ser::Serialize, F: Formatter>(
+pub(crate) fn encode_json_with_serializer<T: serde::ser::Serialize, F: Formatter>(
     x: &T,
     mut serializer: Serializer<Vec<u8>, F>,
 ) -> String {
From b5b5ee6ed29231648c3bbaf95bab5e2c51c169a4 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Esteban=20Dimitroff=20H=C3=B3di?=
Date: Wed, 14 Feb 2024 17:50:43 -0300
Subject: [PATCH 097/139] Consensus node can now obtain its public address
 from an ENV VAR. Also added an endpoint to be able to check config from nodes

---
 node/tools/src/config.rs          | 33 ++++++++++++++++++-----------
 node/tools/src/k8s.rs             | 10 +++++++++-
 node/tools/src/main.rs            |  8 +++++++-
 node/tools/src/rpc/methods/mod.rs |  1 +
 node/tools/src/rpc/server.rs      | 20 ++++++++++++++++---
 5 files changed, 56 insertions(+), 16 deletions(-)

diff --git a/node/tools/src/config.rs b/node/tools/src/config.rs
index 5697febc..69b603ed 100644
--- a/node/tools/src/config.rs
+++ b/node/tools/src/config.rs
@@ -2,6 +2,7 @@
 use crate::{proto, store};
 use anyhow::Context as _;
 use serde_json::{ser::Formatter, Serializer};
+use std::str::FromStr;
 use std::{
     collections::{HashMap, HashSet},
     fs,
@@ -12,10 +13,7 @@ use zksync_concurrency::ctx;
 use zksync_consensus_bft as bft;
 use zksync_consensus_crypto::{read_optional_text, read_required_text, Text, TextFmt};
 use zksync_consensus_executor as executor;
-use zksync_consensus_roles::{
-    node::{self, PublicKey},
-    validator,
-};
+use zksync_consensus_roles::{node, validator};
 use zksync_consensus_storage::{BlockStore, BlockStoreRunner, PersistentBlockStore};
 use zksync_protobuf::{required, serde::Serde, ProtoFmt};
 
@@ -33,10 +31,10 @@ pub fn decode_json<T: serde::de::DeserializeOwned>(json: &str) -> anyhow::Result<T>
 /// Encodes a generated proto message to json for arbitrary ProtoFmt.
pub(crate) fn encode_json<T: serde::ser::Serialize>(x: &T) -> String {
     let s = serde_json::Serializer::pretty(vec![]);
-    encode_json_with_serializer(x, s)
+    encode_with_serializer(x, s)
 }
 
-pub(crate) fn encode_json_with_serializer<T: serde::ser::Serialize, F: Formatter>(
+pub(crate) fn encode_with_serializer<T: serde::ser::Serialize, F: Formatter>(
     x: &T,
     mut serializer: Serializer<Vec<u8>, F>,
 ) -> String {
@@ -53,7 +51,7 @@ pub(crate) fn encode_with_serializer<T: serde::ser::Serialize, F: Formatter>(
 pub struct AppConfig {
-    pub gossip_static_inbound: HashSet<PublicKey>,
-    pub gossip_static_outbound: HashMap<PublicKey, SocketAddr>,
+    pub gossip_static_inbound: HashSet<node::PublicKey>,
+    pub gossip_static_outbound: HashMap<node::PublicKey, SocketAddr>,
+}
+
+impl AppConfig {
+    pub fn check_public_addr(&mut self) -> anyhow::Result<()> {
+        if let Ok(public_addr) = std::env::var("PUBLIC_ADDR") {
+            self.public_addr = SocketAddr::from_str(&format!("{public_addr}:{NODES_PORT}"))?;
+        }
+        Ok(())
+    }
 }
 
 impl ProtoFmt for AppConfig {
@@ -275,12 +282,16 @@ impl AppConfig {
         self
     }
 
-    pub fn add_gossip_static_outbound(&mut self, key: PublicKey, addr: SocketAddr) -> &mut Self {
+    pub fn add_gossip_static_outbound(
+        &mut self,
+        key: node::PublicKey,
+        addr: SocketAddr,
+    ) -> &mut Self {
         self.gossip_static_outbound.insert(key, addr);
         self
     }
 
-    pub fn add_gossip_static_inbound(&mut self, key: PublicKey) -> &mut Self {
+    pub fn add_gossip_static_inbound(&mut self, key: node::PublicKey) -> &mut Self {
         self.gossip_static_inbound.insert(key);
         self
     }
diff --git a/node/tools/src/k8s.rs b/node/tools/src/k8s.rs
index e03ee98f..c414b593 100644
--- a/node/tools/src/k8s.rs
+++ b/node/tools/src/k8s.rs
@@ -105,6 +105,14 @@ pub async fn create_deployment(
                             {
                                 "name": "NODE_ID",
                                 "value": node_id
+                            },
+                            {
+                                "name": "PUBLIC_ADDR",
+                                "valueFrom": {
+                                    "fieldRef": {
+                                        "fieldPath": "status.podIP"
+                                    }
+                                }
                             }
                         ],
                         "command": ["./k8s_entrypoint.sh"],
diff --git a/node/tools/src/main.rs b/node/tools/src/main.rs
index aad9a4cf..428c6c09 100644
--- a/node/tools/src/main.rs
+++ b/node/tools/src/main.rs
@@ -102,6 +102,9 @@ async fn main() -> anyhow::Result<()> {
         .load()
         .context("config_paths().load()")?;
 
+    // if `PUBLIC_ADDR` env var is set, use it to override publicAddr in config
+    configs.app.check_public_addr().context("Public Address")?;
+
     // Add gossipStaticOutbound pairs from cli to config
     if let Some(addrs) = args.add_gossip_static_outbound {
         configs
@@ -121,7 +124,10 @@ async fn main() -> anyhow::Result<()> {
     } else {
         rpc_addr.set_port(rpc_addr.port() + 100);
     }
-    let rpc_server = RPCServer::new(rpc_addr);
+
+    // cloning configuration to let RPCServer show it
+    // TODO: this should be queried in real time instead, to reflect any possible change in config
+    let rpc_server = RPCServer::new(rpc_addr, configs.app.clone());
 
     // Initialize the storage.
scope::run!(ctx, |ctx, s| async {
diff --git a/node/tools/src/rpc/methods/mod.rs b/node/tools/src/rpc/methods/mod.rs
index 2cac6a52..ab8f2fee 100644
--- a/node/tools/src/rpc/methods/mod.rs
+++ b/node/tools/src/rpc/methods/mod.rs
@@ -10,5 +10,6 @@ pub(crate) trait RPCMethod {
     fn path() -> &'static str;
 }
 
+pub(crate) mod config;
 pub(crate) mod health_check;
 pub(crate) mod peers;
diff --git a/node/tools/src/rpc/server.rs b/node/tools/src/rpc/server.rs
index 6d2d5131..3b66dfa0 100644
--- a/node/tools/src/rpc/server.rs
+++ b/node/tools/src/rpc/server.rs
@@ -1,4 +1,6 @@
-use super::methods::{health_check::HealthCheck, peers::PeersInfo, RPCMethod};
+use crate::AppConfig;
+
+use super::methods::{config::ConfigInfo, health_check::HealthCheck, peers::PeersInfo, RPCMethod};
 use jsonrpsee::server::{middleware::http::ProxyGetRequestLayer, RpcModule, Server};
 use std::net::SocketAddr;
 use zksync_concurrency::{ctx, scope};
@@ -7,11 +9,13 @@ use zksync_concurrency::{ctx, scope};
 pub struct RPCServer {
     /// IP address to bind to.
     ip_address: SocketAddr,
+    /// Application configuration.
+    config: AppConfig,
 }
 
 impl RPCServer {
-    pub fn new(ip_address: SocketAddr) -> Self {
-        Self { ip_address }
+    pub fn new(ip_address: SocketAddr, config: AppConfig) -> Self {
+        Self { ip_address, config }
     }
 
     /// Runs the RPC server.
@@ -26,6 +30,10 @@ impl RPCServer {
         .layer(ProxyGetRequestLayer::new(
             PeersInfo::path(),
             PeersInfo::method(),
+        )?)
+        .layer(ProxyGetRequestLayer::new(
+            ConfigInfo::path(),
+            ConfigInfo::method(),
         )?);
 
         let server = Server::builder()
@@ -39,6 +47,12 @@ impl RPCServer {
         })?;
         module.register_method(PeersInfo::method(), |params, _| PeersInfo::callback(params))?;
 
+        // TODO: find a better way to implement this; the config had to be cloned and moved into the closure to satisfy the borrow checker
+        let config = self.config.clone();
+        module.register_method(ConfigInfo::method(), move |_params, _| {
+            ConfigInfo::info(config.clone())
+        })?;
+
        let handle = server.start(module);
         scope::run!(ctx, |ctx, s| async {
             s.spawn_bg(async {
From 702a587a2689a8c908032b4b57b1c08db97caa7b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Esteban=20Dimitroff=20H=C3=B3di?=
Date: Thu, 15 Feb 2024 10:42:38 -0300
Subject: [PATCH 098/139] Added config RPC endpoint

---
 node/tools/src/rpc/methods/config.rs | 47 ++++++++++++++++++++++++++++
 1 file changed, 47 insertions(+)
 create mode 100644 node/tools/src/rpc/methods/config.rs

diff --git a/node/tools/src/rpc/methods/config.rs b/node/tools/src/rpc/methods/config.rs
new file mode 100644
index 00000000..0d22ec4d
--- /dev/null
+++ b/node/tools/src/rpc/methods/config.rs
@@ -0,0 +1,47 @@
+//! Config method for RPC server.
+use crate::{config::encode_json, decode_json, AppConfig};
+
+use super::RPCMethod;
+use jsonrpsee::types::{error::ErrorCode, Params};
+use std::fs::{self};
+use zksync_protobuf::serde::Serde;
+
+/// Config method for RPC server.
+pub(crate) struct ConfigInfo;
+
+// The RPCMethod trait should be more general, to allow external parameters like in this case
+// TODO fix the trait and implement this code in it
+impl ConfigInfo {
+    pub(crate) fn info(config: AppConfig) -> Result<serde_json::Value, ErrorCode> {
+        // This may change in the future since we are assuming that the executor binary is being run inside the config directory.
+        Ok(serde_json::json!({
+            "config": encode_json(&Serde(config))
+        }))
+    }
+}
+
+impl RPCMethod for ConfigInfo {
+    /// Config response for /config endpoint.
+    fn callback(_params: Params) -> Result<serde_json::Value, ErrorCode> {
+        // This may change in the future since we are assuming that the executor binary is being run inside the config directory.
+        let node_config =
+            fs::read_to_string("config.json").map_err(|_e| ErrorCode::InternalError)?;
+        let node_config = decode_json::<Serde<AppConfig>>(&node_config)
+            .map_err(|_e| ErrorCode::InternalError)?
+            .0;
+        let config = encode_json(&Serde(node_config));
+        Ok(serde_json::json!({
+            "config": config
+        }))
+    }
+
+    /// Config method name.
+    fn method() -> &'static str {
+        "config"
+    }
+
+    /// Method path for GET requests.
+    fn path() -> &'static str {
+        "/config"
+    }
+}
From 1ef6877faa5c6a79c738212b1c00e744efc1a09d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Esteban=20Dimitroff=20H=C3=B3di?=
Date: Thu, 15 Feb 2024 11:11:19 -0300
Subject: [PATCH 099/139] Corrected k8s_entrypoint.sh script to forward cli
 arguments

---
 k8s_entrypoint.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/k8s_entrypoint.sh b/k8s_entrypoint.sh
index 62aaf5fa..8f6f498f 100644
--- a/k8s_entrypoint.sh
+++ b/k8s_entrypoint.sh
@@ -3,4 +3,4 @@
 cd k8s_config/${NODE_ID}
 export RUST_LOG=INFO
-../../executor
+../../executor $@
From 8b9d14919cf34162f5c941507ac25ccfb88dff93 Mon Sep 17 00:00:00 2001
From: IAvecilla
Date: Thu, 15 Feb 2024 12:17:56 -0300
Subject: [PATCH 100/139] Update config files to run tester in k8s pod

---
 node/tests/Dockerfile            | 6 +++++-
 node/tests/test_deployments.yaml | 2 +-
 node/tests/tester_entrypoint.sh  | 5 +++++
 3 files changed, 11 insertions(+), 2 deletions(-)
 create mode 100644 node/tests/tester_entrypoint.sh

diff --git a/node/tests/Dockerfile b/node/tests/Dockerfile
index 4704ed1f..fa733b0d 100644
--- a/node/tests/Dockerfile
+++ b/node/tests/Dockerfile
@@ -1,5 +1,5 @@
 FROM rust:latest as builder
-COPY . /test/
+COPY /node /test/
 WORKDIR /test
 RUN apt-get update && apt-get install -y libclang-dev
 RUN cargo build --release
@@ -9,5 +9,9 @@ COPY --from=builder test/target/release/tester .
 
 FROM debian:stable-slim as runtime
 COPY /tools/docker_binaries/tester /test/
+COPY /tests/tester_entrypoint.sh /test/
+COPY /tests/config.txt /test/
 
 WORKDIR /test
+
+RUN chmod +x tester_entrypoint.sh
diff --git a/node/tests/test_deployments.yaml b/node/tests/test_deployments.yaml
index ab66cc8b..b1a882eb 100644
--- a/node/tests/test_deployments.yaml
+++ b/node/tests/test_deployments.yaml
@@ -18,4 +18,4 @@ spec:
         - name: test-suite
           image: test-suite:latest
           imagePullPolicy: Never
-          command: ["./tester"]
+          command: ["./tester_entrypoint.sh"]
diff --git a/node/tests/tester_entrypoint.sh b/node/tests/tester_entrypoint.sh
new file mode 100644
index 00000000..e60dda6a
--- /dev/null
+++ b/node/tests/tester_entrypoint.sh
@@ -0,0 +1,5 @@
+#!/bin/bash
+# This file is the entrypoint of the Kubernetes pod that runs the tester binary copied inside it.
+
+export RUST_LOG=INFO
+./tester
From 0d1511d58fd5fb2f7f2a6b3fd566924cd2d34305 Mon Sep 17 00:00:00 2001
From: IAvecilla
Date: Thu, 15 Feb 2024 12:19:01 -0300
Subject: [PATCH 101/139] Make tester a CLI tool to generate config and run
 the tests

---
 node/tests/src/main.rs | 86 ++++++++++++++++++++++++++++++++++++------
 node/tools/src/k8s.rs  | 21 ++++++++++-
 2 files changed, 95 insertions(+), 12 deletions(-)

diff --git a/node/tests/src/main.rs b/node/tests/src/main.rs
index 3c97a86e..0c558948 100644
--- a/node/tests/src/main.rs
+++ b/node/tests/src/main.rs
@@ -1,22 +1,86 @@
 //! This is a simple test for the RPC server. It checks that the server is running and can respond to requests.
+use std::{fs, io::Write};
+
+use clap::{Parser, Subcommand};
 use jsonrpsee::{core::client::ClientT, http_client::HttpClientBuilder, rpc_params, types::Params};
-use zksync_consensus_tools::rpc::methods::{health_check::HealthCheck, RPCMethod};
+use zksync_consensus_tools::{
+    k8s,
+    rpc::methods::{health_check::HealthCheck, RPCMethod},
+};
+
+/// Command line arguments.
+#[derive(Debug, Parser)]
+#[command(name = "tester")]
+struct TesterCLI {
+    /// Subcommand to run.
+    #[command(subcommand)]
+    command: TesterCommands,
+}
+
+/// Subcommands.
+#[derive(Subcommand, Debug)]
+enum TesterCommands {
+    /// Generate configs for the nodes.
+    GenerateConfig,
+    /// Deploy the nodes.
+    Run,
+}
+
+fn get_config_path() -> String {
+    // This way we can run the test from any directory, and also inside the Kubernetes pod.
+    let manifest_path = std::env::var("CARGO_MANIFEST_DIR");
+    if let Ok(manifest) = manifest_path {
+        format!("{}/config.txt", manifest)
+    } else {
+        "config.txt".to_owned()
+    }
+}
+
+/// Generate a config file with the IPs of the consensus nodes in the kubernetes cluster.
+pub async fn generate_config() {
+    let client = k8s::get_client().await.unwrap();
+    let pods_ip = k8s::get_consensus_node_ips(&client).await.unwrap();
+    let config_file_path: String = get_config_path();
+    for ip in pods_ip {
+        let mut config_file = fs::OpenOptions::new()
+            .write(true)
+            .create(true)
+            .truncate(true)
+            .open(&config_file_path)
+            .unwrap();
+        config_file.write_all(ip.as_bytes()).unwrap();
+    }
+}
 
 /// Sanity test for the RPC server.
 pub async fn sanity_test() {
-    let url = "http://127.0.0.1:3154";
-    let rpc_client = HttpClientBuilder::default().build(url).unwrap();
-    let params = Params::new(None);
-    let response: serde_json::Value = rpc_client
-        .request(HealthCheck::method(), rpc_params!())
-        .await
-        .unwrap();
-    assert_eq!(response, HealthCheck::callback(params).unwrap());
+    let config_file_path = get_config_path();
+    let node_ips = fs::read_to_string(config_file_path).unwrap();
+    for ip in node_ips.lines() {
+        let url: String = format!("http://{}:3154", ip);
+        let rpc_client = HttpClientBuilder::default().build(url).unwrap();
+        let params = Params::new(None);
+        let response: serde_json::Value = rpc_client
+            .request(HealthCheck::method(), rpc_params!())
+            .await
+            .unwrap();
+        assert_eq!(response, HealthCheck::callback(params).unwrap());
+    }
 }
 
 /// Main function for the test.
 #[tokio::main]
 async fn main() {
-    sanity_test().await;
-    println!("IT WORKS!");
+    let args = TesterCLI::parse();
+    tracing::trace!(?args, "Starting node");
+    match args.command {
+        TesterCommands::GenerateConfig => {
+            generate_config().await;
+            tracing::info!("Config successfully generated")
+        }
+        TesterCommands::Run => {
+            sanity_test().await;
+            tracing::info!("Test passed!")
+        }
+    }
 }
diff --git a/node/tools/src/k8s.rs b/node/tools/src/k8s.rs
index bd19835f..89df7fca 100644
--- a/node/tools/src/k8s.rs
+++ b/node/tools/src/k8s.rs
@@ -2,7 +2,7 @@ use crate::{config, NodeAddr};
 use anyhow::{anyhow, Context};
 use k8s_openapi::api::{
     apps::v1::Deployment,
-    core::v1::{Namespace, Pod, Service},
+    core::v1::{Namespace, Pod},
 };
 use kube::{
     api::{ListParams, PostParams},
@@ -21,6 +21,25 @@ pub async fn get_client() -> anyhow::Result<Client> {
     Ok(Client::try_default().await?)
}
 
+/// Get the IP addresses of the consensus nodes running in the cluster.
+pub async fn get_consensus_node_ips(client: &Client) -> anyhow::Result<Vec<String>> {
+    let pods: Api<Pod> = Api::namespaced(client.clone(), "consensus");
+    let lp = ListParams::default();
+    let pod = pods.list(&lp).await?;
+    let node_ips: Vec<String> = pod
+        .into_iter()
+        .filter(|pod| {
+            pod.clone()
+                .metadata
+                .name
+                .unwrap()
+                .starts_with("consensus-node")
+        })
+        .map(|pod| pod.status.unwrap().pod_ip.unwrap())
+        .collect();
+    Ok(node_ips)
+}
+
 /// Creates a namespace in k8s cluster
 pub async fn create_or_reuse_namespace(client: &Client, name: &str) -> anyhow::Result<()> {
     let namespaces: Api<Namespace> = Api::all(client.clone());
From 91632d0729dd173d9bf96d9f9501f7220ddaa32c Mon Sep 17 00:00:00 2001
From: IAvecilla
Date: Thu, 15 Feb 2024 12:19:16 -0300
Subject: [PATCH 102/139] Update Makefile with new tester commands

---
 Makefile | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/Makefile b/Makefile
index b05b88e9..f84ff7a9 100644
--- a/Makefile
+++ b/Makefile
@@ -18,7 +18,7 @@ docker_build_executor:
 	docker build --output=node/tools/docker_binaries --target=binary .
 
 docker_build_tester:
-	docker build --output=node/tools/docker_binaries --target=binary -f node/tests/Dockerfile node
+	docker build --output=node/tools/docker_binaries --target=binary -f node/tests/Dockerfile .
 
 docker_node_image:
 	docker build -t consensus-node --target=runtime .
@@ -50,6 +50,7 @@ start_k8s_nodes:
 	cd ${EXECUTABLE_NODE_DIR} && cargo run --bin deployer deploy --nodes ${NODES}
 
 start_k8s_tests:
+	cd node && cargo run --bin tester generate-config
 	$(MAKE) docker_test_image
 	minikube image load test-suite:latest
 	kubectl apply -f node/tests/test_deployments.yaml
From b97e0251cd8618ffb161cb64faa95e5138d9517a Mon Sep 17 00:00:00 2001
From: IAvecilla
Date: Thu, 15 Feb 2024 12:19:48 -0300
Subject: [PATCH 103/139] Add clap and tracing dependencies to the tester
 crate

---
 node/Cargo.lock       | 2 ++
 node/tests/Cargo.toml | 2 ++
 2 files changed, 4 insertions(+)

diff --git a/node/Cargo.lock b/node/Cargo.lock
index eae4f239..80df1538 100644
--- a/node/Cargo.lock
+++ b/node/Cargo.lock
@@ -2936,9 +2936,11 @@
 name = "tester"
 version = "0.1.0"
 dependencies = [
+ "clap",
  "jsonrpsee",
  "serde_json",
  "tokio",
+ "tracing",
  "zksync_consensus_tools",
 ]
 
diff --git a/node/tests/Cargo.toml b/node/tests/Cargo.toml
index fa1e9cd5..85d7d591 100644
--- a/node/tests/Cargo.toml
+++ b/node/tests/Cargo.toml
@@ -12,6 +12,8 @@ zksync_consensus_tools.workspace = true
 serde_json.workspace = true
 tokio.workspace = true
 jsonrpsee.workspace = true
+clap.workspace = true
+tracing.workspace = true
 
 [lints]
 workspace = true
From 1a2dc84b8a09afb0152f10e5ec311fbfcdbf94f3 Mon Sep 17 00:00:00 2001
From: IAvecilla
Date: Thu, 15 Feb 2024 12:20:06 -0300
Subject: [PATCH 104/139] Remove service creation

---
 node/tools/src/bin/deployer.rs    | 6 ------
 node/tools/src/rpc/methods/mod.rs | 2 +-
 2 files changed, 1 insertion(+), 7 deletions(-)

diff --git a/node/tools/src/bin/deployer.rs b/node/tools/src/bin/deployer.rs
index 07f4b457..fd1205cc 100644
--- a/node/tools/src/bin/deployer.rs
+++ b/node/tools/src/bin/deployer.rs
@@ -106,12 +106,6 @@ async fn deploy(nodes: usize) -> anyhow::Result<()> {
             NAMESPACE,
         )
         .await?;
-        k8s::create_or_reuse_service(
-            &client,
-            &format!("consensus-node-{i:0>2}"),
-            &format!("consensus-node-{i:0>2}"),
-        )
-        .await?;
     }
 
     // obtain seed peer(s) IP(s)
diff --git a/node/tools/src/rpc/methods/mod.rs b/node/tools/src/rpc/methods/mod.rs
index d1f9fa77..8c104ae5 100644
--- a/node/tools/src/rpc/methods/mod.rs
+++ 
b/node/tools/src/rpc/methods/mod.rs
@@ -11,5 +11,5 @@ pub trait RPCMethod {
 }
 
 pub(crate) mod config;
-pub(crate) mod health_check;
+pub mod health_check;
 pub(crate) mod peers;
From aeae5fb09d65fa75946310114414bcc65f47cd7d Mon Sep 17 00:00:00 2001
From: IAvecilla
Date: Thu, 15 Feb 2024 12:22:00 -0300
Subject: [PATCH 105/139] Ignore binaries for bin tools generated in docker

---
 .gitignore | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/.gitignore b/.gitignore
index c8556338..512b20b2 100644
--- a/.gitignore
+++ b/.gitignore
@@ -9,3 +9,6 @@ logs/
 .terraform
 .ssh
 **/*terraform.tfstate*
+
+# Binaries generated in Docker
+node/tools/docker_binaries
From 61fa5b76274c9c512a297f8bbfeb35241fd8b706 Mon Sep 17 00:00:00 2001
From: IAvecilla
Date: Thu, 15 Feb 2024 16:18:38 -0300
Subject: [PATCH 106/139] Move tester makefile targets to a new makefile inside
 that crate

---
 Makefile            | 16 ++--------------
 node/tests/Makefile | 13 +++++++++++++
 2 files changed, 15 insertions(+), 14 deletions(-)
 create mode 100644 node/tests/Makefile

diff --git a/Makefile b/Makefile
index f84ff7a9..c545bf71 100644
--- a/Makefile
+++ b/Makefile
@@ -15,16 +15,10 @@ nodes_config:
 # Docker commands
 
 docker_build_executor:
-	docker build --output=node/tools/docker_binaries --target=binary .
-
-docker_build_tester:
-	docker build --output=node/tools/docker_binaries --target=binary -f node/tests/Dockerfile .
+	docker build --output=node/tools/docker_binaries --target=executor-binary .
 
 docker_node_image:
-	docker build -t consensus-node --target=runtime .
-
-docker_test_image:
-	docker build -t test-suite -f node/tests/Dockerfile --target=runtime node
+	docker build -t consensus-node --target=executor-runtime .
 
 docker_nodes_config:
 	cd ${EXECUTABLE_NODE_DIR} && cargo run --bin localnet_config -- --input-addrs docker-config/addresses.txt --output-dir docker-config
@@ -49,12 +43,6 @@ start_k8s_nodes:
 	minikube image load consensus-node:latest
 	cd ${EXECUTABLE_NODE_DIR} && cargo run --bin deployer deploy --nodes ${NODES}
 
-start_k8s_tests:
-	cd node && cargo run --bin tester generate-config
-	$(MAKE) docker_test_image
-	minikube image load test-suite:latest
-	kubectl apply -f node/tests/test_deployments.yaml
-
 # Clean commands
diff --git a/node/tests/Makefile b/node/tests/Makefile
new file mode 100644
index 00000000..3aa70ec8
--- /dev/null
+++ b/node/tests/Makefile
@@ -0,0 +1,13 @@
+.PHONY: start_k8s_tests
+
+docker_test_image:
+	docker build -t test-suite --target=tester-runtime -f ../../Dockerfile ../../
+
+docker_build_tester:
+	docker build --output=../tools/docker_binaries --target=tester-binary -f ../../Dockerfile ../../
+
+start_k8s_tests:
+	cargo run --bin tester generate-config
+	$(MAKE) docker_test_image
+	minikube image load test-suite:latest
+	cargo run --bin tester start-pod
From 6480c23693c6d2c22dd0eb5df90ca1aa1a833d4c Mon Sep 17 00:00:00 2001
From: IAvecilla
Date: Thu, 15 Feb 2024 16:18:57 -0300
Subject: [PATCH 107/139] Fix tester entrypoint to run the tests

---
 node/tests/tester_entrypoint.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/node/tests/tester_entrypoint.sh b/node/tests/tester_entrypoint.sh
index e60dda6a..6f96aeb0 100644
--- a/node/tests/tester_entrypoint.sh
+++ b/node/tests/tester_entrypoint.sh
@@ -2,4 +2,4 @@
 # This file is the entrypoint of the Kubernetes pod that runs the tester binary copied inside it.
export RUST_LOG=INFO -./tester +./tester run From 636ab28c79f5059e7fbebde88c285e39c3f97be5 Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Thu, 15 Feb 2024 16:19:56 -0300 Subject: [PATCH 108/139] Add function to deploy pod for tests inside rust --- node/tests/src/main.rs | 13 ++++++++- node/tools/src/k8s.rs | 61 ++++++++++++++++++++++++++++++++++++------ 2 files changed, 65 insertions(+), 9 deletions(-) diff --git a/node/tests/src/main.rs b/node/tests/src/main.rs index 0c558948..04f2c801 100644 --- a/node/tests/src/main.rs +++ b/node/tests/src/main.rs @@ -22,6 +22,8 @@ struct TesterCLI { enum TesterCommands { /// Generate configs for the nodes. GenerateConfig, + /// Set up the test pod. + StartPod, /// Deploy the nodes. Run, } @@ -52,6 +54,11 @@ pub async fn generate_config() { } } +pub async fn start_tests_pod() { + let client = k8s::get_client().await.unwrap(); + k8s::create_tests_deployment(&client).await.unwrap(); +} + /// Sanity test for the RPC server. pub async fn sanity_test() { let config_file_path = get_config_path(); @@ -72,12 +79,16 @@ pub async fn sanity_test() { #[tokio::main] async fn main() { let args = TesterCLI::parse(); - tracing::trace!(?args, "Starting node"); + match args.command { TesterCommands::GenerateConfig => { generate_config().await; tracing::info!("Config succesfully generated") } + TesterCommands::StartPod => { + start_tests_pod().await; + tracing::info!("Pod started succesfully!") + } TesterCommands::Run => { sanity_test().await; tracing::info!("Test passed!") diff --git a/node/tools/src/k8s.rs b/node/tools/src/k8s.rs index 89df7fca..e7f9d714 100644 --- a/node/tools/src/k8s.rs +++ b/node/tools/src/k8s.rs @@ -82,6 +82,59 @@ pub async fn create_or_reuse_namespace(client: &Client, name: &str) -> anyhow::R } } +pub async fn create_tests_deployment(client: &Client) -> anyhow::Result<()> { + let deployment: Deployment = serde_json::from_value(json!({ + "apiVersion": "apps/v1", + "kind": "Deployment", + "metadata": { + "name": "tests-deployment", + "namespace": "consensus", + "labels": { + "app": "test-node" + } + }, + "spec": { + "selector": { + "matchLabels": { + "app": "test-node" + } + }, + "template": { + "metadata": { + "labels": { + "app": "test-node" + } + }, + "spec": { + "containers": [ + { + "name": "test-suite", + "image": "test-suite:latest", + "imagePullPolicy": "Never", + "command": [ + "./tester_entrypoint.sh" + ] + } + ] + } + } + } + }))?; + + let deployments: Api = Api::namespaced(client.clone(), "consensus"); + let post_params = PostParams::default(); + let result = deployments.create(&post_params, &deployment).await?; + + info!( + "Deployment: {} , created", + result + .metadata + .name + .context("Name not defined in metadata")? 
+    );
+    Ok(())
+}
+
 /// Creates a deployment
 pub async fn create_deployment(
     client: &Client,
@@ -167,14 +220,6 @@ pub async fn create_deployment(
     let deployments: Api<Deployment> = Api::namespaced(client.clone(), namespace);
     let post_params = PostParams::default();
     let result = deployments.create(&post_params, &deployment).await?;
-    // tokio::time::sleep(tokio::time::Duration::from_secs(15)).await;
-
-    // let pods: Api<Pod> = Api::namespaced(client.clone(), namespace);
-    // let lp = ListParams::default().labels(&format!("app={}", node_name));
-    // let pod = pods.list(&lp).await?;
-    // let a = pod.into_iter().find(|pod| pod.clone().metadata.name.unwrap().starts_with(node_name)).unwrap();
-    // let pf = pods.portforward(&a.metadata.name.unwrap(), &[3150, 3154]).await;
-    // println!("Portforward: {:?}", pf.is_ok());
 
     info!(
         "Deployment: {} , created",

From a15b7bfaa9bf8c74d36c6c2b71d44d2f253f5e01 Mon Sep 17 00:00:00 2001
From: IAvecilla
Date: Thu, 15 Feb 2024 16:20:09 -0300
Subject: [PATCH 109/139] Update Dockerfile with new commands

---
 Dockerfile | 27 +++++++++++++++++++++------
 1 file changed, 21 insertions(+), 6 deletions(-)

diff --git a/Dockerfile b/Dockerfile
index 5f274ec1..c28c5d71 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,16 +1,20 @@
 # Build Stage
 FROM rust:latest as builder
-COPY /node/ /node/
-WORKDIR /node
+COPY /node/ /app/
+WORKDIR /app
 RUN apt-get update && apt-get install -y libclang-dev
 RUN cargo build --release
 
 # Binary copy stage
-FROM scratch as binary
-COPY --from=builder /node/target/release/executor .
+FROM scratch as executor-binary
+COPY --from=builder /app/target/release/executor .
+
+# Tester binary copy stage
+FROM scratch as tester-binary
+COPY --from=builder /app/target/release/tester .
 
-# Runtime Stage
-FROM debian:stable-slim as runtime
+# Executor runtime Stage
+FROM debian:stable-slim as executor-runtime
 COPY /node/tools/docker_binaries/executor /node/
 COPY /node/tools/k8s_configs/ /node/k8s_config
@@ -26,3 +30,14 @@
 
 EXPOSE 3054
 EXPOSE 3051
+
+# Tester runtime Stage
+FROM debian:stable-slim as tester-runtime
+COPY node/tools/docker_binaries/tester /test/
+COPY node/tests/tester_entrypoint.sh /test/
+COPY node/tests/config.txt /test/
+
+WORKDIR /test
+
+RUN chmod +x tester_entrypoint.sh
+

From 1ae8e6b3d1f274970b03e4626d8211112ea052ef Mon Sep 17 00:00:00 2001
From: IAvecilla
Date: Thu, 15 Feb 2024 16:20:25 -0300
Subject: [PATCH 110/139] Add readme to run tests pod

---
 node/tests/README.md | 17 +++++++++++++++++
 1 file changed, 17 insertions(+)
 create mode 100644 node/tests/README.md

diff --git a/node/tests/README.md b/node/tests/README.md
new file mode 100644
index 00000000..8aead694
--- /dev/null
+++ b/node/tests/README.md
@@ -0,0 +1,17 @@
+# Tests overview
+
+This module was designed as a binary crate containing the tests to be run against the nodes running in kubernetes. These tests also run inside the same k8s cluster as the nodes, in a separate Pod.
+
+## How to run the tests Pod
+
+1. First you have to generate the `tester` executable that will be used inside the kubernetes Pod. To do this, use the following command:
+   > This could take a while, but it is only necessary the first time; if you already ran it, skip this step and go ahead with the next one.
+
+   ```
+   make docker_build_tester
+   ```
+2. Use the following command to run the tests.
This command will make a deployment, creating a pod in the kubernetes cluster and run the tests inside it + ``` + make start_k8s_tests + ``` + From 85b66d0e1eb0e7fe05a5383f543930093169f8b8 Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Thu, 15 Feb 2024 16:26:01 -0300 Subject: [PATCH 111/139] Delete outdated kubernetes yaml and dockerfile --- node/tests/Dockerfile | 17 ----------------- node/tests/test_deployments.yaml | 21 --------------------- 2 files changed, 38 deletions(-) delete mode 100644 node/tests/Dockerfile delete mode 100644 node/tests/test_deployments.yaml diff --git a/node/tests/Dockerfile b/node/tests/Dockerfile deleted file mode 100644 index fa733b0d..00000000 --- a/node/tests/Dockerfile +++ /dev/null @@ -1,17 +0,0 @@ -FROM rust:latest as builder -COPY /node /test/ -WORKDIR /test -RUN apt-get update && apt-get install -y libclang-dev -RUN cargo build --release - -FROM scratch as binary -COPY --from=builder test/target/release/tester . - -FROM debian:stable-slim as runtime -COPY /tools/docker_binaries/tester /test/ -COPY /tests/tester_entrypoint.sh /test/ -COPY /tests/config.txt /test/ - -WORKDIR /test - -RUN chmod +x tester_entrypoint.sh diff --git a/node/tests/test_deployments.yaml b/node/tests/test_deployments.yaml deleted file mode 100644 index b1a882eb..00000000 --- a/node/tests/test_deployments.yaml +++ /dev/null @@ -1,21 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: tests-deployment - namespace: consensus - labels: - app: test-node -spec: - selector: - matchLabels: - app: test-node - template: - metadata: - labels: - app: test-node - spec: - containers: - - name: test-suite - image: test-suite:latest - imagePullPolicy: Never - command: ["./tester_entrypoint.sh"] From 5e8b01105270d67bb3d0c44f139fabaac8b7a312 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Esteban=20Dimitroff=20H=C3=B3di?= Date: Thu, 15 Feb 2024 17:48:42 -0300 Subject: [PATCH 112/139] Several corrections on naming and coding style --- node/tools/src/bin/deployer.rs | 11 ++++------- node/tools/src/config.rs | 18 ++++++++---------- node/tools/src/k8s.rs | 19 ++++++++++--------- node/tools/src/lib.rs | 2 +- 4 files changed, 23 insertions(+), 27 deletions(-) diff --git a/node/tools/src/bin/deployer.rs b/node/tools/src/bin/deployer.rs index fd1205cc..4cce2eb7 100644 --- a/node/tools/src/bin/deployer.rs +++ b/node/tools/src/bin/deployer.rs @@ -8,12 +8,9 @@ use clap::{Parser, Subcommand}; use rand::Rng; use zksync_consensus_crypto::{Text, TextFmt}; use zksync_consensus_roles::node::SecretKey; -use zksync_consensus_tools::k8s; -use zksync_consensus_tools::AppConfig; -use zksync_consensus_tools::NodeAddr; +use zksync_consensus_tools::{k8s, AppConfig, NodeAddr, NODES_PORT}; const NAMESPACE: &str = "consensus"; -const NODES_PORT: u16 = 3054; /// Command line arguments. 
#[derive(Debug, Parser)] @@ -98,7 +95,7 @@ async fn deploy(nodes: usize) -> anyhow::Result<()> { // deploy seed peer(s) for i in 0..seed_nodes { - k8s::create_deployment( + k8s::deploy_node( &client, i, true, @@ -109,7 +106,7 @@ async fn deploy(nodes: usize) -> anyhow::Result<()> { } // obtain seed peer(s) IP(s) - let peer_ips = k8s::get_seed_node_addrs(&client, seed_nodes).await?; + let peer_ips = k8s::get_seed_node_addrs(&client, seed_nodes, NAMESPACE).await?; let mut peers = vec![]; @@ -125,7 +122,7 @@ async fn deploy(nodes: usize) -> anyhow::Result<()> { // deploy the rest of nodes for i in seed_nodes..nodes { - k8s::create_deployment(&client, i, false, peers.clone(), NAMESPACE).await?; + k8s::deploy_node(&client, i, false, peers.clone(), NAMESPACE).await?; } Ok(()) diff --git a/node/tools/src/config.rs b/node/tools/src/config.rs index 69b603ed..4c17bc0d 100644 --- a/node/tools/src/config.rs +++ b/node/tools/src/config.rs @@ -18,7 +18,7 @@ use zksync_consensus_storage::{BlockStore, BlockStoreRunner, PersistentBlockStor use zksync_protobuf::{required, serde::Serde, ProtoFmt}; /// Ports for the nodes to listen on kubernetes pod. -const NODES_PORT: u16 = 3054; +pub const NODES_PORT: u16 = 3054; /// Decodes a proto message from json for arbitrary ProtoFmt. pub fn decode_json(json: &str) -> anyhow::Result { @@ -89,15 +89,6 @@ pub struct AppConfig { pub gossip_static_outbound: HashMap, } -impl AppConfig { - pub fn check_public_addr(&mut self) -> anyhow::Result<()> { - if let Ok(public_addr) = std::env::var("PUBLIC_ADDR") { - self.public_addr = SocketAddr::from_str(&format!("{public_addr}:{NODES_PORT}"))?; - } - Ok(()) - } -} - impl ProtoFmt for AppConfig { type Proto = proto::AppConfig; @@ -304,6 +295,13 @@ impl AppConfig { self.max_payload_size = max_payload_size; self } + + pub fn check_public_addr(&mut self) -> anyhow::Result<()> { + if let Ok(public_addr) = std::env::var("PUBLIC_ADDR") { + self.public_addr = SocketAddr::from_str(&format!("{public_addr}:{NODES_PORT}"))?; + } + Ok(()) + } } impl Configs { diff --git a/node/tools/src/k8s.rs b/node/tools/src/k8s.rs index c414b593..1545359d 100644 --- a/node/tools/src/k8s.rs +++ b/node/tools/src/k8s.rs @@ -30,9 +30,9 @@ pub async fn create_or_reuse_namespace(client: &Client, name: &str) -> anyhow::R "apiVersion": "v1", "kind": "Namespace", "metadata": { - "name": "consensus", + "name": name, "labels": { - "name": "consensus" + "name": name } } }))?; @@ -64,22 +64,22 @@ pub async fn create_or_reuse_namespace(client: &Client, name: &str) -> anyhow::R } /// Creates a deployment -pub async fn create_deployment( +pub async fn deploy_node( client: &Client, - node_number: usize, + node_index: usize, is_seed: bool, peers: Vec, namespace: &str, ) -> anyhow::Result<()> { let cli_args = get_cli_args(peers); - let node_name = format!("consensus-node-{node_number:0>2}"); - let node_id = format!("node_{node_number:0>2}"); + let node_name = format!("consensus-node-{node_index:0>2}"); + let node_id = format!("node_{node_index:0>2}"); let deployment: Deployment = serde_json::from_value(json!({ "apiVersion": "apps/v1", "kind": "Deployment", "metadata": { "name": node_name, - "namespace": "consensus" + "namespace": namespace }, "spec": { "selector": { @@ -120,7 +120,7 @@ pub async fn create_deployment( "imagePullPolicy": "Never", "ports": [ { - "containerPort": 3054 + "containerPort": config::NODES_PORT }, { "containerPort": 3154 @@ -163,9 +163,10 @@ pub async fn create_deployment( pub async fn get_seed_node_addrs( client: &Client, amount: usize, + namespace: 
&str, ) -> anyhow::Result> { let mut seed_nodes = HashMap::new(); - let pods: Api = Api::namespaced(client.clone(), "consensus"); + let pods: Api = Api::namespaced(client.clone(), namespace); // Will retry 15 times during 15 seconds to allow pods to start and obtain an IP let retry_strategy = FixedInterval::from_millis(1000).take(15); diff --git a/node/tools/src/lib.rs b/node/tools/src/lib.rs index 229d98c1..e0c8d2a1 100644 --- a/node/tools/src/lib.rs +++ b/node/tools/src/lib.rs @@ -9,5 +9,5 @@ mod store; #[cfg(test)] mod tests; -pub use config::{decode_json, AppConfig, ConfigPaths, NodeAddr}; +pub use config::{decode_json, AppConfig, ConfigPaths, NodeAddr, NODES_PORT}; pub use rpc::server::RPCServer; From 6ef448e6d69d877127ac79f5c7ae55710d4b1f9c Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Fri, 16 Feb 2024 10:27:49 -0300 Subject: [PATCH 113/139] Fix missing documentation --- node/tests/src/main.rs | 3 +++ node/tools/src/config.rs | 1 + node/tools/src/rpc/methods/config.rs | 1 + 3 files changed, 5 insertions(+) diff --git a/node/tests/src/main.rs b/node/tests/src/main.rs index 04f2c801..72b56f05 100644 --- a/node/tests/src/main.rs +++ b/node/tests/src/main.rs @@ -28,6 +28,8 @@ enum TesterCommands { Run, } +/// Get the path of the node ips config file. +/// This way we can run the test from every directory and also inside kubernetes pod. fn get_config_path() -> String { // This way we can run the test from every directory and also inside kubernetes pod. let manifest_path = std::env::var("CARGO_MANIFEST_DIR"); @@ -54,6 +56,7 @@ pub async fn generate_config() { } } +/// Start the tests pod in the kubernetes cluster. pub async fn start_tests_pod() { let client = k8s::get_client().await.unwrap(); k8s::create_tests_deployment(&client).await.unwrap(); diff --git a/node/tools/src/config.rs b/node/tools/src/config.rs index 69b603ed..b81960f4 100644 --- a/node/tools/src/config.rs +++ b/node/tools/src/config.rs @@ -34,6 +34,7 @@ pub(crate) fn encode_json(x: &T) -> String { encode_with_serializer(x, s) } +/// Encodes a generated proto message to json for arbitrary ProtoFmt with a custom serializer. pub(crate) fn encode_with_serializer( x: &T, mut serializer: Serializer, F>, diff --git a/node/tools/src/rpc/methods/config.rs b/node/tools/src/rpc/methods/config.rs index 0d22ec4d..20094dd6 100644 --- a/node/tools/src/rpc/methods/config.rs +++ b/node/tools/src/rpc/methods/config.rs @@ -12,6 +12,7 @@ pub(crate) struct ConfigInfo; // RPCMethod trait should be more general to allow external parameters like this case // TODO fix the trait and implement this code in it impl ConfigInfo { + /// Returns the config of the node, this reads the config.json file so does not support live updates. pub(crate) fn info(config: AppConfig) -> Result { // This may change in the future since we are assuming that the executor binary is being run inside the config directory. 
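+        // The response wraps the protobuf-JSON-encoded AppConfig under a single "config" key.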
Ok(serde_json::json!({ From 4cf876f644c5cce5412e23dd9f936f2e02009e0f Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Fri, 16 Feb 2024 10:28:13 -0300 Subject: [PATCH 114/139] Fix deny toml file --- node/deny.toml | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/node/deny.toml b/node/deny.toml index 37f6ede5..6bda1f83 100644 --- a/node/deny.toml +++ b/node/deny.toml @@ -40,6 +40,18 @@ allow = [ "MPL-2.0", ] +[[licenses.clarify]] +name = "ring" +# SPDX considers OpenSSL to encompass both the OpenSSL and SSLeay licenses +# https://spdx.org/licenses/OpenSSL.html +# ISC - Both BoringSSL and ring use this for their new files +# MIT - "Files in third_party/ have their own licenses, as described therein. The MIT +# license, for third_party/fiat, which, unlike other third_party directories, is +# compiled into non-test libraries, is included below." +# OpenSSL - Obviously +expression = "MIT" +license-files = [{ path = "LICENSE", hash = 0xbd0eed23 }] + [bans] # Lint level for when multiple versions of the same crate are detected multiple-versions = "deny" @@ -60,6 +72,9 @@ skip = [ { name = "base64", version = "0.13.1" }, { name = "block-buffer", version = "0.9.0" }, { name = "digest", version = "0.10.7" }, + + # Old versions required by kube. + { name = "strsim", version = "0.10.0" } ] [sources] From c62f0b4953872c35bc22dba1b001b21ab086aea2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Esteban=20Dimitroff=20H=C3=B3di?= Date: Fri, 16 Feb 2024 11:51:29 -0300 Subject: [PATCH 115/139] Fixed deny.toml for kube.rs --- node/deny.toml | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/node/deny.toml b/node/deny.toml index 37f6ede5..6bda1f83 100644 --- a/node/deny.toml +++ b/node/deny.toml @@ -40,6 +40,18 @@ allow = [ "MPL-2.0", ] +[[licenses.clarify]] +name = "ring" +# SPDX considers OpenSSL to encompass both the OpenSSL and SSLeay licenses +# https://spdx.org/licenses/OpenSSL.html +# ISC - Both BoringSSL and ring use this for their new files +# MIT - "Files in third_party/ have their own licenses, as described therein. The MIT +# license, for third_party/fiat, which, unlike other third_party directories, is +# compiled into non-test libraries, is included below." +# OpenSSL - Obviously +expression = "MIT" +license-files = [{ path = "LICENSE", hash = 0xbd0eed23 }] + [bans] # Lint level for when multiple versions of the same crate are detected multiple-versions = "deny" @@ -60,6 +72,9 @@ skip = [ { name = "base64", version = "0.13.1" }, { name = "block-buffer", version = "0.9.0" }, { name = "digest", version = "0.10.7" }, + + # Old versions required by kube. + { name = "strsim", version = "0.10.0" } ] [sources] From 2e89af872ddb97d333295aaab52b571a3eb253df Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Esteban=20Dimitroff=20H=C3=B3di?= Date: Mon, 19 Feb 2024 10:16:12 -0300 Subject: [PATCH 116/139] Fixed clippy suggestions --- node/tools/src/config.rs | 1 + node/tools/src/rpc/methods/config.rs | 1 + node/tools/src/tests.rs | 4 ++-- 3 files changed, 4 insertions(+), 2 deletions(-) diff --git a/node/tools/src/config.rs b/node/tools/src/config.rs index 4c17bc0d..55439d32 100644 --- a/node/tools/src/config.rs +++ b/node/tools/src/config.rs @@ -34,6 +34,7 @@ pub(crate) fn encode_json(x: &T) -> String { encode_with_serializer(x, s) } +/// Encodes a generated proto message for arbitrary ProtoFmt with provided serializer. 
pub(crate) fn encode_with_serializer( x: &T, mut serializer: Serializer, F>, diff --git a/node/tools/src/rpc/methods/config.rs b/node/tools/src/rpc/methods/config.rs index 0d22ec4d..f03e9353 100644 --- a/node/tools/src/rpc/methods/config.rs +++ b/node/tools/src/rpc/methods/config.rs @@ -12,6 +12,7 @@ pub(crate) struct ConfigInfo; // RPCMethod trait should be more general to allow external parameters like this case // TODO fix the trait and implement this code in it impl ConfigInfo { + /// Provide the node's config information pub(crate) fn info(config: AppConfig) -> Result { // This may change in the future since we are assuming that the executor binary is being run inside the config directory. Ok(serde_json::json!({ diff --git a/node/tools/src/tests.rs b/node/tools/src/tests.rs index aa4e8fbb..f758349c 100644 --- a/node/tools/src/tests.rs +++ b/node/tools/src/tests.rs @@ -23,10 +23,10 @@ impl Distribution for Standard { .with_gossip_dynamic_inbound_limit(rng.gen()) .with_gossip_dynamic_inbound_limit(rng.gen()) .with_max_payload_size(rng.gen()); - (0..5).into_iter().for_each(|_| { + (0..5).for_each(|_| { let _ = config.add_gossip_static_inbound(rng.gen::().public()); }); - (0..6).into_iter().for_each(|_| { + (0..6).for_each(|_| { let _ = config .add_gossip_static_outbound(rng.gen::().public(), make_addr(rng)); }); From fb0c345ddc39fac72002b9dab664c76d34f4dba8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Esteban=20Dimitroff=20H=C3=B3di?= Date: Mon, 19 Feb 2024 16:10:22 -0300 Subject: [PATCH 117/139] Added amount of seed nodes as cli argument --- Makefile | 3 ++- node/tools/src/bin/deployer.rs | 14 +++++++++----- 2 files changed, 11 insertions(+), 6 deletions(-) diff --git a/Makefile b/Makefile index 2f2727f0..db4a4a9a 100644 --- a/Makefile +++ b/Makefile @@ -3,6 +3,7 @@ NODE?=0 DOCKER_IP=172.12.0.10 EXECUTABLE_NODE_DIR=node/tools NODES=4 +SEED_NODES=1 # Locally run commands @@ -41,7 +42,7 @@ start_k8s_nodes: cd ${EXECUTABLE_NODE_DIR} && cargo run --release --bin deployer generate-config --nodes ${NODES} $(MAKE) docker_node_image minikube image load consensus-node:latest - cd ${EXECUTABLE_NODE_DIR} && cargo run --release --bin deployer deploy --nodes ${NODES} + cd ${EXECUTABLE_NODE_DIR} && cargo run --release --bin deployer deploy --nodes ${NODES} --seed-nodes ${SEED_NODES} # Clean commands diff --git a/node/tools/src/bin/deployer.rs b/node/tools/src/bin/deployer.rs index 4cce2eb7..2d0d31a7 100644 --- a/node/tools/src/bin/deployer.rs +++ b/node/tools/src/bin/deployer.rs @@ -10,6 +10,7 @@ use zksync_consensus_crypto::{Text, TextFmt}; use zksync_consensus_roles::node::SecretKey; use zksync_consensus_tools::{k8s, AppConfig, NodeAddr, NODES_PORT}; +/// K8s namespace for consensus nodes. const NAMESPACE: &str = "consensus"; /// Command line arguments. @@ -24,9 +25,12 @@ struct DeployerCLI { /// Subcommand arguments. #[derive(Debug, Parser)] struct SubCommandArgs { - /// Number of nodes to deploy. + /// Number of total nodes to deploy. #[arg(long)] nodes: usize, + /// Number of seed nodes to deploy. + #[arg(long)] + seed_nodes: Option, } /// Subcommands. @@ -86,12 +90,11 @@ fn generate_config(nodes: usize) -> anyhow::Result<()> { } /// Deploys the nodes to the kubernetes cluster. 
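+/// When `--seed-nodes` is omitted on the command line, a single seed node is assumed.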
-async fn deploy(nodes: usize) -> anyhow::Result<()> { +async fn deploy(nodes: usize, seed_nodes: Option) -> anyhow::Result<()> { let client = k8s::get_client().await?; k8s::create_or_reuse_namespace(&client, NAMESPACE).await?; - // 20% of the nodes will be seed nodes - let seed_nodes = (nodes as f32 * 0.2).ceil() as usize; + let seed_nodes = seed_nodes.unwrap_or(1); // deploy seed peer(s) for i in 0..seed_nodes { @@ -128,6 +131,7 @@ async fn deploy(nodes: usize) -> anyhow::Result<()> { Ok(()) } +/// Obtain node key from config file. fn read_node_key_from_config(node_id: &String) -> anyhow::Result { let manifest_path = std::env::var("CARGO_MANIFEST_DIR")?; let root = PathBuf::from(manifest_path).join("k8s_configs"); @@ -142,6 +146,6 @@ async fn main() -> anyhow::Result<()> { match command { DeployerCommands::GenerateConfig(args) => generate_config(args.nodes), - DeployerCommands::Deploy(args) => deploy(args.nodes).await, + DeployerCommands::Deploy(args) => deploy(args.nodes, args.seed_nodes).await, } } From 23a77614055770d11887aefa062ae60c356a7a6e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Esteban=20Dimitroff=20H=C3=B3di?= Date: Tue, 20 Feb 2024 16:30:42 -0300 Subject: [PATCH 118/139] Added brief description for k8s deployment in README.md --- node/tools/README.md | 48 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 48 insertions(+) diff --git a/node/tools/README.md b/node/tools/README.md index 0ee9d3b0..b6349665 100644 --- a/node/tools/README.md +++ b/node/tools/README.md @@ -53,3 +53,51 @@ make clean_docker ``` > This deletes the generated images and containers, requiring regeneration. + + +## Running in minikube + +To run a number of nodes locally in minikube, first we need to build the binary: + +```bash +make docker_build_executor +``` + +This command will create the `executor` binary to be included in a docker image. + +Before running the deployment script, ensure minikube is installed and running: + +```bash +minikube start +``` + +Then run + +```bash +make start_k8s_nodes NODES= SEED_NODES= +``` + +Here, `NODES` is the desired total amount of nodes to run in the k8s cluster (defaults to 4 if omitted), and `SEED_NODES` is the amount of those nodes to be deployed first as seed nodes (defaults to 1). + +This command will: +- Generate the configuration files for each node (this step may change in the future, as we may not need to include configuration files in the images) +- Build a docker image with the configuration files and the binary created in previous step +- Deploy the initial seed nodes +- Obtain the internal IP addresses of the seed nodes +- Deploy the rest of the nodes providing the seed nodes IP addresses to establish the connections + +You may run + +```bash +minikube dashboard +``` + +To start the minikube dashboard in order to inspect the deployed pods. Remember to use `consensus` namespace to find all consensus related infrastructure. + +Finally to clean everything up + +```bash +minikube delete --all +``` + +will remove all namespaces, deployments and pods from the minikube environment. 
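+
+For example, a complete local session using the defaults assumed above (4 nodes, 1 of them a seed) might look like:
+
+```bash
+make docker_build_executor      # build the executor binary once
+minikube start                  # bring up the local cluster
+make start_k8s_nodes NODES=4 SEED_NODES=1
+kubectl get pods -n consensus   # inspect the deployed nodes
+minikube delete --all           # tear everything down
+```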
\ No newline at end of file From 7dbf270ff4abb5d138571267cadb3e8680267910 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Esteban=20Dimitroff=20H=C3=B3di?= Date: Tue, 20 Feb 2024 17:46:09 -0300 Subject: [PATCH 119/139] Unified node_name and node_id values --- Makefile | 2 +- node/tools/src/bin/deployer.rs | 4 ++-- node/tools/src/k8s.rs | 5 ++--- 3 files changed, 5 insertions(+), 6 deletions(-) diff --git a/Makefile b/Makefile index db4a4a9a..e3d047a5 100644 --- a/Makefile +++ b/Makefile @@ -26,7 +26,7 @@ docker_nodes_config: docker_node: $(MAKE) docker_node_image - docker run -d --name consensus-node-${NODE} --env NODE_ID="node_${NODE}" consensus-node + docker run -d --name consensus-node-${NODE} --env NODE_ID="consensus-node-${NODE}" consensus-node consensus_docker_example: mkdir -p ${EXECUTABLE_NODE_DIR}/docker-config diff --git a/node/tools/src/bin/deployer.rs b/node/tools/src/bin/deployer.rs index 2d0d31a7..28ba2cf9 100644 --- a/node/tools/src/bin/deployer.rs +++ b/node/tools/src/bin/deployer.rs @@ -69,7 +69,7 @@ fn generate_config(nodes: usize) -> anyhow::Result<()> { let root = PathBuf::from(manifest_path).join("k8s_configs"); let _ = fs::remove_dir_all(&root); for (i, cfg) in cfgs.into_iter().enumerate() { - let node_config_dir = root.join(format!("node_{i:0>2}")); + let node_config_dir = root.join(format!("consensus-node-{i:0>2}")); fs::create_dir_all(&node_config_dir) .with_context(|| format!("create_dir_all({:?})", node_config_dir))?; @@ -114,7 +114,7 @@ async fn deploy(nodes: usize, seed_nodes: Option) -> anyhow::Result<()> { let mut peers = vec![]; for i in 0..seed_nodes { - let node_id = &format!("node_{i:0>2}"); + let node_id = &format!("consensus-node-{i:0>2}"); let node_key = read_node_key_from_config(node_id)?; let address = peer_ips.get(node_id).context("IP address not found")?; peers.push(NodeAddr { diff --git a/node/tools/src/k8s.rs b/node/tools/src/k8s.rs index 1545359d..50bf005f 100644 --- a/node/tools/src/k8s.rs +++ b/node/tools/src/k8s.rs @@ -73,7 +73,6 @@ pub async fn deploy_node( ) -> anyhow::Result<()> { let cli_args = get_cli_args(peers); let node_name = format!("consensus-node-{node_index:0>2}"); - let node_id = format!("node_{node_index:0>2}"); let deployment: Deployment = serde_json::from_value(json!({ "apiVersion": "apps/v1", "kind": "Deployment", @@ -92,7 +91,7 @@ pub async fn deploy_node( "metadata": { "labels": { "app": node_name, - "id": node_id, + "id": node_name, "seed": is_seed.to_string() } }, @@ -104,7 +103,7 @@ pub async fn deploy_node( "env": [ { "name": "NODE_ID", - "value": node_id + "value": node_name }, { "name": "PUBLIC_ADDR", From 53b805d4b4194d4a2d8e3b79d1e5229b81321445 Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Tue, 20 Feb 2024 18:53:28 -0300 Subject: [PATCH 120/139] Add node storage as parameter for the rpc server --- node/tools/src/main.rs | 5 ++++- node/tools/src/rpc/server.rs | 16 ++++++++++++---- 2 files changed, 16 insertions(+), 5 deletions(-) diff --git a/node/tools/src/main.rs b/node/tools/src/main.rs index 428c6c09..ed417fc4 100644 --- a/node/tools/src/main.rs +++ b/node/tools/src/main.rs @@ -2,6 +2,7 @@ //! manages communication between the actors. It is the main executable in this workspace. 
use anyhow::Context as _; use clap::Parser; +use k8s_openapi::api::node; use std::{fs, io::IsTerminal as _, path::PathBuf}; use tracing::metadata::LevelFilter; use tracing_subscriber::{prelude::*, Registry}; @@ -125,9 +126,11 @@ async fn main() -> anyhow::Result<()> { rpc_addr.set_port(rpc_addr.port() + 100); } + let node_storage = executor.block_store.clone(); + // cloning configuration to let RPCServer show it // TODO this should be queried in real time instead, to reflect any possible change in config - let rpc_server = RPCServer::new(rpc_addr, configs.app.clone()); + let rpc_server = RPCServer::new(rpc_addr, configs.app.clone(), node_storage); // Initialize the storage. scope::run!(ctx, |ctx, s| async { diff --git a/node/tools/src/rpc/server.rs b/node/tools/src/rpc/server.rs index 3b66dfa0..dc420f16 100644 --- a/node/tools/src/rpc/server.rs +++ b/node/tools/src/rpc/server.rs @@ -1,9 +1,12 @@ use crate::AppConfig; -use super::methods::{config::ConfigInfo, health_check::HealthCheck, peers::PeersInfo, RPCMethod}; +use super::methods::{ + config::ConfigInfo, health_check::HealthCheck, last_view::LastView, peers::PeersInfo, RPCMethod, +}; use jsonrpsee::server::{middleware::http::ProxyGetRequestLayer, RpcModule, Server}; -use std::net::SocketAddr; +use std::{net::SocketAddr, sync::Arc}; use zksync_concurrency::{ctx, scope}; +use zksync_consensus_storage::BlockStore; /// RPC server. pub struct RPCServer { @@ -11,11 +14,16 @@ pub struct RPCServer { ip_address: SocketAddr, /// AppConfig config: AppConfig, + node_storage: Arc, } impl RPCServer { - pub fn new(ip_address: SocketAddr, config: AppConfig) -> Self { - Self { ip_address, config } + pub fn new(ip_address: SocketAddr, config: AppConfig, node_storage: Arc) -> Self { + Self { + ip_address, + config, + node_storage, + } } /// Runs the RPC server. From c72b9cac4f6d97c1069ebf02ead661f2974a4121 Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Tue, 20 Feb 2024 18:53:49 -0300 Subject: [PATCH 121/139] Create new endpoint to check last view of the node --- .../roles/src/validator/messages/consensus.rs | 3 +- node/tools/src/rpc/methods/last_view.rs | 50 +++++++++++++++++++ node/tools/src/rpc/methods/mod.rs | 1 + node/tools/src/rpc/server.rs | 9 ++++ 4 files changed, 62 insertions(+), 1 deletion(-) create mode 100644 node/tools/src/rpc/methods/last_view.rs diff --git a/node/libs/roles/src/validator/messages/consensus.rs b/node/libs/roles/src/validator/messages/consensus.rs index d7a1d9ba..927f26f9 100644 --- a/node/libs/roles/src/validator/messages/consensus.rs +++ b/node/libs/roles/src/validator/messages/consensus.rs @@ -4,6 +4,7 @@ use super::{BlockHeader, Msg, Payload, Signed}; use crate::{validator, validator::Signature}; use anyhow::bail; use bit_vec::BitVec; +use serde::Serialize; use std::collections::{BTreeMap, BTreeSet}; use zksync_consensus_utils::enum_util::{BadVariantError, Variant}; @@ -417,7 +418,7 @@ impl ValidatorSet { } /// A struct that represents a view number. -#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize)] pub struct ViewNumber(pub u64); impl ViewNumber { diff --git a/node/tools/src/rpc/methods/last_view.rs b/node/tools/src/rpc/methods/last_view.rs new file mode 100644 index 00000000..2f86631c --- /dev/null +++ b/node/tools/src/rpc/methods/last_view.rs @@ -0,0 +1,50 @@ +//! Peers method for RPC server. 
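+//! Exposes the last view number seen by this node, read from its replica state in the block store.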
+use crate::{config::encode_json, decode_json, AppConfig}; + +use super::RPCMethod; +use jsonrpsee::types::{error::ErrorCode, Params}; +use std::{fs, sync::Arc}; +use zksync_consensus_storage::{BlockStore, ReplicaState}; +use zksync_protobuf::serde::Serde; + +/// Config method for RPC server. +pub(crate) struct LastView; + +impl LastView { + /// Provide the node's config information + pub(crate) fn info(node_storage: Arc) -> Result { + let block = node_storage; + let sub = &mut block.subscribe(); + let state = sub.borrow().clone(); + let replica_state = ReplicaState::from(state.last).view; + let replica_state = + serde_json::to_value(replica_state).map_err(|_e| ErrorCode::InternalError); + replica_state + } +} + +impl RPCMethod for LastView { + /// Config response for /config endpoint. + fn callback(_params: Params) -> Result { + // This may change in the future since we are assuming that the executor binary is being run inside the config directory. + let node_config = + fs::read_to_string("config.json").map_err(|_e| ErrorCode::InternalError)?; + let node_config = decode_json::>(&node_config) + .map_err(|_e| ErrorCode::InternalError)? + .0; + let config = encode_json(&Serde(node_config)); + Ok(serde_json::json!({ + "config": config + })) + } + + /// Config method name. + fn method() -> &'static str { + "last_view" + } + + /// Method path for GET requests. + fn path() -> &'static str { + "/last_view" + } +} diff --git a/node/tools/src/rpc/methods/mod.rs b/node/tools/src/rpc/methods/mod.rs index 8c104ae5..bcdf6a98 100644 --- a/node/tools/src/rpc/methods/mod.rs +++ b/node/tools/src/rpc/methods/mod.rs @@ -12,4 +12,5 @@ pub trait RPCMethod { pub(crate) mod config; pub mod health_check; +pub mod last_view; pub(crate) mod peers; diff --git a/node/tools/src/rpc/server.rs b/node/tools/src/rpc/server.rs index dc420f16..2d68bc5e 100644 --- a/node/tools/src/rpc/server.rs +++ b/node/tools/src/rpc/server.rs @@ -42,6 +42,10 @@ impl RPCServer { .layer(ProxyGetRequestLayer::new( ConfigInfo::path(), ConfigInfo::method(), + )?) + .layer(ProxyGetRequestLayer::new( + LastView::path(), + LastView::method(), )?); let server = Server::builder() @@ -61,6 +65,11 @@ impl RPCServer { ConfigInfo::info(config.clone()) })?; + let node_storage = self.node_storage.clone(); + module.register_method(LastView::method(), move |_params, _| { + LastView::info(node_storage.clone()) + })?; + let handle = server.start(module); scope::run!(ctx, |ctx, s| async { s.spawn_bg(async { From 9b309c45bce1883be7ab53891d3bbb63bd9ee9bb Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Wed, 21 Feb 2024 17:19:08 -0300 Subject: [PATCH 122/139] Remove unwraps and improve error handling --- node/tests/src/main.rs | 38 +++++++++++++++++++++----------------- 1 file changed, 21 insertions(+), 17 deletions(-) diff --git a/node/tests/src/main.rs b/node/tests/src/main.rs index 72b56f05..cf2161c7 100644 --- a/node/tests/src/main.rs +++ b/node/tests/src/main.rs @@ -1,6 +1,7 @@ //! This is a simple test for the RPC server. It checks if the server is running and can respond to. -use std::{fs, io::Write}; +use std::{fs, io::Write, path::PathBuf}; +use anyhow::Context; use clap::{Parser, Subcommand}; use jsonrpsee::{core::client::ClientT, http_client::HttpClientBuilder, rpc_params, types::Params}; use zksync_consensus_tools::{ @@ -30,36 +31,39 @@ enum TesterCommands { /// Get the path of the node ips config file. /// This way we can run the test from every directory and also inside kubernetes pod. 
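+/// Falls back to a plain `config.txt` in the current directory when `CARGO_MANIFEST_DIR` is not set (e.g. inside the pod).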
-fn get_config_path() -> String { +fn get_config_path() -> PathBuf { // This way we can run the test from every directory and also inside kubernetes pod. let manifest_path = std::env::var("CARGO_MANIFEST_DIR"); if let Ok(manifest) = manifest_path { - format!("{}/config.txt", manifest) + PathBuf::from(&format!("{}/config.txt", manifest)) } else { - "config.txt".to_owned() + PathBuf::from("config.txt") } } /// Generate a config file with the IPs of the consensus nodes in the kubernetes cluster. -pub async fn generate_config() { - let client = k8s::get_client().await.unwrap(); - let pods_ip = k8s::get_consensus_node_ips(&client).await.unwrap(); - let config_file_path: String = get_config_path(); - for ip in pods_ip { +pub async fn generate_config() -> anyhow::Result<()> { + let client = k8s::get_client().await?; + let pods_ip = k8s::get_consensus_node_ips(&client).await?; + let config_file_path = get_config_path(); + for addr in pods_ip { let mut config_file = fs::OpenOptions::new() .write(true) .create(true) .truncate(true) - .open(&config_file_path) - .unwrap(); - config_file.write_all(ip.as_bytes()).unwrap(); + .open(&config_file_path)?; + config_file.write_all(addr.to_string().as_bytes())?; } + Ok(()) } /// Start the tests pod in the kubernetes cluster. -pub async fn start_tests_pod() { - let client = k8s::get_client().await.unwrap(); - k8s::create_tests_deployment(&client).await.unwrap(); +pub async fn start_tests_pod() -> anyhow::Result<()> { + let client = k8s::get_client().await?; + k8s::create_tests_deployment(&client) + .await + .context("Failed to create tests pod")?; + Ok(()) } /// Sanity test for the RPC server. @@ -85,11 +89,11 @@ async fn main() { match args.command { TesterCommands::GenerateConfig => { - generate_config().await; + let _ = generate_config().await; tracing::info!("Config succesfully generated") } TesterCommands::StartPod => { - start_tests_pod().await; + let _ = start_tests_pod().await; tracing::info!("Pod started succesfully!") } TesterCommands::Run => { From f15d2bf704cbe6773a3be83084bd9eecfe47ed0c Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Wed, 21 Feb 2024 17:21:53 -0300 Subject: [PATCH 123/139] Change hardcoded deployment json to deployment strcut --- node/tools/src/k8s.rs | 98 ++++++++++++++++++++++++------------------- 1 file changed, 56 insertions(+), 42 deletions(-) diff --git a/node/tools/src/k8s.rs b/node/tools/src/k8s.rs index fec70e4a..4ce285f1 100644 --- a/node/tools/src/k8s.rs +++ b/node/tools/src/k8s.rs @@ -1,16 +1,19 @@ use crate::{config, NodeAddr}; -use anyhow::{anyhow, Context}; -use k8s_openapi::api::{ - apps::v1::Deployment, - core::v1::{Namespace, Pod}, +use anyhow::{anyhow, bail, Context}; +use k8s_openapi::{ + api::{ + apps::v1::{Deployment, DeploymentSpec}, + core::v1::{Container, Namespace, Pod, PodSpec, PodStatus, PodTemplateSpec}, + }, + apimachinery::pkg::apis::meta::v1::LabelSelector, }; use kube::{ api::{ListParams, PostParams}, - core::ObjectList, + core::{ObjectList, ObjectMeta}, Api, Client, ResourceExt, }; use serde_json::json; -use std::collections::HashMap; +use std::{collections::HashMap, net::SocketAddr, str::FromStr}; use tokio_retry::strategy::FixedInterval; use tokio_retry::Retry; use tracing::log::info; @@ -83,43 +86,54 @@ pub async fn create_or_reuse_namespace(client: &Client, name: &str) -> anyhow::R } pub async fn create_tests_deployment(client: &Client) -> anyhow::Result<()> { - let deployment: Deployment = serde_json::from_value(json!({ - "apiVersion": "apps/v1", - "kind": "Deployment", - "metadata": { - 
"name": "tests-deployment", - "namespace": "consensus", - "labels": { - "app": "test-node" - } - }, - "spec": { - "selector": { - "matchLabels": { - "app": "test-node" - } + let deployment: Deployment = Deployment { + metadata: ObjectMeta { + name: Some("tests-deployment".to_string()), + namespace: Some("consensus".to_string()), + labels: Some( + [("app".to_string(), "test-node".to_string())] + .iter() + .cloned() + .collect(), + ), + ..Default::default() }, - "template": { - "metadata": { - "labels": { - "app": "test-node" - } - }, - "spec": { - "containers": [ - { - "name": "test-suite", - "image": "test-suite:latest", - "imagePullPolicy": "Never", - "command": [ - "./tester_entrypoint.sh" - ] - } - ] - } - } - } - }))?; + spec: Some(DeploymentSpec { + selector: LabelSelector { + match_labels: Some( + [("app".to_string(), "test-node".to_string())] + .iter() + .cloned() + .collect(), + ), + ..Default::default() + }, + replicas: Some(1), + template: PodTemplateSpec { + metadata: Some(ObjectMeta { + labels: Some( + [("app".to_string(), "test-node".to_string())] + .iter() + .cloned() + .collect(), + ), + ..Default::default() + }), + spec: Some(PodSpec { + containers: vec![Container { + name: "test-suite".to_string(), + image: Some("test-suite:latest".to_string()), + image_pull_policy: Some("Never".to_string()), + command: Some(vec!["./tester_entrypoint.sh".to_string()]), + ..Default::default() + }], + ..Default::default() + }), + }, + ..Default::default() + }), + ..Default::default() + }; let deployments: Api = Api::namespaced(client.clone(), "consensus"); let post_params = PostParams::default(); From 04f1a2f08d9460bbad5cf70af5d5756a5e0b0313 Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Wed, 21 Feb 2024 17:22:43 -0300 Subject: [PATCH 124/139] Remove unwraps in k8s module --- node/tools/src/k8s.rs | 34 +++++++++++++++++++++++++--------- 1 file changed, 25 insertions(+), 9 deletions(-) diff --git a/node/tools/src/k8s.rs b/node/tools/src/k8s.rs index 4ce285f1..95a9b103 100644 --- a/node/tools/src/k8s.rs +++ b/node/tools/src/k8s.rs @@ -25,22 +25,38 @@ pub async fn get_client() -> anyhow::Result { } /// Get a kube client -pub async fn get_consensus_node_ips(client: &Client) -> anyhow::Result> { +pub async fn get_consensus_node_ips(client: &Client) -> anyhow::Result> { let pods: Api = Api::namespaced(client.clone(), "consensus"); let lp = ListParams::default(); let pod = pods.list(&lp).await?; - let a: Vec = pod + let a: Result, _> = pod .into_iter() .filter(|pod| { - pod.clone() - .metadata - .name - .unwrap() - .starts_with("consensus-node") + if let Some(pod) = pod.clone().metadata.name { + pod.contains("consensus-node") + } else { + false + } + }) + .map(|pod| { + let pod_ip = pod.status.and_then(|status: PodStatus| status.pod_ip); + let port = pod.spec.and_then(|spec: PodSpec| { + spec.containers[0].clone().ports.and_then(|ports| { + ports + .iter() + .find(|port| port.container_port != config::NODES_PORT as i32) + .map(|port| port.container_port) + }) + }); + if let Some((pod_ip, port)) = pod_ip.zip(port) { + let pod_addr = format!("{}:{}", pod_ip, port); + SocketAddr::from_str(&pod_addr).map_err(|e| e.into()) + } else { + bail!("Consensus Pod IP or port not found"); + } }) - .map(|pod| pod.status.unwrap().pod_ip.unwrap()) .collect(); - Ok(a) + a } /// Creates a namespace in k8s cluster From 93824c018dc692215ba133f6c900bf8bf0fa22b8 Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Wed, 21 Feb 2024 18:41:42 -0300 Subject: [PATCH 125/139] Add anyhow as dependency for tests crate --- 
node/Cargo.lock | 1 + node/tests/Cargo.toml | 1 + 2 files changed, 2 insertions(+) diff --git a/node/Cargo.lock b/node/Cargo.lock index 80df1538..1d07f37e 100644 --- a/node/Cargo.lock +++ b/node/Cargo.lock @@ -2936,6 +2936,7 @@ dependencies = [ name = "tester" version = "0.1.0" dependencies = [ + "anyhow", "clap", "jsonrpsee", "serde_json", diff --git a/node/tests/Cargo.toml b/node/tests/Cargo.toml index 85d7d591..18c0a5aa 100644 --- a/node/tests/Cargo.toml +++ b/node/tests/Cargo.toml @@ -14,6 +14,7 @@ tokio.workspace = true jsonrpsee.workspace = true clap.workspace = true tracing.workspace = true +anyhow.workspace = true [lints] workspace = true From be03154fa67809c34264c5ef8b8e6c1bad06de7d Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Thu, 22 Feb 2024 10:46:08 -0300 Subject: [PATCH 126/139] Get pods from consensus docker image --- node/tools/src/bin/deployer.rs | 11 ++++------- node/tools/src/k8s.rs | 24 +++++++++++++++++------- 2 files changed, 21 insertions(+), 14 deletions(-) diff --git a/node/tools/src/bin/deployer.rs b/node/tools/src/bin/deployer.rs index 28ba2cf9..72ca848b 100644 --- a/node/tools/src/bin/deployer.rs +++ b/node/tools/src/bin/deployer.rs @@ -10,9 +10,6 @@ use zksync_consensus_crypto::{Text, TextFmt}; use zksync_consensus_roles::node::SecretKey; use zksync_consensus_tools::{k8s, AppConfig, NodeAddr, NODES_PORT}; -/// K8s namespace for consensus nodes. -const NAMESPACE: &str = "consensus"; - /// Command line arguments. #[derive(Debug, Parser)] #[command(name = "deployer")] @@ -92,7 +89,7 @@ fn generate_config(nodes: usize) -> anyhow::Result<()> { /// Deploys the nodes to the kubernetes cluster. async fn deploy(nodes: usize, seed_nodes: Option) -> anyhow::Result<()> { let client = k8s::get_client().await?; - k8s::create_or_reuse_namespace(&client, NAMESPACE).await?; + k8s::create_or_reuse_namespace(&client, k8s::DEFAULT_NAMESPACE).await?; let seed_nodes = seed_nodes.unwrap_or(1); @@ -103,13 +100,13 @@ async fn deploy(nodes: usize, seed_nodes: Option) -> anyhow::Result<()> { i, true, vec![], // Seed peers don't have other peer information - NAMESPACE, + k8s::DEFAULT_NAMESPACE, ) .await?; } // obtain seed peer(s) IP(s) - let peer_ips = k8s::get_seed_node_addrs(&client, seed_nodes, NAMESPACE).await?; + let peer_ips = k8s::get_seed_node_addrs(&client, seed_nodes, k8s::DEFAULT_NAMESPACE).await?; let mut peers = vec![]; @@ -125,7 +122,7 @@ async fn deploy(nodes: usize, seed_nodes: Option) -> anyhow::Result<()> { // deploy the rest of nodes for i in seed_nodes..nodes { - k8s::deploy_node(&client, i, false, peers.clone(), NAMESPACE).await?; + k8s::deploy_node(&client, i, false, peers.clone(), k8s::DEFAULT_NAMESPACE).await?; } Ok(()) diff --git a/node/tools/src/k8s.rs b/node/tools/src/k8s.rs index 95a9b103..97d2add8 100644 --- a/node/tools/src/k8s.rs +++ b/node/tools/src/k8s.rs @@ -19,21 +19,31 @@ use tokio_retry::Retry; use tracing::log::info; use zksync_protobuf::serde::Serde; +/// Docker image name for consensus nodes. +const DOCKER_IMAGE_NAME: &str = "consensus-node"; + +/// K8s namespace for consensus nodes. +pub const DEFAULT_NAMESPACE: &str = "consensus"; + /// Get a kube client pub async fn get_client() -> anyhow::Result { Ok(Client::try_default().await?) } -/// Get a kube client +/// Get the IP addresses and the exposed port of the RPC server of the consensus nodes in the kubernetes cluster. 
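+/// Pods are matched by their container image (`consensus-node`) rather than by pod name.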
pub async fn get_consensus_node_ips(client: &Client) -> anyhow::Result> { - let pods: Api = Api::namespaced(client.clone(), "consensus"); + let pods: Api = Api::namespaced(client.clone(), DEFAULT_NAMESPACE); let lp = ListParams::default(); let pod = pods.list(&lp).await?; let a: Result, _> = pod .into_iter() .filter(|pod| { - if let Some(pod) = pod.clone().metadata.name { - pod.contains("consensus-node") + let docker_image = pod + .spec + .clone() + .and_then(|spec: PodSpec| spec.containers[0].clone().image); + if let Some(docker_image) = docker_image { + docker_image.contains(DOCKER_IMAGE_NAME) } else { false } @@ -105,7 +115,7 @@ pub async fn create_tests_deployment(client: &Client) -> anyhow::Result<()> { let deployment: Deployment = Deployment { metadata: ObjectMeta { name: Some("tests-deployment".to_string()), - namespace: Some("consensus".to_string()), + namespace: Some(DEFAULT_NAMESPACE.to_string()), labels: Some( [("app".to_string(), "test-node".to_string())] .iter() @@ -151,7 +161,7 @@ pub async fn create_tests_deployment(client: &Client) -> anyhow::Result<()> { ..Default::default() }; - let deployments: Api = Api::namespaced(client.clone(), "consensus"); + let deployments: Api = Api::namespaced(client.clone(), DEFAULT_NAMESPACE); let post_params = PostParams::default(); let result = deployments.create(&post_params, &deployment).await?; @@ -201,7 +211,7 @@ pub async fn deploy_node( "containers": [ { "name": node_name, - "image": "consensus-node", + "image": DOCKER_IMAGE_NAME, "env": [ { "name": "NODE_ID", From 7e4c33dcd23e23768da50f6a411a7eefabfc1997 Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Thu, 22 Feb 2024 12:26:12 -0300 Subject: [PATCH 127/139] Improve error logging for the tester --- node/Cargo.lock | 1 + node/tests/Cargo.toml | 1 + node/tests/src/main.rs | 23 ++++++++++------------- node/tools/src/k8s.rs | 6 +++++- 4 files changed, 17 insertions(+), 14 deletions(-) diff --git a/node/Cargo.lock b/node/Cargo.lock index 1d07f37e..eb91083a 100644 --- a/node/Cargo.lock +++ b/node/Cargo.lock @@ -2942,6 +2942,7 @@ dependencies = [ "serde_json", "tokio", "tracing", + "tracing-subscriber", "zksync_consensus_tools", ] diff --git a/node/tests/Cargo.toml b/node/tests/Cargo.toml index 18c0a5aa..acf9ac25 100644 --- a/node/tests/Cargo.toml +++ b/node/tests/Cargo.toml @@ -15,6 +15,7 @@ jsonrpsee.workspace = true clap.workspace = true tracing.workspace = true anyhow.workspace = true +tracing-subscriber.workspace = true [lints] workspace = true diff --git a/node/tests/src/main.rs b/node/tests/src/main.rs index cf2161c7..94bc3e59 100644 --- a/node/tests/src/main.rs +++ b/node/tests/src/main.rs @@ -69,9 +69,9 @@ pub async fn start_tests_pod() -> anyhow::Result<()> { /// Sanity test for the RPC server. pub async fn sanity_test() { let config_file_path = get_config_path(); - let node_ips = fs::read_to_string(config_file_path).unwrap(); - for ip in node_ips.lines() { - let url: String = format!("http://{}:3154", ip); + let nodes_socket = fs::read_to_string(config_file_path).unwrap(); + for socket in nodes_socket.lines() { + let url: String = format!("http://{}", socket); let rpc_client = HttpClientBuilder::default().build(url).unwrap(); let params = Params::new(None); let response: serde_json::Value = rpc_client @@ -84,21 +84,18 @@ pub async fn sanity_test() { /// Main function for the test. 
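+/// Errors are now propagated to the runtime so they are reported in the pod logs instead of being silently dropped.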
#[tokio::main] -async fn main() { +async fn main() -> anyhow::Result<()> { let args = TesterCLI::parse(); + tracing_subscriber::fmt::init(); match args.command { - TesterCommands::GenerateConfig => { - let _ = generate_config().await; - tracing::info!("Config succesfully generated") - } - TesterCommands::StartPod => { - let _ = start_tests_pod().await; - tracing::info!("Pod started succesfully!") - } + TesterCommands::GenerateConfig => generate_config().await, + TesterCommands::StartPod => start_tests_pod().await, TesterCommands::Run => { + tracing::info!("Running sanity test"); sanity_test().await; - tracing::info!("Test passed!") + tracing::info!("Test Passed!"); + Ok(()) } } } diff --git a/node/tools/src/k8s.rs b/node/tools/src/k8s.rs index 97d2add8..96ffb050 100644 --- a/node/tools/src/k8s.rs +++ b/node/tools/src/k8s.rs @@ -1,5 +1,5 @@ use crate::{config, NodeAddr}; -use anyhow::{anyhow, bail, Context}; +use anyhow::{anyhow, bail, ensure, Context}; use k8s_openapi::{ api::{ apps::v1::{Deployment, DeploymentSpec}, @@ -35,6 +35,10 @@ pub async fn get_consensus_node_ips(client: &Client) -> anyhow::Result = Api::namespaced(client.clone(), DEFAULT_NAMESPACE); let lp = ListParams::default(); let pod = pods.list(&lp).await?; + ensure!( + !pod.items.is_empty(), + "No consensus pods found in the k8s cluster" + ); let a: Result, _> = pod .into_iter() .filter(|pod| { From 573d238c21c25e0c128d802278553e108a440d9e Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Thu, 22 Feb 2024 13:33:31 -0300 Subject: [PATCH 128/139] Do general cleanup of k8s new functions --- node/tools/src/k8s.rs | 46 +++++++++++++------------------------------ 1 file changed, 14 insertions(+), 32 deletions(-) diff --git a/node/tools/src/k8s.rs b/node/tools/src/k8s.rs index 96ffb050..90c450ab 100644 --- a/node/tools/src/k8s.rs +++ b/node/tools/src/k8s.rs @@ -13,7 +13,7 @@ use kube::{ Api, Client, ResourceExt, }; use serde_json::json; -use std::{collections::HashMap, net::SocketAddr, str::FromStr}; +use std::{collections::HashMap, net::SocketAddr}; use tokio_retry::strategy::FixedInterval; use tokio_retry::Retry; use tracing::log::info; @@ -34,18 +34,18 @@ pub async fn get_client() -> anyhow::Result { pub async fn get_consensus_node_ips(client: &Client) -> anyhow::Result> { let pods: Api = Api::namespaced(client.clone(), DEFAULT_NAMESPACE); let lp = ListParams::default(); - let pod = pods.list(&lp).await?; + let pods = pods.list(&lp).await?; ensure!( - !pod.items.is_empty(), + !pods.items.is_empty(), "No consensus pods found in the k8s cluster" ); - let a: Result, _> = pod + let pods_addresses: Result, _> = pods .into_iter() .filter(|pod| { let docker_image = pod .spec .clone() - .and_then(|spec: PodSpec| spec.containers[0].clone().image); + .and_then(|spec| spec.containers[0].clone().image); if let Some(docker_image) = docker_image { docker_image.contains(DOCKER_IMAGE_NAME) } else { @@ -53,8 +53,8 @@ pub async fn get_consensus_node_ips(client: &Client) -> anyhow::Result anyhow::Result anyhow::Result<()> { metadata: ObjectMeta { name: Some("tests-deployment".to_string()), namespace: Some(DEFAULT_NAMESPACE.to_string()), - labels: Some( - [("app".to_string(), "test-node".to_string())] - .iter() - .cloned() - .collect(), - ), + labels: Some([("app".to_string(), "test-node".to_string())].into()), ..Default::default() }, spec: Some(DeploymentSpec { selector: LabelSelector { - match_labels: Some( - [("app".to_string(), "test-node".to_string())] - .iter() - .cloned() - .collect(), - ), + match_labels: Some([("app".to_string(), 
"test-node".to_string())].into()), ..Default::default() }, replicas: Some(1), template: PodTemplateSpec { metadata: Some(ObjectMeta { - labels: Some( - [("app".to_string(), "test-node".to_string())] - .iter() - .cloned() - .collect(), - ), + labels: Some([("app".to_string(), "test-node".to_string())].into()), ..Default::default() }), spec: Some(PodSpec { From abab39a385598adb5d44c4f3276185654d59c39d Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Thu, 22 Feb 2024 17:58:06 -0300 Subject: [PATCH 129/139] Use of filter_map and add context to errors --- node/tests/src/main.rs | 8 ++++-- node/tools/src/k8s.rs | 63 ++++++++++++++++++++++++------------------ 2 files changed, 42 insertions(+), 29 deletions(-) diff --git a/node/tests/src/main.rs b/node/tests/src/main.rs index 94bc3e59..023bf890 100644 --- a/node/tests/src/main.rs +++ b/node/tests/src/main.rs @@ -44,7 +44,9 @@ fn get_config_path() -> PathBuf { /// Generate a config file with the IPs of the consensus nodes in the kubernetes cluster. pub async fn generate_config() -> anyhow::Result<()> { let client = k8s::get_client().await?; - let pods_ip = k8s::get_consensus_node_ips(&client).await?; + let pods_ip = k8s::get_consensus_nodes_address(&client) + .await + .context("Failed to get consensus pods address")?; let config_file_path = get_config_path(); for addr in pods_ip { let mut config_file = fs::OpenOptions::new() @@ -52,7 +54,9 @@ pub async fn generate_config() -> anyhow::Result<()> { .create(true) .truncate(true) .open(&config_file_path)?; - config_file.write_all(addr.to_string().as_bytes())?; + config_file + .write_all(addr.to_string().as_bytes()) + .with_context(|| "Failed to write to config file")?; } Ok(()) } diff --git a/node/tools/src/k8s.rs b/node/tools/src/k8s.rs index 90c450ab..a1abfd01 100644 --- a/node/tools/src/k8s.rs +++ b/node/tools/src/k8s.rs @@ -1,9 +1,9 @@ use crate::{config, NodeAddr}; -use anyhow::{anyhow, bail, ensure, Context}; +use anyhow::{anyhow, ensure, Context}; use k8s_openapi::{ api::{ apps::v1::{Deployment, DeploymentSpec}, - core::v1::{Container, Namespace, Pod, PodSpec, PodStatus, PodTemplateSpec}, + core::v1::{Container, Namespace, Pod, PodSpec, PodTemplateSpec}, }, apimachinery::pkg::apis::meta::v1::LabelSelector, }; @@ -31,7 +31,7 @@ pub async fn get_client() -> anyhow::Result { } /// Get the IP addresses and the exposed port of the RPC server of the consensus nodes in the kubernetes cluster. -pub async fn get_consensus_node_ips(client: &Client) -> anyhow::Result> { +pub async fn get_consensus_nodes_address(client: &Client) -> anyhow::Result> { let pods: Api = Api::namespaced(client.clone(), DEFAULT_NAMESPACE); let lp = ListParams::default(); let pods = pods.list(&lp).await?; @@ -39,35 +39,44 @@ pub async fn get_consensus_node_ips(client: &Client) -> anyhow::Result, _> = pods + let pod_addresses: Vec = pods .into_iter() - .filter(|pod| { - let docker_image = pod - .spec - .clone() - .and_then(|spec| spec.containers[0].clone().image); - if let Some(docker_image) = docker_image { - docker_image.contains(DOCKER_IMAGE_NAME) + .filter_map(|pod| { + let pod_spec = pod.spec.clone().context("Failed to get pod spec").ok()?; + let pod_running_container = pod_spec + .containers + .first() + .context("Failed to get pod container") + .ok()? + .to_owned(); + let docker_image = pod_running_container + .image + .context("Failed to get pod docker image") + .ok()?; + + if docker_image.contains(DOCKER_IMAGE_NAME) { + let pod_ip = pod + .status + .context("Failed to get pod status") + .ok()? 
+ .pod_ip + .context("Failed to get pod ip") + .ok()?; + let port = pod_running_container.ports?.iter().find_map(|port| { + let port = port.container_port.try_into().ok()?; + (port != config::NODES_PORT).then_some(port) + }); + Some(SocketAddr::new(pod_ip.parse().ok()?, port?)) } else { - false + None } }) - .map(|pod| { - let pod_ip = pod.status.and_then(|status| status.pod_ip); - let port = pod.spec.and_then(|spec| { - spec.containers[0].clone().ports.and_then(|ports| { - ports - .iter() - .find(|port| port.container_port != config::NODES_PORT as i32) - .map(|port| port.container_port) - }) - }); - let pod_ip = pod_ip.context("pod_ip")?; - let port: u16 = port.context("port")?.try_into().context("port")?; - Ok(SocketAddr::new(pod_ip.parse()?, port)) - }) .collect(); - pods_addresses + ensure!( + !pod_addresses.is_empty(), + "No consensus pods found in the k8s cluster" + ); + Ok(pod_addresses) } /// Creates a namespace in k8s cluster From 9bf46f51cecaf0a2038782735f9cc55e6111ed59 Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Thu, 22 Feb 2024 18:00:33 -0300 Subject: [PATCH 130/139] Clean test-suite image in the clean target --- Makefile | 1 + 1 file changed, 1 insertion(+) diff --git a/Makefile b/Makefile index da206528..42315ebb 100644 --- a/Makefile +++ b/Makefile @@ -60,6 +60,7 @@ clean_docker: docker rm -f consensus-node-2 docker network rm -f node-net docker image rm -f consensus-node + docker image rm -f test-suite addresses_file: mkdir -p ${EXECUTABLE_NODE_DIR}/docker-config From 37c74eb059250860f5ac328e00220fa8cee7bbce Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Thu, 22 Feb 2024 18:06:52 -0300 Subject: [PATCH 131/139] Add doc comment explaining the unwraps usage --- node/tests/src/main.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/node/tests/src/main.rs b/node/tests/src/main.rs index 023bf890..dd695163 100644 --- a/node/tests/src/main.rs +++ b/node/tests/src/main.rs @@ -71,6 +71,7 @@ pub async fn start_tests_pod() -> anyhow::Result<()> { } /// Sanity test for the RPC server. +/// We use unwraps here because this function is intended to be used like a test. pub async fn sanity_test() { let config_file_path = get_config_path(); let nodes_socket = fs::read_to_string(config_file_path).unwrap(); From 8b6f54d635e5fde1bc7365b1a46516f0dd8f2ba2 Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Thu, 22 Feb 2024 18:33:13 -0300 Subject: [PATCH 132/139] Remove unnecesary clones --- node/tools/src/k8s.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/node/tools/src/k8s.rs b/node/tools/src/k8s.rs index a1abfd01..ba2f39b4 100644 --- a/node/tools/src/k8s.rs +++ b/node/tools/src/k8s.rs @@ -32,7 +32,7 @@ pub async fn get_client() -> anyhow::Result { /// Get the IP addresses and the exposed port of the RPC server of the consensus nodes in the kubernetes cluster. 
pub async fn get_consensus_nodes_address(client: &Client) -> anyhow::Result<Vec<SocketAddr>> { - let pods: Api<Pod> = Api::namespaced(client.clone(), DEFAULT_NAMESPACE); + let pods: Api<Pod> = Api::namespaced(client.to_owned(), DEFAULT_NAMESPACE); let lp = ListParams::default(); let pods = pods.list(&lp).await?; ensure!( @@ -42,7 +42,7 @@ pub async fn get_consensus_nodes_address(client: &Client) -> anyhow::Result<Vec<SocketAddr>> { let pod_addresses: Vec<SocketAddr> = pods .into_iter() .filter_map(|pod| { - let pod_spec = pod.spec.clone().context("Failed to get pod spec").ok()?; + let pod_spec = pod.spec.context("Failed to get pod spec").ok()?; let pod_running_container = pod_spec .containers .first() @@ -156,7 +156,7 @@ pub async fn create_tests_deployment(client: &Client) -> anyhow::Result<()> { ..Default::default() }; - let deployments: Api<Deployment> = Api::namespaced(client.clone(), DEFAULT_NAMESPACE); + let deployments: Api<Deployment> = Api::namespaced(client.to_owned(), DEFAULT_NAMESPACE); let post_params = PostParams::default(); let result = deployments.create(&post_params, &deployment).await?; From 34425263cb250925304c3534ce2e3e3cb23661c8 Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Fri, 23 Feb 2024 14:15:28 -0300 Subject: [PATCH 133/139] Remove RPCMethod trait --- node/tests/src/main.rs | 12 ++--- node/tools/src/main.rs | 1 - node/tools/src/rpc/methods/config.rs | 56 ++++++--------------- node/tools/src/rpc/methods/health_check.rs | 31 +++++------- node/tools/src/rpc/methods/last_view.rs | 58 ++++++---------------- node/tools/src/rpc/methods/mod.rs | 16 +----- node/tools/src/rpc/methods/peers.rs | 56 +++++++++------------ node/tools/src/rpc/server.rs | 47 ++++++++---------- 8 files changed, 92 insertions(+), 185 deletions(-) diff --git a/node/tests/src/main.rs b/node/tests/src/main.rs index dd695163..e7d5849d 100644 --- a/node/tests/src/main.rs +++ b/node/tests/src/main.rs @@ -3,11 +3,8 @@ use std::{fs, io::Write, path::PathBuf}; use anyhow::Context; use clap::{Parser, Subcommand}; -use jsonrpsee::{core::client::ClientT, http_client::HttpClientBuilder, rpc_params, types::Params}; -use zksync_consensus_tools::{ - k8s, - rpc::methods::{health_check::HealthCheck, RPCMethod}, -}; +use jsonrpsee::{core::client::ClientT, http_client::HttpClientBuilder, rpc_params}; +use zksync_consensus_tools::{k8s, rpc::methods::health_check}; /// Command line arguments. #[derive(Debug, Parser)] @@ -78,12 +75,11 @@ pub async fn sanity_test() { for socket in nodes_socket.lines() { let url: String = format!("http://{}", socket); let rpc_client = HttpClientBuilder::default().build(url).unwrap(); - let params = Params::new(None); let response: serde_json::Value = rpc_client - .request(HealthCheck::method(), rpc_params!()) + .request(health_check::method(), rpc_params!()) .await .unwrap(); - assert_eq!(response, HealthCheck::callback(params).unwrap()); + assert_eq!(response, health_check::callback().unwrap()); } } diff --git a/node/tools/src/main.rs b/node/tools/src/main.rs index ed417fc4..33ff0beb 100644 --- a/node/tools/src/main.rs +++ b/node/tools/src/main.rs @@ -2,7 +2,6 @@ //! manages communication between the actors. It is the main executable in this workspace. use anyhow::Context as _; use clap::Parser; -use k8s_openapi::api::node; use std::{fs, io::IsTerminal as _, path::PathBuf}; use tracing::metadata::LevelFilter; use tracing_subscriber::{prelude::*, Registry}; diff --git a/node/tools/src/rpc/methods/config.rs b/node/tools/src/rpc/methods/config.rs index f03e9353..2dd77498 100644 --- a/node/tools/src/rpc/methods/config.rs +++ b/node/tools/src/rpc/methods/config.rs @@ -1,48 +1,22 @@ -//! 
Peers method for RPC server. -use crate::{config::encode_json, decode_json, AppConfig}; - -use super::RPCMethod; -use jsonrpsee::types::{error::ErrorCode, Params}; -use std::fs::{self}; +use crate::{config::encode_json, AppConfig}; +use jsonrpsee::core::RpcResult; use zksync_protobuf::serde::Serde; -/// Config method for RPC server. -pub(crate) struct ConfigInfo; - -// RPCMethod trait should be more general to allow external parameters like this case -// TODO fix the trait and implement this code in it -impl ConfigInfo { - /// Provide the node's config information - pub(crate) fn info(config: AppConfig) -> Result<serde_json::Value, ErrorCode> { - // This may change in the future since we are assuming that the executor binary is being run inside the config directory. - Ok(serde_json::json!({ - "config": encode_json(&Serde(config)) - })) - } +/// Config response for /config endpoint. +pub fn callback(config: AppConfig) -> RpcResult<serde_json::Value> { + // This may change in the future since we are assuming that the executor binary is being run inside the config directory. + Ok(serde_json::json!({ + "config": encode_json(&Serde(config)) + })) } -impl RPCMethod for ConfigInfo { - /// Config response for /config endpoint. - fn callback(_params: Params) -> Result<serde_json::Value, ErrorCode> { - // This may change in the future since we are assuming that the executor binary is being run inside the config directory. - let node_config = - fs::read_to_string("config.json").map_err(|_e| ErrorCode::InternalError)?; - let node_config = decode_json::<Serde<AppConfig>>(&node_config) - .map_err(|_e| ErrorCode::InternalError)? - .0; - let config = encode_json(&Serde(node_config)); - Ok(serde_json::json!({ - "config": config - })) - } - - /// Config method name. - fn method() -> &'static str { - "config" - } +/// Config method name. +pub fn method() -> &'static str { + "config" +} - /// Method path for GET requests. - fn path() -> &'static str { - "/config" - } +/// Method path for GET requests. +pub fn path() -> &'static str { + "/config" } diff --git a/node/tools/src/rpc/methods/health_check.rs b/node/tools/src/rpc/methods/health_check.rs index c92f6e90..c36d7498 100644 --- a/node/tools/src/rpc/methods/health_check.rs +++ b/node/tools/src/rpc/methods/health_check.rs @@ -1,23 +1,16 @@ -//! Health check method for RPC server. -use super::RPCMethod; -use jsonrpsee::types::{error::ErrorCode, Params}; +use jsonrpsee::core::RpcResult; -/// Health check method for RPC server. -pub struct HealthCheck; - -impl RPCMethod for HealthCheck { - /// Health check response for /health endpoint. - fn callback(_params: Params) -> Result<serde_json::Value, ErrorCode> { - Ok(serde_json::json!({"health": true})) - } +/// Health check response for /health endpoint. +pub fn callback() -> RpcResult<serde_json::Value> { + Ok(serde_json::json!({"health": true})) +} - /// Health check method name. - fn method() -> &'static str { - "health_check" - } +/// Health check method name. +pub fn method() -> &'static str { + "health_check" +} - /// Method path for GET requests. - fn path() -> &'static str { - "/health" - } +/// Method path for GET requests. +pub fn path() -> &'static str { + "/health" } diff --git a/node/tools/src/rpc/methods/last_view.rs b/node/tools/src/rpc/methods/last_view.rs index 2f86631c..705a7df4 100644 --- a/node/tools/src/rpc/methods/last_view.rs +++ b/node/tools/src/rpc/methods/last_view.rs @@ -1,50 +1,22 @@ //! Peers method for RPC server.
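// The `last_view` handler below uses a watch-channel read pattern: take a fresh
// receiver, then clone the current value out of the borrow guard so the lock is
// released immediately. A hedged sketch, assuming the block store exposes a
// tokio-style `watch` subscription:
//
//     let mut sub = store.subscribe();    // watch::Receiver<BlockStoreState>
//     let state = sub.borrow().clone();   // cheap snapshot; guard dropped here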
-use crate::{config::encode_json, decode_json, AppConfig}; - -use super::RPCMethod; -use jsonrpsee::types::{error::ErrorCode, Params}; -use std::{fs, sync::Arc}; +use jsonrpsee::core::RpcResult; +use std::sync::Arc; use zksync_consensus_storage::{BlockStore, ReplicaState}; -use zksync_protobuf::serde::Serde; - -/// Config method for RPC server. -pub(crate) struct LastView; -impl LastView { - /// Provide the node's config information - pub(crate) fn info(node_storage: Arc<BlockStore>) -> Result<serde_json::Value, ErrorCode> { - let block = node_storage; - let sub = &mut block.subscribe(); - let state = sub.borrow().clone(); - let replica_state = ReplicaState::from(state.last).view; - let replica_state = - serde_json::to_value(replica_state).map_err(|_e| ErrorCode::InternalError); - replica_state - } +/// Config response for /config endpoint. +pub fn callback(node_storage: Arc<BlockStore>) -> RpcResult<serde_json::Value> { + let sub = &mut node_storage.subscribe(); + let state = sub.borrow().clone(); + let replica_state = ReplicaState::from(state.last).view; + Ok(serde_json::json!(replica_state)) } -impl RPCMethod for LastView { - /// Config response for /config endpoint. - fn callback(_params: Params) -> Result<serde_json::Value, ErrorCode> { - // This may change in the future since we are assuming that the executor binary is being run inside the config directory. - let node_config = - fs::read_to_string("config.json").map_err(|_e| ErrorCode::InternalError)?; - let node_config = decode_json::<Serde<AppConfig>>(&node_config) - .map_err(|_e| ErrorCode::InternalError)? - .0; - let config = encode_json(&Serde(node_config)); - Ok(serde_json::json!({ - "config": config - })) - } - - /// Config method name. - fn method() -> &'static str { - "last_view" - } +/// Config method name. +pub fn method() -> &'static str { + "last_view" +} - /// Method path for GET requests. - fn path() -> &'static str { - "/last_view" - } +/// Method path for GET requests. +pub fn path() -> &'static str { + "/last_view" } diff --git a/node/tools/src/rpc/methods/mod.rs b/node/tools/src/rpc/methods/mod.rs index bcdf6a98..a0ef7d36 100644 --- a/node/tools/src/rpc/methods/mod.rs +++ b/node/tools/src/rpc/methods/mod.rs @@ -1,16 +1,4 @@ -use jsonrpsee::types::{error::ErrorCode, Params}; - -/// Trait to implement for new RPC methods. -pub trait RPCMethod { - /// Method response logic when called. - fn callback(params: Params) -> Result<serde_json::Value, ErrorCode>; - /// Method name. - fn method() -> &'static str; - /// Method path for GET requests. - fn path() -> &'static str; -} - -pub(crate) mod config; +pub mod config; pub mod health_check; pub mod last_view; -pub(crate) mod peers; +pub mod peers; diff --git a/node/tools/src/rpc/methods/peers.rs b/node/tools/src/rpc/methods/peers.rs index 35f974d4..58324529 100644 --- a/node/tools/src/rpc/methods/peers.rs +++ b/node/tools/src/rpc/methods/peers.rs @@ -1,41 +1,33 @@ //! Peers method for RPC server. use crate::{decode_json, AppConfig}; - -use super::RPCMethod; -use jsonrpsee::types::{error::ErrorCode, Params}; +use jsonrpsee::{core::RpcResult, types::error::ErrorCode}; use std::fs::{self}; use zksync_consensus_crypto::TextFmt; use zksync_protobuf::serde::Serde; -/// Peers method for RPC server. -pub(crate) struct PeersInfo; - -impl RPCMethod for PeersInfo { - /// Peers response for /peers endpoint. - fn callback(_params: Params) -> Result<serde_json::Value, ErrorCode> { - // This may change in the future since we are assuming that the executor binary is being run inside the config directory.
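// The shape this refactor gives every RPC method module, in miniature: three
// free functions replace the old `RPCMethod` trait. A sketch with a
// hypothetical `status` endpoint (not one of the real methods):
//
//     pub fn callback() -> RpcResult<serde_json::Value> {
//         Ok(serde_json::json!({ "status": "ok" }))
//     }
//     pub fn method() -> &'static str { "status" }
//     pub fn path() -> &'static str { "/status" }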
- let node_config = - fs::read_to_string("config.json").map_err(|_e| ErrorCode::InternalError)?; - let node_config = decode_json::<Serde<AppConfig>>(&node_config) - .map_err(|_e| ErrorCode::InternalError)? - .0; - let peers: Vec<String> = node_config - .gossip_static_inbound - .iter() - .map(|x| x.encode()) - .collect(); - Ok(serde_json::json!({ - "peers": peers - })) - } +/// Peers response for /peers endpoint. +pub fn callback() -> RpcResult<serde_json::Value> { + // This may change in the future since we are assuming that the executor binary is being run inside the config directory. + let node_config = fs::read_to_string("config.json").map_err(|_e| ErrorCode::InternalError)?; + let node_config = decode_json::<Serde<AppConfig>>(&node_config) + .map_err(|_e| ErrorCode::InternalError)? + .0; + let peers: Vec<String> = node_config + .gossip_static_inbound + .iter() + .map(|x| x.encode()) + .collect(); + Ok(serde_json::json!({ + "peers": peers + })) +} - /// Peers method name. - fn method() -> &'static str { - "peers" - } +/// Peers method name. +pub fn method() -> &'static str { + "peers" +} - /// Method path for GET requests. - fn path() -> &'static str { - "/peers" - } +/// Method path for GET requests. +pub fn path() -> &'static str { + "/peers" } diff --git a/node/tools/src/rpc/server.rs b/node/tools/src/rpc/server.rs index 2d68bc5e..2a5c8a51 100644 --- a/node/tools/src/rpc/server.rs +++ b/node/tools/src/rpc/server.rs @@ -1,8 +1,6 @@ use crate::AppConfig; -use super::methods::{ - config::ConfigInfo, health_check::HealthCheck, last_view::LastView, peers::PeersInfo, RPCMethod, -}; +use super::methods::{config, health_check, last_view, peers}; use jsonrpsee::server::{middleware::http::ProxyGetRequestLayer, RpcModule, Server}; use std::{net::SocketAddr, sync::Arc}; use zksync_concurrency::{ctx, scope}; @@ -14,6 +12,7 @@ pub struct RPCServer { ip_address: SocketAddr, /// AppConfig config: AppConfig, + /// Node storage. node_storage: Arc<BlockStore>, } @@ -32,44 +31,38 @@ impl RPCServer { let service_builder = tower::ServiceBuilder::new() // Proxy `GET /<path>` requests to internal methods. .layer(ProxyGetRequestLayer::new( - HealthCheck::path(), - HealthCheck::method(), + health_check::path(), + health_check::method(), )?) + .layer(ProxyGetRequestLayer::new(peers::path(), peers::method())?) + .layer(ProxyGetRequestLayer::new(config::path(), config::method())?) .layer(ProxyGetRequestLayer::new( - PeersInfo::path(), - PeersInfo::method(), - )?)
- .layer(ProxyGetRequestLayer::new( - LastView::path(), - LastView::method(), + last_view::path(), + last_view::method(), )?); - let server = Server::builder() - .set_http_middleware(service_builder) - .build(self.ip_address) - .await?; - let mut module = RpcModule::new(()); - module.register_method(HealthCheck::method(), |params, _| { - HealthCheck::callback(params) + module.register_method(health_check::method(), |_params, _| { + health_check::callback() })?; - module.register_method(PeersInfo::method(), |params, _| PeersInfo::callback(params))?; + module.register_method(peers::method(), |_params, _| peers::callback())?; // TODO find a better way to implement this as I had to clone the clone and move it to pass the borrow checker let config = self.config.clone(); - module.register_method(ConfigInfo::method(), move |_params, _| { - ConfigInfo::info(config.clone()) + module.register_method(config::method(), move |_params, _| { + config::callback(config.clone()) })?; let node_storage = self.node_storage.clone(); - module.register_method(LastView::method(), move |_params, _| { - LastView::info(node_storage.clone()) + module.register_method(last_view::method(), move |_params, _| { + last_view::callback(node_storage.clone()) })?; + let server = Server::builder() + .set_http_middleware(service_builder) + .build(self.ip_address) + .await?; + let handle = server.start(module); scope::run!(ctx, |ctx, s| async { s.spawn_bg(async { From a8364b93004bd71fb4e665de1cf65731cbe54c1d Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Thu, 7 Mar 2024 11:23:53 -0300 Subject: [PATCH 134/139] Merge main into rpc_execution_connection --- .github/CODEOWNERS | 3 +- CONTRIBUTING.md => .github/CONTRIBUTING.md | 0 README.md | 2 +- node/Cargo.lock | 363 ++++++------- node/Cargo.toml | 2 + node/actors/bft/src/config.rs | 26 +- node/actors/bft/src/leader/mod.rs | 8 +- node/actors/bft/src/leader/replica_commit.rs | 75 +-- node/actors/bft/src/leader/replica_prepare.rs | 91 ++-- node/actors/bft/src/leader/state_machine.rs | 57 +- node/actors/bft/src/leader/tests.rs | 239 ++++----- node/actors/bft/src/lib.rs | 1 - node/actors/bft/src/misc.rs | 23 - node/actors/bft/src/replica/block.rs | 16 +- node/actors/bft/src/replica/leader_commit.rs | 58 +- node/actors/bft/src/replica/leader_prepare.rs | 236 ++------- node/actors/bft/src/replica/mod.rs | 8 +- node/actors/bft/src/replica/new_view.rs | 24 +- node/actors/bft/src/replica/state_machine.rs | 11 +- node/actors/bft/src/replica/tests.rs | 421 ++++++++------- node/actors/bft/src/replica/timer.rs | 10 +- node/actors/bft/src/testonly/fuzz.rs | 41 +- node/actors/bft/src/testonly/node.rs | 4 +- node/actors/bft/src/testonly/run.rs | 12 +- node/actors/bft/src/testonly/ut_harness.rs | 140 ++--- node/actors/bft/src/tests.rs | 32 +- node/actors/executor/src/lib.rs | 62 +-- node/actors/executor/src/testonly.rs | 53 -- node/actors/executor/src/tests.rs | 151 ++---- node/actors/network/Cargo.toml | 1 + node/actors/network/src/config.rs | 82 +++ .../network/src/consensus/handshake/mod.rs | 17 + .../src/consensus/handshake/testonly.rs | 1 + .../network/src/consensus/handshake/tests.rs | 77 ++- node/actors/network/src/consensus/mod.rs | 239 ++++++++- node/actors/network/src/consensus/runner.rs | 190 ------- node/actors/network/src/consensus/state.rs | 45 -- node/actors/network/src/consensus/tests.rs | 115 +++- node/actors/network/src/event.rs | 21 - node/actors/network/src/gossip/arcmap.rs | 42 ++ .../network/src/gossip/handshake/mod.rs | 32 +- .../network/src/gossip/handshake/testonly.rs | 1 + 
.../network/src/gossip/handshake/tests.rs | 86 ++- node/actors/network/src/gossip/mod.rs | 88 +++- node/actors/network/src/gossip/runner.rs | 345 +++++------- node/actors/network/src/gossip/state.rs | 215 -------- node/actors/network/src/gossip/tests.rs | 189 ++++--- .../network/src/gossip/validator_addrs.rs | 110 ++++ node/actors/network/src/lib.rs | 194 ++++++- node/actors/network/src/metrics.rs | 4 +- node/actors/network/src/mux/tests/mod.rs | 21 +- node/actors/network/src/proto/consensus.proto | 3 +- node/actors/network/src/proto/gossip.proto | 9 +- node/actors/network/src/rpc/consensus.rs | 5 - node/actors/network/src/rpc/get_block.rs | 5 - node/actors/network/src/rpc/mod.rs | 17 +- node/actors/network/src/rpc/ping.rs | 26 +- .../network/src/rpc/push_block_store_state.rs | 16 +- .../network/src/rpc/push_validator_addrs.rs | 5 - node/actors/network/src/rpc/tests.rs | 64 ++- node/actors/network/src/state.rs | 164 ------ node/actors/network/src/testonly.rs | 117 +++-- node/actors/network/src/tests.rs | 6 +- node/actors/network/src/watch.rs | 8 +- node/actors/sync_blocks/src/config.rs | 31 +- node/actors/sync_blocks/src/lib.rs | 2 +- node/actors/sync_blocks/src/peers/mod.rs | 45 +- .../sync_blocks/src/peers/tests/basics.rs | 149 +++--- .../sync_blocks/src/peers/tests/fakes.rs | 54 +- .../actors/sync_blocks/src/peers/tests/mod.rs | 19 +- .../src/peers/tests/multiple_peers.rs | 102 ++-- .../sync_blocks/src/peers/tests/snapshots.rs | 322 +----------- .../sync_blocks/src/tests/end_to_end.rs | 234 ++++----- node/actors/sync_blocks/src/tests/mod.rs | 50 +- node/libs/concurrency/src/ctx/mod.rs | 2 + .../src => concurrency/src/ctx}/no_copy.rs | 18 +- node/libs/concurrency/src/testonly.rs | 1 + node/libs/roles/Cargo.toml | 5 +- node/libs/roles/src/proto/validator.proto | 43 +- node/libs/roles/src/validator/conv.rs | 115 +++- .../roles/src/validator/messages/block.rs | 45 +- .../roles/src/validator/messages/consensus.rs | 497 ++++++++---------- .../src/validator/messages/leader_commit.rs | 125 +++++ .../src/validator/messages/leader_prepare.rs | 267 ++++++++++ node/libs/roles/src/validator/messages/mod.rs | 8 + .../src/validator/messages/replica_commit.rs | 25 + .../src/validator/messages/replica_prepare.rs | 63 +++ node/libs/roles/src/validator/mod.rs | 3 - node/libs/roles/src/validator/testonly.rs | 316 +++++------ node/libs/roles/src/validator/tests.rs | 189 ++++--- node/libs/storage/src/block_store/metrics.rs | 15 +- node/libs/storage/src/block_store/mod.rs | 100 +++- node/libs/storage/src/proto/mod.proto | 12 +- node/libs/storage/src/replica_store.rs | 26 +- node/libs/storage/src/testonly/in_memory.rs | 59 ++- node/libs/storage/src/testonly/mod.rs | 37 +- node/libs/storage/src/tests.rs | 31 +- node/libs/utils/src/lib.rs | 1 - node/tests/src/main.rs | 25 +- node/tools/build.rs | 2 +- node/tools/src/bin/deployer.rs | 17 +- node/tools/src/bin/localnet_config.rs | 11 +- node/tools/src/config.rs | 84 +-- node/tools/src/k8s.rs | 250 ++++----- node/tools/src/main.rs | 5 +- node/tools/src/proto/mod.proto | 35 +- node/tools/src/rpc/methods/last_view.rs | 8 +- node/tools/src/store.rs | 85 ++- node/tools/src/tests.rs | 40 +- 109 files changed, 4193 insertions(+), 4109 deletions(-) rename CONTRIBUTING.md => .github/CONTRIBUTING.md (100%) delete mode 100644 node/actors/bft/src/misc.rs delete mode 100644 node/actors/executor/src/testonly.rs create mode 100644 node/actors/network/src/config.rs delete mode 100644 node/actors/network/src/consensus/runner.rs delete mode 100644 
node/actors/network/src/consensus/state.rs delete mode 100644 node/actors/network/src/event.rs create mode 100644 node/actors/network/src/gossip/arcmap.rs create mode 100644 node/actors/network/src/gossip/validator_addrs.rs rename node/libs/{utils/src => concurrency/src/ctx}/no_copy.rs (50%) create mode 100644 node/libs/roles/src/validator/messages/leader_commit.rs create mode 100644 node/libs/roles/src/validator/messages/leader_prepare.rs create mode 100644 node/libs/roles/src/validator/messages/replica_commit.rs create mode 100644 node/libs/roles/src/validator/messages/replica_prepare.rs diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 82804b9e..078f73a4 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1,8 +1,7 @@ CODEOWNERS @brunoffranca -/node/actors/consensus/ @brunoffranca @moshababo +/node/actors/consensus/ @brunoffranca /node/actors/network/ @pompon0 -/node/actors/sync_blocks/ @slowli /node/libs/concurrency/ @pompon0 /node/libs/crypto/ @brunoffranca diff --git a/CONTRIBUTING.md b/.github/CONTRIBUTING.md similarity index 100% rename from CONTRIBUTING.md rename to .github/CONTRIBUTING.md diff --git a/README.md b/README.md index 9e84e6fd..52bc1f90 100644 --- a/README.md +++ b/README.md @@ -15,7 +15,7 @@ The following questions will be answered by the following resources: ## Policies - [Security policy](.github/SECURITY.md) -- [Contribution policy](CONTRIBUTING.md) +- [Contribution policy](.github/CONTRIBUTING.md) ## License diff --git a/node/Cargo.lock b/node/Cargo.lock index eb91083a..1eb664be 100644 --- a/node/Cargo.lock +++ b/node/Cargo.lock @@ -29,9 +29,9 @@ dependencies = [ [[package]] name = "aes" -version = "0.8.3" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac1f845298e95f983ff1944b728ae08b8cebab80d684f0a832ed0fc74dfa27e2" +checksum = "b169f7a6d4742236a0a00c541b845991d0ac43e546831af1249753ab4c3aa3a0" dependencies = [ "cfg-if", "cipher", @@ -54,9 +54,9 @@ dependencies = [ [[package]] name = "ahash" -version = "0.8.7" +version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77c3a9648d43b9cd48db467b3f87fdd6e146bcc88ab0180006cef2179fe11d01" +checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" dependencies = [ "cfg-if", "getrandom", @@ -103,9 +103,9 @@ checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" [[package]] name = "anstream" -version = "0.6.11" +version = "0.6.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e2e1ebcb11de5c03c67de28a7df593d32191b44939c482e97702baaaa6ab6a5" +checksum = "d96bd03f33fe50a863e394ee9718a706f988b9079b20c3784fb726e7678b62fb" dependencies = [ "anstyle", "anstyle-parse", @@ -151,9 +151,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.79" +version = "1.0.80" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "080e9890a082662b09c1ad45f567faeeb47f22b5fb23895fbe1e651e718e25ca" +checksum = "5ad32ce52e4161730f7098c077cd2ed6229b5804ccf99e5366be1ab72a98b4e1" [[package]] name = "assert_matches" @@ -169,7 +169,7 @@ checksum = "c980ee35e870bd1a4d2c8294d4c04d0499e67bca1e4b5cefcc693c2fa00caea9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.52", ] [[package]] @@ -249,7 +249,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.48", + "syn 2.0.52", ] [[package]] @@ -320,9 +320,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.14.0" +version = "3.15.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f30e7476521f6f8af1a1c4c0b8cc94f0bee37d91763d0ca2665f299b6cd8aec" +checksum = "8ea184aa71bb362a1157c896979544cc23974e08fd265f29ea96b59f0b4a555b" [[package]] name = "byteorder" @@ -355,9 +355,9 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" -version = "1.0.83" +version = "1.0.90" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0" +checksum = "8cd6604a82acf3039f1144f54b8eb34e91ffba622051189e71b781822d5ee1f5" dependencies = [ "jobserver", "libc", @@ -404,15 +404,15 @@ dependencies = [ [[package]] name = "chrono" -version = "0.4.33" +version = "0.4.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f13690e35a5e4ace198e7beea2895d29f3a9cc55015fcebe6336bd2010af9eb" +checksum = "8eaf5903dcbc0a39312feb77df2ff4c76387d591b9fc7b04a238dcf8bb62639a" dependencies = [ "android-tzdata", "iana-time-zone", "num-traits", "serde", - "windows-targets 0.52.0", + "windows-targets 0.52.4", ] [[package]] @@ -466,9 +466,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.0" +version = "4.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80c21025abd42669a92efc996ef13cfb2c5c627858421ea58d5c3b331a6c134f" +checksum = "b230ab84b0ffdf890d5a10abdbc8b83ae1c4918275daea1ab8801f71536b2651" dependencies = [ "clap_builder", "clap_derive", @@ -476,9 +476,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.0" +version = "4.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "458bf1f341769dfcf849846f65dffdf9146daa56bcd2a47cb4e1de9915567c99" +checksum = "ae129e2e766ae0ec03484e609954119f123cc1fe650337e155d03b022f24f7b4" dependencies = [ "anstream", "anstyle", @@ -495,7 +495,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.52", ] [[package]] @@ -658,14 +658,14 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.52", ] [[package]] name = "darling" -version = "0.20.5" +version = "0.20.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc5d6b04b3fd0ba9926f945895de7d806260a2d7431ba82e7edaecb043c4c6b8" +checksum = "54e36fcd13ed84ffdfda6f5be89b31287cbb80c439841fe69e04841435464391" dependencies = [ "darling_core", "darling_macro", @@ -673,27 +673,27 @@ dependencies = [ [[package]] name = "darling_core" -version = "0.20.5" +version = "0.20.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04e48a959bcd5c761246f5d090ebc2fbf7b9cd527a492b07a67510c108f1e7e3" +checksum = "9c2cf1c23a687a1feeb728783b993c4e1ad83d99f351801977dd809b48d0a70f" dependencies = [ "fnv", "ident_case", "proc-macro2", "quote", "strsim 0.10.0", - "syn 2.0.48", + "syn 2.0.52", ] [[package]] name = "darling_macro" -version = "0.20.5" +version = "0.20.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d1545d67a2149e1d93b7e5c7752dce5a7426eb5d1357ddcfd89336b94444f77" +checksum = "a668eda54683121533a393014d8692171709ff57a7d61f187b6e782719f8933f" dependencies = [ "darling_core", "quote", - "syn 2.0.48", + "syn 2.0.52", ] [[package]] @@ -760,9 +760,9 @@ checksum = "dcbb2bf8e87535c23f7a8a321e364ce21462d0ff10cb6407820e8e96dfff6653" [[package]] name = "dyn-clone" -version = "1.0.16" +version = "1.0.17" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "545b22097d44f8a9581187cdf93de7a71e4722bf51200cfaba810865b49a495d" +checksum = "0d6ef0072f8a535281e4876be788938b528e9a1d43900b82c2569af7da799125" [[package]] name = "ed25519" @@ -791,9 +791,9 @@ dependencies = [ [[package]] name = "either" -version = "1.9.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a26ae43d7bcc3b814de94796a5e736d4029efb0ee900c12e2d54c993ad1a1e07" +checksum = "11157ac094ffbdde99aa67b23417ebdd801842852b500e395a45a9c0aac03e4a" [[package]] name = "elsa" @@ -943,7 +943,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.52", ] [[package]] @@ -999,9 +999,9 @@ dependencies = [ [[package]] name = "ghash" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d930750de5717d2dd0b8c0d42c076c0e884c81a73e6cab859bbd2339c71e3e40" +checksum = "f0d8a4362ccb29cb0b265253fb0a2728f592895ee6854fd9bc13f2ffda266ff1" dependencies = [ "opaque-debug", "polyval", @@ -1040,9 +1040,9 @@ dependencies = [ [[package]] name = "half" -version = "2.3.1" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc52e53916c08643f1b56ec082790d1e86a32e58dc5268f897f313fbae7b4872" +checksum = "b5eceaaeec696539ddaf7b333340f1af35a5aa87ae3e4f3ead0532f72affab2e" dependencies = [ "cfg-if", "crunchy", @@ -1066,9 +1066,9 @@ checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" [[package]] name = "hermit-abi" -version = "0.3.5" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0c62115964e08cb8039170eb33c1d0e2388a256930279edca206fff675f82c3" +checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" [[package]] name = "hex" @@ -1087,9 +1087,9 @@ dependencies = [ [[package]] name = "http" -version = "0.2.11" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8947b1a6fad4393052c7ba1f4cd97bed3e953a95c79c92ad9b051a04611d9fbb" +checksum = "601cbb57e577e2f5ef5be8e7b83f0f63994f25aa94d673e54a92d5c516d101f1" dependencies = [ "bytes", "fnv", @@ -1232,9 +1232,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.2.2" +version = "2.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "824b2ae422412366ba479e8111fd301f7b5faece8149317bb81925979a53f520" +checksum = "7b0b929d511467233429c45a44ac1dcaa21ba0f5ba11e4879e6ed28ddb4f9df4" dependencies = [ "equivalent", "hashbrown", @@ -1260,9 +1260,9 @@ dependencies = [ [[package]] name = "is-terminal" -version = "0.4.11" +version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe8f25ce1159c7740ff0b9b2f5cdf4a8428742ba7c112b9f20f22cd5219c7dab" +checksum = "f23ff5ef2b80d608d61efee834934d862cd92461afc0560dedf493e4c033738b" dependencies = [ "hermit-abi", "libc", @@ -1304,9 +1304,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.68" +version = "0.3.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "406cda4b368d531c842222cf9d2600a9a4acce8d29423695379c6868a143a9ee" +checksum = "29c15563dc2726973df627357ce0c9ddddbea194836909d655df6a75d2cf296d" dependencies = [ "wasm-bindgen", ] @@ -1430,9 +1430,9 @@ dependencies = [ [[package]] name = "k8s-openapi" -version = "0.21.0" +version = "0.21.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "301f367a36090b7dfdaac248ee3ed4f14a6a8292e7bec0f1c5e6e2e1f181cd33" +checksum = "550f99d93aa4c2b25de527bce492d772caf5e21d7ac9bd4b508ba781c8d91e30" dependencies = [ "base64 0.21.7", "chrono", @@ -1527,7 +1527,7 @@ dependencies = [ "proc-macro2", "quote", "serde_json", - "syn 2.0.48", + "syn 2.0.52", ] [[package]] @@ -1576,12 +1576,12 @@ checksum = "9c198f91728a82281a64e1f4f9eeb25d82cb32a5de251c6bd1b5154d63a8e7bd" [[package]] name = "libloading" -version = "0.8.1" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c571b676ddfc9a8c12f1f3d3085a7b163966a8fd8098a90640953ce5f6170161" +checksum = "0c2a198fb6b0eada2a8df47933734e6d35d350665a33a3593d7164fa52c75c19" dependencies = [ "cfg-if", - "windows-sys 0.48.0", + "windows-targets 0.52.4", ] [[package]] @@ -1613,22 +1613,22 @@ dependencies = [ [[package]] name = "linkme" -version = "0.3.22" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b53ad6a33de58864705954edb5ad5d571a010f9e296865ed43dc72a5621b430" +checksum = "bb2cfee0de9bd869589fb9a015e155946d1be5ff415cb844c2caccc6cc4b5db9" dependencies = [ "linkme-impl", ] [[package]] name = "linkme-impl" -version = "0.3.22" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04e542a18c94a9b6fcc7adb090fa3ba6b79ee220a16404f325672729f32a66ff" +checksum = "adf157a4dc5a29b7b464aa8fe7edeff30076e07e13646a1c3874f58477dc99f8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.52", ] [[package]] @@ -1649,9 +1649,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.20" +version = "0.4.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f" +checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c" [[package]] name = "logos" @@ -1673,7 +1673,7 @@ dependencies = [ "proc-macro2", "quote", "regex-syntax 0.6.29", - "syn 2.0.48", + "syn 2.0.52", ] [[package]] @@ -1730,7 +1730,7 @@ checksum = "49e7bc1560b95a3c4a25d03de42fe76ca718ab92d1a22a55b9b4cf67b3ae635c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.52", ] [[package]] @@ -1756,9 +1756,9 @@ dependencies = [ [[package]] name = "mio" -version = "0.8.10" +version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f3d0b296e374a4e6f3c7b0a1f5a51d748a0d34c85e7dc48fc3fa9a87657fe09" +checksum = "a4a650543ca06a924e8b371db273b2756685faae30f8487da1b56505a8f78b0c" dependencies = [ "libc", "wasi", @@ -1859,9 +1859,9 @@ checksum = "0ab1bc2a289d34bd04a330323ac98a1b4bc82c9d9fcb1e66b63caa84da26b575" [[package]] name = "opaque-debug" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" +checksum = "c08d65885ee38876c4f86fa503fb49d7b507c2b62552df7c70b2fce627e06381" [[package]] name = "openssl-probe" @@ -1943,9 +1943,9 @@ checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "pest" -version = "2.7.7" +version = "2.7.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "219c0dcc30b6a27553f9cc242972b67f75b60eb0db71f0b5462f38b058c41546" +checksum = "56f8023d0fb78c8e03784ea1c7f3fa36e68a723138990b8d5a47d916b651e7a8" dependencies = [ "memchr", "thiserror", @@ -1954,9 +1954,9 @@ dependencies = [ 
[[package]] name = "pest_derive" -version = "2.7.7" +version = "2.7.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22e1288dbd7786462961e69bfd4df7848c1e37e8b74303dbdab82c3a9cdd2809" +checksum = "b0d24f72393fd16ab6ac5738bc33cdb6a9aa73f8b902e8fe29cf4e67d7dd1026" dependencies = [ "pest", "pest_generator", @@ -1964,22 +1964,22 @@ dependencies = [ [[package]] name = "pest_generator" -version = "2.7.7" +version = "2.7.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1381c29a877c6d34b8c176e734f35d7f7f5b3adaefe940cb4d1bb7af94678e2e" +checksum = "fdc17e2a6c7d0a492f0158d7a4bd66cc17280308bbaff78d5bef566dca35ab80" dependencies = [ "pest", "pest_meta", "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.52", ] [[package]] name = "pest_meta" -version = "2.7.7" +version = "2.7.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0934d6907f148c22a3acbda520c7eed243ad7487a30f51f6ce52b58b7077a8a" +checksum = "934cd7631c050f4674352a6e835d5f6711ffbfb9345c2fc0107155ac495ae293" dependencies = [ "once_cell", "pest", @@ -1998,22 +1998,22 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.1.4" +version = "1.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0302c4a0442c456bd56f841aee5c3bfd17967563f6fadc9ceb9f9c23cf3807e0" +checksum = "b6bf43b791c5b9e34c3d182969b4abb522f9343702850a2e57f460d00d09b4b3" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.4" +version = "1.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "266c042b60c9c76b8d53061e52b2e0d1116abc57cefc8c5cd671619a56ac3690" +checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.52", ] [[package]] @@ -2040,9 +2040,9 @@ dependencies = [ [[package]] name = "pkg-config" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2900ede94e305130c13ddd391e0ab7cbaeb783945ae07a279c268cb05109c6cb" +checksum = "d231b230927b5e4ad203db57bbcbee2802f6bce620b1e4a9024a07d94e2907ec" [[package]] name = "platforms" @@ -2091,9 +2091,9 @@ dependencies = [ [[package]] name = "polyval" -version = "0.6.1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d52cff9d1d4dee5fe6d03729099f4a310a41179e0a10dbf542039873f2e826fb" +checksum = "9d1fe60d06143b2430aa532c94cfe9e29783047f06c0d7fd359a9a51b729fa25" dependencies = [ "cfg-if", "cpufeatures", @@ -2130,7 +2130,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a41cf62165e97c7f814d2221421dbb9afcbcdb0a88068e5ea206e19951c2cbb5" dependencies = [ "proc-macro2", - "syn 2.0.48", + "syn 2.0.52", ] [[package]] @@ -2162,7 +2162,7 @@ checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.52", ] [[package]] @@ -2192,7 +2192,7 @@ dependencies = [ "prost", "prost-types", "regex", - "syn 2.0.48", + "syn 2.0.52", "tempfile", "which", ] @@ -2207,7 +2207,7 @@ dependencies = [ "itertools 0.11.0", "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.52", ] [[package]] @@ -2349,9 +2349,9 @@ dependencies = [ [[package]] name = "rayon" -version = "1.8.1" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa7237101a77a10773db45d62004a272517633fbcc3df19d96455ede1122e051" +checksum = 
"e4963ed1bc86e4f3ee217022bd855b297cef07fb9eac5dfa1f788b220b49b3bd" dependencies = [ "either", "rayon-core", @@ -2393,7 +2393,7 @@ checksum = "b62dbe01f0b06f9d8dc7d49e05a0785f153b00b2c227856282f671e0318c9b15" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.4.5", + "regex-automata 0.4.6", "regex-syntax 0.8.2", ] @@ -2408,9 +2408,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.5" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5bb987efffd3c6d0d8f5f89510bb458559eab11e4f869acb20bf845e016259cd" +checksum = "86b83b8b9847f9bf95ef68afb0b8e6cdb80f498442f5179a29fad448fcc1eaea" dependencies = [ "aho-corasick", "memchr", @@ -2431,16 +2431,17 @@ checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" [[package]] name = "ring" -version = "0.17.7" +version = "0.17.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "688c63d65483050968b2a8937f7995f443e27041a0f7700aa59b0822aedebb74" +checksum = "c17fa4cb658e3583423e915b9f3acc01cceaee1860e33d59ebae66adc3a2dc0d" dependencies = [ "cc", + "cfg-if", "getrandom", "libc", "spin", "untrusted", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] @@ -2538,9 +2539,9 @@ dependencies = [ [[package]] name = "ryu" -version = "1.0.16" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f98d2aa92eebf49b69786be48e4477826b256916e84a57ff2a4f21923b48eb4c" +checksum = "e86697c916019a8588c99b5fac3cead74ec0b4b819707a682fd4d23fa0ce1ba1" [[package]] name = "same-file" @@ -2635,15 +2636,15 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.21" +version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b97ed7a9823b74f99c7742f5336af7be5ecd3eeafcb1507d1fa93347b1d589b0" +checksum = "92d43fe69e652f3df9bdc2b85b2854a0825b86e4fb76bc44d945137d053639ca" [[package]] name = "serde" -version = "1.0.196" +version = "1.0.197" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "870026e60fa08c69f064aa766c10f10b1d62db9ccd4d0abb206472bee0ce3b32" +checksum = "3fb1c873e1b9b056a4dc4c0c198b24c3ffa059243875552b2bd0933b1aee4ce2" dependencies = [ "serde_derive", ] @@ -2660,13 +2661,13 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.196" +version = "1.0.197" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33c85360c95e7d137454dc81d9a4ed2b8efd8fbe19cee57357b32b9771fccb67" +checksum = "7eb0b34b42edc17f6b7cac84a52a1c5f0e1bb2227e997ca9011ea3dd34e8610b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.52", ] [[package]] @@ -2682,9 +2683,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.113" +version = "1.0.114" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69801b70b1c3dac963ecb03a364ba0ceda9cf60c71cfe475e99864759c8b8a79" +checksum = "c5f09b1bd632ef549eaa9f60a1f8de742bdbc698e6cee2095fc84dde5f549ae0" dependencies = [ "itoa", "ryu", @@ -2693,9 +2694,9 @@ dependencies = [ [[package]] name = "serde_yaml" -version = "0.9.31" +version = "0.9.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adf8a49373e98a4c5f0ceb5d05aa7c648d75f63774981ed95b7c7443bbd50c6e" +checksum = "8fd075d994154d4a774f95b51fb96bdc2832b0ea48425c92546073816cda1f2f" dependencies = [ "indexmap", "itoa", @@ -2814,12 +2815,12 @@ dependencies = [ [[package]] name = "socket2" -version = "0.5.5" +version = "0.5.6" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b5fac59a5cb5dd637972e5fca70daf0523c9067fcdc4842f053dae04a18f8e9" +checksum = "05ffd9c0a93b7543e062e759284fcf5f5e3b098501104bfbdde4d404db792871" dependencies = [ "libc", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] @@ -2891,9 +2892,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.48" +version = "2.0.52" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f3531638e407dfc0814761abb7c00a5b54992b849452a0646b7f65c9f770f3f" +checksum = "b699d15b36d1f02c3e7c69f8ffef53de37aefae075d8488d4ba1a7788d574a07" dependencies = [ "proc-macro2", "quote", @@ -2902,9 +2903,9 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.10.0" +version = "3.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a365e8cd18e44762ef95d87f284f4b5cd04107fec2ff3052bd6a3e6069669e67" +checksum = "85b77fafb263dd9d05cbeac119526425676db3784113aa9295c88498cbf8bff1" dependencies = [ "cfg-if", "fastrand", @@ -2914,22 +2915,22 @@ dependencies = [ [[package]] name = "test-casing" -version = "0.1.2" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2378d657757969a2cec9ec4eb616be8f01be98c21c6467991f91cb182e4653b" +checksum = "f4d233764420cbfe244e6a50177798a01b20184df210eb626898cd1b20c06633" dependencies = [ "test-casing-macro", ] [[package]] name = "test-casing-macro" -version = "0.1.2" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2cfbe7811249c4c914b06141b8ac0f2cee2733fb883d05eb19668a45fc60c3d5" +checksum = "f9b53c7124dd88026d5d98a1eb1fd062a578b7d783017c9298825526c7fb6427" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.52", ] [[package]] @@ -2948,29 +2949,29 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.56" +version = "1.0.57" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d54378c645627613241d077a3a79db965db602882668f9136ac42af9ecb730ad" +checksum = "1e45bcbe8ed29775f228095caf2cd67af7a4ccf756ebff23a306bf3e8b47b24b" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.56" +version = "1.0.57" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa0faa943b50f3db30a20aa7e265dbc66076993efed8463e8de414e5d06d3471" +checksum = "a953cb265bef375dae3de6663da4d3804eee9682ea80d8e2542529b73c531c81" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.52", ] [[package]] name = "thread_local" -version = "1.1.7" +version = "1.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fdd6f064ccff2d6567adcb3873ca630700f00b5ad3f060c25b5dcfd9a4ce152" +checksum = "8b9ef9bad013ada3808854ceac7b46812a6465ba368859a37e2100283d2d719c" dependencies = [ "cfg-if", "once_cell", @@ -3066,7 +3067,7 @@ checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.52", ] [[package]] @@ -3187,7 +3188,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.52", ] [[package]] @@ -3270,9 +3271,9 @@ checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" [[package]] name = "unicode-normalization" -version = "0.1.22" +version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"5c5713f0fc4b5db668a2ac63cdb7bb4469d8c9fed047b1d0292cc7b0ce2ba921" +checksum = "a56d1686db2308d901306f92a263857ef59ea39678a5458e7cb17f01415101f5" dependencies = [ "tinyvec", ] @@ -3372,14 +3373,14 @@ source = "git+https://github.com/matter-labs/vise.git?rev=1c9cc500e92cf9ea052b23 dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.52", ] [[package]] name = "walkdir" -version = "2.4.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d71d857dc86794ca4c280d616f7da00d2dbfd8cd788846559a6813e6aa4b54ee" +checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b" dependencies = [ "same-file", "winapi-util", @@ -3402,9 +3403,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.91" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1e124130aee3fb58c5bdd6b639a0509486b0338acaaae0c84a5124b0f588b7f" +checksum = "4be2531df63900aeb2bca0daaaddec08491ee64ceecbee5076636a3b026795a8" dependencies = [ "cfg-if", "wasm-bindgen-macro", @@ -3412,24 +3413,24 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.91" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9e7e1900c352b609c8488ad12639a311045f40a35491fb69ba8c12f758af70b" +checksum = "614d787b966d3989fa7bb98a654e369c762374fd3213d212cfc0251257e747da" dependencies = [ "bumpalo", "log", "once_cell", "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.52", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-macro" -version = "0.2.91" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b30af9e2d358182b5c7449424f017eba305ed32a7010509ede96cdc4696c46ed" +checksum = "a1f8823de937b71b9460c0c34e25f3da88250760bec0ebac694b49997550d726" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -3437,28 +3438,28 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.91" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "642f325be6301eb8107a83d12a8ac6c1e1c54345a7ef1a9261962dfefda09e66" +checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.52", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.91" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f186bd2dcf04330886ce82d6f33dd75a7bfcf69ecf5763b89fcde53b6ac9838" +checksum = "af190c94f2773fdb3729c55b007a722abb5384da03bc0986df4c289bf5567e96" [[package]] name = "web-sys" -version = "0.3.68" +version = "0.3.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96565907687f7aceb35bc5fc03770a8a0471d82e479f25832f54a0e3f4b28446" +checksum = "77afa9a11836342370f4817622a2f0f418b134426d91a82dfb48f532d2ec13ef" dependencies = [ "js-sys", "wasm-bindgen", @@ -3513,7 +3514,7 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" dependencies = [ - "windows-targets 0.52.0", + "windows-targets 0.52.4", ] [[package]] @@ -3531,7 +3532,7 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" dependencies = 
[ - "windows-targets 0.52.0", + "windows-targets 0.52.4", ] [[package]] @@ -3551,17 +3552,17 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.52.0" +version = "0.52.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a18201040b24831fbb9e4eb208f8892e1f50a37feb53cc7ff887feb8f50e7cd" +checksum = "7dd37b7e5ab9018759f893a1952c9420d060016fc19a472b4bb20d1bdd694d1b" dependencies = [ - "windows_aarch64_gnullvm 0.52.0", - "windows_aarch64_msvc 0.52.0", - "windows_i686_gnu 0.52.0", - "windows_i686_msvc 0.52.0", - "windows_x86_64_gnu 0.52.0", - "windows_x86_64_gnullvm 0.52.0", - "windows_x86_64_msvc 0.52.0", + "windows_aarch64_gnullvm 0.52.4", + "windows_aarch64_msvc 0.52.4", + "windows_i686_gnu 0.52.4", + "windows_i686_msvc 0.52.4", + "windows_x86_64_gnu 0.52.4", + "windows_x86_64_gnullvm 0.52.4", + "windows_x86_64_msvc 0.52.4", ] [[package]] @@ -3572,9 +3573,9 @@ checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" [[package]] name = "windows_aarch64_gnullvm" -version = "0.52.0" +version = "0.52.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb7764e35d4db8a7921e09562a0304bf2f93e0a51bfccee0bd0bb0b666b015ea" +checksum = "bcf46cf4c365c6f2d1cc93ce535f2c8b244591df96ceee75d8e83deb70a9cac9" [[package]] name = "windows_aarch64_msvc" @@ -3584,9 +3585,9 @@ checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" [[package]] name = "windows_aarch64_msvc" -version = "0.52.0" +version = "0.52.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbaa0368d4f1d2aaefc55b6fcfee13f41544ddf36801e793edbbfd7d7df075ef" +checksum = "da9f259dd3bcf6990b55bffd094c4f7235817ba4ceebde8e6d11cd0c5633b675" [[package]] name = "windows_i686_gnu" @@ -3596,9 +3597,9 @@ checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" [[package]] name = "windows_i686_gnu" -version = "0.52.0" +version = "0.52.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a28637cb1fa3560a16915793afb20081aba2c92ee8af57b4d5f28e4b3e7df313" +checksum = "b474d8268f99e0995f25b9f095bc7434632601028cf86590aea5c8a5cb7801d3" [[package]] name = "windows_i686_msvc" @@ -3608,9 +3609,9 @@ checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" [[package]] name = "windows_i686_msvc" -version = "0.52.0" +version = "0.52.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffe5e8e31046ce6230cc7215707b816e339ff4d4d67c65dffa206fd0f7aa7b9a" +checksum = "1515e9a29e5bed743cb4415a9ecf5dfca648ce85ee42e15873c3cd8610ff8e02" [[package]] name = "windows_x86_64_gnu" @@ -3620,9 +3621,9 @@ checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" [[package]] name = "windows_x86_64_gnu" -version = "0.52.0" +version = "0.52.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d6fa32db2bc4a2f5abeacf2b69f7992cd09dca97498da74a151a3132c26befd" +checksum = "5eee091590e89cc02ad514ffe3ead9eb6b660aedca2183455434b93546371a03" [[package]] name = "windows_x86_64_gnullvm" @@ -3632,9 +3633,9 @@ checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" [[package]] name = "windows_x86_64_gnullvm" -version = "0.52.0" +version = "0.52.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a657e1e9d3f514745a572a6846d3c7aa7dbe1658c056ed9c3344c4109a6949e" +checksum = "77ca79f2451b49fa9e2af39f0747fe999fcda4f5e241b2898624dca97a1f2177" [[package]] name = 
"windows_x86_64_msvc" @@ -3644,9 +3645,9 @@ checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" [[package]] name = "windows_x86_64_msvc" -version = "0.52.0" +version = "0.52.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04" +checksum = "32b752e52a2da0ddfbdbcc6fceadfeede4c939ed16d13e648833a61dfb611ed8" [[package]] name = "yansi" @@ -3671,7 +3672,7 @@ checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.52", ] [[package]] @@ -3691,7 +3692,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.52", ] [[package]] @@ -3779,6 +3780,7 @@ name = "zksync_consensus_network" version = "0.1.0" dependencies = [ "anyhow", + "assert_matches", "async-trait", "im", "once_cell", @@ -3806,6 +3808,7 @@ name = "zksync_consensus_roles" version = "0.1.0" dependencies = [ "anyhow", + "assert_matches", "bit-vec", "hex", "prost", @@ -3933,7 +3936,7 @@ dependencies = [ "prost-reflect", "protox", "quote", - "syn 2.0.48", + "syn 2.0.52", ] [[package]] diff --git a/node/Cargo.toml b/node/Cargo.toml index c867def7..d08616dc 100644 --- a/node/Cargo.toml +++ b/node/Cargo.toml @@ -159,3 +159,5 @@ needless_pass_by_ref_mut = "allow" box_default = "allow" # remove once fix to https://github.com/rust-lang/rust-clippy/issues/11764 is available on CI. map_identity = "allow" +# &*x is not equivalent to x, because it affects borrowing in closures. +borrow_deref_ref = "allow" diff --git a/node/actors/bft/src/config.rs b/node/actors/bft/src/config.rs index c1a94b74..77938549 100644 --- a/node/actors/bft/src/config.rs +++ b/node/actors/bft/src/config.rs @@ -1,7 +1,6 @@ //! The inner data of the consensus state machine. This is shared between the different roles. -use crate::{misc, PayloadManager}; +use crate::PayloadManager; use std::sync::Arc; -use tracing::instrument; use zksync_consensus_roles::validator; use zksync_consensus_storage as storage; @@ -10,8 +9,6 @@ use zksync_consensus_storage as storage; pub struct Config { /// The validator's secret key. pub secret_key: validator::SecretKey, - /// A vector of public keys for all the validators in the network. - pub validator_set: validator::ValidatorSet, /// The maximum size of the payload of a block, in bytes. We will /// reject blocks with payloads larger than this. pub max_payload_size: usize, @@ -24,23 +21,8 @@ pub struct Config { } impl Config { - /// Computes the validator for the given view. - #[instrument(level = "trace", ret)] - pub fn view_leader(&self, view_number: validator::ViewNumber) -> validator::PublicKey { - let index = view_number.0 as usize % self.validator_set.len(); - self.validator_set.get(index).unwrap().clone() - } - - /// Calculate the consensus threshold, the minimum number of votes for any consensus action to be valid, - /// for a given number of replicas. - #[instrument(level = "trace", ret)] - pub fn threshold(&self) -> usize { - misc::consensus_threshold(self.validator_set.len()) - } - - /// Calculate the maximum number of faulty replicas, for a given number of replicas. - #[instrument(level = "trace", ret)] - pub fn faulty_replicas(&self) -> usize { - misc::faulty_replicas(self.validator_set.len()) + /// Genesis. 
+ pub fn genesis(&self) -> &validator::Genesis { + self.block_store.genesis() } } diff --git a/node/actors/bft/src/leader/mod.rs b/node/actors/bft/src/leader/mod.rs index 6cff3091..f4615904 100644 --- a/node/actors/bft/src/leader/mod.rs +++ b/node/actors/bft/src/leader/mod.rs @@ -2,14 +2,10 @@ //! and aggregates replica messages. It mainly acts as a central point of communication for the replicas. Note that //! our consensus node will perform both the replica and leader roles simultaneously. -mod replica_commit; -mod replica_prepare; +pub(crate) mod replica_commit; +pub(crate) mod replica_prepare; mod state_machine; #[cfg(test)] mod tests; -#[cfg(test)] -pub(crate) use self::replica_commit::Error as ReplicaCommitError; -#[cfg(test)] -pub(crate) use self::replica_prepare::Error as ReplicaPrepareError; pub(crate) use self::state_machine::StateMachine; diff --git a/node/actors/bft/src/leader/replica_commit.rs b/node/actors/bft/src/leader/replica_commit.rs index f0d31d4a..2a794bd8 100644 --- a/node/actors/bft/src/leader/replica_commit.rs +++ b/node/actors/bft/src/leader/replica_commit.rs @@ -1,3 +1,4 @@ +//! Handler of a ReplicaCommit message. use super::StateMachine; use crate::metrics; use std::collections::HashMap; @@ -34,6 +35,9 @@ pub(crate) enum Error { /// The processing node is not a lead for this message's view. #[error("we are not a leader for this message's view")] NotLeaderInView, + /// Invalid message. + #[error("invalid message: {0:#}")] + InvalidMessage(anyhow::Error), /// Duplicate message from a replica. #[error("duplicate message from a replica (existing message: {existing_message:?}")] DuplicateMessage { @@ -55,28 +59,26 @@ impl StateMachine { // ----------- Checking origin of the message -------------- // Unwrap message. - let message = signed_message.msg; + let message = &signed_message.msg; let author = &signed_message.key; // Check protocol version compatibility. - if !crate::PROTOCOL_VERSION.compatible(&message.protocol_version) { + if !crate::PROTOCOL_VERSION.compatible(&message.view.protocol_version) { return Err(Error::IncompatibleProtocolVersion { - message_version: message.protocol_version, + message_version: message.view.protocol_version, local_version: crate::PROTOCOL_VERSION, }); } // Check that the message signer is in the validator set. - let validator_index = - self.config - .validator_set - .index(author) - .ok_or(Error::NonValidatorSigner { - signer: author.clone(), - })?; + if !self.config.genesis().validators.contains(author) { + return Err(Error::NonValidatorSigner { + signer: author.clone(), + }); + } // If the message is from the "past", we discard it. - if (message.view, validator::Phase::Commit) < (self.view, self.phase) { + if (message.view.number, validator::Phase::Commit) < (self.view, self.phase) { return Err(Error::Old { current_view: self.view, current_phase: self.phase, @@ -84,18 +86,24 @@ impl StateMachine { } // If the message is for a view when we are not a leader, we discard it. - if self.config.view_leader(message.view) != self.config.secret_key.public() { + if self + .config + .genesis() + .validators + .view_leader(message.view.number) + != self.config.secret_key.public() + { return Err(Error::NotLeaderInView); } // If we already have a message from the same validator and for the same view, we discard it. 
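// For reference, the quorum arithmetic behind `validators.threshold()` used a
// few lines below (formerly the standalone `misc::consensus_threshold`),
// assuming the 5f+1 fault model this codebase is built around:
//
//     fn faulty_replicas(n: usize) -> usize { (n - 1) / 5 }
//     fn consensus_threshold(n: usize) -> usize { n - faulty_replicas(n) }
//
//     // e.g. n = 6 tolerates f = 1 and needs 5 votes; n = 11 needs 9.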
if let Some(existing_message) = self .commit_message_cache - .get(&message.view) + .get(&message.view.number) .and_then(|x| x.get(author)) { return Err(Error::DuplicateMessage { - existing_message: existing_message.msg, + existing_message: existing_message.msg.clone(), }); } @@ -104,30 +112,40 @@ impl StateMachine { // Check the signature on the message. signed_message.verify().map_err(Error::InvalidSignature)?; + message + .verify(self.config.genesis()) + .map_err(Error::InvalidMessage)?; + // ----------- All checks finished. Now we process the message. -------------- + // TODO: we have a bug here since we don't check whether replicas commit + // to the same proposal. + // We add the message to the incrementally-constructed QC. self.commit_qcs - .entry(message.view) - .or_insert(CommitQC::new(message, &self.config.validator_set)) - .add(&signed_message.sig, validator_index); + .entry(message.view.number) + .or_insert_with(|| CommitQC::new(message.clone(), self.config.genesis())) + .add(&signed_message, self.config.genesis()); // We store the message in our cache. - let cache_entry = self.commit_message_cache.entry(message.view).or_default(); - cache_entry.insert(author.clone(), signed_message); + let cache_entry = self + .commit_message_cache + .entry(message.view.number) + .or_default(); + cache_entry.insert(author.clone(), signed_message.clone()); // Now we check if we have enough messages to continue. let mut by_proposal: HashMap<_, Vec<_>> = HashMap::new(); for msg in cache_entry.values() { by_proposal.entry(msg.msg.proposal).or_default().push(msg); } - let Some((_, replica_messages)) = by_proposal - .into_iter() - .find(|(_, v)| v.len() >= self.config.threshold()) + let threshold = self.config.genesis().validators.threshold(); + let Some((_, replica_messages)) = + by_proposal.into_iter().find(|(_, v)| v.len() >= threshold) else { return Ok(()); }; - debug_assert_eq!(replica_messages.len(), self.config.threshold()); + debug_assert_eq!(replica_messages.len(), threshold); // ----------- Update the state machine -------------- @@ -135,7 +153,7 @@ impl StateMachine { metrics::METRICS .leader_commit_phase_latency .observe_latency(now - self.phase_start); - self.view = message.view.next(); + self.view = message.view.number.next(); self.phase = validator::Phase::Prepare; self.phase_start = now; @@ -143,10 +161,10 @@ impl StateMachine { // Remove replica commit messages for this view, so that we don't create a new leader commit // for this same view if we receive another replica commit message after this. - self.commit_message_cache.remove(&message.view); + self.commit_message_cache.remove(&message.view.number); // Consume the incrementally-constructed QC for this view. - let justification = self.commit_qcs.remove(&message.view).unwrap(); + let justification = self.commit_qcs.remove(&message.view.number).unwrap(); // Broadcast the leader commit message to all replicas (ourselves included). let output_message = ConsensusInputMessage { @@ -154,10 +172,7 @@ impl StateMachine { .config .secret_key .sign_msg(validator::ConsensusMsg::LeaderCommit( - validator::LeaderCommit { - protocol_version: crate::PROTOCOL_VERSION, - justification, - }, + validator::LeaderCommit { justification }, )), recipient: Target::Broadcast, }; diff --git a/node/actors/bft/src/leader/replica_prepare.rs b/node/actors/bft/src/leader/replica_prepare.rs index b83798aa..4694f682 100644 --- a/node/actors/bft/src/leader/replica_prepare.rs +++ b/node/actors/bft/src/leader/replica_prepare.rs @@ -1,3 +1,4 @@ +//! 
Handler of a ReplicaPrepare message. use super::StateMachine; use tracing::instrument; use zksync_concurrency::{ctx, error::Wrap}; @@ -37,22 +38,12 @@ pub(crate) enum Error { /// Existing message from the same replica. existing_message: validator::ReplicaPrepare, }, - /// High QC of a future view. - #[error( - "high QC of a future view (high QC view: {high_qc_view:?}, current view: {current_view:?}" - )] - HighQCOfFutureView { - /// Received high QC view. - high_qc_view: validator::ViewNumber, - /// Current view. - current_view: validator::ViewNumber, - }, /// Invalid message signature. #[error("invalid signature: {0:#}")] InvalidSignature(#[source] validator::Error), - /// Invalid `HighQC` message. - #[error("invalid high QC: {0:#}")] - InvalidHighQC(#[source] anyhow::Error), + /// Invalid message. + #[error(transparent)] + InvalidMessage(validator::ReplicaPrepareVerifyError), /// Internal error. Unlike other error types, this one isn't supposed to be easily recoverable. #[error(transparent)] Internal(#[from] ctx::Error), @@ -84,24 +75,22 @@ impl StateMachine { let author = &signed_message.key; // Check protocol version compatibility. - if !crate::PROTOCOL_VERSION.compatible(&message.protocol_version) { + if !crate::PROTOCOL_VERSION.compatible(&message.view.protocol_version) { return Err(Error::IncompatibleProtocolVersion { - message_version: message.protocol_version, + message_version: message.view.protocol_version, local_version: crate::PROTOCOL_VERSION, }); } // Check that the message signer is in the validator set. - let validator_index = - self.config - .validator_set - .index(author) - .ok_or(Error::NonValidatorSigner { - signer: author.clone(), - })?; + if !self.config.genesis().validators.contains(author) { + return Err(Error::NonValidatorSigner { + signer: author.clone(), + }); + } // If the message is from the "past", we discard it. - if (message.view, validator::Phase::Prepare) < (self.view, self.phase) { + if (message.view.number, validator::Phase::Prepare) < (self.view, self.phase) { return Err(Error::Old { current_view: self.view, current_phase: self.phase, @@ -109,14 +98,20 @@ impl StateMachine { } // If the message is for a view when we are not a leader, we discard it. - if self.config.view_leader(message.view) != self.config.secret_key.public() { + if self + .config + .genesis() + .validators + .view_leader(message.view.number) + != self.config.secret_key.public() + { return Err(Error::NotLeaderInView); } // If we already have a message from the same validator and for the same view, we discard it. if let Some(existing_message) = self .prepare_message_cache - .get(&message.view) + .get(&message.view.number) .and_then(|x| x.get(author)) { return Err(Error::Exists { @@ -129,60 +124,50 @@ impl StateMachine { // Check the signature on the message. signed_message.verify().map_err(Error::InvalidSignature)?; - // ----------- Checking the contents of the message -------------- - - // Verify the high QC. + // Verify the message. message - .high_qc - .verify(&self.config.validator_set, self.config.threshold()) - .map_err(Error::InvalidHighQC)?; - - // If the high QC is for a future view, we discard the message. - // This check is not necessary for correctness, but it's useful to - // guarantee that our proposals don't contain QCs from the future. 
-        if message.high_qc.message.view >= message.view {
-            return Err(Error::HighQCOfFutureView {
-                high_qc_view: message.high_qc.message.view,
-                current_view: message.view,
-            });
-        }
+            .verify(self.config.genesis())
+            .map_err(Error::InvalidMessage)?;
 
         // ----------- All checks finished. Now we process the message. --------------
 
         // We add the message to the incrementally-constructed QC.
-        self.prepare_qcs.entry(message.view).or_default().add(
-            &signed_message,
-            validator_index,
-            &self.config.validator_set,
-        );
+        self.prepare_qcs
+            .entry(message.view.number)
+            .or_insert_with(|| validator::PrepareQC::new(message.view.clone()))
+            .add(&signed_message, self.config.genesis());
 
         // We store the message in our cache.
         self.prepare_message_cache
-            .entry(message.view)
+            .entry(message.view.number)
             .or_default()
             .insert(author.clone(), signed_message);
 
         // Now we check if we have enough messages to continue.
-        let num_messages = self.prepare_message_cache.get(&message.view).unwrap().len();
+        let num_messages = self
+            .prepare_message_cache
+            .get(&message.view.number)
+            .unwrap()
+            .len();
 
-        if num_messages < self.config.threshold() {
+        if num_messages < self.config.genesis().validators.threshold() {
             return Ok(());
         }
 
         // Remove replica prepare messages for this view, so that we don't create a new block proposal
         // for this same view if we receive another replica prepare message after this.
-        self.prepare_message_cache.remove(&message.view);
+        self.prepare_message_cache.remove(&message.view.number);
 
-        debug_assert_eq!(num_messages, self.config.threshold());
+        debug_assert_eq!(num_messages, self.config.genesis().validators.threshold());
 
         // ----------- Update the state machine --------------
 
-        self.view = message.view;
+        self.view = message.view.number;
         self.phase = validator::Phase::Commit;
         self.phase_start = ctx.now();
 
         // Consume the incrementally-constructed QC for this view.
-        let justification = self.prepare_qcs.remove(&message.view).unwrap();
+        let justification = self.prepare_qcs.remove(&message.view.number).unwrap();
 
         self.prepare_qc.send_replace(Some(justification));
         Ok(())
diff --git a/node/actors/bft/src/leader/state_machine.rs b/node/actors/bft/src/leader/state_machine.rs
index ad08d118..459b1e15 100644
--- a/node/actors/bft/src/leader/state_machine.rs
+++ b/node/actors/bft/src/leader/state_machine.rs
@@ -135,10 +135,10 @@ impl StateMachine {
             let Some(prepare_qc) = sync::changed(ctx, &mut prepare_qc).await?.clone() else {
                 continue;
             };
-            if prepare_qc.view() < next_view {
+            if prepare_qc.view.number < next_view {
                 continue;
             };
-            next_view = prepare_qc.view().next();
+            next_view = prepare_qc.view.number.next();
             Self::propose(ctx, config, prepare_qc, pipe).await?;
         }
     }
@@ -151,45 +151,31 @@ impl StateMachine {
         justification: validator::PrepareQC,
         pipe: &OutputSender,
     ) -> ctx::Result<()> {
-        // Get the highest block voted for and check if there's a quorum of votes for it. To have a quorum
-        // in this situation, we require 2*f+1 votes, where f is the maximum number of faulty replicas.
-        let mut count: HashMap<_, usize> = HashMap::new();
-        for (vote, signers) in &justification.map {
-            *count.entry(vote.high_vote.proposal).or_default() += signers.len();
-        }
-
-        let highest_vote: Option<validator::BlockHeader> = count
-            .iter()
-            // We only take one value from the iterator because there can only be at most one block with a quorum of 2f+1 votes.
-            .find_map(|(h, v)| (*v > 2 * cfg.faulty_replicas()).then_some(h))
-            .cloned();
-
-        // Get the highest validator::CommitQC.
- let highest_qc: &validator::CommitQC = justification - .map - .keys() - .map(|s| &s.high_qc) - .max_by_key(|qc| qc.message.view) - .unwrap(); + let high_vote = justification.high_vote(cfg.genesis()); + let high_qc = justification.high_qc(); // Create the block proposal to send to the replicas, // and the commit vote to store in our block proposal cache. - let (proposal, payload) = match highest_vote { + let (proposal, payload) = match high_vote { // The previous block was not finalized, so we need to propose it again. // For this we only need the header, since we are guaranteed that at least // f+1 honest replicas have the block and can broadcast it when finalized // (2f+1 have stated that they voted for the block, at most f are malicious). - Some(proposal) if proposal != highest_qc.message.proposal => (proposal, None), + Some(proposal) if Some(&proposal) != high_qc.map(|qc| &qc.message.proposal) => { + (proposal, None) + } // The previous block was finalized, so we can propose a new block. _ => { + let fork = &cfg.genesis().fork; + let (parent, number) = match high_qc { + Some(qc) => (Some(qc.header().hash()), qc.header().number.next()), + None => (fork.first_parent, fork.first_block), + }; // Defensively assume that PayloadManager cannot propose until the previous block is stored. - cfg.block_store - .wait_until_persisted(ctx, highest_qc.header().number) - .await?; - let payload = cfg - .payload_manager - .propose(ctx, highest_qc.header().number.next()) - .await?; + if let Some(prev) = number.prev() { + cfg.block_store.wait_until_persisted(ctx, prev).await?; + } + let payload = cfg.payload_manager.propose(ctx, number).await?; if payload.0.len() > cfg.max_payload_size { return Err(anyhow::format_err!( "proposed payload too large: got {}B, max {}B", @@ -201,8 +187,11 @@ impl StateMachine { metrics::METRICS .leader_proposal_payload_size .observe(payload.0.len()); - let proposal = - validator::BlockHeader::new(&highest_qc.message.proposal, payload.hash()); + let proposal = validator::BlockHeader { + number, + parent, + payload: payload.hash(), + }; (proposal, Some(payload)) } }; @@ -214,8 +203,6 @@ impl StateMachine { .secret_key .sign_msg(validator::ConsensusMsg::LeaderPrepare( validator::LeaderPrepare { - protocol_version: crate::PROTOCOL_VERSION, - view: justification.view(), proposal, proposal_payload: payload, justification, diff --git a/node/actors/bft/src/leader/tests.rs b/node/actors/bft/src/leader/tests.rs index 41f1ee6f..b74e4411 100644 --- a/node/actors/bft/src/leader/tests.rs +++ b/node/actors/bft/src/leader/tests.rs @@ -1,12 +1,10 @@ -use super::{ - replica_commit::Error as ReplicaCommitError, replica_prepare::Error as ReplicaPrepareError, -}; +use super::*; use crate::testonly::ut_harness::UTHarness; use assert_matches::assert_matches; use pretty_assertions::assert_eq; use rand::Rng; use zksync_concurrency::{ctx, scope}; -use zksync_consensus_roles::validator::{self, LeaderCommit, Phase, ViewNumber}; +use zksync_consensus_roles::validator::{self, Phase, ViewNumber}; #[tokio::test] async fn replica_prepare_sanity() { @@ -15,7 +13,7 @@ async fn replica_prepare_sanity() { scope::run!(ctx, |ctx, s| async { let (mut util, runner) = UTHarness::new_many(ctx).await; s.spawn_bg(runner.run(ctx)); - + tracing::info!("started"); util.new_leader_prepare(ctx).await; Ok(()) }) @@ -31,24 +29,24 @@ async fn replica_prepare_sanity_yield_leader_prepare() { let (mut util, runner) = UTHarness::new(ctx, 1).await; s.spawn_bg(runner.run(ctx)); - let replica_prepare = util.new_replica_prepare(|_| 
{}); + util.produce_block(ctx).await; + let replica_prepare = util.new_replica_prepare(); let leader_prepare = util - .process_replica_prepare(ctx, replica_prepare.clone()) + .process_replica_prepare(ctx, util.sign(replica_prepare.clone())) .await .unwrap() .unwrap(); - assert_eq!( - leader_prepare.msg.protocol_version, - replica_prepare.msg.protocol_version - ); - assert_eq!(leader_prepare.msg.view, replica_prepare.msg.view); + assert_eq!(leader_prepare.msg.view(), &replica_prepare.view); assert_eq!( leader_prepare.msg.proposal.parent, - replica_prepare.msg.high_vote.proposal.hash() + replica_prepare + .high_vote + .as_ref() + .map(|v| v.proposal.hash()), ); assert_eq!( leader_prepare.msg.justification, - util.new_prepare_qc(|msg| *msg = replica_prepare.msg) + util.new_prepare_qc(|msg| *msg = replica_prepare) ); Ok(()) }) @@ -66,19 +64,15 @@ async fn replica_prepare_sanity_yield_leader_prepare_reproposal() { util.new_replica_commit(ctx).await; util.process_replica_timeout(ctx).await; - let replica_prepare = util.new_replica_prepare(|_| {}).msg; + let replica_prepare = util.new_replica_prepare(); let leader_prepare = util .process_replica_prepare_all(ctx, replica_prepare.clone()) .await; + assert_eq!(leader_prepare.msg.view(), &replica_prepare.view); assert_eq!( - leader_prepare.msg.protocol_version, - replica_prepare.protocol_version - ); - assert_eq!(leader_prepare.msg.view, replica_prepare.view); - assert_eq!( - leader_prepare.msg.proposal, - replica_prepare.high_vote.proposal + Some(leader_prepare.msg.proposal), + replica_prepare.high_vote.as_ref().map(|v| v.proposal), ); assert_eq!(leader_prepare.msg.proposal_payload, None); let map = leader_prepare.msg.justification.map; @@ -99,13 +93,12 @@ async fn replica_prepare_incompatible_protocol_version() { s.spawn_bg(runner.run(ctx)); let incompatible_protocol_version = util.incompatible_protocol_version(); - let replica_prepare = util.new_replica_prepare(|msg| { - msg.protocol_version = incompatible_protocol_version; - }); - let res = util.process_replica_prepare(ctx, replica_prepare).await; + let mut replica_prepare = util.new_replica_prepare(); + replica_prepare.view.protocol_version = incompatible_protocol_version; + let res = util.process_replica_prepare(ctx, util.sign(replica_prepare)).await; assert_matches!( res, - Err(ReplicaPrepareError::IncompatibleProtocolVersion { message_version, local_version }) => { + Err(replica_prepare::Error::IncompatibleProtocolVersion { message_version, local_version }) => { assert_eq!(message_version, incompatible_protocol_version); assert_eq!(local_version, util.protocol_version()); } @@ -122,14 +115,14 @@ async fn replica_prepare_non_validator_signer() { let (mut util, runner) = UTHarness::new(ctx, 1).await; s.spawn_bg(runner.run(ctx)); - let replica_prepare = util.new_replica_prepare(|_| {}).msg; + let replica_prepare = util.new_replica_prepare(); let non_validator_key: validator::SecretKey = ctx.rng().gen(); let res = util .process_replica_prepare(ctx, non_validator_key.sign_msg(replica_prepare)) .await; assert_matches!( res, - Err(ReplicaPrepareError::NonValidatorSigner { signer }) => { + Err(replica_prepare::Error::NonValidatorSigner { signer }) => { assert_eq!(signer, non_validator_key.public()); } ); @@ -147,13 +140,15 @@ async fn replica_prepare_old_view() { let (mut util, runner) = UTHarness::new(ctx, 1).await; s.spawn_bg(runner.run(ctx)); - let replica_prepare = util.new_replica_prepare(|_| {}); + let replica_prepare = util.new_replica_prepare(); util.leader.view = util.replica.view.next(); 
util.leader.phase = Phase::Prepare; - let res = util.process_replica_prepare(ctx, replica_prepare).await; + let res = util + .process_replica_prepare(ctx, util.sign(replica_prepare)) + .await; assert_matches!( res, - Err(ReplicaPrepareError::Old { + Err(replica_prepare::Error::Old { current_view: ViewNumber(2), current_phase: Phase::Prepare, }) @@ -172,13 +167,15 @@ async fn replica_prepare_during_commit() { let (mut util, runner) = UTHarness::new(ctx, 1).await; s.spawn_bg(runner.run(ctx)); - let replica_prepare = util.new_replica_prepare(|_| {}); + let replica_prepare = util.new_replica_prepare(); util.leader.view = util.replica.view; util.leader.phase = Phase::Commit; - let res = util.process_replica_prepare(ctx, replica_prepare).await; + let res = util + .process_replica_prepare(ctx, util.sign(replica_prepare)) + .await; assert_matches!( res, - Err(ReplicaPrepareError::Old { + Err(replica_prepare::Error::Old { current_view, current_phase: Phase::Commit, }) => { @@ -199,12 +196,12 @@ async fn replica_prepare_not_leader_in_view() { let (mut util, runner) = UTHarness::new(ctx, 2).await; s.spawn_bg(runner.run(ctx)); - let replica_prepare = util.new_replica_prepare(|msg| { - // Moving to the next view changes the leader. - msg.view = msg.view.next(); - }); - let res = util.process_replica_prepare(ctx, replica_prepare).await; - assert_matches!(res, Err(ReplicaPrepareError::NotLeaderInView)); + let mut replica_prepare = util.new_replica_prepare(); + replica_prepare.view.number = replica_prepare.view.number.next(); + let res = util + .process_replica_prepare(ctx, util.sign(replica_prepare)) + .await; + assert_matches!(res, Err(replica_prepare::Error::NotLeaderInView)); Ok(()) }) .await @@ -220,7 +217,8 @@ async fn replica_prepare_already_exists() { s.spawn_bg(runner.run(ctx)); util.set_owner_as_view_leader(); - let replica_prepare = util.new_replica_prepare(|_| {}); + let replica_prepare = util.new_replica_prepare(); + let replica_prepare = util.sign(replica_prepare.clone()); assert!(util .process_replica_prepare(ctx, replica_prepare.clone()) .await @@ -231,7 +229,7 @@ async fn replica_prepare_already_exists() { .await; assert_matches!( res, - Err(ReplicaPrepareError::Exists { existing_message }) => { + Err(replica_prepare::Error::Exists { existing_message }) => { assert_eq!(existing_message, replica_prepare.msg); } ); @@ -250,9 +248,9 @@ async fn replica_prepare_num_received_below_threshold() { s.spawn_bg(runner.run(ctx)); util.set_owner_as_view_leader(); - let replica_prepare = util.new_replica_prepare(|_| {}); + let replica_prepare = util.new_replica_prepare(); assert!(util - .process_replica_prepare(ctx, replica_prepare) + .process_replica_prepare(ctx, util.sign(replica_prepare)) .await .unwrap() .is_none()); @@ -270,10 +268,11 @@ async fn replica_prepare_invalid_sig() { let (mut util, runner) = UTHarness::new(ctx, 1).await; s.spawn_bg(runner.run(ctx)); - let mut replica_prepare = util.new_replica_prepare(|_| {}); + let msg = util.new_replica_prepare(); + let mut replica_prepare = util.sign(msg); replica_prepare.sig = ctx.rng().gen(); let res = util.process_replica_prepare(ctx, replica_prepare).await; - assert_matches!(res, Err(ReplicaPrepareError::InvalidSignature(_))); + assert_matches!(res, Err(replica_prepare::Error::InvalidSignature(_))); Ok(()) }) .await @@ -284,39 +283,22 @@ async fn replica_prepare_invalid_sig() { async fn replica_prepare_invalid_commit_qc() { zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); + let rng = &mut ctx.rng(); 
 scope::run!(ctx, |ctx, s| async {
         let (mut util, runner) = UTHarness::new(ctx, 1).await;
         s.spawn_bg(runner.run(ctx));
 
-        let replica_prepare = util.new_replica_prepare(|msg| msg.high_qc = ctx.rng().gen());
-        let res = util.process_replica_prepare(ctx, replica_prepare).await;
-        assert_matches!(res, Err(ReplicaPrepareError::InvalidHighQC(..)));
-        Ok(())
-    })
-    .await
-    .unwrap();
-}
-
-#[tokio::test]
-async fn replica_prepare_high_qc_of_current_view() {
-    zksync_concurrency::testonly::abort_on_panic();
-    let ctx = &ctx::test_root(&ctx::RealClock);
-    scope::run!(ctx, |ctx, s| async {
-        let (mut util, runner) = UTHarness::new(ctx, 1).await;
-        s.spawn_bg(runner.run(ctx));
-
-        let view = ViewNumber(1);
-        let qc_view = ViewNumber(1);
-        util.set_view(view);
-        let qc = util.new_commit_qc(|msg| msg.view = qc_view);
-        let replica_prepare = util.new_replica_prepare(|msg| msg.high_qc = qc);
-        let res = util.process_replica_prepare(ctx, replica_prepare).await;
+        util.produce_block(ctx).await;
+        let mut replica_prepare = util.new_replica_prepare();
+        replica_prepare.high_qc.as_mut().unwrap().signature = rng.gen();
+        let res = util
+            .process_replica_prepare(ctx, util.sign(replica_prepare))
+            .await;
         assert_matches!(
             res,
-            Err(ReplicaPrepareError::HighQCOfFutureView { high_qc_view, current_view }) => {
-                assert_eq!(high_qc_view, qc_view);
-                assert_eq!(current_view, view);
-            }
+            Err(replica_prepare::Error::InvalidMessage(
+                validator::ReplicaPrepareVerifyError::HighQC(_)
+            ))
         );
         Ok(())
     })
@@ -324,6 +306,8 @@ async fn replica_prepare_high_qc_of_current_view() {
     .await
     .unwrap();
 }
 
+/// Check that the leader behaves correctly when receiving a ReplicaPrepare
+/// whose high_qc is for a future view (such a QC shouldn't be available yet).
 #[tokio::test]
 async fn replica_prepare_high_qc_of_future_view() {
     zksync_concurrency::testonly::abort_on_panic();
     let ctx = &ctx::test_root(&ctx::RealClock);
     scope::run!(ctx, |ctx, s| async {
         let (mut util, runner) = UTHarness::new(ctx, 1).await;
         s.spawn_bg(runner.run(ctx));
 
-        let view = ViewNumber(1);
-        let qc_view = ViewNumber(2);
-        util.set_view(view);
-        let qc = util.new_commit_qc(|msg| msg.view = qc_view);
-        let replica_prepare = util.new_replica_prepare(|msg| msg.high_qc = qc);
-        let res = util.process_replica_prepare(ctx, replica_prepare).await;
-        assert_matches!(
-            res,
-            Err(ReplicaPrepareError::HighQCOfFutureView{ high_qc_view, current_view }) => {
-                assert_eq!(high_qc_view, qc_view);
-                assert_eq!(current_view, view);
-            }
-        );
+        util.produce_block(ctx).await;
+        let mut view = util.replica_view();
+        let mut replica_prepare = util.new_replica_prepare();
+        // Check both the current view and next view.
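+        // The high QC may only certify a view strictly older than the ReplicaPrepare's
+        // own view, so both iterations below must be rejected as HighQCFutureView.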
+ for _ in 0..2 { + let qc = util.new_commit_qc(|msg| msg.view = view.clone()); + replica_prepare.high_qc = Some(qc); + let res = util + .process_replica_prepare(ctx, util.sign(replica_prepare.clone())) + .await; + assert_matches!( + res, + Err(replica_prepare::Error::InvalidMessage( + validator::ReplicaPrepareVerifyError::HighQCFutureView + )) + ); + view.number = view.number.next(); + } Ok(()) }) .await @@ -374,21 +363,16 @@ async fn replica_commit_sanity_yield_leader_commit() { let (mut util, runner) = UTHarness::new(ctx, 1).await; s.spawn_bg(runner.run(ctx)); + util.produce_block(ctx).await; let replica_commit = util.new_replica_commit(ctx).await; let leader_commit = util - .process_replica_commit(ctx, replica_commit.clone()) + .process_replica_commit(ctx, util.sign(replica_commit.clone())) .await .unwrap() .unwrap(); - assert_matches!( - leader_commit.msg, - LeaderCommit { - protocol_version, - justification, - } => { - assert_eq!(protocol_version, replica_commit.msg.protocol_version); - assert_eq!(justification, util.new_commit_qc(|msg| *msg = replica_commit.msg)); - } + assert_eq!( + leader_commit.msg.justification, + util.new_commit_qc(|msg| *msg = replica_commit) ); Ok(()) }) @@ -405,14 +389,14 @@ async fn replica_commit_incompatible_protocol_version() { s.spawn_bg(runner.run(ctx)); let incompatible_protocol_version = util.incompatible_protocol_version(); - let mut replica_commit = util.new_replica_commit(ctx).await.msg; - replica_commit.protocol_version = incompatible_protocol_version; + let mut replica_commit = util.new_replica_commit(ctx).await; + replica_commit.view.protocol_version = incompatible_protocol_version; let res = util - .process_replica_commit(ctx, util.owner_key().sign_msg(replica_commit)) + .process_replica_commit(ctx, util.sign(replica_commit)) .await; assert_matches!( res, - Err(ReplicaCommitError::IncompatibleProtocolVersion { message_version, local_version }) => { + Err(replica_commit::Error::IncompatibleProtocolVersion { message_version, local_version }) => { assert_eq!(message_version, incompatible_protocol_version); assert_eq!(local_version, util.protocol_version()); } @@ -429,14 +413,14 @@ async fn replica_commit_non_validator_signer() { let (mut util, runner) = UTHarness::new(ctx, 1).await; s.spawn_bg(runner.run(ctx)); - let replica_commit = util.new_replica_commit(ctx).await.msg; + let replica_commit = util.new_replica_commit(ctx).await; let non_validator_key: validator::SecretKey = ctx.rng().gen(); let res = util .process_replica_commit(ctx, non_validator_key.sign_msg(replica_commit)) .await; assert_matches!( res, - Err(ReplicaCommitError::NonValidatorSigner { signer }) => { + Err(replica_commit::Error::NonValidatorSigner { signer }) => { assert_eq!(signer, non_validator_key.public()); } ); @@ -454,13 +438,13 @@ async fn replica_commit_old() { let (mut util, runner) = UTHarness::new(ctx, 1).await; s.spawn_bg(runner.run(ctx)); - let mut replica_commit = util.new_replica_commit(ctx).await.msg; - replica_commit.view = util.replica.view.prev(); - let replica_commit = util.owner_key().sign_msg(replica_commit); + let mut replica_commit = util.new_replica_commit(ctx).await; + replica_commit.view.number = ViewNumber(util.replica.view.0 - 1); + let replica_commit = util.sign(replica_commit); let res = util.process_replica_commit(ctx, replica_commit).await; assert_matches!( res, - Err(ReplicaCommitError::Old { current_view, current_phase }) => { + Err(replica_commit::Error::Old { current_view, current_phase }) => { assert_eq!(current_view, util.replica.view); 
assert_eq!(current_phase, util.replica.phase); } @@ -479,12 +463,14 @@ async fn replica_commit_not_leader_in_view() { let (mut util, runner) = UTHarness::new(ctx, 2).await; s.spawn_bg(runner.run(ctx)); + util.produce_block(ctx).await; let current_view_leader = util.view_leader(util.replica.view); assert_ne!(current_view_leader, util.owner_key().public()); - - let replica_commit = util.new_current_replica_commit(|_| {}); - let res = util.process_replica_commit(ctx, replica_commit).await; - assert_matches!(res, Err(ReplicaCommitError::NotLeaderInView)); + let replica_commit = util.new_current_replica_commit(); + let res = util + .process_replica_commit(ctx, util.sign(replica_commit)) + .await; + assert_matches!(res, Err(replica_commit::Error::NotLeaderInView)); Ok(()) }) .await @@ -501,17 +487,17 @@ async fn replica_commit_already_exists() { let replica_commit = util.new_replica_commit(ctx).await; assert!(util - .process_replica_commit(ctx, replica_commit.clone()) + .process_replica_commit(ctx, util.sign(replica_commit.clone())) .await .unwrap() .is_none()); let res = util - .process_replica_commit(ctx, replica_commit.clone()) + .process_replica_commit(ctx, util.sign(replica_commit.clone())) .await; assert_matches!( res, - Err(ReplicaCommitError::DuplicateMessage { existing_message }) => { - assert_eq!(existing_message, replica_commit.msg) + Err(replica_commit::Error::DuplicateMessage { existing_message }) => { + assert_eq!(existing_message, replica_commit) } ); Ok(()) @@ -528,13 +514,13 @@ async fn replica_commit_num_received_below_threshold() { let (mut util, runner) = UTHarness::new(ctx, 2).await; s.spawn_bg(runner.run(ctx)); - let replica_prepare = util.new_replica_prepare(|_| {}); + let replica_prepare = util.new_replica_prepare(); assert!(util - .process_replica_prepare(ctx, replica_prepare.clone()) + .process_replica_prepare(ctx, util.sign(replica_prepare.clone())) .await .unwrap() .is_none()); - let replica_prepare = util.keys[1].sign_msg(replica_prepare.msg); + let replica_prepare = util.keys[1].sign_msg(replica_prepare); let leader_prepare = util .process_replica_prepare(ctx, replica_prepare) .await @@ -561,16 +547,20 @@ async fn replica_commit_invalid_sig() { let (mut util, runner) = UTHarness::new(ctx, 1).await; s.spawn_bg(runner.run(ctx)); - let mut replica_commit = util.new_current_replica_commit(|_| {}); + let msg = util.new_replica_commit(ctx).await; + let mut replica_commit = util.sign(msg); replica_commit.sig = ctx.rng().gen(); let res = util.process_replica_commit(ctx, replica_commit).await; - assert_matches!(res, Err(ReplicaCommitError::InvalidSignature(..))); + assert_matches!(res, Err(replica_commit::Error::InvalidSignature(..))); Ok(()) }) .await .unwrap(); } +/// ReplicaCommit received before sending out LeaderPrepare. +/// Whether leader accepts the message or rejects doesn't matter. +/// It just shouldn't crash. 
#[tokio::test] async fn replica_commit_unexpected_proposal() { zksync_concurrency::testonly::abort_on_panic(); @@ -579,10 +569,11 @@ async fn replica_commit_unexpected_proposal() { let (mut util, runner) = UTHarness::new(ctx, 1).await; s.spawn_bg(runner.run(ctx)); - let replica_commit = util.new_current_replica_commit(|_| {}); - util.process_replica_commit(ctx, replica_commit) - .await - .unwrap(); + util.produce_block(ctx).await; + let replica_commit = util.new_current_replica_commit(); + let _ = util + .process_replica_commit(ctx, util.sign(replica_commit)) + .await; Ok(()) }) .await diff --git a/node/actors/bft/src/lib.rs b/node/actors/bft/src/lib.rs index 74b750c2..67aa99f9 100644 --- a/node/actors/bft/src/lib.rs +++ b/node/actors/bft/src/lib.rs @@ -26,7 +26,6 @@ mod config; pub mod io; mod leader; mod metrics; -pub mod misc; mod replica; pub mod testonly; #[cfg(test)] diff --git a/node/actors/bft/src/misc.rs b/node/actors/bft/src/misc.rs deleted file mode 100644 index a8639000..00000000 --- a/node/actors/bft/src/misc.rs +++ /dev/null @@ -1,23 +0,0 @@ -//! Miscellaneous functions related to the consensus. - -/// Calculate the consensus threshold, the minimum number of votes for any consensus action to be valid, -/// for a given number of replicas. -pub fn consensus_threshold(num_validators: usize) -> usize { - let faulty_replicas = faulty_replicas(num_validators); - - // Return the consensus threshold, which is simply: - // t = n - f - num_validators - faulty_replicas -} - -/// Calculate the maximum number of faulty replicas, for a given number of replicas. -pub fn faulty_replicas(num_validators: usize) -> usize { - // Calculate the allowed maximum number of faulty replicas. We want the following relationship to hold: - // n = 5*f + 1 - // for n total replicas and f faulty replicas. This results in the following formula for the maximum - // number of faulty replicas: - // f = floor((n - 1) / 5) - // Because of this, it doesn't make sense to have 5*f + 2 or 5*f + 3 replicas. It won't increase the number - // of allowed faulty replicas. - (num_validators - 1) / 5 -} diff --git a/node/actors/bft/src/replica/block.rs b/node/actors/bft/src/replica/block.rs index 75d2404c..eb00cd9f 100644 --- a/node/actors/bft/src/replica/block.rs +++ b/node/actors/bft/src/replica/block.rs @@ -13,16 +13,22 @@ impl StateMachine { ctx: &ctx::Ctx, commit_qc: &validator::CommitQC, ) -> ctx::Result<()> { + // Update high_qc. + if self + .high_qc + .as_ref() + .map(|qc| qc.view().number < commit_qc.view().number) + .unwrap_or(true) + { + self.high_qc = Some(commit_qc.clone()); + } // TODO(gprusak): for availability of finalized blocks, // replicas should be able to broadcast highest quorums without // the corresponding block (same goes for synchronization). - let Some(cache) = self - .block_proposal_cache - .get(&commit_qc.message.proposal.number) - else { + let Some(cache) = self.block_proposal_cache.get(&commit_qc.header().number) else { return Ok(()); }; - let Some(payload) = cache.get(&commit_qc.message.proposal.payload) else { + let Some(payload) = cache.get(&commit_qc.header().payload) else { return Ok(()); }; let block = validator::FinalBlock { diff --git a/node/actors/bft/src/replica/leader_commit.rs b/node/actors/bft/src/replica/leader_commit.rs index a7e99e9e..926ac6e8 100644 --- a/node/actors/bft/src/replica/leader_commit.rs +++ b/node/actors/bft/src/replica/leader_commit.rs @@ -1,3 +1,4 @@ +//! Handler of a LeaderCommit message. 
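+//! A valid LeaderCommit carries a CommitQC; the replica saves the certified block
+//! (if it has the payload cached) and advances past the view that the QC certifies.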
use super::StateMachine; use tracing::instrument; use zksync_concurrency::{ctx, error::Wrap}; @@ -15,14 +16,12 @@ pub(crate) enum Error { local_version: ProtocolVersion, }, /// Invalid leader. - #[error( - "invalid leader (correct leader: {correct_leader:?}, received leader: {received_leader:?})" - )] - InvalidLeader { - /// Correct leader. - correct_leader: validator::PublicKey, + #[error("bad leader: got {got:?}, want {want:?}")] + BadLeader { /// Received leader. - received_leader: validator::PublicKey, + got: validator::PublicKey, + /// Correct leader. + want: validator::PublicKey, }, /// Past view of phase. #[error("past view/phase (current view: {current_view:?}, current phase: {current_phase:?})")] @@ -34,10 +33,10 @@ pub(crate) enum Error { }, /// Invalid message signature. #[error("invalid signature: {0:#}")] - InvalidSignature(#[source] validator::Error), - /// Invalid justification for the message. - #[error("invalid justification: {0:#}")] - InvalidJustification(#[source] anyhow::Error), + InvalidSignature(validator::Error), + /// Invalid message. + #[error("invalid message: {0:#}")] + InvalidMessage(validator::CommitQCVerifyError), /// Internal error. Unlike other error types, this one isn't supposed to be easily recoverable. #[error(transparent)] Internal(#[from] ctx::Error), @@ -69,26 +68,30 @@ impl StateMachine { // Unwrap message. let message = &signed_message.msg; let author = &signed_message.key; - let view = message.justification.message.view; // Check protocol version compatibility. - if !crate::PROTOCOL_VERSION.compatible(&message.protocol_version) { + if !crate::PROTOCOL_VERSION.compatible(&message.view().protocol_version) { return Err(Error::IncompatibleProtocolVersion { - message_version: message.protocol_version, + message_version: message.view().protocol_version, local_version: crate::PROTOCOL_VERSION, }); } // Check that it comes from the correct leader. - if author != &self.config.view_leader(view) { - return Err(Error::InvalidLeader { - correct_leader: self.config.view_leader(view), - received_leader: author.clone(), + let leader = self + .config + .genesis() + .validators + .view_leader(message.view().number); + if author != &leader { + return Err(Error::BadLeader { + want: leader, + got: author.clone(), }); } // If the message is from the "past", we discard it. - if (view, validator::Phase::Commit) < (self.view, self.phase) { + if (message.view().number, validator::Phase::Commit) < (self.view, self.phase) { return Err(Error::Old { current_view: self.view, current_phase: self.phase, @@ -99,14 +102,9 @@ impl StateMachine { // Check the signature on the message. signed_message.verify().map_err(Error::InvalidSignature)?; - - // ----------- Checking the justification of the message -------------- - - // Verify the QuorumCertificate. message - .justification - .verify(&self.config.validator_set, self.config.threshold()) - .map_err(Error::InvalidJustification)?; + .verify(self.config.genesis()) + .map_err(Error::InvalidMessage)?; // ----------- All checks finished. Now we process the message. -------------- @@ -115,14 +113,8 @@ impl StateMachine { .await .wrap("save_block()")?; - // Update the state machine. We don't update the view and phase (or backup our state) here - // because we will do it when we start the new view. - if message.justification.message.view >= self.high_qc.message.view { - self.high_qc = message.justification.clone(); - } - // Start a new view. But first we skip to the view of this message. 
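         // Jumping ahead is safe here: the CommitQC we just verified proves that a
         // quorum of validators already reached that view.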
- self.view = view; + self.view = message.view().number; self.start_new_view(ctx).await.wrap("start_new_view()")?; Ok(()) diff --git a/node/actors/bft/src/replica/leader_prepare.rs b/node/actors/bft/src/replica/leader_prepare.rs index 04b6a2e9..7e017bcb 100644 --- a/node/actors/bft/src/replica/leader_prepare.rs +++ b/node/actors/bft/src/replica/leader_prepare.rs @@ -1,5 +1,5 @@ +//! Handler of a LeaderPrepare message. use super::StateMachine; -use std::collections::HashMap; use tracing::instrument; use zksync_concurrency::{ctx, error::Wrap}; use zksync_consensus_network::io::{ConsensusInputMessage, Target}; @@ -39,76 +39,20 @@ pub(crate) enum Error { /// Invalid message signature. #[error("invalid signature: {0:#}")] InvalidSignature(#[source] validator::Error), - /// Invalid `PrepareQC` message. - #[error("invalid PrepareQC: {0:#}")] - InvalidPrepareQC(#[source] anyhow::Error), - /// Invalid `HighQC` message. - #[error("invalid high QC: {0:#}")] - InvalidHighQC(#[source] anyhow::Error), - /// High QC of a future view. - #[error( - "high QC of a future view (high QC view: {high_qc_view:?}, current view: {current_view:?}" - )] - HighQCOfFutureView { - /// Received high QC view. - high_qc_view: validator::ViewNumber, - /// Current view. - current_view: validator::ViewNumber, - }, + /// Invalid message. + #[error("invalid message: {0:#}")] + InvalidMessage(#[source] validator::LeaderPrepareVerifyError), /// Previous proposal was not finalized. - #[error("new block proposal when the previous proposal was not finalized")] - ProposalWhenPreviousNotFinalized, - /// Invalid parent hash. - #[error( - "block proposal with invalid parent hash (correct parent hash: {correct_parent_hash:#?}, \ - received parent hash: {received_parent_hash:#?}, block: {header:?})" - )] - ProposalInvalidParentHash { - /// Correct parent hash. - correct_parent_hash: validator::BlockHeaderHash, - /// Received parent hash. - received_parent_hash: validator::BlockHeaderHash, - /// Header including the incorrect parent hash. - header: validator::BlockHeader, - }, - /// Non-sequential proposal number. - #[error( - "block proposal with non-sequential number (correct proposal number: {correct_number}, \ - received proposal number: {received_number}, block: {header:?})" - )] - ProposalNonSequentialNumber { - /// Correct proposal number. - correct_number: validator::BlockNumber, - /// Received proposal number. - received_number: validator::BlockNumber, - /// Header including the incorrect proposal number. - header: validator::BlockHeader, - }, - /// Mismatched payload. - #[error("block proposal with mismatched payload")] - ProposalMismatchedPayload, + /// Oversized payload. - #[error( - "block proposal with an oversized payload (payload size: {payload_size}, block: {header:?}" - )] + #[error("block proposal with an oversized payload (payload size: {payload_size})")] ProposalOversizedPayload { /// Size of the payload. payload_size: usize, - /// Proposal header corresponding to the payload. - header: validator::BlockHeader, }, /// Invalid payload. #[error("invalid payload: {0:#}")] ProposalInvalidPayload(#[source] anyhow::Error), - /// Re-proposal without quorum. - #[error("block re-proposal without quorum for the re-proposal")] - ReproposalWithoutQuorum, - /// Re-proposal when the previous proposal was finalized. - #[error("block re-proposal when the previous proposal was finalized")] - ReproposalWhenFinalized, - /// Re-proposal of invalid block. 
- #[error("block re-proposal of invalid block")] - ReproposalInvalidBlock, /// Internal error. Unlike other error types, this one isn't supposed to be easily recoverable. #[error(transparent)] Internal(#[from] ctx::Error), @@ -139,20 +83,21 @@ impl StateMachine { // Unwrap message. let message = &signed_message.msg; let author = &signed_message.key; - let view = message.view; + let view = message.view().number; // Check protocol version compatibility. - if !crate::PROTOCOL_VERSION.compatible(&message.protocol_version) { + if !crate::PROTOCOL_VERSION.compatible(&message.view().protocol_version) { return Err(Error::IncompatibleProtocolVersion { - message_version: message.protocol_version, + message_version: message.view().protocol_version, local_version: crate::PROTOCOL_VERSION, }); } // Check that it comes from the correct leader. - if author != &self.config.view_leader(view) { + let leader = self.config.genesis().validators.view_leader(view); + if author != &leader { return Err(Error::InvalidLeader { - correct_leader: self.config.view_leader(view), + correct_leader: leader, received_leader: author.clone(), }); } @@ -165,136 +110,40 @@ impl StateMachine { }); } - // ----------- Checking the signed part of the message -------------- + // ----------- Checking the message -------------- signed_message.verify().map_err(Error::InvalidSignature)?; - - // ----------- Checking the justification of the message -------------- - - // Verify the PrepareQC. message - .justification - .verify(view, &self.config.validator_set, self.config.threshold()) - .map_err(Error::InvalidPrepareQC)?; - - // Get the highest block voted and check if there's a quorum of votes for it. To have a quorum - // in this situation, we require 2*f+1 votes, where f is the maximum number of faulty replicas. - let mut vote_count: HashMap<_, usize> = HashMap::new(); - - for (msg, signers) in &message.justification.map { - *vote_count.entry(msg.high_vote.proposal).or_default() += signers.len(); - } + .verify(self.config.genesis()) + .map_err(Error::InvalidMessage)?; + let high_qc = message.justification.high_qc(); - let highest_vote: Option = vote_count - .into_iter() - // We only take one value from the iterator because there can only be at most one block with a quorum of 2f+1 votes. - .find(|(_, v)| *v > 2 * self.config.faulty_replicas()) - .map(|(h, _)| h); - - // Get the highest CommitQC and verify it. - let highest_qc: validator::CommitQC = message - .justification - .map - .keys() - .max_by_key(|m| m.high_qc.message.view) - .unwrap() - .high_qc - .clone(); - - highest_qc - .verify(&self.config.validator_set, self.config.threshold()) - .map_err(Error::InvalidHighQC)?; - - // If the high QC is for a future view, we discard the message. - // This check is not necessary for correctness, but it's useful to - // guarantee that our messages don't contain QCs from the future. - if highest_qc.message.view >= view { - return Err(Error::HighQCOfFutureView { - high_qc_view: highest_qc.message.view, - current_view: view, - }); - } - - // Try to create a finalized block with this CommitQC and our block proposal cache. - // This gives us another chance to finalize a block that we may have missed before. - self.save_block(ctx, &highest_qc) - .await - .wrap("save_block()")?; - - // ----------- Checking the block proposal -------------- - - // Check that the proposal is valid. - match &message.proposal_payload { - // The leader proposed a new block. - Some(payload) => { - // Check that the payload doesn't exceed the maximum size. 
- if payload.0.len() > self.config.max_payload_size { - return Err(Error::ProposalOversizedPayload { - payload_size: payload.0.len(), - header: message.proposal, - }); - } - - // Check that payload matches the header - if message.proposal.payload != payload.hash() { - return Err(Error::ProposalMismatchedPayload); - } - - // Check that we finalized the previous block. - if highest_vote.is_some() - && highest_vote.as_ref() != Some(&highest_qc.message.proposal) - { - return Err(Error::ProposalWhenPreviousNotFinalized); - } - - // Parent hash should match. - if highest_qc.message.proposal.hash() != message.proposal.parent { - return Err(Error::ProposalInvalidParentHash { - correct_parent_hash: highest_qc.message.proposal.hash(), - received_parent_hash: message.proposal.parent, - header: message.proposal, - }); - } - - // Block number should match. - if highest_qc.message.proposal.number.next() != message.proposal.number { - return Err(Error::ProposalNonSequentialNumber { - correct_number: highest_qc.message.proposal.number.next(), - received_number: message.proposal.number, - header: message.proposal, - }); - } + // Check that the payload doesn't exceed the maximum size. + if let Some(payload) = &message.proposal_payload { + if payload.0.len() > self.config.max_payload_size { + return Err(Error::ProposalOversizedPayload { + payload_size: payload.0.len(), + }); + } - // Payload should be valid. + if let Some(prev) = message.proposal.number.prev() { // Defensively assume that PayloadManager cannot verify proposal until the previous block is stored. self.config .block_store - .wait_until_persisted(ctx, highest_qc.header().number) + .wait_until_persisted(ctx, prev) .await .map_err(ctx::Error::Canceled)?; - if let Err(err) = self - .config - .payload_manager - .verify(ctx, message.proposal.number, payload) - .await - { - return Err(match err { - err @ ctx::Error::Canceled(_) => Error::Internal(err), - ctx::Error::Internal(err) => Error::ProposalInvalidPayload(err), - }); - } } - // The leader is re-proposing a past block. - None => { - let Some(highest_vote) = highest_vote else { - return Err(Error::ReproposalWithoutQuorum); - }; - if highest_vote == highest_qc.message.proposal { - return Err(Error::ReproposalWhenFinalized); - } - if highest_vote != message.proposal { - return Err(Error::ReproposalInvalidBlock); - } + if let Err(err) = self + .config + .payload_manager + .verify(ctx, message.proposal.number, payload) + .await + { + return Err(match err { + err @ ctx::Error::Canceled(_) => Error::Internal(err), + ctx::Error::Internal(err) => Error::ProposalInvalidPayload(err), + }); } } @@ -302,20 +151,19 @@ impl StateMachine { // Create our commit vote. let commit_vote = validator::ReplicaCommit { - protocol_version: crate::PROTOCOL_VERSION, - view, + view: message.view().clone(), proposal: message.proposal, }; // Update the state machine. - self.view = view; + self.view = message.view().number; self.phase = validator::Phase::Commit; - self.high_vote = commit_vote; - - if highest_qc.message.view > self.high_qc.message.view { - self.high_qc = highest_qc; + self.high_vote = Some(commit_vote.clone()); + if let Some(high_qc) = high_qc { + // Try to create a finalized block with this CommitQC and our block proposal cache. + // This gives us another chance to finalize a block that we may have missed before. + self.save_block(ctx, high_qc).await.wrap("save_block()")?; } - // If we received a new block proposal, store it in our cache. 
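         // A cached proposal is finalized later, once a matching CommitQC arrives
         // (see save_block).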
         if let Some(payload) = &message.proposal_payload {
             self.block_proposal_cache
diff --git a/node/actors/bft/src/replica/mod.rs b/node/actors/bft/src/replica/mod.rs
index 69ede4b0..05a72481 100644
--- a/node/actors/bft/src/replica/mod.rs
+++ b/node/actors/bft/src/replica/mod.rs
@@ -3,16 +3,12 @@
 //! node will perform both the replica and leader roles simultaneously.
 
 mod block;
-mod leader_commit;
-mod leader_prepare;
+pub(crate) mod leader_commit;
+pub(crate) mod leader_prepare;
 mod new_view;
 mod state_machine;
 #[cfg(test)]
 mod tests;
 mod timer;
 
-#[cfg(test)]
-pub(crate) use self::leader_commit::Error as LeaderCommitError;
-#[cfg(test)]
-pub(crate) use self::leader_prepare::Error as LeaderPrepareError;
 pub(crate) use self::state_machine::StateMachine;
diff --git a/node/actors/bft/src/replica/new_view.rs b/node/actors/bft/src/replica/new_view.rs
index b1f73d7a..eaf9c7da 100644
--- a/node/actors/bft/src/replica/new_view.rs
+++ b/node/actors/bft/src/replica/new_view.rs
@@ -11,14 +11,13 @@ impl StateMachine {
         tracing::info!("Starting view {}", self.view.next().0);
 
         // Update the state machine.
-        let next_view = self.view.next();
-
-        self.view = next_view;
+        self.view = self.view.next();
         self.phase = validator::Phase::Prepare;
-
-        // Clear the block cache.
-        self.block_proposal_cache
-            .retain(|k, _| k > &self.high_qc.message.proposal.number);
+        if let Some(qc) = self.high_qc.as_ref() {
+            // Clear the block cache.
+            self.block_proposal_cache
+                .retain(|k, _| k > &qc.header().number);
+        }
 
         // Backup our state.
         self.backup_state(ctx).await.wrap("backup_state()")?;
@@ -30,13 +29,16 @@
             .secret_key
             .sign_msg(validator::ConsensusMsg::ReplicaPrepare(
                 validator::ReplicaPrepare {
-                    protocol_version: crate::PROTOCOL_VERSION,
-                    view: next_view,
-                    high_vote: self.high_vote,
+                    view: validator::View {
+                        protocol_version: crate::PROTOCOL_VERSION,
+                        fork: self.config.genesis().fork.number,
+                        number: self.view,
+                    },
+                    high_vote: self.high_vote.clone(),
                     high_qc: self.high_qc.clone(),
                 },
             )),
-            recipient: Target::Validator(self.config.view_leader(next_view)),
+            recipient: Target::Validator(self.config.genesis().validators.view_leader(self.view)),
         };
         self.outbound_pipe.send(output_message.into());
 
diff --git a/node/actors/bft/src/replica/state_machine.rs b/node/actors/bft/src/replica/state_machine.rs
index 15e62335..f1dd946e 100644
--- a/node/actors/bft/src/replica/state_machine.rs
+++ b/node/actors/bft/src/replica/state_machine.rs
@@ -23,9 +23,9 @@ pub(crate) struct StateMachine {
     /// The current phase.
     pub(crate) phase: validator::Phase,
     /// The highest block proposal that the replica has committed to.
-    pub(crate) high_vote: validator::ReplicaCommit,
+    pub(crate) high_vote: Option<validator::ReplicaCommit>,
     /// The highest commit quorum certificate known to the replica.
-    pub(crate) high_qc: validator::CommitQC,
+    pub(crate) high_qc: Option<validator::CommitQC>,
     /// A cache of the received block proposals.
     pub(crate) block_proposal_cache: BTreeMap<validator::BlockNumber, HashMap<validator::PayloadHash, validator::Payload>>,
@@ -45,10 +45,7 @@ impl StateMachine {
         config: Arc<Config>,
         outbound_pipe: OutputSender,
     ) -> ctx::Result<(Self, sync::prunable_mpsc::Sender)> {
-        let backup = match config.replica_store.state(ctx).await?
{ - Some(backup) => backup, - None => config.block_store.subscribe().borrow().last.clone().into(), - }; + let backup = config.replica_store.state(ctx).await?; let mut block_proposal_cache: BTreeMap<_, HashMap<_, _>> = BTreeMap::new(); for proposal in backup.proposals { block_proposal_cache @@ -152,7 +149,7 @@ impl StateMachine { let backup = storage::ReplicaState { view: self.view, phase: self.phase, - high_vote: self.high_vote, + high_vote: self.high_vote.clone(), high_qc: self.high_qc.clone(), proposals, }; diff --git a/node/actors/bft/src/replica/tests.rs b/node/actors/bft/src/replica/tests.rs index 0cd2f731..3866e813 100644 --- a/node/actors/bft/src/replica/tests.rs +++ b/node/actors/bft/src/replica/tests.rs @@ -10,39 +10,32 @@ use zksync_consensus_roles::validator::{ self, CommitQC, Payload, PrepareQC, ReplicaCommit, ReplicaPrepare, ViewNumber, }; +/// Sanity check of the happy path. #[tokio::test] -async fn leader_prepare_sanity() { +async fn block_production() { zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); scope::run!(ctx, |ctx, s| async { let (mut util, runner) = UTHarness::new_many(ctx).await; s.spawn_bg(runner.run(ctx)); - - let leader_prepare = util.new_leader_prepare(ctx).await; - util.process_leader_prepare(ctx, leader_prepare) - .await - .unwrap(); + util.produce_block(ctx).await; Ok(()) }) .await .unwrap(); } +/// Sanity check of block production with reproposal. #[tokio::test] -async fn leader_prepare_reproposal_sanity() { +async fn reproposal_block_production() { zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); scope::run!(ctx, |ctx, s| async { let (mut util, runner) = UTHarness::new_many(ctx).await; s.spawn_bg(runner.run(ctx)); - - util.new_replica_commit(ctx).await; + util.new_leader_commit(ctx).await; util.process_replica_timeout(ctx).await; - let leader_prepare = util.new_leader_prepare(ctx).await; - assert!(leader_prepare.msg.proposal_payload.is_none()); - util.process_leader_prepare(ctx, leader_prepare) - .await - .unwrap(); + util.produce_block(ctx).await; Ok(()) }) .await @@ -58,10 +51,10 @@ async fn leader_prepare_incompatible_protocol_version() { s.spawn_bg(runner.run(ctx)); let incompatible_protocol_version = util.incompatible_protocol_version(); - let mut leader_prepare = util.new_leader_prepare(ctx).await.msg; - leader_prepare.protocol_version = incompatible_protocol_version; + let mut leader_prepare = util.new_leader_prepare(ctx).await; + leader_prepare.justification.view.protocol_version = incompatible_protocol_version; let res = util - .process_leader_prepare(ctx, util.owner_key().sign_msg(leader_prepare)) + .process_leader_prepare(ctx, util.sign(leader_prepare)) .await; assert_matches!( res, @@ -84,15 +77,14 @@ async fn leader_prepare_sanity_yield_replica_commit() { let leader_prepare = util.new_leader_prepare(ctx).await; let replica_commit = util - .process_leader_prepare(ctx, leader_prepare.clone()) + .process_leader_prepare(ctx, util.sign(leader_prepare.clone())) .await .unwrap(); assert_eq!( replica_commit.msg, ReplicaCommit { - protocol_version: leader_prepare.msg.protocol_version, - view: leader_prepare.msg.view, - proposal: leader_prepare.msg.proposal, + view: leader_prepare.view().clone(), + proposal: leader_prepare.proposal, } ); Ok(()) @@ -113,25 +105,29 @@ async fn leader_prepare_invalid_leader() { util.set_view(view); assert_eq!(util.view_leader(view), util.keys[0].public()); - let replica_prepare = util.new_replica_prepare(|_| {}); + let replica_prepare = 
util.new_replica_prepare(); assert!(util - .process_replica_prepare(ctx, replica_prepare.clone()) + .process_replica_prepare(ctx, util.sign(replica_prepare.clone())) .await .unwrap() .is_none()); - let replica_prepare = util.keys[1].sign_msg(replica_prepare.msg); + let replica_prepare = util.keys[1].sign_msg(replica_prepare); let mut leader_prepare = util .process_replica_prepare(ctx, replica_prepare) .await .unwrap() .unwrap() .msg; - leader_prepare.view = leader_prepare.view.next(); - assert_ne!(util.view_leader(leader_prepare.view), util.keys[0].public()); + leader_prepare.justification.view.number = leader_prepare.justification.view.number.next(); + assert_ne!( + util.view_leader(leader_prepare.view().number), + util.keys[0].public() + ); - let leader_prepare = util.owner_key().sign_msg(leader_prepare); - let res = util.process_leader_prepare(ctx, leader_prepare).await; + let res = util + .process_leader_prepare(ctx, util.sign(leader_prepare)) + .await; assert_matches!( res, Err(leader_prepare::Error::InvalidLeader { correct_leader, received_leader }) => { @@ -153,10 +149,11 @@ async fn leader_prepare_old_view() { let (mut util, runner) = UTHarness::new(ctx, 1).await; s.spawn_bg(runner.run(ctx)); - let mut leader_prepare = util.new_leader_prepare(ctx).await.msg; - leader_prepare.view = util.replica.view.prev(); - let leader_prepare = util.owner_key().sign_msg(leader_prepare); - let res = util.process_leader_prepare(ctx, leader_prepare).await; + let mut leader_prepare = util.new_leader_prepare(ctx).await; + leader_prepare.justification.view.number.0 = util.replica.view.0 - 1; + let res = util + .process_leader_prepare(ctx, util.sign(leader_prepare)) + .await; assert_matches!( res, Err(leader_prepare::Error::Old { current_view, current_phase }) => { @@ -183,17 +180,17 @@ async fn leader_prepare_invalid_payload() { let leader_prepare = util.new_leader_prepare(ctx).await; // Insert a finalized block to the storage. 
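+        // The harness runs a single validator here, so the one signature added below
+        // already forms a full quorum for this CommitQC.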
+ let mut justification = CommitQC::new( + ReplicaCommit { + view: util.replica_view(), + proposal: leader_prepare.proposal, + }, + util.genesis(), + ); + justification.add(&util.sign(justification.message.clone()), util.genesis()); let block = validator::FinalBlock { - payload: leader_prepare.msg.proposal_payload.clone().unwrap(), - justification: CommitQC::from( - &[util.keys[0].sign_msg(ReplicaCommit { - protocol_version: util.protocol_version(), - view: util.replica.view, - proposal: leader_prepare.msg.proposal, - })], - &util.validator_set(), - ) - .unwrap(), + payload: leader_prepare.proposal_payload.clone().unwrap(), + justification, }; util.replica .config @@ -202,7 +199,9 @@ async fn leader_prepare_invalid_payload() { .await .unwrap(); - let res = util.process_leader_prepare(ctx, leader_prepare).await; + let res = util + .process_leader_prepare(ctx, util.sign(leader_prepare)) + .await; assert_matches!(res, Err(leader_prepare::Error::ProposalInvalidPayload(..))); Ok(()) }) @@ -217,8 +216,8 @@ async fn leader_prepare_invalid_sig() { scope::run!(ctx, |ctx, s| async { let (mut util, runner) = UTHarness::new(ctx, 1).await; s.spawn_bg(runner.run(ctx)); - - let mut leader_prepare = util.new_leader_prepare(ctx).await; + let leader_prepare = util.new_leader_prepare(ctx).await; + let mut leader_prepare = util.sign(leader_prepare); leader_prepare.sig = ctx.rng().gen(); let res = util.process_leader_prepare(ctx, leader_prepare).await; assert_matches!(res, Err(leader_prepare::Error::InvalidSignature(..))); @@ -236,30 +235,17 @@ async fn leader_prepare_invalid_prepare_qc() { let (mut util, runner) = UTHarness::new(ctx, 1).await; s.spawn_bg(runner.run(ctx)); - let mut leader_prepare = util.new_leader_prepare(ctx).await.msg; - leader_prepare.justification = ctx.rng().gen(); - let leader_prepare = util.owner_key().sign_msg(leader_prepare); - let res = util.process_leader_prepare(ctx, leader_prepare).await; - assert_matches!(res, Err(leader_prepare::Error::InvalidPrepareQC(_))); - Ok(()) - }) - .await - .unwrap(); -} - -#[tokio::test] -async fn leader_prepare_invalid_high_qc() { - zksync_concurrency::testonly::abort_on_panic(); - let ctx = &ctx::test_root(&ctx::RealClock); - scope::run!(ctx, |ctx, s| async { - let (mut util, runner) = UTHarness::new(ctx, 1).await; - s.spawn_bg(runner.run(ctx)); - - let mut leader_prepare = util.new_leader_prepare(ctx).await.msg; - leader_prepare.justification = util.new_prepare_qc(|msg| msg.high_qc = ctx.rng().gen()); - let leader_prepare = util.owner_key().sign_msg(leader_prepare); - let res = util.process_leader_prepare(ctx, leader_prepare).await; - assert_matches!(res, Err(leader_prepare::Error::InvalidHighQC(_))); + let mut leader_prepare = util.new_leader_prepare(ctx).await; + leader_prepare.justification.signature = ctx.rng().gen(); + let res = util + .process_leader_prepare(ctx, util.sign(leader_prepare)) + .await; + assert_matches!( + res, + Err(leader_prepare::Error::InvalidMessage( + validator::LeaderPrepareVerifyError::Justification(_) + )) + ); Ok(()) }) .await @@ -275,18 +261,17 @@ async fn leader_prepare_proposal_oversized_payload() { s.spawn_bg(runner.run(ctx)); let payload_oversize = MAX_PAYLOAD_SIZE + 1; - let payload_vec = vec![0; payload_oversize]; - let mut leader_prepare = util.new_leader_prepare(ctx).await.msg; - leader_prepare.proposal_payload = Some(Payload(payload_vec)); - let leader_prepare = util.owner_key().sign_msg(leader_prepare); + let payload = Payload(vec![0; payload_oversize]); + let mut leader_prepare = 
util.new_leader_prepare(ctx).await; + leader_prepare.proposal.payload = payload.hash(); + leader_prepare.proposal_payload = Some(payload); let res = util - .process_leader_prepare(ctx, leader_prepare.clone()) + .process_leader_prepare(ctx, util.sign(leader_prepare)) .await; assert_matches!( res, - Err(leader_prepare::Error::ProposalOversizedPayload{ payload_size, header }) => { + Err(leader_prepare::Error::ProposalOversizedPayload{ payload_size }) => { assert_eq!(payload_size, payload_oversize); - assert_eq!(header, leader_prepare.msg.proposal); } ); Ok(()) @@ -303,11 +288,17 @@ async fn leader_prepare_proposal_mismatched_payload() { let (mut util, runner) = UTHarness::new(ctx, 1).await; s.spawn_bg(runner.run(ctx)); - let mut leader_prepare = util.new_leader_prepare(ctx).await.msg; + let mut leader_prepare = util.new_leader_prepare(ctx).await; leader_prepare.proposal_payload = Some(ctx.rng().gen()); - let leader_prepare = util.owner_key().sign_msg(leader_prepare); - let res = util.process_leader_prepare(ctx, leader_prepare).await; - assert_matches!(res, Err(leader_prepare::Error::ProposalMismatchedPayload)); + let res = util + .process_leader_prepare(ctx, util.sign(leader_prepare)) + .await; + assert_matches!( + res, + Err(leader_prepare::Error::InvalidMessage( + validator::LeaderPrepareVerifyError::ProposalMismatchedPayload + )) + ); Ok(()) }) .await @@ -318,23 +309,28 @@ async fn leader_prepare_proposal_mismatched_payload() { async fn leader_prepare_proposal_when_previous_not_finalized() { zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); + let rng = &mut ctx.rng(); scope::run!(ctx, |ctx, s| async { let (mut util, runner) = UTHarness::new(ctx, 1).await; s.spawn_bg(runner.run(ctx)); - let replica_prepare = util.new_replica_prepare(|_| {}); - let mut leader_prepare = util - .process_replica_prepare(ctx, replica_prepare) - .await - .unwrap() - .unwrap() - .msg; - leader_prepare.justification = util.new_prepare_qc(|msg| msg.high_vote = ctx.rng().gen()); - let leader_prepare = util.owner_key().sign_msg(leader_prepare); - let res = util.process_leader_prepare(ctx, leader_prepare).await; + tracing::info!("Execute view without replicas receiving the LeaderCommit."); + util.new_leader_commit(ctx).await; + util.process_replica_timeout(ctx).await; + tracing::info!("Make leader repropose the block."); + let mut leader_prepare = util.new_leader_prepare(ctx).await; + tracing::info!("Modify the message to include a new proposal anyway."); + let payload: Payload = rng.gen(); + leader_prepare.proposal.payload = payload.hash(); + leader_prepare.proposal_payload = Some(payload); + let res = util + .process_leader_prepare(ctx, util.sign(leader_prepare)) + .await; assert_matches!( res, - Err(leader_prepare::Error::ProposalWhenPreviousNotFinalized) + Err(leader_prepare::Error::InvalidMessage( + validator::LeaderPrepareVerifyError::ProposalWhenPreviousNotFinalized + )) ); Ok(()) }) @@ -343,37 +339,26 @@ async fn leader_prepare_proposal_when_previous_not_finalized() { } #[tokio::test] -async fn leader_prepare_proposal_invalid_parent_hash() { +async fn leader_prepare_bad_parent_hash() { zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); scope::run!(ctx, |ctx, s| async { let (mut util, runner) = UTHarness::new(ctx, 1).await; s.spawn_bg(runner.run(ctx)); - let replica_prepare = util.new_replica_prepare(|_| {}); - let mut leader_prepare = util - .process_replica_prepare(ctx, replica_prepare.clone()) - .await - .unwrap() - .unwrap() - 
.msg; - leader_prepare.proposal.parent = ctx.rng().gen(); - let leader_prepare = util.owner_key().sign_msg(leader_prepare); - let res = util - .process_leader_prepare(ctx, leader_prepare.clone()) - .await; - assert_matches!( - res, - Err(leader_prepare::Error::ProposalInvalidParentHash { - correct_parent_hash, - received_parent_hash, - header - }) => { - assert_eq!(correct_parent_hash, replica_prepare.msg.high_vote.proposal.hash()); - assert_eq!(received_parent_hash, leader_prepare.msg.proposal.parent); - assert_eq!(header, leader_prepare.msg.proposal); - } - ); + tracing::info!("Produce initial block."); + util.produce_block(ctx).await; + tracing::info!("Make leader propose the next block."); + let mut leader_prepare = util.new_leader_prepare(ctx).await; + tracing::info!("Modify the proposal.parent so that it doesn't match the previous block"); + leader_prepare.proposal.parent = Some(ctx.rng().gen()); + let res = util.process_leader_prepare(ctx, util.sign(leader_prepare.clone())).await; + assert_matches!(res, Err(leader_prepare::Error::InvalidMessage( + validator::LeaderPrepareVerifyError::BadParentHash { got, want } + )) => { + assert_eq!(want, Some(leader_prepare.justification.high_qc().unwrap().message.proposal.hash())); + assert_eq!(got, leader_prepare.proposal.parent); + }); Ok(()) }) .await @@ -381,37 +366,27 @@ async fn leader_prepare_proposal_invalid_parent_hash() { } #[tokio::test] -async fn leader_prepare_proposal_non_sequential_number() { +async fn leader_prepare_bad_block_number() { zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); + let rng = &mut ctx.rng(); scope::run!(ctx, |ctx,s| async { let (mut util,runner) = UTHarness::new(ctx,1).await; s.spawn_bg(runner.run(ctx)); - let replica_prepare = util.new_replica_prepare(|_| {}); - let mut leader_prepare = util - .process_replica_prepare(ctx, replica_prepare.clone()) - .await - .unwrap() - .unwrap() - .msg; - let correct_num = replica_prepare.msg.high_vote.proposal.number.next(); - assert_eq!(correct_num, leader_prepare.proposal.number); - - let non_seq_num = correct_num.next(); - leader_prepare.proposal.number = non_seq_num; - let leader_prepare = util.owner_key().sign_msg(leader_prepare); - let res = util - .process_leader_prepare(ctx, leader_prepare.clone()) - .await; - assert_matches!( - res, - Err(leader_prepare::Error::ProposalNonSequentialNumber { correct_number, received_number, header }) => { - assert_eq!(correct_number, correct_num); - assert_eq!(received_number, non_seq_num); - assert_eq!(header, leader_prepare.msg.proposal); - } - ); + tracing::info!("Produce initial block."); + util.produce_block(ctx).await; + tracing::info!("Make leader propose the next block."); + let mut leader_prepare = util.new_leader_prepare(ctx).await; + tracing::info!("Modify the proposal.number so that it doesn't match the previous block"); + leader_prepare.proposal.number = rng.gen(); + let res = util.process_leader_prepare(ctx, util.sign(leader_prepare.clone())).await; + assert_matches!(res, Err(leader_prepare::Error::InvalidMessage( + validator::LeaderPrepareVerifyError::BadBlockNumber { got, want } + )) => { + assert_eq!(want, leader_prepare.justification.high_qc().unwrap().message.proposal.number.next()); + assert_eq!(got, leader_prepare.proposal.number); + }); Ok(()) }).await.unwrap(); } @@ -425,29 +400,34 @@ async fn leader_prepare_reproposal_without_quorum() { let (mut util, runner) = UTHarness::new_many(ctx).await; s.spawn_bg(runner.run(ctx)); - let replica_prepare = 
util.new_replica_prepare(|_| {}).msg; - let mut leader_prepare = util - .process_replica_prepare_all(ctx, replica_prepare.clone()) - .await - .msg; - - // Turn leader_prepare into an unjustified reproposal. - let replica_prepares: Vec<_> = util - .keys - .iter() - .map(|k| { - let mut msg = replica_prepare.clone(); - msg.high_vote = rng.gen(); - k.sign_msg(msg) - }) - .collect(); - leader_prepare.justification = - PrepareQC::from(&replica_prepares, &util.validator_set()).unwrap(); - leader_prepare.proposal_payload = None; - - let leader_prepare = util.keys[0].sign_msg(leader_prepare); - let res = util.process_leader_prepare(ctx, leader_prepare).await; - assert_matches!(res, Err(leader_prepare::Error::ReproposalWithoutQuorum)); + tracing::info!("make leader repropose a block"); + util.new_leader_commit(ctx).await; + util.process_replica_timeout(ctx).await; + let mut leader_prepare = util.new_leader_prepare(ctx).await; + tracing::info!("modify justification, to make reproposal unjustified"); + let mut replica_prepare: ReplicaPrepare = leader_prepare + .justification + .map + .keys() + .next() + .unwrap() + .clone(); + leader_prepare.justification = PrepareQC::new(leader_prepare.justification.view); + for key in &util.keys { + replica_prepare.high_vote.as_mut().unwrap().proposal.payload = rng.gen(); + leader_prepare + .justification + .add(&key.sign_msg(replica_prepare.clone()), util.genesis()); + } + let res = util + .process_leader_prepare(ctx, util.sign(leader_prepare)) + .await; + assert_matches!( + res, + Err(leader_prepare::Error::InvalidMessage( + validator::LeaderPrepareVerifyError::ReproposalWithoutQuorum + )) + ); Ok(()) }) .await @@ -462,11 +442,28 @@ async fn leader_prepare_reproposal_when_finalized() { let (mut util, runner) = UTHarness::new(ctx, 1).await; s.spawn_bg(runner.run(ctx)); - let mut leader_prepare = util.new_leader_prepare(ctx).await.msg; + tracing::info!("Make leader propose a new block"); + util.produce_block(ctx).await; + let mut leader_prepare = util.new_leader_prepare(ctx).await; + tracing::info!( + "Modify the message so that it is actually a reproposal of the previous block" + ); + leader_prepare.proposal = leader_prepare + .justification + .high_qc() + .unwrap() + .message + .proposal; leader_prepare.proposal_payload = None; - let leader_prepare = util.owner_key().sign_msg(leader_prepare); - let res = util.process_leader_prepare(ctx, leader_prepare).await; - assert_matches!(res, Err(leader_prepare::Error::ReproposalWhenFinalized)); + let res = util + .process_leader_prepare(ctx, util.sign(leader_prepare)) + .await; + assert_matches!( + res, + Err(leader_prepare::Error::InvalidMessage( + validator::LeaderPrepareVerifyError::ReproposalWhenFinalized + )) + ); Ok(()) }) .await @@ -477,40 +474,33 @@ async fn leader_prepare_reproposal_when_finalized() { async fn leader_prepare_reproposal_invalid_block() { zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); + let rng = &mut ctx.rng(); scope::run!(ctx, |ctx, s| async { let (mut util, runner) = UTHarness::new(ctx, 1).await; s.spawn_bg(runner.run(ctx)); - let mut leader_prepare = util.new_leader_prepare(ctx).await.msg; - leader_prepare.justification = util.new_prepare_qc(|msg| msg.high_vote = ctx.rng().gen()); - leader_prepare.proposal_payload = None; - let leader_prepare = util.owner_key().sign_msg(leader_prepare); - let res = util.process_leader_prepare(ctx, leader_prepare).await; - assert_matches!(res, Err(leader_prepare::Error::ReproposalInvalidBlock)); - Ok(()) - }) - .await 
- .unwrap(); -} - -#[tokio::test] -async fn leader_commit_sanity() { - zksync_concurrency::testonly::abort_on_panic(); - let ctx = &ctx::test_root(&ctx::RealClock); - scope::run!(ctx, |ctx, s| async { - let (mut util, runner) = UTHarness::new_many(ctx).await; - s.spawn_bg(runner.run(ctx)); - - let leader_commit = util.new_leader_commit(ctx).await; - util.process_leader_commit(ctx, leader_commit) - .await - .unwrap(); + tracing::info!("Make leader repropose a block."); + util.new_leader_commit(ctx).await; + util.process_replica_timeout(ctx).await; + let mut leader_prepare = util.new_leader_prepare(ctx).await; + tracing::info!("Make the reproposal different from the expected one."); + leader_prepare.proposal.payload = rng.gen(); + let res = util + .process_leader_prepare(ctx, util.sign(leader_prepare)) + .await; + assert_matches!( + res, + Err(leader_prepare::Error::InvalidMessage( + validator::LeaderPrepareVerifyError::ReproposalBadBlock + )) + ); Ok(()) }) .await .unwrap(); } +/// Check that the replica provides the expected high_vote and high_qc after finalizing a block. #[tokio::test] async fn leader_commit_sanity_yield_replica_prepare() { zksync_concurrency::testonly::abort_on_panic(); @@ -521,16 +511,17 @@ async fn leader_commit_sanity_yield_replica_prepare() { let leader_commit = util.new_leader_commit(ctx).await; let replica_prepare = util - .process_leader_commit(ctx, leader_commit.clone()) + .process_leader_commit(ctx, util.sign(leader_commit.clone())) .await .unwrap(); + let mut view = leader_commit.justification.message.view.clone(); + view.number = view.number.next(); assert_eq!( replica_prepare.msg, ReplicaPrepare { - protocol_version: leader_commit.msg.protocol_version, - view: leader_commit.msg.justification.message.view.next(), - high_vote: leader_commit.msg.justification.message, - high_qc: leader_commit.msg.justification, + view, + high_vote: Some(leader_commit.justification.message.clone()), + high_qc: Some(leader_commit.justification), } ); Ok(()) @@ -548,11 +539,9 @@ async fn leader_commit_incompatible_protocol_version() { s.spawn_bg(runner.run(ctx)); let incompatible_protocol_version = util.incompatible_protocol_version(); - let mut leader_commit = util.new_leader_commit(ctx).await.msg; - leader_commit.protocol_version = incompatible_protocol_version; - let res = util - .process_leader_commit(ctx, util.owner_key().sign_msg(leader_commit)) - .await; + let mut leader_commit = util.new_leader_commit(ctx).await; + leader_commit.justification.message.view.protocol_version = incompatible_protocol_version; + let res = util.process_leader_commit(ctx, util.sign(leader_commit)).await; assert_matches!( res, Err(leader_commit::Error::IncompatibleProtocolVersion { message_version, local_version }) => { @@ -565,21 +554,18 @@ async fn leader_commit_incompatible_protocol_version() { } #[tokio::test] -async fn leader_commit_invalid_leader() { +async fn leader_commit_bad_leader() { zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); scope::run!(ctx, |ctx, s| async { let (mut util, runner) = UTHarness::new(ctx, 2).await; s.spawn_bg(runner.run(ctx)); - - let current_view_leader = util.view_leader(util.replica.view); - assert_ne!(current_view_leader, util.owner_key().public()); - - let leader_commit = util.new_leader_commit(ctx).await.msg; + let leader_commit = util.new_leader_commit(ctx).await; + // Sign the leader_commit with the key of a different validator.
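// keys[1] did not produce this LeaderCommit (the owner key keys[0] is the view leader here), so the replica is expected to reject the message with leader_commit::Error::BadLeader.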
let res = util .process_leader_commit(ctx, util.keys[1].sign_msg(leader_commit)) .await; - assert_matches!(res, Err(leader_commit::Error::InvalidLeader { .. })); + assert_matches!(res, Err(leader_commit::Error::BadLeader { .. })); Ok(()) }) .await @@ -594,8 +580,8 @@ async fn leader_commit_invalid_sig() { scope::run!(ctx, |ctx, s| async { let (mut util, runner) = UTHarness::new(ctx, 1).await; s.spawn_bg(runner.run(ctx)); - - let mut leader_commit = util.new_leader_commit(ctx).await; + let leader_commit = util.new_leader_commit(ctx).await; + let mut leader_commit = util.sign(leader_commit); leader_commit.sig = rng.gen(); let res = util.process_leader_commit(ctx, leader_commit).await; assert_matches!(res, Err(leader_commit::Error::InvalidSignature { .. })); @@ -614,12 +600,17 @@ async fn leader_commit_invalid_commit_qc() { let (mut util, runner) = UTHarness::new(ctx, 1).await; s.spawn_bg(runner.run(ctx)); - let mut leader_commit = util.new_leader_commit(ctx).await.msg; - leader_commit.justification = rng.gen(); + let mut leader_commit = util.new_leader_commit(ctx).await; + leader_commit.justification.signature = rng.gen(); let res = util - .process_leader_commit(ctx, util.owner_key().sign_msg(leader_commit)) + .process_leader_commit(ctx, util.sign(leader_commit)) .await; - assert_matches!(res, Err(leader_commit::Error::InvalidJustification { .. })); + assert_matches!( + res, + Err(leader_commit::Error::InvalidMessage( + validator::CommitQCVerifyError::BadSignature(..) + )) + ); Ok(()) }) .await diff --git a/node/actors/bft/src/replica/timer.rs b/node/actors/bft/src/replica/timer.rs index df5ef3b3..0cbae88c 100644 --- a/node/actors/bft/src/replica/timer.rs +++ b/node/actors/bft/src/replica/timer.rs @@ -2,17 +2,21 @@ use super::StateMachine; use crate::metrics; use tracing::instrument; use zksync_concurrency::{ctx, metrics::LatencyGaugeExt as _, time}; +use zksync_consensus_roles::validator; impl StateMachine { /// The base duration of the timeout. - pub(crate) const BASE_DURATION: time::Duration = time::Duration::milliseconds(1000); + pub(crate) const BASE_DURATION: time::Duration = time::Duration::milliseconds(2000); /// Resets the timer. On every timeout we double the duration, starting from a given base duration. /// This is a simple exponential backoff. #[instrument(level = "trace", ret)] pub(crate) fn reset_timer(&mut self, ctx: &ctx::Ctx) { - let timeout = - Self::BASE_DURATION * 2u32.pow((self.view.0 - self.high_qc.message.view.0) as u32); + let final_view = match self.high_qc.as_ref() { + Some(qc) => qc.view().number.next(), + None => validator::ViewNumber(0), + }; + let timeout = Self::BASE_DURATION * 2u32.pow((self.view.0 - final_view.0) as u32); metrics::METRICS.replica_view_timeout.set_latency(timeout); self.timeout_deadline = time::Deadline::Finite(ctx.now() + timeout); diff --git a/node/actors/bft/src/testonly/fuzz.rs b/node/actors/bft/src/testonly/fuzz.rs index 2a48b72e..077be553 100644 --- a/node/actors/bft/src/testonly/fuzz.rs +++ b/node/actors/bft/src/testonly/fuzz.rs @@ -1,5 +1,9 @@ use crate::testonly::node::MAX_PAYLOAD_SIZE; -use rand::{seq::SliceRandom, Rng}; +use rand::{ + distributions::{Distribution, Standard}, + seq::SliceRandom, + Rng, +}; use zksync_consensus_roles::validator; /// Trait that allows us to mutate types. 
It's an approach to fuzzing that instead of starting with completely random inputs @@ -11,6 +15,19 @@ pub(crate) trait Fuzz { fn mutate(&mut self, rng: &mut impl Rng); } +impl<T: Fuzz> Fuzz for Option<T> +where + Standard: Distribution<T>, +{ + fn mutate(&mut self, rng: &mut impl Rng) { + if let Some(v) = self.as_mut() { + v.mutate(rng); + } else { + *self = Some(rng.gen()); + } + } +} + impl Fuzz for validator::Signed<validator::ConsensusMsg> { fn mutate(&mut self, rng: &mut impl Rng) { // We give them different weights because we want to mutate the message more often. @@ -36,33 +53,29 @@ impl Fuzz for validator::ConsensusMsg { impl Fuzz for validator::ReplicaPrepare { fn mutate(&mut self, rng: &mut impl Rng) { - match rng.gen_range(0..4) { + match rng.gen_range(0..3) { 0 => self.view = rng.gen(), 1 => self.high_vote.mutate(rng), - 2 => self.high_qc.mutate(rng), - 3 => self.protocol_version = rng.gen(), - _ => unreachable!(), + _ => self.high_qc.mutate(rng), } } } impl Fuzz for validator::ReplicaCommit { fn mutate(&mut self, rng: &mut impl Rng) { - match rng.gen_range(0..3) { + match rng.gen_range(0..2) { 0 => self.view = rng.gen(), - 1 => self.proposal.mutate(rng), - 2 => self.protocol_version = rng.gen(), - _ => unreachable!(), + _ => self.proposal.mutate(rng), } } } +// TODO: why is the payload not fuzzed? impl Fuzz for validator::LeaderPrepare { fn mutate(&mut self, rng: &mut impl Rng) { - match rng.gen_range(0..3) { + match rng.gen_range(0..2) { 0 => self.proposal.mutate(rng), 1 => self.justification.mutate(rng), - 2 => self.protocol_version = rng.gen(), _ => unreachable!(), } } @@ -70,11 +83,7 @@ impl Fuzz for validator::LeaderPrepare { impl Fuzz for validator::LeaderCommit { fn mutate(&mut self, rng: &mut impl Rng) { - match rng.gen_range(0..2) { - 0 => self.justification.mutate(rng), - 1 => self.protocol_version = rng.gen(), - _ => unreachable!(), - } + self.justification.mutate(rng); } } diff --git a/node/actors/bft/src/testonly/node.rs b/node/actors/bft/src/testonly/node.rs index 6b221818..d6a25774 100644 --- a/node/actors/bft/src/testonly/node.rs +++ b/node/actors/bft/src/testonly/node.rs @@ -63,11 +63,9 @@ impl Node { let con_send = consensus_pipe.send; scope::run!(ctx, |ctx, s| async { s.spawn(async { - let validator_key = self.net.consensus.as_ref().unwrap().key.clone(); - let validator_set = self.net.validators.clone(); + let validator_key = self.net.validator_key.clone().unwrap(); crate::Config { secret_key: validator_key.clone(), - validator_set, block_store: self.block_store.clone(), replica_store: Box::new(in_memory::ReplicaStore::default()), payload_manager: self.behavior.payload_manager(), diff --git a/node/actors/bft/src/testonly/run.rs b/node/actors/bft/src/testonly/run.rs index e8f479a6..693f85ca 100644 --- a/node/actors/bft/src/testonly/run.rs +++ b/node/actors/bft/src/testonly/run.rs @@ -25,13 +25,13 @@ impl Test { /// Run a test with the given parameters.
pub(crate) async fn run(&self, ctx: &ctx::Ctx) -> anyhow::Result<()> { let rng = &mut ctx.rng(); - let setup = validator::testonly::GenesisSetup::new(rng, self.nodes.len()); + let setup = validator::testonly::Setup::new(rng, self.nodes.len()); let nets: Vec<_> = network::testonly::new_configs(rng, &setup, 1); let mut nodes = vec![]; let mut honest = vec![]; scope::run!(ctx, |ctx, s| async { for (i, net) in nets.into_iter().enumerate() { - let (store, runner) = new_store(ctx, &setup.blocks[0]).await; + let (store, runner) = new_store(ctx, &setup.genesis).await; s.spawn_bg(runner.run(ctx)); if self.nodes[i] == Behavior::Honest { honest.push(store.clone()); @@ -46,10 +46,11 @@ impl Test { s.spawn_bg(run_nodes(ctx, self.network, &nodes)); // Run the nodes until all honest nodes store enough finalized blocks. - let want_block = validator::BlockNumber(self.blocks_to_finalize as u64); + let first = setup.genesis.fork.first_block; + let want_next = validator::BlockNumber(first.0 + self.blocks_to_finalize as u64); for store in &honest { sync::wait_for(ctx, &mut store.subscribe(), |state| { - state.next() > want_block + state.next() > want_next }) .await?; } @@ -76,6 +77,7 @@ async fn run_nodes(ctx: &ctx::Ctx, network: Network, specs: &[Node]) -> anyhow:: let mut nodes = vec![]; for (i, spec) in specs.iter().enumerate() { let (node, runner) = network::testonly::Instance::new( + ctx, spec.net.clone(), spec.block_store.clone(), ); @@ -99,7 +101,7 @@ async fn run_nodes(ctx: &ctx::Ctx, network: Network, specs: &[Node]) -> anyhow:: let mut recvs = vec![]; for (i, spec) in specs.iter().enumerate() { let (actor_pipe, pipe) = pipe::new(); - let key = spec.net.consensus.as_ref().unwrap().key.public(); + let key = spec.net.validator_key.as_ref().unwrap().public(); sends.insert(key, actor_pipe.send); recvs.push(actor_pipe.recv); s.spawn( diff --git a/node/actors/bft/src/testonly/ut_harness.rs b/node/actors/bft/src/testonly/ut_harness.rs index 22eda0a1..8880788c 100644 --- a/node/actors/bft/src/testonly/ut_harness.rs +++ b/node/actors/bft/src/testonly/ut_harness.rs @@ -1,9 +1,9 @@ use crate::{ io::OutputMessage, leader, - leader::{ReplicaCommitError, ReplicaPrepareError}, + leader::{replica_commit, replica_prepare}, replica, - replica::{LeaderCommitError, LeaderPrepareError}, + replica::{leader_commit, leader_prepare}, testonly, Config, PayloadManager, }; use assert_matches::assert_matches; @@ -55,13 +55,12 @@ impl UTHarness { payload_manager: Box, ) -> (UTHarness, BlockStoreRunner) { let rng = &mut ctx.rng(); - let setup = validator::testonly::GenesisSetup::new(rng, num_validators); - let (block_store, runner) = new_store(ctx, &setup.blocks[0]).await; + let setup = validator::testonly::Setup::new(rng, num_validators); + let (block_store, runner) = new_store(ctx, &setup.genesis).await; let (send, recv) = ctx::channel::unbounded(); let cfg = Arc::new(Config { secret_key: setup.keys[0].clone(), - validator_set: setup.validator_set(), block_store: block_store.clone(), replica_store: Box::new(in_memory::ReplicaStore::default()), payload_manager, @@ -75,7 +74,7 @@ impl UTHarness { leader, replica, pipe: recv, - keys: setup.keys, + keys: setup.keys.clone(), }; let _: Signed = this.try_recv().unwrap(); (this, runner) @@ -84,7 +83,7 @@ impl UTHarness { /// Creates a new `UTHarness` with minimally-significant validator set size. 
pub(crate) async fn new_many(ctx: &ctx::Ctx) -> (UTHarness, BlockStoreRunner) { let num_validators = 6; - assert!(crate::misc::faulty_replicas(num_validators) > 0); + assert!(validator::faulty_replicas(num_validators) > 0); UTHarness::new(ctx, num_validators).await } @@ -93,16 +92,23 @@ impl UTHarness { /// recovers after a timeout. pub(crate) async fn produce_block_after_timeout(&mut self, ctx: &ctx::Ctx) { let want = ReplicaPrepare { - protocol_version: self.protocol_version(), - view: self.replica.view.next(), + view: validator::View { + protocol_version: self.protocol_version(), + fork: self.genesis().fork.number, + number: self.replica.view.next(), + }, high_qc: self.replica.high_qc.clone(), - high_vote: self.replica.high_vote, + high_vote: self.replica.high_vote.clone(), }; let replica_prepare = self.process_replica_timeout(ctx).await; assert_eq!(want, replica_prepare.msg); + self.produce_block(ctx).await; + } - let leader_commit = self.new_leader_commit(ctx).await; - self.process_leader_commit(ctx, leader_commit) + /// Produces a block by executing a full view. + pub(crate) async fn produce_block(&mut self, ctx: &ctx::Ctx) { + let msg = self.new_leader_commit(ctx).await; + self.process_leader_commit(ctx, self.sign(msg)) .await .unwrap(); } @@ -119,6 +125,10 @@ impl UTHarness { &self.replica.config.secret_key } + pub(crate) fn sign<V: Variant<Msg>>(&self, msg: V) -> Signed<V> { + self.replica.config.secret_key.sign_msg(msg) + } + pub(crate) fn set_owner_as_view_leader(&mut self) { let mut view = self.replica.view; while self.view_leader(view) != self.owner_key().public() { @@ -140,57 +150,53 @@ impl UTHarness { self.replica.view = view } - pub(crate) fn new_replica_prepare( - &mut self, - mutate_fn: impl FnOnce(&mut ReplicaPrepare), - ) -> Signed<ReplicaPrepare> { - self.set_owner_as_view_leader(); - let mut msg = ReplicaPrepare { + pub(crate) fn replica_view(&self) -> validator::View { + validator::View { protocol_version: self.protocol_version(), - view: self.replica.view, - high_vote: self.replica.high_vote, + fork: self.genesis().fork.number, + number: self.replica.view, + } + } + + pub(crate) fn new_replica_prepare(&mut self) -> ReplicaPrepare { + self.set_owner_as_view_leader(); + ReplicaPrepare { + view: self.replica_view(), + high_vote: self.replica.high_vote.clone(), high_qc: self.replica.high_qc.clone(), - }; - mutate_fn(&mut msg); - self.owner_key().sign_msg(msg) + } } - pub(crate) fn new_current_replica_commit( - &self, - mutate_fn: impl FnOnce(&mut ReplicaCommit), - ) -> Signed<ReplicaCommit> { - let mut msg = ReplicaCommit { - protocol_version: self.protocol_version(), - view: self.replica.view, - proposal: self.replica.high_qc.message.proposal, - }; - mutate_fn(&mut msg); - self.owner_key().sign_msg(msg) + pub(crate) fn new_current_replica_commit(&self) -> ReplicaCommit { + ReplicaCommit { + view: self.replica_view(), + proposal: self.replica.high_qc.as_ref().unwrap().message.proposal, + } } - pub(crate) async fn new_leader_prepare(&mut self, ctx: &ctx::Ctx) -> Signed<LeaderPrepare> { - let replica_prepare = self.new_replica_prepare(|_| {}).msg; - self.process_replica_prepare_all(ctx, replica_prepare).await + pub(crate) async fn new_leader_prepare(&mut self, ctx: &ctx::Ctx) -> LeaderPrepare { + let msg = self.new_replica_prepare(); + self.process_replica_prepare_all(ctx, msg).await.msg } - pub(crate) async fn new_replica_commit(&mut self, ctx: &ctx::Ctx) -> Signed<ReplicaCommit> { - let leader_prepare = self.new_leader_prepare(ctx).await; - self.process_leader_prepare(ctx, leader_prepare) + pub(crate) async fn new_replica_commit(&mut self, ctx:
&ctx::Ctx) -> ReplicaCommit { + let msg = self.new_leader_prepare(ctx).await; + self.process_leader_prepare(ctx, self.sign(msg)) .await .unwrap() + .msg } - pub(crate) async fn new_leader_commit(&mut self, ctx: &ctx::Ctx) -> Signed<LeaderCommit> { - let replica_commit = self.new_replica_commit(ctx).await; - self.process_replica_commit_all(ctx, replica_commit.msg) - .await + pub(crate) async fn new_leader_commit(&mut self, ctx: &ctx::Ctx) -> LeaderCommit { + let msg = self.new_replica_commit(ctx).await; + self.process_replica_commit_all(ctx, msg).await.msg } pub(crate) async fn process_leader_prepare( &mut self, ctx: &ctx::Ctx, msg: Signed<LeaderPrepare>, - ) -> Result<Signed<ReplicaCommit>, LeaderPrepareError> { + ) -> Result<Signed<ReplicaCommit>, leader_prepare::Error> { self.replica.process_leader_prepare(ctx, msg).await?; Ok(self.try_recv().unwrap()) } @@ -199,7 +205,7 @@ impl UTHarness { &mut self, ctx: &ctx::Ctx, msg: Signed<LeaderCommit>, - ) -> Result<Signed<ReplicaPrepare>, LeaderCommitError> { + ) -> Result<Signed<ReplicaPrepare>, leader_commit::Error> { self.replica.process_leader_commit(ctx, msg).await?; Ok(self.try_recv().unwrap()) } @@ -209,7 +215,7 @@ impl UTHarness { &mut self, ctx: &ctx::Ctx, msg: Signed<ReplicaPrepare>, - ) -> Result<Option<Signed<LeaderPrepare>>, ReplicaPrepareError> { + ) -> Result<Option<Signed<LeaderPrepare>>, replica_prepare::Error> { let prepare_qc = self.leader.prepare_qc.subscribe(); self.leader.process_replica_prepare(ctx, msg).await?; if prepare_qc.has_changed().unwrap() { @@ -231,7 +237,7 @@ impl UTHarness { ctx: &ctx::Ctx, msg: ReplicaPrepare, ) -> Signed<LeaderPrepare> { - let want_threshold = self.replica.config.threshold(); + let want_threshold = self.genesis().validators.threshold(); let mut leader_prepare = None; let msgs: Vec<_> = self.keys.iter().map(|k| k.sign_msg(msg.clone())).collect(); for (i, msg) in msgs.into_iter().enumerate() { @@ -239,7 +245,7 @@ impl UTHarness { match (i + 1).cmp(&want_threshold) { Ordering::Equal => leader_prepare = res.unwrap(), Ordering::Less => assert!(res.unwrap().is_none()), - Ordering::Greater => assert_matches!(res, Err(ReplicaPrepareError::Old { .. })), + Ordering::Greater => assert_matches!(res, Err(replica_prepare::Error::Old { .. })), } } leader_prepare.unwrap() @@ -249,7 +255,7 @@ impl UTHarness { &mut self, ctx: &ctx::Ctx, msg: Signed<ReplicaCommit>, - ) -> Result<Option<Signed<LeaderCommit>>, ReplicaCommitError> { + ) -> Result<Option<Signed<LeaderCommit>>, replica_commit::Error> { self.leader.process_replica_commit(ctx, msg)?; Ok(self.try_recv()) } @@ -260,12 +266,14 @@ impl UTHarness { msg: ReplicaCommit, ) -> Signed<LeaderCommit> { for (i, key) in self.keys.iter().enumerate() { - let res = self.leader.process_replica_commit(ctx, key.sign_msg(msg)); - let want_threshold = self.replica.config.threshold(); + let res = self + .leader + .process_replica_commit(ctx, key.sign_msg(msg.clone())); + let want_threshold = self.genesis().validators.threshold(); match (i + 1).cmp(&want_threshold) { Ordering::Equal => res.unwrap(), Ordering::Less => res.unwrap(), - Ordering::Greater => assert_matches!(res, Err(ReplicaCommitError::Old { .. })), + Ordering::Greater => assert_matches!(res, Err(replica_commit::Error::Old { ..
})), } } self.try_recv().unwrap() @@ -292,25 +300,33 @@ impl UTHarness { } pub(crate) fn view_leader(&self, view: ViewNumber) -> validator::PublicKey { - self.replica.config.view_leader(view) + self.genesis().validators.view_leader(view) } - pub(crate) fn validator_set(&self) -> validator::ValidatorSet { - validator::ValidatorSet::new(self.keys.iter().map(|k| k.public())).unwrap() + pub(crate) fn genesis(&self) -> &validator::Genesis { + self.replica.config.genesis() } pub(crate) fn new_commit_qc(&self, mutate_fn: impl FnOnce(&mut ReplicaCommit)) -> CommitQC { - let msg = self.new_current_replica_commit(mutate_fn).msg; - let msgs: Vec<_> = self.keys.iter().map(|k| k.sign_msg(msg)).collect(); - CommitQC::from(&msgs, &self.validator_set()).unwrap() + let mut msg = self.new_current_replica_commit(); + mutate_fn(&mut msg); + let mut qc = CommitQC::new(msg, self.genesis()); + for key in &self.keys { + qc.add(&key.sign_msg(qc.message.clone()), self.genesis()); + } + qc } pub(crate) fn new_prepare_qc( &mut self, mutate_fn: impl FnOnce(&mut ReplicaPrepare), ) -> PrepareQC { - let msg = self.new_replica_prepare(mutate_fn).msg; - let msgs: Vec<_> = self.keys.iter().map(|k| k.sign_msg(msg.clone())).collect(); - PrepareQC::from(&msgs, &self.validator_set()).unwrap() + let mut msg = self.new_replica_prepare(); + mutate_fn(&mut msg); + let mut qc = PrepareQC::new(msg.view.clone()); + for key in &self.keys { + qc.add(&key.sign_msg(msg.clone()), self.genesis()); + } + qc } } diff --git a/node/actors/bft/src/tests.rs b/node/actors/bft/src/tests.rs index 0ca95e7f..e6bb83b5 100644 --- a/node/actors/bft/src/tests.rs +++ b/node/actors/bft/src/tests.rs @@ -1,17 +1,15 @@ -use crate::{ - misc::consensus_threshold, - testonly::{ut_harness::UTHarness, Behavior, Network, Test}, -}; -use zksync_concurrency::{ctx, scope}; -use zksync_consensus_roles::validator::Phase; +use crate::testonly::{ut_harness::UTHarness, Behavior, Network, Test}; +use zksync_concurrency::{ctx, scope, time}; +use zksync_consensus_roles::validator; async fn run_test(behavior: Behavior, network: Network) { + let _guard = zksync_concurrency::testonly::set_timeout(time::Duration::seconds(20)); zksync_concurrency::testonly::abort_on_panic(); - let ctx = &ctx::test_root(&ctx::AffineClock::new(1.)); + let ctx = &ctx::test_root(&ctx::RealClock); const NODES: usize = 11; let mut nodes = vec![behavior; NODES]; - for n in &mut nodes[0..consensus_threshold(NODES)] { + for n in &mut nodes[0..validator::threshold(NODES)] { *n = Behavior::Honest; } Test { @@ -64,7 +62,7 @@ async fn byzantine_real_network() { run_test(Behavior::Byzantine, Network::Real).await } -// Testing liveness after the network becomes idle with leader having no cached prepare messages for the current view. +/// Testing liveness after the network becomes idle with leader having no cached prepare messages for the current view. 
#[tokio::test] async fn timeout_leader_no_prepares() { zksync_concurrency::testonly::abort_on_panic(); @@ -72,8 +70,7 @@ async fn timeout_leader_no_prepares() { scope::run!(ctx, |ctx, s| async { let (mut util, runner) = UTHarness::new_many(ctx).await; s.spawn_bg(runner.run(ctx)); - - util.new_replica_prepare(|_| {}); + util.new_replica_prepare(); util.produce_block_after_timeout(ctx).await; Ok(()) }) @@ -89,10 +86,9 @@ async fn timeout_leader_some_prepares() { scope::run!(ctx, |ctx, s| async { let (mut util, runner) = UTHarness::new_many(ctx).await; s.spawn_bg(runner.run(ctx)); - - let replica_prepare = util.new_replica_prepare(|_| {}); + let replica_prepare = util.new_replica_prepare(); assert!(util - .process_replica_prepare(ctx, replica_prepare) + .process_replica_prepare(ctx, util.sign(replica_prepare)) .await .unwrap() .is_none()); @@ -114,7 +110,7 @@ async fn timeout_leader_in_commit() { util.new_leader_prepare(ctx).await; // Leader is in `Phase::Commit`, but should still accept prepares from newer views. - assert_eq!(util.leader.phase, Phase::Commit); + assert_eq!(util.leader.phase, validator::Phase::Commit); util.produce_block_after_timeout(ctx).await; Ok(()) }) @@ -133,7 +129,7 @@ async fn timeout_replica_in_commit() { util.new_replica_commit(ctx).await; // Leader is in `Phase::Commit`, but should still accept prepares from newer views. - assert_eq!(util.leader.phase, Phase::Commit); + assert_eq!(util.leader.phase, validator::Phase::Commit); util.produce_block_after_timeout(ctx).await; Ok(()) }) @@ -152,12 +148,12 @@ async fn timeout_leader_some_commits() { let replica_commit = util.new_replica_commit(ctx).await; assert!(util - .process_replica_commit(ctx, replica_commit) + .process_replica_commit(ctx, util.sign(replica_commit)) .await .unwrap() .is_none()); // Leader is in `Phase::Commit`, but should still accept prepares from newer views. - assert_eq!(util.leader_phase(), Phase::Commit); + assert_eq!(util.leader_phase(), validator::Phase::Commit); util.produce_block_after_timeout(ctx).await; Ok(()) }) diff --git a/node/actors/executor/src/lib.rs b/node/actors/executor/src/lib.rs index 08065a2a..2e0fad85 100644 --- a/node/actors/executor/src/lib.rs +++ b/node/actors/executor/src/lib.rs @@ -6,7 +6,7 @@ use std::{ fmt, sync::Arc, }; -use zksync_concurrency::{ctx, net, scope}; +use zksync_concurrency::{ctx, net, scope, time}; use zksync_consensus_bft as bft; use zksync_consensus_network as network; use zksync_consensus_roles::{node, validator}; @@ -16,16 +16,13 @@ use zksync_consensus_utils::pipe; use zksync_protobuf::kB; mod io; -pub mod testonly; #[cfg(test)] mod tests; -pub use network::consensus::Config as ValidatorConfig; - /// Validator-related part of [`Executor`]. pub struct Validator { /// Consensus network configuration. - pub config: ValidatorConfig, + pub key: validator::SecretKey, /// Store for replica state. pub replica_store: Box, /// Payload manager. @@ -35,7 +32,7 @@ pub struct Validator { impl fmt::Debug for Validator { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { fmt.debug_struct("ValidatorExecutor") - .field("config", &self.config) + .field("key", &self.key) .finish() } } @@ -46,9 +43,9 @@ pub struct Config { /// IP:port to listen on, for incoming TCP connections. /// Use `0.0.0.0:` to listen on all network interfaces (i.e. on all IPs exposed by this VM). pub server_addr: std::net::SocketAddr, - /// Static specification of validators for Proof of Authority. Should be deprecated once we move - /// to Proof of Stake. 
- pub validators: validator::ValidatorSet, + /// Public TCP address that other nodes are expected to connect to. + /// It is announced over gossip network. + pub public_addr: std::net::SocketAddr, /// Maximal size of the block payload. pub max_payload_size: usize, @@ -67,8 +64,8 @@ pub struct Config { impl Config { /// Returns gossip network configuration. - pub(crate) fn gossip(&self) -> network::gossip::Config { - network::gossip::Config { + pub(crate) fn gossip(&self) -> network::GossipConfig { + network::GossipConfig { key: self.node_key.clone(), dynamic_inbound_limit: self.gossip_dynamic_inbound_limit, static_inbound: self.gossip_static_inbound.clone(), @@ -93,11 +90,12 @@ impl Executor { fn network_config(&self) -> network::Config { network::Config { server_addr: net::tcp::ListenerAddr::new(self.config.server_addr), - validators: self.config.validators.clone(), + public_addr: self.config.public_addr, gossip: self.config.gossip(), - consensus: self.validator.as_ref().map(|v| v.config.clone()), - enable_pings: true, + validator_key: self.validator.as_ref().map(|v| v.key.clone()), + ping_timeout: Some(time::Duration::seconds(10)), max_block_size: self.config.max_payload_size.saturating_add(kB), + rpc: network::RpcConfig::default(), } } @@ -105,10 +103,11 @@ impl Executor { fn verify(&self) -> anyhow::Result<()> { if let Some(validator) = self.validator.as_ref() { if !self - .config + .block_store + .genesis() .validators .iter() - .any(|key| key == &validator.config.key.public()) + .any(|key| key == &validator.key.public()) { anyhow::bail!("this validator doesn't belong to the consensus"); } @@ -132,26 +131,24 @@ impl Executor { network_dispatcher_pipe, ); - // Create each of the actors. - let validator_set = self.config.validators; - tracing::debug!("Starting actors in separate threads."); scope::run!(ctx, |ctx, s| async { s.spawn_blocking(|| dispatcher.run(ctx).context("IO Dispatcher stopped")); s.spawn(async { - let state = network::State::new(network_config, self.block_store.clone(), None) - .context("Invalid network config")?; - state.register_metrics(); - network::run_network(ctx, state, network_actor_pipe) - .await - .context("Network stopped") + let (net, runner) = network::Network::new( + ctx, + network_config, + self.block_store.clone(), + network_actor_pipe, + ); + net.register_metrics(); + runner.run(ctx).await.context("Network stopped") }); if let Some(validator) = self.validator { s.spawn(async { let validator = validator; bft::Config { - secret_key: validator.config.key.clone(), - validator_set: validator_set.clone(), + secret_key: validator.key.clone(), block_store: self.block_store.clone(), replica_store: validator.replica_store, payload_manager: validator.payload_manager, @@ -162,13 +159,10 @@ impl Executor { .context("Consensus stopped") }); } - sync_blocks::Config::new( - validator_set.clone(), - bft::misc::consensus_threshold(validator_set.len()), - )? - .run(ctx, sync_blocks_actor_pipe, self.block_store.clone()) - .await - .context("Syncing blocks stopped") + sync_blocks::Config::new() + .run(ctx, sync_blocks_actor_pipe, self.block_store.clone()) + .await + .context("Syncing blocks stopped") }) .await } diff --git a/node/actors/executor/src/testonly.rs b/node/actors/executor/src/testonly.rs deleted file mode 100644 index b40caa66..00000000 --- a/node/actors/executor/src/testonly.rs +++ /dev/null @@ -1,53 +0,0 @@ -//! Testing extensions for node executor. 
-use crate::{Config, ValidatorConfig}; -use rand::Rng; -use zksync_concurrency::net; -use zksync_consensus_network as network; -use zksync_consensus_roles::validator::testonly::GenesisSetup; - -/// Full validator configuration. -#[derive(Debug, Clone)] -#[non_exhaustive] -pub struct ValidatorNode { - /// Full node configuration. - pub node: Config, - /// Consensus configuration of the validator. - pub validator: ValidatorConfig, - /// Genesis configuration (validator set & initial blocks). - pub setup: GenesisSetup, -} - -/// Creates a new full node and configures this validator to accept incoming connections from it. -pub fn connect_full_node(rng: &mut impl Rng, node: &mut Config) -> Config { - let mut new = node.clone(); - new.server_addr = *net::tcp::testonly::reserve_listener(); - new.node_key = rng.gen(); - new.gossip_static_outbound = [(node.node_key.public(), node.server_addr)].into(); - node.gossip_static_inbound.insert(new.node_key.public()); - new -} - -impl ValidatorNode { - /// Generates a validator config for a network with a single validator. - pub fn new(rng: &mut impl Rng) -> Self { - let setup = GenesisSetup::new(rng, 1); - let net_config = network::testonly::new_configs(rng, &setup, 0) - .pop() - .unwrap(); - let validator = net_config.consensus.unwrap(); - let gossip = net_config.gossip; - Self { - node: Config { - server_addr: *net_config.server_addr, - validators: setup.validator_set(), - node_key: gossip.key, - gossip_dynamic_inbound_limit: gossip.dynamic_inbound_limit, - gossip_static_inbound: gossip.static_inbound, - gossip_static_outbound: gossip.static_outbound, - max_payload_size: usize::MAX, - }, - validator, - setup, - } - } -} diff --git a/node/actors/executor/src/tests.rs b/node/actors/executor/src/tests.rs index d85ca660..f95b6641 100644 --- a/node/actors/executor/src/tests.rs +++ b/node/actors/executor/src/tests.rs @@ -1,41 +1,31 @@ //! High-level tests for `Executor`. 
- use super::*; -use crate::testonly::{connect_full_node, ValidatorNode}; -use test_casing::test_casing; -use zksync_concurrency::{ - sync, - testonly::{abort_on_panic, set_timeout}, - time, -}; +use zksync_concurrency::testonly::abort_on_panic; use zksync_consensus_bft as bft; -use zksync_consensus_roles::validator::BlockNumber; +use zksync_consensus_network::testonly::{new_configs, new_fullnode}; +use zksync_consensus_roles::validator::{testonly::Setup, BlockNumber}; use zksync_consensus_storage::{ testonly::{in_memory, new_store}, BlockStore, }; -impl Config { - fn into_executor(self, block_store: Arc) -> Executor { - Executor { - config: self, - block_store, - validator: None, - } - } -} - -impl ValidatorNode { - fn into_executor(self, block_store: Arc) -> Executor { - Executor { - config: self.node, - block_store, - validator: Some(Validator { - config: self.validator, - replica_store: Box::new(in_memory::ReplicaStore::default()), - payload_manager: Box::new(bft::testonly::RandomPayload(1000)), - }), - } +fn make_executor(cfg: &network::Config, block_store: Arc) -> Executor { + Executor { + config: Config { + server_addr: *cfg.server_addr, + public_addr: cfg.public_addr, + max_payload_size: usize::MAX, + node_key: cfg.gossip.key.clone(), + gossip_dynamic_inbound_limit: cfg.gossip.dynamic_inbound_limit, + gossip_static_inbound: cfg.gossip.static_inbound.clone(), + gossip_static_outbound: cfg.gossip.static_outbound.clone(), + }, + block_store, + validator: cfg.validator_key.as_ref().map(|key| Validator { + key: key.clone(), + replica_store: Box::new(in_memory::ReplicaStore::default()), + payload_manager: Box::new(bft::testonly::RandomPayload(1000)), + }), } } @@ -45,15 +35,13 @@ async fn executing_single_validator() { let ctx = &ctx::root(); let rng = &mut ctx.rng(); - let validator = ValidatorNode::new(rng); - let (storage, runner) = new_store(ctx, &validator.setup.blocks[0]).await; - let executor = validator.into_executor(storage.clone()); - + let setup = Setup::new(rng, 1); + let cfgs = new_configs(rng, &setup, 0); scope::run!(ctx, |ctx, s| async { + let (store, runner) = new_store(ctx, &setup.genesis).await; s.spawn_bg(runner.run(ctx)); - s.spawn_bg(executor.run(ctx)); - let want = BlockNumber(5); - sync::wait_for(ctx, &mut storage.subscribe(), |state| state.next() > want).await?; + s.spawn_bg(make_executor(&cfgs[0], store.clone()).run(ctx)); + store.wait_until_persisted(ctx, BlockNumber(5)).await?; Ok(()) }) .await @@ -66,85 +54,22 @@ async fn executing_validator_and_full_node() { let ctx = &ctx::test_root(&ctx::AffineClock::new(20.0)); let rng = &mut ctx.rng(); - let mut validator = ValidatorNode::new(rng); - let full_node = connect_full_node(rng, &mut validator.node); - let (validator_storage, validator_runner) = new_store(ctx, &validator.setup.blocks[0]).await; - let (full_node_storage, full_node_runner) = new_store(ctx, &validator.setup.blocks[0]).await; - - let validator = validator.into_executor(validator_storage.clone()); - let full_node = full_node.into_executor(full_node_storage.clone()); - - scope::run!(ctx, |ctx, s| async { - s.spawn_bg(validator_runner.run(ctx)); - s.spawn_bg(full_node_runner.run(ctx)); - s.spawn_bg(validator.run(ctx)); - s.spawn_bg(full_node.run(ctx)); - full_node_storage - .wait_until_persisted(ctx, BlockNumber(5)) - .await?; - Ok(()) - }) - .await - .unwrap(); -} - -#[test_casing(2, [false, true])] -#[tokio::test] -async fn syncing_full_node_from_snapshot(delay_block_storage: bool) { - abort_on_panic(); - let _guard = 
set_timeout(time::Duration::seconds(10)); - - let ctx = &ctx::test_root(&ctx::AffineClock::new(20.0)); - let rng = &mut ctx.rng(); - - let mut validator = ValidatorNode::new(rng); - validator.setup.push_blocks(rng, 10); - let node2 = connect_full_node(rng, &mut validator.node); - - let (store1, store1_runner) = new_store(ctx, &validator.setup.blocks[0]).await; - // Node2 will start from a snapshot. - let (store2, store2_runner) = new_store(ctx, &validator.setup.blocks[4]).await; - - // We spawn 2 non-validator nodes. We will simulate blocks appearing in storage of node1, - // and will expect them to be propagated to node2. - let node1 = validator.node.into_executor(store1.clone()); - let node2 = Executor { - config: node2, - block_store: store2.clone(), - validator: None, - }; - + let setup = Setup::new(rng, 1); + let cfgs = new_configs(rng, &setup, 0); scope::run!(ctx, |ctx, s| async { - s.spawn_bg(store1_runner.run(ctx)); - s.spawn_bg(store2_runner.run(ctx)); - if !delay_block_storage { - // Instead of running consensus on the validator, add the generated blocks manually. - for block in &validator.setup.blocks[1..] { - store1.queue_block(ctx, block.clone()).await.unwrap(); - } - } - s.spawn_bg(node1.run(ctx)); - s.spawn_bg(node2.run(ctx)); - - if delay_block_storage { - // Emulate the validator gradually adding new blocks to the storage. - s.spawn_bg(async { - for block in &validator.setup.blocks[1..] { - ctx.sleep(time::Duration::milliseconds(500)).await?; - store1.queue_block(ctx, block.clone()).await?; - } - Ok(()) - }); - } + // Spawn validator. + let (store, runner) = new_store(ctx, &setup.genesis).await; + s.spawn_bg(runner.run(ctx)); + s.spawn_bg(make_executor(&cfgs[0], store).run(ctx)); - store2.wait_until_persisted(ctx, BlockNumber(10)).await?; + // Spawn full node. + let (store, runner) = new_store(ctx, &setup.genesis).await; + s.spawn_bg(runner.run(ctx)); + s.spawn_bg(make_executor(&new_fullnode(rng, &cfgs[0]), store.clone()).run(ctx)); - // Check that the node didn't receive any blocks with number lesser than the initial snapshot block. - for lesser_block_number in 0..3 { - let block = store2.block(ctx, BlockNumber(lesser_block_number)).await?; - assert!(block.is_none()); - } - anyhow::Ok(()) + // Wait for blocks in full node store. + store.wait_until_persisted(ctx, BlockNumber(5)).await?; + Ok(()) }) .await .unwrap(); diff --git a/node/actors/network/Cargo.toml b/node/actors/network/Cargo.toml index 0edd257c..e1e835a9 100644 --- a/node/actors/network/Cargo.toml +++ b/node/actors/network/Cargo.toml @@ -27,6 +27,7 @@ tracing.workspace = true vise.workspace = true [dev-dependencies] +assert_matches.workspace = true pretty_assertions.workspace = true test-casing.workspace = true tokio.workspace = true diff --git a/node/actors/network/src/config.rs b/node/actors/network/src/config.rs new file mode 100644 index 00000000..27d6c498 --- /dev/null +++ b/node/actors/network/src/config.rs @@ -0,0 +1,82 @@ +//! Network actor configs. +use std::collections::{HashMap, HashSet}; +use zksync_concurrency::{limiter, net, time}; +use zksync_consensus_roles::{node, validator}; + +/// How often we should retry to establish a connection to a validator. +/// TODO(gprusak): once it becomes relevant, choose a more appropriate retry strategy. +pub(crate) const CONNECT_RETRY: time::Duration = time::Duration::seconds(20); + +/// Rate limiting config for RPCs. +#[derive(Debug, Clone)] +pub struct RpcConfig { + /// Max rate of sending/receiving push_validator_addrs messages. 
+ pub push_validator_addrs_rate: limiter::Rate, + /// Max rate of sending/receiving push_block_store_state messages. + pub push_block_store_state_rate: limiter::Rate, + /// Max rate of sending/receiving get_block RPCs. + pub get_block_rate: limiter::Rate, + /// Max rate of sending/receiving consensus messages. + pub consensus_rate: limiter::Rate, +} + +impl Default for RpcConfig { + fn default() -> Self { + Self { + push_validator_addrs_rate: limiter::Rate { + burst: 1, + refresh: time::Duration::seconds(5), + }, + push_block_store_state_rate: limiter::Rate { + burst: 2, + refresh: time::Duration::milliseconds(500), + }, + get_block_rate: limiter::Rate { + burst: 10, + refresh: time::Duration::milliseconds(100), + }, + consensus_rate: limiter::Rate { + burst: 10, + refresh: time::Duration::ZERO, + }, + } + } +} + +/// Gossip network configuration. +#[derive(Debug, Clone)] +pub struct GossipConfig { + /// Private key of the node; every node should have one. + pub key: node::SecretKey, + /// Limit on the number of inbound connections outside + /// of the `static_inbound` set. + pub dynamic_inbound_limit: usize, + /// Inbound connections that should be unconditionally accepted. + pub static_inbound: HashSet<node::PublicKey>, + /// Outbound connections that the node should actively try to + /// establish and maintain. + pub static_outbound: HashMap<node::PublicKey, std::net::SocketAddr>, +} + +/// Network actor config. +#[derive(Debug, Clone)] +pub struct Config { + /// TCP socket address to listen for inbound connections at. + pub server_addr: net::tcp::ListenerAddr, + /// Public TCP address that other nodes are expected to connect to. + /// It is announced over gossip network. + pub public_addr: std::net::SocketAddr, + /// Gossip network config. + pub gossip: GossipConfig, + /// Private key of the validator. + /// None if the node is NOT a validator. + pub validator_key: Option<validator::SecretKey>, + /// Maximal size of the proto-encoded `validator::FinalBlock` in bytes. + pub max_block_size: usize, + /// If a peer doesn't respond to a ping message within `ping_timeout`, + /// the connection is dropped. + /// `None` disables sending ping messages (useful for tests). + pub ping_timeout: Option<time::Duration>, + /// Rate limiting config for RPCs. + pub rpc: RpcConfig, +} diff --git a/node/actors/network/src/consensus/handshake/mod.rs b/node/actors/network/src/consensus/handshake/mod.rs index 9536c2d3..06de4a4e 100644 --- a/node/actors/network/src/consensus/handshake/mod.rs +++ b/node/actors/network/src/consensus/handshake/mod.rs @@ -19,6 +19,9 @@ pub(crate) struct Handshake { /// Session ID signed with the validator key. /// Authenticates the peer to be the owner of the validator key. pub(crate) session_id: validator::Signed<node::SessionId>, + /// Hash of the blockchain genesis specification. + /// Only nodes with the same genesis belong to the same network. + pub(crate) genesis: validator::GenesisHash, } impl ProtoFmt for Handshake { @@ -26,11 +29,13 @@ fn read(r: &Self::Proto) -> anyhow::Result<Self> { Ok(Self { session_id: read_required(&r.session_id).context("session_id")?, + genesis: read_required(&r.genesis).context("genesis")?, }) } fn build(&self) -> Self::Proto { Self::Proto { session_id: Some(self.session_id.build()), + genesis: Some(self.genesis.build()), } } } /// Error returned by handshake logic.
#[derive(Debug, thiserror::Error)] pub(super) enum Error { + #[error("genesis mismatch")] + GenesisMismatch, #[error("session id mismatch")] SessionIdMismatch, #[error("unexpected peer")] @@ -51,6 +58,7 @@ pub(super) enum Error { pub(super) async fn outbound( ctx: &ctx::Ctx, me: &validator::SecretKey, + genesis: validator::GenesisHash, stream: &mut noise::Stream, peer: &validator::PublicKey, ) -> Result<(), Error> { @@ -61,6 +69,7 @@ pub(super) async fn outbound( stream, &Handshake { session_id: me.sign_msg(session_id.clone()), + genesis, }, ) .await @@ -68,6 +77,9 @@ pub(super) async fn outbound( let h: Handshake = frame::recv_proto(ctx, stream, Handshake::max_size()) .await .map_err(Error::Stream)?; + if h.genesis != genesis { + return Err(Error::GenesisMismatch); + } if h.session_id.msg != session_id { return Err(Error::SessionIdMismatch); } @@ -81,6 +93,7 @@ pub(super) async fn inbound( pub(super) async fn inbound( ctx: &ctx::Ctx, me: &validator::SecretKey, + genesis: validator::GenesisHash, stream: &mut noise::Stream, ) -> Result<validator::PublicKey, Error> { let ctx = &ctx.with_timeout(TIMEOUT); @@ -88,6 +101,9 @@ pub(super) async fn inbound( let h: Handshake = frame::recv_proto(ctx, stream, Handshake::max_size()) .await .map_err(Error::Stream)?; + if h.genesis != genesis { + return Err(Error::GenesisMismatch); + } if h.session_id.msg != session_id.clone() { return Err(Error::SessionIdMismatch); } @@ -97,6 +113,7 @@ pub(super) async fn inbound( stream, &Handshake { session_id: me.sign_msg(session_id.clone()), + genesis, }, ) .await diff --git a/node/actors/network/src/consensus/handshake/testonly.rs b/node/actors/network/src/consensus/handshake/testonly.rs index d05848df..05d5d8de 100644 --- a/node/actors/network/src/consensus/handshake/testonly.rs +++ b/node/actors/network/src/consensus/handshake/testonly.rs @@ -15,6 +15,7 @@ impl Distribution<Handshake> for Standard { let session_id: node::SessionId = rng.gen(); Handshake { session_id: key.sign_msg(session_id), + genesis: rng.gen(), } } } diff --git a/node/actors/network/src/consensus/handshake/tests.rs b/node/actors/network/src/consensus/handshake/tests.rs index 23bd75cd..ce69744b 100644 --- a/node/actors/network/src/consensus/handshake/tests.rs +++ b/node/actors/network/src/consensus/handshake/tests.rs @@ -1,5 +1,6 @@ use super::*; use crate::{frame, noise, testonly}; +use assert_matches::assert_matches; use rand::Rng; use zksync_concurrency::{ctx, io, scope, testonly::abort_on_panic}; use zksync_consensus_roles::validator; @@ -20,6 +21,8 @@ async fn test_session_id_mismatch() { let key0: validator::SecretKey = rng.gen(); let key1: validator::SecretKey = rng.gen(); + let genesis: validator::GenesisHash = rng.gen(); + // MitM attempt detected on the inbound end.
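// (Each endpoint signs the session id of its own noise stream, so a handshake relayed by a man-in-the-middle arrives with a session id that does not match the stream it is received on; that mismatch is what the checks below catch.)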
scope::run!(ctx, |ctx, s| async { let (s1, s2) = noise::testonly::pipe(ctx).await; @@ -36,14 +39,14 @@ async fn test_session_id_mismatch() { }); s.spawn(async { let mut s4 = s4; - match inbound(ctx, &key0, &mut s4).await { + match inbound(ctx, &key0, genesis, &mut s4).await { Err(Error::SessionIdMismatch) => Ok(()), res => panic!("unexpected res: {res:?}"), } }); s.spawn(async { let mut s1 = s1; - match outbound(ctx, &key1, &mut s1, &key0.public()).await { + match outbound(ctx, &key1, genesis, &mut s1, &key0.public()).await { Err(Error::Stream(..)) => Ok(()), res => panic!("unexpected res: {res:?}"), } @@ -64,12 +67,13 @@ async fn test_session_id_mismatch() { &mut s2, &Handshake { session_id: key1.sign_msg(rng.gen::()), + genesis, }, ) .await?; Ok(()) }); - match outbound(ctx, &key0, &mut s1, &key1.public()).await { + match outbound(ctx, &key0, genesis, &mut s1, &key1.public()).await { Err(Error::SessionIdMismatch) => anyhow::Ok(()), res => panic!("unexpected res: {res:?}"), } @@ -88,16 +92,18 @@ async fn test_peer_mismatch() { let key1: validator::SecretKey = rng.gen(); let key2: validator::SecretKey = rng.gen(); + let genesis: validator::GenesisHash = rng.gen(); + scope::run!(ctx, |ctx, s| async { let (s0, s1) = noise::testonly::pipe(ctx).await; s.spawn(async { let mut s0 = s0; - assert_eq!(key1.public(), inbound(ctx, &key0, &mut s0).await?); + assert_eq!(key1.public(), inbound(ctx, &key0, genesis, &mut s0).await?); Ok(()) }); s.spawn(async { let mut s1 = s1; - match outbound(ctx, &key1, &mut s1, &key2.public()).await { + match outbound(ctx, &key1, genesis, &mut s1, &key2.public()).await { Err(Error::PeerMismatch) => Ok(()), res => panic!("unexpected res: {res:?}"), } @@ -108,6 +114,60 @@ async fn test_peer_mismatch() { .unwrap(); } +#[tokio::test] +async fn test_genesis_mismatch() { + abort_on_panic(); + let ctx = &ctx::test_root(&ctx::RealClock); + let rng = &mut ctx.rng(); + + let key0: validator::SecretKey = rng.gen(); + let key1: validator::SecretKey = rng.gen(); + + tracing::info!("test that inbound handshake rejects mismatching genesis"); + scope::run!(ctx, |ctx, s| async { + let (s0, mut s1) = noise::testonly::pipe(ctx).await; + s.spawn(async { + let mut s0 = s0; + let res = outbound(ctx, &key0, ctx.rng().gen(), &mut s0, &key1.public()).await; + assert_matches!(res, Err(Error::Stream(_))); + Ok(()) + }); + let res = inbound(ctx, &key1, rng.gen(), &mut s1).await; + assert_matches!(res, Err(Error::GenesisMismatch)); + anyhow::Ok(()) + }) + .await + .unwrap(); + + tracing::info!("test that outbound handshake rejects mismatching genesis"); + scope::run!(ctx, |ctx, s| async { + let (s0, mut s1) = noise::testonly::pipe(ctx).await; + s.spawn(async { + let mut s0 = s0; + let res = outbound(ctx, &key0, ctx.rng().gen(), &mut s0, &key1.public()).await; + assert_matches!(res, Err(Error::GenesisMismatch)); + Ok(()) + }); + let session_id = node::SessionId(s1.id().encode()); + let _: Handshake = frame::recv_proto(ctx, &mut s1, Handshake::max_size()) + .await + .unwrap(); + frame::send_proto( + ctx, + &mut s1, + &Handshake { + session_id: key1.sign_msg(session_id), + genesis: rng.gen(), + }, + ) + .await + .unwrap(); + anyhow::Ok(()) + }) + .await + .unwrap(); +} + #[tokio::test] async fn test_invalid_signature() { abort_on_panic(); @@ -117,6 +177,8 @@ async fn test_invalid_signature() { let key0: validator::SecretKey = rng.gen(); let key1: validator::SecretKey = rng.gen(); + let genesis: validator::GenesisHash = rng.gen(); + // Bad signature detected on outbound end. 
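// (The forged handshake sent below carries a signature that does not verify against the advertised validator key, so the outbound end must fail with Error::Signature.)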
scope::run!(ctx, |ctx, s| async { let (mut s0, s1) = noise::testonly::pipe(ctx).await; @@ -127,7 +189,7 @@ async fn test_invalid_signature() { frame::send_proto(ctx, &mut s1, &h).await?; Ok(()) }); - match outbound(ctx, &key0, &mut s0, &key1.public()).await { + match outbound(ctx, &key0, genesis, &mut s0, &key1.public()).await { Err(Error::Signature(..)) => anyhow::Ok(()), res => panic!("unexpected res: {res:?}"), } @@ -142,11 +204,12 @@ async fn test_invalid_signature() { let mut s1 = s1; let mut h = Handshake { session_id: key0.sign_msg(node::SessionId(s1.id().encode())), + genesis, }; h.session_id.key = key1.public(); frame::send_proto(ctx, &mut s1, &h).await }); - match inbound(ctx, &key0, &mut s0).await { + match inbound(ctx, &key0, genesis, &mut s0).await { Err(Error::Signature(..)) => anyhow::Ok(()), res => panic!("unexpected res: {res:?}"), } diff --git a/node/actors/network/src/consensus/mod.rs b/node/actors/network/src/consensus/mod.rs index e6689b4f..32a90540 100644 --- a/node/actors/network/src/consensus/mod.rs +++ b/node/actors/network/src/consensus/mod.rs @@ -1,13 +1,238 @@ //! Consensus network is a full graph of connections between all validators. //! BFT consensus messages are exchanged over this network. +use crate::{config, gossip, io, noise, pool::PoolWatch, preface, rpc}; +use anyhow::Context as _; +use std::{ + collections::{HashMap, HashSet}, + sync::Arc, +}; +use zksync_concurrency::{ctx, oneshot, scope, sync, time}; +use zksync_consensus_roles::validator; +use zksync_protobuf::kB; + mod handshake; -mod runner; -mod state; #[cfg(test)] mod tests; -// Clippy doesn't care about visibility required only for tests. -pub(crate) use runner::*; -#[allow(clippy::wildcard_imports)] -pub use state::Config; -pub(crate) use state::*; +const RESP_MAX_SIZE: usize = kB; +/// Frequency at which the validator broadcasts its own IP address. +/// Although the IP is not likely to change while validator is running, +/// we do this periodically, so that the network can observe if validator +/// is down. +const ADDRESS_ANNOUNCER_INTERVAL: time::Duration = time::Duration::minutes(10); + +/// Consensus network state. +pub(crate) struct Network { + /// Gossip network state to bootstrap consensus network from. + pub(crate) gossip: Arc, + /// This validator's secret key. + pub(crate) key: validator::SecretKey, + /// Set of the currently open inbound connections. + pub(crate) inbound: PoolWatch, + /// Set of the currently open outbound connections. + pub(crate) outbound: PoolWatch, + /// RPC clients for all validators. + pub(crate) clients: HashMap>, +} + +#[async_trait::async_trait] +impl rpc::Handler for &Network { + /// Here we bound the buffering of incoming consensus messages. + fn max_req_size(&self) -> usize { + self.gossip.cfg.max_block_size.saturating_add(kB) + } + + async fn handle( + &self, + ctx: &ctx::Ctx, + req: rpc::consensus::Req, + ) -> anyhow::Result { + let (send, recv) = oneshot::channel(); + self.gossip + .sender + .send(io::OutputMessage::Consensus(io::ConsensusReq { + msg: req.0, + ack: send, + })); + recv.recv_or_disconnected(ctx).await??; + Ok(rpc::consensus::Resp) + } +} + +impl Network { + /// Constructs a new consensus network state. 
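+ /// Returns `None` if this node is not a validator, i.e. if no validator
+ /// key is configured in the gossip config.
+ ///
+ /// A minimal usage sketch (hypothetical caller, assuming a constructed
+ /// gossip state):
+ /// ```ignore
+ /// let Some(consensus) = Network::new(ctx, gossip.clone()) else {
+ ///     return Ok(()); // not a validator, nothing to run
+ /// };
+ /// ```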
+ pub(crate) fn new(ctx: &ctx::Ctx, gossip: Arc) -> Option> { + let key = gossip.cfg.validator_key.clone()?; + let validators: HashSet<_> = gossip.genesis().validators.iter().cloned().collect(); + Some(Arc::new(Self { + key, + inbound: PoolWatch::new(validators.clone(), 0), + outbound: PoolWatch::new(validators.clone(), 0), + clients: validators + .iter() + .map(|peer| { + ( + peer.clone(), + rpc::Client::new(ctx, gossip.cfg.rpc.consensus_rate), + ) + }) + .collect(), + gossip, + })) + } + + /// Sends a message to all validators. + pub(crate) async fn broadcast( + &self, + ctx: &ctx::Ctx, + msg: validator::Signed, + ) -> anyhow::Result<()> { + let req = rpc::consensus::Req(msg); + scope::run!(ctx, |ctx, s| async { + for (peer, client) in &self.clients { + s.spawn(async { + if let Err(err) = client.call(ctx, &req, RESP_MAX_SIZE).await { + tracing::info!("send({:?},): {err:#}", &*peer); + } + Ok(()) + }); + } + Ok(()) + }) + .await + } + + /// Sends a message to the given validator. + pub(crate) async fn send( + &self, + ctx: &ctx::Ctx, + key: &validator::PublicKey, + msg: validator::Signed, + ) -> anyhow::Result<()> { + let client = self.clients.get(key).context("not an active validator")?; + client + .call(ctx, &rpc::consensus::Req(msg), RESP_MAX_SIZE) + .await?; + Ok(()) + } + + /// Performs handshake of an inbound stream. + /// Closes the stream if there is another inbound stream opened from the same validator. + pub(crate) async fn run_inbound_stream( + &self, + ctx: &ctx::Ctx, + mut stream: noise::Stream, + ) -> anyhow::Result<()> { + let peer = + handshake::inbound(ctx, &self.key, self.gossip.genesis().hash(), &mut stream).await?; + self.inbound.insert(peer.clone()).await?; + let res = scope::run!(ctx, |ctx, s| async { + let mut service = rpc::Service::new() + .add_server(rpc::ping::Server, rpc::ping::RATE) + .add_server(self, self.gossip.cfg.rpc.consensus_rate); + if let Some(ping_timeout) = &self.gossip.cfg.ping_timeout { + let ping_client = rpc::Client::::new(ctx, rpc::ping::RATE); + service = service.add_client(&ping_client); + s.spawn(async { + let ping_client = ping_client; + ping_client.ping_loop(ctx, *ping_timeout).await + }); + } + service.run(ctx, stream).await?; + Ok(()) + }) + .await; + self.inbound.remove(&peer).await; + res + } + + async fn run_outbound_stream( + &self, + ctx: &ctx::Ctx, + peer: &validator::PublicKey, + addr: std::net::SocketAddr, + ) -> anyhow::Result<()> { + let client = self.clients.get(peer).context("not an active validator")?; + let mut stream = preface::connect(ctx, addr, preface::Endpoint::ConsensusNet).await?; + handshake::outbound( + ctx, + &self.key, + self.gossip.genesis().hash(), + &mut stream, + peer, + ) + .await?; + self.outbound.insert(peer.clone()).await?; + let res = scope::run!(ctx, |ctx, s| async { + let mut service = rpc::Service::new() + .add_server(rpc::ping::Server, rpc::ping::RATE) + .add_client(client); + if let Some(ping_timeout) = &self.gossip.cfg.ping_timeout { + let ping_client = rpc::Client::::new(ctx, rpc::ping::RATE); + service = service.add_client(&ping_client); + s.spawn(async { + let ping_client = ping_client; + ping_client.ping_loop(ctx, *ping_timeout).await + }); + } + service.run(ctx, stream).await?; + Ok(()) + }) + .await; + self.outbound.remove(peer).await; + res + } + + /// Maintains a connection to the given validator. + /// If connection breaks, it tries to reconnect periodically. 
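+ /// The peer's address comes from the gossiped validator_addrs map: between
+ /// attempts we either pick up a newly announced address or retry the last
+ /// known one after CONNECT_RETRY.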
+ pub(crate) async fn maintain_connection(&self, ctx: &ctx::Ctx, peer: &validator::PublicKey) { + let addrs = &mut self.gossip.validator_addrs.subscribe(); + let mut addr = None; + while ctx.is_active() { + // Wait for a new address, or retry with the old one after timeout. + if let Ok(new) = + sync::wait_for(&ctx.with_timeout(config::CONNECT_RETRY), addrs, |addrs| { + addrs.get(peer).map(|x| x.msg.addr) != addr + }) + .await + { + addr = new.get(peer).map(|x| x.msg.addr); + } + let Some(addr) = addr else { continue }; + if let Err(err) = self.run_outbound_stream(ctx, peer, addr).await { + tracing::info!("run_outbound_stream({peer:?},{addr}): {err:#}"); + } + } + } + + /// Periodically announces this validator's public IP over gossip network, + /// so that other validators can discover and connect to this validator. + pub(crate) async fn run_address_announcer(&self, ctx: &ctx::Ctx) { + let my_addr = self.gossip.cfg.public_addr; + let mut sub = self.gossip.validator_addrs.subscribe(); + while ctx.is_active() { + let ctx = &ctx.with_timeout(ADDRESS_ANNOUNCER_INTERVAL); + let _ = sync::wait_for(ctx, &mut sub, |got| { + got.get(&self.key.public()).map(|x| &x.msg.addr) != Some(&my_addr) + }) + .await; + let next_version = sub + .borrow() + .get(&self.key.public()) + .map(|x| x.msg.version + 1) + .unwrap_or(0); + self.gossip + .validator_addrs + .update( + &self.gossip.genesis().validators, + &[Arc::new(self.key.sign_msg(validator::NetAddress { + addr: my_addr, + version: next_version, + timestamp: ctx.now_utc(), + }))], + ) + .await + .unwrap(); + } + } +} diff --git a/node/actors/network/src/consensus/runner.rs b/node/actors/network/src/consensus/runner.rs deleted file mode 100644 index ca988b0c..00000000 --- a/node/actors/network/src/consensus/runner.rs +++ /dev/null @@ -1,190 +0,0 @@ -//! run_client routine maintaining outbound connections to validators. - -use super::handshake; -use crate::{io, noise, preface, rpc, State}; -use anyhow::Context as _; -use std::{collections::HashMap, sync::Arc}; -use zksync_concurrency::{ctx, ctx::channel, oneshot, scope, sync, time}; -use zksync_consensus_roles::validator; -use zksync_protobuf::kB; - -/// How often we should retry to establish a connection to a validator. -/// TODO(gprusak): once it becomes relevant, choose a more appropriate retry strategy. -const CONNECT_RETRY: time::Duration = time::Duration::seconds(20); -/// A ping request is sent periodically. If ping response doesn't arrive -/// within PING_TIMEOUT, we close the connection. -const PING_TIMEOUT: time::Duration = time::Duration::seconds(10); -/// Each consensus message is expected to be delivered within MSG_TIMEOUT. -/// After that time the message is dropped. -/// TODO(gprusak): for liveness we should retry sending messages until the view -/// changes. That requires tighter integration with the consensus. -const MSG_TIMEOUT: time::Duration = time::Duration::seconds(10); - -struct Server { - out: channel::UnboundedSender, - max_block_size: usize, -} - -#[async_trait::async_trait] -impl rpc::Handler for Server { - /// Here we bound the buffering of incoming consensus messages. 
- fn max_req_size(&self) -> usize { - self.max_block_size.saturating_add(kB) - } - - async fn handle( - &self, - ctx: &ctx::Ctx, - req: rpc::consensus::Req, - ) -> anyhow::Result { - let (send, recv) = oneshot::channel(); - self.out - .send(io::OutputMessage::Consensus(io::ConsensusReq { - msg: req.0, - ack: send, - })); - recv.recv_or_disconnected(ctx).await??; - Ok(rpc::consensus::Resp) - } -} - -/// Performs handshake of an inbound stream. -/// Closes the stream if there is another inbound stream opened from the same validator. -pub(crate) async fn run_inbound_stream( - ctx: &ctx::Ctx, - state: &State, - sender: &channel::UnboundedSender, - mut stream: noise::Stream, -) -> anyhow::Result<()> { - let consensus_state = state - .consensus - .as_ref() - .context("Node does not accept consensus network connections")?; - let peer = handshake::inbound(ctx, &consensus_state.cfg.key, &mut stream).await?; - consensus_state.inbound.insert(peer.clone()).await?; - let res = scope::run!(ctx, |ctx, s| async { - let mut service = rpc::Service::new() - .add_server(rpc::ping::Server) - .add_server(Server { - out: sender.clone(), - max_block_size: state.cfg.max_block_size, - }); - if state.cfg.enable_pings { - let ping_client = rpc::Client::::new(ctx); - service = service.add_client(&ping_client); - s.spawn(async { - let ping_client = ping_client; - ping_client.ping_loop(ctx, PING_TIMEOUT).await - }); - } - service.run(ctx, stream).await?; - Ok(()) - }) - .await; - consensus_state.inbound.remove(&peer).await; - res -} - -async fn run_outbound_stream( - ctx: &ctx::Ctx, - state: &super::State, - client: &rpc::Client, - peer: &validator::PublicKey, - addr: std::net::SocketAddr, -) -> anyhow::Result<()> { - let mut stream = preface::connect(ctx, addr, preface::Endpoint::ConsensusNet).await?; - handshake::outbound(ctx, &state.cfg.key, &mut stream, peer).await?; - state.outbound.insert(peer.clone()).await?; - let ping_client = rpc::Client::::new(ctx); - let res = scope::run!(ctx, |ctx, s| async { - s.spawn(ping_client.ping_loop(ctx, PING_TIMEOUT)); - rpc::Service::new() - .add_client(&ping_client) - .add_server(rpc::ping::Server) - .add_client(client) - .run(ctx, stream) - .await?; - Ok(()) - }) - .await; - state.outbound.remove(peer).await; - res -} - -/// Runs an Rpc client trying to maintain 1 outbound connection per validator. -pub(crate) async fn run_client( - ctx: &ctx::Ctx, - state: &super::State, - shared_state: &State, - mut receiver: channel::UnboundedReceiver, -) -> anyhow::Result<()> { - let clients: HashMap<_, _> = shared_state - .cfg - .validators - .iter() - .map(|peer| (peer.clone(), rpc::Client::::new(ctx))) - .collect(); - - scope::run!(ctx, |ctx, s| async { - // Spawn outbound connections. - for (peer, client) in &clients { - s.spawn::<()>(async { - let client = &*client; - let addrs = &mut shared_state.gossip.validator_addrs.subscribe(); - let mut addr = None; - while ctx.is_active() { - if let Ok(new) = - sync::wait_for(&ctx.with_timeout(CONNECT_RETRY), addrs, |addrs| { - addrs.get(peer).map(|x| x.msg.addr) != addr - }) - .await - { - addr = new.get(peer).map(|x| x.msg.addr); - } - if let Some(addr) = addr { - if let Err(err) = run_outbound_stream(ctx, state, client, peer, addr).await - { - tracing::info!("run_outbound_stream({addr}): {err:#}"); - } - } - } - Ok(()) - }); - } - - // Call RPCs. 
- while let Ok(msg) = receiver.recv(ctx).await { - match msg.recipient { - io::Target::Validator(val) => { - let client = clients.get(&val).context("unknown validator")?; - s.spawn(async { - let req = rpc::consensus::Req(msg.message); - if let Err(err) = - client.call(&ctx.with_timeout(MSG_TIMEOUT), &req, kB).await - { - tracing::info!("client.consensus(): {err:#}"); - } - Ok(()) - }); - } - io::Target::Broadcast => { - let req = Arc::new(rpc::consensus::Req(msg.message)); - for client in clients.values() { - let req = req.clone(); - s.spawn(async { - let req = req; - if let Err(err) = - client.call(&ctx.with_timeout(MSG_TIMEOUT), &req, kB).await - { - tracing::info!("client.consensus(): {err:#}"); - } - Ok(()) - }); - } - } - } - } - Ok(()) - }) - .await -} diff --git a/node/actors/network/src/consensus/state.rs b/node/actors/network/src/consensus/state.rs deleted file mode 100644 index e8521283..00000000 --- a/node/actors/network/src/consensus/state.rs +++ /dev/null @@ -1,45 +0,0 @@ -use crate::pool::PoolWatch; -use std::collections::HashSet; -use zksync_consensus_roles::{validator, validator::ValidatorSet}; - -/// Configuration of the consensus network. -#[derive(Debug, Clone)] -pub struct Config { - /// Private key of the validator. Currently only validator nodes - /// are supported, but eventually it will become optional. - pub key: validator::SecretKey, - - /// Public TCP address that other validators are expected to connect to. - /// It is announced over gossip network. - pub public_addr: std::net::SocketAddr, -} - -/// Consensus network state. -pub(crate) struct State { - /// Consensus configuration. - pub(crate) cfg: Config, - /// Set of the currently open inbound connections. - pub(crate) inbound: PoolWatch, - /// Set of the currently open outbound connections. - pub(crate) outbound: PoolWatch, -} - -impl State { - /// Constructs a new State. 
- pub(crate) fn new(cfg: Config, validators: &ValidatorSet) -> anyhow::Result { - let validators: HashSet<_> = validators.iter().cloned().collect(); - let current_validator_key = cfg.key.public(); - anyhow::ensure!( - validators.contains(¤t_validator_key), - "Validators' public keys {validators:?} do not contain the current validator \ - {current_validator_key:?}; this is not yet supported" - ); - // ^ This check will be relaxed once we support dynamic validator membership - - Ok(Self { - cfg, - inbound: PoolWatch::new(validators.clone(), 0), - outbound: PoolWatch::new(validators, 0), - }) - } -} diff --git a/node/actors/network/src/consensus/tests.rs b/node/actors/network/src/consensus/tests.rs index fcf8c574..f2adc113 100644 --- a/node/actors/network/src/consensus/tests.rs +++ b/node/actors/network/src/consensus/tests.rs @@ -1,5 +1,6 @@ use super::*; -use crate::{io, preface, rpc, testonly}; +use crate::{io, metrics, preface, rpc, testonly}; +use assert_matches::assert_matches; use rand::Rng; use tracing::Instrument as _; use zksync_concurrency::{ctx, net, scope, testonly::abort_on_panic}; @@ -11,14 +12,14 @@ async fn test_one_connection_per_validator() { abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); let rng = &mut ctx.rng(); - let setup = validator::testonly::GenesisSetup::new(rng, 3); + let setup = validator::testonly::Setup::new(rng, 3); let nodes = testonly::new_configs(rng, &setup, 1); scope::run!(ctx, |ctx,s| async { - let (store,runner) = new_store(ctx,&setup.blocks[0]).await; + let (store,runner) = new_store(ctx,&setup.genesis).await; s.spawn_bg(runner.run(ctx)); let nodes : Vec<_> = nodes.into_iter().enumerate().map(|(i,node)| { - let (node,runner) = testonly::Instance::new(node, store.clone()); + let (node,runner) = testonly::Instance::new(ctx, node, store.clone()); s.spawn_bg(runner.run(ctx).instrument(tracing::info_span!("node", i))); node }).collect(); @@ -36,16 +37,17 @@ async fn test_one_connection_per_validator() { tracing::info!("Impersonate node 1, and try to establish additional connection to node 0. It should close automatically after the handshake."); let mut stream = preface::connect( ctx, - *nodes[0].state.cfg.server_addr, + *nodes[0].cfg().server_addr, preface::Endpoint::ConsensusNet, ) .await?; handshake::outbound( ctx, - &nodes[1].consensus_config().key, + &nodes[1].cfg().validator_key.clone().unwrap(), + setup.genesis.hash(), &mut stream, - &nodes[0].consensus_config().key.public(), + &nodes[0].cfg().validator_key.as_ref().unwrap().public(), ) .await?; // The connection is expected to be closed automatically by node 0. 
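// (node 0 already tracks an inbound connection from the real node 1, and
// the inbound pool admits at most one connection per validator key, so the
// duplicate handshake is rejected and the stream is dropped).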
@@ -61,22 +63,89 @@ async fn test_one_connection_per_validator() { .unwrap(); } +#[tokio::test] +async fn test_genesis_mismatch() { + abort_on_panic(); + let ctx = &ctx::test_root(&ctx::RealClock); + let rng = &mut ctx.rng(); + let setup = validator::testonly::Setup::new(rng, 2); + let cfgs = testonly::new_configs(rng, &setup, /*gossip_peers=*/ 0); + + scope::run!(ctx, |ctx, s| async { + let mut listener = cfgs[1].server_addr.bind().context("server_addr.bind()")?; + + tracing::info!("Start one node, we will simulate the other one."); + let (store, runner) = new_store(ctx, &setup.genesis).await; + s.spawn_bg(runner.run(ctx)); + let (node, runner) = testonly::Instance::new(ctx, cfgs[0].clone(), store.clone()); + s.spawn_bg(runner.run(ctx).instrument(tracing::info_span!("node"))); + + tracing::info!("Populate the validator_addrs of the running node."); + node.net + .gossip + .validator_addrs + .update( + &setup.genesis.validators, + &[Arc::new(setup.keys[1].sign_msg(validator::NetAddress { + addr: cfgs[1].public_addr, + version: 0, + timestamp: ctx.now_utc(), + }))], + ) + .await + .unwrap(); + + tracing::info!("Accept a connection with mismatching genesis."); + let stream = metrics::MeteredStream::listen(ctx, &mut listener) + .await? + .context("listen()")?; + let (mut stream, endpoint) = preface::accept(ctx, stream) + .await + .context("preface::accept()")?; + assert_eq!(endpoint, preface::Endpoint::ConsensusNet); + tracing::info!("Expect the handshake to fail"); + let res = handshake::inbound(ctx, &setup.keys[1], rng.gen(), &mut stream).await; + assert_matches!(res, Err(handshake::Error::GenesisMismatch)); + + tracing::info!("Try to connect to a node with a mismatching genesis."); + let mut stream = + preface::connect(ctx, cfgs[0].public_addr, preface::Endpoint::ConsensusNet) + .await + .context("preface::connect")?; + let res = handshake::outbound( + ctx, + &setup.keys[1], + rng.gen(), + &mut stream, + &setup.keys[0].public(), + ) + .await; + tracing::info!( + "Expect the peer to verify the mismatching Genesis and close the connection." + ); + assert_matches!(res, Err(handshake::Error::Stream(_))); + Ok(()) + }) + .await + .unwrap(); +} + #[tokio::test(flavor = "multi_thread")] async fn test_address_change() { abort_on_panic(); let ctx = &ctx::test_root(&ctx::AffineClock::new(20.)); let rng = &mut ctx.rng(); - let setup = validator::testonly::GenesisSetup::new(rng, 5); + let setup = validator::testonly::Setup::new(rng, 5); let mut cfgs = testonly::new_configs(rng, &setup, 1); scope::run!(ctx, |ctx, s| async { - let (store, runner) = new_store(ctx, &setup.blocks[0]).await; + let (store, runner) = new_store(ctx, &setup.genesis).await; s.spawn_bg(runner.run(ctx)); let mut nodes: Vec<_> = cfgs .iter() .enumerate() .map(|(i, cfg)| { - let (node, runner) = testonly::Instance::new(cfg.clone(), store.clone()); + let (node, runner) = testonly::Instance::new(ctx, cfg.clone(), store.clone()); s.spawn_bg(runner.run(ctx).instrument(tracing::info_span!("node", i))); node }) @@ -87,7 +156,7 @@ async fn test_address_change() { nodes[0].terminate(ctx).await?; // All nodes should lose connection to node[0]. - let key0 = nodes[0].consensus_config().key.public(); + let key0 = nodes[0].cfg().validator_key.as_ref().unwrap().public(); for node in &nodes { node.wait_for_consensus_disconnect(ctx, &key0).await?; } @@ -98,8 +167,8 @@ async fn test_address_change() { // Then it should broadcast its new address and the consensus network // should get reconstructed. 
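// (the address announcer signs a NetAddress with a bumped version, so peers
// replace the stale entry for node 0 with the newly announced address).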
cfgs[0].server_addr = net::tcp::testonly::reserve_listener(); - cfgs[0].consensus.as_mut().unwrap().public_addr = *cfgs[0].server_addr; - let (node0, runner) = testonly::Instance::new(cfgs[0].clone(), store.clone()); + cfgs[0].public_addr = *cfgs[0].server_addr; + let (node0, runner) = testonly::Instance::new(ctx, cfgs[0].clone(), store.clone()); s.spawn_bg(runner.run(ctx).instrument(tracing::info_span!("node0"))); nodes[0] = node0; for n in &nodes { @@ -119,18 +188,26 @@ async fn test_transmission() { let ctx = &ctx::test_root(&ctx::RealClock); let rng = &mut ctx.rng(); - let setup = validator::testonly::GenesisSetup::new(rng, 2); + let setup = validator::testonly::Setup::new(rng, 2); let cfgs = testonly::new_configs(rng, &setup, 1); scope::run!(ctx, |ctx, s| async { - let (store, runner) = new_store(ctx, &setup.blocks[0]).await; + let (store, runner) = new_store(ctx, &setup.genesis).await; s.spawn_bg(runner.run(ctx)); let mut nodes: Vec<_> = cfgs .iter() .enumerate() .map(|(i, cfg)| { - let (node, runner) = testonly::Instance::new(cfg.clone(), store.clone()); - s.spawn_bg(runner.run(ctx).instrument(tracing::info_span!("node", i))); + let (node, runner) = testonly::Instance::new(ctx, cfg.clone(), store.clone()); + let i = ctx::NoCopy(i); + s.spawn_bg(async { + let i = i; + runner + .run(ctx) + .instrument(tracing::info_span!("node", i = *i)) + .await + .context(*i) + }); node }) .collect(); @@ -143,7 +220,9 @@ async fn test_transmission() { let want: validator::Signed = rng.gen(); let in_message = io::ConsensusInputMessage { message: want.clone(), - recipient: io::Target::Validator(nodes[1].consensus_config().key.public()), + recipient: io::Target::Validator( + nodes[1].cfg().validator_key.as_ref().unwrap().public(), + ), }; nodes[0].pipe.send(in_message.into()); diff --git a/node/actors/network/src/event.rs b/node/actors/network/src/event.rs deleted file mode 100644 index adf792e2..00000000 --- a/node/actors/network/src/event.rs +++ /dev/null @@ -1,21 +0,0 @@ -//! Mechanism for network State to report internal events. -//! It is used in tests to await a specific state. -use crate::State; - -impl State { - /// Sends an event to the `self.events` channel. - /// Noop if `self.events` is None. - pub(crate) fn event(&self, e: Event) { - if let Some(events) = &self.events { - events.send(e); - } - } -} - -/// Events observable in tests. -/// Feel free to extend this enum if you need to -/// write a test awaiting some specific event/state. -#[derive(Debug)] -pub enum Event { - ValidatorAddrsUpdated, -} diff --git a/node/actors/network/src/gossip/arcmap.rs b/node/actors/network/src/gossip/arcmap.rs new file mode 100644 index 00000000..2fe3307d --- /dev/null +++ b/node/actors/network/src/gossip/arcmap.rs @@ -0,0 +1,42 @@ +//! Multimap of pointers indexed by `node::PublicKey`. +//! Used to maintain a collection GetBlock rpc clients. +//! TODO(gprusak): consider upgrading PoolWatch instead. +use std::{ + collections::HashMap, + sync::{Arc, Mutex}, +}; +use zksync_consensus_roles::node; + +/// ArcMap +pub(crate) struct ArcMap(Mutex>>>); + +impl Default for ArcMap { + fn default() -> Self { + Self(Mutex::default()) + } +} + +impl ArcMap { + /// Fetches any pointer for the given key. + pub(crate) fn get_any(&self, key: &node::PublicKey) -> Option> { + self.0.lock().unwrap().get(key)?.first().cloned() + } + + /// Insert a pointer. + pub(crate) fn insert(&self, key: node::PublicKey, p: Arc) { + self.0.lock().unwrap().entry(key).or_default().push(p); + } + + /// Removes a pointer. 
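+ /// The pointer is matched by identity (`Arc::ptr_eq`); once the last
+ /// pointer for a key is gone, the whole entry is dropped.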
+ pub(crate) fn remove(&self, key: node::PublicKey, p: Arc) { + let mut this = self.0.lock().unwrap(); + use std::collections::hash_map::Entry; + let Entry::Occupied(mut e) = this.entry(key) else { + return; + }; + e.get_mut().retain(|c| !Arc::ptr_eq(&p, c)); + if e.get_mut().is_empty() { + e.remove(); + } + } +} diff --git a/node/actors/network/src/gossip/handshake/mod.rs b/node/actors/network/src/gossip/handshake/mod.rs index f5d7b22e..de412474 100644 --- a/node/actors/network/src/gossip/handshake/mod.rs +++ b/node/actors/network/src/gossip/handshake/mod.rs @@ -1,9 +1,8 @@ -use super::Config; -use crate::{frame, noise, proto::gossip as proto}; +use crate::{frame, noise, proto::gossip as proto, GossipConfig}; use anyhow::Context as _; use zksync_concurrency::{ctx, time}; use zksync_consensus_crypto::ByteFmt; -use zksync_consensus_roles::node; +use zksync_consensus_roles::{node, validator}; use zksync_protobuf::{read_required, required, ProtoFmt}; #[cfg(test)] @@ -20,6 +19,9 @@ pub(crate) struct Handshake { /// Session ID signed with the node key. /// Authenticates the peer to be the owner of the node key. pub(crate) session_id: node::Signed, + /// Hash of the blockchain genesis specification. + /// Only nodes with the same genesis belong to the same network. + pub(crate) genesis: validator::GenesisHash, /// Information whether the peer treats this connection as static. /// It is informational only, it doesn't affect the logic of the node. pub(crate) is_static: bool, @@ -30,12 +32,14 @@ impl ProtoFmt for Handshake { fn read(r: &Self::Proto) -> anyhow::Result { Ok(Self { session_id: read_required(&r.session_id).context("session_id")?, + genesis: read_required(&r.genesis).context("genesis")?, is_static: *required(&r.is_static).context("is_static")?, }) } fn build(&self) -> Self::Proto { Self::Proto { session_id: Some(self.session_id.build()), + genesis: Some(self.genesis.build()), is_static: Some(self.is_static), } } @@ -44,19 +48,22 @@ impl ProtoFmt for Handshake { /// Error returned by gossip handshake logic. 
#[derive(Debug, thiserror::Error)] pub(super) enum Error { + #[error("genesis mismatch")] + GenesisMismatch, #[error("session id mismatch")] SessionIdMismatch, #[error("unexpected peer")] PeerMismatch, - #[error("validator signature")] + #[error(transparent)] Signature(#[from] node::InvalidSignatureError), - #[error("stream")] - Stream(#[source] anyhow::Error), + #[error(transparent)] + Stream(anyhow::Error), } pub(super) async fn outbound( ctx: &ctx::Ctx, - cfg: &Config, + cfg: &GossipConfig, + genesis: validator::GenesisHash, stream: &mut noise::Stream, peer: &node::PublicKey, ) -> Result<(), Error> { @@ -67,6 +74,7 @@ pub(super) async fn outbound( stream, &Handshake { session_id: cfg.key.sign_msg(session_id.clone()), + genesis, is_static: cfg.static_outbound.contains_key(peer), }, ) @@ -75,6 +83,9 @@ pub(super) async fn outbound( let h: Handshake = frame::recv_proto(ctx, stream, Handshake::max_size()) .await .map_err(Error::Stream)?; + if h.genesis != genesis { + return Err(Error::GenesisMismatch); + } if h.session_id.msg != session_id { return Err(Error::SessionIdMismatch); } @@ -87,7 +98,8 @@ pub(super) async fn outbound( pub(super) async fn inbound( ctx: &ctx::Ctx, - cfg: &Config, + cfg: &GossipConfig, + genesis: validator::GenesisHash, stream: &mut noise::Stream, ) -> Result { let ctx = &ctx.with_timeout(TIMEOUT); @@ -98,12 +110,16 @@ pub(super) async fn inbound( if h.session_id.msg != session_id { return Err(Error::SessionIdMismatch); } + if h.genesis != genesis { + return Err(Error::GenesisMismatch); + } h.session_id.verify()?; frame::send_proto( ctx, stream, &Handshake { session_id: cfg.key.sign_msg(session_id.clone()), + genesis, is_static: cfg.static_inbound.contains(&h.session_id.key), }, ) diff --git a/node/actors/network/src/gossip/handshake/testonly.rs b/node/actors/network/src/gossip/handshake/testonly.rs index d45531d2..c0894adf 100644 --- a/node/actors/network/src/gossip/handshake/testonly.rs +++ b/node/actors/network/src/gossip/handshake/testonly.rs @@ -15,6 +15,7 @@ impl Distribution for Standard { let session_id: node::SessionId = rng.gen(); Handshake { session_id: key.sign_msg(session_id), + genesis: rng.gen(), is_static: rng.gen(), } } diff --git a/node/actors/network/src/gossip/handshake/tests.rs b/node/actors/network/src/gossip/handshake/tests.rs index 2e024861..efb3471a 100644 --- a/node/actors/network/src/gossip/handshake/tests.rs +++ b/node/actors/network/src/gossip/handshake/tests.rs @@ -1,5 +1,6 @@ use super::*; -use crate::{frame, noise, testonly}; +use crate::{frame, noise, testonly, GossipConfig}; +use assert_matches::assert_matches; use rand::Rng; use std::collections::{HashMap, HashSet}; use zksync_concurrency::{ctx, io, scope, testonly::abort_on_panic}; @@ -12,8 +13,8 @@ fn test_schema_encode_decode() { test_encode_random::(rng); } -fn make_cfg(rng: &mut R) -> Config { - Config { +fn make_cfg(rng: &mut R) -> GossipConfig { + GossipConfig { key: rng.gen(), dynamic_inbound_limit: 0, static_inbound: HashSet::default(), @@ -29,6 +30,7 @@ async fn test_session_id_mismatch() { let cfg0 = make_cfg(rng); let cfg1 = make_cfg(rng); + let genesis: validator::GenesisHash = rng.gen(); // MitM attempt detected on the inbound end. 
scope::run!(ctx, |ctx, s| async { @@ -46,14 +48,14 @@ async fn test_session_id_mismatch() { }); s.spawn(async { let mut s4 = s4; - match inbound(ctx, &cfg0, &mut s4).await { + match inbound(ctx, &cfg0, genesis, &mut s4).await { Err(Error::SessionIdMismatch) => Ok(()), res => panic!("unexpected res: {res:?}"), } }); s.spawn(async { let mut s1 = s1; - match outbound(ctx, &cfg1, &mut s1, &cfg0.key.public()).await { + match outbound(ctx, &cfg1, genesis, &mut s1, &cfg0.key.public()).await { Err(Error::Stream(..)) => Ok(()), res => panic!("unexpected res: {res:?}"), } @@ -74,13 +76,14 @@ async fn test_session_id_mismatch() { &mut s2, &Handshake { session_id: cfg1.key.sign_msg(rng.gen::()), + genesis, is_static: false, }, ) .await?; Ok(()) }); - match outbound(ctx, &cfg0, &mut s1, &cfg1.key.public()).await { + match outbound(ctx, &cfg0, genesis, &mut s1, &cfg1.key.public()).await { Err(Error::SessionIdMismatch) => anyhow::Ok(()), res => panic!("unexpected res: {res:?}"), } @@ -99,16 +102,21 @@ async fn test_peer_mismatch() { let cfg1 = make_cfg(rng); let cfg2 = make_cfg(rng); + let genesis: validator::GenesisHash = rng.gen(); + scope::run!(ctx, |ctx, s| async { let (s0, s1) = noise::testonly::pipe(ctx).await; s.spawn(async { let mut s0 = s0; - assert_eq!(cfg1.key.public(), inbound(ctx, &cfg0, &mut s0).await?); + assert_eq!( + cfg1.key.public(), + inbound(ctx, &cfg0, genesis, &mut s0).await? + ); Ok(()) }); s.spawn(async { let mut s1 = s1; - match outbound(ctx, &cfg1, &mut s1, &cfg2.key.public()).await { + match outbound(ctx, &cfg1, genesis, &mut s1, &cfg2.key.public()).await { Err(Error::PeerMismatch) => Ok(()), res => panic!("unexpected res: {res:?}"), } @@ -119,6 +127,61 @@ async fn test_peer_mismatch() { .unwrap(); } +#[tokio::test] +async fn test_genesis_mismatch() { + abort_on_panic(); + let ctx = &ctx::test_root(&ctx::RealClock); + let rng = &mut ctx.rng(); + + let cfg0 = make_cfg(rng); + let cfg1 = make_cfg(rng); + + tracing::info!("test that inbound handshake rejects mismatching genesis"); + scope::run!(ctx, |ctx, s| async { + let (s0, mut s1) = noise::testonly::pipe(ctx).await; + s.spawn(async { + let mut s0 = s0; + let res = outbound(ctx, &cfg0, ctx.rng().gen(), &mut s0, &cfg1.key.public()).await; + assert_matches!(res, Err(Error::Stream(_))); + Ok(()) + }); + let res = inbound(ctx, &cfg1, rng.gen(), &mut s1).await; + assert_matches!(res, Err(Error::GenesisMismatch)); + anyhow::Ok(()) + }) + .await + .unwrap(); + + tracing::info!("test that outbound handshake rejects mismatching genesis"); + scope::run!(ctx, |ctx, s| async { + let (s0, mut s1) = noise::testonly::pipe(ctx).await; + s.spawn(async { + let mut s0 = s0; + let res = outbound(ctx, &cfg0, ctx.rng().gen(), &mut s0, &cfg1.key.public()).await; + assert_matches!(res, Err(Error::GenesisMismatch)); + Ok(()) + }); + let session_id = node::SessionId(s1.id().encode()); + let _: Handshake = frame::recv_proto(ctx, &mut s1, Handshake::max_size()) + .await + .unwrap(); + frame::send_proto( + ctx, + &mut s1, + &Handshake { + session_id: cfg1.key.sign_msg(session_id), + genesis: rng.gen(), + is_static: false, + }, + ) + .await + .unwrap(); + anyhow::Ok(()) + }) + .await + .unwrap(); +} + #[tokio::test] async fn test_invalid_signature() { abort_on_panic(); @@ -128,6 +191,8 @@ async fn test_invalid_signature() { let cfg0 = make_cfg(rng); let cfg1 = make_cfg(rng); + let genesis: validator::GenesisHash = rng.gen(); + // Bad signature detected on outbound end. 
scope::run!(ctx, |ctx, s| async { let (mut s0, s1) = noise::testonly::pipe(ctx).await; @@ -138,7 +203,7 @@ async fn test_invalid_signature() { frame::send_proto(ctx, &mut s1, &h).await?; Ok(()) }); - match outbound(ctx, &cfg0, &mut s0, &cfg1.key.public()).await { + match outbound(ctx, &cfg0, genesis, &mut s0, &cfg1.key.public()).await { Err(Error::Signature(..)) => anyhow::Ok(()), res => panic!("unexpected res: {res:?}"), } @@ -153,12 +218,13 @@ async fn test_invalid_signature() { let mut s1 = s1; let mut h = Handshake { session_id: cfg0.key.sign_msg(node::SessionId(s1.id().encode())), + genesis, is_static: true, }; h.session_id.key = cfg1.key.public(); frame::send_proto(ctx, &mut s1, &h).await }); - match inbound(ctx, &cfg0, &mut s0).await { + match inbound(ctx, &cfg0, genesis, &mut s0).await { Err(Error::Signature(..)) => anyhow::Ok(()), res => panic!("unexpected res: {res:?}"), } diff --git a/node/actors/network/src/gossip/mod.rs b/node/actors/network/src/gossip/mod.rs index 7991a147..e20cf7e8 100644 --- a/node/actors/network/src/gossip/mod.rs +++ b/node/actors/network/src/gossip/mod.rs @@ -12,11 +12,93 @@ //! Static connections constitute a rigid "backbone" of the gossip network, which is insensitive to //! eclipse attack. Dynamic connections are supposed to improve the properties of the gossip //! network graph (minimize its diameter, increase connectedness). +use crate::{ + gossip::{ArcMap, ValidatorAddrsWatch}, + io, + pool::PoolWatch, + rpc, Config, +}; +use anyhow::Context as _; +use std::sync::{atomic::AtomicUsize, Arc}; + +mod arcmap; mod handshake; mod runner; -mod state; #[cfg(test)] mod tests; +mod validator_addrs; + +pub(crate) use arcmap::*; +pub(crate) use validator_addrs::*; +use zksync_concurrency::{ctx, ctx::channel}; +use zksync_consensus_roles::{node, validator}; +use zksync_consensus_storage::BlockStore; +use zksync_protobuf::kB; + +/// Gossip network state. +pub(crate) struct Network { + /// Gossip network configuration. + pub(crate) cfg: Config, + /// Currently open inbound connections. + pub(crate) inbound: PoolWatch, + /// Currently open outbound connections. + pub(crate) outbound: PoolWatch, + /// Current state of knowledge about validators' endpoints. + pub(crate) validator_addrs: ValidatorAddrsWatch, + /// Block store to serve `get_block` requests from. + pub(crate) block_store: Arc, + /// Clients for `get_block` requests for each currently active peer. + pub(crate) get_block_clients: ArcMap>, + /// Output pipe of the network actor. + pub(crate) sender: channel::UnboundedSender, + /// TESTONLY: how many time push_validator_addrs rpc was called by the peers. + pub(crate) push_validator_addrs_calls: AtomicUsize, +} + +impl Network { + /// Constructs a new State. + pub(crate) fn new( + cfg: Config, + block_store: Arc, + sender: channel::UnboundedSender, + ) -> Arc { + Arc::new(Self { + sender, + inbound: PoolWatch::new( + cfg.gossip.static_inbound.clone(), + cfg.gossip.dynamic_inbound_limit, + ), + outbound: PoolWatch::new(cfg.gossip.static_outbound.keys().cloned().collect(), 0), + validator_addrs: ValidatorAddrsWatch::default(), + block_store, + get_block_clients: ArcMap::default(), + cfg, + push_validator_addrs_calls: 0.into(), + }) + } + + /// Genesis. + pub(crate) fn genesis(&self) -> &validator::Genesis { + self.block_store.genesis() + } -pub(crate) use runner::*; -pub use state::*; + /// Sends a GetBlock RPC to the given peer. 
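+ /// Returns `Ok(None)` if the peer is reachable but doesn't have the block;
+ /// fails if there is no active connection to the peer.
+ ///
+ /// A minimal usage sketch (hypothetical caller):
+ /// ```ignore
+ /// if let Some(block) = net.get_block(ctx, &peer, number).await? {
+ ///     store.queue_block(ctx, block).await?;
+ /// }
+ /// ```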
+ pub(crate) async fn get_block( + &self, + ctx: &ctx::Ctx, + recipient: &node::PublicKey, + number: validator::BlockNumber, + ) -> anyhow::Result> { + Ok(self + .get_block_clients + .get_any(recipient) + .context("recipient is unreachable")? + .call( + ctx, + &rpc::get_block::Req(number), + self.cfg.max_block_size.saturating_add(kB), + ) + .await? + .0) + } +} diff --git a/node/actors/network/src/gossip/runner.rs b/node/actors/network/src/gossip/runner.rs index 744b7b5b..b84ac131 100644 --- a/node/actors/network/src/gossip/runner.rs +++ b/node/actors/network/src/gossip/runner.rs @@ -1,28 +1,13 @@ -use super::{handshake, ValidatorAddrs}; -use crate::{consensus, event::Event, io, noise, preface, rpc, State}; +use super::{handshake, Network, ValidatorAddrs}; +use crate::{io, noise, preface, rpc}; use async_trait::async_trait; -use std::sync::Arc; -use zksync_concurrency::{ - ctx::{self, channel}, - oneshot, scope, sync, time, -}; -use zksync_consensus_roles::{node, validator}; +use std::sync::{atomic::Ordering, Arc}; +use zksync_concurrency::{ctx, oneshot, scope, sync}; +use zksync_consensus_roles::node; use zksync_consensus_storage::BlockStore; use zksync_protobuf::kB; -/// How often we should retry to establish a connection to a validator. -/// TODO(gprusak): once it becomes relevant, choose a more appropriate retry strategy. -pub(crate) const CONNECT_RETRY: time::Duration = time::Duration::seconds(20); -/// A ping request is sent periodically. If ping response doesn't arrive -/// within PING_TIMEOUT, we close the connection. -const PING_TIMEOUT: time::Duration = time::Duration::seconds(5); -/// Frequency at which the validator broadcasts its own IP address. -/// Although the IP is not likely to change while validator is running, -/// we do this periodically, so that the network can observe if validator -/// is down. 
-const ADDRESS_ANNOUNCER_INTERVAL: time::Duration = time::Duration::minutes(10); - -struct PushValidatorAddrsServer<'a>(&'a State); +struct PushValidatorAddrsServer<'a>(&'a Network); #[async_trait] impl rpc::Handler for PushValidatorAddrsServer<'_> { @@ -34,20 +19,21 @@ impl rpc::Handler for PushValidatorAddrsServer<' _ctx: &ctx::Ctx, req: rpc::push_validator_addrs::Req, ) -> anyhow::Result<()> { - self.0.event(Event::ValidatorAddrsUpdated); self.0 - .gossip + .push_validator_addrs_calls + .fetch_add(1, Ordering::SeqCst); + self.0 .validator_addrs - .update(&self.0.cfg.validators, &req.0[..]) + .update(&self.0.genesis().validators, &req.0[..]) .await?; Ok(()) } } -#[derive(Debug, Clone, Copy)] +#[derive(Clone, Copy)] struct PushBlockStoreStateServer<'a> { peer: &'a node::PublicKey, - sender: &'a channel::UnboundedSender, + net: &'a Network, } #[async_trait] @@ -66,7 +52,7 @@ impl rpc::Handler for PushBlockStoreStateServe state: req.0, response, }; - self.sender.send(message.into()); + self.net.sender.send(message.into()); response_receiver.recv_or_disconnected(ctx).await??; Ok(()) } @@ -86,208 +72,131 @@ impl rpc::Handler for &BlockStore { } } -async fn run_stream( - ctx: &ctx::Ctx, - state: &State, - peer: &node::PublicKey, - sender: &channel::UnboundedSender, - stream: noise::Stream, -) -> anyhow::Result<()> { - let push_validator_addrs_client = rpc::Client::::new(ctx); - let push_validator_addrs_server = PushValidatorAddrsServer(state); - let push_block_store_state_client = rpc::Client::::new(ctx); - let push_block_store_state_server = PushBlockStoreStateServer { peer, sender }; - - let get_block_client = Arc::new(rpc::Client::::new(ctx)); - state - .gossip - .get_block_clients - .insert(peer.clone(), get_block_client.clone()); - - let res = scope::run!(ctx, |ctx, s| async { - let mut service = rpc::Service::new() - .add_client(&push_validator_addrs_client) - .add_server(push_validator_addrs_server) - .add_client(&push_block_store_state_client) - .add_server(push_block_store_state_server) - .add_client(&get_block_client) - .add_server(&*state.gossip.block_store) - .add_server(rpc::ping::Server); - - if state.cfg.enable_pings { - let ping_client = rpc::Client::::new(ctx); - service = service.add_client(&ping_client); - s.spawn(async { - let ping_client = ping_client; - ping_client.ping_loop(ctx, PING_TIMEOUT).await - }); - } - - // Push block store state updates to peer. - s.spawn::<()>(async { - let mut sub = state.gossip.block_store.subscribe(); - sub.mark_changed(); - loop { - let state = sync::changed(ctx, &mut sub).await?.clone(); - let req = rpc::push_block_store_state::Req(state); - push_block_store_state_client.call(ctx, &req, kB).await?; +impl Network { + /// Manages lifecycle of a single connection. 
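+ /// Registers a get_block client for the peer, runs the full set of gossip
+ /// RPC clients and servers over the stream, and deregisters the client
+ /// once the stream terminates.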
+ async fn run_stream( + &self, + ctx: &ctx::Ctx, + peer: &node::PublicKey, + stream: noise::Stream, + ) -> anyhow::Result<()> { + let push_validator_addrs_client = rpc::Client::::new( + ctx, + self.cfg.rpc.push_validator_addrs_rate, + ); + let push_validator_addrs_server = PushValidatorAddrsServer(self); + let push_block_store_state_client = rpc::Client::::new( + ctx, + self.cfg.rpc.push_block_store_state_rate, + ); + let push_block_store_state_server = PushBlockStoreStateServer { peer, net: self }; + + let get_block_client = Arc::new(rpc::Client::::new( + ctx, + self.cfg.rpc.get_block_rate, + )); + self.get_block_clients + .insert(peer.clone(), get_block_client.clone()); + + let res = scope::run!(ctx, |ctx, s| async { + let mut service = rpc::Service::new() + .add_client(&push_validator_addrs_client) + .add_server( + push_validator_addrs_server, + self.cfg.rpc.push_validator_addrs_rate, + ) + .add_client(&push_block_store_state_client) + .add_server( + push_block_store_state_server, + self.cfg.rpc.push_block_store_state_rate, + ) + .add_client(&get_block_client) + .add_server(&*self.block_store, self.cfg.rpc.get_block_rate) + .add_server(rpc::ping::Server, rpc::ping::RATE); + + if let Some(ping_timeout) = &self.cfg.ping_timeout { + let ping_client = rpc::Client::::new(ctx, rpc::ping::RATE); + service = service.add_client(&ping_client); + s.spawn(async { + let ping_client = ping_client; + ping_client.ping_loop(ctx, *ping_timeout).await + }); } - }); - s.spawn::<()>(async { - // Push validator addrs updates to peer. - let mut old = ValidatorAddrs::default(); - let mut sub = state.gossip.validator_addrs.subscribe(); - sub.mark_changed(); - loop { - let new = sync::changed(ctx, &mut sub).await?.clone(); - let diff = new.get_newer(&old); - if diff.is_empty() { - continue; + // Push block store state updates to peer. + s.spawn::<()>(async { + let mut sub = self.block_store.subscribe(); + sub.mark_changed(); + loop { + let state = sync::changed(ctx, &mut sub).await?.clone(); + let req = rpc::push_block_store_state::Req(state); + push_block_store_state_client.call(ctx, &req, kB).await?; } - old = new; - let req = rpc::push_validator_addrs::Req(diff); - push_validator_addrs_client.call(ctx, &req, kB).await?; - } - }); - - service.run(ctx, stream).await?; - Ok(()) - }) - .await; - - state - .gossip - .get_block_clients - .remove(peer.clone(), get_block_client); - res -} - -/// Handles an inbound stream. -/// Closes the stream if there is another inbound stream opened from the same peer. 
-pub(crate) async fn run_inbound_stream( - ctx: &ctx::Ctx, - state: &State, - sender: &channel::UnboundedSender, - mut stream: noise::Stream, -) -> anyhow::Result<()> { - let peer = handshake::inbound(ctx, &state.gossip.cfg, &mut stream).await?; - tracing::Span::current().record("peer", tracing::field::debug(&peer)); - state.gossip.inbound.insert(peer.clone()).await?; - let res = run_stream(ctx, state, &peer, sender, stream).await; - state.gossip.inbound.remove(&peer).await; - res -} - -async fn run_outbound_stream( - ctx: &ctx::Ctx, - state: &State, - sender: &channel::UnboundedSender, - peer: &node::PublicKey, - addr: std::net::SocketAddr, -) -> anyhow::Result<()> { - let mut stream = preface::connect(ctx, addr, preface::Endpoint::GossipNet).await?; - handshake::outbound(ctx, &state.gossip.cfg, &mut stream, peer).await?; - - state.gossip.outbound.insert(peer.clone()).await?; - let res = run_stream(ctx, state, peer, sender, stream).await; - state.gossip.outbound.remove(peer).await; - res -} - -async fn run_address_announcer( - ctx: &ctx::Ctx, - state: &State, - consensus_state: &consensus::State, -) -> ctx::OrCanceled<()> { - let key = &consensus_state.cfg.key; - let my_addr = consensus_state.cfg.public_addr; - let mut sub = state.gossip.validator_addrs.subscribe(); - loop { - if !ctx.is_active() { - return Err(ctx::Canceled); - } - let ctx = &ctx.with_timeout(ADDRESS_ANNOUNCER_INTERVAL); - let _ = sync::wait_for(ctx, &mut sub, |got| { - got.get(&key.public()).map(|x| &x.msg.addr) != Some(&my_addr) - }) - .await; - let next_version = sub - .borrow() - .get(&key.public()) - .map(|x| x.msg.version + 1) - .unwrap_or(0); - state - .gossip - .validator_addrs - .update( - &state.cfg.validators, - &[Arc::new(key.sign_msg(validator::NetAddress { - addr: my_addr, - version: next_version, - timestamp: ctx.now_utc(), - }))], - ) - .await - .unwrap(); - } -} + }); -/// Runs an RPC client trying to maintain 1 outbound connection per validator. -pub(crate) async fn run_client( - ctx: &ctx::Ctx, - state: &State, - sender: &channel::UnboundedSender, - mut receiver: channel::UnboundedReceiver, -) -> anyhow::Result<()> { - scope::run!(ctx, |ctx, s| async { - // Spawn a tasks handling static outbound connections. - for (peer, addr) in &state.gossip.cfg.static_outbound { s.spawn::<()>(async { + // Push validator addrs updates to peer. 
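+ // Only entries newer than the previously pushed snapshot are sent,
+ // so steady-state traffic scales with the rate of address changes
+ // rather than with the size of the validator set.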
+ let mut old = ValidatorAddrs::default(); + let mut sub = self.validator_addrs.subscribe(); + sub.mark_changed(); loop { - let run_result = run_outbound_stream(ctx, state, sender, peer, *addr).await; - if let Err(err) = run_result { - tracing::info!("run_client_stream(): {err:#}"); + let new = sync::changed(ctx, &mut sub).await?.clone(); + let diff = new.get_newer(&old); + if diff.is_empty() { + continue; } - ctx.sleep(CONNECT_RETRY).await?; + old = new; + let req = rpc::push_validator_addrs::Req(diff); + push_validator_addrs_client.call(ctx, &req, kB).await?; } }); - } - s.spawn(async { - while let Ok(message) = receiver.recv(ctx).await { - s.spawn(async { - let message = message; - let io::SyncBlocksInputMessage::GetBlock { - recipient, - number, - response, - } = message; - let _ = response.send( - match state - .gossip - .get_block(ctx, &recipient, number, state.cfg.max_block_size) - .await - { - Ok(Some(block)) => Ok(block), - Ok(None) => Err(io::GetBlockError::NotAvailable), - Err(err) => Err(io::GetBlockError::Internal(err)), - }, - ); - Ok(()) - }); - } + service.run(ctx, stream).await?; Ok(()) - }); + }) + .await; - if let Some(consensus_state) = &state.consensus { - run_address_announcer(ctx, state, consensus_state).await - } else { - Ok(()) - } - }) - .await - .ok(); + self.get_block_clients + .remove(peer.clone(), get_block_client); + res + } - Ok(()) + /// Handles an inbound stream. + /// Closes the stream if there is another inbound stream opened from the same peer. + pub(crate) async fn run_inbound_stream( + &self, + ctx: &ctx::Ctx, + mut stream: noise::Stream, + ) -> anyhow::Result<()> { + let peer = + handshake::inbound(ctx, &self.cfg.gossip, self.genesis().hash(), &mut stream).await?; + tracing::Span::current().record("peer", tracing::field::debug(&peer)); + self.inbound.insert(peer.clone()).await?; + let res = self.run_stream(ctx, &peer, stream).await; + self.inbound.remove(&peer).await; + res + } + + /// Connects to a peer and handles the resulting stream. + pub(crate) async fn run_outbound_stream( + &self, + ctx: &ctx::Ctx, + peer: &node::PublicKey, + addr: std::net::SocketAddr, + ) -> anyhow::Result<()> { + let mut stream = preface::connect(ctx, addr, preface::Endpoint::GossipNet).await?; + handshake::outbound( + ctx, + &self.cfg.gossip, + self.genesis().hash(), + &mut stream, + peer, + ) + .await?; + + self.outbound.insert(peer.clone()).await?; + let res = self.run_stream(ctx, peer, stream).await; + self.outbound.remove(peer).await; + res + } } diff --git a/node/actors/network/src/gossip/state.rs b/node/actors/network/src/gossip/state.rs index 3b33631f..8b137891 100644 --- a/node/actors/network/src/gossip/state.rs +++ b/node/actors/network/src/gossip/state.rs @@ -1,216 +1 @@ -use crate::{pool::PoolWatch, rpc, watch::Watch}; -use anyhow::Context as _; -use std::{ - collections::{HashMap, HashSet}, - sync::{Arc, Mutex}, -}; -use zksync_concurrency::{ctx, sync}; -use zksync_consensus_roles::{node, validator}; -use zksync_consensus_storage::BlockStore; -use zksync_protobuf::kB; -/// Mapping from validator::PublicKey to a signed validator::NetAddress. -/// Represents the currents state of node's knowledge about the validator endpoints. -#[derive(Clone, Default, PartialEq, Eq)] -pub(crate) struct ValidatorAddrs( - pub(super) im::HashMap>>, -); - -impl ValidatorAddrs { - /// Gets a NetAddress for a given key. 
- pub(crate) fn get( - &self, - key: &validator::PublicKey, - ) -> Option<&Arc>> { - self.0.get(key) - } - - /// Returns a set of entries of `self` which are newer than the entries in `b`. - pub(super) fn get_newer(&self, b: &Self) -> Vec>> { - let mut newer = vec![]; - for (k, v) in &self.0 { - if let Some(bv) = b.0.get(k) { - if !v.msg.is_newer(&bv.msg) { - continue; - } - } - newer.push(v.clone()); - } - newer - } - - /// Updates the discovery map with entries from `data`. - /// It exits as soon as an invalid entry is found. - /// `self` might get modified even if an error is returned - /// (all entries verified so far are added). - /// Returns true iff some new entry was added. - pub(super) fn update( - &mut self, - validators: &validator::ValidatorSet, - data: &[Arc>], - ) -> anyhow::Result { - let mut changed = false; - - let mut done = HashSet::new(); - for d in data { - // Disallow multiple entries for the same key: - // it is important because a malicious validator may spam us with - // new versions and verifying signatures is expensive. - if done.contains(&d.key) { - anyhow::bail!("duplicate entry for {:?}", d.key); - } - done.insert(d.key.clone()); - if !validators.contains(&d.key) { - // We just skip the entries we are not interested in. - // For now the set of validators is static, so we could treat this as an error, - // however we eventually want the validator set to be dynamic. - continue; - } - if let Some(x) = self.0.get(&d.key) { - if !d.msg.is_newer(&x.msg) { - continue; - } - } - d.verify()?; - self.0.insert(d.key.clone(), d.clone()); - changed = true; - } - Ok(changed) - } -} - -/// Watch wrapper of ValidatorAddrs, -/// which supports subscribing to ValidatorAddr updates. -pub(crate) struct ValidatorAddrsWatch(Watch); - -impl Default for ValidatorAddrsWatch { - fn default() -> Self { - Self(Watch::new(ValidatorAddrs::default())) - } -} - -impl ValidatorAddrsWatch { - /// Subscribes to ValidatorAddrs updates. - pub(crate) fn subscribe(&self) -> sync::watch::Receiver { - self.0.subscribe() - } - - /// Inserts data to ValidatorAddrs. - /// Subscribers are notified iff at least 1 new entry has - /// been inserted. Returns an error iff an invalid - /// entry in `data` has been found. The provider of the - /// invalid entry should be banned. - pub(crate) async fn update( - &self, - validators: &validator::ValidatorSet, - data: &[Arc>], - ) -> anyhow::Result<()> { - let this = self.0.lock().await; - let mut validator_addrs = this.borrow().clone(); - if validator_addrs.update(validators, data)? { - this.send(validator_addrs).ok().unwrap(); - } - Ok(()) - } -} - -/// Gossip network configuration. -#[derive(Debug, Clone)] -pub struct Config { - /// Private key of the node, every node should have one. - pub key: node::SecretKey, - /// Limit on the number of inbound connections outside - /// of the `static_inbound` set. - pub dynamic_inbound_limit: usize, - /// Inbound connections that should be unconditionally accepted. - pub static_inbound: HashSet, - /// Outbound connections that the node should actively try to - /// establish and maintain. - pub static_outbound: HashMap, -} - -/// Multimap of pointers indexed by `node::PublicKey`. -/// Used to maintain a collection GetBlock rpc clients. -/// TODO(gprusak): consider upgrading PoolWatch instead. -pub(crate) struct ArcMap(Mutex>>>); - -impl Default for ArcMap { - fn default() -> Self { - Self(Mutex::default()) - } -} - -impl ArcMap { - /// Fetches any pointer for the given key. 
- pub(crate) fn get_any(&self, key: &node::PublicKey) -> Option> { - self.0.lock().unwrap().get(key)?.first().cloned() - } - - /// Insert a pointer. - pub(crate) fn insert(&self, key: node::PublicKey, p: Arc) { - self.0.lock().unwrap().entry(key).or_default().push(p); - } - - /// Removes a pointer. - pub(crate) fn remove(&self, key: node::PublicKey, p: Arc) { - let mut this = self.0.lock().unwrap(); - use std::collections::hash_map::Entry; - let Entry::Occupied(mut e) = this.entry(key) else { - return; - }; - e.get_mut().retain(|c| !Arc::ptr_eq(&p, c)); - if e.get_mut().is_empty() { - e.remove(); - } - } -} - -/// Gossip network state. -pub(crate) struct State { - /// Gossip network configuration. - pub(crate) cfg: Config, - /// Currently open inbound connections. - pub(crate) inbound: PoolWatch, - /// Currently open outbound connections. - pub(crate) outbound: PoolWatch, - /// Current state of knowledge about validators' endpoints. - pub(crate) validator_addrs: ValidatorAddrsWatch, - /// Block store to serve `get_block` requests from. - pub(crate) block_store: Arc, - /// Clients for `get_block` requests for each currently active peer. - pub(crate) get_block_clients: ArcMap>, -} - -impl State { - /// Constructs a new State. - pub(crate) fn new(cfg: Config, block_store: Arc) -> Self { - Self { - inbound: PoolWatch::new(cfg.static_inbound.clone(), cfg.dynamic_inbound_limit), - outbound: PoolWatch::new(cfg.static_outbound.keys().cloned().collect(), 0), - validator_addrs: ValidatorAddrsWatch::default(), - block_store, - get_block_clients: ArcMap::default(), - cfg, - } - } - - pub(super) async fn get_block( - &self, - ctx: &ctx::Ctx, - recipient: &node::PublicKey, - number: validator::BlockNumber, - max_block_size: usize, - ) -> anyhow::Result> { - Ok(self - .get_block_clients - .get_any(recipient) - .context("recipient is unreachable")? - .call( - ctx, - &rpc::get_block::Req(number), - max_block_size.saturating_add(kB), - ) - .await? 
- .0) - } -} diff --git a/node/actors/network/src/gossip/tests.rs b/node/actors/network/src/gossip/tests.rs index 3d4ab322..575a8492 100644 --- a/node/actors/network/src/gossip/tests.rs +++ b/node/actors/network/src/gossip/tests.rs @@ -1,11 +1,11 @@ use super::*; -use crate::{event::Event, io, preface, rpc, rpc::Rpc as _, testonly}; -use anyhow::Context as _; +use crate::{io, metrics, preface, rpc, testonly}; +use assert_matches::assert_matches; use pretty_assertions::assert_eq; use rand::Rng; use std::{ collections::{HashMap, HashSet}, - sync::Arc, + sync::{atomic::Ordering, Arc}, }; use test_casing::{test_casing, Product}; use tracing::Instrument as _; @@ -23,14 +23,14 @@ async fn test_one_connection_per_node() { let ctx = &ctx::test_root(&ctx::RealClock); let rng = &mut ctx.rng(); - let setup = validator::testonly::GenesisSetup::new(rng, 5); + let setup = validator::testonly::Setup::new(rng, 5); let cfgs = testonly::new_configs(rng, &setup, 2); scope::run!(ctx, |ctx,s| async { - let (store,runner) = new_store(ctx,&setup.blocks[0]).await; + let (store,runner) = new_store(ctx,&setup.genesis).await; s.spawn_bg(runner.run(ctx)); let mut nodes : Vec<_> = cfgs.iter().enumerate().map(|(i,cfg)| { - let (node,runner) = testonly::Instance::new(cfg.clone(), store.clone()); + let (node,runner) = testonly::Instance::new(ctx, cfg.clone(), store.clone()); s.spawn_bg(runner.run(ctx).instrument(tracing::info_span!("node", i))); node }).collect(); @@ -52,7 +52,7 @@ async fn test_one_connection_per_node() { .await .context("preface::connect")?; - handshake::outbound(ctx, &cfgs[0].gossip, &mut stream, peer) + handshake::outbound(ctx, &cfgs[0].gossip, setup.genesis.hash(), &mut stream, peer) .await .context("handshake::outbound")?; tracing::info!("The connection is expected to be closed automatically by peer."); @@ -225,17 +225,17 @@ async fn test_validator_addrs_propagation() { abort_on_panic(); let ctx = &ctx::test_root(&ctx::AffineClock::new(40.)); let rng = &mut ctx.rng(); - let setup = validator::testonly::GenesisSetup::new(rng, 10); + let setup = validator::testonly::Setup::new(rng, 10); let cfgs = testonly::new_configs(rng, &setup, 1); scope::run!(ctx, |ctx, s| async { - let (store, runner) = new_store(ctx, &setup.blocks[0]).await; + let (store, runner) = new_store(ctx, &setup.genesis).await; s.spawn_bg(runner.run(ctx)); let nodes: Vec<_> = cfgs .iter() .enumerate() .map(|(i, cfg)| { - let (node, runner) = testonly::Instance::new(cfg.clone(), store.clone()); + let (node, runner) = testonly::Instance::new(ctx, cfg.clone(), store.clone()); s.spawn_bg(runner.run(ctx).instrument(tracing::info_span!("node", i))); node }) @@ -243,13 +243,15 @@ async fn test_validator_addrs_propagation() { let want: HashMap<_, _> = cfgs .iter() .map(|cfg| { - let cfg = cfg.consensus.as_ref().unwrap(); - (cfg.key.public(), cfg.public_addr) + ( + cfg.validator_key.as_ref().unwrap().public(), + cfg.public_addr, + ) }) .collect(); for (i, node) in nodes.iter().enumerate() { tracing::info!("awaiting for node[{i}] to learn validator_addrs"); - let sub = &mut node.state.gossip.validator_addrs.subscribe(); + let sub = &mut node.net.gossip.validator_addrs.subscribe(); sync::wait_for(ctx, sub, |got| want == to_addr_map(got)).await?; } Ok(()) @@ -258,6 +260,57 @@ async fn test_validator_addrs_propagation() { .unwrap(); } +#[tokio::test] +async fn test_genesis_mismatch() { + abort_on_panic(); + let ctx = &ctx::test_root(&ctx::RealClock); + let rng = &mut ctx.rng(); + let setup = validator::testonly::Setup::new(rng, 2); + let cfgs = 
testonly::new_configs(rng, &setup, 1); + + scope::run!(ctx, |ctx, s| async { + let mut listener = cfgs[1].server_addr.bind().context("server_addr.bind()")?; + + tracing::info!("Start one node, we will simulate the other one."); + let (store, runner) = new_store(ctx, &setup.genesis).await; + s.spawn_bg(runner.run(ctx)); + let (_node, runner) = testonly::Instance::new(ctx, cfgs[0].clone(), store.clone()); + s.spawn_bg(runner.run(ctx).instrument(tracing::info_span!("node"))); + + tracing::info!("Accept a connection with mismatching genesis."); + let stream = metrics::MeteredStream::listen(ctx, &mut listener) + .await? + .context("listen()")?; + let (mut stream, endpoint) = preface::accept(ctx, stream) + .await + .context("preface::accept()")?; + assert_eq!(endpoint, preface::Endpoint::GossipNet); + tracing::info!("Expect the handshake to fail"); + let res = handshake::inbound(ctx, &cfgs[1].gossip, rng.gen(), &mut stream).await; + assert_matches!(res, Err(handshake::Error::GenesisMismatch)); + + tracing::info!("Try to connect to a node with a mismatching genesis."); + let mut stream = preface::connect(ctx, cfgs[0].public_addr, preface::Endpoint::GossipNet) + .await + .context("preface::connect")?; + let res = handshake::outbound( + ctx, + &cfgs[1].gossip, + rng.gen(), + &mut stream, + &cfgs[0].gossip.key.public(), + ) + .await; + tracing::info!( + "Expect the peer to verify the mismatching Genesis and close the connection." + ); + assert_matches!(res, Err(handshake::Error::Stream(_))); + Ok(()) + }) + .await + .unwrap(); +} + const EXCHANGED_STATE_COUNT: usize = 5; const NETWORK_CONNECTIVITY_CASES: [(usize, usize); 5] = [(2, 1), (3, 2), (5, 3), (10, 4), (10, 7)]; @@ -272,21 +325,21 @@ async fn syncing_blocks(node_count: usize, gossip_peers: usize) { let ctx = &ctx::test_root(&ctx::AffineClock::new(20.0)); let rng = &mut ctx.rng(); - let mut setup = validator::testonly::GenesisSetup::new(rng, node_count); + let mut setup = validator::testonly::Setup::new(rng, node_count); + setup.push_blocks(rng, EXCHANGED_STATE_COUNT); let cfgs = testonly::new_configs(rng, &setup, gossip_peers); scope::run!(ctx, |ctx, s| async { let mut nodes = vec![]; for (i, cfg) in cfgs.into_iter().enumerate() { - let (store, runner) = new_store(ctx, &setup.blocks[0]).await; + let (store, runner) = new_store(ctx, &setup.genesis).await; s.spawn_bg(runner.run(ctx)); - let (node, runner) = testonly::Instance::new(cfg, store); + let (node, runner) = testonly::Instance::new(ctx, cfg, store); s.spawn_bg(runner.run(ctx).instrument(tracing::info_span!("node", i))); nodes.push(node); } - setup.push_blocks(rng, EXCHANGED_STATE_COUNT); for block in &setup.blocks { for node in &nodes { - node.state + node.net .gossip .block_store .queue_block(ctx, block.clone()) @@ -319,7 +372,7 @@ async fn wait_for_updates( else { continue; }; - if state.last == block.justification { + if state.last.as_ref() == Some(&block.justification) { updates.insert(peer); } response.send(()).ok(); @@ -345,7 +398,7 @@ async fn uncoordinated_block_syncing( let ctx = &ctx::test_root(&ctx::AffineClock::new(20.0)); let rng = &mut ctx.rng(); - let mut setup = validator::testonly::GenesisSetup::empty(rng, node_count); + let mut setup = validator::testonly::Setup::new(rng, node_count); setup.push_blocks(rng, EXCHANGED_STATE_COUNT); scope::run!(ctx, |ctx, s| async { for (i, cfg) in testonly::new_configs(rng, &setup, gossip_peers) @@ -353,13 +406,13 @@ async fn uncoordinated_block_syncing( .enumerate() { let i = i; - let (store, runner) = new_store(ctx, 
&setup.blocks[0]).await; + let (store, runner) = new_store(ctx, &setup.genesis).await; s.spawn_bg(runner.run(ctx)); - let (node, runner) = testonly::Instance::new(cfg, store.clone()); + let (node, runner) = testonly::Instance::new(ctx, cfg, store.clone()); s.spawn_bg(runner.run(ctx).instrument(tracing::info_span!("node", i))); s.spawn(async { let store = store; - for block in &setup.blocks[1..] { + for block in &setup.blocks { ctx.sleep(state_generation_interval).await?; store.queue_block(ctx, block.clone()).await.unwrap(); } @@ -383,20 +436,26 @@ async fn getting_blocks_from_peers(node_count: usize, gossip_peers: usize) { let ctx = &ctx::test_root(&ctx::RealClock); let rng = &mut ctx.rng(); - let setup = validator::testonly::GenesisSetup::new(rng, node_count); + let mut setup = validator::testonly::Setup::new(rng, node_count); + setup.push_blocks(rng, 1); let cfgs = testonly::new_configs(rng, &setup, gossip_peers); // All inbound and outbound peers should answer the request. let expected_successful_responses = (2 * gossip_peers).min(node_count - 1); - let (store, runner) = new_store(ctx, &setup.blocks[0]).await; scope::run!(ctx, |ctx, s| async { + let (store, runner) = new_store(ctx, &setup.genesis).await; s.spawn_bg(runner.run(ctx)); + store + .queue_block(ctx, setup.blocks[0].clone()) + .await + .unwrap(); + let mut nodes: Vec<_> = cfgs .into_iter() .enumerate() .map(|(i, cfg)| { - let (node, runner) = testonly::Instance::new(cfg, store.clone()); + let (node, runner) = testonly::Instance::new(ctx, cfg, store.clone()); s.spawn_bg(runner.run(ctx).instrument(tracing::info_span!("node", i))); node }) @@ -410,7 +469,7 @@ async fn getting_blocks_from_peers(node_count: usize, gossip_peers: usize) { let (response, response_receiver) = oneshot::channel(); node.pipe.send( io::SyncBlocksInputMessage::GetBlock { - recipient: peer.state.gossip.cfg.key.public(), + recipient: peer.net.gossip.cfg.gossip.key.public(), number: setup.blocks[0].header().number, response, } @@ -429,7 +488,7 @@ async fn getting_blocks_from_peers(node_count: usize, gossip_peers: usize) { let last = nodes.pop().unwrap(); last.terminate(ctx).await?; - let stopped_node_key = last.state.gossip.cfg.key.public(); + let stopped_node_key = last.net.gossip.cfg.gossip.key.public(); for node in &nodes { tracing::info!("wait for disconnection"); node.wait_for_gossip_disconnect(ctx, &stopped_node_key) @@ -463,17 +522,21 @@ async fn validator_node_restart() { abort_on_panic(); let _guard = set_timeout(time::Duration::seconds(5)); - let clock = &ctx::ManualClock::new(); - let ctx = &ctx::test_root(clock); + let clock = ctx::ManualClock::new(); + let ctx = &ctx::test_root(&clock); let rng = &mut ctx.rng(); let zero = time::Duration::ZERO; let sec = time::Duration::seconds(1); - let setup = validator::testonly::GenesisSetup::new(rng, 2); + let setup = validator::testonly::Setup::new(rng, 2); let mut cfgs = testonly::new_configs(rng, &setup, 1); - let (store, store_runner) = new_store(ctx, &setup.blocks[0]).await; - let (mut node1, node1_runner) = testonly::Instance::new(cfgs[1].clone(), store.clone()); + // Set the rpc refresh time to 0, so that any updates are immediately propagated. 
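+    // (A zero `refresh` removes the limiter delay between consecutive
+    // push_validator_addrs calls; `refresh` is the interval the limiter
+    // enforces between calls once the `burst` budget is spent, as the
+    // `test_ping` assertions below illustrate.)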
+ for cfg in &mut cfgs { + cfg.rpc.push_validator_addrs_rate.refresh = time::Duration::ZERO; + } + let (store, store_runner) = new_store(ctx, &setup.genesis).await; + let (node1, node1_runner) = testonly::Instance::new(ctx, cfgs[1].clone(), store.clone()); scope::run!(ctx, |ctx, s| async { s.spawn_bg(store_runner.run(ctx)); s.spawn_bg( @@ -481,15 +544,6 @@ async fn validator_node_restart() { .run(ctx) .instrument(tracing::info_span!("node1")), ); - s.spawn_bg(async { - // Progress time whenever node1 receives an update. - // TODO(gprusak): alternatively we could entirely disable time progress - // by setting refresh time to 0 in tests. - while let Ok(Event::ValidatorAddrsUpdated) = node1.events.recv(ctx).await { - clock.advance(rpc::push_validator_addrs::Rpc::RATE.refresh); - } - Ok(()) - }); // We restart the node0 after shifting the UTC clock back and forth. // node0 is expected to learn what was is the currently broadcasted @@ -497,11 +551,9 @@ async fn validator_node_restart() { let mut utc_times = HashSet::new(); let start = ctx.now_utc(); for clock_shift in [zero, sec, -2 * sec, 4 * sec, 10 * sec, -30 * sec] { - tracing::error!("DUPA {clock_shift}"); // Set the new addr to broadcast. - let mutated_config = cfgs[0].consensus.as_mut().unwrap(); let addr0 = mk_addr(rng); - mutated_config.public_addr = addr0; + cfgs[0].public_addr = addr0; // Shift the UTC clock. let now = start + clock_shift; assert!( @@ -511,18 +563,17 @@ async fn validator_node_restart() { clock.set_utc(now); tracing::info!("now = {now:?}"); + // _node0 contains pipe, which has to exist to prevent the connection from dying + // early. + let (_node0, runner) = testonly::Instance::new(ctx, cfgs[0].clone(), store.clone()); scope::run!(ctx, |ctx, s| async { - // _node0 contains pipe, which has to exist to prevent the connection from dying - // early. - let (_node0, runner) = testonly::Instance::new(cfgs[0].clone(), store.clone()); s.spawn_bg(runner.run(ctx).instrument(tracing::info_span!("node0"))); tracing::info!("wait for the update to arrive to node1"); - let sub = &mut node1.state.gossip.validator_addrs.subscribe(); + let sub = &mut node1.net.gossip.validator_addrs.subscribe(); sync::wait_for(ctx, sub, |got| { let Some(got) = got.get(&setup.keys[0].public()) else { return false; }; - tracing::info!("got.addr = {}", got.msg.addr); got.msg.addr == addr0 }) .await?; @@ -549,32 +600,35 @@ async fn rate_limiting() { // construct star topology. let n = 10; - let setup = validator::testonly::GenesisSetup::new(rng, n); + let setup = validator::testonly::Setup::new(rng, n); let mut cfgs = testonly::new_configs(rng, &setup, 0); let want: HashMap<_, _> = cfgs .iter() .map(|cfg| { - let consensus_cfg = cfg.consensus.as_ref().unwrap(); - (consensus_cfg.key.public(), consensus_cfg.public_addr) + ( + cfg.validator_key.as_ref().unwrap().public(), + cfg.public_addr, + ) }) .collect(); for i in 1..n { let key = cfgs[i].gossip.key.public().clone(); - let public_addr = cfgs[i].consensus.as_ref().unwrap().public_addr; + let public_addr = cfgs[i].public_addr; cfgs[0].gossip.static_outbound.insert(key, public_addr); } let mut nodes = vec![]; scope::run!(ctx, |ctx, s| async { - let (store, runner) = new_store(ctx, &setup.blocks[0]).await; + let (store, runner) = new_store(ctx, &setup.genesis).await; s.spawn_bg(runner.run(ctx)); // Spawn the satellite nodes and wait until they register // their own address. 
         for (i, cfg) in cfgs[1..].iter().enumerate() {
-            let (node, runner) = testonly::Instance::new(cfg.clone(), store.clone());
+            let (node, runner) = testonly::Instance::new(ctx, cfg.clone(), store.clone());
             s.spawn_bg(runner.run(ctx).instrument(tracing::info_span!("node", i)));
-            let sub = &mut node.state.gossip.validator_addrs.subscribe();
+            let sub = &mut node.net.gossip.validator_addrs.subscribe();
             sync::wait_for(ctx, sub, |got| {
-                got.get(&node.consensus_config().key.public()).is_some()
+                got.get(&node.cfg().validator_key.as_ref().unwrap().public())
+                    .is_some()
             })
             .await
             .unwrap();
             nodes.push(node);
         }
 
         // Spawn the center node.
-        let (center, runner) = testonly::Instance::new(cfgs[0].clone(), store.clone());
+        let (center, runner) = testonly::Instance::new(ctx, cfgs[0].clone(), store.clone());
         s.spawn_bg(runner.run(ctx).instrument(tracing::info_span!("node[0]")));
         // Await for the center to receive all validator addrs.
-        let sub = &mut center.state.gossip.validator_addrs.subscribe();
+        let sub = &mut center.net.gossip.validator_addrs.subscribe();
         sync::wait_for(ctx, sub, |got| want == to_addr_map(got)).await?;
         // Advance time and wait for all other nodes to receive validator addrs.
-        clock.advance(rpc::push_validator_addrs::Rpc::RATE.refresh);
+        clock.advance(center.cfg().rpc.push_validator_addrs_rate.refresh);
         for node in &nodes {
-            let sub = &mut node.state.gossip.validator_addrs.subscribe();
+            let sub = &mut node.net.gossip.validator_addrs.subscribe();
             sync::wait_for(ctx, sub, |got| want == to_addr_map(got)).await?;
         }
         Ok(())
     })
     .await
     .unwrap();
 
     // Check that the satellite nodes received either 1 or 2 updates.
     for n in &mut nodes {
-        let mut count = 0;
-        while let Some(Event::ValidatorAddrsUpdated) = n.events.try_recv() {
-            count += 1;
-        }
-        assert!((1..=2).contains(&count));
+        let got = n
+            .net
+            .gossip
+            .push_validator_addrs_calls
+            .load(Ordering::SeqCst);
+        assert!((1..=2).contains(&got), "got {got} want 1 or 2");
     }
 }
diff --git a/node/actors/network/src/gossip/validator_addrs.rs b/node/actors/network/src/gossip/validator_addrs.rs
new file mode 100644
index 00000000..ed176bf5
--- /dev/null
+++ b/node/actors/network/src/gossip/validator_addrs.rs
@@ -0,0 +1,110 @@
+//! Global state distributed by active validators, observed by all the nodes in the network.
+use crate::watch::Watch;
+use std::{collections::HashSet, sync::Arc};
+use zksync_concurrency::sync;
+use zksync_consensus_roles::validator;
+
+/// Mapping from validator::PublicKey to a signed validator::NetAddress.
+/// Represents the current state of the node's knowledge about the validator endpoints.
+#[derive(Clone, Default, PartialEq, Eq)]
+pub(crate) struct ValidatorAddrs(
+    pub(super) im::HashMap<validator::PublicKey, Arc<validator::Signed<validator::NetAddress>>>,
+);
+
+impl ValidatorAddrs {
+    /// Gets a NetAddress for a given key.
+    pub(crate) fn get(
+        &self,
+        key: &validator::PublicKey,
+    ) -> Option<&Arc<validator::Signed<validator::NetAddress>>> {
+        self.0.get(key)
+    }
+
+    /// Returns a set of entries of `self` which are newer than the entries in `b`.
+    pub(super) fn get_newer(&self, b: &Self) -> Vec<Arc<validator::Signed<validator::NetAddress>>> {
+        let mut newer = vec![];
+        for (k, v) in &self.0 {
+            if let Some(bv) = b.0.get(k) {
+                if !v.msg.is_newer(&bv.msg) {
+                    continue;
+                }
+            }
+            newer.push(v.clone());
+        }
+        newer
+    }
+
+    /// Updates the discovery map with entries from `data`.
+    /// It exits as soon as an invalid entry is found.
+    /// `self` might get modified even if an error is returned
+    /// (all entries verified so far are added).
+    /// Returns true iff some new entry was added.
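+    ///
+    /// A sketch of the intended semantics (hypothetical bindings, not a doctest):
+    /// ```ignore
+    /// // `newer` is signed and carries a newer `NetAddress` than the stored
+    /// // entry for its key, so the first update reports a change; repeating
+    /// // the exact same update is then a no-op.
+    /// assert!(addrs.update(&validators, &[newer.clone()])?);
+    /// assert!(!addrs.update(&validators, &[newer])?);
+    /// ```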
+    pub(super) fn update(
+        &mut self,
+        validators: &validator::ValidatorSet,
+        data: &[Arc<validator::Signed<validator::NetAddress>>],
+    ) -> anyhow::Result<bool> {
+        let mut changed = false;
+
+        let mut done = HashSet::new();
+        for d in data {
+            // Disallow multiple entries for the same key:
+            // it is important because a malicious validator may spam us with
+            // new versions and verifying signatures is expensive.
+            if done.contains(&d.key) {
+                anyhow::bail!("duplicate entry for {:?}", d.key);
+            }
+            done.insert(d.key.clone());
+            if !validators.contains(&d.key) {
+                // We just skip the entries we are not interested in.
+                // For now the set of validators is static, so we could treat this as an error,
+                // however we eventually want the validator set to be dynamic.
+                continue;
+            }
+            if let Some(x) = self.0.get(&d.key) {
+                if !d.msg.is_newer(&x.msg) {
+                    continue;
+                }
+            }
+            d.verify()?;
+            self.0.insert(d.key.clone(), d.clone());
+            changed = true;
+        }
+        Ok(changed)
+    }
+}
+
+/// Watch wrapper of ValidatorAddrs,
+/// which supports subscribing to ValidatorAddrs updates.
+pub(crate) struct ValidatorAddrsWatch(Watch<ValidatorAddrs>);
+
+impl Default for ValidatorAddrsWatch {
+    fn default() -> Self {
+        Self(Watch::new(ValidatorAddrs::default()))
+    }
+}
+
+impl ValidatorAddrsWatch {
+    /// Subscribes to ValidatorAddrs updates.
+    pub(crate) fn subscribe(&self) -> sync::watch::Receiver<ValidatorAddrs> {
+        self.0.subscribe()
+    }
+
+    /// Inserts data to ValidatorAddrs.
+    /// Subscribers are notified iff at least 1 new entry has
+    /// been inserted. Returns an error iff an invalid
+    /// entry in `data` has been found. The provider of the
+    /// invalid entry should be banned.
+    pub(crate) async fn update(
+        &self,
+        validators: &validator::ValidatorSet,
+        data: &[Arc<validator::Signed<validator::NetAddress>>],
+    ) -> anyhow::Result<()> {
+        let this = self.0.lock().await;
+        let mut validator_addrs = this.borrow().clone();
+        if validator_addrs.update(validators, data)? {
+            this.send(validator_addrs).ok().unwrap();
+        }
+        Ok(())
+    }
+}
diff --git a/node/actors/network/src/lib.rs b/node/actors/network/src/lib.rs
index 0f811d39..73c25cb4 100644
--- a/node/actors/network/src/lib.rs
+++ b/node/actors/network/src/lib.rs
@@ -1,11 +1,12 @@
 //! Network actor maintaining a pool of outbound and inbound connections to other nodes.
+use anyhow::Context as _;
+use std::sync::Arc;
+use zksync_concurrency::{ctx, ctx::channel, scope, time};
+use zksync_consensus_storage::BlockStore;
+use zksync_consensus_utils::pipe::ActorPipe;
 
-// &*x is not equivalent to x, because it affects borrowing in closures.
-#![allow(clippy::borrow_deref_ref)]
-
-pub use state::*;
+mod config;
 pub mod consensus;
-mod event;
 mod frame;
 pub mod gossip;
 pub mod io;
@@ -21,3 +22,186 @@ pub mod testonly;
 #[cfg(test)]
 mod tests;
 mod watch;
+
+pub use config::*;
+
+/// State of the network actor observable outside of the actor.
+pub struct Network {
+    /// Consensus network state.
+    pub(crate) consensus: Option<Arc<consensus::Network>>,
+    /// Gossip network state.
+    pub(crate) gossip: Arc<gossip::Network>,
+}
+
+/// Runner of the Network background tasks.
+#[must_use]
+pub struct Runner {
+    /// Network state.
+    net: Arc<Network>,
+    /// Receiver of the messages from the dispatcher.
+    receiver: channel::UnboundedReceiver<io::InputMessage>,
+}
+
+impl Network {
+    /// Constructs a new network actor state.
+    /// Call `Runner::run` to run the actor.
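+    ///
+    /// A minimal wiring sketch (assuming a dispatcher/actor pipe pair from
+    /// `zksync_consensus_utils::pipe` and a surrounding scope `s`, as done in
+    /// `testonly::Instance::new` later in this patch):
+    /// ```ignore
+    /// let (actor_pipe, dispatcher_pipe) = pipe::new();
+    /// let (net, runner) = Network::new(ctx, cfg, block_store, actor_pipe);
+    /// s.spawn_bg(runner.run(ctx));
+    /// ```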
+    pub fn new(
+        ctx: &ctx::Ctx,
+        cfg: Config,
+        block_store: Arc<BlockStore>,
+        pipe: ActorPipe<io::InputMessage, io::OutputMessage>,
+    ) -> (Arc<Self>, Runner) {
+        let gossip = gossip::Network::new(cfg, block_store, pipe.send);
+        let consensus = consensus::Network::new(ctx, gossip.clone());
+        let net = Arc::new(Self { gossip, consensus });
+        (
+            net.clone(),
+            Runner {
+                net,
+                receiver: pipe.recv,
+            },
+        )
+    }
+
+    /// Registers metrics for this state.
+    pub fn register_metrics(self: &Arc<Self>) {
+        metrics::NetworkGauges::register(Arc::downgrade(self));
+    }
+
+    /// Handles a dispatcher message.
+    async fn handle_message(
+        &self,
+        ctx: &ctx::Ctx,
+        message: io::InputMessage,
+    ) -> anyhow::Result<()> {
+        /// Timeout for handling a consensus message.
+        const CONSENSUS_MSG_TIMEOUT: time::Duration = time::Duration::seconds(10);
+        /// Timeout for a GetBlock RPC.
+        const GET_BLOCK_TIMEOUT: time::Duration = time::Duration::seconds(10);
+
+        match message {
+            io::InputMessage::Consensus(message) => {
+                let consensus = self.consensus.as_ref().context("not a validator node")?;
+                let ctx = &ctx.with_timeout(CONSENSUS_MSG_TIMEOUT);
+                match message.recipient {
+                    io::Target::Validator(key) => {
+                        consensus.send(ctx, &key, message.message).await?
+                    }
+                    io::Target::Broadcast => consensus.broadcast(ctx, message.message).await?,
+                }
+            }
+            io::InputMessage::SyncBlocks(io::SyncBlocksInputMessage::GetBlock {
+                recipient,
+                number,
+                response,
+            }) => {
+                let ctx = &ctx.with_timeout(GET_BLOCK_TIMEOUT);
+                let _ = response.send(match self.gossip.get_block(ctx, &recipient, number).await {
+                    Ok(Some(block)) => Ok(block),
+                    Ok(None) => Err(io::GetBlockError::NotAvailable),
+                    Err(err) => Err(io::GetBlockError::Internal(err)),
+                });
+            }
+        }
+        Ok(())
+    }
+}
+
+impl Runner {
+    /// Runs the network actor.
+    pub async fn run(mut self, ctx: &ctx::Ctx) -> anyhow::Result<()> {
+        let mut listener = self
+            .net
+            .gossip
+            .cfg
+            .server_addr
+            .bind()
+            .context("server_addr.bind()")?;
+
+        scope::run!(ctx, |ctx, s| async {
+            // Handle incoming messages.
+            s.spawn(async {
+                // We don't propagate cancellation errors
+                while let Ok(message) = self.receiver.recv(ctx).await {
+                    s.spawn(async {
+                        if let Err(err) = self.net.handle_message(ctx, message).await {
+                            tracing::info!("handle_message(): {err:#}");
+                        }
+                        Ok(())
+                    });
+                }
+                Ok(())
+            });
+
+            // Maintain static gossip connections.
+            for (peer, addr) in &self.net.gossip.cfg.gossip.static_outbound {
+                s.spawn(async {
+                    loop {
+                        let run_result =
+                            self.net.gossip.run_outbound_stream(ctx, peer, *addr).await;
+                        if let Err(err) = run_result {
+                            tracing::info!("gossip.run_outbound_stream(): {err:#}");
+                        }
+                        if let Err(ctx::Canceled) = ctx.sleep(CONNECT_RETRY).await {
+                            return Ok(());
+                        }
+                    }
+                });
+            }
+
+            if let Some(c) = &self.net.consensus {
+                // If we are an active validator ...
+                if c.gossip.genesis().validators.contains(&c.key.public()) {
+                    // Maintain outbound connections.
+                    for peer in c.clients.keys() {
+                        s.spawn(async {
+                            c.maintain_connection(ctx, peer).await;
+                            Ok(())
+                        });
+                    }
+                    // Announce IP periodically.
+                    s.spawn(async {
+                        c.run_address_announcer(ctx).await;
+                        Ok(())
+                    });
+                }
+            }
+
+            // TODO(gprusak): add rate limit and inflight limit for inbound handshakes.
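+            // Each accepted TCP stream first goes through `preface::accept`,
+            // which runs the preface handshake and determines the target
+            // endpoint, and is then dispatched to the consensus or gossip
+            // handler below.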
+            while let Ok(stream) = metrics::MeteredStream::listen(ctx, &mut listener).await {
+                let stream = stream.context("listener.accept()")?;
+                s.spawn(async {
+                    let res = async {
+                        let (stream, endpoint) = preface::accept(ctx, stream)
+                            .await
+                            .context("preface::accept()")?;
+                        match endpoint {
+                            preface::Endpoint::ConsensusNet => {
+                                if let Some(c) = &self.net.consensus {
+                                    c.run_inbound_stream(ctx, stream)
+                                        .await
+                                        .context("consensus.run_inbound_stream()")?;
+                                }
+                            }
+                            preface::Endpoint::GossipNet => {
+                                self.net
+                                    .gossip
+                                    .run_inbound_stream(ctx, stream)
+                                    .await
+                                    .context("gossip.run_inbound_stream()")?;
+                            }
+                        }
+                        anyhow::Ok(())
+                    }
+                    .await;
+                    if let Err(err) = res {
+                        tracing::info!("{err:#}");
+                    }
+                    Ok(())
+                });
+            }
+            Ok(())
+        })
+        .await
+    }
+}
diff --git a/node/actors/network/src/metrics.rs b/node/actors/network/src/metrics.rs
index 8503b7da..86fa1cf5 100644
--- a/node/actors/network/src/metrics.rs
+++ b/node/actors/network/src/metrics.rs
@@ -1,6 +1,6 @@
 //! General-purpose network metrics.
-use crate::state::State;
+use crate::Network;
 use std::{
     net::SocketAddr,
     pin::Pin,
@@ -138,7 +138,7 @@ pub(crate) struct NetworkGauges {
 
 impl NetworkGauges {
     /// Registers a metrics collector for the specified state.
-    pub(crate) fn register(state_ref: Weak<State>) {
+    pub(crate) fn register(state_ref: Weak<Network>) {
         #[vise::register]
         static COLLECTOR: Collector<Option<NetworkGauges>> = Collector::new();
 
diff --git a/node/actors/network/src/mux/tests/mod.rs b/node/actors/network/src/mux/tests/mod.rs
index e797a917..0c43d0ea 100644
--- a/node/actors/network/src/mux/tests/mod.rs
+++ b/node/actors/network/src/mux/tests/mod.rs
@@ -9,7 +9,6 @@ use std::{
     },
 };
 use zksync_concurrency::{ctx, scope, testonly::abort_on_panic};
-use zksync_consensus_utils::no_copy::NoCopy;
 use zksync_protobuf::ProtoFmt as _;
 
 mod proto;
@@ -193,10 +192,6 @@ fn expected(res: Result<(), mux::RunError>) -> Result<(), mux::RunError> {
 // * multiple capabilities are used at the same time.
 // * ends use totally different configs
 // * messages are larger than frames
-//
-// TODO(gprusak): in case the test fails it may be hard to find the actual bug, because
-// this test covers a lot of features. In such situation more specific tests
-// checking 1 property at a time should be added.
#[test] fn mux_with_noise() { abort_on_panic(); @@ -247,35 +242,35 @@ fn mux_with_noise() { scope::run!(ctx, |ctx, s| async { let (s1, s2) = noise::testonly::pipe(ctx).await; for (cap, q) in mux1.connect.clone() { - let cap = NoCopy::from(cap); + let cap = ctx::NoCopy(cap); s.spawn_bg(async { run_server(ctx, q, *cap) .await - .with_context(|| format!("server({})", cap.into_inner())) + .with_context(|| format!("server({})", cap.into())) }); } for (cap, q) in mux1.accept.clone() { - let cap = NoCopy::from(cap); + let cap = ctx::NoCopy(cap); s.spawn(async { run_client(ctx, q, *cap) .await - .with_context(|| format!("client({})", cap.into_inner())) + .with_context(|| format!("client({})", cap.into())) }); } for (cap, q) in mux2.connect.clone() { - let cap = NoCopy::from(cap); + let cap = ctx::NoCopy(cap); s.spawn_bg(async { run_server(ctx, q, *cap) .await - .with_context(|| format!("server({})", cap.into_inner())) + .with_context(|| format!("server({})", cap.into())) }); } for (cap, q) in mux2.accept.clone() { - let cap = NoCopy::from(cap); + let cap = ctx::NoCopy(cap); s.spawn(async { run_client(ctx, q, *cap) .await - .with_context(|| format!("client({})", cap.into_inner())) + .with_context(|| format!("client({})", cap.into())) }); } s.spawn_bg(async { expected(mux1.run(ctx, s1).await).context("mux1.run()") }); diff --git a/node/actors/network/src/proto/consensus.proto b/node/actors/network/src/proto/consensus.proto index e86aaeaf..919b6d57 100644 --- a/node/actors/network/src/proto/consensus.proto +++ b/node/actors/network/src/proto/consensus.proto @@ -7,7 +7,8 @@ import "zksync/std.proto"; // First message exchanged in the encrypted session. message Handshake { - optional roles.validator.Signed session_id = 1; + optional roles.validator.Signed session_id = 1; // required + optional roles.validator.GenesisHash genesis = 2; // required } message ConsensusReq { diff --git a/node/actors/network/src/proto/gossip.proto b/node/actors/network/src/proto/gossip.proto index 976ba1b8..db219151 100644 --- a/node/actors/network/src/proto/gossip.proto +++ b/node/actors/network/src/proto/gossip.proto @@ -7,8 +7,9 @@ import "zksync/roles/validator.proto"; // First message exchanged in the encrypted session. message Handshake { - optional roles.node.Signed session_id = 1; - optional bool is_static = 2; + optional roles.node.Signed session_id = 1; // required + optional roles.validator.GenesisHash genesis = 3; // required + optional bool is_static = 2; // required } message PushValidatorAddrs { @@ -21,9 +22,9 @@ message PushValidatorAddrs { // and actively fetch newest blocks. message PushBlockStoreState { // First L2 block that the node has locally. - optional roles.validator.CommitQC first = 1; + optional uint64 first = 1; // required; BlockNumber // Last L2 block that the node has locally. - optional roles.validator.CommitQC last = 2; + optional roles.validator.CommitQC last = 2; // optional } // Asks the server to send an L2 block (including its transactions). diff --git a/node/actors/network/src/rpc/consensus.rs b/node/actors/network/src/rpc/consensus.rs index 498cd054..40082c2a 100644 --- a/node/actors/network/src/rpc/consensus.rs +++ b/node/actors/network/src/rpc/consensus.rs @@ -1,6 +1,5 @@ //! Defines RPC for passing consensus messages. 
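+//!
+//! The rate limit for this RPC is now supplied by the caller as a
+//! `limiter::Rate` when constructing a `Client` or registering a server
+//! (see `Client::new` and `Service::add_server` in `rpc/mod.rs` below).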
 use crate::{mux, proto::consensus as proto};
-use zksync_concurrency::{limiter, time};
 use zksync_consensus_roles::validator;
 use zksync_protobuf::{read_required, ProtoFmt};
 
@@ -10,10 +9,6 @@ pub(crate) struct Rpc;
 impl super::Rpc for Rpc {
     const CAPABILITY_ID: mux::CapabilityId = 0;
     const INFLIGHT: u32 = 3;
-    const RATE: limiter::Rate = limiter::Rate {
-        burst: 10,
-        refresh: time::Duration::ZERO,
-    };
     const METHOD: &'static str = "consensus";
     type Req = Req;
     type Resp = Resp;
diff --git a/node/actors/network/src/rpc/get_block.rs b/node/actors/network/src/rpc/get_block.rs
index c4996168..45b208d0 100644
--- a/node/actors/network/src/rpc/get_block.rs
+++ b/node/actors/network/src/rpc/get_block.rs
@@ -1,7 +1,6 @@
 //! RPC for fetching a block from peer.
 use crate::{mux, proto::gossip as proto};
 use anyhow::Context;
-use zksync_concurrency::{limiter, time};
 use zksync_consensus_roles::validator::{BlockNumber, FinalBlock};
 use zksync_protobuf::{read_optional, ProtoFmt};
 
@@ -13,10 +12,6 @@ pub(crate) struct Rpc;
 impl super::Rpc for Rpc {
     const CAPABILITY_ID: mux::CapabilityId = 4;
     const INFLIGHT: u32 = 5;
-    const RATE: limiter::Rate = limiter::Rate {
-        burst: 10,
-        refresh: time::Duration::milliseconds(100),
-    };
     const METHOD: &'static str = "get_block";
 
     type Req = Req;
diff --git a/node/actors/network/src/rpc/mod.rs b/node/actors/network/src/rpc/mod.rs
index d98a08b3..184326cd 100644
--- a/node/actors/network/src/rpc/mod.rs
+++ b/node/actors/network/src/rpc/mod.rs
@@ -52,9 +52,6 @@ pub(crate) trait Rpc: Sync + Send + 'static {
     /// Maximal number of calls executed in parallel.
     /// Both client and server enforce this limit.
     const INFLIGHT: u32;
-    /// Maximal rate at which calls can be made.
-    /// Both client and server enforce this limit.
-    const RATE: limiter::Rate;
     /// Name of the RPC, used in prometheus metrics.
     const METHOD: &'static str;
     /// Type of the request message.
@@ -122,9 +119,9 @@ pub(crate) struct Client<R: Rpc> {
 
 impl<R: Rpc> Client<R> {
     /// Constructs a new client.
-    pub(crate) fn new(ctx: &ctx::Ctx) -> Self {
+    pub(crate) fn new(ctx: &ctx::Ctx, rate: limiter::Rate) -> Self {
         Client {
-            limiter: limiter::Limiter::new(ctx, R::RATE),
+            limiter: limiter::Limiter::new(ctx, rate),
             queue: mux::StreamQueue::new(R::INFLIGHT),
             _rpc: std::marker::PhantomData,
         }
@@ -175,6 +172,7 @@ pub(crate) trait Handler<R: Rpc>: Sync + Send {
 struct Server<R: Rpc, H: Handler<R>> {
     handler: H,
     queue: Arc<mux::StreamQueue>,
+    rate: limiter::Rate,
     _rpc: std::marker::PhantomData<R>,
 }
 
@@ -188,7 +186,7 @@ impl<R: Rpc, H: Handler<R>> ServerTrait for Server<R, H> {
     /// Serves the incoming RPCs, respecting the rate limit and
     /// max inflight limit.
     async fn serve(&self, ctx: &ctx::Ctx) -> ctx::OrCanceled<()> {
-        let limiter = limiter::Limiter::new(ctx, R::RATE);
+        let limiter = limiter::Limiter::new(ctx, self.rate);
         scope::run!(ctx, |ctx, s| async {
             for _ in 0..R::INFLIGHT {
                 s.spawn::<()>(async {
@@ -279,7 +277,11 @@ impl<'a> Service<'a> {
     }
 
     /// Adds a server to the RPC service.
-    pub(crate) fn add_server<R: Rpc>(mut self, handler: impl Handler<R> + 'a) -> Self {
+    pub(crate) fn add_server<R: Rpc>(
+        mut self,
+        handler: impl Handler<R> + 'a,
+        rate: limiter::Rate,
+    ) -> Self {
         let queue = mux::StreamQueue::new(R::INFLIGHT);
         if self
             .mux
@@ -295,6 +297,7 @@ impl<'a> Service<'a> {
         self.servers.push(Box::new(Server {
             handler,
             queue,
+            rate,
             _rpc: std::marker::PhantomData,
         }));
         self
diff --git a/node/actors/network/src/rpc/ping.rs b/node/actors/network/src/rpc/ping.rs
index 5307eb1c..a11e07ed 100644
--- a/node/actors/network/src/rpc/ping.rs
+++ b/node/actors/network/src/rpc/ping.rs
@@ -1,5 +1,5 @@
 //! Defines an RPC for sending ping messages.
-use crate::{mux, proto::ping as proto, rpc::Rpc as _};
+use crate::{mux, proto::ping as proto};
 use anyhow::Context as _;
 use rand::Rng;
 use zksync_concurrency::{ctx, limiter, time};
@@ -11,15 +11,19 @@ pub(crate) struct Rpc;
 impl super::Rpc for Rpc {
     const CAPABILITY_ID: mux::CapabilityId = 2;
     const INFLIGHT: u32 = 1;
-    const RATE: limiter::Rate = limiter::Rate {
-        burst: 1,
-        refresh: time::Duration::seconds(10),
-    };
     const METHOD: &'static str = "ping";
     type Req = Req;
     type Resp = Resp;
 }
 
+/// Hardcoded expected rate supported by the server.
+/// This needs to be part of the protocol, so that both parties agree on when
+/// the connection is alive.
+pub(crate) const RATE: limiter::Rate = limiter::Rate {
+    burst: 2,
+    refresh: time::Duration::seconds(1),
+};
+
 /// Canonical Ping server implementation,
 /// which responds with data from the request.
 pub(crate) struct Server;
@@ -35,7 +39,7 @@ impl super::Handler<Rpc> for Server {
 }
 
 impl super::Client<Rpc> {
-    /// Calls ping RPC every `Rpc.RATE.refresh`.
+    /// Sends a ping every `timeout`.
     /// Returns an error if any single ping request fails or
     /// exceeds `timeout`.
     pub(crate) async fn ping_loop(
@@ -44,13 +48,17 @@ impl super::Client<Rpc> {
         &self,
         ctx: &ctx::Ctx,
         timeout: time::Duration,
     ) -> anyhow::Result<()> {
        loop {
-            ctx.sleep(Rpc::RATE.refresh).await?;
-            let ctx = &ctx.with_timeout(timeout);
             let req = Req(ctx.rng().gen());
-            let resp = self.call(ctx, &req, kB).await.context("ping")?;
+            let resp = self
+                .call(&ctx.with_timeout(timeout), &req, kB)
+                .await
+                .context("ping")?;
             if req.0 != resp.0 {
                 anyhow::bail!("bad ping response");
             }
+            if let Err(ctx::Canceled) = ctx.sleep(timeout).await {
+                return Ok(());
+            }
         }
     }
 }
diff --git a/node/actors/network/src/rpc/push_block_store_state.rs b/node/actors/network/src/rpc/push_block_store_state.rs
index ef340c25..52c1161c 100644
--- a/node/actors/network/src/rpc/push_block_store_state.rs
+++ b/node/actors/network/src/rpc/push_block_store_state.rs
@@ -1,9 +1,9 @@
 //! RPC for notifying peer about our BlockStore state.
 use crate::{mux, proto::gossip as proto};
 use anyhow::Context;
-use zksync_concurrency::{limiter, time};
+use zksync_consensus_roles::validator;
 use zksync_consensus_storage::BlockStoreState;
-use zksync_protobuf::{read_required, ProtoFmt};
+use zksync_protobuf::{read_optional, required, ProtoFmt};
 
 /// PushBlockStoreState RPC.
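+/// The request advertises the locally stored block range: `first` as a bare
+/// `BlockNumber` and `last` as an optional `CommitQC` (absent while the node
+/// has no certified block in its store).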
 #[derive(Debug)]
@@ -12,10 +12,6 @@ pub(crate) struct Rpc;
 impl super::Rpc for Rpc {
     const CAPABILITY_ID: mux::CapabilityId = 3;
     const INFLIGHT: u32 = 1;
-    const RATE: limiter::Rate = limiter::Rate {
-        burst: 2,
-        refresh: time::Duration::milliseconds(500),
-    };
     const METHOD: &'static str = "push_block_store_state";
 
     type Req = Req;
@@ -31,15 +27,15 @@ impl ProtoFmt for Req {
 
     fn read(message: &Self::Proto) -> anyhow::Result<Self> {
         Ok(Self(BlockStoreState {
-            first: read_required(&message.first).context("first")?,
-            last: read_required(&message.last).context("last")?,
+            first: validator::BlockNumber(*required(&message.first).context("first")?),
+            last: read_optional(&message.last).context("last")?,
         }))
     }
 
     fn build(&self) -> Self::Proto {
         Self::Proto {
-            first: Some(self.0.first.build()),
-            last: Some(self.0.last.build()),
+            first: Some(self.0.first.0),
+            last: self.0.last.as_ref().map(|x| x.build()),
         }
     }
 }
diff --git a/node/actors/network/src/rpc/push_validator_addrs.rs b/node/actors/network/src/rpc/push_validator_addrs.rs
index 020a984f..c58803bf 100644
--- a/node/actors/network/src/rpc/push_validator_addrs.rs
+++ b/node/actors/network/src/rpc/push_validator_addrs.rs
@@ -2,7 +2,6 @@
 use crate::{mux, proto::gossip as proto};
 use anyhow::Context as _;
 use std::sync::Arc;
-use zksync_concurrency::{limiter, time};
 use zksync_consensus_roles::validator;
 use zksync_protobuf::ProtoFmt;
 
@@ -12,10 +11,6 @@ pub(crate) struct Rpc;
 impl super::Rpc for Rpc {
     const CAPABILITY_ID: mux::CapabilityId = 1;
     const INFLIGHT: u32 = 1;
-    const RATE: limiter::Rate = limiter::Rate {
-        burst: 1,
-        refresh: time::Duration::seconds(5),
-    };
     const METHOD: &'static str = "push_validator_addrs";
 
     type Req = Req;
diff --git a/node/actors/network/src/rpc/tests.rs b/node/actors/network/src/rpc/tests.rs
index 96020721..3770320c 100644
--- a/node/actors/network/src/rpc/tests.rs
+++ b/node/actors/network/src/rpc/tests.rs
@@ -45,15 +45,21 @@ async fn test_ping() {
     let clock = ctx::ManualClock::new();
     let ctx = &ctx::test_root(&clock);
     let (s1, s2) = noise::testonly::pipe(ctx).await;
-    let client = Client::<ping::Rpc>::new(ctx);
+    let client = Client::<ping::Rpc>::new(ctx, ping::RATE);
     scope::run!(ctx, |ctx, s| async {
         s.spawn_bg(async {
-            expected(Service::new().add_server(ping::Server).run(ctx, s1).await).context("server")
+            expected(
+                Service::new()
+                    .add_server(ping::Server, ping::RATE)
+                    .run(ctx, s1)
+                    .await,
+            )
+            .context("server")
         });
         s.spawn_bg(async {
             expected(Service::new().add_client(&client).run(ctx, s2).await).context("client")
         });
-        for _ in 0..ping::Rpc::RATE.burst {
+        for _ in 0..ping::RATE.burst {
             let req = ping::Req(ctx.rng().gen());
             let resp = client.call(ctx, &req, kB).await?;
             assert_eq!(req.0, resp.0);
@@ -63,7 +69,7 @@ async fn test_ping() {
         let req = ping::Req(ctx.rng().gen());
         let resp = client.call(ctx, &req, kB).await?;
         assert_eq!(req.0, resp.0);
-        assert!(ctx.now() >= now + ping::Rpc::RATE.refresh);
+        assert!(ctx.now() >= now + ping::RATE.refresh);
         Ok(())
     })
     .await
@@ -76,7 +82,7 @@ struct PingServer {
 }
 
 const PING_COUNT: u64 = 3;
-const PING_TIMEOUT: time::Duration = time::Duration::seconds(3);
+const PING_TIMEOUT: time::Duration = time::Duration::seconds(6);
 
 #[async_trait::async_trait]
 impl Handler<ping::Rpc> for PingServer {
@@ -101,8 +107,7 @@ async fn test_ping_loop() {
     clock.set_advance_on_sleep();
     let ctx = &ctx::test_root(&clock);
     let (s1, s2) = noise::testonly::pipe(ctx).await;
-    let client = Client::<ping::Rpc>::new(ctx);
-    let max = 5;
+    let client = Client::<ping::Rpc>::new(ctx, ping::RATE);
     scope::run!(ctx, |ctx, s| async {
         s.spawn_bg(async {
             // Clock is passed to the server, so that it can
@@ -112,13 +117,20 @@ async fn test_ping_loop() {
                 pings: 0.into(),
             };
 
-            // Use independent clock for server, because
-            // otherwise both clocks get autoincremented too aggresively.
-            let clock = ctx::ManualClock::new();
-            clock.set_advance_on_sleep();
-            let ctx = &ctx::test_with_clock(ctx, &clock);
-
-            expected(Service::new().add_server(server).run(ctx, s1).await).context("server")
+            expected(
+                Service::new()
+                    .add_server(
+                        server,
+                        limiter::Rate {
+                            burst: 1,
+                            // With `refresh = 0`, the server will never auto-advance time.
+                            refresh: time::Duration::ZERO,
+                        },
+                    )
+                    .run(ctx, s1)
+                    .await,
+            )
+            .context("server")
         });
         s.spawn_bg(async {
             expected(Service::new().add_client(&client).run(ctx, s2).await).context("client")
@@ -126,8 +138,9 @@ async fn test_ping_loop() {
         let now = ctx.now();
         assert!(client.ping_loop(ctx, PING_TIMEOUT).await.is_err());
         let got = ctx.now() - now;
-        let want = (max - ping::Rpc::RATE.burst) as u32 * ping::Rpc::RATE.refresh + PING_TIMEOUT;
-        assert!(got >= want, "want at least {want} latency, but got {got}");
+        // PING_COUNT pings will succeed and the next one will time out.
+        let want = (PING_COUNT + 1) as u32 * PING_TIMEOUT;
+        assert_eq!(got, want);
         Ok(())
     })
     .await
@@ -136,13 +149,14 @@
 
 struct ExampleRpc;
 
+const RATE: limiter::Rate = limiter::Rate {
+    burst: 10,
+    refresh: time::Duration::ZERO,
+};
+
 impl Rpc for ExampleRpc {
     const CAPABILITY_ID: mux::CapabilityId = 0;
     const INFLIGHT: u32 = 5;
-    const RATE: limiter::Rate = limiter::Rate {
-        burst: 10,
-        refresh: time::Duration::ZERO,
-    };
     const METHOD: &'static str = "example";
     type Req = ();
     type Resp = ();
@@ -166,10 +180,16 @@ async fn test_inflight() {
     abort_on_panic();
     let ctx = &ctx::test_root(&ctx::RealClock);
     let (s1, s2) = noise::testonly::pipe(ctx).await;
-    let client = Client::<ExampleRpc>::new(ctx);
+    let client = Client::<ExampleRpc>::new(ctx, RATE);
     scope::run!(ctx, |ctx, s| async {
         s.spawn_bg(async {
-            expected(Service::new().add_server(ExampleServer).run(ctx, s1).await).context("server")
+            expected(
+                Service::new()
+                    .add_server(ExampleServer, RATE)
+                    .run(ctx, s1)
+                    .await,
+            )
+            .context("server")
         });
         s.spawn_bg(async {
             expected(Service::new().add_client(&client).run(ctx, s2).await).context("client")
diff --git a/node/actors/network/src/state.rs b/node/actors/network/src/state.rs
index 545281b9..529b999a 100644
--- a/node/actors/network/src/state.rs
+++ b/node/actors/network/src/state.rs
@@ -1,165 +1 @@
 //! Network actor maintaining a pool of outbound and inbound connections to other nodes.
-use super::{consensus, event::Event, gossip, metrics, preface};
-use crate::io::{InputMessage, OutputMessage};
-use anyhow::Context as _;
-use std::sync::Arc;
-use zksync_concurrency::{ctx, ctx::channel, net, scope};
-use zksync_consensus_roles::validator;
-use zksync_consensus_storage::BlockStore;
-use zksync_consensus_utils::pipe::ActorPipe;
-
-/// Network actor config.
-#[derive(Debug, Clone)]
-pub struct Config {
-    /// TCP socket address to listen for inbound connections at.
-    pub server_addr: net::tcp::ListenerAddr,
-    /// Validators which
-    /// - client should establish outbound connections to.
-    /// - server should accept inbound connections from (1 per validator).
-    pub validators: validator::ValidatorSet,
-    /// Gossip network config.
-    pub gossip: gossip::Config,
-    /// Consensus network config. If not present, the node will not participate in the consensus network.
-    pub consensus: Option<consensus::Config>,
-    /// Maximal size of the proto-encoded `validator::FinalBlock` in bytes.
-    pub max_block_size: usize,
-    /// Enables pinging the peers to make sure that they are alive.
-    pub enable_pings: bool,
-}
-
-/// Part of configuration shared among network modules.
-#[derive(Debug)]
-pub(crate) struct SharedConfig {
-    /// TCP socket address to listen for inbound connections at.
-    pub(crate) server_addr: net::tcp::ListenerAddr,
-    /// Validators which
-    /// - client should establish outbound connections to.
-    /// - server should accept inbound connections from (1 per validator).
-    pub(crate) validators: validator::ValidatorSet,
-    /// Enables pinging the peers to make sure that they are alive.
-    pub(crate) enable_pings: bool,
-    /// Maximal size of the proto-encoded `validator::FinalBlock` in bytes.
-    pub(crate) max_block_size: usize,
-}
-
-/// State of the network actor observable outside of the actor.
-pub struct State {
-    /// Configuration shared among network modules.
-    pub(crate) cfg: SharedConfig,
-    /// Consensus network state.
-    pub(crate) consensus: Option<consensus::State>,
-    /// Gossip network state.
-    pub(crate) gossip: gossip::State,
-
-    /// TESTONLY: channel of network events which the tests can observe.
-    // TODO(gprusak): consider if it would be enough to make it pub(crate).
-    pub(crate) events: Option<channel::UnboundedSender<Event>>,
-}
-
-impl State {
-    /// Constructs a new network actor state.
-    /// Call `run_network` to run the actor.
-    pub fn new(
-        cfg: Config,
-        block_store: Arc<BlockStore>,
-        events: Option<channel::UnboundedSender<Event>>,
-    ) -> anyhow::Result<Arc<Self>> {
-        let consensus = cfg
-            .consensus
-            .map(|consensus_cfg| consensus::State::new(consensus_cfg, &cfg.validators))
-            .transpose()?;
-        let this = Self {
-            gossip: gossip::State::new(cfg.gossip, block_store),
-            consensus,
-            events,
-            cfg: SharedConfig {
-                server_addr: cfg.server_addr,
-                validators: cfg.validators,
-                enable_pings: cfg.enable_pings,
-                max_block_size: cfg.max_block_size,
-            },
-        };
-        Ok(Arc::new(this))
-    }
-
-    /// Registers metrics for this state.
-    pub fn register_metrics(self: &Arc<Self>) {
-        metrics::NetworkGauges::register(Arc::downgrade(self));
-    }
-}
-
-/// Runs the network actor.
-/// WARNING: it is a bug to call multiple times in parallel
-/// run_network with the same `state` argument.
-/// TODO(gprusak): consider a "runnable" wrapper of `State`
-/// which will be consumed by `run_network`. This way we
-/// could prevent the bug above.
-pub async fn run_network(
-    ctx: &ctx::Ctx,
-    state: Arc<State>,
-    mut pipe: ActorPipe<InputMessage, OutputMessage>,
-) -> anyhow::Result<()> {
-    let mut listener = state.cfg.server_addr.bind()?;
-    let (consensus_send, consensus_recv) = channel::unbounded();
-    let (gossip_send, gossip_recv) = channel::unbounded();
-
-    scope::run!(ctx, |ctx, s| async {
-        s.spawn(async {
-            // We don't propagate cancellation errors
-            while let Ok(message) = pipe.recv.recv(ctx).await {
-                match message {
-                    InputMessage::Consensus(message) => {
-                        consensus_send.send(message);
-                    }
-                    InputMessage::SyncBlocks(message) => {
-                        gossip_send.send(message);
-                    }
-                }
-            }
-            Ok(())
-        });
-
-        s.spawn(async {
-            gossip::run_client(ctx, state.as_ref(), &pipe.send, gossip_recv)
-                .await
-                .context("gossip::run_client")
-        });
-
-        if let Some(consensus_state) = &state.consensus {
-            s.spawn(async {
-                consensus::run_client(ctx, consensus_state, state.as_ref(), consensus_recv)
-                    .await
-                    .context("consensus::run_client")
-            });
-        }
-
-        // TODO(gprusak): add rate limit and inflight limit for inbound handshakes.
-        while let Ok(stream) = metrics::MeteredStream::listen(ctx, &mut listener).await {
-            let stream = stream.context("listener.accept()")?;
-            s.spawn(async {
-                let res = async {
-                    let (stream, endpoint) = preface::accept(ctx, stream).await?;
-                    match endpoint {
-                        preface::Endpoint::ConsensusNet => {
-                            consensus::run_inbound_stream(ctx, &state, &pipe.send, stream)
-                                .await
-                                .context("consensus::run_inbound_stream()")
-                        }
-                        preface::Endpoint::GossipNet => {
-                            gossip::run_inbound_stream(ctx, &state, &pipe.send, stream)
-                                .await
-                                .context("gossip::run_inbound_stream()")
-                        }
-                    }
-                }
-                .await;
-                if let Err(err) = res {
-                    tracing::info!("{err:#}");
-                }
-                Ok(())
-            });
-        }
-        Ok(())
-    })
-    .await
-}
diff --git a/node/actors/network/src/testonly.rs b/node/actors/network/src/testonly.rs
index a1c76721..022bb747 100644
--- a/node/actors/network/src/testonly.rs
+++ b/node/actors/network/src/testonly.rs
@@ -1,17 +1,12 @@
 //! Testonly utilities.
 #![allow(dead_code)]
-use crate::{consensus, event::Event, gossip, Config, State};
+use crate::{Config, GossipConfig, Network, RpcConfig, Runner};
 use rand::Rng;
 use std::{
     collections::{HashMap, HashSet},
     sync::Arc,
 };
-use zksync_concurrency::{
-    ctx,
-    ctx::channel,
-    io, net, scope,
-    sync::{self},
-};
+use zksync_concurrency::{ctx, ctx::channel, io, net, scope, sync};
 use zksync_consensus_roles::{node, validator};
 use zksync_consensus_storage::BlockStore;
 use zksync_consensus_utils::pipe;
@@ -45,9 +40,7 @@ pub(crate) async fn forward(
 /// events channel.
 pub struct Instance {
     /// State of the instance.
-    pub(crate) state: Arc<State>,
-    /// Stream of events.
-    pub(crate) events: channel::UnboundedReceiver<Event>,
+    pub(crate) net: Arc<Network>,
     /// Termination signal that can be sent to the node.
     pub(crate) terminate: channel::Sender<()>,
     /// Dispatcher end of the network pipe.
@@ -55,30 +48,28 @@ pub struct Instance {
 }
 
 /// Construct configs for `n` validators of the consensus.
-pub fn new_configs<R: Rng>(
-    rng: &mut R,
-    setup: &validator::testonly::GenesisSetup,
+pub fn new_configs(
+    rng: &mut impl Rng,
+    setup: &validator::testonly::Setup,
     gossip_peers: usize,
 ) -> Vec<Config> {
     let configs = setup.keys.iter().map(|key| {
         let addr = net::tcp::testonly::reserve_listener();
         Config {
             server_addr: addr,
-            validators: setup.validator_set(),
+            public_addr: *addr,
             // Pings are disabled in tests by default to avoid dropping connections
             // due to timeouts.
-            enable_pings: false,
-            consensus: Some(consensus::Config {
-                key: key.clone(),
-                public_addr: *addr,
-            }),
-            gossip: gossip::Config {
+            ping_timeout: None,
+            validator_key: Some(key.clone()),
+            gossip: GossipConfig {
                 key: rng.gen(),
-                dynamic_inbound_limit: setup.keys.len(),
+                dynamic_inbound_limit: usize::MAX,
                 static_inbound: HashSet::default(),
                 static_outbound: HashMap::default(),
             },
             max_block_size: usize::MAX,
+            rpc: RpcConfig::default(),
         }
     });
     let mut cfgs: Vec<_> = configs.collect();
@@ -95,18 +86,39 @@ pub fn new_configs<R: Rng>(
     cfgs
 }
 
+/// Constructs a config for a non-validator node, which will
+/// establish a gossip connection to `peer`.
+pub fn new_fullnode(rng: &mut impl Rng, peer: &Config) -> Config {
+    let addr = net::tcp::testonly::reserve_listener();
+    Config {
+        server_addr: addr,
+        public_addr: *addr,
+        // Pings are disabled in tests by default to avoid dropping connections
+        // due to timeouts.
+        ping_timeout: None,
+        validator_key: None,
+        gossip: GossipConfig {
+            key: rng.gen(),
+            dynamic_inbound_limit: usize::MAX,
+            static_inbound: HashSet::default(),
+            static_outbound: [(peer.gossip.key.public(), peer.public_addr)].into(),
+        },
+        max_block_size: usize::MAX,
+        rpc: RpcConfig::default(),
+    }
+}
+
 /// Runner for Instance.
 pub struct InstanceRunner {
-    state: Arc<State>,
+    runner: Runner,
     terminate: channel::Receiver<()>,
-    pipe: pipe::ActorPipe<io::InputMessage, io::OutputMessage>,
 }
 
 impl InstanceRunner {
     /// Runs the instance background processes.
     pub async fn run(mut self, ctx: &ctx::Ctx) -> anyhow::Result<()> {
         scope::run!(ctx, |ctx, s| async {
-            s.spawn_bg(crate::run_network(ctx, self.state, self.pipe));
+            s.spawn_bg(self.runner.run(ctx));
             let _ = self.terminate.recv(ctx).await;
             Ok(())
         })
@@ -118,22 +130,22 @@ impl InstanceRunner {
 
 impl Instance {
     /// Construct an instance for a given config.
-    pub fn new(cfg: Config, block_store: Arc<BlockStore>) -> (Self, InstanceRunner) {
-        let (events_send, events_recv) = channel::unbounded();
+    pub fn new(
+        ctx: &ctx::Ctx,
+        cfg: Config,
+        block_store: Arc<BlockStore>,
+    ) -> (Self, InstanceRunner) {
         let (actor_pipe, dispatcher_pipe) = pipe::new();
-        let state =
-            State::new(cfg, block_store, Some(events_send)).expect("Invalid network config");
+        let (net, runner) = Network::new(ctx, cfg, block_store, actor_pipe);
         let (terminate_send, terminate_recv) = channel::bounded(1);
         (
             Self {
-                state: state.clone(),
-                events: events_recv,
+                net,
                 pipe: dispatcher_pipe,
                 terminate: terminate_send,
             },
             InstanceRunner {
-                state: state.clone(),
-                pipe: actor_pipe,
+                runner,
                 terminate: terminate_recv,
             },
         )
@@ -154,30 +166,25 @@ impl Instance {
     }
 
     /// State getter.
-    pub fn state(&self) -> &Arc<State> {
-        &self.state
+    pub fn state(&self) -> &Arc<Network> {
+        &self.net
     }
 
-    /// Returns the consensus config for this node, assuming it is a validator.
-    pub fn consensus_config(&self) -> &consensus::Config {
-        &self
-            .state
-            .consensus
-            .as_ref()
-            .expect("Node is not a validator")
-            .cfg
+    /// Genesis.
+    pub fn genesis(&self) -> &validator::Genesis {
+        self.net.gossip.genesis()
     }
 
     /// Returns the gossip config for this node.
-    pub fn gossip_config(&self) -> &gossip::Config {
-        &self.state.gossip.cfg
+    pub fn cfg(&self) -> &Config {
+        &self.net.gossip.cfg
     }
 
     /// Wait for static outbound gossip connections to be established.
     pub async fn wait_for_gossip_connections(&self) {
-        let gossip_state = &self.state.gossip;
-        let want: HashSet<_> = gossip_state.cfg.static_outbound.keys().cloned().collect();
-        gossip_state
+        let want: HashSet<_> = self.cfg().gossip.static_outbound.keys().cloned().collect();
+        self.net
+            .gossip
             .outbound
             .subscribe()
             .wait_for(|got| want.is_subset(got.current()))
@@ -187,9 +194,9 @@ impl Instance {
 
     /// Waits for all the consensus connections to be established.
     pub async fn wait_for_consensus_connections(&self) {
-        let consensus_state = self.state.consensus.as_ref().unwrap();
+        let consensus_state = self.net.consensus.as_ref().unwrap();
 
-        let want: HashSet<_> = self.state.cfg.validators.iter().cloned().collect();
+        let want: HashSet<_> = self.genesis().validators.iter().cloned().collect();
         consensus_state
             .inbound
             .subscribe()
@@ -210,7 +217,7 @@ impl Instance {
         ctx: &ctx::Ctx,
         peer: &node::PublicKey,
     ) -> ctx::OrCanceled<()> {
-        let state = &self.state.gossip;
+        let state = &self.net.gossip;
         sync::wait_for(ctx, &mut state.inbound.subscribe(), |got| {
             !got.current().contains(peer)
         })
@@ -228,7 +235,7 @@ impl Instance {
         ctx: &ctx::Ctx,
         peer: &validator::PublicKey,
     ) -> ctx::OrCanceled<()> {
-        let state = self.state.consensus.as_ref().unwrap();
+        let state = self.net.consensus.as_ref().unwrap();
         sync::wait_for(ctx, &mut state.inbound.subscribe(), |got| {
             !got.current().contains(peer)
         })
@@ -253,8 +260,8 @@ pub async fn instant_network(
     let mut addrs = vec![];
     let nodes: Vec<_> = nodes.collect();
     for node in &nodes {
-        let key = node.consensus_config().key.public();
-        let sub = &mut node.state.gossip.validator_addrs.subscribe();
+        let key = node.cfg().validator_key.as_ref().unwrap().public();
+        let sub = &mut node.net.gossip.validator_addrs.subscribe();
         loop {
             if let Some(addr) = sync::changed(ctx, sub).await?.get(&key) {
                 addrs.push(addr.clone());
@@ -264,10 +271,10 @@ pub async fn instant_network(
     }
     // Broadcast validator addrs.
     for node in &nodes {
-        node.state
+        node.net
             .gossip
             .validator_addrs
-            .update(&node.state.cfg.validators, &addrs)
+            .update(&node.genesis().validators, &addrs)
             .await
             .unwrap();
     }
diff --git a/node/actors/network/src/tests.rs b/node/actors/network/src/tests.rs
index 75d10b1f..c1cde500 100644
--- a/node/actors/network/src/tests.rs
+++ b/node/actors/network/src/tests.rs
@@ -11,16 +11,16 @@ async fn test_metrics() {
     abort_on_panic();
     let ctx = &mut ctx::test_root(&ctx::RealClock);
     let rng = &mut ctx.rng();
-    let setup = validator::testonly::GenesisSetup::new(rng, 3);
+    let setup = validator::testonly::Setup::new(rng, 3);
     let cfgs = testonly::new_configs(rng, &setup, 1);
     scope::run!(ctx, |ctx, s| async {
-        let (store, runner) = new_store(ctx, &setup.blocks[0]).await;
+        let (store, runner) = new_store(ctx, &setup.genesis).await;
         s.spawn_bg(runner.run(ctx));
         let nodes: Vec<_> = cfgs
             .into_iter()
             .enumerate()
             .map(|(i, cfg)| {
-                let (node, runner) = testonly::Instance::new(cfg, store.clone());
+                let (node, runner) = testonly::Instance::new(ctx, cfg, store.clone());
                 s.spawn_bg(runner.run(ctx).instrument(tracing::info_span!("node", i)));
                 node
             })
diff --git a/node/actors/network/src/watch.rs b/node/actors/network/src/watch.rs
index 459d46d4..806d7678 100644
--- a/node/actors/network/src/watch.rs
+++ b/node/actors/network/src/watch.rs
@@ -3,12 +3,10 @@ use zksync_concurrency::sync;
 
 /// Wrapper of the tokio::sync::Watch.
 pub(crate) struct Watch<T> {
-    /// Mutex-wrapped sender, so that it can be
-    /// accessed via immutable reference.
+    /// `sync::watch::Sender` contains a synchronous mutex.
+    /// We wrap it in an async mutex so that it can be waited on asynchronously.
     send: sync::Mutex<sync::watch::Sender<T>>,
-    /// By keeping a copy of a receiver, sender
-    /// is never closed, which gets rid of send
-    /// errors.
+    /// Receiver outside of the mutex so that `subscribe()` can be nonblocking.
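+    /// Keeping this receiver alive also guarantees that the channel is never
+    /// closed, so sends never fail.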
     recv: sync::watch::Receiver<T>,
 }
 
diff --git a/node/actors/sync_blocks/src/config.rs b/node/actors/sync_blocks/src/config.rs
index d524cd72..a3c9c88a 100644
--- a/node/actors/sync_blocks/src/config.rs
+++ b/node/actors/sync_blocks/src/config.rs
@@ -1,16 +1,9 @@
 //! Configuration for the `SyncBlocks` actor.
-
 use zksync_concurrency::time;
-use zksync_consensus_roles::validator::ValidatorSet;
 
 /// Configuration for the `SyncBlocks` actor.
 #[derive(Debug)]
 pub struct Config {
-    /// Set of validators authoring blocks.
-    pub(crate) validator_set: ValidatorSet,
-    /// Consensus threshold for blocks quorum certificates.
-    pub(crate) consensus_threshold: usize,
-
     /// Maximum number of blocks to attempt to get concurrently from all peers in total.
     pub(crate) max_concurrent_blocks: usize,
     /// Maximum number of blocks to attempt to get concurrently from any single peer.
@@ -20,26 +13,20 @@ pub struct Config {
     pub(crate) sleep_interval_for_get_block: time::Duration,
 }
 
+impl Default for Config {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
 impl Config {
     /// Creates a new configuration with the provided mandatory params.
-    pub fn new(validator_set: ValidatorSet, consensus_threshold: usize) -> anyhow::Result<Self> {
-        anyhow::ensure!(
-            consensus_threshold > 0,
-            "`consensus_threshold` must be positive"
-        );
-        anyhow::ensure!(validator_set.len() > 0, "`validator_set` must not be empty");
-        anyhow::ensure!(
-            consensus_threshold <= validator_set.len(),
-            "`consensus_threshold` must not exceed length of `validator_set`"
-        );
-
-        Ok(Self {
-            validator_set,
-            consensus_threshold,
+    pub fn new() -> Self {
+        Self {
             max_concurrent_blocks: 20,
             max_concurrent_blocks_per_peer: 5,
             sleep_interval_for_get_block: time::Duration::seconds(10),
-        })
+        }
     }
 
     /// Sets the maximum number of blocks to attempt to get concurrently.
diff --git a/node/actors/sync_blocks/src/lib.rs b/node/actors/sync_blocks/src/lib.rs
index 977ad696..e46e340a 100644
--- a/node/actors/sync_blocks/src/lib.rs
+++ b/node/actors/sync_blocks/src/lib.rs
@@ -28,7 +28,7 @@ impl Config {
     ) -> anyhow::Result<()> {
         let peer_states = PeerStates::new(self, storage.clone(), pipe.send);
         let result: ctx::Result<()> = scope::run!(ctx, |ctx, s| async {
-            s.spawn_bg(async { Ok(peer_states.run_block_fetcher(ctx).await?) });
+            s.spawn_bg(async { peer_states.run_block_fetcher(ctx).await });
             loop {
                 match pipe.recv.recv(ctx).await? {
                     InputMessage::Network(SyncBlocksRequest::UpdatePeerSyncState {
diff --git a/node/actors/sync_blocks/src/peers/mod.rs b/node/actors/sync_blocks/src/peers/mod.rs
index ae4e3f1c..f57ea8bd 100644
--- a/node/actors/sync_blocks/src/peers/mod.rs
+++ b/node/actors/sync_blocks/src/peers/mod.rs
@@ -1,5 +1,4 @@
 //! Peer states tracked by the `SyncBlocks` actor.
-
 use self::events::PeerStateEvent;
 use crate::{io, Config};
 use anyhow::Context as _;
@@ -13,11 +12,10 @@ use zksync_concurrency::{
 };
 use zksync_consensus_network::io::SyncBlocksInputMessage;
 use zksync_consensus_roles::{
-    node,
+    node, validator,
     validator::{BlockNumber, FinalBlock},
 };
 use zksync_consensus_storage::{BlockStore, BlockStoreState};
-use zksync_consensus_utils::no_copy::NoCopy;
 
 mod events;
 #[cfg(test)]
@@ -42,6 +40,10 @@ pub(crate) struct PeerStates {
 }
 
 impl PeerStates {
+    fn genesis(&self) -> &validator::Genesis {
+        self.storage.genesis()
+    }
+
     /// Creates a new instance together with a handle.
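+    /// The validator set and consensus threshold come from the genesis kept
+    /// in `storage` (see `genesis()` above), so they need not be passed in
+    /// explicitly.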
pub(crate) fn new( config: Config, @@ -68,37 +70,34 @@ impl PeerStates { state: BlockStoreState, ) -> anyhow::Result<()> { use std::collections::hash_map::Entry; - - let last = state.last.header().number; - anyhow::ensure!(state.first.header().number <= state.last.header().number); - state - .last - .verify(&self.config.validator_set, self.config.consensus_threshold) - .context("state.last.verify()")?; + let Some(last) = &state.last else { + return Ok(()); + }; + last.verify(self.genesis()).context("state.last.verify()")?; let mut peers = self.peers.lock().unwrap(); match peers.entry(peer.clone()) { - Entry::Occupied(mut e) => e.get_mut().state = state, + Entry::Occupied(mut e) => e.get_mut().state = state.clone(), Entry::Vacant(e) => { let permits = self.config.max_concurrent_blocks_per_peer; e.insert(PeerState { - state, + state: state.clone(), get_block_semaphore: Arc::new(sync::Semaphore::new(permits)), }); } } self.highest_peer_block .send_if_modified(|highest_peer_block| { - if *highest_peer_block >= last { + if *highest_peer_block >= last.header().number { return false; } - *highest_peer_block = last; + *highest_peer_block = last.header().number; true }); Ok(()) } /// Task fetching blocks from peers which are not present in storage. - pub(crate) async fn run_block_fetcher(&self, ctx: &ctx::Ctx) -> ctx::OrCanceled<()> { + pub(crate) async fn run_block_fetcher(&self, ctx: &ctx::Ctx) -> ctx::Result<()> { let sem = sync::Semaphore::new(self.config.max_concurrent_blocks); scope::run!(ctx, |ctx, s| async { let mut next = self.storage.subscribe().borrow().next(); @@ -109,11 +108,11 @@ impl PeerStates { }) .await?; let permit = sync::acquire(ctx, &sem).await?; - let block_number = NoCopy::from(next); + let block_number = ctx::NoCopy(next); next = next.next(); s.spawn(async { let _permit = permit; - self.fetch_block(ctx, block_number.into_inner()).await + self.fetch_block(ctx, block_number.into()).await }); } }) @@ -122,17 +121,19 @@ impl PeerStates { /// Fetches the block from peers and puts it to storage. /// Early exits if the block appeared in storage from other source. - async fn fetch_block(&self, ctx: &ctx::Ctx, block_number: BlockNumber) -> ctx::OrCanceled<()> { + async fn fetch_block(&self, ctx: &ctx::Ctx, block_number: BlockNumber) -> ctx::Result<()> { let _ = scope::run!(ctx, |ctx, s| async { s.spawn_bg(async { let block = self.fetch_block_from_peers(ctx, block_number).await?; self.storage.queue_block(ctx, block).await }); // Cancel fetching as soon as block is queued for storage. - self.storage.wait_until_queued(ctx, block_number).await + self.storage.wait_until_queued(ctx, block_number).await?; + Ok(()) }) .await; - self.storage.wait_until_persisted(ctx, block_number).await + self.storage.wait_until_persisted(ctx, block_number).await?; + Ok(()) } /// Fetches the block from peers. 
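+    /// Each block received from a peer is verified against the genesis
+    /// (`block.verify(self.genesis())`) before it is returned and queued
+    /// for storage.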
@@ -200,9 +201,7 @@ impl PeerStates { ) .into()); } - block - .validate(&self.config.validator_set, self.config.consensus_threshold) - .context("block.validate()")?; + block.verify(self.genesis()).context("block.validate()")?; Ok(block) } diff --git a/node/actors/sync_blocks/src/peers/tests/basics.rs b/node/actors/sync_blocks/src/peers/tests/basics.rs index 4ccda284..18468c7d 100644 --- a/node/actors/sync_blocks/src/peers/tests/basics.rs +++ b/node/actors/sync_blocks/src/peers/tests/basics.rs @@ -3,8 +3,9 @@ use super::*; use crate::{ io, - tests::{send_block, sync_state}, + tests::{make_response, sync_state}, }; +use rand::seq::SliceRandom as _; #[derive(Debug)] struct UpdatingPeerStateWithSingleBlock; @@ -26,7 +27,7 @@ impl Test for UpdatingPeerStateWithSingleBlock { let rng = &mut ctx.rng(); let peer_key = rng.gen::().public(); peer_states - .update(&peer_key, sync_state(&setup, 1)) + .update(&peer_key, sync_state(&setup, setup.blocks.first())) .unwrap(); // Check that the actor has sent a `get_block` request to the peer @@ -37,16 +38,18 @@ impl Test for UpdatingPeerStateWithSingleBlock { response, }) = message; assert_eq!(recipient, peer_key); - assert_eq!(number, BlockNumber(1)); + assert_eq!(number, setup.blocks[0].number()); // Emulate the peer sending a correct response. - send_block(&setup, BlockNumber(1), response); + response.send(make_response(setup.blocks.first())).unwrap(); let peer_event = events_receiver.recv(ctx).await?; - assert_matches!(peer_event, PeerStateEvent::GotBlock(BlockNumber(1))); + assert_matches!(peer_event, PeerStateEvent::GotBlock(n) if n == setup.blocks[0].number()); // Check that the block has been saved locally. - storage.wait_until_persisted(ctx, BlockNumber(1)).await?; + storage + .wait_until_persisted(ctx, setup.blocks[0].number()) + .await?; Ok(()) } } @@ -75,7 +78,7 @@ impl Test for CancelingBlockRetrieval { let rng = &mut ctx.rng(); let peer_key = rng.gen::().public(); peer_states - .update(&peer_key, sync_state(&setup, 1)) + .update(&peer_key, sync_state(&setup, setup.blocks.first())) .unwrap(); // Check that the actor has sent a `get_block` request to the peer @@ -83,7 +86,7 @@ impl Test for CancelingBlockRetrieval { message_receiver.recv(ctx).await?; // Emulate receiving block using external means. - storage.queue_block(ctx, setup.blocks[1].clone()).await?; + storage.queue_block(ctx, setup.blocks[0].clone()).await?; // Retrieval of the block must be canceled. response.closed().await; @@ -113,12 +116,12 @@ impl Test for FilteringBlockRetrieval { } = handles; // Emulate receiving block using external means. - storage.queue_block(ctx, setup.blocks[1].clone()).await?; + storage.queue_block(ctx, setup.blocks[0].clone()).await?; let rng = &mut ctx.rng(); let peer_key = rng.gen::().public(); peer_states - .update(&peer_key, sync_state(&setup, 2)) + .update(&peer_key, sync_state(&setup, setup.blocks.get(1))) .unwrap(); // Check that the actor has sent `get_block` request to the peer, but only for block #2. @@ -127,8 +130,7 @@ impl Test for FilteringBlockRetrieval { recipient, number, .. 
}) = message; assert_eq!(recipient, peer_key); - assert_eq!(number, BlockNumber(2)); - + assert_eq!(number, setup.blocks[1].number()); assert!(message_receiver.try_recv().is_none()); Ok(()) } @@ -150,10 +152,12 @@ impl UpdatingPeerStateWithMultipleBlocks { impl Test for UpdatingPeerStateWithMultipleBlocks { const BLOCK_COUNT: usize = 10; - fn tweak_config(&self, config: &mut Config) { + fn config(&self) -> Config { + let mut config = Config::new(); config.max_concurrent_blocks_per_peer = Self::MAX_CONCURRENT_BLOCKS; // ^ We want to test rate limiting for peers config.sleep_interval_for_get_block = BLOCK_SLEEP_INTERVAL; + config } async fn test(self, ctx: &ctx::Ctx, handles: TestHandles) -> anyhow::Result<()> { @@ -169,11 +173,11 @@ impl Test for UpdatingPeerStateWithMultipleBlocks { let rng = &mut ctx.rng(); let peer_key = rng.gen::().public(); peer_states - .update(&peer_key, sync_state(&setup, Self::BLOCK_COUNT - 1).clone()) + .update(&peer_key, sync_state(&setup, setup.blocks.last()).clone()) .unwrap(); - let mut requested_blocks = HashMap::with_capacity(Self::MAX_CONCURRENT_BLOCKS); - for _ in 1..Self::BLOCK_COUNT { + let mut requested_blocks = HashMap::new(); + for _ in setup.blocks.iter() { let io::OutputMessage::Network(SyncBlocksInputMessage::GetBlock { recipient, number, @@ -191,7 +195,7 @@ impl Test for UpdatingPeerStateWithMultipleBlocks { // Answer a random request. let number = *requested_blocks.keys().choose(rng).unwrap(); let response = requested_blocks.remove(&number).unwrap(); - send_block(&setup, number, response); + response.send(make_response(setup.block(number))).unwrap(); let peer_event = events_receiver.recv(ctx).await?; assert_matches!(peer_event, PeerStateEvent::GotBlock(got) if got == number); @@ -201,7 +205,7 @@ impl Test for UpdatingPeerStateWithMultipleBlocks { // Answer all remaining requests. for (number, response) in requested_blocks { - send_block(&setup, number, response); + response.send(make_response(setup.block(number))).unwrap(); let peer_event = events_receiver.recv(ctx).await?; assert_matches!(peer_event, PeerStateEvent::GotBlock(got) if got == number); } @@ -226,8 +230,10 @@ struct DisconnectingPeer; impl Test for DisconnectingPeer { const BLOCK_COUNT: usize = 5; - fn tweak_config(&self, config: &mut Config) { + fn config(&self) -> Config { + let mut config = Config::new(); config.sleep_interval_for_get_block = BLOCK_SLEEP_INTERVAL; + config } async fn test(self, ctx: &ctx::Ctx, handles: TestHandles) -> anyhow::Result<()> { @@ -243,7 +249,7 @@ impl Test for DisconnectingPeer { let rng = &mut ctx.rng(); let peer_key = rng.gen::().public(); peer_states - .update(&peer_key, sync_state(&setup, 1)) + .update(&peer_key, sync_state(&setup, setup.blocks.first())) .unwrap(); // Drop the response sender emulating peer disconnect. @@ -255,7 +261,7 @@ impl Test for DisconnectingPeer { .. }) = &msg; assert_eq!(recipient, &peer_key); - assert_eq!(number, &BlockNumber(1)); + assert_eq!(number, &setup.blocks[0].number()); } drop(msg); @@ -273,7 +279,7 @@ impl Test for DisconnectingPeer { // Re-connect the peer with an updated state. peer_states - .update(&peer_key, sync_state(&setup, 2)) + .update(&peer_key, sync_state(&setup, setup.blocks.get(1))) .unwrap(); // Ensure that blocks are re-requested. 
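// (Both of the first two blocks become eligible again here: the original request
// was dropped and nothing has been persisted yet, so the fetcher re-requests them
// once the peer reappears with a higher advertised head.)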
clock.advance(BLOCK_SLEEP_INTERVAL); @@ -287,18 +293,20 @@ impl Test for DisconnectingPeer { response, }) = message; assert_eq!(recipient, peer_key); - assert!(responses.insert(number.0, response).is_none()); + assert!(responses.insert(number, response).is_none()); } - assert!(responses.contains_key(&1)); - assert!(responses.contains_key(&2)); + assert!(responses.contains_key(&setup.blocks[0].number())); + assert!(responses.contains_key(&setup.blocks[1].number())); // Send one of the responses and drop the other request. - let response = responses.remove(&2).unwrap(); - send_block(&setup, BlockNumber(2), response); + let response = responses.remove(&setup.blocks[1].number()).unwrap(); + response.send(make_response(setup.blocks.get(1))).unwrap(); - wait_for_event(ctx, &mut events_receiver, |ev| { - matches!(ev, PeerStateEvent::GotBlock(BlockNumber(2))) - }) + wait_for_event( + ctx, + &mut events_receiver, + |ev| matches!(ev, PeerStateEvent::GotBlock(n) if n==setup.blocks[1].number()), + ) .await?; drop(responses); wait_for_event( @@ -314,7 +322,7 @@ impl Test for DisconnectingPeer { // Re-connect the peer with the same state. peer_states - .update(&peer_key, sync_state(&setup, 2)) + .update(&peer_key, sync_state(&setup, setup.blocks.get(1))) .unwrap(); clock.advance(BLOCK_SLEEP_INTERVAL); @@ -325,17 +333,17 @@ impl Test for DisconnectingPeer { response, }) = message; assert_eq!(recipient, peer_key); - assert_eq!(number, BlockNumber(1)); - send_block(&setup, number, response); + assert_eq!(number, setup.blocks[0].number()); + response.send(make_response(setup.blocks.first())).unwrap(); let peer_event = events_receiver.recv(ctx).await?; - assert_matches!(peer_event, PeerStateEvent::GotBlock(BlockNumber(1))); + assert_matches!(peer_event, PeerStateEvent::GotBlock(n) if n==setup.blocks[0].number()); // Check that no new requests are sent (all blocks are downloaded). 
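// (The test clock is a `ctx::ManualClock`: advancing it by `BLOCK_SLEEP_INTERVAL`
// is what would let a pending `sleep_interval_for_get_block` retry fire, so an
// empty message channel after the advance proves that no further request was
// issued.)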
clock.advance(BLOCK_SLEEP_INTERVAL); assert_matches!(message_receiver.try_recv(), None); - storage.wait_until_persisted(ctx, BlockNumber(2)).await?; + storage.wait_until_persisted(ctx, BlockNumber(1)).await?; Ok(()) } } @@ -347,18 +355,14 @@ async fn disconnecting_peer() { #[derive(Debug)] struct DownloadingBlocksInGaps { - local_block_numbers: Vec, + local_blocks: Vec, increase_peer_block_number_during_test: bool, } impl DownloadingBlocksInGaps { - fn new(local_block_numbers: &[usize]) -> Self { + fn new(local_blocks: &[usize]) -> Self { Self { - local_block_numbers: local_block_numbers - .iter() - .copied() - .inspect(|&number| assert!(number > 0 && number < Self::BLOCK_COUNT)) - .collect(), + local_blocks: local_blocks.to_vec(), increase_peer_block_number_during_test: false, } } @@ -368,10 +372,12 @@ impl DownloadingBlocksInGaps { impl Test for DownloadingBlocksInGaps { const BLOCK_COUNT: usize = 10; - fn tweak_config(&self, config: &mut Config) { + fn config(&self) -> Config { + let mut config = Config::new(); config.max_concurrent_blocks = 1; // ^ Forces the node to download blocks in a deterministic order config.sleep_interval_for_get_block = BLOCK_SLEEP_INTERVAL; + config } async fn test(self, ctx: &ctx::Ctx, handles: TestHandles) -> anyhow::Result<()> { @@ -385,30 +391,31 @@ impl Test for DownloadingBlocksInGaps { } = handles; scope::run!(ctx, |ctx, s| async { - for &block_number in &self.local_block_numbers { - s.spawn(storage.queue_block(ctx, setup.blocks[block_number].clone())); + for n in &self.local_blocks { + s.spawn(storage.queue_block(ctx, setup.blocks[*n].clone())); } let rng = &mut ctx.rng(); let peer_key = rng.gen::().public(); - let mut last_peer_block_number = if self.increase_peer_block_number_during_test { - rng.gen_range(1..Self::BLOCK_COUNT) + let mut last_peer_block = if self.increase_peer_block_number_during_test { + setup.blocks.choose(rng) } else { - Self::BLOCK_COUNT - 1 + setup.blocks.last() }; peer_states - .update(&peer_key, sync_state(&setup, last_peer_block_number)) + .update(&peer_key, sync_state(&setup, last_peer_block)) .unwrap(); clock.advance(BLOCK_SLEEP_INTERVAL); - let expected_block_numbers = - (1..Self::BLOCK_COUNT).filter(|number| !self.local_block_numbers.contains(number)); - // Check that all missing blocks are requested. 
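// (Blocks listed in `local_blocks` were queued into storage up front, so the
// fetcher must skip them; with `max_concurrent_blocks = 1` the remaining blocks
// are requested strictly one at a time.)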
- for expected_number in expected_block_numbers { - if expected_number > last_peer_block_number { - last_peer_block_number = rng.gen_range(expected_number..Self::BLOCK_COUNT); + for n in 0..setup.blocks.len() { + if self.local_blocks.contains(&n) { + continue; + } + let n = setup.blocks[n].number(); + if n > last_peer_block.unwrap().number() { + last_peer_block = setup.blocks.iter().filter(|b| b.number() >= n).choose(rng); peer_states - .update(&peer_key, sync_state(&setup, last_peer_block_number)) + .update(&peer_key, sync_state(&setup, last_peer_block)) .unwrap(); clock.advance(BLOCK_SLEEP_INTERVAL); } @@ -420,8 +427,8 @@ impl Test for DownloadingBlocksInGaps { }) = message_receiver.recv(ctx).await?; assert_eq!(recipient, peer_key); - assert!(number.0 <= last_peer_block_number as u64); - send_block(&setup, number, response); + assert!(number <= last_peer_block.unwrap().number()); + response.send(make_response(setup.block(number))).unwrap(); storage.wait_until_persisted(ctx, number).await?; clock.advance(BLOCK_SLEEP_INTERVAL); } @@ -437,10 +444,10 @@ const LOCAL_BLOCK_NUMBERS: [&[usize]; 3] = [&[1, 9], &[3, 5, 6, 8], &[4]]; #[test_casing(6, Product((LOCAL_BLOCK_NUMBERS, [false, true])))] #[tokio::test] async fn downloading_blocks_in_gaps( - local_block_numbers: &[usize], + local_blocks: &[usize], increase_peer_block_number_during_test: bool, ) { - let mut test = DownloadingBlocksInGaps::new(local_block_numbers); + let mut test = DownloadingBlocksInGaps::new(local_blocks); test.increase_peer_block_number_during_test = increase_peer_block_number_during_test; test_peer_states(test).await; } @@ -452,8 +459,10 @@ struct LimitingGetBlockConcurrency; impl Test for LimitingGetBlockConcurrency { const BLOCK_COUNT: usize = 5; - fn tweak_config(&self, config: &mut Config) { + fn config(&self) -> Config { + let mut config = Config::new(); config.max_concurrent_blocks = 3; + config } async fn test(self, ctx: &ctx::Ctx, handles: TestHandles) -> anyhow::Result<()> { @@ -467,12 +476,12 @@ impl Test for LimitingGetBlockConcurrency { let rng = &mut ctx.rng(); let peer_key = rng.gen::<node::SecretKey>().public(); peer_states - .update(&peer_key, sync_state(&setup, Self::BLOCK_COUNT - 1)) + .update(&peer_key, sync_state(&setup, setup.blocks.last())) .unwrap(); // The actor should request 3 new blocks it's now aware of from the only peer it's currently // aware of. Note that blocks may be queried in any order. - let mut message_responses = HashMap::with_capacity(3); + let mut message_responses = HashMap::new(); for _ in 0..3 { let io::OutputMessage::Network(SyncBlocksInputMessage::GetBlock { recipient, number, response, }) = message_receiver.recv(ctx).await?; assert_eq!(recipient, peer_key); - assert!(message_responses.insert(number.0, response).is_none()); + assert!(message_responses.insert(number, response).is_none()); } assert_matches!(message_receiver.try_recv(), None); assert_eq!( message_responses.keys().copied().collect::<HashSet<_>>(), - HashSet::from([1, 2, 3]) + setup.blocks[0..3].iter().map(|b| b.number()).collect(), ); tracing::info!("blocks requested"); // Send a correct response.
- let response = message_responses.remove(&1).unwrap(); - send_block(&setup, BlockNumber(1), response); - storage.wait_until_persisted(ctx, BlockNumber(1)).await?; + let response = message_responses.remove(&setup.blocks[0].number()).unwrap(); + response.send(make_response(setup.blocks.first())).unwrap(); + storage + .wait_until_persisted(ctx, setup.blocks[0].number()) + .await?; // The actor should now request another block. let io::OutputMessage::Network(SyncBlocksInputMessage::GetBlock { recipient, number, .. }) = message_receiver.recv(ctx).await?; assert_eq!(recipient, peer_key); - assert_eq!(number, BlockNumber(4)); + assert_eq!(number, setup.blocks[3].number()); Ok(()) } diff --git a/node/actors/sync_blocks/src/peers/tests/fakes.rs b/node/actors/sync_blocks/src/peers/tests/fakes.rs index bc8e5da1..95c6ddd8 100644 --- a/node/actors/sync_blocks/src/peers/tests/fakes.rs +++ b/node/actors/sync_blocks/src/peers/tests/fakes.rs @@ -2,32 +2,29 @@ use super::*; use crate::tests::sync_state; -use zksync_consensus_roles::{validator, validator::testonly::GenesisSetup}; +use zksync_consensus_roles::{validator, validator::testonly::Setup}; use zksync_consensus_storage::testonly::new_store; #[tokio::test] async fn processing_invalid_sync_states() { let ctx = &ctx::test_root(&ctx::RealClock); let rng = &mut ctx.rng(); - let mut setup = GenesisSetup::empty(rng, 4); + let mut setup = Setup::new(rng, 4); setup.push_blocks(rng, 3); - let (storage, _runner) = new_store(ctx, &setup.blocks[0]).await; + let (storage, _runner) = new_store(ctx, &setup.genesis).await; let (message_sender, _) = channel::unbounded(); - let peer_states = PeerStates::new(test_config(&setup), storage, message_sender); - + let peer_states = PeerStates::new(Config::new(), storage, message_sender); let peer = &rng.gen::().public(); - let mut invalid_sync_state = sync_state(&setup, 1); - invalid_sync_state.first = setup.blocks[2].justification.clone(); - assert!(peer_states.update(peer, invalid_sync_state).is_err()); - let mut invalid_sync_state = sync_state(&setup, 1); - invalid_sync_state.last.message.proposal.number = BlockNumber(5); + let mut invalid_block = setup.blocks[1].clone(); + invalid_block.justification.message.proposal.number = rng.gen(); + let invalid_sync_state = sync_state(&setup, Some(&invalid_block)); assert!(peer_states.update(peer, invalid_sync_state).is_err()); - let mut other_network = GenesisSetup::empty(rng, 4); + let mut other_network = Setup::new(rng, 4); other_network.push_blocks(rng, 2); - let invalid_sync_state = sync_state(&other_network, 1); + let invalid_sync_state = sync_state(&other_network, other_network.blocks.get(1)); assert!(peer_states.update(peer, invalid_sync_state).is_err()); } @@ -49,8 +46,9 @@ impl Test for PeerWithFakeSyncState { let rng = &mut ctx.rng(); let peer_key = rng.gen::().public(); - let mut fake_sync_state = sync_state(&setup, 1); - fake_sync_state.last.message.proposal.number = BlockNumber(42); + let mut invalid_block = setup.blocks[1].clone(); + invalid_block.justification.message.proposal.number = rng.gen(); + let fake_sync_state = sync_state(&setup, Some(&invalid_block)); assert!(peer_states.update(&peer_key, fake_sync_state).is_err()); clock.advance(BLOCK_SLEEP_INTERVAL); @@ -71,8 +69,10 @@ struct PeerWithFakeBlock; impl Test for PeerWithFakeBlock { const BLOCK_COUNT: usize = 10; - fn tweak_config(&self, cfg: &mut Config) { + fn config(&self) -> Config { + let mut cfg = Config::new(); cfg.sleep_interval_for_get_block = BLOCK_SLEEP_INTERVAL; + cfg } async fn test(self, ctx: 
&ctx::Ctx, handles: TestHandles) -> anyhow::Result<()> { @@ -89,23 +89,23 @@ impl Test for PeerWithFakeBlock { for fake_block in [ // other block than requested - setup.blocks[0].clone(), + setup.blocks[1].clone(), // block with wrong validator set { - let mut setup = GenesisSetup::empty(rng, 4); - setup.push_blocks(rng, 2); - setup.blocks[1].clone() + let mut s = Setup::new(rng, 4); + s.push_blocks(rng, 1); + s.blocks[0].clone() }, // block with mismatching payload, { - let mut block = setup.blocks[1].clone(); + let mut block = setup.blocks[0].clone(); block.payload = validator::Payload(b"invalid".to_vec()); block }, ] { - let peer_key = rng.gen::().public(); + let key = rng.gen::().public(); peer_states - .update(&peer_key, sync_state(&setup, 1)) + .update(&key, sync_state(&setup, setup.blocks.first())) .unwrap(); clock.advance(BLOCK_SLEEP_INTERVAL); @@ -114,16 +114,16 @@ impl Test for PeerWithFakeBlock { number, response, }) = message_receiver.recv(ctx).await?; - assert_eq!(recipient, peer_key); - assert_eq!(number, BlockNumber(1)); + assert_eq!(recipient, key); + assert_eq!(number, setup.blocks[0].number()); response.send(Ok(fake_block)).unwrap(); wait_for_event(ctx, &mut events_receiver, |ev| { matches!(ev, PeerStateEvent::RpcFailed { - block_number: BlockNumber(1), - peer_key: key, - } if key == peer_key + block_number, + peer_key, + } if peer_key == key && block_number == number ) }) .await?; diff --git a/node/actors/sync_blocks/src/peers/tests/mod.rs b/node/actors/sync_blocks/src/peers/tests/mod.rs index 6349c408..d6c9d66d 100644 --- a/node/actors/sync_blocks/src/peers/tests/mod.rs +++ b/node/actors/sync_blocks/src/peers/tests/mod.rs @@ -1,5 +1,4 @@ use super::*; -use crate::tests::test_config; use assert_matches::assert_matches; use async_trait::async_trait; use rand::{seq::IteratorRandom, Rng}; @@ -16,7 +15,6 @@ use zksync_consensus_storage::testonly::new_store; mod basics; mod fakes; mod multiple_peers; -mod snapshots; const TEST_TIMEOUT: time::Duration = time::Duration::seconds(5); const BLOCK_SLEEP_INTERVAL: time::Duration = time::Duration::milliseconds(5); @@ -33,7 +31,7 @@ async fn wait_for_event( #[derive(Debug)] struct TestHandles { clock: ctx::ManualClock, - setup: validator::testonly::GenesisSetup, + setup: validator::testonly::Setup, peer_states: Arc, storage: Arc, message_receiver: channel::UnboundedReceiver, @@ -43,17 +41,18 @@ struct TestHandles { #[async_trait] trait Test: fmt::Debug + Send + Sync { const BLOCK_COUNT: usize; + // TODO: move this to genesis const GENESIS_BLOCK_NUMBER: usize = 0; - fn tweak_config(&self, _config: &mut Config) { - // Does nothing by default + fn config(&self) -> Config { + Config::new() } async fn initialize_storage( &self, _ctx: &ctx::Ctx, _storage: &BlockStore, - _setup: &validator::testonly::GenesisSetup, + _setup: &validator::testonly::Setup, ) { // Does nothing by default } @@ -69,16 +68,14 @@ async fn test_peer_states(test: T) { let clock = ctx::ManualClock::new(); let ctx = &ctx::test_root(&clock); let rng = &mut ctx.rng(); - let mut setup = validator::testonly::GenesisSetup::new(rng, 4); + let mut setup = validator::testonly::Setup::new(rng, 4); setup.push_blocks(rng, T::BLOCK_COUNT); - let (store, store_run) = new_store(ctx, &setup.blocks[T::GENESIS_BLOCK_NUMBER]).await; + let (store, store_run) = new_store(ctx, &setup.genesis).await; test.initialize_storage(ctx, store.as_ref(), &setup).await; let (message_sender, message_receiver) = channel::unbounded(); let (events_sender, events_receiver) = channel::unbounded(); - let mut 
config = test_config(&setup); - test.tweak_config(&mut config); - let mut peer_states = PeerStates::new(config, store.clone(), message_sender); + let mut peer_states = PeerStates::new(test.config(), store.clone(), message_sender); peer_states.events_sender = Some(events_sender); let peer_states = Arc::new(peer_states); let test_handles = TestHandles { diff --git a/node/actors/sync_blocks/src/peers/tests/multiple_peers.rs b/node/actors/sync_blocks/src/peers/tests/multiple_peers.rs index dcc21ea5..c6305c36 100644 --- a/node/actors/sync_blocks/src/peers/tests/multiple_peers.rs +++ b/node/actors/sync_blocks/src/peers/tests/multiple_peers.rs @@ -1,7 +1,5 @@ -//! Tests focused on interaction with multiple peers. - use super::*; -use crate::tests::{send_block, sync_state}; +use crate::tests::{make_response, sync_state}; #[derive(Debug)] struct RequestingBlocksFromTwoPeers; @@ -10,10 +8,12 @@ impl Test for RequestingBlocksFromTwoPeers { const BLOCK_COUNT: usize = 5; - fn tweak_config(&self, config: &mut Config) { + fn config(&self) -> Config { + let mut config = Config::new(); config.sleep_interval_for_get_block = BLOCK_SLEEP_INTERVAL; config.max_concurrent_blocks = 5; config.max_concurrent_blocks_per_peer = 1; + config } async fn test(self, ctx: &ctx::Ctx, handles: TestHandles) -> anyhow::Result<()> { @@ -29,7 +29,7 @@ impl Test for RequestingBlocksFromTwoPeers { let rng = &mut ctx.rng(); let first_peer = rng.gen::<node::SecretKey>().public(); peer_states - .update(&first_peer, sync_state(&setup, 2)) + .update(&first_peer, sync_state(&setup, setup.blocks.get(1))) .unwrap(); let io::OutputMessage::Network(SyncBlocksInputMessage::GetBlock { recipient, number: first_peer_block_number, response: first_peer_response, }) = message_receiver.recv(ctx).await?; assert_eq!(recipient, first_peer); - assert!( - first_peer_block_number == BlockNumber(1) || first_peer_block_number == BlockNumber(2) - ); + assert!(setup.blocks[0..=1] + .iter() + .any(|b| b.number() == first_peer_block_number)); tracing::info!(%first_peer_block_number, "received request"); let second_peer = rng.gen::<node::SecretKey>().public(); peer_states - .update(&second_peer, sync_state(&setup, 4)) + .update(&second_peer, sync_state(&setup, setup.blocks.get(3))) .unwrap(); clock.advance(BLOCK_SLEEP_INTERVAL); let io::OutputMessage::Network(SyncBlocksInputMessage::GetBlock { recipient, number: second_peer_block_number, response: second_peer_response, }) = message_receiver.recv(ctx).await?; assert_eq!(recipient, second_peer); - assert!( - second_peer_block_number == BlockNumber(1) - || second_peer_block_number == BlockNumber(2) - ); + assert!(setup.blocks[0..=1] + .iter() + .any(|b| b.number() == second_peer_block_number)); tracing::info!(%second_peer_block_number, "received request"); - send_block(&setup, first_peer_block_number, first_peer_response); + first_peer_response + .send(make_response(setup.block(first_peer_block_number))) + .unwrap(); wait_for_event( ctx, &mut events_receiver, @@ -75,7 +76,7 @@ impl Test for RequestingBlocksFromTwoPeers { assert_matches!(message_receiver.try_recv(), None); peer_states - .update(&first_peer, sync_state(&setup, 4)) + .update(&first_peer, sync_state(&setup, setup.blocks.get(3))) .unwrap(); clock.advance(BLOCK_SLEEP_INTERVAL); // Now the actor can get block #3 from the peer.
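// `setup.block(n)` and `setup.blocks.get(i)`, used throughout these tests, come
// from the new `validator::testonly::Setup`. The lookup the tests rely on is
// assumed to behave roughly like this (hypothetical sketch, not part of the patch):

fn block_by_number(setup: &Setup, n: BlockNumber) -> Option<&validator::FinalBlock> {
    setup.blocks.iter().find(|b| b.number() == n)
}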
@@ -86,12 +87,14 @@ impl Test for RequestingBlocksFromTwoPeers { response: first_peer_response, }) = message_receiver.recv(ctx).await?; assert_eq!(recipient, first_peer); - assert!( - first_peer_block_number == BlockNumber(3) || first_peer_block_number == BlockNumber(4) - ); + assert!(setup.blocks[2..=3] + .iter() + .any(|b| b.number() == first_peer_block_number)); tracing::info!(%first_peer_block_number, "received request"); - send_block(&setup, first_peer_block_number, first_peer_response); + first_peer_response + .send(make_response(setup.block(first_peer_block_number))) + .unwrap(); wait_for_event( ctx, &mut events_receiver, @@ -107,12 +110,14 @@ impl Test for RequestingBlocksFromTwoPeers { response: first_peer_response, }) = message_receiver.recv(ctx).await?; assert_eq!(recipient, first_peer); - assert!( - first_peer_block_number == BlockNumber(3) || first_peer_block_number == BlockNumber(4) - ); + assert!(setup.blocks[2..=3] + .iter() + .any(|b| b.number() == first_peer_block_number)); tracing::info!(%first_peer_block_number, "received request"); - send_block(&setup, second_peer_block_number, second_peer_response); + second_peer_response + .send(make_response(setup.block(second_peer_block_number))) + .unwrap(); wait_for_event( ctx, &mut events_receiver, @@ -120,7 +125,9 @@ impl Test for RequestingBlocksFromTwoPeers { ) .await .unwrap(); - send_block(&setup, first_peer_block_number, first_peer_response); + first_peer_response + .send(make_response(setup.block(first_peer_block_number))) + .unwrap(); wait_for_event( ctx, &mut events_receiver, @@ -132,7 +139,9 @@ impl Test for RequestingBlocksFromTwoPeers { clock.advance(BLOCK_SLEEP_INTERVAL); assert_matches!(message_receiver.try_recv(), None); - storage.wait_until_persisted(ctx, BlockNumber(4)).await?; + storage + .wait_until_persisted(ctx, setup.blocks[3].number()) + .await?; Ok(()) } } @@ -145,17 +154,17 @@ async fn requesting_blocks_from_two_peers() { #[derive(Debug, Clone, Copy)] struct PeerBehavior { /// The peer will go offline after this block. - last_block: BlockNumber, + last_block: usize, /// The peer will stop responding after this block, but will still announce `SyncState` updates. /// Logically, should be `<= last_block`. - last_block_to_return: BlockNumber, + last_block_to_return: usize, } impl Default for PeerBehavior { fn default() -> Self { Self { - last_block: BlockNumber(u64::MAX), - last_block_to_return: BlockNumber(u64::MAX), + last_block: usize::MAX, + last_block_to_return: usize::MAX, } } } @@ -177,7 +186,7 @@ impl RequestingBlocksFromMultiplePeers { } fn create_peers(&self, rng: &mut impl Rng) -> HashMap<node::PublicKey, PeerBehavior> { - let last_block_number = BlockNumber(Self::BLOCK_COUNT as u64 - 1); + let last_block_number = Self::BLOCK_COUNT - 1; let peers = self.peer_behavior.iter().copied().map(|behavior| { let behavior = PeerBehavior { last_block: behavior.last_block.min(last_block_number), @@ -194,9 +203,11 @@ impl Test for RequestingBlocksFromMultiplePeers { const BLOCK_COUNT: usize = 20; - fn tweak_config(&self, config: &mut Config) { + fn config(&self) -> Config { + let mut config = Config::new(); config.sleep_interval_for_get_block = BLOCK_SLEEP_INTERVAL; config.max_concurrent_blocks_per_peer = self.max_concurrent_blocks_per_peer; + config } async fn test(self, ctx: &ctx::Ctx, handles: TestHandles) -> anyhow::Result<()> { @@ -215,14 +226,13 @@ impl Test for RequestingBlocksFromMultiplePeers { scope::run!(ctx, |ctx, s| async { // Announce peer states.
for (peer_key, peer) in peers { - let last_block = peer.last_block.0 as usize; - peer_states.update(peer_key, sync_state(&setup, last_block)).unwrap(); + peer_states.update(peer_key, sync_state(&setup, setup.blocks.get(peer.last_block))).unwrap(); } s.spawn_bg(async { let mut responses_by_peer: HashMap<_, Vec<_>> = HashMap::new(); let mut requested_blocks = HashSet::new(); - while requested_blocks.len() < Self::BLOCK_COUNT - 1 { + while requested_blocks.len() < Self::BLOCK_COUNT { let Ok(message) = message_receiver.recv(ctx).await else { return Ok(()); // Test is finished }; @@ -233,9 +243,9 @@ impl Test for RequestingBlocksFromMultiplePeers { }) = message; tracing::trace!("Block #{number} requested from {recipient:?}"); - assert!(number <= peers[&recipient].last_block); + assert!(number <= setup.blocks[peers[&recipient].last_block].number()); - if peers[&recipient].last_block_to_return < number { + if setup.blocks[peers[&recipient].last_block_to_return].number() < number { tracing::trace!("Dropping request for block #{number} to {recipient:?}"); continue; } @@ -251,7 +261,7 @@ impl Test for RequestingBlocksFromMultiplePeers { // Peer is at capacity, respond to a random request in order to progress let idx = rng.gen_range(0..peer_responses.len()); let (number, response) = peer_responses.remove(idx); - send_block(&setup, number, response); + response.send(make_response(setup.block(number))).unwrap(); } // Respond to some other random requests. @@ -262,22 +272,22 @@ impl Test for RequestingBlocksFromMultiplePeers { continue; } let (number, response) = peer_responses.remove(idx); - send_block(&setup, number, response); + response.send(make_response(setup.block(number))).unwrap(); } } } // Answer to all remaining responses for (number, response) in responses_by_peer.into_values().flatten() { - send_block(&setup, number, response); + response.send(make_response(setup.block(number))).unwrap(); } Ok(()) }); // We advance the clock when a node receives a new block or updates a peer state, // since in both cases some new blocks may become available for download. 
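// (Under the `ctx::ManualClock`, `sleep_interval_for_get_block` only elapses when
// the test advances the clock explicitly, which keeps the retry schedule of this
// multi-peer test deterministic.)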
- let mut block_numbers = HashSet::with_capacity(Self::BLOCK_COUNT - 1); - while block_numbers.len() < Self::BLOCK_COUNT - 1 { + let mut block_numbers = HashSet::with_capacity(Self::BLOCK_COUNT); + while block_numbers.len() < Self::BLOCK_COUNT { let peer_event = events_receiver.recv(ctx).await?; match peer_event { PeerStateEvent::GotBlock(number) => { @@ -291,7 +301,7 @@ impl Test for RequestingBlocksFromMultiplePeers { } } - storage.wait_until_persisted(ctx,BlockNumber(19)).await?; + storage.wait_until_persisted(ctx,setup.blocks.last().unwrap().header().number).await?; Ok(()) }) .await @@ -316,8 +326,8 @@ async fn requesting_blocks_with_failures( ) { let mut test = RequestingBlocksFromMultiplePeers::new(3, max_concurrent_blocks_per_peer); test.respond_probability = respond_probability; - test.peer_behavior[0].last_block = BlockNumber(5); - test.peer_behavior[1].last_block = BlockNumber(15); + test.peer_behavior[0].last_block = 5; + test.peer_behavior[1].last_block = 15; test_peer_states(test).await; } @@ -329,7 +339,7 @@ async fn requesting_blocks_with_unreliable_peers( ) { let mut test = RequestingBlocksFromMultiplePeers::new(3, max_concurrent_blocks_per_peer); test.respond_probability = respond_probability; - test.peer_behavior[0].last_block_to_return = BlockNumber(5); - test.peer_behavior[1].last_block_to_return = BlockNumber(15); + test.peer_behavior[0].last_block_to_return = 5; + test.peer_behavior[1].last_block_to_return = 15; test_peer_states(test).await; } diff --git a/node/actors/sync_blocks/src/peers/tests/snapshots.rs b/node/actors/sync_blocks/src/peers/tests/snapshots.rs index 740e6412..bdb7116d 100644 --- a/node/actors/sync_blocks/src/peers/tests/snapshots.rs +++ b/node/actors/sync_blocks/src/peers/tests/snapshots.rs @@ -1,329 +1,9 @@ //! Tests related to snapshot storage. use super::*; -use crate::tests::{send_block, snapshot_sync_state, sync_state}; +use crate::tests::{send_block, sync_state}; use zksync_consensus_network::io::GetBlockError; -#[derive(Debug)] -struct UpdatingPeerStateWithStorageSnapshot; - -#[async_trait] -impl Test for UpdatingPeerStateWithStorageSnapshot { - const BLOCK_COUNT: usize = 5; - const GENESIS_BLOCK_NUMBER: usize = 2; - - fn tweak_config(&self, config: &mut Config) { - config.sleep_interval_for_get_block = BLOCK_SLEEP_INTERVAL; - } - - async fn test(self, ctx: &ctx::Ctx, handles: TestHandles) -> anyhow::Result<()> { - let TestHandles { - setup, - peer_states, - storage, - mut message_receiver, - mut events_receiver, - clock, - } = handles; - let rng = &mut ctx.rng(); - let peer_key = rng.gen::().public(); - for stale_block_number in [1, 2] { - peer_states - .update(&peer_key, sync_state(&setup, stale_block_number)) - .unwrap(); - - // No new block requests should be issued. - clock.advance(BLOCK_SLEEP_INTERVAL); - sync::yield_now().await; - assert!(message_receiver.try_recv().is_none()); - } - - peer_states - .update(&peer_key, sync_state(&setup, 3)) - .unwrap(); - - // Check that the actor has sent a `get_block` request to the peer - let message = message_receiver.recv(ctx).await?; - let io::OutputMessage::Network(SyncBlocksInputMessage::GetBlock { - recipient, - number, - response, - }) = message; - assert_eq!(recipient, peer_key); - assert_eq!(number, BlockNumber(3)); - - // Emulate the peer sending a correct response. 
- send_block(&setup, BlockNumber(3), response); - - wait_for_event(ctx, &mut events_receiver, |ev| { - matches!(ev, PeerStateEvent::GotBlock(BlockNumber(3))) - }) - .await - .unwrap(); - - // Check that the block has been saved locally. - storage.wait_until_queued(ctx, BlockNumber(3)).await?; - Ok(()) - } -} - -#[tokio::test] -async fn updating_peer_state_with_storage_snapshot() { - test_peer_states(UpdatingPeerStateWithStorageSnapshot).await; -} - -#[derive(Debug)] -struct FilteringRequestsForSnapshotPeer; - -#[async_trait] -impl Test for FilteringRequestsForSnapshotPeer { - const BLOCK_COUNT: usize = 5; - - fn tweak_config(&self, config: &mut Config) { - config.sleep_interval_for_get_block = BLOCK_SLEEP_INTERVAL; - } - - async fn test(self, ctx: &ctx::Ctx, handles: TestHandles) -> anyhow::Result<()> { - let TestHandles { - setup, - peer_states, - mut message_receiver, - mut events_receiver, - clock, - .. - } = handles; - - let rng = &mut ctx.rng(); - let peer_key = rng.gen::().public(); - peer_states - .update(&peer_key, snapshot_sync_state(&setup, 2..=2)) - .unwrap(); - - // The peer should only be queried for blocks that it actually has (#2 in this case). - let message = message_receiver.recv(ctx).await?; - let io::OutputMessage::Network(SyncBlocksInputMessage::GetBlock { - recipient, - number, - response, - }) = message; - assert_eq!(recipient, peer_key); - assert_eq!(number, BlockNumber(2)); - - // Emulate the peer sending a correct response. - send_block(&setup, BlockNumber(2), response); - wait_for_event(ctx, &mut events_receiver, |ev| { - matches!(ev, PeerStateEvent::GotBlock(BlockNumber(2))) - }) - .await - .unwrap(); - - // No further requests should be made. - clock.advance(BLOCK_SLEEP_INTERVAL); - sync::yield_now().await; - assert!(message_receiver.try_recv().is_none()); - - // Emulate peer receiving / producing a new block. - peer_states - .update(&peer_key, snapshot_sync_state(&setup, 2..=3)) - .unwrap(); - - let message = message_receiver.recv(ctx).await?; - let io::OutputMessage::Network(SyncBlocksInputMessage::GetBlock { - recipient, - number, - response: block3_response, - }) = message; - assert_eq!(recipient, peer_key); - assert_eq!(number, BlockNumber(3)); - - // Emulate another peer with full history. - let full_peer_key = rng.gen::().public(); - peer_states - .update(&full_peer_key, sync_state(&setup, 3)) - .unwrap(); - clock.advance(BLOCK_SLEEP_INTERVAL); - - // A node should only request block #1 from the peer; block #3 is already requested, - // and it has #2 locally. - let message = message_receiver.recv(ctx).await?; - let io::OutputMessage::Network(SyncBlocksInputMessage::GetBlock { - recipient, - number, - response, - }) = message; - assert_eq!(recipient, full_peer_key); - assert_eq!(number, BlockNumber(1)); - - send_block(&setup, BlockNumber(1), response); - wait_for_event(ctx, &mut events_receiver, |ev| { - matches!(ev, PeerStateEvent::GotBlock(BlockNumber(1))) - }) - .await - .unwrap(); - - drop(block3_response); // Emulate first peer disconnecting. - wait_for_event( - ctx, - &mut events_receiver, - |ev| matches!(ev,PeerStateEvent::PeerDropped(key) if key == peer_key), - ) - .await - .unwrap(); - clock.advance(BLOCK_SLEEP_INTERVAL); - - // Now, block #3 will be requested from the peer with full history. - let message = message_receiver.recv(ctx).await?; - let io::OutputMessage::Network(SyncBlocksInputMessage::GetBlock { - recipient, number, .. 
- }) = message; - assert_eq!(recipient, full_peer_key); - assert_eq!(number, BlockNumber(3)); - - Ok(()) - } -} - -#[tokio::test] -async fn filtering_requests_for_snapshot_peer() { - test_peer_states(FilteringRequestsForSnapshotPeer).await; -} - -#[derive(Debug)] -struct PruningPeerHistory; - -#[async_trait] -impl Test for PruningPeerHistory { - const BLOCK_COUNT: usize = 5; - - fn tweak_config(&self, config: &mut Config) { - config.sleep_interval_for_get_block = BLOCK_SLEEP_INTERVAL; - } - - async fn test(self, ctx: &ctx::Ctx, handles: TestHandles) -> anyhow::Result<()> { - let TestHandles { - setup, - peer_states, - mut message_receiver, - mut events_receiver, - clock, - .. - } = handles; - - let rng = &mut ctx.rng(); - let peer_key = rng.gen::().public(); - peer_states - .update(&peer_key, sync_state(&setup, 1)) - .unwrap(); - - let message = message_receiver.recv(ctx).await?; - let io::OutputMessage::Network(SyncBlocksInputMessage::GetBlock { - recipient, - number, - response: block1_response, - }) = message; - assert_eq!(recipient, peer_key); - assert_eq!(number, BlockNumber(1)); - - // Emulate peer pruning blocks. - peer_states - .update(&peer_key, snapshot_sync_state(&setup, 3..=3)) - .unwrap(); - - let message = message_receiver.recv(ctx).await?; - let io::OutputMessage::Network(SyncBlocksInputMessage::GetBlock { - recipient, - number, - response, - }) = message; - assert_eq!(recipient, peer_key); - assert_eq!(number, BlockNumber(3)); - - send_block(&setup, BlockNumber(3), response); - wait_for_event(ctx, &mut events_receiver, |ev| { - matches!(ev, PeerStateEvent::GotBlock(BlockNumber(3))) - }) - .await - .unwrap(); - - // No new blocks should be requested (the peer has no block #2). - clock.advance(BLOCK_SLEEP_INTERVAL); - sync::yield_now().await; - assert!(message_receiver.try_recv().is_none()); - - block1_response - .send(Err(GetBlockError::NotAvailable)) - .unwrap(); - // Block #1 should not be requested again (the peer no longer has it). - clock.advance(BLOCK_SLEEP_INTERVAL); - sync::yield_now().await; - assert!(message_receiver.try_recv().is_none()); - - Ok(()) - } -} - -#[tokio::test] -async fn pruning_peer_history() { - test_peer_states(PruningPeerHistory).await; -} - -#[derive(Debug)] -struct BackfillingPeerHistory; - -#[async_trait] -impl Test for BackfillingPeerHistory { - const BLOCK_COUNT: usize = 5; - - fn tweak_config(&self, config: &mut Config) { - config.sleep_interval_for_get_block = BLOCK_SLEEP_INTERVAL; - } - - async fn test(self, ctx: &ctx::Ctx, handles: TestHandles) -> anyhow::Result<()> { - let TestHandles { - setup, - peer_states, - mut message_receiver, - clock, - .. - } = handles; - - let rng = &mut ctx.rng(); - let peer_key = rng.gen::().public(); - peer_states - .update(&peer_key, snapshot_sync_state(&setup, 3..=3)) - .unwrap(); - - let message = message_receiver.recv(ctx).await?; - let io::OutputMessage::Network(SyncBlocksInputMessage::GetBlock { - recipient, number, .. - }) = message; - assert_eq!(recipient, peer_key); - assert_eq!(number, BlockNumber(3)); - - peer_states - .update(&peer_key, sync_state(&setup, 3)) - .unwrap(); - clock.advance(BLOCK_SLEEP_INTERVAL); - let mut new_requested_numbers = HashSet::new(); - for _ in 0..2 { - let message = message_receiver.recv(ctx).await?; - let io::OutputMessage::Network(SyncBlocksInputMessage::GetBlock { - recipient, - number, - .. 
- }) = message; - assert_eq!(recipient, peer_key); - new_requested_numbers.insert(number); - } - assert_eq!( - new_requested_numbers, - HashSet::from([BlockNumber(1), BlockNumber(2)]) - ); - - Ok(()) - } -} - #[tokio::test] async fn backfilling_peer_history() { test_peer_states(BackfillingPeerHistory).await; diff --git a/node/actors/sync_blocks/src/tests/end_to_end.rs b/node/actors/sync_blocks/src/tests/end_to_end.rs index 96f254b1..018b99ee 100644 --- a/node/actors/sync_blocks/src/tests/end_to_end.rs +++ b/node/actors/sync_blocks/src/tests/end_to_end.rs @@ -7,12 +7,13 @@ use std::fmt; use test_casing::test_casing; use tracing::{instrument, Instrument}; use zksync_concurrency::{ - ctx, scope, + ctx, + ctx::channel, + scope, testonly::{abort_on_panic, set_timeout}, }; use zksync_consensus_network as network; use zksync_consensus_storage::testonly::new_store; -use zksync_consensus_utils::no_copy::NoCopy; type NetworkDispatcherPipe = pipe::DispatcherPipe; @@ -20,66 +21,38 @@ type NetworkDispatcherPipe = #[derive(Debug)] struct Node { store: Arc, - setup: Arc, - switch_on_sender: Option>, - _switch_off_sender: oneshot::Sender<()>, + start: channel::Sender<()>, + terminate: channel::Sender<()>, } impl Node { - async fn new_network( - ctx: &ctx::Ctx, - node_count: usize, - gossip_peers: usize, - ) -> (Vec, Vec) { - let rng = &mut ctx.rng(); - // NOTE: originally there were only 4 consensus nodes. - let mut setup = validator::testonly::GenesisSetup::new(rng, node_count); - setup.push_blocks(rng, 20); - let setup = Arc::new(setup); - let mut nodes = vec![]; - let mut runners = vec![]; - for net in network::testonly::new_configs(rng, &setup, gossip_peers) { - let (n, r) = Node::new(ctx, net, setup.clone()).await; - nodes.push(n); - runners.push(r); - } - (nodes, runners) - } - - async fn new( - ctx: &ctx::Ctx, - network: network::Config, - setup: Arc, - ) -> (Self, NodeRunner) { - let (store, store_runner) = new_store(ctx, &setup.blocks[0]).await; - let (switch_on_sender, switch_on_receiver) = oneshot::channel(); - let (switch_off_sender, switch_off_receiver) = oneshot::channel(); + async fn new(ctx: &ctx::Ctx, network: network::Config, setup: &Setup) -> (Self, NodeRunner) { + let (store, store_runner) = new_store(ctx, &setup.genesis).await; + let (start_send, start_recv) = channel::bounded(1); + let (terminate_send, terminate_recv) = channel::bounded(1); let runner = NodeRunner { network, store: store.clone(), store_runner, - setup: setup.clone(), - switch_on_receiver, - switch_off_receiver, + start: start_recv, + terminate: terminate_recv, }; let this = Self { store, - setup, - switch_on_sender: Some(switch_on_sender), - _switch_off_sender: switch_off_sender, + start: start_send, + terminate: terminate_send, }; (this, runner) } - fn switch_on(&mut self) { - self.switch_on_sender.take(); + fn start(&self) { + let _ = self.start.try_send(()); } - async fn put_block(&self, ctx: &ctx::Ctx, block_number: BlockNumber) { - tracing::trace!(%block_number, "Storing new block"); - let block = &self.setup.blocks[block_number.0 as usize]; - self.store.queue_block(ctx, block.clone()).await.unwrap(); + async fn terminate(&self, ctx: &ctx::Ctx) -> ctx::OrCanceled<()> { + let _ = self.terminate.try_send(()); + self.terminate.closed(ctx).await } } @@ -88,29 +61,25 @@ struct NodeRunner { network: network::Config, store: Arc, store_runner: BlockStoreRunner, - setup: Arc, - switch_on_receiver: oneshot::Receiver<()>, - switch_off_receiver: oneshot::Receiver<()>, + start: channel::Receiver<()>, + terminate: 
channel::Receiver<()>, } impl NodeRunner { - async fn run(self, ctx: &ctx::Ctx) -> anyhow::Result<()> { + async fn run(mut self, ctx: &ctx::Ctx) -> anyhow::Result<()> { tracing::info!("NodeRunner::run()"); let key = self.network.gossip.key.public(); let (sync_blocks_actor_pipe, sync_blocks_dispatcher_pipe) = pipe::new(); let (mut network, network_runner) = - network::testonly::Instance::new(self.network.clone(), self.store.clone()); - let sync_blocks_config = test_config(&self.setup); - scope::run!(ctx, |ctx, s| async { + network::testonly::Instance::new(ctx, self.network.clone(), self.store.clone()); + let sync_blocks_config = Config::new(); + let res = scope::run!(ctx, |ctx, s| async { s.spawn_bg(self.store_runner.run(ctx)); s.spawn_bg(network_runner.run(ctx)); network.wait_for_gossip_connections().await; tracing::info!("Node connected to peers"); - self.switch_on_receiver - .recv_or_disconnected(ctx) - .await? - .ok(); + self.start.recv(ctx).await?; tracing::info!("switch_on"); s.spawn_bg( async { @@ -123,11 +92,14 @@ impl NodeRunner { s.spawn_bg(sync_blocks_config.run(ctx, sync_blocks_actor_pipe, self.store.clone())); tracing::info!("Node is fully started"); - let _ = self.switch_off_receiver.recv_or_disconnected(ctx).await; - tracing::info!("Node stopped"); + let _ = self.terminate.recv(ctx).await; + tracing::info!("stopping"); Ok(()) }) - .await + .await; + drop(self.terminate); + tracing::info!("node stopped"); + res } async fn run_executor( @@ -164,7 +136,7 @@ impl NodeRunner { trait GossipNetworkTest: fmt::Debug + Send { /// Returns the number of nodes in the gossip network and number of peers for each node. fn network_params(&self) -> (usize, usize); - async fn test(self, ctx: &ctx::Ctx, network: Vec) -> anyhow::Result<()>; + async fn test(self, ctx: &ctx::Ctx, setup: &Setup, network: Vec) -> anyhow::Result<()>; } #[instrument(level = "trace")] @@ -172,13 +144,22 @@ async fn test_sync_blocks(test: T) { abort_on_panic(); let _guard = set_timeout(TEST_TIMEOUT); let ctx = &ctx::test_root(&ctx::AffineClock::new(25.)); + let rng = &mut ctx.rng(); let (node_count, gossip_peers) = test.network_params(); - let (nodes, runners) = Node::new_network(ctx, node_count, gossip_peers).await; + + let mut setup = validator::testonly::Setup::new(rng, node_count); + setup.push_blocks(rng, 10); scope::run!(ctx, |ctx, s| async { - for (i, runner) in runners.into_iter().enumerate() { + let mut nodes = vec![]; + for (i, net) in network::testonly::new_configs(rng, &setup, gossip_peers) + .into_iter() + .enumerate() + { + let (node, runner) = Node::new(ctx, net, &setup).await; s.spawn_bg(runner.run(ctx).instrument(tracing::info_span!("node", i))); + nodes.push(node); } - test.test(ctx, nodes).await + test.test(ctx, &setup, nodes).await }) .await .unwrap(); @@ -196,52 +177,43 @@ impl GossipNetworkTest for BasicSynchronization { (self.node_count, self.gossip_peers) } - async fn test(self, ctx: &ctx::Ctx, mut node_handles: Vec) -> anyhow::Result<()> { + async fn test(self, ctx: &ctx::Ctx, setup: &Setup, nodes: Vec) -> anyhow::Result<()> { let rng = &mut ctx.rng(); tracing::info!("Check initial node states"); - for node_handle in &mut node_handles { - node_handle.switch_on(); - let state = node_handle.store.subscribe().borrow().clone(); - assert_eq!(state.first.header().number, BlockNumber(0)); - assert_eq!(state.last.header().number, BlockNumber(0)); + for node in &nodes { + node.start(); + let state = node.store.subscribe().borrow().clone(); + assert_eq!(state.first, setup.genesis.fork.first_block); + 
assert_eq!(state.last, None); } - for block_number in (1..5).map(BlockNumber) { - let sending_node = node_handles.choose(rng).unwrap(); - sending_node.put_block(ctx, block_number).await; - - tracing::info!("Wait until all nodes get block #{block_number}"); - for node_handle in &mut node_handles { - node_handle - .store - .wait_until_persisted(ctx, block_number) - .await?; - tracing::info!("OK"); + for block in &setup.blocks[0..5] { + let node = nodes.choose(rng).unwrap(); + node.store.queue_block(ctx, block.clone()).await.unwrap(); + + tracing::info!("Wait until all nodes get block #{}", block.number()); + for node in &nodes { + node.store.wait_until_persisted(ctx, block.number()).await?; } } - let sending_node = node_handles.choose(rng).unwrap(); + let node = nodes.choose(rng).unwrap(); scope::run!(ctx, |ctx, s| async { // Add a batch of blocks. - for block_number in (5..10).rev().map(BlockNumber) { - let block_number = NoCopy::from(block_number); - s.spawn_bg(async { - sending_node.put_block(ctx, block_number.into_inner()).await; - Ok(()) - }); + for block in setup.blocks[5..].iter().rev() { + s.spawn_bg(node.store.queue_block(ctx, block.clone())); } // Wait until nodes get all new blocks. - for node_handle in &node_handles { - node_handle - .store - .wait_until_persisted(ctx, BlockNumber(9)) - .await?; + let last = setup.blocks.last().unwrap().number(); + for node in &nodes { + node.store.wait_until_persisted(ctx, last).await?; } Ok(()) }) - .await + .await?; + Ok(()) } } @@ -277,37 +249,38 @@ impl GossipNetworkTest for SwitchingOffNodes { (self.node_count, self.node_count / 2) } - async fn test(self, ctx: &ctx::Ctx, mut node_handles: Vec) -> anyhow::Result<()> { + async fn test(self, ctx: &ctx::Ctx, setup: &Setup, mut nodes: Vec) -> anyhow::Result<()> { let rng = &mut ctx.rng(); + nodes.shuffle(rng); - for node_handle in &mut node_handles { - node_handle.switch_on(); + for node in &nodes { + node.start(); } - let mut block_number = BlockNumber(1); - while !node_handles.is_empty() { - tracing::info!("{} nodes left", node_handles.len()); - - let sending_node = node_handles.choose(rng).unwrap(); - sending_node.put_block(ctx, block_number).await; - tracing::info!("block {block_number} inserted"); + for i in 0..nodes.len() { + tracing::info!("{} nodes left", nodes.len() - i); + let block = &setup.blocks[i]; + nodes[i..] + .choose(rng) + .unwrap() + .store + .queue_block(ctx, block.clone()) + .await + .unwrap(); + tracing::info!("block {} inserted", block.number()); // Wait until all remaining nodes get the new block. - for node_handle in &node_handles { - node_handle - .store - .wait_until_persisted(ctx, block_number) - .await?; + for node in &nodes[i..] { + node.store.wait_until_persisted(ctx, block.number()).await?; } - tracing::trace!("All nodes received block #{block_number}"); - block_number = block_number.next(); + tracing::info!("All nodes received block #{}", block.number()); - // Switch off a random node by dropping its handle. + // Terminate a random node. // We start switching off only after the first round, to make sure all nodes are fully // started. 
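// (`Node::terminate`, defined above, is idempotent and doubles as a completion
// barrier:
//     let _ = self.terminate.try_send(());  // ask the runner to stop
//     self.terminate.closed(ctx).await      // resolves once the runner drops its receiver
// so the `nodes[i].terminate(ctx)` call below only returns once the removed node
// has fully wound down.)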
- let node_index_to_remove = rng.gen_range(0..node_handles.len()); - node_handles.swap_remove(node_index_to_remove); + nodes[i].terminate(ctx).await.unwrap(); } + tracing::info!("test finished, terminating"); Ok(()) } } @@ -329,30 +302,25 @@ impl GossipNetworkTest for SwitchingOnNodes { (self.node_count, self.node_count / 2) } - async fn test(self, ctx: &ctx::Ctx, mut node_handles: Vec) -> anyhow::Result<()> { + async fn test(self, ctx: &ctx::Ctx, setup: &Setup, mut nodes: Vec) -> anyhow::Result<()> { let rng = &mut ctx.rng(); - - let mut switched_on_nodes = Vec::with_capacity(self.node_count); - let mut block_number = BlockNumber(1); - while switched_on_nodes.len() < self.node_count { - // Switch on a random node. - let node_index_to_switch_on = rng.gen_range(0..node_handles.len()); - let mut node_handle = node_handles.swap_remove(node_index_to_switch_on); - node_handle.switch_on(); - switched_on_nodes.push(node_handle); - - let sending_node = switched_on_nodes.choose(rng).unwrap(); - sending_node.put_block(ctx, block_number).await; + nodes.shuffle(rng); + for i in 0..nodes.len() { + nodes[i].start(); // Switch on a node. + let block = &setup.blocks[i]; + nodes[0..i + 1] + .choose(rng) + .unwrap() + .store + .queue_block(ctx, block.clone()) + .await + .unwrap(); // Wait until all switched on nodes get the new block. - for node_handle in &mut switched_on_nodes { - node_handle - .store - .wait_until_persisted(ctx, block_number) - .await?; + for node in &nodes[0..i + 1] { + node.store.wait_until_persisted(ctx, block.number()).await?; } - tracing::trace!("All nodes received block #{block_number}"); - block_number = block_number.next(); + tracing::trace!("All nodes received block #{}", block.number()); } Ok(()) } diff --git a/node/actors/sync_blocks/src/tests/mod.rs b/node/actors/sync_blocks/src/tests/mod.rs index 54dd2fff..c7273669 100644 --- a/node/actors/sync_blocks/src/tests/mod.rs +++ b/node/actors/sync_blocks/src/tests/mod.rs @@ -1,13 +1,8 @@ //! Tests for the block syncing actor. 
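//! (The `sync_state` and `make_response` helpers below replace the old
//! `send_block` and `snapshot_sync_state` utilities: a peer's state is now
//! described by an optional last block rather than a block-number range.)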
use super::*; -use rand::{ - distributions::{Distribution, Standard}, - Rng, -}; -use std::ops; -use zksync_concurrency::{oneshot, time}; +use zksync_concurrency::time; use zksync_consensus_network::io::GetBlockError; -use zksync_consensus_roles::validator::{self, testonly::GenesisSetup, BlockNumber, ValidatorSet}; +use zksync_consensus_roles::validator::{self, testonly::Setup}; use zksync_consensus_storage::{BlockStore, BlockStoreRunner, BlockStoreState}; use zksync_consensus_utils::pipe; @@ -15,42 +10,15 @@ mod end_to_end; const TEST_TIMEOUT: time::Duration = time::Duration::seconds(20); -impl Distribution for Standard { - fn sample(&self, rng: &mut R) -> Config { - let validator_set: ValidatorSet = rng.gen(); - let consensus_threshold = validator_set.len(); - Config::new(validator_set, consensus_threshold).unwrap() - } -} - -pub(crate) fn test_config(setup: &GenesisSetup) -> Config { - Config::new(setup.validator_set(), setup.keys.len()).unwrap() -} - -pub(crate) fn sync_state(setup: &GenesisSetup, last_block_number: usize) -> BlockStoreState { - snapshot_sync_state(setup, 1..=last_block_number) -} - -pub(crate) fn snapshot_sync_state( - setup: &GenesisSetup, - range: ops::RangeInclusive, -) -> BlockStoreState { - assert!(!range.is_empty()); +pub(crate) fn sync_state(setup: &Setup, last: Option<&validator::FinalBlock>) -> BlockStoreState { BlockStoreState { - first: setup.blocks[*range.start()].justification.clone(), - last: setup.blocks[*range.end()].justification.clone(), + first: setup.genesis.fork.first_block, + last: last.map(|b| b.justification.clone()), } } -pub(crate) fn send_block( - setup: &GenesisSetup, - number: BlockNumber, - response: oneshot::Sender>, -) { - let block = setup - .blocks - .get(number.0 as usize) - .cloned() - .ok_or(GetBlockError::NotAvailable); - response.send(block).ok(); +pub(crate) fn make_response( + block: Option<&validator::FinalBlock>, +) -> Result { + block.cloned().ok_or(GetBlockError::NotAvailable) } diff --git a/node/libs/concurrency/src/ctx/mod.rs b/node/libs/concurrency/src/ctx/mod.rs index ac67f14d..f86275df 100644 --- a/node/libs/concurrency/src/ctx/mod.rs +++ b/node/libs/concurrency/src/ctx/mod.rs @@ -26,12 +26,14 @@ use std::{fmt, future::Future, pin::Pin, sync::Arc, task}; pub mod channel; mod clock; +mod no_copy; mod rng; mod testonly; #[cfg(test)] mod tests; pub use clock::*; +pub use no_copy::NoCopy; pub use testonly::*; /// Contexts are composed into a tree via `_parent` link. diff --git a/node/libs/utils/src/no_copy.rs b/node/libs/concurrency/src/ctx/no_copy.rs similarity index 50% rename from node/libs/utils/src/no_copy.rs rename to node/libs/concurrency/src/ctx/no_copy.rs index ebef1b33..63744bbe 100644 --- a/node/libs/utils/src/no_copy.rs +++ b/node/libs/concurrency/src/ctx/no_copy.rs @@ -1,25 +1,17 @@ //! No-copy wrapper allowing to carry a `Copy` type into a closure or an `async` block. -use std::ops; - /// No-copy wrapper allowing to carry a `Copy` type into a closure or an `async` block. #[derive(Clone, Debug)] -pub struct NoCopy(T); +pub struct NoCopy(pub T); -impl NoCopy { - /// Converts this wrapper to the contained value. - pub fn into_inner(self) -> T { +impl NoCopy { + /// Extracts the wrapped value. 
+ pub fn into(self) -> T { self.0 } } -impl From for NoCopy { - fn from(value: T) -> Self { - Self(value) - } -} - -impl ops::Deref for NoCopy { +impl std::ops::Deref for NoCopy { type Target = T; fn deref(&self) -> &T { diff --git a/node/libs/concurrency/src/testonly.rs b/node/libs/concurrency/src/testonly.rs index c726eef9..67870101 100644 --- a/node/libs/concurrency/src/testonly.rs +++ b/node/libs/concurrency/src/testonly.rs @@ -38,6 +38,7 @@ pub fn abort_on_panic() { /// Guard which has to be dropped before timeout is reached. /// Otherwise the test will panic. #[allow(unused_tuple_struct_fields)] +#[must_use] pub struct TimeoutGuard(std::sync::mpsc::Sender<()>); /// Panics if (real time) timeout is reached before ctx is canceled. diff --git a/node/libs/roles/Cargo.toml b/node/libs/roles/Cargo.toml index 2af1edf9..89122a76 100644 --- a/node/libs/roles/Cargo.toml +++ b/node/libs/roles/Cargo.toml @@ -22,8 +22,11 @@ serde.workspace = true thiserror.workspace = true tracing.workspace = true +[dev-dependencies] +assert_matches.workspace = true + [build-dependencies] zksync_protobuf_build.workspace = true [lints] -workspace = true \ No newline at end of file +workspace = true diff --git a/node/libs/roles/src/proto/validator.proto b/node/libs/roles/src/proto/validator.proto index 9393369b..f1ec2438 100644 --- a/node/libs/roles/src/proto/validator.proto +++ b/node/libs/roles/src/proto/validator.proto @@ -4,6 +4,21 @@ package zksync.roles.validator; import "zksync/std.proto"; +message Fork { + optional uint64 number = 1; // required; ForkId + optional uint64 first_block = 2; // required; BlockNumber + optional BlockHeaderHash first_parent = 3; // optional +} + +message Genesis { + optional Fork fork = 1; // required + repeated PublicKey validators = 2; +} + +message GenesisHash { + optional bytes keccak256 = 1; // required +} + message PayloadHash { optional bytes keccak256 = 1; // required } @@ -14,7 +29,7 @@ message BlockHeaderHash { message BlockHeader { // Hash of the parent Block. - optional BlockHeaderHash parent = 2; // required + optional BlockHeaderHash parent = 2; // optional // Sequential number of the block = parent.number + 1. optional uint64 number = 3; // required // Hash of the block payload. 
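// A textproto sketch of the new Genesis message introduced above (values are
// illustrative only; `first_parent` is optional and omitted here):
//
//   fork {
//     number: 0
//     first_block: 100
//   }
//   # `validators` carries one PublicKey entry per validator.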
@@ -26,6 +41,12 @@ message FinalBlock { optional CommitQC justification = 2; // required } +message View { + optional uint32 protocol_version = 1; // required; ProtocolVersion + optional uint64 fork = 2; // required; ForkId + optional uint64 number = 3; // required; ViewNumber +} + message ConsensusMsg { oneof t { // required ReplicaPrepare replica_prepare = 1; @@ -36,32 +57,28 @@ message ConsensusMsg { } message ReplicaPrepare { - optional uint32 protocol_version = 4; // required - optional uint64 view = 1; // required - optional ReplicaCommit high_vote = 2; // required - optional CommitQC high_qc = 3; // required + optional View view = 1; // required + optional ReplicaCommit high_vote = 2; // optional + optional CommitQC high_qc = 3; // optional } message ReplicaCommit { - optional uint32 protocol_version = 3; // required - optional uint64 view = 1; // required + optional View view = 1; // required optional BlockHeader proposal = 2; // required } message LeaderPrepare { - optional uint32 protocol_version = 5; // required - optional uint64 view = 1; // required - optional BlockHeader proposal = 2; // required - optional bytes proposal_payload = 3; // optional (depending on justification) - optional PrepareQC justification = 4; // required + optional BlockHeader proposal = 1; // required + optional bytes proposal_payload = 2; // optional (depending on justification) + optional PrepareQC justification = 3; // required } message LeaderCommit { - optional uint32 protocol_version = 2; // required optional CommitQC justification = 1; // required } message PrepareQC { + optional View view = 4; // required repeated ReplicaPrepare msgs = 1; // required repeated std.BitVector signers = 2; // required optional AggregateSignature sig = 3; // required diff --git a/node/libs/roles/src/validator/conv.rs b/node/libs/roles/src/validator/conv.rs index 33748b43..04886834 100644 --- a/node/libs/roles/src/validator/conv.rs +++ b/node/libs/roles/src/validator/conv.rs @@ -1,15 +1,68 @@ use super::{ AggregateSignature, BlockHeader, BlockHeaderHash, BlockNumber, CommitQC, ConsensusMsg, - FinalBlock, LeaderCommit, LeaderPrepare, Msg, MsgHash, NetAddress, Payload, PayloadHash, Phase, - PrepareQC, ProtocolVersion, PublicKey, ReplicaCommit, ReplicaPrepare, Signature, Signed, - Signers, ViewNumber, + FinalBlock, Fork, ForkNumber, Genesis, GenesisHash, LeaderCommit, LeaderPrepare, Msg, MsgHash, + NetAddress, Payload, PayloadHash, Phase, PrepareQC, ProtocolVersion, PublicKey, ReplicaCommit, + ReplicaPrepare, Signature, Signed, Signers, ValidatorSet, View, ViewNumber, }; use crate::{node::SessionId, proto::validator as proto}; use anyhow::Context as _; use std::collections::BTreeMap; use zksync_consensus_crypto::ByteFmt; use zksync_consensus_utils::enum_util::Variant; -use zksync_protobuf::{read_required, required, ProtoFmt}; +use zksync_protobuf::{read_optional, read_required, required, ProtoFmt}; + +impl ProtoFmt for Fork { + type Proto = proto::Fork; + fn read(r: &Self::Proto) -> anyhow::Result { + Ok(Self { + number: ForkNumber(*required(&r.number).context("number")?), + first_block: BlockNumber(*required(&r.first_block).context("first_block")?), + first_parent: read_optional(&r.first_parent).context("first_parent")?, + }) + } + fn build(&self) -> Self::Proto { + Self::Proto { + number: Some(self.number.0), + first_block: Some(self.first_block.0), + first_parent: self.first_parent.as_ref().map(|x| x.build()), + } + } +} + +impl ProtoFmt for Genesis { + type Proto = proto::Genesis; + fn read(r: &Self::Proto) -> 
anyhow::Result { + let validators: Vec<_> = r + .validators + .iter() + .enumerate() + .map(|(i, v)| PublicKey::read(v).context(i)) + .collect::>() + .context("validators")?; + Ok(Self { + fork: read_required(&r.fork).context("fork")?, + validators: ValidatorSet::new(validators.into_iter()).context("validators")?, + }) + } + fn build(&self) -> Self::Proto { + Self::Proto { + fork: Some(self.fork.build()), + validators: self.validators.iter().map(|x| x.build()).collect(), + } + } +} + +impl ProtoFmt for GenesisHash { + type Proto = proto::GenesisHash; + fn read(r: &Self::Proto) -> anyhow::Result { + Ok(Self(ByteFmt::decode(required(&r.keccak256)?)?)) + } + fn build(&self) -> Self::Proto { + Self::Proto { + keccak256: Some(self.0.encode()), + } + } +} impl ProtoFmt for BlockHeaderHash { type Proto = proto::BlockHeaderHash; @@ -39,14 +92,14 @@ impl ProtoFmt for BlockHeader { type Proto = proto::BlockHeader; fn read(r: &Self::Proto) -> anyhow::Result { Ok(Self { - parent: read_required(&r.parent).context("parent")?, - number: BlockNumber(r.number.context("number")?), + parent: read_optional(&r.parent).context("parent")?, + number: BlockNumber(*required(&r.number).context("number")?), payload: read_required(&r.payload).context("payload")?, }) } fn build(&self) -> Self::Proto { Self::Proto { - parent: Some(self.parent.build()), + parent: self.parent.as_ref().map(ProtoFmt::build), number: Some(self.number.0), payload: Some(self.payload.build()), } @@ -99,24 +152,42 @@ impl ProtoFmt for ConsensusMsg { } } -impl ProtoFmt for ReplicaPrepare { - type Proto = proto::ReplicaPrepare; +impl ProtoFmt for View { + type Proto = proto::View; fn read(r: &Self::Proto) -> anyhow::Result { Ok(Self { protocol_version: ProtocolVersion(r.protocol_version.context("protocol_version")?), - view: ViewNumber(*required(&r.view).context("view")?), - high_vote: read_required(&r.high_vote).context("high_vote")?, - high_qc: read_required(&r.high_qc).context("high_qc")?, + fork: ForkNumber(*required(&r.fork).context("fork")?), + number: ViewNumber(*required(&r.number).context("number")?), }) } fn build(&self) -> Self::Proto { Self::Proto { protocol_version: Some(self.protocol_version.0), - view: Some(self.view.0), - high_vote: Some(self.high_vote.build()), - high_qc: Some(self.high_qc.build()), + fork: Some(self.fork.0), + number: Some(self.number.0), + } + } +} + +impl ProtoFmt for ReplicaPrepare { + type Proto = proto::ReplicaPrepare; + + fn read(r: &Self::Proto) -> anyhow::Result { + Ok(Self { + view: read_required(&r.view).context("view")?, + high_vote: read_optional(&r.high_vote).context("high_vote")?, + high_qc: read_optional(&r.high_qc).context("high_qc")?, + }) + } + + fn build(&self) -> Self::Proto { + Self::Proto { + view: Some(self.view.build()), + high_vote: self.high_vote.as_ref().map(ProtoFmt::build), + high_qc: self.high_qc.as_ref().map(ProtoFmt::build), } } } @@ -126,16 +197,14 @@ impl ProtoFmt for ReplicaCommit { fn read(r: &Self::Proto) -> anyhow::Result { Ok(Self { - protocol_version: ProtocolVersion(r.protocol_version.context("protocol_version")?), - view: ViewNumber(*required(&r.view).context("view")?), + view: read_required(&r.view).context("view")?, proposal: read_required(&r.proposal).context("proposal")?, }) } fn build(&self) -> Self::Proto { Self::Proto { - protocol_version: Some(self.protocol_version.0), - view: Some(self.view.0), + view: Some(self.view.build()), proposal: Some(self.proposal.build()), } } @@ -146,8 +215,6 @@ impl ProtoFmt for LeaderPrepare { fn read(r: &Self::Proto) -> 
anyhow::Result { Ok(Self { - protocol_version: ProtocolVersion(r.protocol_version.context("protocol_version")?), - view: ViewNumber(*required(&r.view).context("view")?), proposal: read_required(&r.proposal).context("proposal")?, proposal_payload: r.proposal_payload.as_ref().map(|p| Payload(p.clone())), justification: read_required(&r.justification).context("justification")?, @@ -156,8 +223,6 @@ impl ProtoFmt for LeaderPrepare { fn build(&self) -> Self::Proto { Self::Proto { - protocol_version: Some(self.protocol_version.0), - view: Some(self.view.0), proposal: Some(self.proposal.build()), proposal_payload: self.proposal_payload.as_ref().map(|p| p.0.clone()), justification: Some(self.justification.build()), @@ -170,14 +235,12 @@ impl ProtoFmt for LeaderCommit { fn read(r: &Self::Proto) -> anyhow::Result { Ok(Self { - protocol_version: ProtocolVersion(r.protocol_version.context("protocol_version")?), justification: read_required(&r.justification).context("justification")?, }) } fn build(&self) -> Self::Proto { Self::Proto { - protocol_version: Some(self.protocol_version.0), justification: Some(self.justification.build()), } } @@ -209,6 +272,7 @@ impl ProtoFmt for PrepareQC { } Ok(Self { + view: read_required(&r.view).context("view")?, map, signature: read_required(&r.sig).context("sig")?, }) @@ -222,6 +286,7 @@ impl ProtoFmt for PrepareQC { .unzip(); Self::Proto { + view: Some(self.view.build()), msgs, signers, sig: Some(self.signature.build()), diff --git a/node/libs/roles/src/validator/messages/block.rs b/node/libs/roles/src/validator/messages/block.rs index 5ea7f486..261035e5 100644 --- a/node/libs/roles/src/validator/messages/block.rs +++ b/node/libs/roles/src/validator/messages/block.rs @@ -1,6 +1,6 @@ //! Messages related to blocks. -use super::CommitQC; +use super::{CommitQC, CommitQCVerifyError}; use std::fmt; use zksync_consensus_crypto::{keccak256::Keccak256, ByteFmt, Text, TextFmt}; @@ -64,8 +64,8 @@ impl BlockNumber { } /// Returns the previous block number. - pub fn prev(self) -> Self { - Self(self.0 - 1) + pub fn prev(self) -> Option { + Some(Self(self.0.checked_sub(1)?)) } } @@ -80,11 +80,6 @@ impl fmt::Display for BlockNumber { pub struct BlockHeaderHash(pub(crate) Keccak256); impl BlockHeaderHash { - /// Constant that the parent of the genesis block should be set to. - pub fn genesis_parent() -> Self { - Self(Keccak256::default()) - } - /// Interprets the specified `bytes` as a block header hash digest (i.e., a reverse operation to [`Self::as_bytes()`]). /// It is caller's responsibility to ensure that `bytes` are actually a block header hash digest. pub fn from_bytes(bytes: [u8; 32]) -> Self { @@ -122,7 +117,7 @@ impl fmt::Debug for BlockHeaderHash { #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct BlockHeader { /// Hash of the parent block. - pub parent: BlockHeaderHash, + pub parent: Option, /// Number of the block. pub number: BlockNumber, /// Payload of the block. @@ -135,19 +130,10 @@ impl BlockHeader { BlockHeaderHash(Keccak256::new(&zksync_protobuf::canonical(self))) } - /// Creates a genesis block. - pub fn genesis(payload: PayloadHash, number: BlockNumber) -> Self { - Self { - parent: BlockHeaderHash::genesis_parent(), - number, - payload, - } - } - /// Creates a child block for the given parent. 
-    pub fn new(parent: &BlockHeader, payload: PayloadHash) -> Self {
+    pub fn next(parent: &BlockHeader, payload: PayloadHash) -> Self {
         Self {
-            parent: parent.hash(),
+            parent: Some(parent.hash()),
             number: parent.number.next(),
             payload,
         }
     }
@@ -166,7 +152,7 @@ pub struct FinalBlock {
 impl FinalBlock {
     /// Creates a new finalized block.
     pub fn new(payload: Payload, justification: CommitQC) -> Self {
-        assert_eq!(justification.message.proposal.payload, payload.hash());
+        assert_eq!(justification.header().payload, payload.hash());
         Self {
             payload,
             justification,
@@ -178,12 +164,13 @@ impl FinalBlock {
         &self.justification.message.proposal
     }
 
-    /// Validates internal consistency of this block.
-    pub fn validate(
-        &self,
-        validators: &super::ValidatorSet,
-        consensus_threshold: usize,
-    ) -> Result<(), BlockValidationError> {
+    /// Number of the block.
+    pub fn number(&self) -> BlockNumber {
+        self.header().number
+    }
+
+    /// Verifies internal consistency of this block.
+    pub fn verify(&self, genesis: &super::Genesis) -> Result<(), BlockValidationError> {
         let payload_hash = self.payload.hash();
         if payload_hash != self.header().payload {
             return Err(BlockValidationError::HashMismatch {
@@ -192,7 +179,7 @@
             });
         }
         self.justification
-            .verify(validators, consensus_threshold)
+            .verify(genesis)
             .map_err(BlockValidationError::Justification)
     }
 }
@@ -234,5 +221,5 @@ pub enum BlockValidationError {
     },
     /// Failed verifying quorum certificate.
     #[error("failed verifying quorum certificate: {0:#?}")]
-    Justification(#[source] anyhow::Error),
+    Justification(#[source] CommitQCVerifyError),
 }
diff --git a/node/libs/roles/src/validator/messages/consensus.rs b/node/libs/roles/src/validator/messages/consensus.rs
index 927f26f9..00ec37cb 100644
--- a/node/libs/roles/src/validator/messages/consensus.rs
+++ b/node/libs/roles/src/validator/messages/consensus.rs
@@ -1,11 +1,15 @@
 //! Messages related to the consensus protocol.
-
-use super::{BlockHeader, Msg, Payload, Signed};
-use crate::{validator, validator::Signature};
-use anyhow::bail;
+use super::{
+    BlockHeaderHash, BlockNumber, LeaderCommit, LeaderPrepare, Msg, ReplicaCommit, ReplicaPrepare,
+};
+use crate::validator;
 use bit_vec::BitVec;
 use serde::Serialize;
-use std::collections::{BTreeMap, BTreeSet};
+use std::{
+    collections::{BTreeMap, BTreeSet},
+    fmt,
+};
+use zksync_consensus_crypto::{keccak256::Keccak256, ByteFmt, Text, TextFmt};
 use zksync_consensus_utils::enum_util::{BadVariantError, Variant};
 
 /// Version of the consensus algorithm that the validator is using.
@@ -41,6 +45,166 @@ impl TryFrom<u32> for ProtocolVersion {
     }
 }
 
+/// Number of the fork. Newer fork has higher number.
+#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
+pub struct ForkNumber(pub u64);
+
+impl ForkNumber {
+    /// Next fork number.
+    pub fn next(self) -> Self {
+        Self(self.0 + 1)
+    }
+}
+
+/// Specification of a fork.
+#[derive(Clone, Debug, PartialEq)]
+pub struct Fork {
+    /// Number of the fork.
+    pub number: ForkNumber,
+    /// First block of a fork.
+    pub first_block: BlockNumber,
+    /// Parent of the first block of a fork.
+    pub first_parent: Option<BlockHeaderHash>,
+}
+
+impl Default for Fork {
+    fn default() -> Self {
+        Self {
+            number: ForkNumber(0),
+            first_block: BlockNumber(0),
+            first_parent: None,
+        }
+    }
+}
+
+/// A struct that represents a set of validators. It is used to store the current validator set.
+/// We represent each validator by its validator public key.
+#[derive(Clone, Debug, PartialEq, Eq)]
+pub struct ValidatorSet {
+    vec: Vec<validator::PublicKey>,
+    map: BTreeMap<validator::PublicKey, usize>,
+}
+
+impl ValidatorSet {
+    /// Creates a new ValidatorSet from a list of validator public keys.
+    pub fn new(validators: impl IntoIterator<Item = validator::PublicKey>) -> anyhow::Result<Self> {
+        let mut set = BTreeSet::new();
+        for validator in validators {
+            anyhow::ensure!(set.insert(validator), "Duplicate validator in ValidatorSet");
+        }
+        anyhow::ensure!(
+            !set.is_empty(),
+            "ValidatorSet must contain at least one validator"
+        );
+        Ok(Self {
+            vec: set.iter().cloned().collect(),
+            map: set.into_iter().enumerate().map(|(i, pk)| (pk, i)).collect(),
+        })
+    }
+
+    /// Iterates over validators.
+    pub fn iter(&self) -> impl Iterator<Item = &validator::PublicKey> {
+        self.vec.iter()
+    }
+
+    /// Returns the number of validators.
+    #[allow(clippy::len_without_is_empty)] // a valid `ValidatorSet` is always non-empty by construction
+    pub fn len(&self) -> usize {
+        self.vec.len()
+    }
+
+    /// Returns true if the given validator is in the validator set.
+    pub fn contains(&self, validator: &validator::PublicKey) -> bool {
+        self.map.contains_key(validator)
+    }
+
+    /// Get validator by its index in the set.
+    pub fn get(&self, index: usize) -> Option<&validator::PublicKey> {
+        self.vec.get(index)
+    }
+
+    /// Get the index of a validator in the set.
+    pub fn index(&self, validator: &validator::PublicKey) -> Option<usize> {
+        self.map.get(validator).copied()
+    }
+
+    /// Computes the leader of the given view.
+    pub fn view_leader(&self, view_number: ViewNumber) -> validator::PublicKey {
+        let index = view_number.0 as usize % self.len();
+        self.get(index).unwrap().clone()
+    }
+
+    /// Signature threshold for this validator set.
+    pub fn threshold(&self) -> usize {
+        threshold(self.len())
+    }
+
+    /// Maximal number of faulty replicas allowed in this validator set.
+    pub fn faulty_replicas(&self) -> usize {
+        faulty_replicas(self.len())
+    }
+}
+
+/// Calculate the consensus threshold, the minimum number of votes for any consensus action to be valid,
+/// for a given number of replicas.
+pub fn threshold(n: usize) -> usize {
+    n - faulty_replicas(n)
+}
+
+/// Calculate the maximum number of faulty replicas, for a given number of replicas.
+pub fn faulty_replicas(n: usize) -> usize {
+    // Calculate the allowed maximum number of faulty replicas. We want the following relationship to hold:
+    //      n = 5*f + 1
+    // for n total replicas and f faulty replicas. This results in the following formula for the maximum
+    // number of faulty replicas:
+    //      f = floor((n - 1) / 5)
+    // Because of this, it doesn't make sense to have 5*f + 2 or 5*f + 3 replicas. It won't increase the number
+    // of allowed faulty replicas.
+    (n - 1) / 5
+}
+
+/// Genesis of the blockchain, unique for each blockchain instance.
+#[derive(Debug, Clone, PartialEq)]
+pub struct Genesis {
+    // TODO(gprusak): add blockchain id here.
+    /// Set of validators of the chain.
+    pub validators: ValidatorSet,
+    /// Fork of the chain to follow.
+    pub fork: Fork,
+}
+
+/// Hash of the genesis specification.
+#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
+pub struct GenesisHash(pub(crate) Keccak256);
+
+impl Genesis {
+    /// Hash of the genesis.
+    pub fn hash(&self) -> GenesisHash {
+        GenesisHash(Keccak256::new(&zksync_protobuf::canonical(self)))
+    }
+}
+
+impl TextFmt for GenesisHash {
+    fn decode(text: Text) -> anyhow::Result<Self> {
+        text.strip("genesis_hash:keccak256:")?
+ .decode_hex() + .map(Self) + } + + fn encode(&self) -> String { + format!( + "genesis_hash:keccak256:{}", + hex::encode(ByteFmt::encode(&self.0)) + ) + } +} + +impl fmt::Debug for GenesisHash { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt.write_str(&TextFmt::encode(self)) + } +} + /// Consensus messages. #[allow(missing_docs)] #[derive(Clone, Debug, PartialEq, Eq)] @@ -62,15 +226,20 @@ impl ConsensusMsg { } } - /// Protocol version of this message. - pub fn protocol_version(&self) -> ProtocolVersion { + /// View of this message. + pub fn view(&self) -> &View { match self { - Self::ReplicaPrepare(m) => m.protocol_version, - Self::ReplicaCommit(m) => m.protocol_version, - Self::LeaderPrepare(m) => m.protocol_version, - Self::LeaderCommit(m) => m.protocol_version, + Self::ReplicaPrepare(m) => &m.view, + Self::ReplicaCommit(m) => &m.view, + Self::LeaderPrepare(m) => m.view(), + Self::LeaderCommit(m) => m.view(), } } + + /// Protocol version of this message. + pub fn protocol_version(&self) -> ProtocolVersion { + self.view().protocol_version + } } impl Variant for ReplicaPrepare { @@ -121,225 +290,21 @@ impl Variant for LeaderCommit { } } -/// A Prepare message from a replica. -#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)] -pub struct ReplicaPrepare { - /// Protocol version. - pub protocol_version: ProtocolVersion, - /// The number of the current view. - pub view: ViewNumber, - /// The highest block that the replica has committed to. - pub high_vote: ReplicaCommit, - /// The highest CommitQC that the replica has seen. - pub high_qc: CommitQC, -} - -/// A Commit message from a replica. -#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] -pub struct ReplicaCommit { +/// View specification. +#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct View { /// Protocol version. pub protocol_version: ProtocolVersion, + /// Fork this message belongs to. + pub fork: ForkNumber, /// The number of the current view. - pub view: ViewNumber, - /// The header of the block that the replica is committing to. - pub proposal: BlockHeader, + pub number: ViewNumber, } -/// A Prepare message from a leader. -#[derive(Clone, Debug, PartialEq, Eq)] -pub struct LeaderPrepare { - /// Protocol version. - pub protocol_version: ProtocolVersion, - /// The number of the current view. - pub view: ViewNumber, - /// The header of the block that the leader is proposing. - pub proposal: BlockHeader, - /// Payload of the block that the leader is proposing. - /// `None` iff this is a reproposal. - pub proposal_payload: Option, - /// The PrepareQC that justifies this proposal from the leader. - pub justification: PrepareQC, -} - -/// A Commit message from a leader. -#[derive(Clone, Debug, PartialEq, Eq)] -pub struct LeaderCommit { - /// Protocol version. - pub protocol_version: ProtocolVersion, - /// The CommitQC that justifies the message from the leader. - pub justification: CommitQC, -} - -/// A quorum certificate of replica Prepare messages. Since not all Prepare messages are -/// identical (they have different high blocks and high QCs), we need to keep the high blocks -/// and high QCs in a map. We can still aggregate the signatures though. -#[derive(Clone, Debug, PartialEq, Eq, Default)] -pub struct PrepareQC { - /// Map from replica Prepare messages to the validators that signed them. - pub map: BTreeMap, - /// Aggregate signature of the replica Prepare messages. - pub signature: validator::AggregateSignature, -} - -impl PrepareQC { - /// View of the QC. 
- pub fn view(&self) -> ViewNumber { - self.map - .keys() - .map(|k| k.view) - .next() - .unwrap_or(ViewNumber(0)) - } - - /// Add a validator's signed message. - /// * `signed_message` - A valid signed `ReplicaPrepare` message. - /// * `validator_index` - The signer index in the validator set. - /// * `validator_set` - The validator set. - pub fn add( - &mut self, - signed_message: &Signed, - validator_index: usize, - validator_set: &ValidatorSet, - ) { - self.map - .entry(signed_message.msg.clone()) - .or_insert_with(|| Signers(BitVec::from_elem(validator_set.len(), false))) - .0 - .set(validator_index, true); - - self.signature.add(&signed_message.sig); - } - - /// Verifies the integrity of the PrepareQC. - pub fn verify( - &self, - view: ViewNumber, - validators: &ValidatorSet, - threshold: usize, - ) -> anyhow::Result<()> { - // First we check that all messages are for the same view number. - for msg in self.map.keys() { - if msg.view != view { - bail!("PrepareQC contains messages for different views!"); - } - } - - // Then we need to do some checks on the signers bit maps. - let mut bit_map = BitVec::from_elem(validators.len(), false); - let mut num_signers = 0; - - for signer_bitmap in self.map.values() { - let signers = signer_bitmap.0.clone(); - - if signers.len() != validators.len() { - bail!("Bit vector in PrepareQC has wrong length!"); - } - - if !signers.any() { - bail!("Empty bit vector in PrepareQC. We require at least one signer for every message!"); - } - - let mut intersection = bit_map.clone(); - intersection.and(&signers); - if intersection.any() { - bail!("Bit vectors in PrepareQC are not disjoint. We require that every validator signs at most one message!"); - } - bit_map.or(&signers); - - num_signers += signers.iter().filter(|b| *b).count(); - } - - // Verify that we have enough signers. - // TODO(gprusak): how about num_signers == threshold to make the certificates more uniform? - if num_signers < threshold { - bail!( - "Insufficient signers in PrepareQC.\nNumber of signers: {}\nThreshold: {}", - num_signers, - threshold - ); - } - - // Now we can verify the signature. - let messages_and_keys = self.map.clone().into_iter().flat_map(|(msg, signers)| { - validators - .iter() - .enumerate() - .filter(|(i, _)| signers.0[*i]) - .map(|(_, pk)| (msg.clone(), pk)) - .collect::>() - }); - - Ok(self.signature.verify_messages(messages_and_keys)?) - } -} - -/// A Commit Quorum Certificate. It is an aggregate of signed replica Commit messages. -/// The Quorum Certificate is supposed to be over identical messages, so we only need one message. -#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)] -pub struct CommitQC { - /// The replica Commit message that the QC is for. - pub message: ReplicaCommit, - /// The validators that signed this message. - pub signers: Signers, - /// The aggregate signature of the signed replica messages. - pub signature: validator::AggregateSignature, -} - -impl CommitQC { - /// Header of the certified block. - pub fn header(&self) -> &BlockHeader { - &self.message.proposal - } - - /// Create a new empty instance for a given `ReplicaCommit` message and a validator set size. - pub fn new(message: ReplicaCommit, validator_set: &ValidatorSet) -> Self { - Self { - message, - signers: Signers(BitVec::from_elem(validator_set.len(), false)), - signature: validator::AggregateSignature::default(), - } - } - - /// Add a validator's signature. - /// * `sig` - A valid signature. - /// * `validator_index` - The signer index in the validator set. 
- pub fn add(&mut self, sig: &Signature, validator_index: usize) { - self.signers.0.set(validator_index, true); - self.signature.add(sig); - } - - /// Verifies the signature of the CommitQC. - pub fn verify(&self, validators: &ValidatorSet, threshold: usize) -> anyhow::Result<()> { - let signers = self.signers.0.clone(); - - // First we to do some checks on the signers bit map. - if signers.len() != validators.len() { - bail!("Bit vector in CommitQC has wrong length!"); - } - - if !signers.any() { - bail!("Empty bit vector in CommitQC. We require at least one signer!"); - } - - // Verify that we have enough signers. - let num_signers = signers.iter().filter(|b| *b).count(); - - if num_signers < threshold { - bail!( - "Insufficient signers in CommitQC.\nNumber of signers: {}\nThreshold: {}", - num_signers, - threshold - ); - } - - // Now we can verify the signature. - let messages_and_keys = validators - .iter() - .enumerate() - .filter(|(i, _)| signers[*i]) - .map(|(_, pk)| (self.message, pk)); - - Ok(self.signature.verify_messages(messages_and_keys)?) +impl View { + /// Checks if `self` can occur after `b`. + pub fn after(&self, b: &Self) -> bool { + self.fork == b.fork && self.number > b.number && self.protocol_version >= b.protocol_version } } @@ -349,71 +314,46 @@ impl CommitQC { pub struct Signers(pub BitVec); impl Signers { - /// Returns the number of signers, i.e. the number of validators that signed + /// Constructs an empty signers set. + pub fn new(n: usize) -> Self { + Self(BitVec::from_elem(n, false)) + } + + /// Returns the number of signers, i.e. the number of validators that signed /// the particular message that this signer bitmap refers to. - pub fn len(&self) -> usize { + pub fn count(&self) -> usize { self.0.iter().filter(|b| *b).count() } + /// Size of the corresponding ValidatorSet. + pub fn len(&self) -> usize { + self.0.len() + } + /// Returns true if there are no signers. pub fn is_empty(&self) -> bool { self.0.none() } } -/// A struct that represents a set of validators. It is used to store the current validator set. -/// We represent each validator by its validator public key. -#[derive(Clone, Debug, PartialEq, Eq)] -pub struct ValidatorSet { - vec: Vec, - map: BTreeMap, -} - -impl ValidatorSet { - /// Creates a new ValidatorSet from a list of validator public keys. - pub fn new(validators: impl IntoIterator) -> anyhow::Result { - let mut set = BTreeSet::new(); - - for validator in validators { - if !set.insert(validator) { - bail!("Duplicate validator in ValidatorSet"); - } - } - - if set.is_empty() { - bail!("ValidatorSet must contain at least one validator"); - } - - Ok(Self { - vec: set.iter().cloned().collect(), - map: set.into_iter().enumerate().map(|(i, pk)| (pk, i)).collect(), - }) - } - - /// Iterates over validators. - pub fn iter(&self) -> impl Iterator { - self.vec.iter() - } - - /// Returns the number of validators. - #[allow(clippy::len_without_is_empty)] // a valid `ValidatorSet` is always non-empty by construction - pub fn len(&self) -> usize { - self.vec.len() - } - - /// Returns true if the given validator is in the validator set. - pub fn contains(&self, validator: &validator::PublicKey) -> bool { - self.map.contains_key(validator) +impl std::ops::BitOrAssign<&Self> for Signers { + fn bitor_assign(&mut self, other: &Self) { + self.0.or(&other.0); } +} - /// Get validator by its index in the set. 
- pub fn get(&self, index: usize) -> Option<&validator::PublicKey> { - self.vec.get(index) +impl std::ops::BitAndAssign<&Self> for Signers { + fn bitand_assign(&mut self, other: &Self) { + self.0.and(&other.0); } +} - /// Get the index of a validator in the set. - pub fn index(&self, validator: &validator::PublicKey) -> Option { - self.map.get(validator).copied() +impl std::ops::BitAnd for &Signers { + type Output = Signers; + fn bitand(self, other: Self) -> Signers { + let mut this = self.clone(); + this &= other; + this } } @@ -426,11 +366,6 @@ impl ViewNumber { pub fn next(self) -> Self { Self(self.0 + 1) } - - /// Get the previous view number. - pub fn prev(self) -> Self { - Self(self.0 - 1) - } } /// An enum that represents the current phase of the consensus. diff --git a/node/libs/roles/src/validator/messages/leader_commit.rs b/node/libs/roles/src/validator/messages/leader_commit.rs new file mode 100644 index 00000000..f9eb7aa9 --- /dev/null +++ b/node/libs/roles/src/validator/messages/leader_commit.rs @@ -0,0 +1,125 @@ +use super::{BlockHeader, Genesis, ReplicaCommit, Signed, Signers, View}; +use crate::validator; + +/// A Commit message from a leader. +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct LeaderCommit { + /// The CommitQC that justifies the message from the leader. + pub justification: CommitQC, +} + +impl LeaderCommit { + /// Verifies LeaderCommit. + pub fn verify(&self, genesis: &Genesis) -> Result<(), CommitQCVerifyError> { + self.justification.verify(genesis) + } + + /// View of this message. + pub fn view(&self) -> &View { + self.justification.view() + } +} + +/// A Commit Quorum Certificate. It is an aggregate of signed replica Commit messages. +/// The Quorum Certificate is supposed to be over identical messages, so we only need one message. +#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)] +pub struct CommitQC { + /// The replica Commit message that the QC is for. + pub message: ReplicaCommit, + /// The validators that signed this message. + pub signers: Signers, + /// The aggregate signature of the signed replica messages. + pub signature: validator::AggregateSignature, +} + +/// Error returned by `CommitQc::verify()`. +#[derive(thiserror::Error, Debug)] +pub enum CommitQCVerifyError { + /// Invalid message. + #[error("invalid message: {0:#}")] + InvalidMessage(#[source] anyhow::Error), + /// Bad signer set. + #[error("signers set doesn't match genesis")] + BadSignersSet, + /// Not enough signers. + #[error("not enough signers: got {got}, want {want}")] + NotEnoughSigners { + /// Got signers. + got: usize, + /// Want signers. + want: usize, + }, + /// Bad signature. + #[error("bad signature: {0:#}")] + BadSignature(#[source] validator::Error), +} + +impl CommitQC { + /// Header of the certified block. + pub fn header(&self) -> &BlockHeader { + &self.message.proposal + } + + /// View of this QC. + pub fn view(&self) -> &View { + &self.message.view + } + + /// Create a new empty instance for a given `ReplicaCommit` message and a validator set size. + pub fn new(message: ReplicaCommit, genesis: &Genesis) -> Self { + Self { + message, + signers: Signers::new(genesis.validators.len()), + signature: validator::AggregateSignature::default(), + } + } + + /// Add a validator's signature. + /// Signature is assumed to be already verified. 
+    pub fn add(&mut self, msg: &Signed<ReplicaCommit>, genesis: &Genesis) {
+        if self.message != msg.msg {
+            return;
+        };
+        let Some(i) = genesis.validators.index(&msg.key) else {
+            return;
+        };
+        if self.signers.0[i] {
+            return;
+        };
+        self.signers.0.set(i, true);
+        self.signature.add(&msg.sig);
+    }
+
+    /// Verifies the signature of the CommitQC.
+    pub fn verify(&self, genesis: &Genesis) -> Result<(), CommitQCVerifyError> {
+        use CommitQCVerifyError as Error;
+        self.message
+            .verify(genesis)
+            .map_err(Error::InvalidMessage)?;
+        if self.signers.len() != genesis.validators.len() {
+            return Err(Error::BadSignersSet);
+        }
+
+        // Verify that we have enough signers.
+        let num_signers = self.signers.count();
+        let threshold = genesis.validators.threshold();
+        if num_signers < threshold {
+            return Err(Error::NotEnoughSigners {
+                got: num_signers,
+                want: threshold,
+            });
+        }
+
+        // Now we can verify the signature.
+        let messages_and_keys = genesis
+            .validators
+            .iter()
+            .enumerate()
+            .filter(|(i, _)| self.signers.0[*i])
+            .map(|(_, pk)| (self.message.clone(), pk));
+
+        self.signature
+            .verify_messages(messages_and_keys)
+            .map_err(Error::BadSignature)
+    }
+}
diff --git a/node/libs/roles/src/validator/messages/leader_prepare.rs b/node/libs/roles/src/validator/messages/leader_prepare.rs
new file mode 100644
index 00000000..15580bb3
--- /dev/null
+++ b/node/libs/roles/src/validator/messages/leader_prepare.rs
@@ -0,0 +1,267 @@
+use super::{
+    BlockHeader, BlockHeaderHash, BlockNumber, CommitQC, Genesis, Payload, ReplicaPrepare,
+    ReplicaPrepareVerifyError, Signed, Signers, View,
+};
+use crate::validator;
+use std::collections::{BTreeMap, HashMap};
+
+/// A quorum certificate of replica Prepare messages. Since not all Prepare messages are
+/// identical (they have different high blocks and high QCs), we need to keep the high blocks
+/// and high QCs in a map. We can still aggregate the signatures though.
+#[derive(Clone, Debug, PartialEq, Eq)]
+pub struct PrepareQC {
+    /// View of this QC.
+    pub view: View,
+    /// Map from replica Prepare messages to the validators that signed them.
+    pub map: BTreeMap<ReplicaPrepare, Signers>,
+    /// Aggregate signature of the replica Prepare messages.
+    pub signature: validator::AggregateSignature,
+}
+
+/// Error returned by `PrepareQC::verify()`.
+#[derive(thiserror::Error, Debug)]
+pub enum PrepareQCVerifyError {
+    /// Inconsistent views.
+    #[error("inconsistent views of signed messages")]
+    InconsistentViews,
+    /// Invalid message.
+    #[error("msg[{0}]: {1:#}")]
+    InvalidMessage(usize, ReplicaPrepareVerifyError),
+    /// Bad message format.
+    #[error(transparent)]
+    BadFormat(anyhow::Error),
+    /// Not enough signers.
+    #[error("not enough signers: got {got}, want {want}")]
+    NotEnoughSigners {
+        /// Got signers.
+        got: usize,
+        /// Want signers.
+        want: usize,
+    },
+    /// Bad signature.
+    #[error("bad signature: {0:#}")]
+    BadSignature(validator::Error),
+}
+
+impl PrepareQC {
+    /// Create a new empty instance for a given view.
+    pub fn new(view: View) -> Self {
+        Self {
+            view,
+            map: BTreeMap::new(),
+            signature: validator::AggregateSignature::default(),
+        }
+    }
+
+    /// Get the highest block voted and check if there's a quorum of votes for it. To have a quorum
+    /// in this situation, we require 2*f+1 votes, where f is the maximum number of faulty replicas.
+ pub fn high_vote(&self, genesis: &Genesis) -> Option { + let mut count: HashMap<_, usize> = HashMap::new(); + for (msg, signers) in &self.map { + if let Some(v) = &msg.high_vote { + *count.entry(v.proposal).or_default() += signers.count(); + } + } + // We only take one value from the iterator because there can only be at most one block with a quorum of 2f+1 votes. + let min = 2 * genesis.validators.faulty_replicas() + 1; + count.into_iter().find(|x| x.1 >= min).map(|x| x.0) + } + + /// Get the highest CommitQC. + pub fn high_qc(&self) -> Option<&CommitQC> { + self.map + .keys() + .filter_map(|m| m.high_qc.as_ref()) + .max_by_key(|qc| qc.view().number) + } + + /// Add a validator's signed message. + /// Message is assumed to be already verified. + // TODO: check if there is already a message from that validator. + // TODO: verify the message inside instead. + pub fn add(&mut self, msg: &Signed, genesis: &Genesis) { + if msg.msg.view != self.view { + return; + } + let Some(i) = genesis.validators.index(&msg.key) else { + return; + }; + let e = self + .map + .entry(msg.msg.clone()) + .or_insert_with(|| Signers::new(genesis.validators.len())); + if e.0[i] { + return; + }; + e.0.set(i, true); + self.signature.add(&msg.sig); + } + + /// Verifies the integrity of the PrepareQC. + pub fn verify(&self, genesis: &Genesis) -> Result<(), PrepareQCVerifyError> { + use PrepareQCVerifyError as Error; + let mut sum = Signers::new(genesis.validators.len()); + // Check the ReplicaPrepare messages. + for (i, (msg, signers)) in self.map.iter().enumerate() { + if msg.view != self.view { + return Err(Error::InconsistentViews); + } + if signers.len() != sum.len() { + return Err(Error::BadFormat(anyhow::format_err!( + "msg[{i}].signers has wrong length" + ))); + } + if signers.is_empty() { + return Err(Error::BadFormat(anyhow::format_err!( + "msg[{i}] has no signers assigned" + ))); + } + if !(&sum & signers).is_empty() { + return Err(Error::BadFormat(anyhow::format_err!( + "overlapping signature sets for different messages" + ))); + } + msg.verify(genesis) + .map_err(|err| Error::InvalidMessage(i, err))?; + sum |= signers; + } + + // Verify that we have enough signers. + let threshold = genesis.validators.threshold(); + if sum.count() < threshold { + return Err(Error::NotEnoughSigners { + got: sum.count(), + want: threshold, + }); + } + // Now we can verify the signature. + let messages_and_keys = self.map.clone().into_iter().flat_map(|(msg, signers)| { + genesis + .validators + .iter() + .enumerate() + .filter(|(i, _)| signers.0[*i]) + .map(|(_, pk)| (msg.clone(), pk)) + .collect::>() + }); + // TODO(gprusak): This reaggregating is suboptimal. + self.signature + .verify_messages(messages_and_keys) + .map_err(Error::BadSignature) + } +} + +/// A Prepare message from a leader. +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct LeaderPrepare { + /// The header of the block that the leader is proposing. + pub proposal: BlockHeader, + /// Payload of the block that the leader is proposing. + /// `None` iff this is a reproposal. + pub proposal_payload: Option, + /// The PrepareQC that justifies this proposal from the leader. + pub justification: PrepareQC, +} + +/// Error returned by `LeaderPrepare::verify()`. +#[derive(thiserror::Error, Debug)] +pub enum LeaderPrepareVerifyError { + /// Justification + #[error("justification: {0:#}")] + Justification(PrepareQCVerifyError), + /// Bad block number. + #[error("bad block number: got {got:?}, want {want:?}")] + BadBlockNumber { + /// Correct proposal number. 
+ want: BlockNumber, + /// Received proposal number. + got: BlockNumber, + }, + /// Bad parent hash. + #[error("bad parent hash: got {got:?}, want {want:?}")] + BadParentHash { + /// Correct parent hash. + want: Option, + /// Received parent hash. + got: Option, + }, + /// New block proposal when the previous proposal was not finalized. + #[error("new block proposal when the previous proposal was not finalized")] + ProposalWhenPreviousNotFinalized, + /// Mismatched payload. + #[error("block proposal with mismatched payload")] + ProposalMismatchedPayload, + /// Re-proposal without quorum. + #[error("block re-proposal without quorum for the re-proposal")] + ReproposalWithoutQuorum, + /// Re-proposal when the previous proposal was finalized. + #[error("block re-proposal when the previous proposal was finalized")] + ReproposalWhenFinalized, + /// Reproposed a bad block. + #[error("Reproposed a bad block")] + ReproposalBadBlock, +} + +impl LeaderPrepare { + /// View of the message. + pub fn view(&self) -> &View { + &self.justification.view + } + + /// Verifies LeaderPrepare. + pub fn verify(&self, genesis: &Genesis) -> Result<(), LeaderPrepareVerifyError> { + use LeaderPrepareVerifyError as Error; + self.justification + .verify(genesis) + .map_err(Error::Justification)?; + let high_vote = self.justification.high_vote(genesis); + let high_qc = self.justification.high_qc(); + + // Check that the proposal is valid. + match &self.proposal_payload { + // The leader proposed a new block. + Some(payload) => { + // Check that payload matches the header + if self.proposal.payload != payload.hash() { + return Err(Error::ProposalMismatchedPayload); + } + // Check that we finalized the previous block. + if high_vote.is_some() + && high_vote.as_ref() != high_qc.map(|qc| &qc.message.proposal) + { + return Err(Error::ProposalWhenPreviousNotFinalized); + } + let (want_parent, want_number) = match high_qc { + Some(qc) => (Some(qc.header().hash()), qc.header().number.next()), + None => (genesis.fork.first_parent, genesis.fork.first_block), + }; + if self.proposal.parent != want_parent { + return Err(Error::BadParentHash { + got: self.proposal.parent, + want: want_parent, + }); + } + if self.proposal.number != want_number { + return Err(Error::BadBlockNumber { + got: self.proposal.number, + want: want_number, + }); + } + } + None => { + let Some(high_vote) = &high_vote else { + return Err(Error::ReproposalWithoutQuorum); + }; + if let Some(high_qc) = &high_qc { + if high_vote.number == high_qc.header().number { + return Err(Error::ReproposalWhenFinalized); + } + } + if high_vote != &self.proposal { + return Err(Error::ReproposalBadBlock); + } + } + } + Ok(()) + } +} diff --git a/node/libs/roles/src/validator/messages/mod.rs b/node/libs/roles/src/validator/messages/mod.rs index 9f328870..bcb34689 100644 --- a/node/libs/roles/src/validator/messages/mod.rs +++ b/node/libs/roles/src/validator/messages/mod.rs @@ -3,9 +3,17 @@ mod block; mod consensus; mod discovery; +mod leader_commit; +mod leader_prepare; mod msg; +mod replica_commit; +mod replica_prepare; pub use block::*; pub use consensus::*; pub use discovery::*; +pub use leader_commit::*; +pub use leader_prepare::*; pub use msg::*; +pub use replica_commit::*; +pub use replica_prepare::*; diff --git a/node/libs/roles/src/validator/messages/replica_commit.rs b/node/libs/roles/src/validator/messages/replica_commit.rs new file mode 100644 index 00000000..963f8f39 --- /dev/null +++ b/node/libs/roles/src/validator/messages/replica_commit.rs @@ -0,0 +1,25 @@ +use 
super::{BlockHeader, Genesis, View}; + +/// A Commit message from a replica. +#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct ReplicaCommit { + /// View of this message. + pub view: View, + /// The header of the block that the replica is committing to. + pub proposal: BlockHeader, +} + +impl ReplicaCommit { + /// Verifies the message. + pub fn verify(&self, genesis: &Genesis) -> anyhow::Result<()> { + anyhow::ensure!(self.view.fork == genesis.fork.number); + anyhow::ensure!(self.proposal.number >= genesis.fork.first_block); + if self.proposal.number == genesis.fork.first_block { + anyhow::ensure!( + self.proposal.parent == genesis.fork.first_parent, + "bad parent of the first block of the fork" + ); + } + Ok(()) + } +} diff --git a/node/libs/roles/src/validator/messages/replica_prepare.rs b/node/libs/roles/src/validator/messages/replica_prepare.rs new file mode 100644 index 00000000..830c1ac0 --- /dev/null +++ b/node/libs/roles/src/validator/messages/replica_prepare.rs @@ -0,0 +1,63 @@ +use super::{CommitQC, CommitQCVerifyError, ForkNumber, Genesis, ReplicaCommit, View}; + +/// A Prepare message from a replica. +#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)] +pub struct ReplicaPrepare { + /// View of this message. + pub view: View, + /// The highest block that the replica has committed to. + pub high_vote: Option, + /// The highest CommitQC that the replica has seen. + pub high_qc: Option, +} + +/// Error returned by `ReplicaPrepare::verify()`. +#[derive(thiserror::Error, Debug)] +pub enum ReplicaPrepareVerifyError { + /// BadFork. + #[error("bad fork: got {got:?}, want {want:?}")] + BadFork { + /// got + got: ForkNumber, + /// want + want: ForkNumber, + }, + /// FutureHighVoteView. + #[error("high vote from the future")] + HighVoteFutureView, + /// FutureHighQCView. + #[error("high qc from the future")] + HighQCFutureView, + /// HighVote. + #[error("high_vote: {0:#}")] + HighVote(anyhow::Error), + /// HighQC. + #[error("high_qc: {0:#}")] + HighQC(CommitQCVerifyError), +} + +impl ReplicaPrepare { + /// Verifies the message. + pub fn verify(&self, genesis: &Genesis) -> Result<(), ReplicaPrepareVerifyError> { + use ReplicaPrepareVerifyError as Error; + if self.view.fork != genesis.fork.number { + return Err(Error::BadFork { + got: self.view.fork, + want: genesis.fork.number, + }); + } + if let Some(v) = &self.high_vote { + if self.view.number <= v.view.number { + return Err(Error::HighVoteFutureView); + } + v.verify(genesis).map_err(Error::HighVote)?; + } + if let Some(qc) = &self.high_qc { + if self.view.number <= qc.view().number { + return Err(Error::HighQCFutureView); + } + qc.verify(genesis).map_err(Error::HighQC)?; + } + Ok(()) + } +} diff --git a/node/libs/roles/src/validator/mod.rs b/node/libs/roles/src/validator/mod.rs index 66ae9a12..2091a1ad 100644 --- a/node/libs/roles/src/validator/mod.rs +++ b/node/libs/roles/src/validator/mod.rs @@ -9,6 +9,3 @@ mod messages; pub mod testonly; pub use self::{keys::*, messages::*}; -// TODO(gprusak): it should be ok to have an unsigned -// genesis. For now we need a way to bootstrap the chain. -pub use testonly::GenesisSetup; diff --git a/node/libs/roles/src/validator/testonly.rs b/node/libs/roles/src/validator/testonly.rs index 57e24c98..27290f29 100644 --- a/node/libs/roles/src/validator/testonly.rs +++ b/node/libs/roles/src/validator/testonly.rs @@ -1,11 +1,10 @@ //! Test-only utilities. 
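
// Usage sketch for the `Setup` helper defined below, assuming the `ctx`/`rng`
// construction used by the test files in this series and the `(rng, count)`
// signature of `push_blocks`:
//
//     let ctx = ctx::test_root(&ctx::RealClock);
//     let rng = &mut ctx.rng();
//     let mut setup = Setup::new(rng, 4); // 4 validators, random fork
//     setup.push_blocks(rng, 5);          // finalize 5 blocks, each with a full CommitQC
//     for block in &setup.blocks {
//         block.verify(&setup.genesis).unwrap(); // every QC checks out against genesis
//     }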
use super::{ AggregateSignature, BlockHeader, BlockHeaderHash, BlockNumber, CommitQC, ConsensusMsg, - FinalBlock, LeaderCommit, LeaderPrepare, Msg, MsgHash, NetAddress, Payload, PayloadHash, Phase, - PrepareQC, ProtocolVersion, PublicKey, ReplicaCommit, ReplicaPrepare, SecretKey, Signature, - Signed, Signers, ValidatorSet, ViewNumber, + FinalBlock, Fork, ForkNumber, Genesis, GenesisHash, LeaderCommit, LeaderPrepare, Msg, MsgHash, + NetAddress, Payload, PayloadHash, Phase, PrepareQC, ProtocolVersion, PublicKey, ReplicaCommit, + ReplicaPrepare, SecretKey, Signature, Signed, Signers, ValidatorSet, View, ViewNumber, }; -use anyhow::{bail, Context}; use bit_vec::BitVec; use rand::{ distributions::{Distribution, Standard}, @@ -15,114 +14,75 @@ use std::sync::Arc; use zksync_concurrency::time; use zksync_consensus_utils::enum_util::Variant; -/// Constructs a CommitQC with `CommitQC.message.proposal` matching header. -/// WARNING: it is not a fully correct CommitQC. -pub fn make_justification( - rng: &mut R, - header: &BlockHeader, - protocol_version: ProtocolVersion, -) -> CommitQC { - CommitQC { - message: ReplicaCommit { - protocol_version, - view: ViewNumber(header.number.0), - proposal: *header, - }, - signers: rng.gen(), - signature: rng.gen(), - } -} - -impl<'a> BlockBuilder<'a> { - /// Builds `GenesisSetup`. - pub fn push(self) { - let msgs: Vec<_> = self - .setup - .keys - .iter() - .map(|sk| sk.sign_msg(self.msg)) - .collect(); - let justification = CommitQC::from(&msgs, &self.setup.validator_set()).unwrap(); - self.setup.blocks.push(FinalBlock { - payload: self.payload, - justification, - }); - } - - /// Sets `protocol_version`. - pub fn protocol_version(mut self, protocol_version: ProtocolVersion) -> Self { - self.msg.protocol_version = protocol_version; - self - } - - /// Sets `block_number`. - pub fn block_number(mut self, block_number: BlockNumber) -> Self { - self.msg.proposal.number = block_number; - self - } - - /// Sets `payload`. - pub fn payload(mut self, payload: Payload) -> Self { - self.msg.proposal.payload = payload.hash(); - self.payload = payload; - self - } -} - -/// GenesisSetup. +/// Test setup. #[derive(Debug, Clone)] -pub struct GenesisSetup { - /// Validators' secret keys. - pub keys: Vec, - /// Initial blocks. - pub blocks: Vec, -} - -/// Builder of GenesisSetup. -pub struct BlockBuilder<'a> { - setup: &'a mut GenesisSetup, - msg: ReplicaCommit, - payload: Payload, -} - -impl GenesisSetup { - /// Constructs GenesisSetup with no blocks. - pub fn empty(rng: &mut impl Rng, validators: usize) -> Self { - Self { - keys: (0..validators).map(|_| rng.gen()).collect(), +pub struct Setup(SetupInner); + +impl Setup { + /// New `Setup` with a given `fork`. + pub fn new_with_fork(rng: &mut impl Rng, validators: usize, fork: Fork) -> Self { + let keys: Vec = (0..validators).map(|_| rng.gen()).collect(); + let genesis = Genesis { + validators: ValidatorSet::new(keys.iter().map(|k| k.public())).unwrap(), + fork, + }; + Self(SetupInner { + keys, + genesis, blocks: vec![], - } + }) } - /// Constructs GenesisSetup with genesis block. + /// New `Setup`. pub fn new(rng: &mut impl Rng, validators: usize) -> Self { - let mut this = Self::empty(rng, validators); - this.push_block(rng.gen()); - this - } - - /// Returns a builder for the next block. 
- pub fn next_block(&mut self) -> BlockBuilder { - let parent = self.blocks.last().map(|b| b.justification.message); - let payload = Payload(vec![]); - BlockBuilder { - setup: self, - msg: ReplicaCommit { - protocol_version: parent - .map(|m| m.protocol_version) - .unwrap_or(ProtocolVersion::EARLIEST), - view: parent.map(|m| m.view.next()).unwrap_or(ViewNumber(0)), - proposal: parent - .map(|m| BlockHeader::new(&m.proposal, payload.hash())) - .unwrap_or(BlockHeader::genesis(payload.hash(), BlockNumber(0))), - }, - payload, + let fork = Fork { + number: ForkNumber(rng.gen_range(0..100)), + first_block: BlockNumber(rng.gen_range(0..100)), + first_parent: Some(rng.gen()), + }; + Self::new_with_fork(rng, validators, fork) + } + + /// Next block to finalize. + pub fn next(&self) -> BlockNumber { + match self.0.blocks.last() { + Some(b) => b.header().number.next(), + None => self.0.genesis.fork.first_block, } } /// Pushes the next block with the given payload. pub fn push_block(&mut self, payload: Payload) { - self.next_block().payload(payload).push(); + let view = View { + protocol_version: ProtocolVersion::EARLIEST, + fork: self.genesis.fork.number, + number: self + .0 + .blocks + .last() + .map(|b| b.justification.view().number.next()) + .unwrap_or(ViewNumber(0)), + }; + let proposal = match self.0.blocks.last() { + Some(b) => BlockHeader::next(b.header(), payload.hash()), + None => BlockHeader { + parent: self.genesis.fork.first_parent, + number: self.genesis.fork.first_block, + payload: payload.hash(), + }, + }; + let msg = ReplicaCommit { view, proposal }; + let mut justification = CommitQC::new(msg, &self.0.genesis); + for key in &self.0.keys { + justification.add( + &key.sign_msg(justification.message.clone()), + &self.0.genesis, + ); + } + self.0.blocks.push(FinalBlock { + payload, + justification, + }); } /// Pushes `count` blocks with a random payload. @@ -132,36 +92,28 @@ impl GenesisSetup { } } - /// ValidatorSet. - pub fn validator_set(&self) -> ValidatorSet { - ValidatorSet::new(self.keys.iter().map(|k| k.public())).unwrap() + /// Finds the block by the number. + pub fn block(&self, n: BlockNumber) -> Option<&FinalBlock> { + let first = self.0.blocks.first()?.number(); + self.0.blocks.get(n.0.checked_sub(first.0)? as usize) } } -/// Constructs a genesis block with random payload. -pub fn make_genesis_block(rng: &mut impl Rng, protocol_version: ProtocolVersion) -> FinalBlock { - let mut setup = GenesisSetup::new(rng, 3); - setup - .next_block() - .protocol_version(protocol_version) - .payload(rng.gen()) - .push(); - setup.blocks[0].clone() +/// Setup. +#[derive(Debug, Clone)] +pub struct SetupInner { + /// Validators' secret keys. + pub keys: Vec, + /// Past blocks. + pub blocks: Vec, + /// Genesis config. + pub genesis: Genesis, } -/// Constructs a random block with a given parent. -/// WARNING: this is not a fully correct FinalBlock. -pub fn make_block( - rng: &mut R, - parent: &BlockHeader, - protocol_version: ProtocolVersion, -) -> FinalBlock { - let payload: Payload = rng.gen(); - let header = BlockHeader::new(parent, payload.hash()); - let justification = make_justification(rng, &header, protocol_version); - FinalBlock { - payload, - justification, +impl std::ops::Deref for Setup { + type Target = SetupInner; + fn deref(&self) -> &Self::Target { + &self.0 } } @@ -176,69 +128,6 @@ impl AggregateSignature { } } -impl PrepareQC { - /// Creates a new PrepareQC from a list of *signed* replica Prepare messages and the current validator set. 
- pub fn from( - signed_messages: &[Signed], - validators: &ValidatorSet, - ) -> anyhow::Result { - // Get the view number from the messages, they must all be equal. - let view = signed_messages - .first() - .context("Empty signed messages vector")? - .msg - .view; - - // Create the messages map. - let mut prepare_qc = PrepareQC::default(); - - for signed_message in signed_messages { - if signed_message.msg.view != view { - bail!("Signed messages aren't all for the same view."); - } - - // Get index of the validator in the validator set. - let index = validators - .index(&signed_message.key) - .context("Message signer isn't in the validator set")?; - - prepare_qc.add(signed_message, index, validators); - } - - Ok(prepare_qc) - } -} - -impl CommitQC { - /// Creates a new CommitQC from a list of *signed* replica Commit messages and the current validator set. - /// * `signed_messages` - A list of valid `ReplicaCommit` signed messages. Must contain at least one item. - /// * `validators` - The validator set. - pub fn from( - signed_messages: &[Signed], - validators: &ValidatorSet, - ) -> anyhow::Result { - // Store the signed messages in a Hashmap. - let message = signed_messages[0].msg; - let mut commit_qc = CommitQC::new(message, validators); - - for signed_message in signed_messages { - // Check that the votes are all for the same message. - if signed_message.msg != message { - bail!("CommitQC can only be created from votes for the same message."); - } - - // Get index of the validator in the validator set. - let validator_index = validators - .index(&signed_message.key) - .context("Message signer isn't in the validator set")?; - - commit_qc.add(&signed_message.sig, validator_index); - } - - Ok(commit_qc) - } -} - impl Distribution for Standard { fn sample(&self, rng: &mut R) -> AggregateSignature { AggregateSignature(rng.gen()) @@ -275,6 +164,37 @@ impl Distribution for Standard { } } +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> ForkNumber { + ForkNumber(rng.gen()) + } +} + +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> GenesisHash { + GenesisHash(rng.gen()) + } +} + +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> Fork { + Fork { + number: rng.gen(), + first_block: rng.gen(), + first_parent: Some(rng.gen()), + } + } +} + +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> Genesis { + Genesis { + validators: rng.gen(), + fork: rng.gen(), + } + } +} + impl Distribution for Standard { fn sample(&self, rng: &mut R) -> PayloadHash { PayloadHash(rng.gen()) @@ -316,7 +236,6 @@ impl Distribution for Standard { impl Distribution for Standard { fn sample(&self, rng: &mut R) -> ReplicaPrepare { ReplicaPrepare { - protocol_version: rng.gen(), view: rng.gen(), high_vote: rng.gen(), high_qc: rng.gen(), @@ -327,7 +246,6 @@ impl Distribution for Standard { impl Distribution for Standard { fn sample(&self, rng: &mut R) -> ReplicaCommit { ReplicaCommit { - protocol_version: rng.gen(), view: rng.gen(), proposal: rng.gen(), } @@ -337,8 +255,6 @@ impl Distribution for Standard { impl Distribution for Standard { fn sample(&self, rng: &mut R) -> LeaderPrepare { LeaderPrepare { - protocol_version: rng.gen(), - view: rng.gen(), proposal: rng.gen(), proposal_payload: rng.gen(), justification: rng.gen(), @@ -349,7 +265,6 @@ impl Distribution for Standard { impl Distribution for Standard { fn sample(&self, rng: &mut R) -> LeaderCommit { LeaderCommit { - protocol_version: rng.gen(), justification: rng.gen(), } } @@ -361,6 +276,7 @@ 
impl Distribution for Standard { let map = (0..n).map(|_| (rng.gen(), rng.gen())).collect(); PrepareQC { + view: rng.gen(), map, signature: rng.gen(), } @@ -397,6 +313,16 @@ impl Distribution for Standard { } } +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> View { + View { + protocol_version: rng.gen(), + fork: rng.gen(), + number: rng.gen(), + } + } +} + impl Distribution for Standard { fn sample(&self, rng: &mut R) -> Phase { let i = rng.gen_range(0..2); diff --git a/node/libs/roles/src/validator/tests.rs b/node/libs/roles/src/validator/tests.rs index dc26f1cf..5f38b3d8 100644 --- a/node/libs/roles/src/validator/tests.rs +++ b/node/libs/roles/src/validator/tests.rs @@ -1,5 +1,7 @@ use super::*; -use rand::Rng; +use crate::validator::testonly::Setup; +use assert_matches::assert_matches; +use rand::{seq::SliceRandom, Rng}; use std::vec; use zksync_concurrency::ctx; use zksync_consensus_crypto::{ByteFmt, Text, TextFmt}; @@ -84,6 +86,10 @@ fn test_text_encoding() { let msg_hash: MsgHash = rng.gen(); let t = TextFmt::encode(&msg_hash); assert_eq!(msg_hash, Text::new(&t).decode::().unwrap()); + + let genesis_hash: GenesisHash = rng.gen(); + let t = TextFmt::encode(&genesis_hash); + assert_eq!(genesis_hash, Text::new(&t).decode::().unwrap()); } #[test] @@ -103,6 +109,9 @@ fn test_schema_encoding() { test_encode_random::(rng); test_encode_random::(rng); test_encode_random::(rng); + test_encode_random::(rng); + test_encode_random::(rng); + test_encode_random::(rng); } #[test] @@ -119,7 +128,7 @@ fn test_signature_verify() { let sig1 = key1.sign_hash(&msg1); // Matching key and message. - assert!(sig1.verify_hash(&msg1, &key1.public()).is_ok()); + sig1.verify_hash(&msg1, &key1.public()).unwrap(); // Mismatching message. assert!(sig1.verify_hash(&msg2, &key1.public()).is_err()); @@ -145,9 +154,9 @@ fn test_agg_signature_verify() { let agg_sig = AggregateSignature::aggregate(vec![&sig1, &sig2]); // Matching key and message. - assert!(agg_sig + agg_sig .verify_hash([(msg1, &key1.public()), (msg2, &key2.public())].into_iter()) - .is_ok()); + .unwrap(); // Mismatching message. 
assert!(agg_sig @@ -160,94 +169,114 @@ fn test_agg_signature_verify() { .is_err()); } +fn make_view(number: ViewNumber, setup: &Setup) -> View { + View { + protocol_version: ProtocolVersion::EARLIEST, + fork: setup.genesis.fork.number, + number, + } +} + +fn make_replica_commit(rng: &mut impl Rng, view: ViewNumber, setup: &Setup) -> ReplicaCommit { + ReplicaCommit { + view: make_view(view, setup), + proposal: rng.gen(), + } +} + +fn make_commit_qc(rng: &mut impl Rng, view: ViewNumber, setup: &Setup) -> CommitQC { + let mut qc = CommitQC::new(make_replica_commit(rng, view, setup), &setup.genesis); + for key in &setup.keys { + qc.add(&key.sign_msg(qc.message.clone()), &setup.genesis); + } + qc +} + +fn make_replica_prepare(rng: &mut impl Rng, view: ViewNumber, setup: &Setup) -> ReplicaPrepare { + ReplicaPrepare { + view: make_view(view, setup), + high_vote: { + let view = ViewNumber(rng.gen_range(0..view.0)); + Some(make_replica_commit(rng, view, setup)) + }, + high_qc: { + let view = ViewNumber(rng.gen_range(0..view.0)); + Some(make_commit_qc(rng, view, setup)) + }, + } +} + #[test] fn test_commit_qc() { + use CommitQCVerifyError as Error; let ctx = ctx::test_root(&ctx::RealClock); let rng = &mut ctx.rng(); - let sk1: SecretKey = rng.gen(); - let sk2: SecretKey = rng.gen(); - let sk3: SecretKey = rng.gen(); - - let msg: ReplicaCommit = rng.gen(); - - let validator_set1 = ValidatorSet::new(vec![ - sk1.public(), - sk2.public(), - sk3.public(), - rng.gen(), - rng.gen(), - ]) - .unwrap(); - let validator_set2 = - ValidatorSet::new(vec![rng.gen(), rng.gen(), rng.gen(), rng.gen(), rng.gen()]).unwrap(); - let validator_set3 = ValidatorSet::new(vec![sk1.public(), sk2.public(), sk3.public()]).unwrap(); - - let qc = CommitQC::from( - &[sk1.sign_msg(msg), sk2.sign_msg(msg), sk3.sign_msg(msg)], - &validator_set1, - ) - .unwrap(); - - // Matching validator set and enough signers. - assert!(qc.verify(&validator_set1, 1).is_ok()); - assert!(qc.verify(&validator_set1, 2).is_ok()); - assert!(qc.verify(&validator_set1, 3).is_ok()); - - // Not enough signers. - assert!(qc.verify(&validator_set1, 4).is_err()); - - // Mismatching validator sets. - assert!(qc.verify(&validator_set2, 3).is_err()); - assert!(qc.verify(&validator_set3, 3).is_err()); + let setup1 = Setup::new(rng, 6); + let setup2 = Setup::new(rng, 6); + let genesis3 = Genesis { + validators: ValidatorSet::new(setup1.genesis.validators.iter().take(3).cloned()).unwrap(), + fork: setup1.genesis.fork.clone(), + }; + + for i in 0..setup1.keys.len() + 1 { + let view = rng.gen(); + let mut qc = CommitQC::new(make_replica_commit(rng, view, &setup1), &setup1.genesis); + for key in &setup1.keys[0..i] { + qc.add(&key.sign_msg(qc.message.clone()), &setup1.genesis); + } + if i >= setup1.genesis.validators.threshold() { + qc.verify(&setup1.genesis).unwrap(); + } else { + assert_matches!( + qc.verify(&setup1.genesis), + Err(Error::NotEnoughSigners { .. }) + ); + } + + // Mismatching validator sets. 
+ assert!(qc.verify(&setup2.genesis).is_err()); + assert!(qc.verify(&genesis3).is_err()); + } } #[test] fn test_prepare_qc() { + use PrepareQCVerifyError as Error; let ctx = ctx::test_root(&ctx::RealClock); let rng = &mut ctx.rng(); - let sk1: SecretKey = rng.gen(); - let sk2: SecretKey = rng.gen(); - let sk3: SecretKey = rng.gen(); + let setup1 = Setup::new(rng, 6); + let setup2 = Setup::new(rng, 6); + let genesis3 = Genesis { + validators: ValidatorSet::new(setup1.genesis.validators.iter().take(3).cloned()).unwrap(), + fork: setup1.genesis.fork.clone(), + }; let view: ViewNumber = rng.gen(); - let mut msg1: ReplicaPrepare = rng.gen(); - let mut msg2: ReplicaPrepare = rng.gen(); - msg1.view = view; - msg2.view = view; - - let validator_set1 = ValidatorSet::new(vec![ - sk1.public(), - sk2.public(), - sk3.public(), - rng.gen(), - rng.gen(), - ]) - .unwrap(); - let validator_set2 = - ValidatorSet::new(vec![rng.gen(), rng.gen(), rng.gen(), rng.gen(), rng.gen()]).unwrap(); - let validator_set3 = ValidatorSet::new(vec![sk1.public(), sk2.public(), sk3.public()]).unwrap(); - - let agg_qc = PrepareQC::from( - &[ - sk1.sign_msg(msg1.clone()), - sk2.sign_msg(msg2), - sk3.sign_msg(msg1), - ], - &validator_set1, - ) - .unwrap(); - - // Matching validator set and enough signers. - assert!(agg_qc.verify(view, &validator_set1, 1).is_ok()); - assert!(agg_qc.verify(view, &validator_set1, 2).is_ok()); - assert!(agg_qc.verify(view, &validator_set1, 3).is_ok()); - - // Not enough signers. - assert!(agg_qc.verify(view, &validator_set1, 4).is_err()); - - // Mismatching validator sets. - assert!(agg_qc.verify(view, &validator_set2, 3).is_err()); - assert!(agg_qc.verify(view, &validator_set3, 3).is_err()); + let msgs: Vec<_> = (0..3) + .map(|_| make_replica_prepare(rng, view, &setup1)) + .collect(); + + for n in 0..setup1.keys.len() + 1 { + let mut qc = PrepareQC::new(msgs[0].view.clone()); + for key in &setup1.keys[0..n] { + qc.add( + &key.sign_msg(msgs.choose(rng).unwrap().clone()), + &setup1.genesis, + ); + } + if n >= setup1.genesis.validators.threshold() { + qc.verify(&setup1.genesis).unwrap(); + } else { + assert_matches!( + qc.verify(&setup1.genesis), + Err(Error::NotEnoughSigners { .. }) + ); + } + + // Mismatching validator sets. + assert!(qc.verify(&setup2.genesis).is_err()); + assert!(qc.verify(&genesis3).is_err()); + } } diff --git a/node/libs/storage/src/block_store/metrics.rs b/node/libs/storage/src/block_store/metrics.rs index 32dae966..70447ad7 100644 --- a/node/libs/storage/src/block_store/metrics.rs +++ b/node/libs/storage/src/block_store/metrics.rs @@ -4,9 +4,12 @@ use std::time; #[derive(Debug, vise::Metrics)] #[metrics(prefix = "zksync_consensus_storage_persistent_block_store")] pub(super) struct PersistentBlockStore { - /// Latency of a successful `state()` call. + /// Latency of a successful `genesis()` call. #[metrics(unit = vise::Unit::Seconds, buckets = vise::Buckets::LATENCIES)] - pub(super) state_latency: vise::Histogram, + pub(super) genesis_latency: vise::Histogram, + /// Latency of a successful `last()` call. + #[metrics(unit = vise::Unit::Seconds, buckets = vise::Buckets::LATENCIES)] + pub(super) last_latency: vise::Histogram, /// Latency of a successful `block()` call. 
#[metrics(unit = vise::Unit::Seconds, buckets = vise::Buckets::LATENCIES)] pub(super) block_latency: vise::Histogram, @@ -21,8 +24,8 @@ pub(super) static PERSISTENT_BLOCK_STORE: vise::Global = v #[derive(Debug, vise::Metrics)] #[metrics(prefix = "zksync_consensus_storage_block_store")] pub(super) struct BlockStore { - /// BlockNumber of the last queued block. - pub(super) last_queued_block: vise::Gauge, - /// BlockNumber of the last persisted block. - pub(super) last_persisted_block: vise::Gauge, + /// BlockNumber of the next block to queue. + pub(super) next_queued_block: vise::Gauge, + /// BlockNumber of the next block to persist. + pub(super) next_persisted_block: vise::Gauge, } diff --git a/node/libs/storage/src/block_store/mod.rs b/node/libs/storage/src/block_store/mod.rs index f58c551d..456bc2e1 100644 --- a/node/libs/storage/src/block_store/mod.rs +++ b/node/libs/storage/src/block_store/mod.rs @@ -1,4 +1,5 @@ //! Defines storage layer for finalized blocks. +use anyhow::Context as _; use std::{collections::VecDeque, fmt, sync::Arc}; use zksync_concurrency::{ctx, error::Wrap as _, sync}; use zksync_consensus_roles::validator; @@ -9,21 +10,27 @@ mod metrics; #[derive(Debug, Clone, PartialEq, Eq)] pub struct BlockStoreState { /// Stored block with the lowest number. - pub first: validator::CommitQC, + /// Currently always same as `genesis.first_block`. + pub first: validator::BlockNumber, /// Stored block with the highest number. - pub last: validator::CommitQC, + /// None iff store is empty. + pub last: Option, } impl BlockStoreState { /// Checks whether block with the given number is stored in the `BlockStore`. pub fn contains(&self, number: validator::BlockNumber) -> bool { - self.first.header().number <= number && number <= self.last.header().number + let Some(last) = &self.last else { return false }; + self.first <= number && number <= last.header().number } /// Number of the next block that can be stored in the `BlockStore`. /// (i.e. `last` + 1). pub fn next(&self) -> validator::BlockNumber { - self.last.header().number.next() + match &self.last { + Some(qc) => qc.header().number.next(), + None => self.first, + } } } @@ -32,12 +39,14 @@ impl BlockStoreState { /// Implementations **must** propagate context cancellation using [`StorageError::Canceled`]. #[async_trait::async_trait] pub trait PersistentBlockStore: fmt::Debug + Send + Sync { - /// Range of blocks available in storage. - /// PersistentBlockStore is expected to always contain at least 1 block, - /// and be append-only storage (never delete blocks). + /// Genesis matching the block store content. + /// Consensus code calls this method only once. + async fn genesis(&self, ctx: &ctx::Ctx) -> ctx::Result; + + /// Last block available in storage. /// Consensus code calls this method only once and then tracks the /// range of available blocks internally. - async fn state(&self, ctx: &ctx::Ctx) -> ctx::Result; + async fn last(&self, ctx: &ctx::Ctx) -> ctx::Result>; /// Gets a block by its number. /// Returns error if block is missing. @@ -74,6 +83,7 @@ struct Inner { pub struct BlockStore { inner: sync::watch::Sender, persistent: Box, + genesis: validator::Genesis, } /// Runner of the BlockStore background tasks. 
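A minimal sketch of the new range semantics, assuming only the public `BlockStoreState` fields defined in the hunk above (the helper itself is illustrative, not part of the patch):

use zksync_consensus_roles::validator;
use zksync_consensus_storage::BlockStoreState;

// For an empty store `last == None`: `contains()` rejects every number and
// `next()` falls back to `first`, so the fork's first block is accepted next.
fn empty_state(first: validator::BlockNumber) -> BlockStoreState {
    let state = BlockStoreState { first, last: None };
    assert!(!state.contains(first));
    assert_eq!(state.next(), first);
    state
}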
@@ -110,7 +120,7 @@ impl BlockStoreRunner { self.0.inner.send_modify(|inner| { debug_assert_eq!(inner.persisted_state.next(), block.header().number); - inner.persisted_state.last = block.justification.clone(); + inner.persisted_state.last = Some(block.justification.clone()); inner.queue.pop_front(); }); } @@ -132,24 +142,43 @@ impl BlockStore { ctx: &ctx::Ctx, persistent: Box, ) -> ctx::Result<(Arc, BlockStoreRunner)> { - let t = metrics::PERSISTENT_BLOCK_STORE.state_latency.start(); - let state = persistent.state(ctx).await.wrap("persistent.state()")?; + let t = metrics::PERSISTENT_BLOCK_STORE.genesis_latency.start(); + let genesis = persistent.genesis(ctx).await.wrap("persistent.genesis()")?; + t.observe(); + let t = metrics::PERSISTENT_BLOCK_STORE.last_latency.start(); + let last = persistent.last(ctx).await.wrap("persistent.last()")?; t.observe(); - if state.first.header().number > state.last.header().number { - return Err(anyhow::anyhow!("invalid state").into()); + if let Some(last) = &last { + last.verify(&genesis).context("last.verify()")?; } + let state = BlockStoreState { + first: genesis.fork.first_block, + last, + }; let this = Arc::new(Self { - persistent, inner: sync::watch::channel(Inner { queued_state: sync::watch::channel(state.clone()).0, persisted_state: state, queue: VecDeque::new(), }) .0, + genesis, + persistent, }); + // Verify the first block. + if let Some(block) = this.block(ctx, this.genesis.fork.first_block).await? { + block + .verify(&this.genesis) + .with_context(|| format!("verify({:?})", this.genesis.fork.first_block))?; + } Ok((this.clone(), BlockStoreRunner(this))) } + /// Genesis specification for this block store. + pub fn genesis(&self) -> &validator::Genesis { + &self.genesis + } + /// Fetches a block (from queue or persistent storage). pub async fn block( &self, @@ -188,19 +217,35 @@ impl BlockStore { &self, ctx: &ctx::Ctx, block: validator::FinalBlock, - ) -> ctx::OrCanceled<()> { - let number = block.header().number; - sync::wait_for(ctx, &mut self.subscribe(), |queued_state| { - queued_state.next() >= number - }) - .await?; + ) -> ctx::Result<()> { + let number = block.number(); + { + let sub = &mut self.subscribe(); + let queued_state = + sync::wait_for(ctx, sub, |queued_state| queued_state.next() >= number).await?; + if queued_state.next() > number { + return Ok(()); + } + block.verify(&self.genesis).context("block.verify()")?; + // Verify parent hash, if previous block is available. + if let Some(last) = queued_state.last.as_ref() { + if Some(last.header().hash()) != block.header().parent { + return Err(anyhow::format_err!( + "block.parent = {:?}, want {:?}", + block.header().parent, + last.header().hash() + ) + .into()); + } + } + } self.inner.send_if_modified(|inner| { let modified = inner.queued_state.send_if_modified(|queued_state| { // It may happen that the same block is queued_state by 2 calls. if queued_state.next() != number { return false; } - queued_state.last = block.justification.clone(); + queued_state.last = Some(block.justification.clone()); true }); if !modified { @@ -212,14 +257,14 @@ impl BlockStore { Ok(()) } - /// Waits until the given block is queued_state to be stored. + /// Waits until the given block is queued to be stored. 
pub async fn wait_until_queued( &self, ctx: &ctx::Ctx, number: validator::BlockNumber, ) -> ctx::OrCanceled<()> { sync::wait_for(ctx, &mut self.subscribe(), |queued_state| { - queued_state.contains(number) + number < queued_state.next() }) .await?; Ok(()) @@ -232,7 +277,7 @@ impl BlockStore { number: validator::BlockNumber, ) -> ctx::OrCanceled<()> { sync::wait_for(ctx, &mut self.inner.subscribe(), |inner| { - inner.persisted_state.contains(number) + number < inner.persisted_state.next() }) .await?; Ok(()) @@ -247,10 +292,9 @@ impl BlockStore { fn scrape_metrics(&self) -> metrics::BlockStore { let m = metrics::BlockStore::default(); let inner = self.inner.borrow(); - m.last_queued_block - .set(inner.queued_state.borrow().last.header().number.0); - m.last_persisted_block - .set(inner.persisted_state.last.header().number.0); + m.next_queued_block + .set(inner.queued_state.borrow().next().0); + m.next_persisted_block.set(inner.persisted_state.next().0); m } } diff --git a/node/libs/storage/src/proto/mod.proto b/node/libs/storage/src/proto/mod.proto index 594ef1b0..e06e84da 100644 --- a/node/libs/storage/src/proto/mod.proto +++ b/node/libs/storage/src/proto/mod.proto @@ -5,14 +5,14 @@ package zksync.storage; import "zksync/roles/validator.proto"; message Proposal { - optional uint64 number = 1; - optional bytes payload = 2; + optional uint64 number = 1; // required; BlockNumber + optional bytes payload = 2; // required } message ReplicaState { - optional uint64 view = 1; - optional roles.validator.Phase phase = 2; - optional roles.validator.ReplicaCommit high_vote = 3; - optional roles.validator.CommitQC high_qc = 4; + optional uint64 view = 1; // required; ViewNumber + optional roles.validator.Phase phase = 2; // required + optional roles.validator.ReplicaCommit high_vote = 3; // optional + optional roles.validator.CommitQC high_qc = 4; // optional repeated Proposal proposals = 5; } diff --git a/node/libs/storage/src/replica_store.rs b/node/libs/storage/src/replica_store.rs index 6243cc72..0eaa96fd 100644 --- a/node/libs/storage/src/replica_store.rs +++ b/node/libs/storage/src/replica_store.rs @@ -4,7 +4,7 @@ use anyhow::Context as _; use std::fmt; use zksync_concurrency::ctx; use zksync_consensus_roles::validator; -use zksync_protobuf::{read_required, required, ProtoFmt}; +use zksync_protobuf::{read_optional, read_required, required, ProtoFmt}; /// Storage for [`ReplicaState`]. /// @@ -12,7 +12,7 @@ use zksync_protobuf::{read_required, required, ProtoFmt}; #[async_trait::async_trait] pub trait ReplicaStore: fmt::Debug + Send + Sync { /// Gets the replica state, if it is contained in the database. - async fn state(&self, ctx: &ctx::Ctx) -> ctx::Result>; + async fn state(&self, ctx: &ctx::Ctx) -> ctx::Result; /// Stores the given replica state into the database. async fn set_state(&self, ctx: &ctx::Ctx, state: &ReplicaState) -> ctx::Result<()>; @@ -39,20 +39,20 @@ pub struct ReplicaState { /// The current phase. pub phase: validator::Phase, /// The highest block proposal that the replica has committed to. - pub high_vote: validator::ReplicaCommit, + pub high_vote: Option, /// The highest commit quorum certificate known to the replica. - pub high_qc: validator::CommitQC, + pub high_qc: Option, /// A cache of the received block proposals. 
pub proposals: Vec, } -impl From for ReplicaState { - fn from(certificate: validator::CommitQC) -> Self { +impl Default for ReplicaState { + fn default() -> Self { Self { - view: certificate.message.view, + view: validator::ViewNumber(0), phase: validator::Phase::Prepare, - high_vote: certificate.message, - high_qc: certificate, + high_vote: None, + high_qc: None, proposals: vec![], } } @@ -83,8 +83,8 @@ impl ProtoFmt for ReplicaState { Ok(Self { view: validator::ViewNumber(r.view.context("view_number")?), phase: read_required(&r.phase).context("phase")?, - high_vote: read_required(&r.high_vote).context("high_vote")?, - high_qc: read_required(&r.high_qc).context("high_qc")?, + high_vote: read_optional(&r.high_vote).context("high_vote")?, + high_qc: read_optional(&r.high_qc).context("high_qc")?, proposals: r .proposals .iter() @@ -98,8 +98,8 @@ impl ProtoFmt for ReplicaState { Self::Proto { view: Some(self.view.0), phase: Some(self.phase.build()), - high_vote: Some(self.high_vote.build()), - high_qc: Some(self.high_qc.build()), + high_vote: self.high_vote.as_ref().map(|x| x.build()), + high_qc: self.high_qc.as_ref().map(|x| x.build()), proposals: self.proposals.iter().map(|p| p.build()).collect(), } } diff --git a/node/libs/storage/src/testonly/in_memory.rs b/node/libs/storage/src/testonly/in_memory.rs index d34f09cd..209d046c 100644 --- a/node/libs/storage/src/testonly/in_memory.rs +++ b/node/libs/storage/src/testonly/in_memory.rs @@ -1,36 +1,51 @@ //! In-memory storage implementation. -use crate::{BlockStoreState, PersistentBlockStore, ReplicaState}; +use crate::{PersistentBlockStore, ReplicaState}; use anyhow::Context as _; -use std::{collections::VecDeque, sync::Mutex}; +use std::{ + collections::VecDeque, + sync::{Arc, Mutex}, +}; use zksync_concurrency::ctx; use zksync_consensus_roles::validator; +#[derive(Debug)] +struct BlockStoreInner { + genesis: validator::Genesis, + blocks: Mutex>, +} + /// In-memory block store. -#[derive(Debug, Default)] -pub struct BlockStore(Mutex>); +#[derive(Clone, Debug)] +pub struct BlockStore(Arc); /// In-memory replica store. -#[derive(Debug, Default)] -pub struct ReplicaStore(Mutex>); +#[derive(Clone, Debug, Default)] +pub struct ReplicaStore(Arc>); impl BlockStore { - /// Creates a new store containing only the specified `genesis_block`. - pub fn new(genesis: validator::FinalBlock) -> Self { - Self(Mutex::new([genesis].into())) + /// New In-memory `BlockStore`. 
+ pub fn new(genesis: validator::Genesis) -> Self { + Self(Arc::new(BlockStoreInner { + genesis, + blocks: Mutex::default(), + })) } } #[async_trait::async_trait] impl PersistentBlockStore for BlockStore { - async fn state(&self, _ctx: &ctx::Ctx) -> ctx::Result { - let blocks = self.0.lock().unwrap(); - if blocks.is_empty() { - return Err(anyhow::anyhow!("store is empty").into()); - } - Ok(BlockStoreState { - first: blocks.front().unwrap().justification.clone(), - last: blocks.back().unwrap().justification.clone(), - }) + async fn genesis(&self, _ctx: &ctx::Ctx) -> ctx::Result { + Ok(self.0.genesis.clone()) + } + + async fn last(&self, _ctx: &ctx::Ctx) -> ctx::Result> { + Ok(self + .0 + .blocks + .lock() + .unwrap() + .back() + .map(|b| b.justification.clone())) } async fn block( @@ -38,7 +53,7 @@ impl PersistentBlockStore for BlockStore { _ctx: &ctx::Ctx, number: validator::BlockNumber, ) -> ctx::Result { - let blocks = self.0.lock().unwrap(); + let blocks = self.0.blocks.lock().unwrap(); let front = blocks.front().context("not found")?; let idx = number .0 @@ -52,7 +67,7 @@ impl PersistentBlockStore for BlockStore { _ctx: &ctx::Ctx, block: &validator::FinalBlock, ) -> ctx::Result<()> { - let mut blocks = self.0.lock().unwrap(); + let mut blocks = self.0.blocks.lock().unwrap(); let got = block.header().number; if let Some(last) = blocks.back() { let want = last.header().number.next(); @@ -67,12 +82,12 @@ impl PersistentBlockStore for BlockStore { #[async_trait::async_trait] impl crate::ReplicaStore for ReplicaStore { - async fn state(&self, _ctx: &ctx::Ctx) -> ctx::Result> { + async fn state(&self, _ctx: &ctx::Ctx) -> ctx::Result { Ok(self.0.lock().unwrap().clone()) } async fn set_state(&self, _ctx: &ctx::Ctx, state: &ReplicaState) -> ctx::Result<()> { - *self.0.lock().unwrap() = Some(state.clone()); + *self.0.lock().unwrap() = state.clone(); Ok(()) } } diff --git a/node/libs/storage/src/testonly/mod.rs b/node/libs/storage/src/testonly/mod.rs index 5977f10e..3a44800b 100644 --- a/node/libs/storage/src/testonly/mod.rs +++ b/node/libs/storage/src/testonly/mod.rs @@ -1,5 +1,6 @@ //! Test-only utilities. use crate::{BlockStore, BlockStoreRunner, PersistentBlockStore, Proposal, ReplicaState}; +use anyhow::Context as _; use rand::{distributions::Standard, prelude::Distribution, Rng}; use std::sync::Arc; use zksync_concurrency::ctx; @@ -31,7 +32,7 @@ impl Distribution for Standard { /// Constructs a new in-memory store with a genesis block. pub async fn new_store( ctx: &ctx::Ctx, - genesis: &validator::FinalBlock, + genesis: &validator::Genesis, ) -> (Arc, BlockStoreRunner) { BlockStore::new(ctx, Box::new(in_memory::BlockStore::new(genesis.clone()))) .await @@ -40,14 +41,40 @@ pub async fn new_store( /// Dumps all the blocks stored in `store`. 
pub async fn dump(ctx: &ctx::Ctx, store: &dyn PersistentBlockStore) -> Vec { - let range = store.state(ctx).await.unwrap(); + let genesis = store.genesis(ctx).await.unwrap(); + let last = store.last(ctx).await.unwrap(); let mut blocks = vec![]; - for n in range.first.header().number.0..range.next().0 { - let n = validator::BlockNumber(n); + let begin = genesis.fork.first_block; + let end = last + .as_ref() + .map(|qc| qc.header().number.next()) + .unwrap_or(begin); + for n in (begin.0..end.0).map(validator::BlockNumber) { let block = store.block(ctx, n).await.unwrap(); assert_eq!(block.header().number, n); blocks.push(block); } - assert!(store.block(ctx, range.next()).await.is_err()); + assert!(store.block(ctx, end).await.is_err()); blocks } + +/// Verifies storage content. +pub async fn verify(ctx: &ctx::Ctx, store: &BlockStore) -> anyhow::Result<()> { + let range = store.subscribe().borrow().clone(); + let mut parent: Option = None; + for n in (range.first.0..range.next().0).map(validator::BlockNumber) { + async { + let block = store.block(ctx, n).await?.context("missing")?; + block.verify(store.genesis())?; + // Ignore checking the first block parent + if parent.is_some() { + anyhow::ensure!(parent == block.header().parent); + } + parent = Some(block.header().hash()); + Ok(()) + } + .await + .context(n)?; + } + Ok(()) +} diff --git a/node/libs/storage/src/tests.rs b/node/libs/storage/src/tests.rs index 723fd250..4a50c8a0 100644 --- a/node/libs/storage/src/tests.rs +++ b/node/libs/storage/src/tests.rs @@ -1,19 +1,20 @@ use super::*; use crate::{testonly::new_store, ReplicaState}; use zksync_concurrency::{ctx, scope, sync, testonly::abort_on_panic}; -use zksync_consensus_roles::validator; +use zksync_consensus_roles::validator::testonly::Setup; #[tokio::test] async fn test_inmemory_block_store() { let ctx = &ctx::test_root(&ctx::RealClock); let rng = &mut ctx.rng(); - let store = &testonly::in_memory::BlockStore::default(); - let mut setup = validator::testonly::GenesisSetup::empty(rng, 3); + let mut setup = Setup::new(rng, 3); setup.push_blocks(rng, 5); + + let store = &testonly::in_memory::BlockStore::new(setup.genesis.clone()); let mut want = vec![]; - for block in setup.blocks { - store.store_next_block(ctx, &block).await.unwrap(); - want.push(block); + for block in &setup.blocks { + store.store_next_block(ctx, block).await.unwrap(); + want.push(block.clone()); assert_eq!(want, testonly::dump(ctx, store).await); } } @@ -30,29 +31,27 @@ async fn test_state_updates() { abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); let rng = &mut ctx.rng(); - let mut genesis = validator::testonly::GenesisSetup::new(rng, 1); - genesis.push_blocks(rng, 1); - - let (store, runner) = new_store(ctx, &genesis.blocks[0]).await; + let mut setup = Setup::new(rng, 1); + setup.push_blocks(rng, 1); + let (store, runner) = new_store(ctx, &setup.genesis).await; scope::run!(ctx, |ctx, s| async { s.spawn_bg(runner.run(ctx)); let sub = &mut store.subscribe(); let state = sub.borrow().clone(); - assert_eq!(state.first, genesis.blocks[0].justification); - assert_eq!(state.last, genesis.blocks[0].justification); + assert_eq!(state.first, setup.genesis.fork.first_block); + assert_eq!(state.last, None); store - .queue_block(ctx, genesis.blocks[1].clone()) + .queue_block(ctx, setup.blocks[0].clone()) .await .unwrap(); let state = sync::wait_for(ctx, sub, |state| { - state.last == genesis.blocks[1].justification + state.last.as_ref() == Some(&setup.blocks[0].justification) }) .await? 
.clone(); - assert_eq!(state.first, genesis.blocks[0].justification); - assert_eq!(state.last, genesis.blocks[1].justification); + assert_eq!(state.first, setup.blocks[0].header().number); Ok(()) }) .await diff --git a/node/libs/utils/src/lib.rs b/node/libs/utils/src/lib.rs index 26b0011e..31e43ba1 100644 --- a/node/libs/utils/src/lib.rs +++ b/node/libs/utils/src/lib.rs @@ -1,5 +1,4 @@ //! Crate that holds several small utilities and primitives. pub mod enum_util; -pub mod no_copy; pub mod pipe; diff --git a/node/tests/src/main.rs b/node/tests/src/main.rs index e7d5849d..abeaa4c3 100644 --- a/node/tests/src/main.rs +++ b/node/tests/src/main.rs @@ -1,7 +1,7 @@ //! This is a simple test for the RPC server. It checks if the server is running and can respond to. -use std::{fs, io::Write, path::PathBuf}; +use std::{fs, io::Write, net::SocketAddr, path::PathBuf, str::FromStr}; -use anyhow::Context; +use anyhow::{ensure, Context}; use clap::{Parser, Subcommand}; use jsonrpsee::{core::client::ClientT, http_client::HttpClientBuilder, rpc_params}; use zksync_consensus_tools::{k8s, rpc::methods::health_check}; @@ -44,16 +44,18 @@ pub async fn generate_config() -> anyhow::Result<()> { let pods_ip = k8s::get_consensus_nodes_address(&client) .await .context("Failed to get consensus pods address")?; + ensure!( + !pods_ip.is_empty(), + "No consensus pods found in the k8s cluster" + ); let config_file_path = get_config_path(); + let mut config_file = fs::OpenOptions::new() + .write(true) + .create(true) + .truncate(true) + .open(config_file_path)?; for addr in pods_ip { - let mut config_file = fs::OpenOptions::new() - .write(true) - .create(true) - .truncate(true) - .open(&config_file_path)?; - config_file - .write_all(addr.to_string().as_bytes()) - .with_context(|| "Failed to write to config file")?; + writeln!(config_file, "{addr}").context("Failed to write to config file")?; } Ok(()) } @@ -73,7 +75,8 @@ pub async fn sanity_test() { let config_file_path = get_config_path(); let nodes_socket = fs::read_to_string(config_file_path).unwrap(); for socket in nodes_socket.lines() { - let url: String = format!("http://{}", socket); + let socket = SocketAddr::from_str(socket).unwrap(); + let url = format!("http://{}", socket); let rpc_client = HttpClientBuilder::default().build(url).unwrap(); let response: serde_json::Value = rpc_client .request(health_check::method(), rpc_params!()) diff --git a/node/tools/build.rs b/node/tools/build.rs index e4bba2bd..f4cfa5df 100644 --- a/node/tools/build.rs +++ b/node/tools/build.rs @@ -3,7 +3,7 @@ fn main() { zksync_protobuf_build::Config { input_root: "src/proto".into(), proto_root: "zksync/tools".into(), - dependencies: vec![], + dependencies: vec!["::zksync_consensus_roles::proto".parse().unwrap()], protobuf_crate: "::zksync_protobuf".parse().unwrap(), is_public: false, } diff --git a/node/tools/src/bin/deployer.rs b/node/tools/src/bin/deployer.rs index 72ca848b..f36d61c9 100644 --- a/node/tools/src/bin/deployer.rs +++ b/node/tools/src/bin/deployer.rs @@ -5,9 +5,9 @@ use std::{fs, path::PathBuf}; use anyhow::Context; use clap::{Parser, Subcommand}; -use rand::Rng; use zksync_consensus_crypto::{Text, TextFmt}; use zksync_consensus_roles::node::SecretKey; +use zksync_consensus_roles::validator; use zksync_consensus_tools::{k8s, AppConfig, NodeAddr, NODES_PORT}; /// Command line arguments. 
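The next deployer hunk switches config generation to a shared genesis; a minimal sketch of that flow, assuming the `Setup::new` and `AppConfig::default_for` signatures introduced in this series (`bootstrap` is a hypothetical helper):

use zksync_consensus_roles::validator;
use zksync_consensus_tools::AppConfig;

// One shared `Genesis` is generated once; every per-node config derives from
// it, replacing the old flow that embedded a genesis block in each config.
fn bootstrap(nodes: usize) -> (AppConfig, Vec<validator::SecretKey>) {
    let rng = &mut rand::thread_rng();
    let setup = validator::testonly::Setup::new(rng, nodes);
    (AppConfig::default_for(setup.genesis.clone()), setup.keys.clone())
}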
@@ -44,14 +44,19 @@ enum DeployerCommands { fn generate_config(nodes: usize) -> anyhow::Result<()> { assert!(nodes > 0, "at least 1 node has to be specified"); - // Each node will have `gossip_peers` inbound peers. + // Generate the keys for all the replicas. + let rng = &mut rand::thread_rng(); + + let setup = validator::testonly::Setup::new(rng, nodes); + let validator_keys = setup.keys.clone(); + + // Each node will have `gossip_peers` outbound peers. let peers = 2; - // Generate the node keys for all the replicas. - let rng = &mut rand::thread_rng(); - let node_keys: Vec = (0..nodes).map(|_| rng.gen()).collect(); + let node_keys: Vec = (0..nodes).map(|_| SecretKey::generate()).collect(); + + let default_config = AppConfig::default_for(setup.genesis.clone()); - let (default_config, validator_keys) = AppConfig::default_for(nodes); let mut cfgs: Vec<_> = (0..nodes).map(|_| default_config.clone()).collect(); // Construct a gossip network with optimal diameter. diff --git a/node/tools/src/bin/localnet_config.rs b/node/tools/src/bin/localnet_config.rs index ebe9ba1d..e5ef14bb 100644 --- a/node/tools/src/bin/localnet_config.rs +++ b/node/tools/src/bin/localnet_config.rs @@ -4,7 +4,7 @@ use clap::Parser; use rand::Rng; use std::{fs, net::SocketAddr, path::PathBuf}; use zksync_consensus_crypto::TextFmt; -use zksync_consensus_roles::node; +use zksync_consensus_roles::{node, validator}; use zksync_consensus_tools::AppConfig; /// Command line arguments. @@ -43,14 +43,19 @@ fn main() -> anyhow::Result<()> { .metrics_server_port .map(|port| SocketAddr::new(std::net::Ipv4Addr::UNSPECIFIED.into(), port)); + // Generate the keys for all the replicas. + let rng = &mut rand::thread_rng(); + + let setup = validator::testonly::Setup::new(rng, addrs.len()); + let validator_keys = setup.keys.clone(); + // Each node will have `gossip_peers` outbound peers. let nodes = addrs.len(); let peers = 2; - let rng = &mut rand::thread_rng(); let node_keys: Vec = (0..nodes).map(|_| rng.gen()).collect(); - let (mut default_config, validator_keys) = AppConfig::default_for(nodes); + let mut default_config = AppConfig::default_for(setup.genesis.clone()); if let Some(metrics_server_addr) = metrics_server_addr { default_config.with_metrics_server_addr(metrics_server_addr); diff --git a/node/tools/src/config.rs b/node/tools/src/config.rs index 55439d32..43494057 100644 --- a/node/tools/src/config.rs +++ b/node/tools/src/config.rs @@ -2,6 +2,7 @@ use crate::{proto, store}; use anyhow::Context as _; use serde_json::{ser::Formatter, Serializer}; +use std::net::Ipv4Addr; use std::str::FromStr; use std::{ collections::{HashMap, HashSet}, @@ -14,8 +15,8 @@ use zksync_consensus_bft as bft; use zksync_consensus_crypto::{read_optional_text, read_required_text, Text, TextFmt}; use zksync_consensus_executor as executor; use zksync_consensus_roles::{node, validator}; -use zksync_consensus_storage::{BlockStore, BlockStoreRunner, PersistentBlockStore}; -use zksync_protobuf::{required, serde::Serde, ProtoFmt}; +use zksync_consensus_storage::{BlockStore, BlockStoreRunner}; +use zksync_protobuf::{read_required, required, serde::Serde, ProtoFmt}; /// Ports for the nodes to listen on kubernetes pod. 
pub const NODES_PORT: u16 = 3054; @@ -43,12 +44,6 @@ pub(crate) fn encode_with_serializer( String::from_utf8(serializer.into_inner()).unwrap() } -// pub fn encode_json(x: &T) -> String { -// let mut s = serde_json::Serializer::pretty(vec![]); -// zksync_protobuf::serde::serialize(x, &mut s).unwrap(); -// String::from_utf8(s.into_inner()).unwrap() -// } - /// Pair of (public key, ip address) for a gossip network node. #[derive(Debug, Clone)] pub struct NodeAddr { @@ -81,8 +76,7 @@ pub struct AppConfig { pub public_addr: SocketAddr, pub metrics_server_addr: Option, - pub validators: validator::ValidatorSet, - pub genesis_block: validator::FinalBlock, + pub genesis: validator::Genesis, pub max_payload_size: usize, pub gossip_dynamic_inbound_limit: usize, @@ -94,14 +88,6 @@ impl ProtoFmt for AppConfig { type Proto = proto::AppConfig; fn read(r: &Self::Proto) -> anyhow::Result { - let validators = r.validators.iter().enumerate().map(|(i, v)| { - Text::new(v) - .decode() - .with_context(|| format!("validators[{i}]")) - }); - let validators: anyhow::Result> = validators.collect(); - let validators = validator::ValidatorSet::new(validators?).context("validators")?; - let mut gossip_static_inbound = HashSet::new(); for (i, v) in r.gossip_static_inbound.iter().enumerate() { gossip_static_inbound.insert( @@ -123,8 +109,7 @@ impl ProtoFmt for AppConfig { metrics_server_addr: read_optional_text(&r.metrics_server_addr) .context("metrics_server_addr")?, - validators, - genesis_block: read_required_text(&r.genesis_block).context("genesis_block")?, + genesis: read_required(&r.genesis).context("genesis")?, max_payload_size: required(&r.max_payload_size) .and_then(|x| Ok((*x).try_into()?)) .context("max_payload_size")?, @@ -143,8 +128,7 @@ impl ProtoFmt for AppConfig { public_addr: Some(self.public_addr.encode()), metrics_server_addr: self.metrics_server_addr.as_ref().map(TextFmt::encode), - validators: self.validators.iter().map(TextFmt::encode).collect(), - genesis_block: Some(self.genesis_block.encode()), + genesis: Some(self.genesis.build()), max_payload_size: Some(self.max_payload_size.try_into().unwrap()), gossip_dynamic_inbound_limit: Some( @@ -222,33 +206,19 @@ impl<'a> ConfigPaths<'a> { } impl AppConfig { - pub fn default_for(validators_amount: usize) -> (AppConfig, Vec) { - // Generate the keys for all the replicas. 
- let rng = &mut rand::thread_rng(); - - let mut genesis = validator::GenesisSetup::empty(rng, validators_amount); - genesis - .next_block() - .payload(validator::Payload(vec![])) - .push(); - let validator_keys = genesis.keys.clone(); - - ( - Self { - server_addr: SocketAddr::new(std::net::Ipv4Addr::UNSPECIFIED.into(), NODES_PORT), - public_addr: SocketAddr::new(std::net::Ipv4Addr::UNSPECIFIED.into(), NODES_PORT), - metrics_server_addr: None, - - validators: genesis.validator_set(), - genesis_block: genesis.blocks[0].clone(), - max_payload_size: 1000000, - - gossip_dynamic_inbound_limit: 2, - gossip_static_inbound: [].into(), - gossip_static_outbound: [].into(), - }, - validator_keys, - ) + pub fn default_for(genesis: validator::Genesis) -> AppConfig { + Self { + server_addr: SocketAddr::new(Ipv4Addr::UNSPECIFIED.into(), NODES_PORT), + public_addr: SocketAddr::new(Ipv4Addr::UNSPECIFIED.into(), NODES_PORT), + metrics_server_addr: None, + + genesis, + max_payload_size: 1000000, + + gossip_dynamic_inbound_limit: 2, + gossip_static_inbound: [].into(), + gossip_static_outbound: [].into(), + } } pub fn with_server_addr(&mut self, server_addr: SocketAddr) -> &mut Self { @@ -310,19 +280,12 @@ impl Configs { &self, ctx: &ctx::Ctx, ) -> ctx::Result<(executor::Executor, BlockStoreRunner)> { - let store = store::RocksDB::open(&self.database).await?; - // Store genesis if db is empty. - if store.is_empty().await? { - store - .store_next_block(ctx, &self.app.genesis_block) - .await - .context("store_next_block()")?; - } + let store = store::RocksDB::open(self.app.genesis.clone(), &self.database).await?; let (block_store, runner) = BlockStore::new(ctx, Box::new(store.clone())).await?; let e = executor::Executor { config: executor::Config { server_addr: self.app.server_addr, - validators: self.app.validators.clone(), + public_addr: self.app.public_addr, node_key: self.node_key.clone(), gossip_dynamic_inbound_limit: self.app.gossip_dynamic_inbound_limit, gossip_static_inbound: self.app.gossip_static_inbound.clone(), @@ -331,10 +294,7 @@ impl Configs { }, block_store, validator: self.validator_key.as_ref().map(|key| executor::Validator { - config: executor::ValidatorConfig { - key: key.clone(), - public_addr: self.app.public_addr, - }, + key: key.clone(), replica_store: Box::new(store), payload_manager: Box::new(bft::testonly::RandomPayload(self.app.max_payload_size)), }), diff --git a/node/tools/src/k8s.rs b/node/tools/src/k8s.rs index ba2f39b4..89735d03 100644 --- a/node/tools/src/k8s.rs +++ b/node/tools/src/k8s.rs @@ -3,17 +3,22 @@ use anyhow::{anyhow, ensure, Context}; use k8s_openapi::{ api::{ apps::v1::{Deployment, DeploymentSpec}, - core::v1::{Container, Namespace, Pod, PodSpec, PodTemplateSpec}, + core::v1::{ + Container, ContainerPort, EnvVar, EnvVarSource, HTTPGetAction, Namespace, + ObjectFieldSelector, Pod, PodSpec, PodTemplateSpec, Probe, + }, }, - apimachinery::pkg::apis::meta::v1::LabelSelector, + apimachinery::pkg::{apis::meta::v1::LabelSelector, util::intstr::IntOrString::Int}, }; use kube::{ api::{ListParams, PostParams}, core::{ObjectList, ObjectMeta}, Api, Client, ResourceExt, }; -use serde_json::json; -use std::{collections::HashMap, net::SocketAddr}; +use std::{ + collections::{BTreeMap, HashMap}, + net::SocketAddr, +}; use tokio_retry::strategy::FixedInterval; use tokio_retry::Retry; use tracing::log::info; @@ -39,44 +44,38 @@ pub async fn get_consensus_nodes_address(client: &Client) -> anyhow::Result = pods - .into_iter() - .filter_map(|pod| { - let pod_spec = 
pod.spec.context("Failed to get pod spec").ok()?; - let pod_running_container = pod_spec - .containers - .first() - .context("Failed to get pod container") - .ok()? - .to_owned(); - let docker_image = pod_running_container - .image - .context("Failed to get pod docker image") - .ok()?; - - if docker_image.contains(DOCKER_IMAGE_NAME) { - let pod_ip = pod - .status - .context("Failed to get pod status") - .ok()? - .pod_ip - .context("Failed to get pod ip") - .ok()?; - let port = pod_running_container.ports?.iter().find_map(|port| { - let port = port.container_port.try_into().ok()?; + let mut node_rpc_addresses: Vec = Vec::new(); + for pod in pods.into_iter() { + let pod_spec = pod.spec.as_ref().context("Failed to get pod spec")?; + let pod_container = pod_spec + .containers + .first() + .context("Failed to get container")?; + if pod_container + .image + .as_ref() + .context("Failed to get image")? + .contains(DOCKER_IMAGE_NAME) + { + let pod_ip = pod + .status + .context("Failed to get pod status")? + .pod_ip + .context("Failed to get pod ip")?; + let pod_rpc_port = pod_container + .ports + .as_ref() + .context("Failed to get ports of container")? + .iter() + .find_map(|port| { + let port: u16 = port.container_port.try_into().ok()?; (port != config::NODES_PORT).then_some(port) - }); - Some(SocketAddr::new(pod_ip.parse().ok()?, port?)) - } else { - None - } - }) - .collect(); - ensure!( - !pod_addresses.is_empty(), - "No consensus pods found in the k8s cluster" - ); - Ok(pod_addresses) + }) + .context("Failed parsing container port")?; + node_rpc_addresses.push(SocketAddr::new(pod_ip.parse()?, pod_rpc_port)); + } + } + Ok(node_rpc_addresses) } /// Creates a namespace in k8s cluster @@ -84,16 +83,14 @@ pub async fn create_or_reuse_namespace(client: &Client, name: &str) -> anyhow::R let namespaces: Api = Api::all(client.clone()); match namespaces.get_opt(name).await? 
{ None => { - let namespace: Namespace = serde_json::from_value(json!({ - "apiVersion": "v1", - "kind": "Namespace", - "metadata": { - "name": name, - "labels": { - "name": name - } - } - }))?; + let namespace = Namespace { + metadata: ObjectMeta { + name: Some(name.to_owned()), + labels: Some(BTreeMap::from([("name".to_owned(), name.to_owned())])), + ..Default::default() + }, + ..Default::default() + }; let namespaces: Api = Api::all(client.clone()); let post_params = PostParams::default(); @@ -180,76 +177,87 @@ pub async fn deploy_node( ) -> anyhow::Result<()> { let cli_args = get_cli_args(peers); let node_name = format!("consensus-node-{node_index:0>2}"); - let deployment: Deployment = serde_json::from_value(json!({ - "apiVersion": "apps/v1", - "kind": "Deployment", - "metadata": { - "name": node_name, - "namespace": namespace - }, - "spec": { - "selector": { - "matchLabels": { - "app": node_name - } + let deployment = Deployment { + metadata: ObjectMeta { + name: Some(node_name.to_owned()), + namespace: Some(namespace.to_owned()), + ..Default::default() + }, + spec: Some(DeploymentSpec { + selector: LabelSelector { + match_labels: Some(BTreeMap::from([("app".to_owned(), node_name.to_owned())])), + ..Default::default() }, - "replicas": 1, - "template": { - "metadata": { - "labels": { - "app": node_name, - "id": node_name, - "seed": is_seed.to_string() - } - }, - "spec": { - "containers": [ - { - "name": node_name, - "image": DOCKER_IMAGE_NAME, - "env": [ - { - "name": "NODE_ID", - "value": node_name - }, - { - "name": "PUBLIC_ADDR", - "valueFrom": { - "fieldRef": { - "fieldPath": "status.podIP" - } - } - } - ], - "command": ["./k8s_entrypoint.sh"], - "args": cli_args, - "imagePullPolicy": "Never", - "ports": [ - { - "containerPort": config::NODES_PORT - }, - { - "containerPort": 3154 - } - ], - "livenessProbe": { - "httpGet": { - "path": "/health", - "port": 3154 - } - }, - "readinessProbe": { - "httpGet": { - "path": "/health", - "port": 3154 - } - } - } - ] - } - } - } - }))?; + replicas: Some(1), + template: PodTemplateSpec { + metadata: Some(ObjectMeta { + labels: Some(BTreeMap::from([ + ("app".to_owned(), node_name.to_owned()), + ("id".to_owned(), node_name.to_owned()), + ("seed".to_owned(), is_seed.to_string()), + ])), + ..Default::default() + }), + spec: Some(PodSpec { + containers: vec![Container { + name: node_name.to_owned(), + image: Some("consensus-node".to_owned()), + env: Some(vec![ + EnvVar { + name: "NODE_ID".to_owned(), + value: Some(node_name.to_owned()), + ..Default::default() + }, + EnvVar { + name: "PUBLIC_ADDR".to_owned(), + value_from: Some(EnvVarSource { + field_ref: Some(ObjectFieldSelector { + field_path: "status.podIP".to_owned(), + ..Default::default() + }), + ..Default::default() + }), + ..Default::default() + }, + ]), + command: Some(vec!["./k8s_entrypoint.sh".to_owned()]), + args: Some(cli_args), + image_pull_policy: Some("Never".to_owned()), + ports: Some(vec![ + ContainerPort { + container_port: i32::from(config::NODES_PORT), + ..Default::default() + }, + ContainerPort { + container_port: 3154, + ..Default::default() + }, + ]), + liveness_probe: Some(Probe { + http_get: Some(HTTPGetAction { + path: Some("/health".to_owned()), + port: Int(3154), + ..Default::default() + }), + ..Default::default() + }), + readiness_probe: Some(Probe { + http_get: Some(HTTPGetAction { + path: Some("/health".to_owned()), + port: Int(3154), + ..Default::default() + }), + ..Default::default() + }), + ..Default::default() + }], + ..Default::default() + }), + }, + 
..Default::default() + }), + ..Default::default() + }; let deployments: Api = Api::namespaced(client.clone(), namespace); let post_params = PostParams::default(); @@ -279,7 +287,7 @@ pub async fn get_seed_node_addrs( let pod_list = Retry::spawn(retry_strategy, || get_seed_pods(&pods, amount)).await?; for p in pod_list { - let node_id = p.labels()["id"].clone(); + let node_id = p.labels()["id"].to_owned(); seed_nodes.insert( node_id, p.status diff --git a/node/tools/src/main.rs b/node/tools/src/main.rs index 33ff0beb..22056d9d 100644 --- a/node/tools/src/main.rs +++ b/node/tools/src/main.rs @@ -8,7 +8,6 @@ use tracing_subscriber::{prelude::*, Registry}; use vise_exporter::MetricsExporter; use zksync_concurrency::{ctx, scope}; use zksync_consensus_tools::{decode_json, ConfigPaths, NodeAddr, RPCServer}; -use zksync_consensus_utils::no_copy::NoCopy; use zksync_protobuf::serde::Serde; /// Wrapper for Vec. @@ -133,10 +132,8 @@ async fn main() -> anyhow::Result<()> { // Initialize the storage. scope::run!(ctx, |ctx, s| async { - if let Some(addr) = configs.app.metrics_server_addr { - let addr = NoCopy::from(addr); + if let Some(addr) = &configs.app.metrics_server_addr { s.spawn_bg(async { - let addr = addr; MetricsExporter::default() .with_graceful_shutdown(ctx.canceled()) .start(*addr) diff --git a/node/tools/src/proto/mod.proto b/node/tools/src/proto/mod.proto index 3cac756d..5be829df 100644 --- a/node/tools/src/proto/mod.proto +++ b/node/tools/src/proto/mod.proto @@ -27,21 +27,16 @@ // NodePublicKey - public key of the node (gossip network participant) of the form "node:public::" // Currently only ed25519 signature scheme is supported for nodes. // example: "node:public:ed25519:d36607699a0a3fbe3de16947928cf299484219ff62ca20f387795b0859dbe501" -// -// FinalBlock - hex encoded serialized roles.validator.FinalBlock. -// Used to specify the genesis block of the chain. -// This blob of data is not customizable and we don't want to impose -// json-level backward compatibility on anything else but the configs. -// TODO(gprusak): either don't include it at all (derive genesis purely from -// the validator set) or move it to a separate config file. syntax = "proto3"; package zksync.tools; +import "zksync/roles/validator.proto"; + // (public key, ip address) of a gossip network node. message NodeAddr { - optional string key = 1; // [required] NodePublicKey - optional string addr = 2; // [required] IpAddr + optional string key = 1; // required; NodePublicKey + optional string addr = 2; // required; IpAddr } // Application configuration. @@ -50,36 +45,32 @@ message AppConfig { // IP:port to listen on, for incoming TCP connections. // Use `0.0.0.0:` to listen on all network interfaces (i.e. on all IPs exposed by this VM). - optional string server_addr = 1; // [required] IpAddr + optional string server_addr = 1; // required; IpAddr // Public IP:port to advertise, should forward to server_addr. - optional string public_addr = 2; // [required] IpAddr + optional string public_addr = 2; // required; IpAddr // IP:port to serve metrics data for scraping. // Use `0.0.0.0:` to listen on all network interfaces. // If not set, metrics data won't be served. - optional string metrics_server_addr = 3; // [optional] IpAddr + optional string metrics_server_addr = 3; // optional; IpAddr // Consensus - // Public keys of all validators. - repeated string validators = 5; // [required] ValidatorPublicKey - - // Genesis block of the blockchain. - // Will be inserted to storage if not already present. 
- optional string genesis_block = 6; // [required] FinalBlock + // Specification of the chain. + optional roles.validator.Genesis genesis = 4; // required // Maximal size of the block payload. - optional uint64 max_payload_size = 11; // [required] B + optional uint64 max_payload_size = 5; // required; bytes // Gossip network // Limit on the number of gossip network inbound connections outside // of the `gossip_static_inbound` set. - optional uint64 gossip_dynamic_inbound_limit = 8; // [required] + optional uint64 gossip_dynamic_inbound_limit = 6; // required // Inbound connections that should be unconditionally accepted on the gossip network. - repeated string gossip_static_inbound = 9; // NodePublicKey + repeated string gossip_static_inbound = 7; // NodePublicKey // Outbound gossip network connections that the node should actively try to // establish and maintain. - repeated NodeAddr gossip_static_outbound = 10; + repeated NodeAddr gossip_static_outbound = 8; } diff --git a/node/tools/src/rpc/methods/last_view.rs b/node/tools/src/rpc/methods/last_view.rs index 705a7df4..832e017c 100644 --- a/node/tools/src/rpc/methods/last_view.rs +++ b/node/tools/src/rpc/methods/last_view.rs @@ -1,14 +1,16 @@ //! Peers method for RPC server. use jsonrpsee::core::RpcResult; use std::sync::Arc; -use zksync_consensus_storage::{BlockStore, ReplicaState}; +use zksync_consensus_storage::BlockStore; /// Config response for /config endpoint. pub fn callback(node_storage: Arc) -> RpcResult { let sub = &mut node_storage.subscribe(); let state = sub.borrow().clone(); - let replica_state = ReplicaState::from(state.last).view; - Ok(serde_json::json!(replica_state)) + let a = state.last.unwrap().view().number; + Ok(serde_json::json!({ + "last_view": a + })) } /// Config method name. diff --git a/node/tools/src/store.rs b/node/tools/src/store.rs index 189ee93e..f2832ae3 100644 --- a/node/tools/src/store.rs +++ b/node/tools/src/store.rs @@ -8,7 +8,7 @@ use std::{ }; use zksync_concurrency::{ctx, error::Wrap as _, scope}; use zksync_consensus_roles::validator; -use zksync_consensus_storage::{BlockStoreState, PersistentBlockStore, ReplicaState, ReplicaStore}; +use zksync_consensus_storage::{PersistentBlockStore, ReplicaState, ReplicaStore}; /// Enum used to represent a key in the database. It also acts as a separator between different stores. #[derive(Debug, Clone, PartialEq, Eq)] @@ -47,62 +47,51 @@ impl DatabaseKey { } } +struct Inner { + genesis: validator::Genesis, + db: RwLock, +} + /// Main struct for the Storage module, it just contains the database. Provides a set of high-level /// atomic operations on the database. It "contains" the following data: /// /// - An append-only database of finalized blocks. /// - A backup of the consensus replica state. #[derive(Clone)] -pub(crate) struct RocksDB(Arc>); +pub(crate) struct RocksDB(Arc); impl RocksDB { /// Create a new Storage. It first tries to open an existing database, and if that fails it just creates a /// a new one. We need the genesis block of the chain as input. 
- pub(crate) async fn open(path: &Path) -> ctx::Result { + pub(crate) async fn open(genesis: validator::Genesis, path: &Path) -> ctx::Result { let mut options = rocksdb::Options::default(); options.create_missing_column_families(true); options.create_if_missing(true); - Ok(Self(Arc::new(RwLock::new( - scope::wait_blocking(|| { - rocksdb::DB::open(&options, path).context("Failed opening RocksDB") - }) - .await?, - )))) + Ok(Self(Arc::new(Inner { + genesis, + db: RwLock::new( + scope::wait_blocking(|| { + rocksdb::DB::open(&options, path).context("Failed opening RocksDB") + }) + .await?, + ), + }))) } - fn state_blocking(&self) -> anyhow::Result> { - let db = self.0.read().unwrap(); - - let mut options = ReadOptions::default(); - options.set_iterate_range(DatabaseKey::BLOCKS_START_KEY..); - let Some(res) = db.iterator_opt(IteratorMode::Start, options).next() else { - return Ok(None); - }; - let (_, first) = res.context("RocksDB error reading first stored block")?; - let first: validator::FinalBlock = - zksync_protobuf::decode(&first).context("Failed decoding first stored block bytes")?; - + fn last_blocking(&self) -> anyhow::Result> { + let db = self.0.db.read().unwrap(); let mut options = ReadOptions::default(); options.set_iterate_range(DatabaseKey::BLOCKS_START_KEY..); - let (_, last) = db + let Some(res) = db .iterator_opt(DatabaseKey::BLOCK_HEAD_ITERATOR, options) .next() - .context("last block not found")? - .context("RocksDB error reading head block")?; + else { + return Ok(None); + }; + let (_, last) = res.context("RocksDB error reading head block")?; let last: validator::FinalBlock = zksync_protobuf::decode(&last).context("Failed decoding head block bytes")?; - - Ok(Some(BlockStoreState { - first: first.justification, - last: last.justification, - })) - } - - /// Checks if BlockStore is empty. - pub(crate) async fn is_empty(&self) -> anyhow::Result { - Ok(scope::wait_blocking(|| self.state_blocking()) - .await? - .is_none()) + Ok(Some(last.justification)) } } @@ -114,10 +103,12 @@ impl fmt::Debug for RocksDB { #[async_trait::async_trait] impl PersistentBlockStore for RocksDB { - async fn state(&self, _ctx: &ctx::Ctx) -> ctx::Result { - Ok(scope::wait_blocking(|| self.state_blocking()) - .await? - .context("storage is empty")?) + async fn genesis(&self, _ctx: &ctx::Ctx) -> ctx::Result { + Ok(self.0.genesis.clone()) + } + + async fn last(&self, _ctx: &ctx::Ctx) -> ctx::Result> { + Ok(scope::wait_blocking(|| self.last_blocking()).await?) } async fn block( @@ -126,7 +117,7 @@ impl PersistentBlockStore for RocksDB { number: validator::BlockNumber, ) -> ctx::Result { scope::wait_blocking(|| { - let db = self.0.read().unwrap(); + let db = self.0.db.read().unwrap(); let block = db .get(DatabaseKey::Block(number).encode_key()) .context("RocksDB error")? @@ -144,7 +135,7 @@ impl PersistentBlockStore for RocksDB { block: &validator::FinalBlock, ) -> ctx::Result<()> { scope::wait_blocking(|| { - let db = self.0.write().unwrap(); + let db = self.0.db.write().unwrap(); let block_number = block.header().number; let mut write_batch = rocksdb::WriteBatch::default(); write_batch.put( @@ -163,20 +154,19 @@ impl PersistentBlockStore for RocksDB { #[async_trait::async_trait] impl ReplicaStore for RocksDB { - async fn state(&self, _ctx: &ctx::Ctx) -> ctx::Result> { + async fn state(&self, _ctx: &ctx::Ctx) -> ctx::Result { Ok(scope::wait_blocking(|| { let Some(raw_state) = self .0 + .db .read() .unwrap() .get(DatabaseKey::ReplicaState.encode_key()) .context("Failed to get ReplicaState from RocksDB")? 
else { - return Ok(None); + return Ok(ReplicaState::default()); }; - zksync_protobuf::decode(&raw_state) - .map(Some) - .context("Failed to decode replica state!") + zksync_protobuf::decode(&raw_state).context("Failed to decode replica state!") }) .await?) } @@ -184,6 +174,7 @@ impl ReplicaStore for RocksDB { async fn set_state(&self, _ctx: &ctx::Ctx, state: &ReplicaState) -> ctx::Result<()> { Ok(scope::wait_blocking(|| { self.0 + .db .write() .unwrap() .put( diff --git a/node/tools/src/tests.rs b/node/tools/src/tests.rs index f758349c..15d2f840 100644 --- a/node/tools/src/tests.rs +++ b/node/tools/src/tests.rs @@ -5,7 +5,7 @@ use rand::{ }; use tempfile::TempDir; use zksync_concurrency::ctx; -use zksync_consensus_roles::{node, validator::testonly::GenesisSetup}; +use zksync_consensus_roles::{node, validator::testonly::Setup}; use zksync_consensus_storage::{testonly, PersistentBlockStore}; use zksync_protobuf::testonly::test_encode_random; @@ -15,22 +15,22 @@ fn make_addr(rng: &mut R) -> std::net::SocketAddr { impl Distribution for Standard { fn sample(&self, rng: &mut R) -> AppConfig { - let (mut config, _) = AppConfig::default_for(1); - config - .with_server_addr(make_addr(rng)) - .with_public_addr(make_addr(rng)) - .with_metrics_server_addr(make_addr(rng)) - .with_gossip_dynamic_inbound_limit(rng.gen()) - .with_gossip_dynamic_inbound_limit(rng.gen()) - .with_max_payload_size(rng.gen()); - (0..5).for_each(|_| { - let _ = config.add_gossip_static_inbound(rng.gen::().public()); - }); - (0..6).for_each(|_| { - let _ = config - .add_gossip_static_outbound(rng.gen::().public(), make_addr(rng)); - }); - config + AppConfig { + server_addr: make_addr(rng), + public_addr: make_addr(rng), + metrics_server_addr: Some(make_addr(rng)), + + genesis: rng.gen(), + + gossip_dynamic_inbound_limit: rng.gen(), + gossip_static_inbound: (0..5) + .map(|_| rng.gen::().public()) + .collect(), + gossip_static_outbound: (0..6) + .map(|_| (rng.gen::().public(), make_addr(rng))) + .collect(), + max_payload_size: rng.gen(), + } } } @@ -46,11 +46,13 @@ async fn test_reopen_rocksdb() { let ctx = &ctx::test_root(&ctx::RealClock); let rng = &mut ctx.rng(); let dir = TempDir::new().unwrap(); - let mut setup = GenesisSetup::empty(rng, 3); + let mut setup = Setup::new(rng, 3); setup.push_blocks(rng, 5); let mut want = vec![]; for b in &setup.blocks { - let store = store::RocksDB::open(dir.path()).await.unwrap(); + let store = store::RocksDB::open(setup.genesis.clone(), dir.path()) + .await + .unwrap(); store.store_next_block(ctx, b).await.unwrap(); want.push(b.clone()); assert_eq!(want, testonly::dump(ctx, &store).await); From c366ecc8aec611c2fc157d7aede91f11a1f0b655 Mon Sep 17 00:00:00 2001 From: IAvecilla Date: Thu, 7 Mar 2024 12:16:05 -0300 Subject: [PATCH 135/139] Update comments and delete serialize for view number --- .../roles/src/validator/messages/consensus.rs | 2 +- node/tools/src/rpc/methods/last_view.rs | 20 ++++++++++++++----- 2 files changed, 16 insertions(+), 6 deletions(-) diff --git a/node/libs/roles/src/validator/messages/consensus.rs b/node/libs/roles/src/validator/messages/consensus.rs index 00ec37cb..51502bf6 100644 --- a/node/libs/roles/src/validator/messages/consensus.rs +++ b/node/libs/roles/src/validator/messages/consensus.rs @@ -358,7 +358,7 @@ impl std::ops::BitAnd for &Signers { } /// A struct that represents a view number. 
-#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize)]
+#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
 pub struct ViewNumber(pub u64);
 
 impl ViewNumber {
diff --git a/node/tools/src/rpc/methods/last_view.rs b/node/tools/src/rpc/methods/last_view.rs
index 832e017c..162ca902 100644
--- a/node/tools/src/rpc/methods/last_view.rs
+++ b/node/tools/src/rpc/methods/last_view.rs
@@ -1,19 +1,29 @@
 //! Peers method for RPC server.
-use jsonrpsee::core::RpcResult;
+use anyhow::Context;
+use jsonrpsee::{
+ core::RpcResult,
+ types::{error::ErrorCode, ErrorObjectOwned},
+};
 use std::sync::Arc;
 use zksync_consensus_storage::BlockStore;
 
-/// Config response for /config endpoint.
+/// Last view response for /last_view endpoint.
 pub fn callback(node_storage: Arc<BlockStore>) -> RpcResult<serde_json::Value> {
 let sub = &mut node_storage.subscribe();
 let state = sub.borrow().clone();
- let a = state.last.unwrap().view().number;
+ let last_view = state
+ .last
+ .context("Failed to get last state")
+ .map_err(|_| ErrorObjectOwned::from(ErrorCode::InternalError))?
+ .view()
+ .number
+ .0;
 Ok(serde_json::json!({
- "last_view": a
+ "last_view": last_view
 }))
 }
 
-/// Config method name.
+/// Last view method name.
 pub fn method() -> &'static str {
 "last_view"
 }

From f3569cdc0030c7faa68aa5cf7a17ea5fe3179552 Mon Sep 17 00:00:00 2001
From: IAvecilla
Date: Thu, 7 Mar 2024 12:36:49 -0300
Subject: [PATCH 136/139] Delete unused RPC endpoints

---
 .../roles/src/validator/messages/consensus.rs | 1 -
 node/tools/src/main.rs | 6 ++--
 node/tools/src/rpc/methods/config.rs | 22 -------------
 node/tools/src/rpc/methods/mod.rs | 2 --
 node/tools/src/rpc/methods/peers.rs | 33 -------------------
 node/tools/src/rpc/server.rs | 18 ++--------
 6 files changed, 4 insertions(+), 78 deletions(-)
 delete mode 100644 node/tools/src/rpc/methods/config.rs
 delete mode 100644 node/tools/src/rpc/methods/peers.rs

diff --git a/node/libs/roles/src/validator/messages/consensus.rs b/node/libs/roles/src/validator/messages/consensus.rs
index 51502bf6..870355b0 100644
--- a/node/libs/roles/src/validator/messages/consensus.rs
+++ b/node/libs/roles/src/validator/messages/consensus.rs
@@ -4,7 +4,6 @@ use super::{
 };
 use crate::validator;
 use bit_vec::BitVec;
-use serde::Serialize;
 use std::{
 collections::{BTreeMap, BTreeSet},
 fmt,
diff --git a/node/tools/src/main.rs b/node/tools/src/main.rs
index 22056d9d..4d74990b 100644
--- a/node/tools/src/main.rs
+++ b/node/tools/src/main.rs
@@ -124,11 +124,9 @@ async fn main() -> anyhow::Result<()> {
 rpc_addr.set_port(rpc_addr.port() + 100);
 }
 
+ // Create the RPC server with the executor's storage.
 let node_storage = executor.block_store.clone();
-
- // cloning configuration to let RPCServer show it
- // TODO this should be queried in real time instead, to reflect any possible change in config
- let rpc_server = RPCServer::new(rpc_addr, configs.app.clone(), node_storage);
+ let rpc_server = RPCServer::new(rpc_addr, node_storage);
 
 // Initialize the storage.
 scope::run!(ctx, |ctx, s| async {
diff --git a/node/tools/src/rpc/methods/config.rs b/node/tools/src/rpc/methods/config.rs
deleted file mode 100644
index 2dd77498..00000000
--- a/node/tools/src/rpc/methods/config.rs
+++ /dev/null
@@ -1,22 +0,0 @@
-//! Peers method for RPC server.
-use crate::{config::encode_json, AppConfig};
-use jsonrpsee::core::RpcResult;
-use zksync_protobuf::serde::Serde;
-
-/// Config response for /config endpoint.
-pub fn callback(config: AppConfig) -> RpcResult<serde_json::Value> {
-    // This may change in the future since we are assuming that the executor binary is being run inside the config directory.
-    Ok(serde_json::json!({
-        "config": encode_json(&Serde(config))
-    }))
-}
-
-/// Config method name.
-pub fn method() -> &'static str {
-    "config"
-}
-
-/// Method path for GET requests.
-pub fn path() -> &'static str {
-    "/config"
-}
diff --git a/node/tools/src/rpc/methods/mod.rs b/node/tools/src/rpc/methods/mod.rs
index a0ef7d36..0ba70504 100644
--- a/node/tools/src/rpc/methods/mod.rs
+++ b/node/tools/src/rpc/methods/mod.rs
@@ -1,4 +1,2 @@
-pub mod config;
 pub mod health_check;
 pub mod last_view;
-pub mod peers;
diff --git a/node/tools/src/rpc/methods/peers.rs b/node/tools/src/rpc/methods/peers.rs
deleted file mode 100644
index 58324529..00000000
--- a/node/tools/src/rpc/methods/peers.rs
+++ /dev/null
@@ -1,33 +0,0 @@
-//! Peers method for RPC server.
-use crate::{decode_json, AppConfig};
-use jsonrpsee::{core::RpcResult, types::error::ErrorCode};
-use std::fs::{self};
-use zksync_consensus_crypto::TextFmt;
-use zksync_protobuf::serde::Serde;
-
-/// Peers response for /peers endpoint.
-pub fn callback() -> RpcResult<serde_json::Value> {
-    // This may change in the future since we are assuming that the executor binary is being run inside the config directory.
-    let node_config = fs::read_to_string("config.json").map_err(|_e| ErrorCode::InternalError)?;
-    let node_config = decode_json::<Serde<AppConfig>>(&node_config)
-        .map_err(|_e| ErrorCode::InternalError)?
-        .0;
-    let peers: Vec<String> = node_config
-        .gossip_static_inbound
-        .iter()
-        .map(|x| x.encode())
-        .collect();
-    Ok(serde_json::json!({
-        "peers": peers
-    }))
-}
-
-/// Peers method name.
-pub fn method() -> &'static str {
-    "peers"
-}
-
-/// Method path for GET requests.
-pub fn path() -> &'static str {
-    "/peers"
-}
diff --git a/node/tools/src/rpc/server.rs b/node/tools/src/rpc/server.rs
index 2a5c8a51..32fb2899 100644
--- a/node/tools/src/rpc/server.rs
+++ b/node/tools/src/rpc/server.rs
@@ -1,6 +1,4 @@
-use crate::AppConfig;
-
-use super::methods::{config, health_check, last_view, peers};
+use super::methods::{health_check, last_view};
 use jsonrpsee::server::{middleware::http::ProxyGetRequestLayer, RpcModule, Server};
 use std::{net::SocketAddr, sync::Arc};
 use zksync_concurrency::{ctx, scope};
@@ -10,17 +8,14 @@ pub struct RPCServer {
     /// IP address to bind to.
     ip_address: SocketAddr,
-    /// AppConfig
-    config: AppConfig,
     /// Node storage.
     node_storage: Arc<BlockStore>,
 }

 impl RPCServer {
-    pub fn new(ip_address: SocketAddr, config: AppConfig, node_storage: Arc<BlockStore>) -> Self {
+    pub fn new(ip_address: SocketAddr, node_storage: Arc<BlockStore>) -> Self {
         Self {
             ip_address,
-            config,
             node_storage,
         }
     }
@@ -34,8 +29,6 @@ impl RPCServer {
             .layer(ProxyGetRequestLayer::new(
                 health_check::path(),
                 health_check::method(),
             )?)
-            .layer(ProxyGetRequestLayer::new(peers::path(), peers::method())?)
-            .layer(ProxyGetRequestLayer::new(config::path(), config::method())?)
             .layer(ProxyGetRequestLayer::new(
                 last_view::path(),
                 last_view::method(),
@@ -45,13 +38,6 @@ impl RPCServer {
         module.register_method(health_check::method(), |_params, _| {
             health_check::callback()
         })?;
-        module.register_method(peers::method(), |_params, _| peers::callback())?;
-
-        // TODO find a better way to implement this as I had to clone the clone and move it to pass the borrow checker
-        let config = self.config.clone();
-        module.register_method(config::method(), move |_params, _| {
-            config::callback(config.clone())
-        })?;

From dc8289b9371b75a269e327136ffc9493b09fb2a1 Mon Sep 17 00:00:00 2001
From: IAvecilla
Date: Fri, 8 Mar 2024 11:14:41 -0300
Subject: [PATCH 137/139] Add endpoint to get last committed block for a replica

---
 .../src/rpc/methods/last_commited_block.rs    | 34 +++++++++++++++++++
 node/tools/src/rpc/methods/mod.rs             |  1 +
 node/tools/src/rpc/server.rs                  | 11 +++++-
 3 files changed, 45 insertions(+), 1 deletion(-)
 create mode 100644 node/tools/src/rpc/methods/last_commited_block.rs

diff --git a/node/tools/src/rpc/methods/last_commited_block.rs b/node/tools/src/rpc/methods/last_commited_block.rs
new file mode 100644
index 00000000..1f28bdb7
--- /dev/null
+++ b/node/tools/src/rpc/methods/last_commited_block.rs
@@ -0,0 +1,34 @@
+//! Last committed block method for RPC server.
+use anyhow::Context;
+use jsonrpsee::{
+    core::RpcResult,
+    types::{error::ErrorCode, ErrorObjectOwned},
+};
+use std::sync::Arc;
+use zksync_consensus_storage::BlockStore;
+
+/// Last committed block response for /last_commited_block endpoint.
+pub fn callback(node_storage: Arc<BlockStore>) -> RpcResult<serde_json::Value> {
+    let sub = &mut node_storage.subscribe();
+    let state = sub.borrow().clone();
+    let last_commited_block_header = state
+        .last
+        .context("Failed to get last state")
+        .map_err(|_| ErrorObjectOwned::from(ErrorCode::InternalError))?
+        .header()
+        .number
+        .0;
+    Ok(serde_json::json!({
+        "last_commited_block": last_commited_block_header
+    }))
+}
+
+/// Last committed block method name.
+pub fn method() -> &'static str {
+    "last_commited_block"
+}
+
+/// Method path for GET requests.
+pub fn path() -> &'static str {
+    "/last_commited_block"
+}
diff --git a/node/tools/src/rpc/methods/mod.rs b/node/tools/src/rpc/methods/mod.rs
index 0ba70504..dc44e62d 100644
--- a/node/tools/src/rpc/methods/mod.rs
+++ b/node/tools/src/rpc/methods/mod.rs
@@ -1,2 +1,3 @@
 pub mod health_check;
+pub mod last_commited_block;
 pub mod last_view;
diff --git a/node/tools/src/rpc/server.rs b/node/tools/src/rpc/server.rs
index 32fb2899..e9522051 100644
--- a/node/tools/src/rpc/server.rs
+++ b/node/tools/src/rpc/server.rs
@@ -1,4 +1,4 @@
-use super::methods::{health_check, last_view};
+use super::methods::{health_check, last_commited_block, last_view};
 use jsonrpsee::server::{middleware::http::ProxyGetRequestLayer, RpcModule, Server};
 use std::{net::SocketAddr, sync::Arc};
 use zksync_concurrency::{ctx, scope};
@@ -32,6 +32,10 @@ impl RPCServer {
             .layer(ProxyGetRequestLayer::new(
                 last_view::path(),
                 last_view::method(),
+            )?)
+            .layer(ProxyGetRequestLayer::new(
+                last_commited_block::path(),
+                last_commited_block::method(),
             )?);
@@ -44,6 +48,11 @@ impl RPCServer {
             last_view::callback(node_storage.clone())
         })?;

+        let node_storage = self.node_storage.clone();
+        module.register_method(last_commited_block::method(), move |_params, _| {
+            last_commited_block::callback(node_storage.clone())
+        })?;
+
         let server = Server::builder()
             .set_http_middleware(service_builder)
             .build(self.ip_address)

From d37329f231cd25ddade14dc20a2c3c6bb2dfc797 Mon Sep 17 00:00:00 2001
From: IAvecilla
Date: Mon, 11 Mar 2024 17:11:11 -0300
Subject: [PATCH 138/139] Fix unnecessary iter conversion

---
 node/tools/src/k8s.rs | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/node/tools/src/k8s.rs b/node/tools/src/k8s.rs
index 89735d03..0e79aa52 100644
--- a/node/tools/src/k8s.rs
+++ b/node/tools/src/k8s.rs
@@ -45,7 +45,7 @@ pub async fn get_consensus_nodes_address(client: &Client) -> anyhow::Result<Vec<SocketAddr>> {
     let mut node_addresses: Vec<SocketAddr> = Vec::new();
-    for pod in pods.into_iter() {
+    for pod in pods {
         let pod_spec = pod.spec.as_ref().context("Failed to get pod spec")?;
         let pod_container = pod_spec
             .containers
@@ -68,11 +68,11 @@ pub async fn get_consensus_nodes_address(client: &Client) -> anyhow::Result<Vec<SocketAddr>> {

From: IAvecilla
Date: Fri, 15 Mar 2024 10:35:14 -0300
Subject: [PATCH 139/139] Remove old changes

---
 node/tools/src/k8s.rs | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/node/tools/src/k8s.rs b/node/tools/src/k8s.rs
index a1b9e16a..5de1ace1 100644
--- a/node/tools/src/k8s.rs
+++ b/node/tools/src/k8s.rs
@@ -215,11 +215,11 @@ pub async fn get_consensus_nodes_address(client: &Client) -> anyhow::Result<Vec<SocketAddr>> {
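
Usage note: with the ProxyGetRequestLayer routes registered in server.rs, each RPC method is reachable both as a JSON-RPC call and as a plain HTTP GET on its path. A minimal smoke test of the endpoints kept and added in this series could look like the shell session below. The address is an assumption for illustration (use whatever host and RPC port the node was actually started with); it is not a value fixed by these patches.

    # Hypothetical RPC address; substitute the node's real one.
    RPC_ADDR="127.0.0.1:3154"

    # GET requests are proxied to the matching JSON-RPC methods.
    curl -s "http://${RPC_ADDR}/health_check"
    curl -s "http://${RPC_ADDR}/last_view"            # "result" carries {"last_view": <u64>}
    curl -s "http://${RPC_ADDR}/last_commited_block"  # "result" carries {"last_commited_block": <u64>}

    # Equivalent plain JSON-RPC call:
    curl -s -X POST "http://${RPC_ADDR}" \
      -H 'Content-Type: application/json' \
      -d '{"jsonrpc":"2.0","id":1,"method":"last_commited_block","params":[]}'

Each response is a JSON-RPC envelope; the object noted in the comments appears in its "result" field.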