
Merge branch 'main' into weighted_validators
ElFantasma committed Mar 19, 2024
2 parents 7689c4f + 1f77a0e commit f6a3b52
Showing 9 changed files with 185 additions and 201 deletions.
4 changes: 0 additions & 4 deletions Dockerfile
@@ -17,13 +17,9 @@ COPY --from=builder /app/target/release/tester .
FROM debian:stable-slim as executor-runtime

COPY /node/tools/docker_binaries/executor /node/
COPY /node/tools/k8s_configs/ /node/k8s_config
COPY /node/tools/docker-config/ /node/docker_config
COPY docker-entrypoint.sh /node/
COPY k8s_entrypoint.sh /node/

WORKDIR /node
RUN chmod +x docker-entrypoint.sh
RUN chmod +x k8s_entrypoint.sh

ENTRYPOINT ["./docker-entrypoint.sh"]
3 changes: 1 addition & 2 deletions Makefile
@@ -23,10 +23,9 @@ docker_node_image:
# Kubernetes commands

start_k8s_nodes:
cd ${EXECUTABLE_NODE_DIR} && cargo run --bin deployer generate-config --nodes ${NODES}
$(MAKE) docker_node_image
minikube image load consensus-node:latest
cd ${EXECUTABLE_NODE_DIR} && cargo run --release --bin deployer deploy --nodes ${NODES} --seed-nodes ${SEED_NODES}
cd ${EXECUTABLE_NODE_DIR} && cargo run --release --bin deployer -- --nodes ${NODES} --seed-nodes ${SEED_NODES}

# Clean commands

3 changes: 1 addition & 2 deletions k8s_entrypoint.sh
@@ -1,6 +1,5 @@
#!/bin/bash
# This script is the entrypoint for the kubernetes pods running the node binary copied into the image.

cd k8s_config/${NODE_ID}
export RUST_LOG=INFO
../../executor $@
./executor $@
2 changes: 2 additions & 0 deletions node/Cargo.toml
@@ -156,5 +156,7 @@ wildcard_dependencies = "warn"
redundant_locals = "allow"
needless_pass_by_ref_mut = "allow"
box_default = "allow"
# remove once the fix to https://github.com/rust-lang/rust-clippy/issues/11764 is available on CI.
map_identity = "allow"
# &*x is not equivalent to x, because it affects borrowing in closures.
borrow_deref_ref = "allow"
120 changes: 31 additions & 89 deletions node/tools/src/bin/deployer.rs
@@ -1,9 +1,8 @@
//! Deployer for the kubernetes cluster.
use anyhow::Context;
use clap::{Parser, Subcommand};
use std::{collections::HashMap, fs, path::PathBuf};
use zksync_consensus_crypto::{Text, TextFmt};
use clap::Parser;
use std::collections::HashMap;
use zksync_consensus_roles::{node, validator};
use zksync_consensus_tools::k8s::ConsensusNode;
use zksync_consensus_tools::{k8s, AppConfig};

/// K8s namespace for consensus nodes.
@@ -13,14 +12,6 @@ const NAMESPACE: &str = "consensus";
#[derive(Debug, Parser)]
#[command(name = "deployer")]
struct DeployerCLI {
/// Subcommand to run.
#[command(subcommand)]
command: DeployerCommands,
}

/// Subcommand arguments.
#[derive(Debug, Parser)]
struct SubCommandArgs {
/// Number of total nodes to deploy.
#[arg(long)]
nodes: usize,
@@ -29,19 +20,11 @@ struct SubCommandArgs {
seed_nodes: Option<usize>,
}

/// Subcommands.
#[derive(Subcommand, Debug)]
enum DeployerCommands {
/// Generate configs for the nodes.
GenerateConfig(SubCommandArgs),
/// Deploy the nodes.
Deploy(SubCommandArgs),
}

/// Generates config for the nodes to run in the kubernetes cluster
/// Creates a directory for each node in the parent k8s_configs directory.
fn generate_config(nodes: usize) -> anyhow::Result<()> {
/// Generates the configuration for all the nodes to run in the kubernetes cluster
/// and creates a ConsensusNode for each to track their progress.
fn generate_consensus_nodes(nodes: usize, seed_nodes_amount: Option<usize>) -> Vec<ConsensusNode> {
assert!(nodes > 0, "at least 1 node has to be specified");
let seed_nodes_amount = seed_nodes_amount.unwrap_or(1);

// Generate the keys for all the replicas.
let rng = &mut rand::thread_rng();
@@ -56,54 +39,40 @@ fn generate_config(nodes: usize) -> anyhow::Result<()> {

let default_config = AppConfig::default_for(setup.genesis.clone());

let mut cfgs: Vec<_> = (0..nodes).map(|_| default_config.clone()).collect();
let mut cfgs: Vec<ConsensusNode> = (0..nodes)
.map(|i| ConsensusNode {
id: format!("consensus-node-{i:0>2}"),
config: default_config.clone(),
key: node_keys[i].clone(),
validator_key: Some(validator_keys[i].clone()),
node_addr: None, // It's not assigned yet
is_seed: i < seed_nodes_amount,
})
.collect();

// Construct a gossip network with optimal diameter.
for (i, node) in node_keys.iter().enumerate() {
for j in 0..peers {
let next = (i * peers + j + 1) % nodes;
cfgs[next].add_gossip_static_inbound(node.public());
cfgs[next].config.add_gossip_static_inbound(node.public());
}
}

let manifest_path = std::env::var("CARGO_MANIFEST_DIR")?;
let root = PathBuf::from(manifest_path).join("k8s_configs");
let _ = fs::remove_dir_all(&root);
for (i, cfg) in cfgs.into_iter().enumerate() {
let node_config_dir = root.join(format!("consensus-node-{i:0>2}"));
fs::create_dir_all(&node_config_dir)
.with_context(|| format!("create_dir_all({:?})", node_config_dir))?;

cfg.write_to_file(&node_config_dir)?;
fs::write(
node_config_dir.join("validator_key"),
&TextFmt::encode(&validator_keys[i]),
)
.context("fs::write()")?;
fs::write(
node_config_dir.join("node_key"),
&TextFmt::encode(&node_keys[i]),
)
.context("fs::write()")?;
}

Ok(())
cfgs
}

/// Deploys the nodes to the kubernetes cluster.
async fn deploy(nodes_amount: usize, seed_nodes_amount: Option<usize>) -> anyhow::Result<()> {
let mut consensus_nodes = generate_consensus_nodes(nodes_amount, seed_nodes_amount);
let client = k8s::get_client().await?;
k8s::create_or_reuse_namespace(&client, NAMESPACE).await?;
let seed_nodes_amount = seed_nodes_amount.unwrap_or(1);

let seed_nodes = &mut HashMap::new();
let mut non_seed_nodes = HashMap::new();

// Split the nodes into different hash maps, as they will be deployed at different stages
let mut consensus_nodes = from_configs(nodes_amount)?;
for (index, node) in consensus_nodes.iter_mut().enumerate() {
if index < seed_nodes_amount {
node.is_seed = true;
for node in consensus_nodes.iter_mut() {
if node.is_seed {
seed_nodes.insert(node.id.to_owned(), node);
} else {
non_seed_nodes.insert(node.id.to_owned(), node);
@@ -120,58 +89,31 @@ async fn deploy(nodes_amount: usize, seed_nodes_amount: Option<usize>) -> anyhow
node.fetch_and_assign_pod_ip(&client, NAMESPACE).await?;
}

// Build a vector of seed peers NodeAddrs to provide as gossip_static_outbound to the rest of the nodes
// Build a vector of (PublicKey, SocketAddr) to provide as gossip_static_outbound
// to the rest of the nodes
let peers: Vec<_> = seed_nodes
.values()
.map(|n| {
n.node_addr
let node_addr = n
.node_addr
.as_ref()
.expect("Seed node address not defined")
.clone()
.clone();
(node_addr.key, node_addr.addr)
})
.collect();

// Deploy the rest of the nodes
for node in non_seed_nodes.values_mut() {
node.gossip_static_outbound = peers.clone();
node.config.gossip_static_outbound.extend(peers.clone());
node.deploy(&client, NAMESPACE).await?;
}

Ok(())
}

/// Build ConsensusNodes representation list from configurations
// TODO once we can provide config via cli args, this will be replaced
// using in-memory config structs
fn from_configs(nodes: usize) -> anyhow::Result<Vec<k8s::ConsensusNode>> {
let manifest_path = std::env::var("CARGO_MANIFEST_DIR")?;
let root = PathBuf::from(manifest_path).join("k8s_configs");
let mut consensus_nodes = vec![];

for i in 0..nodes {
let node_id = format!("consensus-node-{i:0>2}");
let node_key_path = root.join(&node_id).join("node_key");
let key_string = fs::read_to_string(node_key_path).context("failed reading file")?;
let key = Text::new(&key_string)
.decode()
.context("failed decoding key")?;
consensus_nodes.push(k8s::ConsensusNode {
id: node_id,
key,
node_addr: None,
is_seed: false,
gossip_static_outbound: vec![],
});
}
Ok(consensus_nodes)
}

#[tokio::main]
async fn main() -> anyhow::Result<()> {
let DeployerCLI { command } = DeployerCLI::parse();

match command {
DeployerCommands::GenerateConfig(args) => generate_config(args.nodes),
DeployerCommands::Deploy(args) => deploy(args.nodes, args.seed_nodes).await,
}
let args = DeployerCLI::parse();
deploy(args.nodes, args.seed_nodes).await
}
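
For orientation, here is roughly the `ConsensusNode` shape this refactor relies on, inferred from the fields used above; the real definition lives in `zksync_consensus_tools::k8s` and may differ:

```rust
use std::net::SocketAddr;
use zksync_consensus_roles::{node, validator};
use zksync_consensus_tools::AppConfig;

/// Sketch of the address record used above: `deploy` builds
/// `(node_addr.key, node_addr.addr)` pairs from it, so it must expose
/// a gossip public key and a socket address.
pub struct NodeAddr {
    pub key: node::PublicKey,
    pub addr: SocketAddr,
}

/// Sketch of the per-node state the deployer now tracks in memory
/// instead of round-tripping configs through a k8s_configs directory.
pub struct ConsensusNode {
    /// Identifier, e.g. "consensus-node-00".
    pub id: String,
    /// Application config, including genesis and gossip peers.
    pub config: AppConfig,
    /// Gossip-network secret key.
    pub key: node::SecretKey,
    /// Validator secret key; `None` for non-validator nodes.
    pub validator_key: Option<validator::SecretKey>,
    /// Assigned by `fetch_and_assign_pod_ip` once the pod is scheduled.
    pub node_addr: Option<NodeAddr>,
    /// Seed nodes are deployed first and serve as gossip entry points
    /// for the remaining nodes.
    pub is_seed: bool,
}
```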
100 changes: 63 additions & 37 deletions node/tools/src/config.rs
@@ -150,57 +150,83 @@ impl ProtoFmt for AppConfig {
}
}

/// This struct holds the file path to each of the config files.
/// Configuration information.
#[derive(Debug)]
pub struct ConfigPaths<'a> {
/// Path to a JSON file with node configuration.
pub app: &'a Path,
/// Path to a validator key file.
pub validator_key: Option<&'a Path>,
/// Path to a node key file.
pub node_key: &'a Path,
pub struct ConfigArgs<'a> {
/// Node configuration.
pub config_args: ConfigSource<'a>,
/// Path to the rocksdb database.
pub database: &'a Path,
}

#[derive(Debug)]
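/// Source of the node configuration: in-memory values provided via the
/// CLI, or paths to configuration files on disk.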
pub enum ConfigSource<'a> {
CliConfig {
/// Node configuration from command line.
config: AppConfig,
/// Node secret key.
node_key: node::SecretKey,
/// Validator secret key, if the node is a validator.
validator_key: Option<validator::SecretKey>,
},
PathConfig {
/// Path to a JSON file with node configuration.
config_file: &'a Path,
/// Path to a validator key file.
validator_key_file: &'a Path,
/// Path to a node key file.
node_key_file: &'a Path,
},
}

pub struct Configs {
pub app: AppConfig,
pub validator_key: Option<validator::SecretKey>,
pub node_key: node::SecretKey,
pub database: PathBuf,
}

impl<'a> ConfigPaths<'a> {
impl<'a> ConfigArgs<'a> {
// Loads the configs from the given source (in-memory CLI values or files on disk).
pub fn load(self) -> anyhow::Result<Configs> {
Ok(Configs {
app: (|| {
let app = fs::read_to_string(self.app).context("failed reading file")?;
decode_json::<Serde<AppConfig>>(&app).context("failed decoding JSON")
})()
.with_context(|| self.app.display().to_string())?
.0,

validator_key: self
.validator_key
.as_ref()
.map(|file| {
(|| {
let key = fs::read_to_string(file).context("failed reading file")?;
Text::new(&key).decode().context("failed decoding key")
})()
.with_context(|| file.display().to_string())
})
.transpose()?,

node_key: (|| {
let key = fs::read_to_string(self.node_key).context("failed reading file")?;
Text::new(&key).decode().context("failed decoding key")
})()
.with_context(|| self.node_key.display().to_string())?,

database: self.database.into(),
})
match self.config_args {
ConfigSource::CliConfig {
config,
node_key,
validator_key,
} => Ok(Configs {
app: config.clone(),
validator_key: validator_key.clone(),
node_key: node_key.clone(),
database: self.database.into(),
}),
ConfigSource::PathConfig {
config_file,
validator_key_file,
node_key_file,
} => Ok(Configs {
app: (|| {
let app = fs::read_to_string(config_file).context("failed reading file")?;
decode_json::<Serde<AppConfig>>(&app).context("failed decoding JSON")
})()
.with_context(|| config_file.display().to_string())?
.0,

validator_key: fs::read_to_string(validator_key_file)
.ok()
.map(|value| Text::new(&value).decode().context("failed decoding key"))
.transpose()
.with_context(|| validator_key_file.display().to_string())?,

node_key: (|| {
let key = fs::read_to_string(node_key_file).context("failed reading file")?;
Text::new(&key).decode().context("failed decoding key")
})()
.with_context(|| node_key_file.display().to_string())?,

database: self.database.into(),
}),
}
}
}
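
A hypothetical call site for the new API (the type names come from this diff; the file names and crate-root re-exports are assumptions):

```rust
use std::path::Path;
use zksync_consensus_tools::{ConfigArgs, ConfigSource, Configs};

fn load_from_disk() -> anyhow::Result<Configs> {
    // Path-based loading, matching what the k8s entrypoint does after
    // cd-ing into the node's config directory; file names are illustrative.
    ConfigArgs {
        config_args: ConfigSource::PathConfig {
            config_file: Path::new("config.json"),
            validator_key_file: Path::new("validator_key"),
            node_key_file: Path::new("node_key"),
        },
        database: Path::new("./database"),
    }
    .load()
}
```

Note that the `PathConfig` arm reads the validator key with `.ok()`, so a missing validator key file yields `None` instead of an error, letting validator and non-validator nodes share one load path.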
