diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index a8cee09e3..edfc11952 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -54,4 +54,4 @@ jobs:
       - name: Get latest version of stable rust
         run: rustup update stable
       - name: Check rustdoc links
-        run: RUSTDOCFLAGS="--deny broken_intra_doc_links" cargo doc --verbose --workspace --no-deps --document-private-items
+        run: RUSTDOCFLAGS="--deny rustdoc::broken_intra_doc_links" cargo doc --verbose --workspace --no-deps --document-private-items
diff --git a/.gitignore b/.gitignore
index be506a9f6..ed0044a24 100644
--- a/.gitignore
+++ b/.gitignore
@@ -8,3 +8,6 @@ Cargo.lock
 # VIM swap files
 *.sw[op]
+
+# VS Code settings files
+/.vscode/
\ No newline at end of file
diff --git a/Cargo.toml b/Cargo.toml
index 182ca9be1..be08a23e5 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -21,6 +21,7 @@
 zeroize = { version = "1.4.3", features = ["zeroize_derive"] }
 futures = "0.3.19"
 uint = { version = "0.9.1", default-features = false }
 rlp = "0.5.1"
+sha2 = "0.9.5" # This version must be kept up to date so it uses the same dependencies as ENR
 hkdf = "0.12.3"
 hex = "0.4.3"
@@ -39,6 +40,7 @@
 lru = "0.7.1"
 hashlink = "0.7.0"
 delay_map = "0.1.1"
 more-asserts = "0.2.2"
+base64 = "0.13.0"
 
 [dev-dependencies]
 rand_07 = { package = "rand", version = "0.7" }
diff --git a/examples/find_nodes.rs b/examples/find_nodes.rs
index 7e01d10bf..0e060ffea 100644
--- a/examples/find_nodes.rs
+++ b/examples/find_nodes.rs
@@ -175,7 +175,7 @@ async fn main() {
             match discv5.find_node(target_random_node_id).await {
                 Err(e) => warn!("Find Node result failed: {:?}", e),
                 Ok(v) => {
-                    // found a list of ENR's print their NodeIds
+                    // found a list of ENRs, print their NodeIds
                     let node_ids = v.iter().map(|enr| enr.node_id()).collect::<Vec<_>>();
                     info!("Nodes found: {}", node_ids.len());
                     for node_id in node_ids {
diff --git a/src/advertisement/mod.rs b/src/advertisement/mod.rs
new file mode 100644
index 000000000..21c8d5ca6
--- /dev/null
+++ b/src/advertisement/mod.rs
@@ -0,0 +1,362 @@
+use super::*;
+use crate::{enr::NodeId, Enr};
+use core::time::Duration;
+use futures::prelude::*;
+use more_asserts::debug_unreachable;
+use std::{
+    collections::{HashMap, VecDeque},
+    fmt,
+    net::IpAddr,
+    pin::Pin,
+    task::{Context, Poll},
+};
+use tokio::time::Instant;
+use topic::TopicHash;
+use tracing::{debug, error};
+
+mod test;
+pub mod ticket;
+pub mod topic;
+
+/// The max nodes to advertise for a topic.
+const MAX_ADS_TOPIC: usize = 100;
+
+/// The max nodes to advertise.
+const MAX_ADS: usize = 50000;
+
+/// The max ads per subnet per topic.
+const MAX_ADS_SUBNET_TOPIC: usize = 5;
+
+/// The max ads per subnet.
+const MAX_ADS_SUBNET: usize = 50;
+
+/// The duration for which an ad is placed after a REGCONFIRMATION is sent.
+pub const AD_LIFETIME: Duration = Duration::from_secs(60 * 15);
+
+/// An AdNode is a node that occupies an ad slot on another node.
+#[derive(Debug, Clone)]
+pub struct AdNode {
+    /// The node being advertised.
+    node_record: Enr,
+    /// The insert_time is used to retrieve the ticket wait time for a given
+    /// topic.
+    insert_time: Instant,
+}
+
+impl AdNode {
+    pub fn new(node_record: Enr, insert_time: Instant) -> Self {
+        AdNode {
+            node_record,
+            insert_time,
+        }
+    }
+
+    pub fn node_record(&self) -> &Enr {
+        &self.node_record
+    }
+}
+
+impl PartialEq for AdNode {
+    fn eq(&self, other: &Self) -> bool {
+        self.node_record == other.node_record
+    }
+}
+
+/// An AdTopic keeps track of when an AdNode is created.
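+/// AdTopics are appended to the expirations queue of [`Ads`] in insert order,
+/// so the entry that is first to expire is always at the front of the queue.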
+#[derive(Clone, Debug)]
+struct AdTopic {
+    /// The topic maps to the topic of an AdNode in Ads's ads.
+    topic: TopicHash,
+    /// The insert_time is used to make sure an AdNode persists in Ads
+    /// only for the ad_lifetime duration.
+    insert_time: Instant,
+}
+
+impl AdTopic {
+    pub fn new(topic: TopicHash, insert_time: Instant) -> Self {
+        AdTopic { topic, insert_time }
+    }
+}
+
+/// The Ads struct contains advertised AdNodes. The topics table is used to refer to
+/// all the ads collectively, and the table stores ads by topic.
+#[derive(Clone, Debug)]
+pub struct Ads {
+    /// The expirations makes sure that AdNodes are advertised only for the
+    /// ad_lifetime duration.
+    expirations: VecDeque<AdTopic>,
+    /// The ads store the AdNodes per TopicHash in FIFO order of expiration.
+    ads: HashMap<TopicHash, VecDeque<AdNode>>,
+    /// The ad_lifetime is specified by the spec but can be modified for
+    /// testing purposes.
+    ad_lifetime: Duration,
+    /// The max_ads_per_topic limit is up to the user although recommendations
+    /// are given in the specs.
+    max_ads_topic: usize,
+    /// The max_ads limit is up to the user although recommendations are
+    /// given in the specs.
+    max_ads: usize,
+    /// Max ads per subnet for the whole table.
+    max_ads_subnet: usize,
+    /// Max ads per subnet per topic.
+    max_ads_subnet_topic: usize,
+    /// Expiration times of ads by subnet.
+    subnet_expirations: HashMap<Vec<u8>, VecDeque<Instant>>,
+}
+
+impl Ads {
+    pub fn new(
+        ad_lifetime: Duration,
+        max_ads_topic: usize,
+        max_ads: usize,
+        max_ads_subnet: usize,
+        max_ads_subnet_topic: usize,
+    ) -> Self {
+        let (max_ads_topic, max_ads, max_ads_subnet, max_ads_subnet_topic) =
+            if max_ads_topic > max_ads || max_ads_subnet_topic > max_ads_subnet {
+                error!(
+                    "Ads per topic [per subnet] cannot be > max_ads [per subnet]. Using default values"
+                );
+                return Self::default();
+            } else {
+                (max_ads_topic, max_ads, max_ads_subnet, max_ads_subnet_topic)
+            };
+
+        Ads {
+            expirations: VecDeque::new(),
+            ads: HashMap::new(),
+            ad_lifetime,
+            max_ads_topic,
+            max_ads,
+            max_ads_subnet,
+            max_ads_subnet_topic,
+            subnet_expirations: HashMap::new(),
+        }
+    }
+
+    pub fn default() -> Self {
+        Ads::new(
+            AD_LIFETIME,
+            MAX_ADS_TOPIC,
+            MAX_ADS,
+            MAX_ADS_SUBNET,
+            MAX_ADS_SUBNET_TOPIC,
+        )
+    }
+
+    /// Checks if there are currently any entries in the topics table.
+    pub fn is_empty(&self) -> bool {
+        self.expirations.is_empty()
+    }
+
+    /// Returns the amount of ads currently in the topics table.
+    pub fn len(&self) -> usize {
+        self.expirations.len()
+    }
+
+    /// Returns an iterator over the ads currently in the topics table for a given topic
+    /// if any.
+    pub fn get_ad_nodes(&self, topic: TopicHash) -> impl Iterator<Item = &AdNode> + '_ {
+        self.ads.get(&topic).into_iter().flatten()
+    }
+
+    /// Ticket wait time enforces diversity among advertised nodes. The ticket wait time is
+    /// calculated after removing expired entries, based on the current state of the topics
+    /// table (ads).
+    pub fn ticket_wait_time(
+        &mut self,
+        topic: TopicHash,
+        node_id: NodeId,
+        ip: IpAddr,
+    ) -> Option<Duration> {
+        self.remove_expired();
+        let now = Instant::now();
+        // Occupancy check to see if the table is full.
+        // Similarity check to see if the ad slots for an ip subnet are full.
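+        // The subnet key is the first 3 octets of an IPv4 address (a /24) or the
+        // first 6 octets of an IPv6 address (a /48), e.g. 192.168.0.42 -> [192, 168, 0].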
+        let subnet = match ip {
+            IpAddr::V4(ip) => ip.octets()[0..=2].to_vec(),
+            IpAddr::V6(ip) => ip.octets()[0..=5].to_vec(),
+        };
+
+        let wait_time_max_ads_subnet =
+            if let Some(expirations) = self.subnet_expirations.get_mut(&subnet) {
+                if expirations.len() >= self.max_ads_subnet {
+                    if let Some(insert_time) = expirations.pop_front() {
+                        expirations.push_front(insert_time);
+                        let elapsed_time = now.saturating_duration_since(insert_time);
+                        Some(self.ad_lifetime.saturating_sub(elapsed_time))
+                    } else {
+                        None
+                    }
+                } else {
+                    None
+                }
+            } else {
+                None
+            };
+
+        if let Some(nodes) = self.ads.get(&topic) {
+            let mut subnet_first_insert_time = None;
+            let mut subnet_ads_count = 0;
+            for ad in nodes.iter() {
+                // Similarity check to see if ads with same node id and ip exist for the given topic.
+                let same_ip = match ip {
+                    IpAddr::V4(ip) => ad.node_record.ip4() == Some(ip),
+                    IpAddr::V6(ip) => ad.node_record.ip6() == Some(ip),
+                };
+                if ad.node_record.node_id() == node_id || same_ip {
+                    let elapsed_time = now.saturating_duration_since(ad.insert_time);
+                    let wait_time = self.ad_lifetime.saturating_sub(elapsed_time);
+                    return Some(wait_time);
+                }
+                let subnet_match = match ip {
+                    IpAddr::V4(_) => ad
+                        .node_record
+                        .ip4()
+                        .map(|ip| ip.octets()[0..=2].to_vec() == subnet)
+                        .unwrap_or(false),
+                    IpAddr::V6(_) => ad
+                        .node_record
+                        .ip6()
+                        .map(|ip| ip.octets()[0..=5].to_vec() == subnet)
+                        .unwrap_or(false),
+                };
+                if subnet_match {
+                    if subnet_first_insert_time.is_none() {
+                        subnet_first_insert_time = Some(ad.insert_time);
+                    }
+                    subnet_ads_count += 1;
+                }
+            }
+            // Similarity check to see if the limit of ads per subnet per topic, or for the whole
+            // table, is reached. If the ad slots per subnet per topic are not full, and neither are
+            // the ad slots per subnet for the whole table, then the waiting time is not decided by
+            // subnet but by the number of free ad slots for the topic.
+            if subnet_ads_count >= self.max_ads_subnet_topic {
+                if let Some(insert_time) = subnet_first_insert_time {
+                    let elapsed_time = now.saturating_duration_since(insert_time);
+                    let wait_time = self.ad_lifetime.saturating_sub(elapsed_time);
+                    return Some(wait_time);
+                }
+            }
+            if wait_time_max_ads_subnet.is_some() {
+                return wait_time_max_ads_subnet;
+            }
+
+            // Occupancy check to see if the ad slots for a certain topic are full.
+            if nodes.len() >= self.max_ads_topic {
+                return nodes.front().map(|ad| {
+                    let elapsed_time = now.saturating_duration_since(ad.insert_time);
+                    self.ad_lifetime.saturating_sub(elapsed_time)
+                });
+            }
+        }
+        // Similarity check to see if the limit of ads per subnet for the whole table is reached.
+        if wait_time_max_ads_subnet.is_some() {
+            return wait_time_max_ads_subnet;
+        }
+        // If the ad slots per topic are not full and neither is the table then the waiting time is
+        // None, otherwise the waiting time is that of the next ad in the table to expire.
+        if self.expirations.len() < self.max_ads {
+            None
+        } else {
+            self.expirations.front().map(|ad| {
+                let elapsed_time = now.saturating_duration_since(ad.insert_time);
+                self.ad_lifetime.saturating_sub(elapsed_time)
+            })
+        }
+    }
+
+    /// Removes ads that have been in the topics table for at least the ad lifetime specified in [`Ads`].
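+    /// Since entries are appended in insert order, expired entries always form a
+    /// prefix of the expirations queue and of each per-topic queue.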
+    fn remove_expired(&mut self) {
+        let mut to_remove_ads: HashMap<TopicHash, usize> = HashMap::new();
+
+        self.expirations
+            .iter()
+            .take_while(|ad| ad.insert_time.elapsed() >= self.ad_lifetime)
+            .for_each(|ad| {
+                *to_remove_ads.entry(ad.topic).or_default() += 1;
+            });
+
+        to_remove_ads.into_iter().for_each(|(topic, index)| {
+            if let Some(topic_ads) = self.ads.get_mut(&topic) {
+                for i in 0..index {
+                    let ad = topic_ads.pop_front();
+                    if let Some(ad) = ad {
+                        let subnet = if let Some(ip) = ad.node_record.ip4() {
+                            Some(ip.octets()[0..=2].to_vec())
+                        } else {
+                            ad.node_record.ip6().map(|ip6| ip6.octets()[0..=5].to_vec())
+                        };
+                        if let Some(subnet) = subnet {
+                            if let Some(subnet_expiries) = self.subnet_expirations.get_mut(&subnet) {
+                                subnet_expiries.pop_front();
+                            } else {
+                                debug_unreachable!("Mismatched mapping between ads and their expirations by subnet. At least {} ads should exist for subnet {:?}", i + 1, subnet);
+                            }
+                        }
+                    } else {
+                        debug_unreachable!("Mismatched mapping between ads and their expirations. At least {} ads should exist for topic hash {}", i + 1, topic)
+                    }
+                    self.expirations.pop_front();
+                }
+                if topic_ads.is_empty() {
+                    self.ads.remove(&topic);
+                }
+            } else {
+                debug_unreachable!("Mismatched mapping between ads and their expirations. An entry should exist for topic hash {}", topic);
+            }
+        });
+    }
+
+    /// Inserts a unique node record - topic mapping into the topics table after removing expired entries.
+    pub fn insert(
+        &mut self,
+        node_record: Enr,
+        topic: TopicHash,
+        ip: IpAddr,
+    ) -> Result<(), (Duration, &str)> {
+        if let Some(wait_time) = self.ticket_wait_time(topic, node_record.node_id(), ip) {
+            return Err((wait_time, "There is currently no ad slot free for this node - topic combination. Discarding registration attempt."));
+        }
+        self.remove_expired();
+        let now = Instant::now();
+
+        let subnet = if let Some(ip) = node_record.ip4() {
+            Some(ip.octets()[0..=2].to_vec())
+        } else {
+            node_record.ip6().map(|ip6| ip6.octets()[0..=5].to_vec())
+        };
+        if let Some(subnet) = subnet {
+            let subnet_expiries = self
+                .subnet_expirations
+                .entry(subnet)
+                .or_insert_with(VecDeque::new);
+            subnet_expiries.push_back(now);
+        }
+        let nodes = self.ads.entry(topic).or_default();
+
+        let ad_node = AdNode::new(node_record, now);
+        nodes.push_back(ad_node);
+        self.expirations.push_back(AdTopic::new(topic, now));
+        Ok(())
+    }
+}
+
+impl fmt::Display for Ads {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        let ads = self
+            .ads
+            .iter()
+            .map(|ad| {
+                let ad_node_ids = ad
+                    .1
+                    .iter()
+                    .map(|ad_node| base64::encode(ad_node.node_record.node_id().raw()))
+                    .collect::<Vec<String>>();
+                format!("Topic: {}, Advertised at: {:?}", ad.0, ad_node_ids)
+            })
+            .collect::<Vec<String>>();
+        write!(f, "{:?}", ads)
+    }
+}
diff --git a/src/advertisement/test.rs b/src/advertisement/test.rs
new file mode 100644
index 000000000..36fbf525c
--- /dev/null
+++ b/src/advertisement/test.rs
@@ -0,0 +1,256 @@
+#![cfg(test)]
+
+use super::*;
+use crate::advertisement::topic::Sha256Topic as Topic;
+use enr::{CombinedKey, EnrBuilder};
+use more_asserts::{assert_gt, assert_lt};
+use std::net::IpAddr;
+
+#[tokio::test]
+async fn insert_same_node() {
+    // Create the test values needed
+    let port = 6666;
+    let ip: IpAddr = "127.0.0.1".parse().unwrap();
+    let key = CombinedKey::generate_secp256k1();
+    let enr = EnrBuilder::new("v4").ip(ip).udp4(port).build(&key).unwrap();
+
+    let mut ads = Ads::new(Duration::from_secs(2), 10, 50, 100, 100);
+
+    let topic = Topic::new(std::str::from_utf8(&[1u8; 32]).unwrap()).hash();
+
+    ads.insert(enr.clone(), topic, ip).unwrap();
+
+    // Since 2 seconds haven't passed yet, the ad hasn't expired and the
+    // duplicate insert fails
+    assert_ne!(ads.insert(enr.clone(), topic, ip), Ok(()));
+
+    tokio::time::sleep(Duration::from_secs(2)).await;
+    ads.insert(enr.clone(), topic, ip).unwrap();
+}
+
+#[tokio::test]
+async fn insert_ad_and_get_nodes() {
+    // Create the test values needed
+    let port = 6666;
+    let ip: IpAddr = "127.0.0.1".parse().unwrap();
+    let key = CombinedKey::generate_secp256k1();
+    let enr = EnrBuilder::new("v4").ip(ip).udp4(port).build(&key).unwrap();
+
+    let port_2 = 5000;
+    let ip_2: IpAddr = "192.168.0.2".parse().unwrap();
+    let key_2 = CombinedKey::generate_secp256k1();
+    let enr_2 = EnrBuilder::new("v4")
+        .ip(ip_2)
+        .udp4(port_2)
+        .build(&key_2)
+        .unwrap();
+
+    let mut ads = Ads::new(Duration::from_secs(2), 10, 50, 100, 100);
+
+    let topic = Topic::new(std::str::from_utf8(&[1u8; 32]).unwrap()).hash();
+    let topic_2 = Topic::new(std::str::from_utf8(&[2u8; 32]).unwrap()).hash();
+
+    // Add an ad for topic from enr
+    ads.insert(enr.clone(), topic, ip).unwrap();
+
+    // The ad hasn't expired and duplicates are not allowed
+    assert_ne!(ads.insert(enr.clone(), topic, ip), Ok(()));
+
+    // Add an ad for topic from enr_2
+    ads.insert(enr_2.clone(), topic, ip_2).unwrap();
+
+    // Add an ad for topic_2 from enr
+    ads.insert(enr.clone(), topic_2, ip).unwrap();
+
+    let nodes: Vec<&Enr> = ads
+        .get_ad_nodes(topic)
+        .map(|ad_node| ad_node.node_record())
+        .collect();
+
+    let nodes_topic_2: Vec<&Enr> = ads
+        .get_ad_nodes(topic_2)
+        .map(|ad_node| ad_node.node_record())
+        .collect();
+
+    assert_eq!(nodes, vec![&enr, &enr_2]);
+    assert_eq!(nodes_topic_2, vec![&enr]);
+}
+
+#[tokio::test]
+async fn ticket_wait_time_no_wait_time() {
+    // Create the test values needed
+    let port = 6666;
+    let ip: IpAddr = "127.0.0.1".parse().unwrap();
+    let key = CombinedKey::generate_secp256k1();
+    let enr = EnrBuilder::new("v4").ip(ip).udp4(port).build(&key).unwrap();
+    let mut ads = Ads::new(Duration::from_secs(1), 10, 50, 100, 100);
+    let topic = Topic::new(std::str::from_utf8(&[1u8; 32]).unwrap()).hash();
+    assert_eq!(ads.ticket_wait_time(topic, enr.node_id(), ip), None)
+}
+
+#[tokio::test]
+async fn ticket_wait_time_duration() {
+    // Create the test values needed
+    let port = 6666;
+    let ip: IpAddr = "127.0.0.1".parse().unwrap();
+    let key = CombinedKey::generate_secp256k1();
+    let enr = EnrBuilder::new("v4").ip(ip).udp4(port).build(&key).unwrap();
+
+    let mut ads = Ads::new(Duration::from_secs(3), 1, 3, 100, 100);
+
+    let topic = Topic::new(std::str::from_utf8(&[1u8; 32]).unwrap()).hash();
+
+    // Add an ad for topic
+    ads.insert(enr.clone(), topic, ip).unwrap();
+
+    assert_gt!(
+        ads.ticket_wait_time(topic, enr.node_id(), ip),
+        Some(Duration::from_secs(2))
+    );
+    assert_lt!(
+        ads.ticket_wait_time(topic, enr.node_id(), ip),
+        Some(Duration::from_secs(3))
+    );
+}
+
+#[tokio::test]
+async fn ticket_wait_time_full_table() {
+    // Create the test values needed
+    let port = 6666;
+    let ip: IpAddr = "127.0.0.1".parse().unwrap();
+    let key = CombinedKey::generate_secp256k1();
+    let enr = EnrBuilder::new("v4").ip(ip).udp4(port).build(&key).unwrap();
+
+    let port_2 = 5000;
+    let ip_2: IpAddr = "192.168.0.1".parse().unwrap();
+    let key_2 = CombinedKey::generate_secp256k1();
+    let enr_2 = EnrBuilder::new("v4")
+        .ip(ip_2)
+        .udp4(port_2)
+        .build(&key_2)
+        .unwrap();
+
+    let mut ads = Ads::new(Duration::from_secs(3), 2, 3, 100, 100);
+
+    let topic = Topic::new(std::str::from_utf8(&[1u8; 32]).unwrap()).hash();
+    let topic_2 = Topic::new(std::str::from_utf8(&[2u8; 32]).unwrap()).hash();
+
+    // Add 2 ads for topic
+    ads.insert(enr.clone(), topic, ip).unwrap();
+    ads.insert(enr_2.clone(), topic, ip_2).unwrap();
+
+    tokio::time::sleep(Duration::from_secs(2)).await;
+
+    // Add an ad for topic_2
+    ads.insert(enr.clone(), topic_2, ip).unwrap();
+
+    // Now max_ads in table is reached so the second ad for topic_2 has to wait
+    assert_ne!(ads.ticket_wait_time(topic_2, enr.node_id(), ip), None);
+
+    tokio::time::sleep(Duration::from_secs(3)).await;
+
+    // Now the first ads have expired and the table is not full, so neither topic
+    // nor topic_2 ads have to wait
+    assert_eq!(ads.ticket_wait_time(topic, enr.node_id(), ip), None);
+    assert_eq!(ads.ticket_wait_time(topic_2, enr_2.node_id(), ip_2), None);
+}
+
+#[tokio::test]
+async fn ticket_wait_time_full_topic() {
+    // Create the test values needed
+    let port = 6666;
+    let ip: IpAddr = "127.0.0.1".parse().unwrap();
+    let key = CombinedKey::generate_secp256k1();
+    let enr = EnrBuilder::new("v4").ip(ip).udp4(port).build(&key).unwrap();
+
+    let port_2 = 5000;
+    let ip_2: IpAddr = "192.168.0.1".parse().unwrap();
+    let key_2 = CombinedKey::generate_secp256k1();
+    let enr_2 = EnrBuilder::new("v4")
+        .ip(ip_2)
+        .udp4(port_2)
+        .build(&key_2)
+        .unwrap();
+
+    let port_3 = 5000;
+    let ip_3: IpAddr = "8.8.8.8".parse().unwrap();
+    let key_3 = CombinedKey::generate_secp256k1();
+    let enr_3 = EnrBuilder::new("v4")
+        .ip(ip_3)
+        .udp4(port_3)
+        .build(&key_3)
+        .unwrap();
+
+    let mut ads = Ads::new(Duration::from_secs(3), 2, 4, 100, 100);
+
+    let topic = Topic::new(std::str::from_utf8(&[1u8; 32]).unwrap()).hash();
+    let topic_2 = Topic::new(std::str::from_utf8(&[2u8; 32]).unwrap()).hash();
+
+    // Add 2 ads for topic
+    ads.insert(enr.clone(), topic, ip).unwrap();
+    ads.insert(enr_2.clone(), topic, ip_2).unwrap();
+
+    // Now max_ads_per_topic is reached for topic
+    assert_ne!(ads.ticket_wait_time(topic, enr_3.node_id(), ip_3), None);
+
+    // Add a topic_2 ad
+    ads.insert(enr.clone(), topic_2, ip).unwrap();
+
+    // The table isn't full so topic_2 ads don't have to wait
+    assert_eq!(ads.ticket_wait_time(topic_2, enr_2.node_id(), ip_2), None);
+
+    tokio::time::sleep(Duration::from_secs(3)).await;
+    assert_eq!(ads.ticket_wait_time(topic, enr_3.node_id(), ip), None);
+}
+
+#[tokio::test]
+async fn ticket_wait_time_full_subnet() {
+    let port = 1510;
+    let ip: IpAddr = "192.168.0.1".parse().unwrap();
+    let key = CombinedKey::generate_secp256k1();
+    let enr = EnrBuilder::new("v4").ip(ip).udp4(port).build(&key).unwrap();
+
+    let port_2 = 1995;
+    let ip_2: IpAddr = "192.168.0.2".parse().unwrap();
+    let key_2 = CombinedKey::generate_secp256k1();
+    let enr_2 = EnrBuilder::new("v4")
+        .ip(ip_2)
+        .udp4(port_2)
+        .build(&key_2)
+        .unwrap();
+
+    let mut ads = Ads::new(Duration::from_secs(2), 2, 4, 2, 1);
+    let topic_1 = Topic::new(std::str::from_utf8(&[1u8; 32]).unwrap()).hash();
+    let topic_2 = Topic::new(std::str::from_utf8(&[2u8; 32]).unwrap()).hash();
+    let topic_3 = Topic::new(std::str::from_utf8(&[3u8; 32]).unwrap()).hash();
+
+    ads.insert(enr.clone(), topic_1, ip).unwrap();
+    ads.insert(enr_2, topic_2, ip_2).unwrap();
+
+    assert_ne!(ads.ticket_wait_time(topic_3, enr.node_id(), ip), None);
+}
+
+#[tokio::test]
+async fn ticket_wait_time_full_subnet_topic() {
+    let port = 1510;
+    let ip: IpAddr = "192.168.0.1".parse().unwrap();
+    let key = CombinedKey::generate_secp256k1();
+    let enr = EnrBuilder::new("v4").ip(ip).udp4(port).build(&key).unwrap();
+
+    let port_2 = 1995;
+    let ip_2: IpAddr = "192.168.0.2".parse().unwrap();
"192.168.0.2".parse().unwrap(); + let key_2 = CombinedKey::generate_secp256k1(); + let enr_2 = EnrBuilder::new("v4") + .ip(ip_2) + .udp4(port_2) + .build(&key_2) + .unwrap(); + + let mut ads = Ads::new(Duration::from_secs(2), 2, 4, 2, 1); + let topic_1 = Topic::new(std::str::from_utf8(&[1u8; 32]).unwrap()).hash(); + let topic_2 = Topic::new(std::str::from_utf8(&[2u8; 32]).unwrap()).hash(); + + ads.insert(enr.clone(), topic_1, ip).unwrap(); + + assert_ne!(ads.ticket_wait_time(topic_1, enr_2.node_id(), ip), None); + assert_eq!(ads.ticket_wait_time(topic_2, enr.node_id(), ip), None); +} diff --git a/src/advertisement/ticket.rs b/src/advertisement/ticket.rs new file mode 100644 index 000000000..5d3a4a8ac --- /dev/null +++ b/src/advertisement/ticket.rs @@ -0,0 +1,231 @@ +use super::*; +use crate::Topic; +use delay_map::HashMapDelay; +use enr::NodeId; +use node_info::NodeContact; +use std::{cmp::Eq, hash::Hash}; + +/// The max wait time accepted for tickets. +pub const MAX_WAIT_TIME_TICKET: u64 = 60 * 5; + +/// The time window within which the number of new tickets from a peer for a topic will be limited. +pub const TICKET_LIMIT_DURATION: Duration = Duration::from_secs(60 * 15); + +/// Max tickets that are stored for an individual node for a topic (in the configured +/// time period). +pub const MAX_TICKETS_PER_NODE_PER_TOPIC: u8 = 3; + +/// A topic is active when it's associated with the NodeId from a node it is +/// published on. +#[derive(PartialEq, Eq, Clone, Hash)] +pub struct ActiveTopic { + /// NodeId of the sender of the TICKET response. + node_id: NodeId, + /// The topic hash as it is sent in the TICKET response. + topic: Topic, +} + +impl ActiveTopic { + /// Makes a topic active (currently associated with an ad slot or a ticket) by + /// associating it with a node id. + pub fn new(node_id: NodeId, topic: Topic) -> Self { + ActiveTopic { node_id, topic } + } + + /// Returns the topic of a topic that is active. + pub fn topic(&self) -> &Topic { + &self.topic + } + + /// Returns the node id of a topic that is active. + pub fn node_id(&self) -> &NodeId { + &self.node_id + } +} + +/// A ticket is active when it is associated with the node contact of +/// the sender of the ticket. +pub struct ActiveTicket { + /// Node Contact of the sender of the ticket. + contact: NodeContact, + /// The ticket, an opaque object to the receiver. + ticket: Vec, +} + +impl ActiveTicket { + /// Makes a ticket active (currently stored waiting to be used in a new registration + /// attempt when its ticket wait time has expired) by associating it with a node + /// contact. + pub fn new(contact: NodeContact, ticket: Vec) -> Self { + ActiveTicket { contact, ticket } + } + + /// Returns the node contact of a ticket that is active. + pub fn contact(&self) -> NodeContact { + self.contact.clone() + } + + /// Returns the ticket of a ticket that is active. + pub fn ticket(&self) -> Vec { + self.ticket.clone() + } +} + +/// Tickets holds the tickets received in TICKET responses to locally initiated +/// REGTOPIC requests. +pub struct Tickets { + /// Tickets maps an [`ActiveTopic`] to an [`ActiveTicket`]. + tickets: HashMapDelay, + /// TicketHistory sets a time limit to how many times the [`ActiveTicket`] + /// value in tickets can be updated within a given ticket limit duration. 
+    ticket_history: TicketHistory,
+}
+
+impl Tickets {
+    pub fn new(ticket_limiter_duration: Duration) -> Self {
+        Tickets {
+            tickets: HashMapDelay::new(Duration::default()),
+            ticket_history: TicketHistory::new(ticket_limiter_duration),
+        }
+    }
+
+    pub fn default() -> Self {
+        Tickets::new(TICKET_LIMIT_DURATION)
+    }
+
+    /// Inserts a ticket into [`Tickets`] if the state of [`TicketHistory`] allows it.
+    pub fn insert(
+        &mut self,
+        contact: NodeContact,
+        ticket: Vec<u8>,
+        wait_time: Duration,
+        topic: Topic,
+    ) -> Result<(), &str> {
+        let active_topic = ActiveTopic::new(contact.node_id(), topic);
+
+        self.ticket_history.insert(active_topic.clone())?;
+
+        self.tickets
+            .insert_at(active_topic, ActiveTicket::new(contact, ticket), wait_time);
+        Ok(())
+    }
+
+    /// Removes all tickets held for the given topic.
+    pub fn remove(&mut self, topic: &Topic) {
+        self.tickets
+            .retain(|active_topic, _| active_topic.topic() != topic);
+    }
+}
+
+impl Stream for Tickets {
+    type Item = Result<(ActiveTopic, ActiveTicket), String>;
+    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+        match self.tickets.poll_next_unpin(cx) {
+            Poll::Ready(Some(Ok((active_topic, ticket)))) => {
+                Poll::Ready(Some(Ok((active_topic, ticket))))
+            }
+            Poll::Ready(Some(Err(e))) => {
+                error!(
+                    "Failed to fetch next ticket with expired wait time. Error {}",
+                    e
+                );
+                Poll::Ready(Some(Err(e)))
+            }
+            // When the hashmap delay holding tickets is empty, as we poll this tickets stream in a
+            // select! statement, to avoid re-polling the stream till it fills up again with new
+            // tickets pending a re-attempt at registration we return Poll::Pending.
+            Poll::Ready(None) => Poll::Pending,
+            Poll::Pending => Poll::Pending,
+        }
+    }
+}
+
+/// A PendingTicket maps to a ticket, received from another node, that is stored in
+/// Tickets upon insert.
+#[derive(Clone)]
+struct PendingTicket {
+    /// The [`ActiveTopic`] serves to match the [`ActiveTicket`] to an entry in [`Tickets`]'
+    /// tickets HashMapDelay.
+    active_topic: ActiveTopic,
+    /// The insert_time is used to check [`MAX_TICKETS_PER_NODE_PER_TOPIC`] against
+    /// the ticket_limiter_duration.
+    insert_time: Instant,
+}
+
+/// TicketHistory keeps track of how many times a ticket was replaced for
+/// an [`ActiveTopic`] within the time limit given by ticket_limiter_duration
+/// and limits it to [`MAX_TICKETS_PER_NODE_PER_TOPIC`] times.
+#[derive(Default)]
+pub struct TicketHistory {
+    /// The ticket_count keeps track of how many tickets are stored for the
+    /// ActiveTopic.
+    ticket_count: HashMap<ActiveTopic, u8>,
+    /// Up to [`MAX_TICKETS_PER_NODE_PER_TOPIC`] PendingTickets in expirations map
+    /// to an ActiveTopic in ticket_count.
+    expirations: VecDeque<PendingTicket>,
+    /// The time a PendingTicket remains in expirations.
+    ticket_limit_duration: Duration,
+}
+
+impl TicketHistory {
+    fn new(ticket_limit_duration: Duration) -> Self {
+        TicketHistory {
+            ticket_count: HashMap::new(),
+            expirations: VecDeque::new(),
+            ticket_limit_duration,
+        }
+    }
+
+    /// Inserts a ticket into [`TicketHistory`] unless the ticket of the given active
+    /// topic has already been updated [`MAX_TICKETS_PER_NODE_PER_TOPIC`] times within
+    /// the ticket limit duration, in which case it is discarded and an error is returned.
+    /// Expired entries are removed before insertion.
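+    /// With the default constants this means, for example, that a given peer can
+    /// obtain at most 3 tickets for the same topic within any 15 minute window.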
+    pub fn insert(&mut self, active_topic: ActiveTopic) -> Result<(), &str> {
+        self.remove_expired();
+        let insert_time = Instant::now();
+        let count = self.ticket_count.entry(active_topic.clone()).or_default();
+        if *count >= MAX_TICKETS_PER_NODE_PER_TOPIC {
+            debug!(
+                "Max {} tickets per NodeId - Topic mapping accepted in {} seconds",
+                MAX_TICKETS_PER_NODE_PER_TOPIC,
+                self.ticket_limit_duration.as_secs()
+            );
+            return Err("Ticket limit reached");
+        }
+        *count += 1;
+        self.expirations.push_back(PendingTicket {
+            active_topic,
+            insert_time,
+        });
+        Ok(())
+    }
+
+    /// Removes entries that have been stored for at least the ticket limit duration.
+    /// If the same [`ActiveTopic`] is inserted again, the count up till
+    /// [`MAX_TICKETS_PER_NODE_PER_TOPIC`] inserts/updates starts anew.
+    fn remove_expired(&mut self) {
+        let now = Instant::now();
+        let ticket_limiter_duration = self.ticket_limit_duration;
+        let ticket_count = &mut self.ticket_count;
+        let total_to_remove = self
+            .expirations
+            .iter()
+            .take_while(|pending_ticket| {
+                now.saturating_duration_since(pending_ticket.insert_time) >= ticket_limiter_duration
+            })
+            .map(|pending_ticket| {
+                let count = ticket_count
+                    .entry(pending_ticket.active_topic.clone())
+                    .or_default();
+                if *count > 1 {
+                    *count -= 1;
+                } else {
+                    ticket_count.remove(&pending_ticket.active_topic);
+                }
+            })
+            .count();
+
+        for _ in 0..total_to_remove {
+            self.expirations.pop_front();
+        }
+    }
+}
diff --git a/src/advertisement/topic.rs b/src/advertisement/topic.rs
new file mode 100644
index 000000000..bab276685
--- /dev/null
+++ b/src/advertisement/topic.rs
@@ -0,0 +1,271 @@
+// Copyright 2020 Sigma Prime Pty Ltd.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a
+// copy of this software and associated documentation files (the "Software"),
+// to deal in the Software without restriction, including without limitation
+// the rights to use, copy, modify, merge, publish, distribute, sublicense,
+// and/or sell copies of the Software, and to permit persons to whom the
+// Software is furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+// DEALINGS IN THE SOFTWARE.
+
+use base64::encode;
+use rlp::{Decodable, DecoderError, Encodable, Rlp, RlpStream};
+use sha2::{Digest, Sha256};
+use std::{fmt, hash::Hash};
+use tracing::debug;
+
+//pub type IdentTopic = Topic<IdentityHash>;
+pub type Sha256Topic = Topic<Sha256Hash>;
+
+/// A generic trait that can be extended for various hashing types for a topic.
+pub trait Hasher {
+    /// The function that takes a topic string and creates a topic hash.
+    fn hash(topic_string: String) -> TopicHash;
+}
+
+/// A type for representing topics which use the identity hash.
+/*#[derive(Debug, Clone)]
+pub struct IdentityHash {}
+impl Hasher for IdentityHash {
+    /// Creates a [`TopicHash`] as a raw string.
+    fn hash(topic_string: String) -> TopicHash {
+        TopicHash { hash: topic_string.as_bytes() }
+    }
+}*/
+
+#[derive(Debug, Clone)]
+pub struct Sha256Hash {}
+
+impl Hasher for Sha256Hash {
+    /// Creates a [`TopicHash`] by SHA256 hashing the topic then base64 encoding the
+    /// hash.
+    fn hash(topic_string: String) -> TopicHash {
+        let sha256 = Sha256::digest(topic_string.as_bytes());
+        let mut hash = [0u8; 32];
+        hash.copy_from_slice(&sha256);
+        TopicHash { hash }
+    }
+}
+
+/// The 32 bytes that are sent in the body of a topic request are interpreted
+/// as a hash by the agreed upon hash algorithm in the discv5 network (defaults
+/// to Sha256).
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
+pub struct TopicHash {
+    /// The topic hash. Stored as a fixed length byte array.
+    hash: [u8; 32],
+}
+
+impl TopicHash {
+    /// Returns a topic hash wrapping the given 32 bytes.
+    pub fn from_raw(hash: [u8; 32]) -> TopicHash {
+        TopicHash { hash }
+    }
+
+    /// Returns the raw 32 bytes inside a topic hash.
+    pub fn as_bytes(&self) -> [u8; 32] {
+        self.hash
+    }
+}
+
+impl Encodable for TopicHash {
+    fn rlp_append(&self, s: &mut RlpStream) {
+        s.append(&self.hash.to_vec());
+    }
+}
+
+impl Decodable for TopicHash {
+    fn decode(rlp: &Rlp<'_>) -> Result<Self, DecoderError> {
+        let topic = {
+            let topic_bytes = rlp.data()?;
+            if topic_bytes.len() > 32 {
+                debug!("Topic greater than 32 bytes");
+                return Err(DecoderError::RlpIsTooBig);
+            }
+            let mut topic = [0u8; 32];
+            topic[32 - topic_bytes.len()..].copy_from_slice(topic_bytes);
+            topic
+        };
+        Ok(TopicHash::from_raw(topic))
+    }
+}
+
+impl fmt::Display for TopicHash {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "{}", encode(self.hash))
+    }
+}
+
+/// A topic, as in sigp/rust-libp2p/protocols/gossipsub.
+#[derive(Debug, Clone)]
+pub struct Topic<H: Hasher> {
+    /// The topic string passed to the topic upon instantiation.
+    topic: String,
+    /// The configured [`Hasher`] is stored within the topic.
+    phantom_data: std::marker::PhantomData<H>,
+}
+
+impl<H: Hasher> From<Topic<H>> for TopicHash {
+    fn from(topic: Topic<H>) -> TopicHash {
+        topic.hash()
+    }
+}
+
+impl<H: Hasher> Topic<H> {
+    /// Returns a new topic.
+    pub fn new(topic: impl Into<String>) -> Self {
+        Topic {
+            topic: topic.into(),
+            phantom_data: std::marker::PhantomData,
+        }
+    }
+
+    /// Returns a hash of the topic using the [`Hasher`] configured for the topic.
+    pub fn hash(&self) -> TopicHash {
+        H::hash(self.topic.clone())
+    }
+
+    /// Returns the string passed to the topic upon instantiation.
+    pub fn topic(&self) -> String {
+        self.topic.clone()
+    }
+}
+
+impl<H: Hasher> Hash for Topic<H> {
+    fn hash<T: std::hash::Hasher>(&self, state: &mut T) {
+        self.hash().hash(state)
+    }
+}
+
+impl<H: Hasher> PartialEq for Topic<H> {
+    /// Each hash algorithm used to publish a hashed topic (as an XOR metric key) is in
+    /// discv5 seen as its own [`Topic`] upon comparison. That means a topic string
+    /// can be published/registered more than once using different [`Hasher`]s.
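+    /// For example, "lighthouse" hashed with Sha256 and the same string hashed
+    /// with an identity hasher would compare as two distinct topics.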
+    fn eq(&self, other: &Topic<H>) -> bool {
+        self.hash() == other.hash()
+    }
+}
+
+impl<H: Hasher> Eq for Topic<H> {}
+
+impl<H: Hasher> fmt::Display for Topic<H> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "{}", self.topic)
+    }
+}
+
+pub struct TopicsEnrField<H: Hasher> {
+    topics: Vec<Topic<H>>,
+}
+
+impl<H: Hasher> TopicsEnrField<H> {
+    pub fn new(topics: Vec<Topic<H>>) -> Self {
+        TopicsEnrField { topics }
+    }
+
+    pub fn add(&mut self, topic: Topic<H>) {
+        self.topics.push(topic);
+    }
+
+    pub fn topics_iter(&self) -> impl Iterator<Item = &Topic<H>> {
+        self.topics.iter()
+    }
+
+    pub fn encode(&self) -> Vec<u8> {
+        let mut buf = Vec::new();
+        let mut s = RlpStream::new();
+        s.append(self);
+        buf.extend_from_slice(&s.out());
+        buf
+    }
+
+    pub fn decode(topics_field: &[u8]) -> Result<Option<Self>, DecoderError> {
+        if !topics_field.is_empty() {
+            let rlp = Rlp::new(topics_field);
+            let topics = rlp.as_val::<TopicsEnrField<H>>()?;
+            return Ok(Some(topics));
+        }
+        Ok(None)
+    }
+}
+
+impl<H: Hasher> rlp::Encodable for TopicsEnrField<H> {
+    fn rlp_append(&self, s: &mut RlpStream) {
+        s.begin_list(self.topics.len());
+        for topic in self.topics.iter() {
+            s.append(&topic.topic().as_bytes());
+        }
+    }
+}
+
+impl<H: Hasher> rlp::Decodable for TopicsEnrField<H> {
+    fn decode(rlp: &Rlp<'_>) -> Result<Self, DecoderError> {
+        if !rlp.is_list() {
+            debug!(
+                "Failed to decode ENR field 'topics'. Not an RLP list: {}",
+                rlp
+            );
+            return Err(DecoderError::RlpExpectedToBeList);
+        }
+
+        let item_count = rlp.iter().count();
+        let mut decoded_list: Vec<Rlp<'_>> = rlp.iter().collect();
+
+        let mut topics = Vec::new();
+
+        for _ in 0..item_count {
+            match decoded_list.remove(0).data() {
+                Ok(data) => match std::str::from_utf8(data) {
+                    Ok(topic_string) => {
+                        let topic = Topic::new(topic_string);
+                        topics.push(topic);
+                    }
+                    Err(e) => {
+                        debug!("Failed to decode topic as utf8. Error: {}", e);
+                        return Err(DecoderError::Custom("Topic is not utf8 encoded"));
+                    }
+                },
+                Err(e) => {
+                    debug!("Failed to decode item. Error: {}", e);
+                    return Err(DecoderError::RlpExpectedToBeData);
+                }
+            }
+        }
+        Ok(TopicsEnrField { topics })
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn encode_decode_topics_enr_field() {
+        let topics: Vec<Sha256Topic> = vec![
+            Topic::new("lighthouse"),
+            Topic::new("eth_syncing"),
+            Topic::new("eth_feeHistory"),
+        ];
+
+        let topics_field = TopicsEnrField::new(topics.clone());
+
+        let encoded = topics_field.encode();
+        let decoded = TopicsEnrField::<Sha256Hash>::decode(&encoded)
+            .unwrap()
+            .unwrap();
+
+        for (index, item) in decoded.topics_iter().enumerate() {
+            assert_eq!(item.topic(), topics[index].topic());
+        }
+    }
+}
diff --git a/src/config.rs b/src/config.rs
index a5acd4d44..f671a35aa 100644
--- a/src/config.rs
+++ b/src/config.rs
@@ -68,7 +68,7 @@ pub struct Discv5Config {
     /// to contact an ENR.
     pub ip_mode: IpMode,
 
-    /// Reports all discovered ENR's when traversing the DHT to the event stream. Default true.
+    /// Reports all discovered ENRs when traversing the DHT to the event stream. Default true.
     pub report_discovered_peers: bool,
 
     /// A set of configuration parameters for setting inbound request rate limits. See
@@ -96,6 +96,11 @@ pub struct Discv5Config {
     /// will last indefinitely. Default is 1 hour.
     pub ban_duration: Option<Duration>,
 
+    /// A topic lookup should time out after a set duration, after which no more TOPICQUERY requests should
+    /// be sent to peers regardless of the number of results found. This is in order to avoid starvation. The
+    /// default value is 60 seconds.
+    pub topic_query_timeout: Duration,
+
     /// A custom executor which can spawn the discv5 tasks. This must be a tokio runtime, with
     /// timing support.
     /// By default, the executor that created the discv5 struct will be used.
     pub executor: Option<Box<dyn Executor + Send + Sync>>,
 
@@ -136,6 +141,7 @@ impl Default for Discv5Config {
             filter_max_bans_per_ip: Some(5),
             permit_ban_list: PermitBanList::default(),
             ban_duration: Some(Duration::from_secs(3600)), // 1 hour
+            topic_query_timeout: Duration::from_secs(60),
            ip_mode: IpMode::default(),
             executor: None,
         }
diff --git a/src/discv5.rs b/src/discv5.rs
index 52a94b9d2..a415cd597 100644
--- a/src/discv5.rs
+++ b/src/discv5.rs
@@ -13,25 +13,27 @@
 //! The server can be shutdown using the [`Discv5::shutdown`] function.
 
 use crate::{
+    advertisement::topic::TopicHash,
     error::{Discv5Error, QueryError, RequestError},
     kbucket::{
         self, ConnectionDirection, ConnectionState, FailureReason, InsertResult, KBucketsTable,
         NodeStatus, UpdateResult,
     },
     node_info::NodeContact,
-    service::{QueryKind, Service, ServiceRequest, TalkRequest},
-    Discv5Config, Enr,
+    service::{QueryKind, RegAttempts, Service, ServiceRequest, TalkRequest},
+    Discv5Config, Enr, Topic,
 };
 use enr::{CombinedKey, EnrError, EnrKey, NodeId};
 use parking_lot::RwLock;
 use std::{
+    collections::{BTreeMap, HashMap},
     future::Future,
     net::SocketAddr,
     sync::Arc,
     time::{Duration, Instant},
 };
 use tokio::sync::{mpsc, oneshot};
-use tracing::{debug, warn};
+use tracing::{debug, error, warn};
 
 #[cfg(feature = "libp2p")]
 use libp2p_core::Multiaddr;
@@ -43,15 +45,36 @@ lazy_static! {
     RwLock::new(crate::PermitBanList::default());
 }
 
+/// Helper function that returns the hash of the given topic string according to the
+/// implemented hashing algorithm.
+pub static HASH: for<'a> fn(topic: &'a str) -> TopicHash = |topic| {
+    let sha256_topic = Topic::new(topic);
+    sha256_topic.hash()
+};
+
+/// The duration for which pending entries have to be dormant before they are considered
+/// for insertion in a kbucket.
+pub(crate) const KBUCKET_PENDING_TIMEOUT: Duration = Duration::from_secs(60);
+
+/// Custom ENR keys.
+const ENR_KEY_FEATURES: &str = "features";
+pub const ENR_KEY_TOPICS: &str = "topics";
+
+/// Discv5 features.
+pub enum Features {
+    /// The protocol for advertising and looking up topics in Discv5 is supported.
+    Topics = 1,
+}
+
 mod test;
 
 /// Events that can be produced by the `Discv5` event stream.
 #[derive(Debug)]
 pub enum Discv5Event {
-    /// A node has been discovered from a FINDNODES request.
+    /// A node has been discovered from a FINDNODE request.
     ///
     /// The ENR of the node is returned. Various properties can be derived from the ENR.
-    /// This happen spontaneously through queries as nodes return ENR's. These ENR's are not
+    /// This happens spontaneously through queries as nodes return ENRs. These ENRs are not
     /// guaranteed to be live or contactable.
     Discovered(Enr),
     /// A new ENR was added to the routing table.
@@ -87,13 +110,13 @@ pub struct Discv5 {
 
 impl Discv5 {
     pub fn new(
-        local_enr: Enr,
+        mut local_enr: Enr,
         enr_key: CombinedKey,
         mut config: Discv5Config,
    ) -> Result<Self, &'static str> {
-        // ensure the keypair matches the one that signed the enr.
+        // ensure the key-pair matches the one that signed the enr.
         if local_enr.public_key() != enr_key.public() {
-            return Err("Provided keypair does not match the provided ENR");
+            return Err("Provided key-pair does not match the provided ENR");
         }
 
         // If an executor is not provided, assume a current tokio runtime is running. If not panic.
@@ -113,16 +136,24 @@ impl Discv5 {
             (None, None)
         };
 
+        // This node supports topic requests REGTOPIC and TOPICQUERY, and their responses.
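+        // The "features" ENR field holds a bitfield; Features::Topics sets its
+        // least significant bit.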
+        if let Err(e) = local_enr.insert(ENR_KEY_FEATURES, &[Features::Topics as u8], &enr_key) {
+            error!("Failed writing to enr. Error {:?}", e);
+            return Err("Failed to insert field 'features' into local enr");
+        }
+
         let local_enr = Arc::new(RwLock::new(local_enr));
         let enr_key = Arc::new(RwLock::new(enr_key));
         let kbuckets = Arc::new(RwLock::new(KBucketsTable::new(
             local_enr.read().node_id().into(),
-            Duration::from_secs(60),
+            KBUCKET_PENDING_TIMEOUT,
             config.incoming_bucket_limit,
             table_filter,
             bucket_filter,
         )));
 
         // Update the PermitBan list based on initial configuration
         *PERMIT_BAN_LIST.write() = config.permit_ban_list.clone();
@@ -177,7 +208,7 @@ impl Discv5 {
     /// operations involving one of these peers, without having to dial
     /// them upfront.
     pub fn add_enr(&self, enr: Enr) -> Result<(), &'static str> {
-        // only add ENR's that have a valid udp socket.
+        // only add ENRs that have a valid udp socket.
         if self.config.ip_mode.get_contactable_addr(&enr).is_none() {
             warn!("ENR attempted to be added without an UDP socket compatible with configured IpMode has been ignored.");
             return Err("ENR has no compatible UDP socket to connect to");
@@ -246,7 +277,7 @@ impl Discv5 {
         nodes_to_send
     }
 
-    /// Mark a node in the routing table as `Disconnnected`.
+    /// Mark a node in the routing table as `Disconnected`.
     ///
     /// A `Disconnected` node will be present in the routing table and will be only
     /// used if there are no other `Connected` peers in the bucket.
@@ -399,7 +430,7 @@ impl Discv5 {
             .collect()
     }
 
-    /// Returns an iterator over all the ENR's of nodes currently contained in the routing table.
+    /// Returns an iterator over all the ENRs of nodes currently contained in the routing table.
     pub fn table_entries_enr(&self) -> Vec<Enr> {
         self.kbuckets
             .write()
@@ -494,6 +525,219 @@ impl Discv5 {
         }
     }
 
+    /// Returns all ENR node IDs of nodes currently contained in the kbuckets of a given topic.
+    pub fn table_entries_id_topic(
+        &self,
+        topic: &'static str,
+    ) -> impl Future<Output = Result<BTreeMap<u64, Vec<NodeId>>, RequestError>> {
+        let channel = self.clone_channel();
+
+        async move {
+            let channel = channel.map_err(|_| RequestError::ServiceNotStarted)?;
+            let (callback_send, callback_recv) = oneshot::channel();
+
+            let topic = Topic::new(topic);
+            let topic_hash = topic.hash();
+
+            let event = ServiceRequest::TableEntriesIdTopicKBuckets(topic_hash, callback_send);
+
+            channel
+                .send(event)
+                .await
+                .map_err(|_| RequestError::ChannelFailed("Service channel closed".into()))?;
+
+            callback_recv.await.map_err(|e| RequestError::ChannelFailed(format!("Failed to receive table entries' ids for topic {} with topic hash {}. Error {}", topic, topic_hash, e)))?
+        }
+    }
+
+    /// Looks up a given topic on other nodes; nodes currently advertising the given topic return
+    /// the ENRs of those ads. The query keeps going through the given topic's kbuckets until a
+    /// certain number (passed to [`crate::service::ActiveTopicQueries`] upon instantiation) of
+    /// results are obtained or the query times out.
+    pub fn topic_query(
+        &self,
+        topic: &'static str,
+    ) -> impl Future<Output = Result<Vec<Enr>, RequestError>> + 'static {
+        let channel = self.clone_channel();
+
+        async move {
+            // the service will verify if this node is contactable, we just send it and
+            // await a response.
+            let (callback_send, callback_recv) = oneshot::channel();
+
+            let topic = Topic::new(topic);
+
+            let event = ServiceRequest::TopicQuery(topic.clone(), callback_send);
+            let channel = channel.map_err(|_| RequestError::ServiceNotStarted)?;
+
+            // send the request
+            channel
+                .send(event)
+                .await
+                .map_err(|_| RequestError::ChannelFailed("Service channel closed".into()))?;
+            // await the response
+            let ad_nodes = callback_recv.await.map_err(|e| {
+                RequestError::ChannelFailed(format!(
+                    "Failed to receive ad nodes from lookup of topic {} with topic hash {}. Error {}",
+                    topic, topic.hash(), e
+                ))
+            })?;
+            if ad_nodes.is_ok() {
+                debug!(
+                    "Received ad nodes for topic {} with topic hash {}",
+                    topic,
+                    topic.hash()
+                );
+            }
+            ad_nodes
+        }
+    }
+
+    /// Removes a topic we no longer wish to advertise on other nodes. This does not tell any nodes
+    /// we are currently advertised on to remove the advertisements, however in the next registration
+    /// interval no registration attempts will be made for the topic.
+    pub fn remove_topic(
+        &self,
+        topic_str: &'static str,
+    ) -> impl Future<Output = Result<(), RequestError>> + 'static {
+        let channel = self.clone_channel();
+
+        async move {
+            let channel = channel.map_err(|_| RequestError::ServiceNotStarted)?;
+            let (callback_send, callback_recv) = oneshot::channel();
+            let topic = Topic::new(topic_str);
+            let event = ServiceRequest::StopRegistrationOfTopic(topic, callback_send);
+            channel
+                .send(event)
+                .await
+                .map_err(|_| RequestError::ChannelFailed("Service channel closed".into()))?;
+            callback_recv.await.map_err(|e| {
+                RequestError::ChannelFailed(format!(
+                    "Failed to receive result from remove topic operation for topic {}. Error {}",
+                    topic_str, e
+                ))
+            })?
+        }
+    }
+
+    /// Adds a topic to register on other nodes. A topic is continuously re-registered when it is
+    /// its turn in the registration interval. To avoid bottlenecks, not necessarily all topics
+    /// nor all distances of a topic's kbuckets are covered in each registration interval. To stop
+    /// registering a topic it must be removed by calling remove_topic.
+    pub fn register_topic(
+        &self,
+        topic_str: &'static str,
+    ) -> impl Future<Output = Result<(), RequestError>> + 'static {
+        let channel = self.clone_channel();
+
+        async move {
+            let channel = channel
+                .as_ref()
+                .map_err(|_| RequestError::ServiceNotStarted)?;
+            let topic = Topic::new(topic_str);
+            debug!(
+                "Registering topic {} with topic hash {}",
+                topic,
+                topic.hash(),
+            );
+            let (callback_send, callback_recv) = oneshot::channel();
+            let event = ServiceRequest::RegisterTopic(topic, callback_send);
+            // send the request
+            channel
+                .send(event)
+                .await
+                .map_err(|_| RequestError::ChannelFailed("Service channel closed".into()))?;
+            callback_recv.await.map_err(|e| {
+                RequestError::ChannelFailed(format!(
+                    "Failed to receive result from register topic operation for topic {}. Error {}",
+                    topic_str, e
+                ))
+            })?
+        }
+    }
+
+    /// Retrieves the registration attempts for a given topic, either confirmed registrations that
+    /// are still active on other nodes or registration attempts that returned tickets we are
+    /// currently waiting on to expire (ticket wait time) before re-attempting registration at that
+    /// same node. Caution!
+    /// The returned map will also contain
+    pub fn reg_attempts(
+        &self,
+        topic_str: &'static str,
+    ) -> impl Future<Output = Result<HashMap<u64, RegAttempts>, RequestError>> + 'static {
+        let channel = self.clone_channel();
+        let (callback_send, callback_recv) = oneshot::channel();
+
+        async move {
+            let channel = channel
+                .as_ref()
+                .map_err(|_| RequestError::ServiceNotStarted)?;
+            let topic = Topic::new(topic_str);
+            let topic_hash = topic.hash();
+            let event = ServiceRequest::RegistrationAttempts(topic, callback_send);
+
+            channel
+                .send(event)
+                .await
+                .map_err(|_| RequestError::ServiceNotStarted)?;
+            callback_recv.await.map_err(|e| RequestError::ChannelFailed(format!("Failed to receive registration attempts for topic {} with topic hash {}. Error {}", topic_str, topic_hash, e)))?
+        }
+    }
+
+    /// Retrieves the topics that we have published on other nodes.
+    pub fn active_topics(
+        &self,
+    ) -> impl Future<Output = Result<HashMap<TopicHash, Vec<Enr>>, RequestError>> + 'static {
+        // the service will verify if this node is contactable, we just send it and
+        // await a response.
+        let (callback_send, callback_recv) = oneshot::channel();
+        let channel = self.clone_channel();
+
+        async move {
+            let channel = channel.map_err(|_| RequestError::ServiceNotStarted)?;
+
+            let event = ServiceRequest::ActiveTopics(callback_send);
+
+            // send the request
+            channel
+                .send(event)
+                .await
+                .map_err(|_| RequestError::ChannelFailed("Service channel closed".into()))?;
+            // await the response
+            callback_recv.await.map_err(|e| {
+                RequestError::ChannelFailed(format!("Failed to receive active topics. Error {}", e))
+            })?
+        }
+    }
+
+    /// Returns the ENRs of ads currently advertised locally on behalf of other nodes for a given topic.
+    pub fn ads(
+        &self,
+        topic: &'static str,
+    ) -> impl Future<Output = Result<Vec<Enr>, RequestError>> + 'static {
+        // the service will verify if this node is contactable, we just send it and
+        // await a response.
+        let (callback_send, callback_recv) = oneshot::channel();
+        let channel = self.clone_channel();
+
+        async move {
+            let channel = channel.map_err(|_| RequestError::ServiceNotStarted)?;
+            let topic = Topic::new(topic);
+            let topic_hash = topic.hash();
+            let event = ServiceRequest::Ads(topic_hash, callback_send);
+
+            // send the request
+            channel
+                .send(event)
+                .await
+                .map_err(|_| RequestError::ChannelFailed("Service channel closed".into()))?;
+            // await the response
+            callback_recv.await.map_err(|e| {
+                RequestError::ChannelFailed(format!(
+                    "Failed to receive ads for topic {} with topic hash {}. Error {}",
+                    topic, topic_hash, e
+                ))
+            })
+        }
+    }
+
     /// Runs an iterative `FIND_NODE` request.
     ///
     /// This will return peers containing contactable nodes of the DHT closest to the
@@ -610,3 +854,21 @@ impl Drop for Discv5 {
         self.shutdown();
     }
 }
+
+/// Check if a given peer supports a given feature of the Discv5 protocol.
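+/// The "features" ENR field is read as a bitfield, so the check is a bitwise
+/// AND, e.g. a peer advertising 0b0000_0001 supports [`Features::Topics`].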
+pub fn supports_feature(peer: &Enr, feature: Features) -> bool { + if let Some(supported_features) = peer.get(ENR_KEY_FEATURES) { + if let Some(supported_features_num) = supported_features.first() { + let feature_num = feature as u8; + supported_features_num & feature_num == feature_num + } else { + false + } + } else { + warn!( + "Enr of peer {} doesn't contain field 'features'", + peer.node_id() + ); + false + } +} diff --git a/src/discv5/test.rs b/src/discv5/test.rs index e64628597..b9c233f96 100644 --- a/src/discv5/test.rs +++ b/src/discv5/test.rs @@ -1,6 +1,9 @@ #![cfg(test)] -use crate::{kbucket, Discv5, *}; +use crate::{ + discv5::{supports_feature, Features, ENR_KEY_FEATURES}, + kbucket, Discv5, *, +}; use enr::{k256, CombinedKey, Enr, EnrBuilder, EnrKey, NodeId}; use rand_core::{RngCore, SeedableRng}; use std::{collections::HashMap, net::Ipv4Addr}; @@ -624,3 +627,23 @@ async fn test_bucket_limits() { // Number of entries should be equal to `bucket_limit`. assert_eq!(discv5.kbuckets.read().iter_ref().count(), bucket_limit); } + +#[test] +fn test_features_check() { + // Create the test values needed + let port = 6666; + let ip: std::net::IpAddr = "127.0.0.1".parse().unwrap(); + let key = CombinedKey::generate_secp256k1(); + let mut enr = crate::enr::EnrBuilder::new("v4") + .ip(ip) + .udp4(port) + .build(&key) + .unwrap(); + + let supported_versions = Features::Topics as u8; + + enr.insert(ENR_KEY_FEATURES, &[supported_versions], &key) + .unwrap(); + + assert!(supports_feature(&enr, Features::Topics)); +} diff --git a/src/error.rs b/src/error.rs index 633016b68..059c22f6b 100644 --- a/src/error.rs +++ b/src/error.rs @@ -1,4 +1,4 @@ -use crate::{handler::Challenge, node_info::NonContactable}; +use crate::{handler::Challenge, node_info::NonContactable, Topic}; use rlp::DecoderError; use std::fmt; @@ -101,6 +101,8 @@ pub enum RequestError { ChannelFailed(String), /// An invalid ENR was provided. InvalidEnr(&'static str), + /// Failed to update enr. + EnrWriteFailed, /// The remote's ENR is invalid. InvalidRemoteEnr, /// The remote returned and invalid packet. @@ -111,6 +113,32 @@ pub enum RequestError { InvalidMultiaddr(&'static str), /// Failure generating random numbers during request. EntropyFailure(&'static str), + /// Finding nodes closest to a topic hash failed. + TopicDistance(String), + /// A request that is responded with multiple responses + /// gets the wrong combination of responses. + InvalidResponseCombo(String), + /// A REGTOPIC request has sent a ticket that was not + /// issued by us. + InvalidTicket, + /// A REGTOPIC request is trying to register another node + /// than itself. + RegistrationOtherNode, + /// A REGTOPIC is not respecting the assigned wait time. + InvalidWaitTime, + /// A REGTOPIC tries to advertise a topic it does not + /// list in its enr. + InvalidEnrTopicsField, + /// The ENR can't fit the given topic into its topic field. + InsufficientSpaceEnr(Topic), + /// Neither a topic look up or registration has been done for the topic. + TopicKBucketsUninitialised, + /// The topic isn't stored in the topic query history. + TopicNotQueried, + /// The topic isn't being registered. + TopicNotRegistered, + /// The topic is already in registration. 
+ TopicAlreadyRegistered, } #[derive(Debug, Clone, PartialEq, Eq)] diff --git a/src/handler/active_requests.rs b/src/handler/active_requests.rs index 497c8e4a1..f488d02f8 100644 --- a/src/handler/active_requests.rs +++ b/src/handler/active_requests.rs @@ -21,7 +21,7 @@ impl ActiveRequests { } pub(crate) fn insert(&mut self, node_address: NodeAddress, request_call: RequestCall) { - let nonce = *request_call.packet.message_nonce(); + let nonce = *request_call.packet().message_nonce(); self.active_requests_mapping .insert(node_address.clone(), request_call); self.active_requests_nonce_mapping @@ -55,7 +55,7 @@ impl ActiveRequests { // Remove the associated nonce mapping. match self .active_requests_nonce_mapping - .remove(request_call.packet.message_nonce()) + .remove(request_call.packet().message_nonce()) { Some(_) => Some(request_call), None => { @@ -84,7 +84,7 @@ impl ActiveRequests { } for (address, request) in self.active_requests_mapping.iter() { - let nonce = request.packet.message_nonce(); + let nonce = request.packet().message_nonce(); if !self.active_requests_nonce_mapping.contains_key(nonce) { panic!("Address {} maps to request with nonce {:?}, which does not exist in `active_requests_nonce_mapping`", address, nonce); } @@ -99,7 +99,7 @@ impl Stream for ActiveRequests { Poll::Ready(Some(Ok((node_address, request_call)))) => { // Remove the associated nonce mapping. self.active_requests_nonce_mapping - .remove(request_call.packet.message_nonce()); + .remove(request_call.packet().message_nonce()); Poll::Ready(Some(Ok((node_address, request_call)))) } Poll::Ready(Some(Err(err))) => Poll::Ready(Some(Err(err))), diff --git a/src/handler/crypto/mod.rs b/src/handler/crypto/mod.rs index 0efa5e84b..dd66c6713 100644 --- a/src/handler/crypto/mod.rs +++ b/src/handler/crypto/mod.rs @@ -43,7 +43,7 @@ type Key = [u8; KEY_LENGTH]; /* Session key generation */ /// Generates session and auth-response keys for a nonce and remote ENR. This currently only -/// supports Secp256k1 signed ENR's. This returns four keys; initiator key, responder key, auth +/// supports Secp256k1 signed ENRs. This returns four keys; initiator key, responder key, auth /// response key and the ephemeral public key. pub(crate) fn generate_session_keys( local_id: &NodeId, @@ -407,7 +407,7 @@ mod tests { let message = decrypt_message(&key, nonce, &ciphertext, &auth_data).unwrap(); dbg!(&message); dbg!(hex::encode(&message)); - let rpc = crate::rpc::Message::decode(&message).unwrap(); + let rpc = crate::rpc::Message::decode(&message, &[0u8; 16]).unwrap(); println!("{}", rpc); } diff --git a/src/handler/mod.rs b/src/handler/mod.rs index 047b013f0..3cdc817f5 100644 --- a/src/handler/mod.rs +++ b/src/handler/mod.rs @@ -3,7 +3,7 @@ //! The [`Handler`] is responsible for establishing and maintaining sessions with //! connected/discovered nodes. Each node, identified by it's [`NodeId`] is associated with a //! `Session`. This service drives the handshakes for establishing the sessions and associated -//! logic for sending/requesting initial connections/ENR's to/from unknown peers. +//! logic for sending/requesting initial connections/ENRs to/from unknown peers. //! //! The [`Handler`] also manages the timeouts for each request and reports back RPC failures, //! and received messages. Messages are encrypted and decrypted using the @@ -27,19 +27,22 @@ //! Messages from a node on the network come by [`Socket`] and get the form of a [`HandlerOut`] //! and can be forwarded to the application layer via the send channel. 
use crate::{ + advertisement::topic::TopicHash, config::Discv5Config, discv5::PERMIT_BAN_LIST, error::{Discv5Error, RequestError}, packet::{ChallengeData, IdNonce, MessageNonce, Packet, PacketKind}, - rpc::{Message, Request, RequestBody, RequestId, Response, ResponseBody}, + rpc::{Message, Request, RequestBody, RequestId, Response, ResponseBody, FALSE_TICKET}, + service::ban_malicious_peer, socket, socket::{FilterConfig, Socket}, - Enr, + Enr, Topic, }; use delay_map::HashMapDelay; use enr::{CombinedKey, NodeId}; use futures::prelude::*; use parking_lot::RwLock; +use rlp::DecoderError; use std::{ collections::HashMap, convert::TryFrom, @@ -55,6 +58,7 @@ use tracing::{debug, error, trace, warn}; mod active_requests; mod crypto; +mod request_call; mod session; mod tests; @@ -64,6 +68,7 @@ use crate::metrics::METRICS; use crate::lru_time_cache::LruTimeCache; use active_requests::ActiveRequests; +use request_call::RequestCall; use session::Session; // The time interval to check banned peer timeouts and unban peers when the timeout has elapsed (in @@ -112,6 +117,9 @@ pub enum HandlerOut { /// ENR declares no `SocketAddr`. Established(Enr, SocketAddr, ConnectionDirection), + /// A session has been established for the purpose of publishing advertisements. + EstablishedTopic(Enr, ConnectionDirection, TopicHash), + /// A Request has been received from a node on the network. Request(NodeAddress, Box), @@ -151,53 +159,13 @@ pub struct Challenge { remote_enr: Option, } -/// A request to a node that we are waiting for a response. -#[derive(Debug)] -pub(crate) struct RequestCall { - contact: NodeContact, - /// The raw discv5 packet sent. - packet: Packet, - /// The unencrypted message. Required if need to re-encrypt and re-send. - request: Request, - /// Handshakes attempted. - handshake_sent: bool, - /// The number of times this request has been re-sent. - retries: u8, - /// If we receive a Nodes Response with a total greater than 1. This keeps track of the - /// remaining responses expected. - remaining_responses: Option, - /// Signifies if we are initiating the session with a random packet. This is only used to - /// determine the connection direction of the session. - initiating_session: bool, -} - -impl RequestCall { - fn new( - contact: NodeContact, - packet: Packet, - request: Request, - initiating_session: bool, - ) -> Self { - RequestCall { - contact, - packet, - request, - handshake_sent: false, - retries: 1, - remaining_responses: None, - initiating_session, - } - } - - fn id(&self) -> &RequestId { - &self.request.id - } -} - /// Process to handle handshakes and sessions established from raw RPC communications between nodes. pub struct Handler { /// Configuration for the discv5 service. request_retries: u8, + /// The duration nodes that show malicious behaviour are banned. A configuration for the + /// discv5 service. + ban_duration: Option, /// The local node id to save unnecessary read locks on the ENR. The NodeID should not change /// during the operation of the server. node_id: NodeId, @@ -205,6 +173,8 @@ pub struct Handler { enr: Arc>, /// The key to sign the ENR and set up encrypted communication with peers. key: Arc>, + /// The key used for en-/decrypting tickets. + ticket_key: [u8; 16], /// Pending raw requests. active_requests: ActiveRequests, /// The expected responses by SocketAddr which allows packets to pass the underlying filter. 
@@ -289,9 +259,11 @@ impl Handler { let mut handler = Handler { request_retries: config.request_retries, + ban_duration: config.ban_duration, node_id, enr, key, + ticket_key: rand::random(), active_requests: ActiveRequests::new(config.request_timeout), pending_requests: HashMap::new(), filter_expected_responses, @@ -338,6 +310,7 @@ impl Handler { self.process_inbound_packet(inbound_packet).await; } Some(Ok((node_address, pending_request))) = self.active_requests.next() => { + trace!("Discarding request {} with timeout", pending_request.kind()); self.handle_request_timeout(node_address, pending_request).await; } Some(Ok((node_address, _challenge))) = self.active_challenges.next() => { @@ -432,7 +405,10 @@ impl Handler { node_address: NodeAddress, mut request_call: RequestCall, ) { - if request_call.retries >= self.request_retries { + // NOTE: We consider it a node fault if we are waiting for a REGCONFIRMATION and we receive + // a timeout. We should only be waiting for a REGCONFIRMATION if we know one should be + // coming. + if request_call.retries() >= self.request_retries { trace!("Request timed out with {}", node_address); // Remove the request from the awaiting packet_filter self.remove_expected_response(node_address.socket_addr); @@ -443,12 +419,12 @@ impl Handler { // increment the request retry count and restart the timeout trace!( "Resending message: {} to {}", - request_call.request, + request_call.raw_request(), node_address ); - self.send(node_address.clone(), request_call.packet.clone()) + self.send(node_address.clone(), request_call.packet().clone()) .await; - request_call.retries += 1; + request_call.retry(); self.active_requests.insert(node_address, request_call); } } @@ -512,13 +488,14 @@ impl Handler { // Check for an established session if let Some(session) = self.sessions.get_mut(&node_address) { // Encrypt the message and send - let packet = match session.encrypt_message(self.node_id, &response.encode()) { - Ok(packet) => packet, - Err(e) => { - warn!("Could not encrypt response: {:?}", e); - return; - } - }; + let packet = + match session.encrypt_message(self.node_id, &response.encode(&self.ticket_key)) { + Ok(packet) => packet, + Err(e) => { + warn!("Could not encrypt response: {:?}", e); + return; + } + }; self.send(node_address, packet).await; } else { // Either the session is being established or has expired. We simply drop the @@ -599,26 +576,26 @@ impl Handler { }; // double check the message nonces match - if request_call.packet.message_nonce() != &request_nonce { + if request_call.packet().message_nonce() != &request_nonce { // This could theoretically happen if a peer uses the same node id across // different connections. - warn!("Received a WHOAREYOU from a non expected source. Source: {}, message_nonce {} , expected_nonce: {}", request_call.contact, hex::encode(request_call.packet.message_nonce()), hex::encode(request_nonce)); + warn!("Received a WHOAREYOU from a non expected source. Source: {}, message_nonce {} , expected_nonce: {}", request_call.contact(), hex::encode(request_call.packet().message_nonce()), hex::encode(request_nonce)); // NOTE: Both mappings are removed in this case. return; } trace!( "Received a WHOAREYOU packet response. Source: {}", - request_call.contact + request_call.contact() ); // We do not allow multiple WHOAREYOU packets for a single challenge request. If we have // already sent a WHOAREYOU ourselves, we drop sessions who send us a WHOAREYOU in // response. 
- if request_call.handshake_sent { + if request_call.handshake_sent() { warn!( "Authentication response already sent. Dropping session. Node: {}", - request_call.contact + request_call.contact() ); self.fail_request(request_call, RequestError::InvalidRemotePacket, true) .await; @@ -636,12 +613,12 @@ impl Handler { // Generate a new session and authentication packet let (auth_packet, mut session) = match Session::encrypt_with_header( - &request_call.contact, + request_call.contact(), self.key.clone(), updated_enr, &self.node_id, &challenge_data, - &(request_call.request.clone().encode()), + &(request_call.raw_request().clone().encode()), ) { Ok(v) => v, Err(e) => { @@ -665,49 +642,55 @@ impl Handler { // // All sent requests must have an associated node_id. Therefore the following // must not panic. - let node_address = request_call.contact.node_address(); - match request_call.contact.enr() { + let node_address = request_call.contact().node_address(); + match request_call.contact().enr() { Some(enr) => { // NOTE: Here we decide if the session is outgoing or ingoing. The condition for an // outgoing session is that we originally sent a RANDOM packet (signifying we did // not have a session for a request) and the packet is not a PING (we are not // trying to update an old session that may have expired. let connection_direction = { - match (&request_call.initiating_session, &request_call.request.body) { + match (request_call.initiating_session(), request_call.kind()) { (true, RequestBody::Ping { .. }) => ConnectionDirection::Incoming, (true, _) => ConnectionDirection::Outgoing, (false, _) => ConnectionDirection::Incoming, } }; + // Notify the application that the session has been established + let event = match request_call.kind() { + RequestBody::RegisterTopic { topic, ticket: _ } => { + let topic_hash = Topic::new(topic).hash(); + HandlerOut::EstablishedTopic(enr, connection_direction, topic_hash) + } + RequestBody::TopicQuery { topic } => { + HandlerOut::EstablishedTopic(enr, connection_direction, *topic) + } + _ => { + HandlerOut::Established(enr, node_address.socket_addr, connection_direction) + } + }; + // We already know the ENR. Send the handshake response packet trace!("Sending Authentication response to node: {}", node_address); - request_call.packet = auth_packet.clone(); - request_call.handshake_sent = true; - request_call.initiating_session = false; + request_call.upgrade_to_auth_packet(auth_packet.clone()); + request_call.set_initiating_session(false); // Reinsert the request_call self.insert_active_request(request_call); // Send the actual packet to the send task. self.send(node_address.clone(), auth_packet).await; - // Notify the application that the session has been established self.service_send - .send(HandlerOut::Established( - enr, - node_address.socket_addr, - connection_direction, - )) + .send(event) .await .unwrap_or_else(|e| warn!("Error with sending channel: {}", e)); } None => { // Don't know the ENR. 
Establish the session, but request an ENR also - // Send the Auth response - let contact = request_call.contact.clone(); + let contact = request_call.contact().clone(); trace!("Sending Authentication response to node: {}", node_address); - request_call.packet = auth_packet.clone(); - request_call.handshake_sent = true; + request_call.upgrade_to_auth_packet(auth_packet.clone()); // Reinsert the request_call self.insert_active_request(request_call); self.send(node_address.clone(), auth_packet).await; @@ -882,10 +865,21 @@ impl Handler { // attempt to decrypt and process the message. let message = match session.decrypt_message(message_nonce, message, authenticated_data) { - Ok(m) => match Message::decode(&m) { + Ok(m) => match Message::decode(&m, &self.ticket_key) { Ok(p) => p, Err(e) => { warn!("Failed to decode message. Error: {:?}, {}", e, node_address); + if let DecoderError::Custom(FALSE_TICKET) = e { + warn!("Node sent a ticket that couldn't be decrypted with local ticket key. Blacklisting peer {}", node_address.node_id); + ban_malicious_peer(self.ban_duration, node_address.clone()); + self.fail_session( + &node_address, + RequestError::InvalidRemotePacket, + true, + ) + .await; + return; + } return; } }, @@ -971,6 +965,7 @@ impl Handler { return; } } + trace!("Handling a {} response", response.body); // Handle standard responses self.handle_response(node_address, response).await; } @@ -998,56 +993,39 @@ impl Handler { /// Nodes response. async fn handle_response(&mut self, node_address: NodeAddress, response: Response) { // Find a matching request, if any + trace!("Received {} response", response.body); + if let Some(mut request_call) = self.active_requests.remove(&node_address) { if request_call.id() != &response.id { + // add the request back and reset the timer trace!( "Received an RPC Response to an unknown request. Likely late response. {}", node_address ); - // add the request back and reset the timer self.active_requests.insert(node_address, request_call); return; } // The response matches a request - // Check to see if this is a Nodes response, in which case we may require to wait for - // extra responses - if let ResponseBody::Nodes { total, .. } = response.body { - if total > 1 { - // This is a multi-response Nodes response - if let Some(remaining_responses) = request_call.remaining_responses.as_mut() { - *remaining_responses -= 1; - if remaining_responses != &0 { - // more responses remaining, add back the request and send the response - // add back the request and send the response - self.active_requests - .insert(node_address.clone(), request_call); - if let Err(e) = self - .service_send - .send(HandlerOut::Response(node_address, Box::new(response))) - .await - { - warn!("Failed to inform of response {}", e) - } - return; - } - } else { - // This is the first instance - request_call.remaining_responses = Some(total - 1); - // add back the request and send the response - self.active_requests - .insert(node_address.clone(), request_call); - if let Err(e) = self - .service_send - .send(HandlerOut::Response(node_address, Box::new(response))) - .await - { - warn!("Failed to inform of response {}", e) - } + // Check to see if the matching request requires us to wait for extra responses. + match response.body { + ResponseBody::Nodes { total, .. 
} => { + // Update the request call state. If more responses are expected, reinsert the + // request and keep waiting. + if request_call.register_nodes_response(total) { + // This is a multi-response Nodes response + self.reinsert_request(node_address, request_call, response) + .await; + return; + } + } + ResponseBody::Pong { .. } + | ResponseBody::Talk { .. } + | ResponseBody::Ticket { .. } => { + // These are all associated with a single response + } } // Remove the expected response @@ -1072,9 +1050,32 @@ } } + /// A helper function used in `handle_response` to re-insert a request_call and await another + /// response, whilst sending the response back to the service. + async fn reinsert_request( + &mut self, + node_address: NodeAddress, + request_call: RequestCall, + response: Response, + ) { + // More responses are expected: add back the request and forward this response + // to the service. + trace!("Reinserting active request"); + self.active_requests + .insert(node_address.clone(), request_call); + if let Err(e) = self + .service_send + .send(HandlerOut::Response(node_address, Box::new(response))) + .await + { + warn!("Failed to inform of response {}", e) + } + } + /// Inserts a request and associated auth_tag mapping. fn insert_active_request(&mut self, request_call: RequestCall) { - let node_address = request_call.contact.node_address(); + let node_address = request_call.contact().node_address(); // adds the mapping of message nonce to node address self.active_requests.insert(node_address, request_call); @@ -1100,16 +1101,16 @@ ) { // The Request has expired, remove the session. // Fail the current request - let request_id = request_call.request.id; + let request_id = request_call.id(); if let Err(e) = self .service_send - .send(HandlerOut::RequestFailed(request_id, error.clone())) + .send(HandlerOut::RequestFailed(request_id.clone(), error.clone())) .await { warn!("Failed to inform request failure {}", e) } - let node_address = request_call.contact.node_address(); + let node_address = request_call.contact().node_address(); self.fail_session(&node_address, error, remove_session) .await; } diff --git a/src/handler/request_call.rs b/src/handler/request_call.rs new file mode 100644 index 000000000..e4f224b52 --- /dev/null +++ b/src/handler/request_call.rs @@ -0,0 +1,123 @@ +use super::*; + +/// The maximum number of NODES responses we allow at the handler level. +const MAX_NODES_RESPONSES: u64 = 5; + +/// A request to a node for which we are awaiting a response. +#[derive(Debug)] +pub(crate) struct RequestCall { + contact: NodeContact, + /// The raw discv5 packet sent. + packet: Packet, + /// The unencrypted message. Required if we need to re-encrypt and re-send. + request: Request, + /// Handshakes attempted. + handshake_sent: bool, + /// The number of times this request has been re-sent. + retries: u8, + /// A NODES response can span multiple datagrams. If we are receiving multiple NODES responses, + /// this tracks the number of datagrams we are still expecting. + awaiting_nodes: Option, + /// Signifies if we are initiating the session with a random packet. This is only used to + /// determine the connection direction of the session.
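+ /// (An initiating random packet followed by a PING request still yields an incoming + /// connection direction; see the `connection_direction` match in src/handler/mod.rs.)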
+ initiating_session: bool, +} + +impl RequestCall { + pub fn new( + contact: NodeContact, + packet: Packet, + request: Request, + initiating_session: bool, + ) -> Self { + RequestCall { + contact, + packet, + request, + handshake_sent: false, + retries: 1, + awaiting_nodes: None, + initiating_session, + } + } + + /// Increments the retry count. + pub fn retry(&mut self) { + self.retries = self.retries.saturating_add(1); + } + + /// We are now sending an authentication response to the node. The packet is being upgraded to + /// an authentication packet. + pub fn upgrade_to_auth_packet(&mut self, packet: Packet) { + self.packet = packet; + self.handshake_sent = true; + } + + /// Sets the initiating_session flag. + pub fn set_initiating_session(&mut self, initiating_session: bool) { + self.initiating_session = initiating_session; + } + + /// Registers a received NODES response with the given total. Updates the state of the + /// [`RequestCall`] and returns true while further responses are still expected. + pub fn register_nodes_response(&mut self, total: u64) -> bool { + if total > 1 && total <= MAX_NODES_RESPONSES { + if let Some(remaining) = self.awaiting_nodes.as_mut() { + // Decrement the count in place; binding a copy of the value here would leave + // `awaiting_nodes` unchanged and this request waiting forever. + *remaining = remaining.saturating_sub(1); + if *remaining == 0 { + // Change the state so that callers can be informed we are no longer + // waiting for messages. + self.awaiting_nodes = None; + } else { + return true; // still waiting for more messages + } + } else { + // This is the first response, e.g. a total of 3 leaves 2 more to come. + self.awaiting_nodes = Some(total - 1); + return true; // still waiting for more messages + } + } + false // This was a single NODES response; there are no more messages to wait for. + } + + /// Returns the request ID associated with the [`RequestCall`]. + pub fn id(&self) -> &RequestId { + &self.request.id + } + + /// Returns the raw request. + pub fn raw_request(&self) -> &Request { + &self.request + } + + /// Returns the raw packet of the request. + pub fn packet(&self) -> &Packet { + &self.packet + } + + /// The destination contact for this request. + pub fn contact(&self) -> &NodeContact { + &self.contact + } + + /// Returns the [`RequestBody`] associated with the [`RequestCall`]. + pub fn kind(&self) -> &RequestBody { + &self.request.body + } + + /// Returns the number of retries this request has undertaken. + pub fn retries(&self) -> u8 { + self.retries + } + + /// Whether we have sent a handshake or not. + pub fn handshake_sent(&self) -> bool { + self.handshake_sent + } + + /// Whether our node is the one that is initiating the session.
+ pub fn initiating_session(&self) -> bool { + self.initiating_session + } +} diff --git a/src/handler/tests.rs b/src/handler/tests.rs index 91fbac890..9b9114206 100644 --- a/src/handler/tests.rs +++ b/src/handler/tests.rs @@ -247,7 +247,7 @@ async fn test_active_requests_insert() { let request_call = RequestCall::new(contact, packet, request, initiating_session); // insert the pair and verify the mapping remains in sync - let nonce = *request_call.packet.message_nonce(); + let nonce = *request_call.packet().message_nonce(); active_requests.insert(node_address, request_call); active_requests.check_invariant(); active_requests.remove_by_nonce(&nonce); diff --git a/src/ipmode.rs b/src/ipmode.rs index e95abd409..0817f20fb 100644 --- a/src/ipmode.rs +++ b/src/ipmode.rs @@ -4,7 +4,7 @@ use std::net::SocketAddr; /// Sets the socket type to be established and also determines the type of ENRs that we will store /// in our routing table. -/// We store ENR's that have a `get_contractable_addr()` based on the `IpMode` set. +/// We store ENRs that have a `get_contractable_addr()` based on the `IpMode` set. #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum IpMode { /// IPv4 only. This creates an IPv4 only UDP socket and will only store ENRs in the local @@ -237,7 +237,7 @@ mod tests { } } -/// Copied from the standard library. See https://github.com/rust-lang/rust/issues/27709 +/// Copied from the standard library. See <https://github.com/rust-lang/rust/issues/27709> /// The current code is behind the `ip` feature. pub const fn to_ipv4_mapped(ip: &std::net::Ipv6Addr) -> Option { match ip.octets() { diff --git a/src/kbucket.rs b/src/kbucket.rs index 1b7a3e8ac..3c824c71e 100644 --- a/src/kbucket.rs +++ b/src/kbucket.rs @@ -499,8 +499,10 @@ where } } - /// Returns an iterator over all the entries in the routing table. - pub fn iter(&mut self) -> impl Iterator> { + /// Returns an iterator over all the entries in the routing table, ordered by increasing + /// distance, since the buckets are stored in a vector to which they are added in + /// increasing order. + pub fn iter(&mut self) -> impl DoubleEndedIterator> { let applied_pending = &mut self.applied_pending; self.buckets.iter_mut().flat_map(move |table| { if let Some(applied) = table.apply_pending() { diff --git a/src/kbucket/bucket.rs b/src/kbucket/bucket.rs index 17a6ab295..f89c06c6b 100644 --- a/src/kbucket/bucket.rs +++ b/src/kbucket/bucket.rs @@ -217,7 +217,7 @@ pub enum FailureReason { /// The node didn't pass the table filter. TableFilter, /// The node didn't exist. - KeyNonExistant, + KeyNonExistent, /// The bucket was full. BucketFull, /// Cannot update self, @@ -273,7 +273,7 @@ where } /// Returns an iterator over the nodes in the bucket, together with their status.
- pub fn iter(&self) -> impl Iterator> { + pub fn iter(&self) -> impl DoubleEndedIterator> { self.nodes.iter() } @@ -467,10 +467,10 @@ where } UpdateResult::UpdatedPending } else { - UpdateResult::Failed(FailureReason::KeyNonExistant) + UpdateResult::Failed(FailureReason::KeyNonExistent) } } else { - UpdateResult::Failed(FailureReason::KeyNonExistant) + UpdateResult::Failed(FailureReason::KeyNonExistent) } } @@ -506,10 +506,10 @@ where pending.node.value = value; UpdateResult::UpdatedPending } else { - UpdateResult::Failed(FailureReason::KeyNonExistant) + UpdateResult::Failed(FailureReason::KeyNonExistent) } } else { - UpdateResult::Failed(FailureReason::KeyNonExistant) + UpdateResult::Failed(FailureReason::KeyNonExistent) } } diff --git a/src/lib.rs b/src/lib.rs index a2864b26d..3dd179ab8 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -29,7 +29,7 @@ //! needed and get dropped after a timeout. This section manages the creation and maintenance of //! sessions between nodes and the encryption/decryption of packets from the socket. It is realised by the [`handler::Handler`] struct and it runs in its own task. //! * Service - This section contains the protocol-level logic. In particular it manages the -//! routing table of known ENR's, topic registration/advertisement and performs various queries +//! routing table of known ENRs, topic registration/advertisement and performs various queries //! such as peer discovery. This section is realised by the [`Service`] struct. This also runs in //! it's own thread. //! * Application - This section is the user-facing API which can start/stop the underlying @@ -102,6 +102,7 @@ //! [`Service`]: service/struct.Service.html //! [`Session`]: session/struct.Session.html +pub mod advertisement; mod config; mod discv5; mod error; @@ -123,8 +124,11 @@ pub mod socket; extern crate lazy_static; pub type Enr = enr::Enr; +pub type Topic = crate::advertisement::topic::Sha256Topic; +pub type TopicsEnrField = + crate::advertisement::topic::TopicsEnrField; -pub use crate::discv5::{Discv5, Discv5Event}; +pub use crate::discv5::{Discv5, Discv5Event, HASH}; pub use config::{Discv5Config, Discv5ConfigBuilder}; pub use error::{Discv5Error, QueryError, RequestError, ResponseError}; pub use executor::{Executor, TokioExecutor}; diff --git a/src/metrics.rs b/src/metrics.rs index 2e6f2fc9a..1f2b1c0cd 100644 --- a/src/metrics.rs +++ b/src/metrics.rs @@ -16,6 +16,12 @@ pub struct InternalMetrics { pub bytes_sent: AtomicUsize, /// The number of bytes received. pub bytes_recv: AtomicUsize, + /// The number of topics to attempt advertising on other nodes. + pub topics_to_publish: AtomicUsize, + /// The number of ads currently advertised locally for other nodes. + pub hosted_ads: AtomicUsize, + /// The number of active regtopic requests awaiting a REGCONFIRMATION response. + pub active_regtopic_req: AtomicUsize, } impl Default for InternalMetrics { @@ -26,6 +32,9 @@ impl Default for InternalMetrics { unsolicited_requests_per_window: AtomicUsize::new(0), bytes_sent: AtomicUsize::new(0), bytes_recv: AtomicUsize::new(0), + topics_to_publish: AtomicUsize::new(0), + hosted_ads: AtomicUsize::new(0), + active_regtopic_req: AtomicUsize::new(0), } } } @@ -55,6 +64,12 @@ pub struct Metrics { pub bytes_sent: usize, /// The number of bytes received. pub bytes_recv: usize, + /// The number of topics to attempt advertising on other nodes. + pub topics_to_publish: usize, + /// The number of ads currently advertised locally for other nodes. 
+ pub hosted_ads: usize, + /// The number of active regtopic requests. + pub active_regtopic_req: usize, } impl From<&METRICS> for Metrics { @@ -67,6 +82,9 @@ impl From<&METRICS> for Metrics { / internal_metrics.moving_window as f64, bytes_sent: internal_metrics.bytes_sent.load(Ordering::Relaxed), bytes_recv: internal_metrics.bytes_recv.load(Ordering::Relaxed), + topics_to_publish: internal_metrics.topics_to_publish.load(Ordering::Relaxed), + hosted_ads: internal_metrics.hosted_ads.load(Ordering::Relaxed), + active_regtopic_req: internal_metrics.active_regtopic_req.load(Ordering::Relaxed), } } } diff --git a/src/query_pool/peers/predicate.rs b/src/query_pool/peers/predicate.rs index 4768a1c35..c3258cfd2 100644 --- a/src/query_pool/peers/predicate.rs +++ b/src/query_pool/peers/predicate.rs @@ -21,7 +21,7 @@ pub(crate) struct PredicateQuery { /// The number of peers for which the query is currently waiting for results. num_waiting: usize, - /// The predicate function to be applied to filter the ENR's found during the search. + /// The predicate function to be applied to filter the ENRs found during the search. predicate: Box bool + Send + 'static>, /// The configuration of the query. diff --git a/src/rpc.rs b/src/rpc.rs index e66b3bb91..276b0f89f 100644 --- a/src/rpc.rs +++ b/src/rpc.rs @@ -1,9 +1,159 @@ -use enr::{CombinedKey, Enr}; -use rlp::{DecoderError, RlpStream}; -use std::net::{IpAddr, Ipv6Addr}; -use tracing::{debug, warn}; +use crate::{advertisement::topic::TopicHash, Enr}; +use aes_gcm::{ + aead::{generic_array::GenericArray, Aead, NewAead, Payload}, + Aes128Gcm, +}; +use enr::NodeId; +use more_asserts::debug_unreachable; +use rlp::{DecoderError, Rlp, RlpStream}; +use std::{ + net::{IpAddr, Ipv6Addr}, + time::{SystemTime, UNIX_EPOCH}, +}; +use tokio::time::{Duration, Instant}; +use tracing::{debug, error, warn}; + +pub const FALSE_TICKET: &str = "TICKET_ENCRYPTED_BY_FOREIGN_KEY"; + +/// A ticket contained in the body of a REGTOPIC request. +#[derive(Debug, Clone, Eq, PartialEq)] +pub enum RequestTicket { + /// If this REGTOPIC is the first being sent to a given peer, no + /// ticket will be at hand. + Empty, + /// This is an incoming REGTOPIC request with a ticket this node + /// issued to the sender at the prior registration attempt. + LocallyIssued(Ticket), + /// This is an outgoing REGTOPIC request returning a ticket + /// received from the recipient at the prior registration + /// attempt. + RemotelyIssued(Vec), +} + +impl RequestTicket { + pub fn encode(&self) -> Vec { + let mut buf = Vec::new(); + let mut s = RlpStream::new(); + s.append(self); + buf.extend_from_slice(&s.out()); + buf + } + + pub fn decode(ticket: &[u8]) -> Result { + let rlp = rlp::Rlp::new(ticket); + let request_ticket = rlp.as_val::()?; + Ok(request_ticket) + } +} + +impl rlp::Encodable for RequestTicket { + fn rlp_append(&self, s: &mut RlpStream) { + match self { + RequestTicket::Empty => { + s.append(&Vec::new()); + } + RequestTicket::LocallyIssued(ticket) => { + debug!("A locally issued ticket will never be sent in the form of a request hence the RequestTicket::LocallyIssued variant should not need to be encoded. This functionality should merely be invoked by tests."); + s.append(ticket); + } + RequestTicket::RemotelyIssued(bytes) => { + // A remotely issued ticket is encoded to return it to its issuer once its wait + // time expires. 
+ s.append(bytes); + } + } + } +} + +impl rlp::Decodable for RequestTicket { + fn decode(rlp: &Rlp<'_>) -> Result { + // A ticket arriving in a REGTOPIC request that we decode should only be one that + // was locally issued. A remotely issued ticket (RequestTicket::RemotelyIssued) is + // only encoded by this node to return it to its issuer. + Ok(RequestTicket::LocallyIssued(rlp.as_val::()?)) + } +} + +impl std::fmt::Display for RequestTicket { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + RequestTicket::Empty => { + write!(f, "Empty") + } + RequestTicket::LocallyIssued(ticket) => { + write!(f, "Locally issued ticket: {}", ticket) + } + RequestTicket::RemotelyIssued(bytes) => { + write!(f, "Remotely issued ticket: {}", hex::encode(bytes)) + } + } + } +} + +/// A ticket contained in the body of a TICKET response. +#[derive(Debug, Clone, Eq, PartialEq)] +pub enum ResponseTicket { + /// This is an outgoing TICKET response containing a locally + /// assembled ticket. + LocallyIssued(Ticket), + /// This is an incoming TICKET response containing a ticket + /// issued by the sender. + RemotelyIssued(Vec), +} + +impl ResponseTicket { + pub fn encode(&self) -> Vec { + let mut buf = Vec::new(); + let mut s = RlpStream::new(); + s.append(self); + buf.extend_from_slice(&s.out()); + buf + } + + pub fn decode(ticket: &[u8]) -> Result { + let rlp = rlp::Rlp::new(ticket); + let response_ticket = rlp.as_val::()?; + Ok(response_ticket) + } +} + +impl rlp::Encodable for ResponseTicket { + fn rlp_append(&self, s: &mut RlpStream) { + match self { + ResponseTicket::LocallyIssued(ticket) => { + s.append(ticket); + } + ResponseTicket::RemotelyIssued(bytes) => { + debug!("A remotely issued ticket will never be returned to the issuer in the form of a response hence the ResponseTicket::RemotelyIssued variant should not need to be encoded. This functionality should merely be invoked by tests."); + s.append(bytes); + } + } + } +} -type TopicHash = [u8; 32]; +impl rlp::Decodable for ResponseTicket { + fn decode(rlp: &Rlp<'_>) -> Result { + // A ticket arriving in a TICKET response that we decode should only be one that + // was remotely issued. A locally issued ticket (ResponseTicket::LocallyIssued) is + // only encoded by this node and sent to a given peer. + Ok(ResponseTicket::RemotelyIssued(rlp.as_val::>()?)) + } +} + +impl std::fmt::Display for ResponseTicket { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + ResponseTicket::LocallyIssued(ticket) => { + write!(f, "Locally issued ticket: {}", ticket) + } + ResponseTicket::RemotelyIssued(bytes) => { + write!(f, "Remotely issued ticket: {}", hex::encode(bytes)) + } + } + } +} /// Type to manage the request IDs. #[derive(Debug, Clone, PartialEq, Hash, Eq)] @@ -73,21 +223,25 @@ pub enum RequestBody { /// The distance(s) of peers we expect to be returned in the response. distances: Vec, }, - /// A Talk request. + /// A TALKREQ request. Talk { /// The protocol requesting. protocol: Vec, /// The request. request: Vec, }, - /// A REGISTERTOPIC request. + /// A REGTOPIC request. RegisterTopic { - topic: Vec, - enr: crate::Enr, - ticket: Vec, + /// The topic string we want to advertise at the node receiving this request. + topic: String, + /// The ticket from a previous registration attempt, or empty. + ticket: RequestTicket, }, /// A TOPICQUERY request.
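+ /// Answered with one or more NODES responses listing the ENRs advertised for the topic.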
- TopicQuery { topic: TopicHash }, + TopicQuery { + /// The hashed topic we want NODES response(s) for. + topic: TopicHash, + }, } #[derive(Debug, Clone, PartialEq, Eq)] @@ -101,24 +255,26 @@ pub enum ResponseBody { /// Our external UDP port as observed by the responder. port: u16, }, - /// A NODES response. + /// A NODES response to a FINDNODE or TOPICQUERY request. Nodes { /// The total number of responses that make up this response. total: u64, - /// A list of ENR's returned by the responder. - nodes: Vec>, + /// A list of ENRs returned by the responder. + nodes: Vec, }, - /// The TALK response. + /// The TALKRESP response. Talk { - /// The response for the talk. + /// The response for the TALKREQ request. response: Vec, }, + /// The TICKET response. Ticket { - ticket: Vec, + /// The ticket issued in response to a REGTOPIC request. + ticket: ResponseTicket, + /// The time in seconds to wait before attempting to register again. wait_time: u64, - }, - RegisterConfirmation { - topic: Vec, + /// The topic for which the opaque ticket is issued. + topic: String, }, } @@ -129,7 +285,7 @@ impl Request { RequestBody::FindNode { .. } => 3, RequestBody::Talk { .. } => 5, RequestBody::RegisterTopic { .. } => 7, - RequestBody::TopicQuery { .. } => 10, + RequestBody::TopicQuery { .. } => 9, } } @@ -168,12 +324,11 @@ impl Request { buf.extend_from_slice(&s.out()); buf } - RequestBody::RegisterTopic { topic, enr, ticket } => { + RequestBody::RegisterTopic { topic, ticket } => { let mut s = RlpStream::new(); - s.begin_list(4); + // With the enr field gone, the list holds only the id, topic and ticket. + s.begin_list(3); s.append(&id.as_bytes()); s.append(&topic); - s.append(&enr); s.append(&ticket); buf.extend_from_slice(&s.out()); buf @@ -182,7 +337,7 @@ let mut s = RlpStream::new(); s.begin_list(2); s.append(&id.as_bytes()); - s.append(&(&topic as &[u8])); + s.append(&topic); buf.extend_from_slice(&s.out()); buf } @@ -197,7 +352,6 @@ impl Response { ResponseBody::Nodes { .. } => 4, ResponseBody::Talk { .. } => 6, ResponseBody::Ticket { .. } => 8, - ResponseBody::RegisterConfirmation { .. } => 9, } } @@ -213,14 +367,11 @@ } ResponseBody::Talk { .. } => matches!(req, RequestBody::Talk { .. }), ResponseBody::Ticket { .. } => matches!(req, RequestBody::RegisterTopic { .. }), - ResponseBody::RegisterConfirmation { .. } => { - matches!(req, RequestBody::RegisterTopic { .. }) - } } } /// Encodes a Message to RLP-encoded bytes.
- pub fn encode(self) -> Vec { + pub fn encode(self, ticket_key: &[u8; 16]) -> Vec { let mut buf = Vec::with_capacity(10); let msg_type = self.msg_type(); buf.push(msg_type); @@ -264,21 +415,27 @@ impl Response { buf.extend_from_slice(&s.out()); buf } - ResponseBody::Ticket { ticket, wait_time } => { - let mut s = RlpStream::new(); - s.begin_list(3); - s.append(&id.as_bytes()); - s.append(&ticket); - s.append(&wait_time); - buf.extend_from_slice(&s.out()); - buf - } - ResponseBody::RegisterConfirmation { topic } => { - let mut s = RlpStream::new(); - s.begin_list(2); - s.append(&id.as_bytes()); - s.append(&topic); - buf.extend_from_slice(&s.out()); + ResponseBody::Ticket { + ticket, + wait_time, + topic, + } => { + let aead = Aes128Gcm::new(GenericArray::from_slice(ticket_key)); + let payload = Payload { + msg: &ticket.encode(), + aad: b"", + }; + if let Ok(encrypted_ticket) = + aead.encrypt(GenericArray::from_slice(&[1u8; 12]), payload) + { + let mut s = RlpStream::new(); + s.begin_list(4); + s.append(&id.as_bytes()); + s.append(&encrypted_ticket); + s.append(&wait_time); + s.append(&topic); + buf.extend_from_slice(&s.out()); + } buf } } @@ -311,11 +468,11 @@ impl std::fmt::Display for ResponseBody { match self { ResponseBody::Pong { enr_seq, ip, port } => write!( f, - "PONG: Enr-seq: {}, Ip: {:?}, Port: {}", + "PONG: enr-seq: {}, ip: {:?}, port: {}", enr_seq, ip, port ), ResponseBody::Nodes { total, nodes } => { - write!(f, "NODES: total: {}, Nodes: [", total)?; + write!(f, "NODES: total: {}, nodes: [", total)?; let mut first = true; for id in nodes { if !first { @@ -329,13 +486,18 @@ impl std::fmt::Display for ResponseBody { write!(f, "]") } ResponseBody::Talk { response } => { - write!(f, "Response: Response {}", hex::encode(response)) - } - ResponseBody::Ticket { ticket, wait_time } => { - write!(f, "TICKET: Ticket: {:?}, Wait time: {}", ticket, wait_time) + write!(f, "TALK: response {}", hex::encode(response)) } - ResponseBody::RegisterConfirmation { topic } => { - write!(f, "REGTOPIC: Registered: {}", hex::encode(topic)) + ResponseBody::Ticket { + ticket, + wait_time, + topic, + } => { + write!( + f, + "TICKET: ticket: {}, wait time: {}, topic: {}", + ticket, wait_time, topic + ) } } } @@ -360,27 +522,23 @@ impl std::fmt::Display for RequestBody { hex::encode(protocol), hex::encode(request) ), - RequestBody::TopicQuery { topic } => write!(f, "TOPICQUERY: topic: {:?}", topic), - RequestBody::RegisterTopic { topic, enr, ticket } => write!( - f, - "RegisterTopic: topic: {}, enr: {}, ticket: {}", - hex::encode(topic), - enr.to_base64(), - hex::encode(ticket) - ), + RequestBody::TopicQuery { topic } => write!(f, "TOPICQUERY: topic: {}", topic), + RequestBody::RegisterTopic { topic, ticket } => { + write!(f, "REGTOPIC: topic: {}, ticket: {}", topic, ticket,) + } } } } #[allow(dead_code)] impl Message { - pub fn encode(self) -> Vec { + pub fn encode(self, ticket_key: &[u8; 16]) -> Vec { match self { Self::Request(request) => request.encode(), - Self::Response(response) => response.encode(), + Self::Response(response) => response.encode(ticket_key), } } - pub fn decode(data: &[u8]) -> Result { + pub fn decode(data: &[u8], ticket_key: &[u8; 16]) -> Result { if data.len() < 3 { return Err(DecoderError::RlpIsTooShort); } @@ -417,10 +575,10 @@ impl Message { }) } 2 => { - // PingResponse + // PongResponse if list_len != 4 { debug!( - "Ping Response has an invalid RLP list length. Expected 4, found {}", + "Pong Response has an invalid RLP list length. 
Expected 4, found {}", list_len ); return Err(DecoderError::RlpIncorrectListLen); @@ -444,7 +602,7 @@ impl Message { } } _ => { - debug!("Ping Response has incorrect byte length for IP"); + debug!("Pong Response has incorrect byte length for IP"); return Err(DecoderError::RlpIncorrectListLen); } }; @@ -507,7 +665,7 @@ impl Message { // no records vec![] } else { - enr_list_rlp.as_list::>()? + enr_list_rlp.as_list::()? } }; Message::Response(Response { @@ -519,7 +677,7 @@ impl Message { }) } 5 => { - // Talk Request + // TalkRequest if list_len != 3 { debug!( "Talk Request has an invalid RLP list length. Expected 3, found {}", @@ -535,7 +693,7 @@ impl Message { }) } 6 => { - // Talk Response + // TalkResponse if list_len != 2 { debug!( "Talk Response has an invalid RLP list length. Expected 2, found {}", @@ -549,74 +707,315 @@ impl Message { body: ResponseBody::Talk { response }, }) } + 7 => { + // RegisterTopicRequest + if list_len != 3 { + debug!("RegisterTopic request has an invalid RLP list length. Expected 3, found {}", list_len); + return Err(DecoderError::RlpIncorrectListLen); + } + let topic = rlp.val_at::(1)?; + let ticket = rlp.val_at::>(2)?; + + let returned_ticket = { + let aead = Aes128Gcm::new(GenericArray::from_slice(ticket_key)); + let payload = Payload { + msg: &ticket, + aad: b"", + }; + if !ticket.is_empty() { + if let Ok(decrypted_ticket) = aead.decrypt(GenericArray::from_slice(&[1u8; 12]), payload).map_err(|e| debug!("Failed to decrypt ticket in REGTOPIC request. Ticket not issued by us. Error: {}", e)) { + if let Ok(decoded_ticket) = RequestTicket::decode(&decrypted_ticket).map_err(|e| { + debug!("Failed to decode ticket in REGTOPIC request. Error: {}", e) + }) { + decoded_ticket + } else { + debug_unreachable!("Encoding of ticket issued locally is faulty"); + return Err(DecoderError::Custom("Faulty encoding of ticket")); + } + } else { + return Err(DecoderError::Custom(FALSE_TICKET)); + } + } else { + RequestTicket::Empty + } + }; + Message::Request(Request { + id, + body: RequestBody::RegisterTopic { + topic, + ticket: returned_ticket, + }, + }) + } + 8 => { + // TicketResponse + if list_len != 4 { + debug!( + "Ticket Response has an invalid RLP list length. Expected 4, found {}", + list_len + ); + return Err(DecoderError::RlpIncorrectListLen); + } + let ticket = rlp.val_at::(1)?; + let wait_time = rlp.val_at::(2)?; + let topic = rlp.val_at::(3)?; + Message::Response(Response { + id, + body: ResponseBody::Ticket { + ticket, + wait_time, + topic, + }, + }) + } + 9 => { + // TopicQueryRequest + if list_len != 2 { + debug!( + "TopicQuery request has an invalid RLP list length. Expected 2, found {}", + list_len + ); + return Err(DecoderError::RlpIncorrectListLen); + } + let topic = { + let topic_bytes = rlp.val_at::>(1)?; + if topic_bytes.len() > 32 { + debug!("TopicQuery request has a topic greater than 32 bytes"); + return Err(DecoderError::RlpIsTooBig); + } + let mut topic = [0u8; 32]; + topic[32 - topic_bytes.len()..].copy_from_slice(&topic_bytes); + TopicHash::from_raw(topic) + }; + Message::Request(Request { + id, + body: RequestBody::TopicQuery { topic }, + }) + } _ => { return Err(DecoderError::Custom("Unknown RPC message type")); - } /* - * All other RPC messages are currently not supported as per the 5.1 specification. - - 7 => { - // RegisterTopicRequest - if list_len != 2 { - debug!("RegisterTopic Request has an invalid RLP list length. 
Expected 2, found {}", list_len); - return Err(DecoderError::RlpIncorrectListLen); - } - let ticket = rlp.val_at::>(1)?; - Message::Request(Request { - id, - body: RequestBody::RegisterTopic { ticket }, - }) - } - 8 => { - // RegisterTopicResponse - if list_len != 2 { - debug!("RegisterTopic Response has an invalid RLP list length. Expected 2, found {}", list_len); - return Err(DecoderError::RlpIncorrectListLen); - } - Message::Response(Response { - id, - body: ResponseBody::RegisterTopic { - registered: rlp.val_at::(1)?, - }, - }) - } - 9 => { - // TopicQueryRequest - if list_len != 2 { - debug!( - "TopicQuery Request has an invalid RLP list length. Expected 2, found {}", - list_len - ); - return Err(DecoderError::RlpIncorrectListLen); - } - let topic = { - let topic_bytes = rlp.val_at::>(1)?; - if topic_bytes.len() > 32 { - debug!("Ticket Request has a topic greater than 32 bytes"); - return Err(DecoderError::RlpIsTooBig); - } - let mut topic = [0u8; 32]; - topic[32 - topic_bytes.len()..].copy_from_slice(&topic_bytes); - topic - }; - Message::Request(Request { - id, - body: RequestBody::TopicQuery { topic }, - }) - } - */ + } }; Ok(message) } } +/// A ticket object, outlined in the spec. +#[derive(Debug, Clone, Eq)] +pub struct Ticket { + src_node_id: NodeId, + src_ip: IpAddr, + topic: TopicHash, + req_time: Instant, + wait_time: Duration, + //cum_wait: Duration, +} + +impl rlp::Encodable for Ticket { + fn rlp_append(&self, s: &mut RlpStream) { + s.begin_list(5); + s.append(&self.src_node_id.raw().to_vec()); + match self.src_ip { + IpAddr::V4(addr) => s.append(&(addr.octets().to_vec())), + IpAddr::V6(addr) => s.append(&(addr.octets().to_vec())), + }; + s.append(&self.topic); + if let Ok(time_since_unix) = SystemTime::now().duration_since(UNIX_EPOCH) { + let time_since_req = self.req_time.elapsed(); + let time_stamp = time_since_unix - time_since_req; + s.append(&time_stamp.as_secs().to_be_bytes().to_vec()); + } + s.append(&self.wait_time.as_secs().to_be_bytes().to_vec()); + //s.append(&self.cum_wait.as_secs().to_be_bytes().to_vec()); + } +} + +impl rlp::Decodable for Ticket { + fn decode(rlp: &Rlp<'_>) -> Result { + if !rlp.is_list() { + debug!("Failed to decode Ticket. Not an RLP list: {}", rlp); + return Err(DecoderError::RlpExpectedToBeList); + } + + if rlp.item_count() != Ok(5) { + error!( + "List has wrong item count, should be 5 but is {:?}", + rlp.item_count() + ); + return Err(DecoderError::Custom("List has wrong item count")); + } + + let mut decoded_list: Vec> = rlp.iter().collect(); + + let src_node_id = { + let data = decoded_list.remove(0).data()?; + if data.len() != 32 { + debug!("Ticket's src-node-id is not 32 bytes"); + return Err(DecoderError::RlpIsTooBig); + } + let mut raw = [0u8; 32]; + raw.copy_from_slice(data); + NodeId::new(&raw) + }; + + let src_ip = { + let data = decoded_list.remove(0).data()?; + match data.len() { + 4 => { + let mut ip = [0u8; 4]; + ip.copy_from_slice(data); + IpAddr::from(ip) + } + 16 => { + let mut ip = [0u8; 16]; + ip.copy_from_slice(data); + let ipv6 = Ipv6Addr::from(ip); + // If the ipv6 is ipv4 compatible/mapped, simply return the ipv4. + // Ipv6 for Discv5 is coming soon.
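+ // (`to_ipv4` accepts both IPv4-compatible and IPv4-mapped forms; compare the stricter `to_ipv4_mapped` in src/ipmode.rs.)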
+ if let Some(ipv4) = ipv6.to_ipv4() { + IpAddr::V4(ipv4) + } else { + IpAddr::V6(ipv6) + } + } + _ => { + debug!("Ticket has incorrect byte length for src-ip"); + return Err(DecoderError::RlpIncorrectListLen); + } + } + }; + + let topic = decoded_list.remove(0).as_val::()?; + + let req_time = { + if let Ok(time_since_unix) = SystemTime::now().duration_since(UNIX_EPOCH) { + let secs_data = decoded_list.remove(0).data()?; + let mut secs_bytes = [0u8; 8]; + secs_bytes.copy_from_slice(secs_data); + let secs = u64::from_be_bytes(secs_bytes); + let req_time_since_unix = Duration::from_secs(secs); + let time_since_req = time_since_unix - req_time_since_unix; + if let Some(req_time) = Instant::now().checked_sub(time_since_req) { + req_time + } else { + return Err(DecoderError::Custom( + "Could not compute ticket req-time instant", + )); + } + } else { + return Err(DecoderError::Custom("SystemTime before UNIX EPOCH!")); + } + }; + + let wait_time = { + let secs_data = decoded_list.remove(0).data()?; + let mut secs_bytes = [0u8; 8]; + secs_bytes.copy_from_slice(secs_data); + let secs = u64::from_be_bytes(secs_bytes); + Duration::from_secs(secs) + }; + + /*let cum_wait = { + let secs_data = decoded_list.remove(0).data()?; + let mut secs_bytes = [0u8; 8]; + secs_bytes.copy_from_slice(secs_data); + let secs = u64::from_be_bytes(secs_bytes); + Duration::from_secs(secs) + };*/ + + Ok(Self { + src_node_id, + src_ip, + topic, + req_time, + wait_time, + //cum_wait, + }) + } +} + +/// Per topic, one registration attempt per node is stored at once. +/// Tickets that overlap based on these fields are considered equal. +impl PartialEq for Ticket { + fn eq(&self, other: &Self) -> bool { + self.src_node_id == other.src_node_id + && self.src_ip == other.src_ip + && self.topic == other.topic + } +} + +impl Ticket { + pub fn new( + src_node_id: NodeId, + src_ip: IpAddr, + topic: TopicHash, + req_time: Instant, + wait_time: Duration, + //cum_wait: Duration, + ) -> Self { + Ticket { + src_node_id, + src_ip, + topic, + req_time, + wait_time, + //cum_wait, + } + } + + pub fn topic(&self) -> TopicHash { + self.topic + } + + pub fn req_time(&self) -> Instant { + self.req_time + } + + pub fn wait_time(&self) -> Duration { + self.wait_time + } + + pub fn set_wait_time(&mut self, wait_time: Duration) { + self.wait_time = wait_time; + } + + /*pub fn cum_wait(&self) -> Duration { + self.cum_wait + } + + pub fn update_cum_wait(&mut self) { + self.cum_wait = self.cum_wait + self.wait_time; + }*/ +} + +impl std::fmt::Display for Ticket { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!( + f, + "Ticket: src node id: {}, src ip: {}, topic: {}, req time: {:?}, wait time: {}", + self.src_node_id, + self.src_ip, + self.topic, + self.req_time, + self.wait_time.as_secs() + ) + } +} + #[cfg(test)] mod tests { use super::*; + use aes_gcm::{ + aead::{generic_array::GenericArray, Aead, NewAead, Payload}, + Aes128Gcm, + }; use enr::EnrBuilder; #[test] fn ref_test_encode_request_ping() { + let ticket_key: [u8; 16] = rand::random(); + // reference input let id = RequestId(vec![1]); let enr_seq = 1; @@ -628,12 +1027,14 @@ mod tests { // expected hex output let expected_output = hex::decode("01c20101").unwrap(); - dbg!(hex::encode(message.clone().encode())); - assert_eq!(message.encode(), expected_output); + dbg!(hex::encode(message.clone().encode(&ticket_key))); + assert_eq!(message.encode(&ticket_key), expected_output); } #[test] fn ref_test_encode_request_findnode() { + let ticket_key: [u8; 16] = 
rand::random(); + // reference input let id = RequestId(vec![1]); let distances = vec![256]; @@ -644,13 +1045,15 @@ mod tests { // expected hex output let expected_output = hex::decode("03c501c3820100").unwrap(); - dbg!(hex::encode(message.clone().encode())); + dbg!(hex::encode(message.clone().encode(&ticket_key))); - assert_eq!(message.encode(), expected_output); + assert_eq!(message.encode(&ticket_key), expected_output); } #[test] fn ref_test_encode_response_ping() { + let ticket_key: [u8; 16] = rand::random(); + // reference input let id = RequestId(vec![1]); let enr_seq = 1; @@ -664,12 +1067,14 @@ mod tests { // expected hex output let expected_output = hex::decode("02ca0101847f000001821388").unwrap(); - dbg!(hex::encode(message.clone().encode())); - assert_eq!(message.encode(), expected_output); + dbg!(hex::encode(message.clone().encode(&ticket_key))); + assert_eq!(message.encode(&ticket_key), expected_output); } #[test] fn ref_test_encode_response_nodes_empty() { + let ticket_key: [u8; 16] = rand::random(); + // reference input let id = RequestId(vec![1]); let total = 1; @@ -684,16 +1089,18 @@ mod tests { nodes: vec![], }, }); - assert_eq!(message.encode(), expected_output); + assert_eq!(message.encode(&ticket_key), expected_output); } #[test] fn ref_test_encode_response_nodes() { + let ticket_key: [u8; 16] = rand::random(); + // reference input let id = RequestId(vec![1]); let total = 1; - let enr = "-HW4QCjfjuCfSmIJHxqLYfGKrSz-Pq3G81DVJwd_muvFYJiIOkf0bGtJu7kZVCOPnhSTMneyvR4MRbF3G5TNB4wy2ssBgmlkgnY0iXNlY3AyNTZrMaEDymNMrg1JrLQB2KTGtv6MVbcNEVv0AHacwUAPMljNMTg".parse::>().unwrap(); + let enr = "-HW4QCjfjuCfSmIJHxqLYfGKrSz-Pq3G81DVJwd_muvFYJiIOkf0bGtJu7kZVCOPnhSTMneyvR4MRbF3G5TNB4wy2ssBgmlkgnY0iXNlY3AyNTZrMaEDymNMrg1JrLQB2KTGtv6MVbcNEVv0AHacwUAPMljNMTg".parse::().unwrap(); // expected hex output let expected_output = hex::decode("04f87b0101f877f875b84028df8ee09f4a62091f1a8b61f18aad2cfe3eadc6f350d527077f9aebc56098883a47f46c6b49bbb91954238f9e14933277b2bd1e0c45b1771b94cd078c32dacb0182696482763489736563703235366b31a103ca634cae0d49acb401d8a4c6b6fe8c55b70d115bf400769cc1400f3258cd3138").unwrap(); @@ -704,18 +1111,20 @@ mod tests { nodes: vec![enr], }, }); - dbg!(hex::encode(message.clone().encode())); - assert_eq!(message.encode(), expected_output); + dbg!(hex::encode(message.clone().encode(&ticket_key))); + assert_eq!(message.encode(&ticket_key), expected_output); } #[test] fn ref_test_encode_response_nodes_multiple() { + let ticket_key: [u8; 16] = rand::random(); + // reference input let id = RequestId(vec![1]); let total = 1; - let enr = "enr:-HW4QBzimRxkmT18hMKaAL3IcZF1UcfTMPyi3Q1pxwZZbcZVRI8DC5infUAB_UauARLOJtYTxaagKoGmIjzQxO2qUygBgmlkgnY0iXNlY3AyNTZrMaEDymNMrg1JrLQB2KTGtv6MVbcNEVv0AHacwUAPMljNMTg".parse::>().unwrap(); + let enr = "enr:-HW4QBzimRxkmT18hMKaAL3IcZF1UcfTMPyi3Q1pxwZZbcZVRI8DC5infUAB_UauARLOJtYTxaagKoGmIjzQxO2qUygBgmlkgnY0iXNlY3AyNTZrMaEDymNMrg1JrLQB2KTGtv6MVbcNEVv0AHacwUAPMljNMTg".parse::().unwrap(); - let enr2 = "enr:-HW4QNfxw543Ypf4HXKXdYxkyzfcxcO-6p9X986WldfVpnVTQX1xlTnWrktEWUbeTZnmgOuAY_KUhbVV1Ft98WoYUBMBgmlkgnY0iXNlY3AyNTZrMaEDDiy3QkHAxPyOgWbxp5oF1bDdlYE6dLCUUp8xfVw50jU".parse::>().unwrap(); + let enr2 = "enr:-HW4QNfxw543Ypf4HXKXdYxkyzfcxcO-6p9X986WldfVpnVTQX1xlTnWrktEWUbeTZnmgOuAY_KUhbVV1Ft98WoYUBMBgmlkgnY0iXNlY3AyNTZrMaEDDiy3QkHAxPyOgWbxp5oF1bDdlYE6dLCUUp8xfVw50jU".parse::().unwrap(); // expected hex output let expected_output = 
hex::decode("04f8f20101f8eef875b8401ce2991c64993d7c84c29a00bdc871917551c7d330fca2dd0d69c706596dc655448f030b98a77d4001fd46ae0112ce26d613c5a6a02a81a6223cd0c4edaa53280182696482763489736563703235366b31a103ca634cae0d49acb401d8a4c6b6fe8c55b70d115bf400769cc1400f3258cd3138f875b840d7f1c39e376297f81d7297758c64cb37dcc5c3beea9f57f7ce9695d7d5a67553417d719539d6ae4b445946de4d99e680eb8063f29485b555d45b7df16a1850130182696482763489736563703235366b31a1030e2cb74241c0c4fc8e8166f1a79a05d5b0dd95813a74b094529f317d5c39d235").unwrap(); @@ -727,18 +1136,19 @@ mod tests { nodes: vec![enr, enr2], }, }); - dbg!(hex::encode(message.clone().encode())); - assert_eq!(message.encode(), expected_output); + dbg!(hex::encode(message.clone().encode(&ticket_key))); + assert_eq!(message.encode(&ticket_key), expected_output); } #[test] fn ref_decode_response_nodes_multiple() { + let ticket_key: [u8; 16] = rand::random(); let input = hex::decode("04f8f20101f8eef875b8401ce2991c64993d7c84c29a00bdc871917551c7d330fca2dd0d69c706596dc655448f030b98a77d4001fd46ae0112ce26d613c5a6a02a81a6223cd0c4edaa53280182696482763489736563703235366b31a103ca634cae0d49acb401d8a4c6b6fe8c55b70d115bf400769cc1400f3258cd3138f875b840d7f1c39e376297f81d7297758c64cb37dcc5c3beea9f57f7ce9695d7d5a67553417d719539d6ae4b445946de4d99e680eb8063f29485b555d45b7df16a1850130182696482763489736563703235366b31a1030e2cb74241c0c4fc8e8166f1a79a05d5b0dd95813a74b094529f317d5c39d235").unwrap(); - let expected_enr1 = "enr:-HW4QBzimRxkmT18hMKaAL3IcZF1UcfTMPyi3Q1pxwZZbcZVRI8DC5infUAB_UauARLOJtYTxaagKoGmIjzQxO2qUygBgmlkgnY0iXNlY3AyNTZrMaEDymNMrg1JrLQB2KTGtv6MVbcNEVv0AHacwUAPMljNMTg".parse::>().unwrap(); - let expected_enr2 = "enr:-HW4QNfxw543Ypf4HXKXdYxkyzfcxcO-6p9X986WldfVpnVTQX1xlTnWrktEWUbeTZnmgOuAY_KUhbVV1Ft98WoYUBMBgmlkgnY0iXNlY3AyNTZrMaEDDiy3QkHAxPyOgWbxp5oF1bDdlYE6dLCUUp8xfVw50jU".parse::>().unwrap(); + let expected_enr1 = "enr:-HW4QBzimRxkmT18hMKaAL3IcZF1UcfTMPyi3Q1pxwZZbcZVRI8DC5infUAB_UauARLOJtYTxaagKoGmIjzQxO2qUygBgmlkgnY0iXNlY3AyNTZrMaEDymNMrg1JrLQB2KTGtv6MVbcNEVv0AHacwUAPMljNMTg".parse::().unwrap(); + let expected_enr2 = "enr:-HW4QNfxw543Ypf4HXKXdYxkyzfcxcO-6p9X986WldfVpnVTQX1xlTnWrktEWUbeTZnmgOuAY_KUhbVV1Ft98WoYUBMBgmlkgnY0iXNlY3AyNTZrMaEDDiy3QkHAxPyOgWbxp5oF1bDdlYE6dLCUUp8xfVw50jU".parse::().unwrap(); - let decoded = Message::decode(&input).unwrap(); + let decoded = Message::decode(&input, &ticket_key).unwrap(); match decoded { Message::Response(response) => match response.body { @@ -755,20 +1165,22 @@ mod tests { #[test] fn encode_decode_ping_request() { + let ticket_key: [u8; 16] = rand::random(); let id = RequestId(vec![1]); let request = Message::Request(Request { id, body: RequestBody::Ping { enr_seq: 15 }, }); - let encoded = request.clone().encode(); - let decoded = Message::decode(&encoded).unwrap(); + let encoded = request.clone().encode(&ticket_key); + let decoded = Message::decode(&encoded, &ticket_key).unwrap(); assert_eq!(request, decoded); } #[test] fn encode_decode_ping_response() { + let ticket_key: [u8; 16] = rand::random(); let id = RequestId(vec![1]); let request = Message::Response(Response { id, @@ -779,14 +1191,15 @@ mod tests { }, }); - let encoded = request.clone().encode(); - let decoded = Message::decode(&encoded).unwrap(); + let encoded = request.clone().encode(&ticket_key); + let decoded = Message::decode(&encoded, &ticket_key).unwrap(); assert_eq!(request, decoded); } #[test] fn encode_decode_find_node_request() { + let ticket_key: [u8; 16] = rand::random(); let id = RequestId(vec![1]); let request = Message::Request(Request { id, @@ -795,15 
+1208,16 @@ mod tests { }, }); - let encoded = request.clone().encode(); - let decoded = Message::decode(&encoded).unwrap(); + let encoded = request.clone().encode(&ticket_key); + let decoded = Message::decode(&encoded, &ticket_key).unwrap(); assert_eq!(request, decoded); } #[test] fn encode_decode_nodes_response() { - let key = CombinedKey::generate_secp256k1(); + let ticket_key: [u8; 16] = rand::random(); + let key = enr::CombinedKey::generate_secp256k1(); let enr1 = EnrBuilder::new("v4") .ip4("127.0.0.1".parse().unwrap()) .udp4(500) @@ -829,14 +1243,15 @@ mod tests { }, }); - let encoded = request.clone().encode(); - let decoded = Message::decode(&encoded).unwrap(); + let encoded = request.clone().encode(&ticket_key); + let decoded = Message::decode(&encoded, &ticket_key).unwrap(); assert_eq!(request, decoded); } #[test] - fn encode_decode_ticket_request() { + fn encode_decode_talk_request() { + let ticket_key: [u8; 16] = rand::random(); let id = RequestId(vec![1]); let request = Message::Request(Request { id, @@ -846,173 +1261,195 @@ mod tests { }, }); - let encoded = request.clone().encode(); - let decoded = Message::decode(&encoded).unwrap(); + let encoded = request.clone().encode(&ticket_key); + let decoded = Message::decode(&encoded, &ticket_key).unwrap(); assert_eq!(request, decoded); } - /* - * These RPC messages are not in use yet - * - #[test] - fn ref_test_encode_request_ticket() { - // reference input - let id = 1; - let hash_bytes = - hex::decode("fb757dc581730490a1d7a00deea65e9b1936924caaea8f44d476014856b68736") - .unwrap(); - - // expected hex output - let expected_output = - hex::decode("05e201a0fb757dc581730490a1d7a00deea65e9b1936924caaea8f44d476014856b68736") - .unwrap(); - - let mut topic_hash = [0; 32]; - topic_hash.copy_from_slice(&hash_bytes); - - let message = Message::Request(Request { - id, - body: RequestBody::Ticket { topic: topic_hash }, - }); - assert_eq!(message.encode(), expected_output); - } - #[test] - fn ref_test_encode_request_register_topic() { - // reference input - let id = 1; - let ticket = - hex::decode("fb757dc581730490a1d7a00deea65e9b1936924caaea8f44d476014856b68736") - .unwrap(); - - // expected hex output - let expected_output = - hex::decode("07e201a0fb757dc581730490a1d7a00deea65e9b1936924caaea8f44d476014856b68736") - .unwrap(); + fn encode_decode_register_topic_request_empty_ticket() { + let ticket_key: [u8; 16] = rand::random(); - let message = Message::Request(Request { - id, - body: RequestBody::RegisterTopic { ticket }, + let request = Message::Request(Request { + id: RequestId(vec![1]), + body: RequestBody::RegisterTopic { + topic: "lighthouse".to_string(), + ticket: RequestTicket::Empty, + }, }); - assert_eq!(message.encode(), expected_output); - } - - #[test] - fn ref_test_encode_request_topic_query() { - // reference input - let id = 1; - let hash_bytes = - hex::decode("fb757dc581730490a1d7a00deea65e9b1936924caaea8f44d476014856b68736") - .unwrap(); - // expected hex output - let expected_output = - hex::decode("09e201a0fb757dc581730490a1d7a00deea65e9b1936924caaea8f44d476014856b68736") - .unwrap(); - - let mut topic_hash = [0; 32]; - topic_hash.copy_from_slice(&hash_bytes); + let encoded = request.clone().encode(&ticket_key); + let decoded = Message::decode(&encoded, &ticket_key).unwrap(); - let message = Message::Request(Request { - id, - body: RequestBody::TopicQuery { topic: topic_hash }, - }); - assert_eq!(message.encode(), expected_output); + assert_eq!(request, decoded); } #[test] - fn ref_test_encode_response_register_topic() { 
- // reference input - let id = 1; - let registered = true; - - // expected hex output - let expected_output = hex::decode("08c20101").unwrap(); - let message = Message::Response(Response { - id, - body: ResponseBody::RegisterTopic { registered }, - }); - assert_eq!(message.encode(), expected_output); - } + fn encode_decode_ticket_transit() { + let local_ticket_key: [u8; 16] = rand::random(); + let remote_ticket_key: [u8; 16] = rand::random(); - #[test] - fn encode_decode_register_topic_request() { - let request = Message::Request(Request { - id: 1, - body: RequestBody::RegisterTopic { - topic: vec![1,2,3], - ticket: vec![1, 2, 3, 4, 5], + let port = 5000; + let ip: IpAddr = "127.0.0.1".parse().unwrap(); + let key = enr::CombinedKey::generate_secp256k1(); + let enr = EnrBuilder::new("v4").ip(ip).udp4(port).build(&key).unwrap(); + let node_id = enr.node_id(); + + let og_ticket = Ticket::new( + node_id, + ip, + TopicHash::from_raw([1u8; 32]), + Instant::now(), + Duration::from_secs(11), + //Duration::from_secs(25), + ); + + // The local node sends a ticket response + let response = Message::Response(Response { + id: RequestId(vec![1]), + body: ResponseBody::Ticket { + ticket: ResponseTicket::LocallyIssued(og_ticket.clone()), + wait_time: 1u64, + topic: "lighthouse".to_string(), }, }); - let encoded = request.clone().encode(); - let decoded = Message::decode(encoded).unwrap(); - - assert_eq!(request, decoded); + let encoded_resp = response.encode(&local_ticket_key); + + // The response arrives at the remote peer + let decoded_resp = Message::decode(&encoded_resp, &remote_ticket_key).unwrap(); + + if let Message::Response(Response { + id: _, + body: + ResponseBody::Ticket { + ticket: ResponseTicket::RemotelyIssued(ticket_bytes), + .. + }, + }) = decoded_resp + { + // The remote peer returns the ticket to the issuer + let request = Message::Request(Request { + id: RequestId(vec![1]), + body: RequestBody::RegisterTopic { + topic: "lighthouse".to_string(), + ticket: RequestTicket::RemotelyIssued(ticket_bytes), + }, + }); + + let encoded_req = request.encode(&remote_ticket_key); + + // The request arrives at the issuer who decodes it + let decoded_req = Message::decode(&encoded_req, &local_ticket_key).unwrap(); + + if let Message::Request(Request { + id: _, + body: + RequestBody::RegisterTopic { + topic: _, + ticket: RequestTicket::LocallyIssued(ticket), + }, + }) = decoded_req + { + assert_eq!(og_ticket, ticket); + } else { + panic!(); + } + } else { + panic!(); + } } #[test] - fn encode_decode_register_topic_response() { - let request = Message::Response(Response { - id: 0, - body: ResponseBody::RegisterTopic { registered: true }, - }); + fn encode_decode_request_ticket() { + // Create the test values needed + let port = 5000; + let ip: IpAddr = "127.0.0.1".parse().unwrap(); - let encoded = request.clone().encode(); - let decoded = Message::decode(encoded).unwrap(); + let key = enr::CombinedKey::generate_secp256k1(); - assert_eq!(request, decoded); - } + let enr = EnrBuilder::new("v4").ip(ip).udp4(port).build(&key).unwrap(); + let node_id = enr.node_id(); + let ticket = Ticket::new( + node_id, + ip, + TopicHash::from_raw([1u8; 32]), + Instant::now(), + Duration::from_secs(11), + //Duration::from_secs(25), + ); - #[test] - fn encode_decode_topic_query_request() { - let request = Message::Request(Request { - id: 1, - body: RequestBody::TopicQuery { topic: [17u8; 32] }, - }); + let encoded = RequestTicket::LocallyIssued(ticket.clone()).encode(); - let encoded = request.clone().encode(); - let 
decoded = Message::decode(encoded).unwrap(); + let decoded = RequestTicket::decode(&encoded).unwrap(); - assert_eq!(request, decoded); + assert_eq!(RequestTicket::LocallyIssued(ticket), decoded); } #[test] - fn ref_test_encode_response_ticket() { - // reference input - let id = 1; - let ticket = [0; 32].to_vec(); // all 0's - let wait_time = 5; + fn encode_decode_request_ticket_with_encryption() { + // Create the test values needed + let port = 5000; + let ip: IpAddr = "127.0.0.1".parse().unwrap(); - // expected hex output - let expected_output = hex::decode( - "06e301a0000000000000000000000000000000000000000000000000000000000000000005", - ) + let key = enr::CombinedKey::generate_secp256k1(); + + let enr = EnrBuilder::new("v4").ip(ip).udp4(port).build(&key).unwrap(); + let node_id = enr.node_id(); + let ticket = Ticket::new( + node_id, + ip, + TopicHash::from_raw([1u8; 32]), + Instant::now(), + Duration::from_secs(11), + //Duration::from_secs(25), + ); + + let ticket_key: [u8; 16] = rand::random(); + + let encoded = RequestTicket::LocallyIssued(ticket.clone()).encode(); + + let encrypted_ticket = { + let aead = Aes128Gcm::new(GenericArray::from_slice(&ticket_key)); + let payload = Payload { + msg: &encoded, + aad: b"", + }; + aead.encrypt(GenericArray::from_slice(&[1u8; 12]), payload) + .unwrap() + }; + + let decrypted_ticket = { + let aead = Aes128Gcm::new(GenericArray::from_slice(&ticket_key)); + let payload = Payload { + msg: &encrypted_ticket, + aad: b"", + }; + aead.decrypt(GenericArray::from_slice(&[1u8; 12]), payload) + .map_err(|e| error!("Failed to decode ticket in REGTOPIC query: {}", e)) + } .unwrap(); - let message = Message::Response(Response { - id, - body: ResponseBody::Ticket { ticket, wait_time }, - }); - assert_eq!(message.encode(), expected_output); + let decoded = RequestTicket::decode(&decrypted_ticket).unwrap(); + + assert_eq!(encoded, decrypted_ticket); + assert_eq!(RequestTicket::LocallyIssued(ticket), decoded); } #[test] - fn encode_decode_ticket_response() { - let request = Message::Response(Response { - id: 0, - body: ResponseBody::Ticket { - ticket: vec![1, 2, 3, 4, 5], - wait_time: 5, + fn encode_decode_topic_query_request() { + let ticket_key: [u8; 16] = rand::random(); + + let request = Message::Request(Request { + id: RequestId(vec![1]), + body: RequestBody::TopicQuery { + topic: TopicHash::from_raw([1u8; 32]), }, }); - - let encoded = request.clone().encode(); - let decoded = Message::decode(encoded).unwrap(); + let encoded = request.clone().encode(&ticket_key); + let decoded = Message::decode(&encoded, &ticket_key).unwrap(); assert_eq!(request, decoded); } - - */ } diff --git a/src/service.rs b/src/service.rs index 1dd39ea2c..be0923dee 100644 --- a/src/service.rs +++ b/src/service.rs @@ -1,44 +1,60 @@ //! The Discovery v5 protocol. See `lib.rs` for further details. //! -//! Note: Discovered ENR's are not automatically added to the routing table. Only established +//! Note: Discovered ENRs are not automatically added to the routing table. Only established //! sessions get added, ensuring only valid ENRs are added. Manual additions can be made using the //! `add_enr()` function. //! //! Response to queries return `PeerId`. Only the trusted (a session has been established with) -//! `PeerId`'s are returned, as ENR's for these `PeerId`'s are stored in the routing table and as +//! `PeerId`'s are returned, as ENRs for these `PeerId`'s are stored in the routing table and as //! such should have an address to connect to. 
Untrusted `PeerId`'s can be obtained from the //! `Service::Discovered` event, which is fired as peers get discovered. //! //! Note that although the ENR crate does support Ed25519 keys, these are currently not //! supported as the ECDH procedure isn't specified in the specification. Therefore, only //! secp256k1 keys are supported currently. - use self::{ ip_vote::IpVote, query_info::{QueryInfo, QueryType}, }; use crate::{ + advertisement::{ + ticket::{Tickets, MAX_WAIT_TIME_TICKET, TICKET_LIMIT_DURATION}, + topic::TopicHash, + Ads, AD_LIFETIME, + }, + discv5::{ + supports_feature, Features, ENR_KEY_TOPICS, KBUCKET_PENDING_TIMEOUT, PERMIT_BAN_LIST, + }, error::{RequestError, ResponseError}, handler::{Handler, HandlerIn, HandlerOut}, kbucket::{ self, ConnectionDirection, ConnectionState, FailureReason, InsertResult, KBucketsTable, NodeStatus, UpdateResult, }, + metrics::METRICS, node_info::{NodeAddress, NodeContact, NonContactable}, packet::MAX_PACKET_SIZE, query_pool::{ FindNodeQueryConfig, PredicateQueryConfig, QueryId, QueryPool, QueryPoolState, TargetKey, }, - rpc, Discv5Config, Discv5Event, Enr, + rpc, Discv5Config, Discv5Event, Enr, Topic, TopicsEnrField, }; use delay_map::HashSetDelay; use enr::{CombinedKey, NodeId}; use fnv::FnvHashMap; -use futures::prelude::*; +use futures::{future::select_all, prelude::*}; use more_asserts::debug_unreachable; use parking_lot::RwLock; use rpc::*; -use std::{collections::HashMap, net::SocketAddr, sync::Arc, task::Poll, time::Instant}; +use std::{ + collections::{hash_map::Entry, BTreeMap, HashMap, HashSet}, + io::Error, + net::SocketAddr, + pin::Pin, + sync::{atomic::Ordering, Arc}, + task::{Context, Poll}, + time::{Duration, Instant}, +}; use tokio::sync::{mpsc, oneshot}; use tracing::{debug, error, info, trace, warn}; @@ -46,10 +62,29 @@ mod ip_vote; mod query_info; mod test; +/// The log2distance between two keys. +pub type Log2Distance = u64; + /// The number of distances (buckets) we simultaneously request from each peer. /// NOTE: This must not be larger than 127. pub(crate) const DISTANCES_TO_REQUEST_PER_PEER: usize = 3; +/// The maximum number of registration attempts that may be active per distance +/// if there are sufficient peers. +const MAX_REG_ATTEMPTS_PER_LOG2DISTANCE: usize = 16; + +/// Registration of topics are paced to occur at intervals to avoid a self-provoked DoS. +const REGISTER_INTERVAL: Duration = Duration::from_secs(60); + +/// Registration attempts must be limited per registration interval. +const MAX_REGTOPICS_REGISTER_PER_INTERVAL: usize = 16; + +/// The max number of uncontacted peers to store before the kbuckets per topic. +const MAX_UNCONTACTED_PEERS_PER_TOPIC_BUCKET: usize = 16; + +/// The duration in seconds which a node can come late to an assigned wait time. +const WAIT_TIME_TOLERANCE: Duration = Duration::from_secs(5); + /// Request type for Protocols using `TalkReq` message. /// /// Automatically responds with an empty body on drop if @@ -123,6 +158,15 @@ impl TalkRequest { } } +/// The active and temporarily limited (too many tickets received from a node +/// in a given time span) registration attempts. Upon sending a REGTOPIC to +/// a node, it is inserted into RegAttempts with RegistrationState::Ticket. +#[derive(Default, Clone)] +pub struct RegAttempts { + /// One registration attempt per node is allowed at a time. + pub reg_attempts: HashMap, +} + /// The types of requests to send to the Discv5 service. pub enum ServiceRequest { /// A request to start a query. 
There are two types of queries: @@ -142,10 +186,33 @@ pub enum ServiceRequest { /// Sets up an event stream where the discv5 server will return various events such as /// discovered nodes as it traverses the DHT. RequestEventStream(oneshot::Sender<mpsc::Receiver<Discv5Event>>), + /// Starts a topic lookup of nodes advertising a topic in a discv5 network. + TopicQuery(Topic, oneshot::Sender<Result<Vec<Enr>, RequestError>>), + /// Retrieves a list of previously looked up topics, i.e. topics pertaining to a set of topic kbuckets. + TopicQueryHistory(oneshot::Sender<Vec<Topic>>), + /// Removes a topic from the [`ServiceRequest::TopicQueryHistory`]. + RemoveFromTopicQueryHistory(Topic, oneshot::Sender<Result<(), RequestError>>), + /// RegisterTopic publishes this node as an advertiser for a topic in a discv5 network + /// until removed. + RegisterTopic(Topic, oneshot::Sender<Result<(), RequestError>>), + /// Retrieves the registration attempts active for a given topic. + RegistrationAttempts( + Topic, + oneshot::Sender<Result<BTreeMap<Log2Distance, RegAttempts>, RequestError>>, + ), + /// Retrieves the ads currently published by this node on other nodes in a discv5 network. + ActiveTopics(oneshot::Sender<Result<HashMap<Topic, Vec<NodeId>>, RequestError>>), + /// Stops publishing this node as an advertiser for a topic. + StopRegistrationOfTopic(Topic, oneshot::Sender<Result<(), RequestError>>), + /// Retrieves the ads advertised for other nodes for a given topic. + Ads(TopicHash, oneshot::Sender<Vec<Enr>>), + /// Retrieves the node ids of entries in a given topic's kbuckets by log2distance (bucket index). + TableEntriesIdTopicKBuckets( + TopicHash, + oneshot::Sender<Result<BTreeMap<Log2Distance, Vec<NodeId>>, RequestError>>, + ), } -use crate::discv5::PERMIT_BAN_LIST; - pub struct Service { /// Configuration parameters. config: Discv5Config, @@ -192,10 +259,138 @@ pub struct Service { /// A channel that the service emits events on. event_stream: Option<mpsc::Sender<Discv5Event>>, + + /// Ads advertised locally for other nodes. + ads: Ads, + + /// Registration attempts underway for each topic, stored by bucket index, i.e. the + /// log2distance to the topic hash (wrapped in a NodeId). + registration_attempts: HashMap<Topic, BTreeMap<Log2Distance, RegAttempts>>, + + /// The topics that have been looked up. Upon insertion a set of kbuckets is initialised for + /// the topic, if one didn't already exist from registration. Keeping these kbuckets until + /// a topic is manually removed from topic_lookups (and registration_attempts) makes + /// repeated look-ups for the same topic less costly. + topic_lookups: HashSet<Topic>, + + /// KBuckets per topic hash. + topics_kbuckets: HashMap<TopicHash, KBucketsTable<NodeId, Enr>>, + + /// The peers returned in a NODES response to a TOPICQUERY or REGTOPIC request are inserted in + /// this intermediary storage to check their connectivity before inserting them in the topic's + /// kbuckets. Peers are stored by bucket index, i.e. the log2distance to the topic hash + /// (wrapped in a NodeId). + discovered_peers_topic: HashMap<TopicHash, BTreeMap<Log2Distance, HashMap<NodeId, Enr>>>, + + /// Tickets received from other nodes. + tickets: Tickets, + + /// Locally initiated topic query requests in progress. + active_topic_queries: ActiveTopicQueries, +} + +/// The state of a topic lookup, which changes as responses to sent TOPICQUERYs are received. +/// A topic lookup may require more than one round of sending TOPICQUERYs to obtain the set +/// number of ads for the topic. +#[derive(Debug)] +pub enum TopicQueryState { + /// The topic lookup has obtained enough results. + Finished(TopicHash), + /// The topic lookup has not obtained enough results and has timed out. + TimedOut(TopicHash), + /// Not enough ads have been returned from the first round of sending TOPICQUERY + /// requests, new peers in the topic's kbuckets should be queried.
+ Unsatisfied(TopicHash), +} + +/// At any given time, a set number of registrations should be active per topic hash that is +/// set to be registered. A registration is active when either a ticket for an ad slot is +/// held and the ticket wait time has not yet expired, or a REGCONFIRMATION has been +/// received for an ad slot and the ad lifetime has not yet elapsed. +#[derive(Debug, Clone)] +pub enum RegistrationState { + /// A REGCONFIRMATION has been received at the given instant. + Confirmed(Instant), + /// A TICKET has been received and the ticket is being held for the duration of the + /// wait time. + Ticket, + /// A fixed number of tickets are accepted within a certain time span. A node id in + /// the ticket limit registration state will not be sent a REGTOPIC until the + /// [`TICKET_LIMIT_DURATION`] has expired. + TicketLimit(Instant), +} + +/// An active topic query/lookup keeps track of which peers from the topic's kbuckets +/// have already been queried, until the set number of ads is found for the lookup or it +/// is prematurely terminated by lack of peers or time. +pub struct ActiveTopicQuery { + /// A NodeId mapped to false is waiting for a response. A value of true means the + /// TOPICQUERY has received a response or the request has failed. + queried_peers: HashMap<NodeId, bool>, + /// An ad returned by multiple peers is only included once in the results. + results: HashMap<NodeId, Enr>, + /// The resulting ad nodes are returned to the app layer when the query has reached + /// a Finished or TimedOut state. + callback: Option<oneshot::Sender<Result<Vec<Enr>, RequestError>>>, + /// A start time is used to monitor time-out of the query. + start: Instant, + /// A query is marked as dry if no peers that haven't already been queried can be found + /// in the topic's kbuckets. + dry: bool, +} + +/// ActiveTopicQueries marks the progress of active topic queries/lookups. +pub struct ActiveTopicQueries { + /// Each topic lookup initiates an ActiveTopicQuery process. + queries: HashMap<TopicHash, ActiveTopicQuery>, + /// The timeout for any topic lookup. + time_out: Duration, + /// The number of ads an ActiveTopicQuery sets out to find. + num_results: usize, +} + +impl ActiveTopicQueries { + pub fn new(time_out: Duration, num_results: usize) -> Self { + ActiveTopicQueries { + queries: HashMap::new(), + time_out, + num_results, + } + } +} + +impl Stream for ActiveTopicQueries { + type Item = TopicQueryState; + fn poll_next(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Option<Self::Item>> { + for (topic_hash, query) in self.queries.iter() { + if query.results.len() >= self.num_results { + return Poll::Ready(Some(TopicQueryState::Finished(*topic_hash))); + } else if query.start.elapsed() >= self.time_out { + warn!( + "TOPICQUERY timed out. Only {} ads found for topic hash.", + query.results.len() + ); + return Poll::Ready(Some(TopicQueryState::TimedOut(*topic_hash))); + } else if query.dry { + // A dry query is waiting for new peers, move on and check the remaining queries. + continue; + } else { + let exhausted_peers = query + .queried_peers + .iter() + .filter(|(_peer, return_status)| **return_status) + .count(); + // If all peers have responded or failed the request and we still did not + // obtain enough results, the query is in TopicQueryState::Unsatisfied. + if exhausted_peers >= query.queried_peers.len() { + return Poll::Ready(Some(TopicQueryState::Unsatisfied(*topic_hash))); + } + } + } + Poll::Pending + } } /// Active RPC request awaiting a response from the handler. -struct ActiveRequest { +pub struct ActiveRequest { /// The address the request was sent to. pub contact: NodeContact, /// The request that was sent.
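A condensed sketch of the `poll_next` state machine above (names as in the definitions; not part of the patch):

// results.len() >= num_results       -> Poll::Ready(Some(Finished(topic_hash)))
// start.elapsed() >= time_out        -> Poll::Ready(Some(TimedOut(topic_hash)))
// dry                                -> skip this query, await new peers
// all queried_peers values are true  -> Poll::Ready(Some(Unsatisfied(topic_hash)))
// otherwise                          -> Poll::Pending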
@@ -210,7 +405,7 @@ struct ActiveRequest { pub enum CallbackResponse { /// A response to a requested ENR. Enr(oneshot::Sender>), - /// A response from a TALK request + /// A response from a TALK request. Talk(oneshot::Sender, RequestError>>), } @@ -244,7 +439,7 @@ impl Service { kbuckets: Arc>>, config: Discv5Config, listen_socket: SocketAddr, - ) -> Result<(oneshot::Sender<()>, mpsc::Sender), std::io::Error> { + ) -> Result<(oneshot::Sender<()>, mpsc::Sender), Error> { // process behaviour-level configuration parameters let ip_votes = if config.enr_update { Some(IpVote::new( @@ -287,6 +482,16 @@ impl Service { peers_to_ping: HashSetDelay::new(config.ping_interval), discv5_recv, event_stream: None, + ads: Ads::default(), + registration_attempts: HashMap::new(), + topic_lookups: Default::default(), + topics_kbuckets: HashMap::new(), + discovered_peers_topic: HashMap::new(), + tickets: Tickets::default(), + active_topic_queries: ActiveTopicQueries::new( + config.topic_query_timeout, + config.max_nodes_response, + ), exit, config: config.clone(), }; @@ -300,7 +505,15 @@ impl Service { /// The main execution loop of the discv5 serviced. async fn start(&mut self) { - tracing::info!("{:?}", self.config.ip_mode); + // In the case where not many peers populate the topic's kbuckets, ensure topics keep being republished. + let mut registration_interval = tokio::time::interval(REGISTER_INTERVAL); + let mut topics_to_reg_iter = self + .registration_attempts + .keys() + .map(|topic| (topic.clone(), topic.hash())) + .collect::>() + .into_iter(); + loop { tokio::select! { _ = &mut self.exit => { @@ -315,10 +528,11 @@ impl Service { ServiceRequest::StartQuery(query, callback) => { match query { QueryKind::FindNode { target_node } => { - self.start_findnode_query(target_node, callback); + let query_type = QueryType::FindNode(target_node); + self.start_findnode_query(query_type, Some(callback)); } QueryKind::Predicate { target_node, target_peer_no, predicate } => { - self.start_predicate_query(target_node, target_peer_no, predicate, callback); + self.start_predicate_query(target_node, target_peer_no, predicate, Some(callback)); } } } @@ -338,23 +552,134 @@ impl Service { error!("Failed to return the event stream channel"); } } + ServiceRequest::TopicQuery(topic, callback) => { + // Store the topic to make sure the kbuckets for the topic persist for repeated + // look ups. + self.topic_lookups.insert(topic.clone()); + + let topic_hash = topic.hash(); + // If we look up the topic hash for the first time, and aren't registering it, + // we initialise its kbuckets. + if let Entry::Vacant(_) = self.topics_kbuckets.entry(topic_hash) { + self.init_topic_kbuckets(topic_hash); + } + // To fill the kbuckets closest to the topic hash as well as those further away + // (iteratively getting closer to node ids to the topic hash) start a find node + // query searching for the topic hash's bytes wrapped in a NodeId. + let topic_key = NodeId::new(&topic_hash.as_bytes()); + self.start_findnode_query(QueryType::FindTopic(topic_key), None); + + self.send_topic_queries(topic_hash, Some(callback)); + } + ServiceRequest::RemoveFromTopicQueryHistory(topic, callback) => { + let result = if self.topic_lookups.remove(&topic) { + // If this topic isn't being registered, free the storage occupied by the topic's kbuckets + // and get rid of the overhead needed to maintain the those kbuckets. 
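+ // (Lookups (topic_lookups) and registrations (registration_attempts) share one kbuckets + // instance per topic; the instance is freed only once neither use remains, checked here + // and in ServiceRequest::StopRegistrationOfTopic below.)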
+ if !self.registration_attempts.contains_key(&topic) { + self.topics_kbuckets.remove(&topic.hash()); + } + Ok(()) + } else { + Err(RequestError::TopicNotQueried) + }; + if callback.send(result).is_err() { + error!("Failed to return result of remove topic query operation for topic {}", topic); + } + } + ServiceRequest::TopicQueryHistory(callback) => { + if callback.send(self.topic_lookups.iter().cloned().collect::<Vec<Topic>>()).is_err() { + error!("Failed to return topic query history"); + } + } + ServiceRequest::RegisterTopic(topic, callback) => { + let result = self.start_topic_registration(topic.clone()); + if callback.send(result).is_err() { + error!("Failed to return result of register topic operation for topic {}", topic); + } + } + ServiceRequest::StopRegistrationOfTopic(topic, callback) => { + // If we have any pending tickets, discard those, i.e. don't return the ticket to the + // peer that issued it. + self.tickets.remove(&topic); + + let result = if self.registration_attempts.remove(&topic).is_some() { + METRICS.topics_to_publish.store(self.registration_attempts.len(), Ordering::Relaxed); + // If this topic isn't being looked up, free the storage occupied by the topic's kbuckets + // and get rid of the overhead needed to maintain those kbuckets. + if !self.topic_lookups.contains(&topic) { + self.topics_kbuckets.remove(&topic.hash()); + } + Ok(()) + } else { + Err(RequestError::TopicNotRegistered) + }; + + if callback.send(result).is_err() { + error!("Failed to return the result of the deregister topic operation for topic {}", topic); + } + } + ServiceRequest::ActiveTopics(callback) => { + if callback.send(Ok(self.get_active_topics())).is_err() { + error!("Failed to return active topics"); + } + } + ServiceRequest::Ads(topic_hash, callback) => { + let ads = self.ads.get_ad_nodes(topic_hash).map(|ad_node| ad_node.node_record().clone()).collect::<Vec<Enr>>(); + if callback.send(ads).is_err() { + error!("Failed to return ads for topic {}", topic_hash); + } + } + ServiceRequest::RegistrationAttempts(topic_hash, callback) => { + let reg_attempts = if let Some(reg_attempts) = self.registration_attempts.get(&topic_hash) { + Ok(reg_attempts.clone()) + } else { + error!("Topic hash {} is not being registered", topic_hash); + Err(RequestError::TopicNotRegistered) + }; + if callback.send(reg_attempts).is_err() { + error!("Failed to return registration attempts for topic hash {}", topic_hash); + } + } + ServiceRequest::TableEntriesIdTopicKBuckets(topic_hash, callback) => { + let table_entries = if let Some(kbuckets) = self.topics_kbuckets.get_mut(&topic_hash) { + let mut entries = BTreeMap::new(); + for (index, bucket) in kbuckets.buckets_iter().enumerate() { + // The bucket's index in the Vec of buckets in the kbucket table is one less than + // the log2distance, since log2distance 0, i.e. the key the table is centered on + // (here the topic hash as a NodeId), is not assigned a bucket.
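+ // For example, the bucket at index 0 holds entries at log2distance 1 from the topic + // key, and with 256-bit keys the furthest bucket, index 255, holds entries at + // log2distance 256.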
+ let distance = index as Log2Distance + 1; + let mut node_ids = Vec::new(); + bucket.iter().for_each(|node| node_ids.push(*node.key.preimage())); + entries.insert(distance, node_ids); + } + Ok(entries) + } else { + Err(RequestError::TopicKBucketsUninitialised) + }; + if callback.send(table_entries).is_err() { + error!("Failed to return table entries' ids for topic hash {}", topic_hash); + } + } } } Some(event) = self.handler_recv.recv() => { match event { HandlerOut::Established(enr, socket_addr, direction) => { self.send_event(Discv5Event::SessionEstablished(enr.clone(), socket_addr)); - self.inject_session_established(enr, direction); + self.inject_session_established(enr, direction, None); + } + HandlerOut::EstablishedTopic(enr, direction, topic_hash) => { + self.inject_session_established(enr, direction, Some(topic_hash)); } HandlerOut::Request(node_address, request) => { - self.handle_rpc_request(node_address, *request); - } + self.handle_rpc_request(node_address, *request); + } HandlerOut::Response(node_address, response) => { - self.handle_rpc_response(node_address, *response); - } + self.handle_rpc_response(node_address, *response); + } HandlerOut::WhoAreYou(whoareyou_ref) => { // check what our latest known ENR is for this node. - if let Some(known_enr) = self.find_enr(&whoareyou_ref.0.node_id) { + if let Some(known_enr) = self.find_enr(&whoareyou_ref.0.node_id, true) { if let Err(e) = self.handler_send.send(HandlerIn::WhoAreYou(whoareyou_ref, Some(known_enr))) { warn!("Failed to send whoareyou {}", e); }; @@ -362,7 +687,7 @@ impl Service { // do not know of this peer debug!("NodeId unknown, requesting ENR. {}", whoareyou_ref.0); if let Err(e) = self.handler_send.send(HandlerIn::WhoAreYou(whoareyou_ref, None)) { - warn!("Failed to send who are you to unknonw enr peer {}", e); + warn!("Failed to send who are you to unknown enr peer {}", e); } } } @@ -379,23 +704,27 @@ impl Service { event = Service::bucket_maintenance_poll(&self.kbuckets) => { self.send_event(event); } + Some(event) = Service::bucket_maintenance_poll_topics(self.topics_kbuckets.iter_mut()) => { + debug!("{}", event); + } query_event = Service::query_event_poll(&mut self.queries) => { match query_event { QueryEvent::Waiting(query_id, node_id, request_body) => { - self.send_rpc_query(query_id, node_id, request_body); + self.send_rpc_query(query_id, node_id, *request_body); } // Note: Currently the distinction between a timed-out query and a finished // query is superfluous, however it may be useful in future versions. QueryEvent::Finished(query) | QueryEvent::TimedOut(query) => { let id = query.id(); + let query_type = query.target().query_type.clone(); let mut result = query.into_result(); - // obtain the ENR's for the resulting nodes + // obtain the ENRs for the resulting nodes let mut found_enrs = Vec::new(); for node_id in result.closest_peers { if let Some(position) = result.target.untrusted_enrs.iter().position(|enr| enr.node_id() == node_id) { let enr = result.target.untrusted_enrs.swap_remove(position); found_enrs.push(enr); - } else if let Some(enr) = self.find_enr(&node_id) { + } else if let Some(enr) = self.find_enr(&node_id, true) { // look up from the routing table found_enrs.push(enr); } @@ -403,8 +732,91 @@ impl Service { warn!("ENR not present in queries results"); } } - if result.target.callback.send(found_enrs).is_err() { - warn!("Callback dropped for query {}. 
Results dropped", *id); + + match result.target.callback { + Some(callback) => { + if callback.send(found_enrs).is_err() { + warn!("Callback dropped for query {}. Results dropped", *id); + } + } + None => { + // This was an automatically initiated query to look for more peers + // for a give topic's kbuckets + if let QueryType::FindTopic(topic_key) = query_type { + let topic_hash = TopicHash::from_raw(topic_key.raw()); + let mut discovered_new_peer = false; + if let Some(kbuckets_topic) = self.topics_kbuckets.get_mut(&topic_hash) { + for enr in found_enrs { + if !supports_feature(&enr, Features::Topics) { + continue; + } + trace!("Found new peer {} for topic {}", enr, topic_hash); + let key = kbucket::Key::from(enr.node_id()); + + // If the ENR exists in the routing table and the discovered ENR has a greater + // sequence number, perform some filter checks before updating the enr. + + let must_update_enr = match kbuckets_topic.entry(&key) { + kbucket::Entry::Present(entry, _) => entry.value().seq() < enr.seq(), + kbucket::Entry::Pending(mut entry, _) => entry.value().seq() < enr.seq(), + kbucket::Entry::Absent(_) => { + trace!( + "Discovered new peer {} for topic hash {}", + enr.node_id(), + topic_hash + ); + // A QueryType::FindTopic variant will always time out. The last batch of + // ENRs returned by the last iteration in the query is added to + // discovered_peers_topic, like previous batches of uncontacted peers were + // added to the query itself first. + let discovered_peers = + self.discovered_peers_topic.entry(topic_hash).or_default(); + + let node_id = enr.node_id(); + let peer_key: kbucket::Key = node_id.into(); + let topic_key: kbucket::Key = + NodeId::new(&topic_hash.as_bytes()).into(); + if let Some(distance) = peer_key.log2_distance(&topic_key) { + let bucket = discovered_peers.entry(distance).or_default(); + // If the intermediary storage before the topic's kbuckets is at bounds, discard the + // uncontacted peers. + if bucket.len() < MAX_UNCONTACTED_PEERS_PER_TOPIC_BUCKET { + bucket.insert(node_id, enr.clone()); + discovered_new_peer = true; + } else { + debug!("Discarding uncontacted peers, uncontacted peers at bounds for topic hash {}", topic_hash); + } + } + false + } + _ => false, + }; + if must_update_enr { + if let UpdateResult::Failed(reason) = + kbuckets_topic.update_node(&key, enr.clone(), None) + { + self.peers_to_ping.remove(&enr.node_id()); + debug!( + "Failed to update discovered ENR of peer {} for kbucket of topic hash {:?}. Reason: {:?}", + topic_hash, enr.node_id(), reason + ); + } else { + // If the enr was successfully updated, progress might be made in a topic lookup + discovered_new_peer = true; + } + } + } + if discovered_new_peer { + // If a topic lookup has dried up (no more peers to query), and we now have found new peers or updated enrs for + // known peers to that topic, the query can now proceed as long as it hasn't timed out already. + if let Some(query) = self.active_topic_queries.queries.get_mut(&topic_hash) { + debug!("Found new peers to send TOPICQUERY to, unsetting query status dry"); + query.dry = false; + } + } + } + } + } } } } @@ -424,14 +836,410 @@ impl Service { self.send_ping(enr); } } + Some(Ok((active_topic, active_ticket))) = self.tickets.next() => { + // When the ticket time expires a new REGTOPIC request is automatically sent to the + // ticket issuer and the registration attempt stays in the [`RegistrationState::Ticket`] + // from sending the first REGTOPIC request to this contact for this topic. 
+ self.reg_topic_request(active_ticket.contact(), active_topic.topic().clone(), RequestTicket::RemotelyIssued(active_ticket.ticket())); + } + Some(topic_query_progress) = self.active_topic_queries.next() => { + match topic_query_progress { + TopicQueryState::Finished(topic_hash) | TopicQueryState::TimedOut(topic_hash) => { + if let Some(query) = self.active_topic_queries.queries.remove(&topic_hash) { + if let Some(callback) = query.callback { + if callback.send(Ok(query.results.into_values().collect::>())).is_err() { + warn!("Callback dropped for topic query {}. Results dropped", topic_hash); + } + } + } + } + TopicQueryState::Unsatisfied(topic_hash) => self.send_topic_queries(topic_hash, None), + } + } + _ = registration_interval.tick() => { + trace!("New registration interval, {}/{} topics to publish", topics_to_reg_iter.clone().count(), self.registration_attempts.len()); + let mut sent_regtopics = 0; + let mut topic_item = topics_to_reg_iter.next(); + while let Some((topic, _topic_hash)) = topic_item { + trace!("Publishing topic {} with hash {}", topic, topic.hash()); + topic_item = topics_to_reg_iter.next(); + // It could be that a topic has been set to stop registration since the + // iteration through topics_to_reg_iter was started, in that case skip + // that topic. + if !self.registration_attempts.contains_key(&topic) { + continue; + } + sent_regtopics += self.send_register_topics(topic.clone()); + if sent_regtopics >= MAX_REGTOPICS_REGISTER_PER_INTERVAL { + break + } + } + if topics_to_reg_iter.next().is_none() { + topics_to_reg_iter = self.registration_attempts.keys().map(|topic| (topic.clone(), topic.hash())).collect::>().into_iter(); + } + } + } + } + } + + fn get_active_topics(&mut self) -> HashMap> { + let mut active_topics = HashMap::>::new(); + self.registration_attempts + .iter_mut() + .for_each(|(topic, reg_attempts_by_distance)| { + for reg_attempts in reg_attempts_by_distance.values_mut() { + reg_attempts + .reg_attempts + .retain(|node_id, reg_state| match reg_state { + RegistrationState::Confirmed(insert_time) => { + if insert_time.elapsed() < AD_LIFETIME { + active_topics + .entry(topic.clone()) + .or_default() + .push(*node_id); + true + } else { + false + } + } + RegistrationState::TicketLimit(insert_time) => { + insert_time.elapsed() < TICKET_LIMIT_DURATION + } + RegistrationState::Ticket => true, + }); + } + }); + active_topics + } + + fn init_topic_kbuckets(&mut self, topic_hash: TopicHash) { + trace!("Initiating kbuckets for topic hash {}", topic_hash); + + // NOTE: Currently we don't expose custom filter support in the configuration. Users can + // optionally use the IP filter via the ip_limit configuration parameter. In the future, we + // may expose this functionality to the users if there is demand for it. 
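+ // (If ip_limit is set, the filters below are the same IpTableFilter/IpBucketFilter the + // main kbuckets table uses; see the kbucket module for the exact per-subnet limits they + // enforce.)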
+ let (table_filter, bucket_filter) = if self.config.ip_limit { + ( + Some(Box::new(kbucket::IpTableFilter) as Box>), + Some(Box::new(kbucket::IpBucketFilter) as Box>), + ) + } else { + (None, None) + }; + + let mut kbuckets = KBucketsTable::new( + NodeId::new(&topic_hash.as_bytes()).into(), + KBUCKET_PENDING_TIMEOUT, + self.config.incoming_bucket_limit, + table_filter, + bucket_filter, + ); + + debug!( + "Adding {} entries from local routing table to topic's kbuckets", + self.kbuckets.write().iter().count() + ); + + for entry in self.kbuckets.write().iter() { + let enr = entry.node.value.clone(); + if !supports_feature(&enr, Features::Topics) { + continue; + } + match kbuckets.insert_or_update(entry.node.key, enr, entry.status) { + InsertResult::Inserted + | InsertResult::Pending { .. } + | InsertResult::StatusUpdated { .. } + | InsertResult::ValueUpdated + | InsertResult::Updated { .. } + | InsertResult::UpdatedPending => trace!( + "Added node id {} to kbucket of topic hash {}", + entry.node.value.node_id(), + topic_hash + ), + InsertResult::Failed(f) => error!( + "Failed to insert ENR for topic hash {}. Failure reason: {:?}", + topic_hash, f + ), + } + } + self.topics_kbuckets.insert(topic_hash, kbuckets); + } + + /// Starts the continuous process of registering a topic, i.e. advertising it at peers. + fn start_topic_registration(&mut self, topic: Topic) -> Result<(), RequestError> { + let topic_hash = topic.hash(); + if self.registration_attempts.contains_key(&topic) { + warn!("The topic {} is already being advertised", topic); + return Err(RequestError::TopicAlreadyRegistered); + } + self.registration_attempts + .insert(topic.clone(), BTreeMap::new()); + + let topics_field = |topic: Topic| -> TopicsEnrField { + if let Some(topics) = self.local_enr.read().get(ENR_KEY_TOPICS) { + if let Ok(Some(mut advertised_topics)) = TopicsEnrField::decode(topics) { + advertised_topics.add(topic); + return advertised_topics; + } + } + let mut advertised_topics = TopicsEnrField::new(Vec::new()); + advertised_topics.add(topic); + advertised_topics + }; + + let encoded_topics_field = topics_field(topic.clone()).encode(); + + let enr_size = self.local_enr.read().size() + encoded_topics_field.len(); + if enr_size >= 300 { + error!("Failed to register topic {}. The ENR would be a total of {} bytes if this topic was registered, the maximum size is 300 bytes", topic.topic(), enr_size); + return Err(RequestError::InsufficientSpaceEnr(topic)); + } + + let result = self.local_enr.write().insert( + ENR_KEY_TOPICS, + &encoded_topics_field, + &self.enr_key.write(), + ); + + match result { + Err(e) => { + error!( + "Failed to insert field 'topics' into local enr. Error {:?}", + e + ); + Err(RequestError::EnrWriteFailed) + } + Ok(_) => { + self.init_topic_kbuckets(topic_hash); + METRICS + .topics_to_publish + .store(self.registration_attempts.len(), Ordering::Relaxed); + + // To fill the kbuckets closest to the topic hash as well as those further away + // (iteratively getting closer to node ids to the topic hash) start a find node + // query searching for the topic hash's bytes wrapped in a NodeId. + let topic_key = NodeId::new(&topic_hash.as_bytes()); + self.start_findnode_query(QueryType::FindTopic(topic_key), None); + Ok(()) + } + } + } + + /// Internal function that starts a topic registration. This function should not be called outside of [`REGISTER_INTERVAL`]. 
+ fn send_register_topics(&mut self, topic: Topic) -> usize { + trace!("Sending REGTOPICS for topic {}", topic); + let topic_hash = topic.hash(); + if let Entry::Occupied(ref mut kbuckets) = self.topics_kbuckets.entry(topic_hash) { + trace!( + "Found {} entries in kbuckets of topic hash {}", + kbuckets.get_mut().iter().count(), + topic_hash + ); + let reg_attempts = self.registration_attempts.entry(topic.clone()).or_default(); + let mut new_peers = Vec::new(); + + // Ensure that max_reg_attempts_bucket registration attempts are alive per bucket if that many peers are + // available at that distance. + for (index, bucket) in kbuckets.get_mut().buckets_iter().enumerate() { + if new_peers.len() >= MAX_REGTOPICS_REGISTER_PER_INTERVAL { + break; + } + let distance = index as Log2Distance + 1; + let mut active_reg_attempts_bucket = 0; + + let registrations = reg_attempts.entry(distance).or_default(); + + // Remove expired registrations and ticket limit blockages. + registrations.reg_attempts.retain(|node_id, reg_state| { + trace!("Registration attempt of node id {}, reg state {:?} at distance {}", node_id, reg_state, distance); + match reg_state { + RegistrationState::Confirmed(insert_time) => { + if insert_time.elapsed() < AD_LIFETIME { + active_reg_attempts_bucket += 1; + true + } else { + trace!("Registration has expired for node id {}. Removing from registration attempts.", node_id); + false + } + } + RegistrationState::TicketLimit(insert_time) => insert_time.elapsed() < TICKET_LIMIT_DURATION, + RegistrationState::Ticket => { + active_reg_attempts_bucket += 1; + true + } + } + }); + + let mut new_peers_bucket = Vec::new(); + + // Attempt sending a request to uncontacted peers if any. + if let Some(peers) = self.discovered_peers_topic.get_mut(&topic_hash) { + if let Some(bucket) = peers.get_mut(&distance) { + bucket.retain(|node_id, enr | { + if new_peers_bucket.len() + active_reg_attempts_bucket >= MAX_REG_ATTEMPTS_PER_LOG2DISTANCE { + true + } else if let Entry::Vacant(_) = registrations.reg_attempts.entry(*node_id) { + debug!("Found new registration peer in uncontacted peers for topic {}. Peer: {:?}", topic_hash, node_id); + registrations.reg_attempts.insert(*node_id, RegistrationState::Ticket); + new_peers_bucket.push(enr.clone()); + false + } else { + true + } + }); + new_peers.append(&mut new_peers_bucket); + } + } + + // The count of active registration attempts for a distance after expired ads have been + // removed is less than the max number of registration attempts that should be active + // per bucket and is not equal to the total number of peers available in that bucket. + if active_reg_attempts_bucket < MAX_REG_ATTEMPTS_PER_LOG2DISTANCE + && registrations.reg_attempts.len() != bucket.num_entries() + { + for peer in bucket.iter() { + if new_peers_bucket.len() + active_reg_attempts_bucket + >= MAX_REG_ATTEMPTS_PER_LOG2DISTANCE + { + break; + } + let node_id = *peer.key.preimage(); + if let Entry::Vacant(_) = registrations.reg_attempts.entry(node_id) { + debug!( + "Found new registration peer in kbuckets of topic {}. Peer: {:?}", + topic_hash, + peer.key.preimage() + ); + registrations + .reg_attempts + .insert(node_id, RegistrationState::Ticket); + new_peers_bucket.push(peer.value.clone()) + } + } + new_peers.append(&mut new_peers_bucket); + } + } + let mut sent_regtopics = 0; + + for peer in new_peers { + if let Ok(node_contact) = NodeContact::try_from_enr(peer, self.config.ip_mode) + .map_err(|e| error!("Failed to send REGTOPIC to peer. 
Error: {:?}", e)) + { + self.reg_topic_request( + node_contact, + topic.clone(), + RequestTicket::RemotelyIssued(Vec::new()), + ); + // If an uncontacted peer has a faulty enr, don't count the registration attempt. + sent_regtopics += 1; + } + } + sent_regtopics + } else { + debug_unreachable!("Broken invariant, a kbuckets table should exist for topic hash"); + 0 + } + } + + /// Internal function that starts a topic lookup. + fn send_topic_queries( + &mut self, + topic_hash: TopicHash, + callback: Option, RequestError>>>, + ) { + let query = self + .active_topic_queries + .queries + .entry(topic_hash) + .or_insert(ActiveTopicQuery { + queried_peers: HashMap::new(), + results: HashMap::new(), + callback, + start: Instant::now(), + dry: false, + }); + + // Attempt to query max_topic_query_peers peers at a time. Possibly some peers will return more than one result + // (NODES of length > 1), or no results will be returned from that peer. + let max_topic_query_peers = self.config.max_nodes_response; + + let mut new_query_peers: Vec = Vec::new(); + + // Attempt sending a request to uncontacted peers if any. + if let Some(peers) = self.discovered_peers_topic.get_mut(&topic_hash) { + // Prefer querying nodes further away, i.e. in buckets of further distance to topic, to avoid hotspots. + for bucket in peers.values_mut().rev() { + if new_query_peers.len() < max_topic_query_peers { + break; + } + bucket.retain(|node_id, enr| { + if new_query_peers.len() >= max_topic_query_peers { + true + } else if let Entry::Vacant(entry) = query.queried_peers.entry(*node_id) { + entry.insert(false); + new_query_peers.push(enr.clone()); + trace!( + "Found a new topic query peer {} in uncontacted peers of topic hash {}", + node_id, + topic_hash + ); + false + } else { + true + } + }); + } + } + + if let Some(kbuckets) = self.topics_kbuckets.get_mut(&topic_hash) { + // Prefer querying nodes further away, i.e. in buckets of further distance to topic, to avoid hotspots. + for kbuckets_entry in kbuckets.iter().rev() { + if new_query_peers.len() >= max_topic_query_peers { + break; + } + let node_id = *kbuckets_entry.node.key.preimage(); + let enr = kbuckets_entry.node.value; + + if let Entry::Vacant(entry) = query.queried_peers.entry(node_id) { + entry.insert(false); + new_query_peers.push(enr.clone()); + trace!( + "Found a new topic query peer {} in kbuckets of topic hash {}", + node_id, + topic_hash + ); + } + } + } + // If no new nodes can be found to query, let topic lookup wait for new peers or time out. + if new_query_peers.is_empty() { + debug!("Found no new peers to send TOPICQUERY to, setting query status to dry"); + if let Some(query) = self.active_topic_queries.queries.get_mut(&topic_hash) { + query.dry = true; + let topic_key = NodeId::new(&topic_hash.as_bytes()); + self.start_findnode_query(QueryType::FindTopic(topic_key), None); + } + return; + } + + trace!("Sending TOPICQUERYs to {} new peers", new_query_peers.len()); + for enr in new_query_peers { + if let Ok(node_contact) = NodeContact::try_from_enr(enr.clone(), self.config.ip_mode) + .map_err(|e| error!("Failed to send TOPICQUERY to peer. Error: {:?}", e)) + { + self.topic_query_request(node_contact, topic_hash); } } } /// Internal function that starts a query. 
- fn start_findnode_query(&mut self, target_node: NodeId, callback: oneshot::Sender>) { + fn start_findnode_query( + &mut self, + query_type: QueryType, + callback: Option>>, + ) { let mut target = QueryInfo { - query_type: QueryType::FindNode(target_node), + query_type, untrusted_enrs: Default::default(), distances_to_request: DISTANCES_TO_REQUEST_PER_PEER, callback, @@ -442,7 +1250,7 @@ impl Service { { let mut kbuckets = self.kbuckets.write(); for closest in kbuckets.closest_values(&target_key) { - // Add the known ENR's to the untrusted list + // Add the known ENRs to the untrusted list target.untrusted_enrs.push(closest.value); // Add the key to the list for the query known_closest_peers.push(closest.key); @@ -451,8 +1259,10 @@ impl Service { if known_closest_peers.is_empty() { warn!("No known_closest_peers found. Return empty result without sending query."); - if target.callback.send(vec![]).is_err() { - warn!("Failed to callback"); + if let Some(callback) = target.callback { + if callback.send(vec![]).is_err() { + warn!("Failed to callback"); + } } } else { let query_config = FindNodeQueryConfig::new_from_config(&self.config); @@ -467,7 +1277,7 @@ impl Service { target_node: NodeId, num_nodes: usize, predicate: Box bool + Send>, - callback: oneshot::Sender>, + callback: Option>>, ) { let mut target = QueryInfo { query_type: QueryType::FindNode(target_node), @@ -485,7 +1295,7 @@ impl Service { { let mut kbuckets = self.kbuckets.write(); for closest in kbuckets.closest_values_predicate(&target_key, &kbucket_predicate) { - // Add the known ENR's to the untrusted list + // Add the known ENRs to the untrusted list target.untrusted_enrs.push(closest.value.clone()); // Add the key to the list for the query known_closest_peers.push(closest.into()); @@ -494,8 +1304,10 @@ impl Service { if known_closest_peers.is_empty() { warn!("No known_closest_peers found. Return empty result without sending query."); - if target.callback.send(vec![]).is_err() { - warn!("Failed to callback"); + if let Some(callback) = target.callback { + if callback.send(vec![]).is_err() { + warn!("Failed to callback"); + } } } else { let mut query_config = PredicateQueryConfig::new_from_config(&self.config); @@ -506,21 +1318,42 @@ impl Service { } /// Returns an ENR if one is known for the given NodeId. 
- pub fn find_enr(&self, node_id: &NodeId) -> Option { + pub fn find_enr(&mut self, node_id: &NodeId, include_untrusted_enrs: bool) -> Option { // check if we know this node id in our routing table let key = kbucket::Key::from(*node_id); if let kbucket::Entry::Present(entry, _) = self.kbuckets.write().entry(&key) { return Some(entry.value().clone()); } - // check the untrusted addresses for ongoing queries - for query in self.queries.iter() { - if let Some(enr) = query - .target() - .untrusted_enrs - .iter() - .find(|v| v.node_id() == *node_id) + for kbuckets in self.topics_kbuckets.values_mut() { + if let kbucket::Entry::Present(entry, _) = kbuckets.entry(&key) { + return Some(entry.value().clone()); + } + } + + if include_untrusted_enrs { + // check the untrusted addresses for ongoing queries + for query in self.queries.iter() { + if let Some(enr) = query + .target() + .untrusted_enrs + .iter() + .find(|v| v.node_id() == *node_id) + { + return Some(enr.clone()); + } + } + + // check the untrusted addresses for ongoing topic queries/registrations + for buckets in self + .discovered_peers_topic + .values() + .map(|buckets| buckets.values()) { - return Some(enr.clone()); + for bucket in buckets { + if let Some((_, enr)) = bucket.iter().find(|(v, _)| *v == node_id) { + return Some(enr.clone()); + } + } } } None @@ -529,10 +1362,11 @@ impl Service { /// Processes an RPC request from a peer. Requests respond to the received socket address, /// rather than the IP of the known ENR. fn handle_rpc_request(&mut self, node_address: NodeAddress, req: Request) { + debug!("Received RPC request: {} from: {}", req.body, node_address); let id = req.id; match req.body { RequestBody::FindNode { distances } => { - self.send_nodes_response(node_address, id, distances); + self.send_find_nodes_response(node_address, id, distances); } RequestBody::Ping { enr_seq } => { // check if we need to update the known ENR @@ -597,11 +1431,82 @@ impl Service { self.send_event(Discv5Event::TalkRequest(req)); } - RequestBody::RegisterTopic { .. } => { - debug!("Received RegisterTopic request which is unimplemented"); + RequestBody::RegisterTopic { topic, ticket } => { + let topic = Topic::new(topic); + + // Only advertise peer which have been added to our kbuckets, i.e. which have + // a contactable address in their enr. + if let Some(enr) = self.find_enr(&node_address.node_id, false) { + // Blacklist if node doesn't contain the given topic in its enr 'topics' field + let topic_in_enr = |topic_hash: &TopicHash| -> bool { + if let Some(topics) = enr.get(ENR_KEY_TOPICS) { + if let Ok(Some(advertised_topics)) = TopicsEnrField::decode(topics) { + for topic in advertised_topics.topics_iter() { + if topic_hash == &topic.hash() { + return true; + } + } + } + } + false + }; + + if !topic_in_enr(&topic.hash()) { + warn!("The topic given in the REGTOPIC request body cannot be found in sender's 'topics' enr field. 
Blacklisting peer {}.", node_address.node_id); + ban_malicious_peer(self.config.ban_duration, node_address); + self.rpc_failure(id, RequestError::InvalidEnrTopicsField); + return; + } + + // If the node has not respected the wait time and arrives before the wait time has + // expired or more than 5 seconds later than it has expired, the peer is blacklisted + if let RequestTicket::LocallyIssued(ticket) = ticket { + let waited_time = ticket.req_time().elapsed(); + let wait_time = ticket.wait_time(); + if waited_time < wait_time || waited_time >= wait_time + WAIT_TIME_TOLERANCE + { + warn!("The REGTOPIC has not waited the time assigned in the ticket. Blacklisting peer {}.", node_address.node_id); + ban_malicious_peer(self.config.ban_duration, node_address); + self.rpc_failure(id, RequestError::InvalidWaitTime); + return; + } + } + + let mut new_ticket = Ticket::new( + node_address.node_id, + node_address.socket_addr.ip(), + topic.hash(), + tokio::time::Instant::now(), + Duration::default(), + ); + + // If there is no wait time and the ad is successfully registered as an ad, the new ticket is sent + // with wait time set to zero indicating successful registration. + if let Err((wait_time, e)) = + self.ads + .insert(enr, topic.hash(), node_address.socket_addr.ip()) + { + // The wait time on the new ticket to send is updated if there is wait time for the requesting + // node for this topic to register as an ad due to the current state of the topic table. + error!( + "Registration attempt from peer {} for topic hash {} failed. Error: {}", + node_address.node_id, topic, e + ); + new_ticket.set_wait_time(wait_time); + } + + let wait_time = new_ticket.wait_time(); + self.send_ticket_response( + node_address, + id, + topic, + ResponseTicket::LocallyIssued(new_ticket), + wait_time, + ); + } } - RequestBody::TopicQuery { .. } => { - debug!("Received TopicQuery request which is unimplemented"); + RequestBody::TopicQuery { topic } => { + self.send_topic_query_nodes_response(node_address, id, topic); } } } @@ -638,81 +1543,80 @@ impl Service { match response.body { ResponseBody::Nodes { total, mut nodes } => { // Currently a maximum of DISTANCES_TO_REQUEST_PER_PEER*BUCKET_SIZE peers can be returned. Datagrams have a max - // size of 1280 and ENR's have a max size of 300 bytes. + // size of 1280 and ENRs have a max size of 300 bytes. // // Bucket sizes should be 16. In this case, there should be no more than 5*DISTANCES_TO_REQUEST_PER_PEER responses, to return all required peers. - if total > 5 * DISTANCES_TO_REQUEST_PER_PEER as u64 { + if total > 5 * DISTANCES_TO_REQUEST_PER_PEER as Log2Distance { warn!( "NodesResponse has a total larger than {}, nodes will be truncated", DISTANCES_TO_REQUEST_PER_PEER * 5 ); } - // These are sanitized and ordered - let distances_requested = match &active_request.request_body { - RequestBody::FindNode { distances } => distances, - _ => unreachable!(), - }; - - // This could be an ENR request from the outer service. If so respond to the - // callback and End. - if let Some(CallbackResponse::Enr(callback)) = active_request.callback.take() { - // Currently only support requesting for ENR's. Verify this is the case. - if !distances_requested.is_empty() && distances_requested[0] != 0 { - error!("Retrieved a callback request that wasn't for a peer's ENR"); + // Distances are sanitized and ordered + if let RequestBody::FindNode { distances } = &active_request.request_body { + // This could be an ENR request from the outer service. If so respond to the + // callback and End. 
+ if let Some(CallbackResponse::Enr(callback)) = + active_request.callback.take() + { + // Currently only support requesting for ENRs. Verify this is the case. + if !distances.is_empty() && distances[0] != 0 { + error!("Retrieved a callback request that wasn't for a peer's ENR"); + return; + } + // This must be for asking for an ENR + if nodes.len() > 1 { + warn!( + "Peer returned more than one ENR for itself. {}", + active_request.contact + ); + } + let response = nodes + .pop() + .ok_or(RequestError::InvalidEnr("Peer did not return an ENR")); + if let Err(e) = callback.send(response) { + warn!("Failed to send response in callback {:?}", e) + } return; - } - // This must be for asking for an ENR - if nodes.len() > 1 { - warn!( - "Peer returned more than one ENR for itself. {}", - active_request.contact - ); - } - let response = nodes - .pop() - .ok_or(RequestError::InvalidEnr("Peer did not return an ENR")); - if let Err(e) = callback.send(response) { - warn!("Failed to send response in callback {:?}", e) - } - return; - } - - // Filter out any nodes that are not of the correct distance - let peer_key: kbucket::Key = node_id.into(); - - // The distances we send are sanitized an ordered. - // We never send an ENR request in combination of other requests. - if distances_requested.len() == 1 && distances_requested[0] == 0 { - // we requested an ENR update - if nodes.len() > 1 { - warn!( - "Peer returned more than one ENR for itself. Blacklisting {}", - node_address - ); - let ban_timeout = self.config.ban_duration.map(|v| Instant::now() + v); - PERMIT_BAN_LIST.write().ban(node_address, ban_timeout); - nodes.retain(|enr| { - peer_key.log2_distance(&enr.node_id().into()).is_none() - }); - } - } else { - let before_len = nodes.len(); - nodes.retain(|enr| { - peer_key - .log2_distance(&enr.node_id().into()) - .map(|distance| distances_requested.contains(&distance)) - .unwrap_or_else(|| false) - }); - - if nodes.len() < before_len { - // Peer sent invalid ENRs. Blacklist the Node - warn!( - "Peer sent invalid ENR. Blacklisting {}", - active_request.contact - ); - let ban_timeout = self.config.ban_duration.map(|v| Instant::now() + v); - PERMIT_BAN_LIST.write().ban(node_address, ban_timeout); + } else if !distances.is_empty() { + // This is a response to a FINDNODE request with specifically request distances + // Filter out any nodes that are not of the correct distance + + let peer_key: kbucket::Key = node_id.into(); + + // The distances we send are sanitized an ordered. + // We never send an ENR request in combination of other requests. + if distances.len() == 1 && distances[0] == 0 { + // we requested an ENR update + if nodes.len() > 1 { + warn!( + "Peer returned more than one ENR for itself. Blacklisting {}", + node_address + ); + ban_malicious_peer(self.config.ban_duration, node_address); + nodes.retain(|enr| { + peer_key.log2_distance(&enr.node_id().into()).is_none() + }); + } + } else { + let before_len = nodes.len(); + nodes.retain(|enr| { + peer_key + .log2_distance(&enr.node_id().into()) + .map(|distance| distances.contains(&distance)) + .unwrap_or_else(|| false) + }); + + if nodes.len() < before_len { + // Peer sent invalid ENRs. Blacklist the Node + warn!( + "Peer sent invalid ENR. 
Blacklisting {}", + active_request.contact + ); + ban_malicious_peer(self.config.ban_duration, node_address); + } + } } } @@ -727,7 +1631,7 @@ impl Service { "Nodes Response: {} of {} received", current_response.count, total ); - // if there are more requests coming, store the nodes and wait for + // if there are more responses coming, store the nodes and wait for // another response // We allow for implementations to send at a minimum 3 nodes per response. // We allow for the number of nodes to be returned as the maximum we emit. @@ -751,7 +1655,7 @@ impl Service { } debug!( - "Received a nodes response of len: {}, total: {}, from: {}", + "Received a NODES response of len: {}, total: {}, from: {}", nodes.len(), total, active_request.contact @@ -762,13 +1666,49 @@ impl Service { // ensure any mapping is removed in this rare case self.active_nodes_responses.remove(&node_id); - self.discovered(&node_id, nodes, active_request.query_id); + if let RequestBody::FindNode { .. } = &active_request.request_body { + self.discovered(&node_id, nodes, active_request.query_id); + } else if let RequestBody::TopicQuery { topic } = &active_request.request_body { + nodes.retain(|enr| { + if enr.node_id() == self.local_enr.read().node_id() { + // Don't add this node as a result to the query if it is currently advertising + // the topic and was returned as an ad in the NODES response. + return false; + } + if !(self.config.table_filter)(enr) { + return false; + } + // Ads are checked for validity, if they do not contain the topic in their enr, they are discarded + if let Some(topics) = enr.get(ENR_KEY_TOPICS) { + if let Ok(Some(advertised_topics)) = TopicsEnrField::decode(topics) + { + for advertised_topic in advertised_topics.topics_iter() { + if advertised_topic.hash() == *topic { + return true; + } + } + } + } + false + }); + if let Some(query) = self.active_topic_queries.queries.get_mut(topic) { + nodes.into_iter().for_each(|enr| { + trace!( + "Inserting node {} into query for topic hash {}", + enr.node_id(), + topic + ); + query.results.insert(enr.node_id(), enr); + }); + *query.queried_peers.entry(node_id).or_default() = true; + } + } } ResponseBody::Pong { enr_seq, ip, port } => { let socket = SocketAddr::new(ip, port); // perform ENR majority-based update if required. - // Only count votes that from peers we have contacted. + // Only count votes that are from peers we have contacted. 
let key: kbucket::Key = node_id.into(); let should_count = match self.kbuckets.write().entry(&key) { kbucket::Entry::Present(_, status) @@ -776,7 +1716,21 @@ impl Service { { true } - _ => false, + _ => { + let mut should_count = false; + for kbuckets in self.topics_kbuckets.values_mut() { + match kbuckets.entry(&key) { + kbucket::Entry::Present(_, status) + if status.is_connected() && !status.is_incoming() => + { + should_count = true; + break; + } + _ => {} + } + } + should_count + } }; if should_count { @@ -851,7 +1805,7 @@ impl Service { } // check if we need to request a new ENR - if let Some(enr) = self.find_enr(&node_id) { + if let Some(enr) = self.find_enr(&node_id, true) { if enr.seq() < enr_seq { // request an ENR update debug!("Requesting an ENR update from: {}", active_request.contact); @@ -864,7 +1818,7 @@ impl Service { }; self.send_rpc_request(active_request); } - self.connection_updated(node_id, ConnectionStatus::PongReceived(enr)); + self.connection_updated(node_id, ConnectionStatus::PongReceived(enr), None); } } ResponseBody::Talk { response } => { @@ -878,11 +1832,47 @@ impl Service { _ => error!("Invalid callback for response"), } } - ResponseBody::Ticket { .. } => { - error!("Received a TICKET response. This is unimplemented and should be unreachable."); - } - ResponseBody::RegisterConfirmation { .. } => { - error!("Received a RegisterConfirmation response. This is unimplemented and should be unreachable."); + ResponseBody::Ticket { + ticket, + wait_time, + topic, + } => { + if wait_time <= MAX_WAIT_TIME_TICKET { + let now = Instant::now(); + let peer_key: kbucket::Key = node_id.into(); + let topic = Topic::new(topic); + let topic_key: kbucket::Key = + NodeId::new(&topic.hash().as_bytes()).into(); + if let Some(distance) = peer_key.log2_distance(&topic_key) { + let registration_attempts = + self.registration_attempts.entry(topic.clone()).or_default(); + if let Some(reg_state) = registration_attempts + .entry(distance) + .or_default() + .reg_attempts + .get_mut(&node_id) + { + if wait_time > 0 { + if let ResponseTicket::RemotelyIssued(ticket_bytes) = ticket { + if let Err(e) = self.tickets.insert( + active_request.contact, + ticket_bytes, + Duration::from_secs(wait_time), + topic, + ) { + error!( + "Failed storing ticket from node id {}. Error {}", + node_id, e + ); + *reg_state = RegistrationState::TicketLimit(now); + } + } + } else { + *reg_state = RegistrationState::Confirmed(now); + } + } + } + } } } } else { @@ -971,16 +1961,101 @@ impl Service { self.send_rpc_request(active_request); } - /// Sends a NODES response, given a list of found ENR's. This function splits the nodes up - /// into multiple responses to ensure the response stays below the maximum packet size. - fn send_nodes_response( + /// Requests a node to advertise the sending node for a given topic hash. + fn reg_topic_request(&mut self, contact: NodeContact, topic: Topic, ticket: RequestTicket) { + let request_body = RequestBody::RegisterTopic { + topic: topic.topic(), + ticket, + }; + trace!("Sending reg topic to node {}", contact.socket_addr()); + self.send_rpc_request(ActiveRequest { + contact, + request_body, + query_id: None, + callback: None, + }); + } + + /// Queries a node for the ads that node currently advertises for a given topic. 
+    /// Queries a node for the ads it currently advertises for a given topic.
+    fn topic_query_request(&mut self, contact: NodeContact, topic: TopicHash) {
+        let request_body = RequestBody::TopicQuery { topic };
+
+        let active_request = ActiveRequest {
+            contact,
+            request_body,
+            query_id: None,
+            callback: None,
+        };
+        self.send_rpc_request(active_request);
+    }
+
+    /// The response sent to every REGTOPIC request, in accordance with the spec.
+    fn send_ticket_response(
        &mut self,
        node_address: NodeAddress,
        rpc_id: RequestId,
-        mut distances: Vec<u64>,
+        topic: Topic,
+        ticket: ResponseTicket,
+        wait_time: Duration,
+    ) {
+        let response = Response {
+            id: rpc_id,
+            body: ResponseBody::Ticket {
+                ticket,
+                wait_time: wait_time.as_secs(),
+                topic: topic.topic(),
+            },
+        };
+        trace!(
+            "Sending TICKET response to: {}. Response: {} ",
+            node_address,
+            response
+        );
+        let _ = self
+            .handler_send
+            .send(HandlerIn::Response(node_address, Box::new(response)));
+    }
+
+    /// Response to a topic query containing the nodes currently advertised for the
+    /// requested topic, if any.
+    fn send_topic_query_nodes_response(
+        &mut self,
+        node_address: NodeAddress,
+        rpc_id: RequestId,
+        topic: TopicHash,
+    ) {
+        let nodes_to_send = self
+            .ads
+            .get_ad_nodes(topic)
+            .map(|ad| ad.node_record().clone())
+            .collect::<Vec<Enr>>();
+        trace!(
+            "Sending NODES response(s) containing a total of {} ads for topic hash {}",
+            nodes_to_send.len(),
+            topic
+        );
+        self.send_nodes_response(
+            nodes_to_send,
+            node_address,
+            rpc_id,
+            "TOPICQUERY",
+            ResponseBody::Nodes {
+                total: 1u64,
+                nodes: Vec::new(), // `send_nodes_response` handles dividing `nodes_to_send` into multiple NODES responses
+            },
+        );
+    }
+
+    /// Finds a list of ENRs in the local routing table at the given distances, to send in a
+    /// NODES response to a FINDNODE request.
+    fn send_find_nodes_response(
+        &mut self,
+        node_address: NodeAddress,
+        rpc_id: RequestId,
+        mut distances: Vec<u64>,
    ) {
        // NOTE: At most we only allow 5 distances to be sent (see the decoder). If each of these
-        // buckets are full, that equates to 80 ENR's to respond with.
+        // buckets are full, that equates to 80 ENRs to respond with.
        let mut nodes_to_send = Vec::new();
        distances.sort_unstable();
@@ -1009,25 +2084,48 @@ impl Service {
                nodes_to_send.push(node);
            }
        }
+        self.send_nodes_response(
+            nodes_to_send,
+            node_address,
+            rpc_id,
+            "FINDNODE",
+            ResponseBody::Nodes {
+                total: 1u64,
+                nodes: Vec::new(), // `send_nodes_response` handles dividing `nodes_to_send` into multiple NODES responses
+            },
+        );
+    }
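// [Illustrative aside, not part of the patch.] The 80-ENR bound in the NOTE above is
// just the 5-distance decoder limit times a full k-bucket (k = 16 in this crate):
const MAX_REQUESTED_DISTANCES: usize = 5;
const BUCKET_SIZE: usize = 16;
const MAX_FINDNODE_ENRS: usize = MAX_REQUESTED_DISTANCES * BUCKET_SIZE; // = 80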

+    /// Sends a NODES response, given a list of ENRs. This function splits the nodes up
+    /// into multiple responses to ensure the response stays below the maximum packet size.
+    fn send_nodes_response(
+        &self,
+        nodes_to_send: Vec<Enr>,
+        node_address: NodeAddress,
+        rpc_id: RequestId,
+        req_type: &str,
+        resp_body: ResponseBody,
+    ) {
+        debug!("Sending NODES response to {} request {}", req_type, rpc_id);
        // if there are no nodes, send an empty response
        if nodes_to_send.is_empty() {
            let response = Response {
                id: rpc_id,
-                body: ResponseBody::Nodes {
-                    total: 1u64,
-                    nodes: Vec::new(),
-                },
+                body: resp_body.clone(),
            };
            trace!(
-                "Sending empty FINDNODES response to: {}",
+                "Sending empty {} response to: {}",
+                req_type,
                node_address.node_id
            );
            if let Err(e) = self
                .handler_send
                .send(HandlerIn::Response(node_address, Box::new(response)))
            {
-                warn!("Failed to send empty FINDNODES response {}", e)
+                warn!(
+                    "Failed to send empty {} response to {} request. Error: {}",
+                    resp_body, req_type, e
+                )
            }
        } else {
            // build the NODES response
@@ -1038,14 +2136,14 @@ impl Service {
            for enr in nodes_to_send.into_iter() {
                let entry_size = rlp::encode(&enr).len();
                // Responses assume that a session is established. Thus, on top of the encoded
-                // ENR's the packet should be a regular message. A regular message has an IV (16
-                // bytes), and a header of 55 bytes. The find-nodes RPC requires 16 bytes for the ID and the
+                // ENRs the packet should be a regular message. A regular message has an IV (16
+                // bytes), and a header of 55 bytes. The FINDNODE RPC requires 16 bytes for the ID and the
                // `total` field. Also there is a 16 byte HMAC for encryption and an extra byte for
                // RLP encoding.
                //
-                // We could also be responding via an autheader which can take up to 282 bytes in its
+                // We could also be responding via an auth header which can take up to 282 bytes in its
                // header.
-                // As most messages will be normal messages we will try and pack as many ENR's we
+                // As most messages will be normal messages we will try to pack in as many ENRs as we
                // can and drop the response packet if a user requests an auth message of a very
                // packed response.
                //
@@ -1068,18 +2166,22 @@ impl Service {

            let responses: Vec<Response> = to_send_nodes
                .into_iter()
-                .map(|nodes| Response {
-                    id: rpc_id.clone(),
-                    body: ResponseBody::Nodes {
+                .map(|nodes| {
+                    let body = ResponseBody::Nodes {
                        total: (rpc_index + 1) as u64,
                        nodes,
-                    },
+                    };
+                    Response {
+                        id: rpc_id.clone(),
+                        body,
+                    }
                })
                .collect();

            for response in responses {
                trace!(
-                    "Sending FINDNODES response to: {}. Response: {} ",
+                    "Sending {} NODES response to: {}. Response: {} ",
+                    req_type,
                    node_address,
                    response
                );
@@ -1087,7 +2189,7 @@ impl Service {
                    node_address.clone(),
                    Box::new(response),
                )) {
-                    warn!("Failed to send FINDNODES response {}", e)
+                    warn!("Failed to send {} response {}", req_type, e)
                }
            }
        }
@@ -1101,7 +2203,7 @@ impl Service {
        request_body: RequestBody,
    ) {
        // find the ENR associated with the query
-        if let Some(enr) = self.find_enr(&return_peer) {
+        if let Some(enr) = self.find_enr(&return_peer, true) {
            match NodeContact::try_from_enr(enr, self.config.ip_mode) {
                Ok(contact) => {
                    let active_request = ActiveRequest {
@@ -1132,12 +2234,13 @@ impl Service {
    }

    /// Sends generic RPC requests. Each request gets added to known outputs, awaiting a response.
-    fn send_rpc_request(&mut self, active_request: ActiveRequest) {
+    fn send_rpc_request(&mut self, active_request: ActiveRequest) -> RequestId {
        // Generate a random rpc_id which is matched per node id
        let id = RequestId::random();
+        let request_body = active_request.request_body.clone();
        let request: Request = Request {
            id: id.clone(),
-            body: active_request.request_body.clone(),
+            body: request_body,
        };
        let contact = active_request.contact.clone();
@@ -1147,8 +2250,9 @@ impl Service {
            .send(HandlerIn::Request(contact, Box::new(request)))
            .is_ok()
        {
-            self.active_requests.insert(id, active_request);
+            self.active_requests.insert(id.clone(), active_request);
        }
+        id
    }

    fn send_event(&mut self, event: Discv5Event) {
@@ -1160,58 +2264,60 @@ impl Service {
        }
    }

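// [Illustrative aside, not part of the patch.] Back-of-the-envelope budget behind the
// overhead comment in `send_nodes_response` above, assuming the 1280-byte discv5
// packet limit:
const MAX_PACKET_SIZE: usize = 1280;
const IV_SIZE: usize = 16;
const HEADER_SIZE: usize = 55;
const ID_AND_TOTAL: usize = 16;
const HMAC_SIZE: usize = 16;
const EXTRA_RLP_BYTE: usize = 1;
// Leaves 1176 bytes for RLP-encoded ENRs per regular NODES message.
const ENR_BUDGET: usize =
    MAX_PACKET_SIZE - (IV_SIZE + HEADER_SIZE + ID_AND_TOTAL + HMAC_SIZE + EXTRA_RLP_BYTE);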
-    /// Processes discovered peers from a query.
+    /// Processes discovered peers from a FINDNODE query looking up a node id or a topic hash.
    fn discovered(&mut self, source: &NodeId, mut enrs: Vec<Enr>, query_id: Option<QueryId>) {
        let local_id = self.local_enr.read().node_id();
+
        enrs.retain(|enr| {
-            if enr.node_id() == local_id {
+            let node_id = enr.node_id();
+            // Ignore the local node's own ENR: it is never a "new" discovered peer, even if a
+            // remote returns it in a NODES response.
+            if local_id == node_id {
                return false;
            }
-
-            // If any of the discovered nodes are in the routing table, and there contains an older ENR, update it.
-            // If there is an event stream send the Discovered event
+            // If there is an event stream, send the Discovered event.
            if self.config.report_discovered_peers {
                self.send_event(Discv5Event::Discovered(enr.clone()));
            }
+            // The remaining ENRs are used if this request was part of a query. If we are
+            // requesting the target of the query, this ENR could be the result of requesting the
+            // target-node's own id. We don't want to add this as a "new" discovered peer in the
+            // query, so we remove it from the discovered list here.
+            if source == &node_id {
+                return false;
+            }
+            // Ignore peers that don't pass the table filter.
+            if !(self.config.table_filter)(enr) {
+                return false;
+            }

-            // ignore peers that don't pass the table filter
-            if (self.config.table_filter)(enr) {
-                let key = kbucket::Key::from(enr.node_id());
-
-                // If the ENR exists in the routing table and the discovered ENR has a greater
-                // sequence number, perform some filter checks before updating the enr.
+            let key = kbucket::Key::from(enr.node_id());

-                let must_update_enr = match self.kbuckets.write().entry(&key) {
-                    kbucket::Entry::Present(entry, _) => entry.value().seq() < enr.seq(),
-                    kbucket::Entry::Pending(mut entry, _) => entry.value().seq() < enr.seq(),
-                    _ => false,
-                };
+            // If the ENR exists in the routing table and the discovered ENR has a greater
+            // sequence number, perform some filter checks before updating the enr.

-                if must_update_enr {
-                    if let UpdateResult::Failed(reason) =
-                        self.kbuckets.write().update_node(&key, enr.clone(), None)
-                    {
-                        self.peers_to_ping.remove(&enr.node_id());
-                        debug!(
-                            "Failed to update discovered ENR. Node: {}, Reason: {:?}",
-                            source, reason
-                        );
-
-                        return false; // Remove this peer from the discovered list if the update failed
-                    }
+            let must_update_enr = match self.kbuckets.write().entry(&key) {
+                kbucket::Entry::Present(entry, _) => entry.value().seq() < enr.seq(),
+                kbucket::Entry::Pending(mut entry, _) => entry.value().seq() < enr.seq(),
+                _ => false,
+            };
+            if must_update_enr {
+                if let UpdateResult::Failed(reason) =
+                    self.kbuckets.write().update_node(&key, enr.clone(), None)
+                {
+                    self.peers_to_ping.remove(&enr.node_id());
+                    debug!(
+                        "Failed to update discovered ENR. Node: {}, Reason: {:?}",
+                        source, reason
+                    );
+                    return false; // Remove this peer from the discovered list if the update failed
                }
-            } else {
-                return false; // Didn't pass the table filter remove the peer
            }
-
-            // The remaining ENRs are used if this request was part of a query. If we are
-            // requesting the target of the query, this ENR could be the result of requesting the
-            // target-nodes own id. We don't want to add this as a "new" discovered peer in the
-            // query, so we remove it from the discovered list here.
-            source != &enr.node_id()
+            true
        });
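// [Illustrative aside, not part of the patch.] `config.table_filter`, applied in the
// retain() above, is a user-supplied predicate over ENRs. A minimal sketch, assuming
// the config builder exposes `table_filter` as the `config.table_filter` field
// suggests; the IPv4-only policy is only an example, and the ENR accessor name varies
// across enr crate versions:
let config = Discv5ConfigBuilder::new()
    .table_filter(|enr| enr.ip4().is_some()) // admit only peers advertising an IPv4 address
    .build();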

-        // if this is part of a query, update the query
+        // The remaining ENRs are used if this request was part of a query. Update the query.
        if let Some(query_id) = query_id {
            if let Some(query) = self.queries.get_mut(query_id) {
                let mut peer_count = 0;
@@ -1236,12 +2342,20 @@ impl Service {
    }

    /// Update the connection status of a node in the routing table.
    /// This tracks whether or not we should be pinging peers. Disconnected peers are removed from
-    /// the queue and newly added peers to the routing table are added to the queue.
-    fn connection_updated(&mut self, node_id: NodeId, new_status: ConnectionStatus) {
+    /// the queue and peers newly added to the routing table (or topics kbuckets) are added to the queue.
+    fn connection_updated(
+        &mut self,
+        node_id: NodeId,
+        new_status: ConnectionStatus,
+        topic_hash: Option<TopicHash>,
+    ) {
        // Variables that may require post-processing
        let mut ping_peer = None;
        let mut event_to_send = None;

+        let kbuckets_topic =
+            topic_hash.and_then(|topic_hash| self.topics_kbuckets.get_mut(&topic_hash));
+
        let key = kbucket::Key::from(node_id);
        match new_status {
            ConnectionStatus::Connected(enr, direction) => {
@@ -1250,7 +2364,20 @@ impl Service {
                    state: ConnectionState::Connected,
                    direction,
                };
-                match self.kbuckets.write().insert_or_update(&key, enr, status) {
+                let insert_result = if let Some(kbuckets) = kbuckets_topic {
+                    kbuckets.insert_or_update(&key, enr, status)
+                } else {
+                    self.kbuckets.write().insert_or_update(&key, enr, status)
+                };
+
+                if topic_hash.is_some() {
+                    trace!(
+                        "Inserting node into kbucket of topic gave result: {:?}",
+                        insert_result
+                    );
+                }
+
+                match insert_result {
                    InsertResult::Inserted => {
                        // We added this peer to the table
                        debug!("New connected node added to routing table: {}", node_id);
@@ -1284,11 +2411,12 @@ impl Service {
                    }
                }
            }
            ConnectionStatus::PongReceived(enr) => {
-                match self
-                    .kbuckets
-                    .write()
-                    .update_node(&key, enr, Some(ConnectionState::Connected))
-                {
+                match self.kbuckets.write().update_node(
+                    &key,
+                    enr.clone(),
+                    Some(ConnectionState::Connected),
+                ) {
+                    UpdateResult::Failed(FailureReason::KeyNonExistent) => {}
                    UpdateResult::Failed(reason) => {
                        self.peers_to_ping.remove(&node_id);
                        debug!(
@@ -1300,16 +2428,37 @@ impl Service {
                        debug!("Updated {:?}", update)
                    } // Updated ENR successfully.
                }
+                for kbuckets in self.topics_kbuckets.values_mut() {
+                    match kbuckets.update_node(&key, enr.clone(), Some(ConnectionState::Connected))
+                    {
+                        UpdateResult::Failed(FailureReason::KeyNonExistent) => {}
+                        UpdateResult::Failed(reason) => {
+                            self.peers_to_ping.remove(&node_id);
+                            debug!(
+                                "Could not update ENR from pong. Node: {}, reason: {:?}",
+                                node_id, reason
+                            );
+                        }
+                        update => {
+                            debug!("Updated {:?}", update)
+                        } // Updated ENR successfully.
+                    }
+                }
            }
            ConnectionStatus::Disconnected => {
+                let update_result = if let Some(kbuckets) = kbuckets_topic {
+                    kbuckets.update_node_status(&key, ConnectionState::Disconnected, None)
+                } else {
+                    self.kbuckets.write().update_node_status(
+                        &key,
+                        ConnectionState::Disconnected,
+                        None,
+                    )
+                };
                // If the node has disconnected, remove any ping timer for the node.
-                match self.kbuckets.write().update_node_status(
-                    &key,
-                    ConnectionState::Disconnected,
-                    None,
-                ) {
+                match update_result {
                    UpdateResult::Failed(reason) => match reason {
-                        FailureReason::KeyNonExistant => {}
+                        FailureReason::KeyNonExistent => {}
                        others => {
                            warn!(
                                "Could not update node to disconnected. Node: {}, Reason: {:?}",
@@ -1350,7 +2499,12 @@ impl Service {

    /// The equivalent of libp2p `inject_connected()` for a udp session. We have no stream, but a
    /// session key-pair has been negotiated.
-    fn inject_session_established(&mut self, enr: Enr, direction: ConnectionDirection) {
+    fn inject_session_established(
+        &mut self,
+        enr: Enr,
+        direction: ConnectionDirection,
+        topic_hash: Option<TopicHash>,
+    ) {
        // Ignore sessions with non-contactable ENRs
        if self.config.ip_mode.get_contactable_addr(&enr).is_none() {
            return;
        }
@@ -1361,7 +2515,11 @@ impl Service {
            "Session established with Node: {}, direction: {}",
            node_id, direction
        );
-        self.connection_updated(node_id, ConnectionStatus::Connected(enr, direction));
+        self.connection_updated(
+            node_id,
+            ConnectionStatus::Connected(enr, direction),
+            topic_hash,
+        );
    }

    /// A session could not be established or an RPC request timed-out (after a few retries, if
@@ -1375,7 +2533,7 @@ impl Service {
            Some(CallbackResponse::Enr(callback)) => {
                callback
                    .send(Err(error))
-                    .unwrap_or_else(|_| debug!("Couldn't send TALK error response to user"));
+                    .unwrap_or_else(|_| debug!("Couldn't send ENR error response to user"));
                return;
            }
            Some(CallbackResponse::Talk(callback)) => {
@@ -1424,6 +2582,40 @@ impl Service {
                    }
                }
            }
+            RequestBody::TopicQuery { topic } => {
+                if let Some(query) = self.active_topic_queries.queries.get_mut(&topic) {
+                    if let Some(exhausted) = query.queried_peers.get_mut(&node_id) {
+                        *exhausted = true;
+                        debug!(
+                            "Failed TOPICQUERY request: {} for node: {}, reason: {:?}",
+                            active_request.request_body, active_request.contact, error
+                        );
+                    }
+                }
+                self.connection_updated(node_id, ConnectionStatus::Disconnected, Some(topic));
+                return;
+            }
+            RequestBody::RegisterTopic { topic, ticket: _ } => {
+                let peer_key: kbucket::Key<NodeId> = node_id.into();
+                let topic = Topic::new(topic);
+                let topic_hash = topic.hash();
+                let topic_key: kbucket::Key<NodeId> =
+                    NodeId::new(&topic_hash.as_bytes()).into();
+                if let Some(distance) = peer_key.log2_distance(&topic_key) {
+                    // Remove the registration attempt before disconnecting the peer.
+                    let registration_attempts =
+                        self.registration_attempts.entry(topic).or_default();
+                    if let Some(bucket) = registration_attempts.get_mut(&distance) {
+                        bucket.reg_attempts.remove(&node_id);
+                    }
+                }
+                self.connection_updated(
+                    node_id,
+                    ConnectionStatus::Disconnected,
+                    Some(topic_hash),
+                );
+                return;
+            }
            // for all other requests, if any are queries, mark them as failures.
            _ => {
                if let Some(query_id) = active_request.query_id {
@@ -1443,7 +2635,7 @@ impl Service {
            }
        }

-        self.connection_updated(node_id, ConnectionStatus::Disconnected);
+        self.connection_updated(node_id, ConnectionStatus::Disconnected, None);
    }
}

@@ -1466,6 +2658,35 @@ impl Service {
        .await
    }

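// [Illustrative aside, not part of the patch.] The maintenance future below races one
// poll_fn per topic table through futures::future::select_all, which yields the first
// future to become ready. A standalone example of the same pattern:
use futures::future::select_all;

fn main() {
    let futs = (0..3)
        .map(|i| Box::pin(async move { format!("table {} applied a pending entry", i) }))
        .collect::<Vec<_>>();
    // select_all resolves with (output, index, remaining_futures) of the first future
    // to complete; Box::pin makes each future Unpin as select_all requires.
    let (event, _index, _remaining) = futures::executor::block_on(select_all(futs));
    println!("{}", event);
}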
+    /// A future that maintains the topic kbuckets and drains applied pending entries,
+    /// returning a message describing the insertion when a new node has been inserted
+    /// into a topic's kbuckets.
+    async fn bucket_maintenance_poll_topics(
+        kbuckets: impl Iterator<Item = (TopicHash, &mut KBucketsTable<NodeId, Enr>)>,
+    ) -> Option<String> {
+        // Drain applied pending entries from the routing table.
+        let mut update_kbuckets_futures = Vec::new();
+        for (topic_hash, topic_kbuckets) in kbuckets {
+            update_kbuckets_futures.push(future::poll_fn(move |_cx| {
+                if let Some(entry) = (*topic_kbuckets).take_applied_pending() {
+                    let node_id = entry.inserted.into_preimage();
+                    let replaced = entry.evicted.map(|n| n.key.into_preimage());
+                    return Poll::Ready(format!(
+                        "Node {} has been inserted into kbuckets of topic {}. Replaced: {:?}",
+                        node_id, topic_hash, replaced
+                    ));
+                }
+                Poll::Pending
+            }));
+        }
+        if update_kbuckets_futures.is_empty() {
+            None
+        } else {
+            let (event, _, _) = select_all(update_kbuckets_futures).await;
+            Some(event)
+        }
+    }
+
    /// A future that maintains active queries. This returns completed and timed out queries, as
    /// well as queries which need to be driven further with extra requests.
    async fn query_event_poll(queries: &mut QueryPool<QueryInfo, NodeId, Enr>) -> QueryEvent {
@@ -1474,7 +2695,11 @@ impl Service {
            QueryPoolState::Waiting(Some((query, return_peer))) => {
                let node_id = return_peer;
                let request_body = query.target().rpc_request(return_peer);
-                Poll::Ready(QueryEvent::Waiting(query.id(), node_id, request_body))
+                Poll::Ready(QueryEvent::Waiting(
+                    query.id(),
+                    node_id,
+                    Box::new(request_body),
+                ))
            }
            QueryPoolState::Timeout(query) => {
                warn!("Query id: {:?} timed out", query.id());
@@ -1486,11 +2711,17 @@ impl Service {
        }
    }
}

+/// If a peer behaves maliciously, the peer can be banned for a certain time span.
+pub fn ban_malicious_peer(ban_duration: Option<Duration>, node_address: NodeAddress) {
+    let ban_timeout = ban_duration.map(|v| Instant::now() + v);
+    PERMIT_BAN_LIST.write().ban(node_address, ban_timeout);
+}
+
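// [Illustrative aside, not part of the patch.] Usage of the helper above, with
// `node_address` standing in for some misbehaving peer: a concrete duration yields a
// timed ban, while `None` maps to a `ban_timeout` of `None`, i.e. an entry in
// PERMIT_BAN_LIST that never expires on its own.
ban_malicious_peer(Some(Duration::from_secs(300)), node_address.clone()); // 5 minute ban
ban_malicious_peer(None, node_address); // open-ended ban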
/// The result of the `query_event_poll` indicating an action is required to further progress an
/// active query.
enum QueryEvent {
    /// The query is waiting for a peer to be contacted.
-    Waiting(QueryId, NodeId, RequestBody),
+    Waiting(QueryId, NodeId, Box<RequestBody>),
    /// The query has timed out, possibly returning peers.
    TimedOut(Box<crate::query_pool::Query<QueryInfo, NodeId, Enr>>),
    /// The query has completed successfully.
diff --git a/src/service/query_info.rs b/src/service/query_info.rs
index 9ab828f53..92c4e0f82 100644
--- a/src/service/query_info.rs
+++ b/src/service/query_info.rs
@@ -13,7 +13,7 @@ pub struct QueryInfo {
    pub untrusted_enrs: SmallVec<[Enr; 16]>,

    /// A callback channel for the service that requested the query.
-    pub callback: oneshot::Sender<Vec<Enr>>,
+    pub callback: Option<oneshot::Sender<Vec<Enr>>>,

    /// The number of distances we request for each peer.
    /// NOTE: This must not be larger than 127.
@@ -25,13 +25,16 @@ pub struct QueryInfo {
pub enum QueryType {
    /// The user requested a `FIND_NODE` query to be performed. It should be reported when finished.
    FindNode(NodeId),
+    /// The user requested a `FIND_NODE` query to be performed to find the nodes closest to a topic
+    /// key. It should be reported when finished.
+    FindTopic(NodeId),
}

impl QueryInfo {
    /// Builds an RPC Request, given the QueryInfo
    pub(crate) fn rpc_request(&self, peer: NodeId) -> RequestBody {
        match self.query_type {
-            QueryType::FindNode(node_id) => {
+            QueryType::FindNode(node_id) | QueryType::FindTopic(node_id) => {
                let distances = findnode_log2distance(node_id, peer, self.distances_to_request)
                    .unwrap_or_else(|| vec![0]);
                RequestBody::FindNode { distances }
@@ -43,7 +46,7 @@ impl QueryInfo {
impl crate::query_pool::TargetKey<NodeId> for QueryInfo {
    fn key(&self) -> Key<NodeId> {
        match self.query_type {
-            QueryType::FindNode(ref node_id) => {
+            QueryType::FindNode(ref node_id) | QueryType::FindTopic(ref node_id) => {
                Key::new_raw(*node_id, *GenericArray::from_slice(&node_id.raw()))
            }
        }
diff --git a/src/service/test.rs b/src/service/test.rs
index caabe1899..f6fc2e942 100644
--- a/src/service/test.rs
+++ b/src/service/test.rs
@@ -1,7 +1,6 @@
 #![cfg(test)]

 use super::*;
-
 use crate::{
     handler::Handler,
     kbucket,
@@ -68,7 +67,7 @@ async fn build_service(
    let kbuckets = Arc::new(RwLock::new(KBucketsTable::new(
        local_enr.read().node_id().into(),
-        Duration::from_secs(60),
+        KBUCKET_PENDING_TIMEOUT,
        config.incoming_bucket_limit,
        table_filter,
        bucket_filter,
@@ -92,6 +91,16 @@ async fn build_service(
        peers_to_ping: HashSetDelay::new(config.ping_interval),
        discv5_recv,
        event_stream: None,
+        ads: Ads::new(Duration::from_secs(60 * 15), 100, 50000, 10, 3),
+        tickets: Tickets::new(Duration::from_secs(60 * 15)),
+        registration_attempts: HashMap::new(),
+        topic_lookups: Default::default(),
+        topics_kbuckets: HashMap::new(),
+        discovered_peers_topic: HashMap::new(),
+        active_topic_queries: ActiveTopicQueries::new(
+            config.topic_query_timeout,
+            config.max_nodes_response,
+        ),
        exit,
        config,
    }
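// [Illustrative aside, not part of the patch.] The `FindTopic` variant added above
// reuses the whole FindNode machinery by treating the 32-byte topic hash as a NodeId,
// so XOR/log2 distance to a topic behaves exactly like distance to a node. Sketch
// ("lighthouse" is an arbitrary example topic string):
let topic = Topic::new("lighthouse".to_string());
let target = NodeId::new(&topic.hash().as_bytes());
// `target` can now seed QueryType::FindTopic(target) like any node-id lookup.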