From 66f41d8780b592b82448fea73e44aaeea0f0b388 Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Thu, 12 Oct 2023 16:40:33 +0200 Subject: [PATCH 001/122] Code move --- .../src/net/routing/{ => dispatcher}/face.rs | 70 +- zenoh/src/net/routing/dispatcher/mod.rs | 24 + zenoh/src/net/routing/dispatcher/pubsub.rs | 785 +++++++++ zenoh/src/net/routing/dispatcher/queries.rs | 1076 ++++++++++++ .../net/routing/{ => dispatcher}/resource.rs | 124 +- zenoh/src/net/routing/dispatcher/tables.rs | 316 ++++ zenoh/src/net/routing/hat/mod.rs | 195 +++ zenoh/src/net/routing/{ => hat}/network.rs | 4 +- zenoh/src/net/routing/{ => hat}/pubsub.rs | 830 +-------- zenoh/src/net/routing/{ => hat}/queries.rs | 1123 +----------- zenoh/src/net/routing/mod.rs | 7 +- zenoh/src/net/routing/router.rs | 545 +----- zenoh/src/net/runtime/adminspace.rs | 8 +- zenoh/src/net/runtime/mod.rs | 4 +- zenoh/src/net/tests/tables.rs | 1504 ++++++++--------- zenoh/src/session.rs | 2 +- 16 files changed, 3412 insertions(+), 3205 deletions(-) rename zenoh/src/net/routing/{ => dispatcher}/face.rs (90%) create mode 100644 zenoh/src/net/routing/dispatcher/mod.rs create mode 100644 zenoh/src/net/routing/dispatcher/pubsub.rs create mode 100644 zenoh/src/net/routing/dispatcher/queries.rs rename zenoh/src/net/routing/{ => dispatcher}/resource.rs (88%) create mode 100644 zenoh/src/net/routing/dispatcher/tables.rs create mode 100644 zenoh/src/net/routing/hat/mod.rs rename zenoh/src/net/routing/{ => hat}/network.rs (99%) rename zenoh/src/net/routing/{ => hat}/pubsub.rs (58%) rename zenoh/src/net/routing/{ => hat}/queries.rs (53%) diff --git a/zenoh/src/net/routing/face.rs b/zenoh/src/net/routing/dispatcher/face.rs similarity index 90% rename from zenoh/src/net/routing/face.rs rename to zenoh/src/net/routing/dispatcher/face.rs index d84f173d26..17bf398bc1 100644 --- a/zenoh/src/net/routing/face.rs +++ b/zenoh/src/net/routing/dispatcher/face.rs @@ -11,7 +11,9 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::router::*; +use super::super::router::*; +use super::tables::{Tables, TablesLock}; +use super::{resource::*, tables}; use std::collections::{HashMap, HashSet}; use std::fmt; use std::sync::Arc; @@ -28,26 +30,26 @@ use zenoh_transport::stats::TransportStats; use zenoh_transport::{Primitives, TransportMulticast}; pub struct FaceState { - pub(super) id: usize, - pub(super) zid: ZenohId, - pub(super) whatami: WhatAmI, + pub(crate) id: usize, + pub(crate) zid: ZenohId, + pub(crate) whatami: WhatAmI, #[cfg(feature = "stats")] - pub(super) stats: Option>, - pub(super) primitives: Arc, - pub(super) link_id: usize, - pub(super) local_mappings: HashMap>, - pub(super) remote_mappings: HashMap>, - pub(super) local_subs: HashSet>, - pub(super) remote_subs: HashSet>, - pub(super) local_qabls: HashMap, QueryableInfo>, - pub(super) remote_qabls: HashSet>, - pub(super) next_qid: RequestId, - pub(super) pending_queries: HashMap>, - pub(super) mcast_group: Option, + pub(crate) stats: Option>, + pub(crate) primitives: Arc, + pub(crate) link_id: usize, + pub(crate) local_mappings: HashMap>, + pub(crate) remote_mappings: HashMap>, + pub(crate) local_subs: HashSet>, + pub(crate) remote_subs: HashSet>, + pub(crate) local_qabls: HashMap, QueryableInfo>, + pub(crate) remote_qabls: HashSet>, + pub(crate) next_qid: RequestId, + pub(crate) pending_queries: HashMap>, + pub(crate) mcast_group: Option, } impl FaceState { - pub(super) fn new( + pub(crate) fn new( id: usize, zid: ZenohId, whatami: WhatAmI, @@ -78,7 +80,7 @@ impl FaceState { #[inline] 
#[allow(clippy::trivially_copy_pass_by_ref)] - pub(super) fn get_mapping( + pub(crate) fn get_mapping( &self, prefixid: &ExprId, mapping: Mapping, @@ -89,7 +91,7 @@ impl FaceState { } } - pub(super) fn get_next_local_id(&self) -> ExprId { + pub(crate) fn get_next_local_id(&self) -> ExprId { let mut id = 1; while self.local_mappings.get(&id).is_some() || self.remote_mappings.get(&id).is_some() { id += 1; @@ -97,8 +99,14 @@ impl FaceState { id } - pub(super) fn get_router(&self, tables: &Tables, nodeid: &u64) -> Option { - match tables.routers_net.as_ref().unwrap().get_link(self.link_id) { + pub(crate) fn get_router(&self, tables: &Tables, nodeid: &u64) -> Option { + match tables + .hat + .routers_net + .as_ref() + .unwrap() + .get_link(self.link_id) + { Some(link) => match link.get_zid(nodeid) { Some(router) => Some(*router), None => { @@ -119,8 +127,14 @@ impl FaceState { } } - pub(super) fn get_peer(&self, tables: &Tables, nodeid: &u64) -> Option { - match tables.peers_net.as_ref().unwrap().get_link(self.link_id) { + pub(crate) fn get_peer(&self, tables: &Tables, nodeid: &u64) -> Option { + match tables + .hat + .peers_net + .as_ref() + .unwrap() + .get_link(self.link_id) + { Some(link) => match link.get_zid(nodeid) { Some(router) => Some(*router), None => { @@ -185,7 +199,7 @@ impl Primitives for Face { (WhatAmI::Router, WhatAmI::Peer) | (WhatAmI::Peer, WhatAmI::Router) | (WhatAmI::Peer, WhatAmI::Peer) => { - if rtables.full_net(WhatAmI::Peer) { + if rtables.hat.full_net(WhatAmI::Peer) { if let Some(peer) = self .state .get_peer(&rtables, &(msg.ext_nodeid.node_id as u64)) @@ -238,7 +252,7 @@ impl Primitives for Face { (WhatAmI::Router, WhatAmI::Peer) | (WhatAmI::Peer, WhatAmI::Router) | (WhatAmI::Peer, WhatAmI::Peer) => { - if rtables.full_net(WhatAmI::Peer) { + if rtables.hat.full_net(WhatAmI::Peer) { if let Some(peer) = self .state .get_peer(&rtables, &(msg.ext_nodeid.node_id as u64)) @@ -289,7 +303,7 @@ impl Primitives for Face { (WhatAmI::Router, WhatAmI::Peer) | (WhatAmI::Peer, WhatAmI::Router) | (WhatAmI::Peer, WhatAmI::Peer) => { - if rtables.full_net(WhatAmI::Peer) { + if rtables.hat.full_net(WhatAmI::Peer) { if let Some(peer) = self .state .get_peer(&rtables, &(msg.ext_nodeid.node_id as u64)) @@ -342,7 +356,7 @@ impl Primitives for Face { (WhatAmI::Router, WhatAmI::Peer) | (WhatAmI::Peer, WhatAmI::Router) | (WhatAmI::Peer, WhatAmI::Peer) => { - if rtables.full_net(WhatAmI::Peer) { + if rtables.hat.full_net(WhatAmI::Peer) { if let Some(peer) = self .state .get_peer(&rtables, &(msg.ext_nodeid.node_id as u64)) @@ -432,7 +446,7 @@ impl Primitives for Face { } fn send_close(&self) { - super::router::close_face(&self.tables, &Arc::downgrade(&self.state)); + tables::close_face(&self.tables, &Arc::downgrade(&self.state)); } } diff --git a/zenoh/src/net/routing/dispatcher/mod.rs b/zenoh/src/net/routing/dispatcher/mod.rs new file mode 100644 index 0000000000..53c32fb5ff --- /dev/null +++ b/zenoh/src/net/routing/dispatcher/mod.rs @@ -0,0 +1,24 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +//! ⚠️ WARNING ⚠️ +//! +//! This module is intended for Zenoh's internal use. +//! +//! 
[Click here for Zenoh's documentation](../zenoh/index.html) +pub mod face; +pub mod pubsub; +pub mod queries; +pub mod resource; +pub mod tables; diff --git a/zenoh/src/net/routing/dispatcher/pubsub.rs b/zenoh/src/net/routing/dispatcher/pubsub.rs new file mode 100644 index 0000000000..277ed45843 --- /dev/null +++ b/zenoh/src/net/routing/dispatcher/pubsub.rs @@ -0,0 +1,785 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +use super::super::hat::network::Network; +use super::face::FaceState; +use super::resource::{DataRoutes, Direction, PullCaches, Resource, Route}; +use super::tables::{RoutingExpr, Tables}; +use petgraph::graph::NodeIndex; +use std::borrow::Cow; +use std::collections::{HashMap, HashSet}; +use std::convert::TryFrom; +use std::sync::Arc; +use std::sync::RwLock; +use zenoh_core::zread; +use zenoh_protocol::{ + core::{key_expr::OwnedKeyExpr, WhatAmI, WireExpr, ZenohId}, + network::{ + declare::{ext, Mode}, + Push, + }, + zenoh::PushBody, +}; +use zenoh_sync::get_mut_unchecked; + +#[inline] +fn insert_faces_for_subs( + route: &mut Route, + expr: &RoutingExpr, + tables: &Tables, + net: &Network, + source: usize, + subs: &HashSet, +) { + if net.trees.len() > source { + for sub in subs { + if let Some(sub_idx) = net.get_idx(sub) { + if net.trees[source].directions.len() > sub_idx.index() { + if let Some(direction) = net.trees[source].directions[sub_idx.index()] { + if net.graph.contains_node(direction) { + if let Some(face) = tables.get_face(&net.graph[direction].zid) { + route.entry(face.id).or_insert_with(|| { + let key_expr = + Resource::get_best_key(expr.prefix, expr.suffix, face.id); + ( + face.clone(), + key_expr.to_owned(), + if source != 0 { + Some(source as u16) + } else { + None + }, + ) + }); + } + } + } + } + } + } + } else { + log::trace!("Tree for node sid:{} not yet ready", source); + } +} + +fn compute_data_route( + tables: &Tables, + expr: &mut RoutingExpr, + source: Option, + source_type: WhatAmI, +) -> Arc { + let mut route = HashMap::new(); + let key_expr = expr.full_expr(); + if key_expr.ends_with('/') { + return Arc::new(route); + } + log::trace!( + "compute_data_route({}, {:?}, {:?})", + key_expr, + source, + source_type + ); + let key_expr = match OwnedKeyExpr::try_from(key_expr) { + Ok(ke) => ke, + Err(e) => { + log::warn!("Invalid KE reached the system: {}", e); + return Arc::new(route); + } + }; + let res = Resource::get_resource(expr.prefix, expr.suffix); + let matches = res + .as_ref() + .and_then(|res| res.context.as_ref()) + .map(|ctx| Cow::from(&ctx.matches)) + .unwrap_or_else(|| Cow::from(Resource::get_matches(tables, &key_expr))); + + let master = tables.whatami != WhatAmI::Router + || !tables.hat.full_net(WhatAmI::Peer) + || *tables + .hat + .elect_router(&tables.zid, &key_expr, tables.hat.shared_nodes.iter()) + == tables.zid; + + for mres in matches.iter() { + let mres = mres.upgrade().unwrap(); + if tables.whatami == WhatAmI::Router { + if master || source_type == WhatAmI::Router { + let net = tables.hat.routers_net.as_ref().unwrap(); + let router_source = match source_type { + WhatAmI::Router => source.unwrap(), + _ => net.idx.index(), + }; + 
insert_faces_for_subs( + &mut route, + expr, + tables, + net, + router_source, + &mres.context().router_subs, + ); + } + + if (master || source_type != WhatAmI::Router) && tables.hat.full_net(WhatAmI::Peer) { + let net = tables.hat.peers_net.as_ref().unwrap(); + let peer_source = match source_type { + WhatAmI::Peer => source.unwrap(), + _ => net.idx.index(), + }; + insert_faces_for_subs( + &mut route, + expr, + tables, + net, + peer_source, + &mres.context().peer_subs, + ); + } + } + + if tables.whatami == WhatAmI::Peer && tables.hat.full_net(WhatAmI::Peer) { + let net = tables.hat.peers_net.as_ref().unwrap(); + let peer_source = match source_type { + WhatAmI::Router | WhatAmI::Peer => source.unwrap(), + _ => net.idx.index(), + }; + insert_faces_for_subs( + &mut route, + expr, + tables, + net, + peer_source, + &mres.context().peer_subs, + ); + } + + if tables.whatami != WhatAmI::Router || master || source_type == WhatAmI::Router { + for (sid, context) in &mres.session_ctxs { + if let Some(subinfo) = &context.subs { + if match tables.whatami { + WhatAmI::Router => context.face.whatami != WhatAmI::Router, + _ => { + source_type == WhatAmI::Client + || context.face.whatami == WhatAmI::Client + } + } && subinfo.mode == Mode::Push + { + route.entry(*sid).or_insert_with(|| { + let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, *sid); + (context.face.clone(), key_expr.to_owned(), None) + }); + } + } + } + } + } + for mcast_group in &tables.mcast_groups { + route.insert( + mcast_group.id, + ( + mcast_group.clone(), + expr.full_expr().to_string().into(), + None, + ), + ); + } + Arc::new(route) +} + +fn compute_matching_pulls(tables: &Tables, expr: &mut RoutingExpr) -> Arc { + let mut pull_caches = vec![]; + let ke = if let Ok(ke) = OwnedKeyExpr::try_from(expr.full_expr()) { + ke + } else { + return Arc::new(pull_caches); + }; + let res = Resource::get_resource(expr.prefix, expr.suffix); + let matches = res + .as_ref() + .and_then(|res| res.context.as_ref()) + .map(|ctx| Cow::from(&ctx.matches)) + .unwrap_or_else(|| Cow::from(Resource::get_matches(tables, &ke))); + + for mres in matches.iter() { + let mres = mres.upgrade().unwrap(); + for context in mres.session_ctxs.values() { + if let Some(subinfo) = &context.subs { + if subinfo.mode == Mode::Pull { + pull_caches.push(context.clone()); + } + } + } + } + Arc::new(pull_caches) +} + +pub(crate) fn compute_data_routes_(tables: &Tables, res: &Arc) -> DataRoutes { + let mut routes = DataRoutes { + matching_pulls: None, + routers_data_routes: vec![], + peers_data_routes: vec![], + peer_data_route: None, + client_data_route: None, + }; + let mut expr = RoutingExpr::new(res, ""); + if tables.whatami == WhatAmI::Router { + let indexes = tables + .hat + .routers_net + .as_ref() + .unwrap() + .graph + .node_indices() + .collect::>(); + let max_idx = indexes.iter().max().unwrap(); + routes + .routers_data_routes + .resize_with(max_idx.index() + 1, || Arc::new(HashMap::new())); + + for idx in &indexes { + routes.routers_data_routes[idx.index()] = + compute_data_route(tables, &mut expr, Some(idx.index()), WhatAmI::Router); + } + + routes.peer_data_route = Some(compute_data_route(tables, &mut expr, None, WhatAmI::Peer)); + } + if (tables.whatami == WhatAmI::Router || tables.whatami == WhatAmI::Peer) + && tables.hat.full_net(WhatAmI::Peer) + { + let indexes = tables + .hat + .peers_net + .as_ref() + .unwrap() + .graph + .node_indices() + .collect::>(); + let max_idx = indexes.iter().max().unwrap(); + routes + .peers_data_routes + 
.resize_with(max_idx.index() + 1, || Arc::new(HashMap::new())); + + for idx in &indexes { + routes.peers_data_routes[idx.index()] = + compute_data_route(tables, &mut expr, Some(idx.index()), WhatAmI::Peer); + } + } + if tables.whatami == WhatAmI::Peer && !tables.hat.full_net(WhatAmI::Peer) { + routes.client_data_route = + Some(compute_data_route(tables, &mut expr, None, WhatAmI::Client)); + routes.peer_data_route = Some(compute_data_route(tables, &mut expr, None, WhatAmI::Peer)); + } + if tables.whatami == WhatAmI::Client { + routes.client_data_route = + Some(compute_data_route(tables, &mut expr, None, WhatAmI::Client)); + } + routes.matching_pulls = Some(compute_matching_pulls(tables, &mut expr)); + routes +} + +pub(crate) fn compute_data_routes(tables: &mut Tables, res: &mut Arc) { + if res.context.is_some() { + let mut res_mut = res.clone(); + let res_mut = get_mut_unchecked(&mut res_mut); + let mut expr = RoutingExpr::new(res, ""); + if tables.whatami == WhatAmI::Router { + let indexes = tables + .hat + .routers_net + .as_ref() + .unwrap() + .graph + .node_indices() + .collect::>(); + let max_idx = indexes.iter().max().unwrap(); + let routers_data_routes = &mut res_mut.context_mut().routers_data_routes; + routers_data_routes.clear(); + routers_data_routes.resize_with(max_idx.index() + 1, || Arc::new(HashMap::new())); + + for idx in &indexes { + routers_data_routes[idx.index()] = + compute_data_route(tables, &mut expr, Some(idx.index()), WhatAmI::Router); + } + + res_mut.context_mut().peer_data_route = + Some(compute_data_route(tables, &mut expr, None, WhatAmI::Peer)); + } + if (tables.whatami == WhatAmI::Router || tables.whatami == WhatAmI::Peer) + && tables.hat.full_net(WhatAmI::Peer) + { + let indexes = tables + .hat + .peers_net + .as_ref() + .unwrap() + .graph + .node_indices() + .collect::>(); + let max_idx = indexes.iter().max().unwrap(); + let peers_data_routes = &mut res_mut.context_mut().peers_data_routes; + peers_data_routes.clear(); + peers_data_routes.resize_with(max_idx.index() + 1, || Arc::new(HashMap::new())); + + for idx in &indexes { + peers_data_routes[idx.index()] = + compute_data_route(tables, &mut expr, Some(idx.index()), WhatAmI::Peer); + } + } + if tables.whatami == WhatAmI::Peer && !tables.hat.full_net(WhatAmI::Peer) { + res_mut.context_mut().client_data_route = + Some(compute_data_route(tables, &mut expr, None, WhatAmI::Client)); + res_mut.context_mut().peer_data_route = + Some(compute_data_route(tables, &mut expr, None, WhatAmI::Peer)); + } + if tables.whatami == WhatAmI::Client { + res_mut.context_mut().client_data_route = + Some(compute_data_route(tables, &mut expr, None, WhatAmI::Client)); + } + res_mut.context_mut().matching_pulls = compute_matching_pulls(tables, &mut expr); + } +} + +pub(crate) fn compute_data_routes_from(tables: &mut Tables, res: &mut Arc) { + compute_data_routes(tables, res); + let res = get_mut_unchecked(res); + for child in res.childs.values_mut() { + compute_data_routes_from(tables, child); + } +} + +pub(crate) fn compute_matches_data_routes_<'a>( + tables: &'a Tables, + res: &'a Arc, +) -> Vec<(Arc, DataRoutes)> { + let mut routes = vec![]; + if res.context.is_some() { + routes.push((res.clone(), compute_data_routes_(tables, res))); + for match_ in &res.context().matches { + let match_ = match_.upgrade().unwrap(); + if !Arc::ptr_eq(&match_, res) { + let match_routes = compute_data_routes_(tables, &match_); + routes.push((match_, match_routes)); + } + } + } + routes +} + +pub(crate) fn disable_matches_data_routes(_tables: &mut 
Tables, res: &mut Arc) { + if res.context.is_some() { + get_mut_unchecked(res).context_mut().valid_data_routes = false; + for match_ in &res.context().matches { + let mut match_ = match_.upgrade().unwrap(); + if !Arc::ptr_eq(&match_, res) { + get_mut_unchecked(&mut match_) + .context_mut() + .valid_data_routes = false; + } + } + } +} + +macro_rules! treat_timestamp { + ($hlc:expr, $payload:expr, $drop:expr) => { + // if an HLC was configured (via Config.add_timestamp), + // check DataInfo and add a timestamp if there isn't + if let Some(hlc) = $hlc { + if let PushBody::Put(data) = &mut $payload { + if let Some(ref ts) = data.timestamp { + // Timestamp is present; update HLC with it (possibly raising error if delta exceed) + match hlc.update_with_timestamp(ts) { + Ok(()) => (), + Err(e) => { + if $drop { + log::error!( + "Error treating timestamp for received Data ({}). Drop it!", + e + ); + return; + } else { + data.timestamp = Some(hlc.new_timestamp()); + log::error!( + "Error treating timestamp for received Data ({}). Replace timestamp: {:?}", + e, + data.timestamp); + } + } + } + } else { + // Timestamp not present; add one + data.timestamp = Some(hlc.new_timestamp()); + log::trace!("Adding timestamp to DataInfo: {:?}", data.timestamp); + } + } + } + } +} + +#[inline] +fn get_data_route( + tables: &Tables, + face: &FaceState, + res: &Option>, + expr: &mut RoutingExpr, + routing_context: u64, +) -> Arc { + match tables.whatami { + WhatAmI::Router => match face.whatami { + WhatAmI::Router => { + let routers_net = tables.hat.routers_net.as_ref().unwrap(); + let local_context = routers_net.get_local_context(routing_context, face.link_id); + res.as_ref() + .and_then(|res| res.routers_data_route(local_context)) + .unwrap_or_else(|| { + compute_data_route(tables, expr, Some(local_context), face.whatami) + }) + } + WhatAmI::Peer => { + if tables.hat.full_net(WhatAmI::Peer) { + let peers_net = tables.hat.peers_net.as_ref().unwrap(); + let local_context = peers_net.get_local_context(routing_context, face.link_id); + res.as_ref() + .and_then(|res| res.peers_data_route(local_context)) + .unwrap_or_else(|| { + compute_data_route(tables, expr, Some(local_context), face.whatami) + }) + } else { + res.as_ref() + .and_then(|res| res.peer_data_route()) + .unwrap_or_else(|| compute_data_route(tables, expr, None, face.whatami)) + } + } + _ => res + .as_ref() + .and_then(|res| res.routers_data_route(0)) + .unwrap_or_else(|| compute_data_route(tables, expr, None, face.whatami)), + }, + WhatAmI::Peer => { + if tables.hat.full_net(WhatAmI::Peer) { + match face.whatami { + WhatAmI::Router | WhatAmI::Peer => { + let peers_net = tables.hat.peers_net.as_ref().unwrap(); + let local_context = + peers_net.get_local_context(routing_context, face.link_id); + res.as_ref() + .and_then(|res| res.peers_data_route(local_context)) + .unwrap_or_else(|| { + compute_data_route(tables, expr, Some(local_context), face.whatami) + }) + } + _ => res + .as_ref() + .and_then(|res| res.peers_data_route(0)) + .unwrap_or_else(|| compute_data_route(tables, expr, None, face.whatami)), + } + } else { + res.as_ref() + .and_then(|res| match face.whatami { + WhatAmI::Client => res.client_data_route(), + _ => res.peer_data_route(), + }) + .unwrap_or_else(|| compute_data_route(tables, expr, None, face.whatami)) + } + } + _ => res + .as_ref() + .and_then(|res| res.client_data_route()) + .unwrap_or_else(|| compute_data_route(tables, expr, None, face.whatami)), + } +} + +#[inline] +fn get_matching_pulls( + tables: &Tables, + res: &Option>, + expr: 
&mut RoutingExpr, +) -> Arc { + res.as_ref() + .and_then(|res| res.context.as_ref()) + .map(|ctx| ctx.matching_pulls.clone()) + .unwrap_or_else(|| compute_matching_pulls(tables, expr)) +} + +macro_rules! cache_data { + ( + $matching_pulls:expr, + $expr:expr, + $payload:expr + ) => { + for context in $matching_pulls.iter() { + get_mut_unchecked(&mut context.clone()) + .last_values + .insert($expr.full_expr().to_string(), $payload.clone()); + } + }; +} + +#[inline] +fn should_route( + tables: &Tables, + src_face: &FaceState, + outface: &Arc, + expr: &mut RoutingExpr, +) -> bool { + if src_face.id != outface.id + && match (src_face.mcast_group.as_ref(), outface.mcast_group.as_ref()) { + (Some(l), Some(r)) => l != r, + _ => true, + } + { + let dst_master = tables.whatami != WhatAmI::Router + || outface.whatami != WhatAmI::Peer + || tables.hat.peers_net.is_none() + || tables.zid + == *tables.hat.elect_router( + &tables.zid, + expr.full_expr(), + tables.hat.get_router_links(outface.zid), + ); + + return dst_master + && (src_face.whatami != WhatAmI::Peer + || outface.whatami != WhatAmI::Peer + || tables.hat.full_net(WhatAmI::Peer) + || tables.hat.failover_brokering(src_face.zid, outface.zid)); + } + false +} + +#[cfg(feature = "stats")] +macro_rules! inc_stats { + ( + $face:expr, + $txrx:ident, + $space:ident, + $body:expr + ) => { + paste::paste! { + if let Some(stats) = $face.stats.as_ref() { + use zenoh_buffers::SplitBuffer; + match &$body { + PushBody::Put(p) => { + stats.[<$txrx _z_put_msgs>].[](1); + stats.[<$txrx _z_put_pl_bytes>].[](p.payload.len()); + } + PushBody::Del(_) => { + stats.[<$txrx _z_del_msgs>].[](1); + } + } + } + } + }; +} + +#[allow(clippy::too_many_arguments)] +pub fn full_reentrant_route_data( + tables_ref: &RwLock, + face: &FaceState, + expr: &WireExpr, + ext_qos: ext::QoSType, + mut payload: PushBody, + routing_context: u64, +) { + let tables = zread!(tables_ref); + match tables.get_mapping(face, &expr.scope, expr.mapping).cloned() { + Some(prefix) => { + log::trace!( + "Route data for res {}{}", + prefix.expr(), + expr.suffix.as_ref() + ); + let mut expr = RoutingExpr::new(&prefix, expr.suffix.as_ref()); + + #[cfg(feature = "stats")] + let admin = expr.full_expr().starts_with("@/"); + #[cfg(feature = "stats")] + if !admin { + inc_stats!(face, rx, user, payload) + } else { + inc_stats!(face, rx, admin, payload) + } + + if tables.whatami != WhatAmI::Router + || face.whatami != WhatAmI::Peer + || tables.hat.peers_net.is_none() + || tables.zid + == *tables.hat.elect_router( + &tables.zid, + expr.full_expr(), + tables.hat.get_router_links(face.zid), + ) + { + let res = Resource::get_resource(&prefix, expr.suffix); + let route = get_data_route(&tables, face, &res, &mut expr, routing_context); + let matching_pulls = get_matching_pulls(&tables, &res, &mut expr); + + if !(route.is_empty() && matching_pulls.is_empty()) { + treat_timestamp!(&tables.hlc, payload, tables.drop_future_timestamp); + + if route.len() == 1 && matching_pulls.len() == 0 { + let (outface, key_expr, context) = route.values().next().unwrap(); + if should_route(&tables, face, outface, &mut expr) { + drop(tables); + #[cfg(feature = "stats")] + if !admin { + inc_stats!(face, tx, user, payload) + } else { + inc_stats!(face, tx, admin, payload) + } + + outface.primitives.send_push(Push { + wire_expr: key_expr.into(), + ext_qos, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType { + node_id: context.unwrap_or(0), + }, + payload, + }) + } + } else { + if !matching_pulls.is_empty() { + let lock = 
zlock!(tables.pull_caches_lock); + cache_data!(matching_pulls, expr, payload); + drop(lock); + } + + if tables.whatami == WhatAmI::Router { + let route = route + .values() + .filter(|(outface, _key_expr, _context)| { + should_route(&tables, face, outface, &mut expr) + }) + .cloned() + .collect::>(); + + drop(tables); + for (outface, key_expr, context) in route { + #[cfg(feature = "stats")] + if !admin { + inc_stats!(face, tx, user, payload) + } else { + inc_stats!(face, tx, admin, payload) + } + + outface.primitives.send_push(Push { + wire_expr: key_expr, + ext_qos, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType { + node_id: context.unwrap_or(0), + }, + payload: payload.clone(), + }) + } + } else { + drop(tables); + for (outface, key_expr, context) in route.values() { + if face.id != outface.id + && match ( + face.mcast_group.as_ref(), + outface.mcast_group.as_ref(), + ) { + (Some(l), Some(r)) => l != r, + _ => true, + } + { + #[cfg(feature = "stats")] + if !admin { + inc_stats!(face, tx, user, payload) + } else { + inc_stats!(face, tx, admin, payload) + } + + outface.primitives.send_push(Push { + wire_expr: key_expr.into(), + ext_qos, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType { + node_id: context.unwrap_or(0), + }, + payload: payload.clone(), + }) + } + } + } + } + } + } + } + None => { + log::error!("Route data with unknown scope {}!", expr.scope); + } + } +} + +pub fn pull_data(tables_ref: &RwLock, face: &Arc, expr: WireExpr) { + let tables = zread!(tables_ref); + match tables.get_mapping(face, &expr.scope, expr.mapping) { + Some(prefix) => match Resource::get_resource(prefix, expr.suffix.as_ref()) { + Some(mut res) => { + let res = get_mut_unchecked(&mut res); + match res.session_ctxs.get_mut(&face.id) { + Some(ctx) => match &ctx.subs { + Some(_subinfo) => { + // let reliability = subinfo.reliability; + let lock = zlock!(tables.pull_caches_lock); + let route = get_mut_unchecked(ctx) + .last_values + .drain() + .map(|(name, sample)| { + ( + Resource::get_best_key(&tables.root_res, &name, face.id) + .to_owned(), + sample, + ) + }) + .collect::>(); + drop(lock); + drop(tables); + for (key_expr, payload) in route { + face.primitives.send_push(Push { + wire_expr: key_expr, + ext_qos: ext::QoSType::push_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + payload, + }); + } + } + None => { + log::error!( + "Pull data for unknown subscription {} (no info)!", + prefix.expr() + expr.suffix.as_ref() + ); + } + }, + None => { + log::error!( + "Pull data for unknown subscription {} (no context)!", + prefix.expr() + expr.suffix.as_ref() + ); + } + } + } + None => { + log::error!( + "Pull data for unknown subscription {} (no resource)!", + prefix.expr() + expr.suffix.as_ref() + ); + } + }, + None => { + log::error!("Pull data with unknown scope {}!", expr.scope); + } + }; +} diff --git a/zenoh/src/net/routing/dispatcher/queries.rs b/zenoh/src/net/routing/dispatcher/queries.rs new file mode 100644 index 0000000000..3d7264dc12 --- /dev/null +++ b/zenoh/src/net/routing/dispatcher/queries.rs @@ -0,0 +1,1076 @@ +use crate::net::routing::PREFIX_LIVELINESS; + +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
+// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +use super::super::hat::network::Network; +use super::face::FaceState; +use super::resource::{QueryRoute, QueryRoutes, QueryTargetQabl, QueryTargetQablSet, Resource}; +use super::tables::{RoutingExpr, Tables, TablesLock}; +use async_trait::async_trait; +use ordered_float::OrderedFloat; +use petgraph::graph::NodeIndex; +use std::borrow::Cow; +use std::collections::HashMap; +use std::convert::TryFrom; +use std::sync::{Arc, Weak}; +use zenoh_buffers::ZBuf; +use zenoh_protocol::{ + core::{ + key_expr::{ + include::{Includer, DEFAULT_INCLUDER}, + OwnedKeyExpr, + }, + Encoding, WhatAmI, WireExpr, ZenohId, + }, + network::{ + declare::{ext, queryable::ext::QueryableInfo}, + request::{ext::TargetType, Request, RequestId}, + response::{self, ext::ResponderIdType, Response, ResponseFinal}, + }, + zenoh::{reply::ext::ConsolidationType, Reply, RequestBody, ResponseBody}, +}; +use zenoh_sync::get_mut_unchecked; +use zenoh_util::Timed; + +pub(crate) struct Query { + src_face: Arc, + src_qid: RequestId, +} + +#[inline] +#[allow(clippy::too_many_arguments)] +fn insert_target_for_qabls( + route: &mut QueryTargetQablSet, + expr: &mut RoutingExpr, + tables: &Tables, + net: &Network, + source: usize, + qabls: &HashMap, + complete: bool, +) { + if net.trees.len() > source { + for (qabl, qabl_info) in qabls { + if let Some(qabl_idx) = net.get_idx(qabl) { + if net.trees[source].directions.len() > qabl_idx.index() { + if let Some(direction) = net.trees[source].directions[qabl_idx.index()] { + if net.graph.contains_node(direction) { + if let Some(face) = tables.get_face(&net.graph[direction].zid) { + if net.distances.len() > qabl_idx.index() { + let key_expr = + Resource::get_best_key(expr.prefix, expr.suffix, face.id); + route.push(QueryTargetQabl { + direction: ( + face.clone(), + key_expr.to_owned(), + if source != 0 { + Some(source as u16) + } else { + None + }, + ), + complete: if complete { + qabl_info.complete as u64 + } else { + 0 + }, + distance: net.distances[qabl_idx.index()], + }); + } + } + } + } + } + } + } + } else { + log::trace!("Tree for node sid:{} not yet ready", source); + } +} + +lazy_static::lazy_static! 
{ + static ref EMPTY_ROUTE: Arc = Arc::new(Vec::new()); +} +fn compute_query_route( + tables: &Tables, + expr: &mut RoutingExpr, + source: Option, + source_type: WhatAmI, +) -> Arc { + let mut route = QueryTargetQablSet::new(); + let key_expr = expr.full_expr(); + if key_expr.ends_with('/') { + return EMPTY_ROUTE.clone(); + } + log::trace!( + "compute_query_route({}, {:?}, {:?})", + key_expr, + source, + source_type + ); + let key_expr = match OwnedKeyExpr::try_from(key_expr) { + Ok(ke) => ke, + Err(e) => { + log::warn!("Invalid KE reached the system: {}", e); + return EMPTY_ROUTE.clone(); + } + }; + let res = Resource::get_resource(expr.prefix, expr.suffix); + let matches = res + .as_ref() + .and_then(|res| res.context.as_ref()) + .map(|ctx| Cow::from(&ctx.matches)) + .unwrap_or_else(|| Cow::from(Resource::get_matches(tables, &key_expr))); + + let master = tables.whatami != WhatAmI::Router + || !tables.hat.full_net(WhatAmI::Peer) + || *tables + .hat + .elect_router(&tables.zid, &key_expr, tables.hat.shared_nodes.iter()) + == tables.zid; + + for mres in matches.iter() { + let mres = mres.upgrade().unwrap(); + let complete = DEFAULT_INCLUDER.includes(mres.expr().as_bytes(), key_expr.as_bytes()); + if tables.whatami == WhatAmI::Router { + if master || source_type == WhatAmI::Router { + let net = tables.hat.routers_net.as_ref().unwrap(); + let router_source = match source_type { + WhatAmI::Router => source.unwrap(), + _ => net.idx.index(), + }; + insert_target_for_qabls( + &mut route, + expr, + tables, + net, + router_source, + &mres.context().router_qabls, + complete, + ); + } + + if (master || source_type != WhatAmI::Router) && tables.hat.full_net(WhatAmI::Peer) { + let net = tables.hat.peers_net.as_ref().unwrap(); + let peer_source = match source_type { + WhatAmI::Peer => source.unwrap(), + _ => net.idx.index(), + }; + insert_target_for_qabls( + &mut route, + expr, + tables, + net, + peer_source, + &mres.context().peer_qabls, + complete, + ); + } + } + + if tables.whatami == WhatAmI::Peer && tables.hat.full_net(WhatAmI::Peer) { + let net = tables.hat.peers_net.as_ref().unwrap(); + let peer_source = match source_type { + WhatAmI::Router | WhatAmI::Peer => source.unwrap(), + _ => net.idx.index(), + }; + insert_target_for_qabls( + &mut route, + expr, + tables, + net, + peer_source, + &mres.context().peer_qabls, + complete, + ); + } + + if tables.whatami != WhatAmI::Router || master || source_type == WhatAmI::Router { + for (sid, context) in &mres.session_ctxs { + if match tables.whatami { + WhatAmI::Router => context.face.whatami != WhatAmI::Router, + _ => source_type == WhatAmI::Client || context.face.whatami == WhatAmI::Client, + } { + let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, *sid); + if let Some(qabl_info) = context.qabl.as_ref() { + route.push(QueryTargetQabl { + direction: (context.face.clone(), key_expr.to_owned(), None), + complete: if complete { + qabl_info.complete as u64 + } else { + 0 + }, + distance: 0.5, + }); + } + } + } + } + } + route.sort_by_key(|qabl| OrderedFloat(qabl.distance)); + Arc::new(route) +} + +pub(crate) fn compute_query_routes_(tables: &Tables, res: &Arc) -> QueryRoutes { + let mut routes = QueryRoutes { + routers_query_routes: vec![], + peers_query_routes: vec![], + peer_query_route: None, + client_query_route: None, + }; + let mut expr = RoutingExpr::new(res, ""); + if tables.whatami == WhatAmI::Router { + let indexes = tables + .hat + .routers_net + .as_ref() + .unwrap() + .graph + .node_indices() + .collect::>(); + let max_idx = 
indexes.iter().max().unwrap(); + routes + .routers_query_routes + .resize_with(max_idx.index() + 1, || Arc::new(QueryTargetQablSet::new())); + + for idx in &indexes { + routes.routers_query_routes[idx.index()] = + compute_query_route(tables, &mut expr, Some(idx.index()), WhatAmI::Router); + } + + routes.peer_query_route = Some(compute_query_route(tables, &mut expr, None, WhatAmI::Peer)); + } + if (tables.whatami == WhatAmI::Router || tables.whatami == WhatAmI::Peer) + && tables.hat.full_net(WhatAmI::Peer) + { + let indexes = tables + .hat + .peers_net + .as_ref() + .unwrap() + .graph + .node_indices() + .collect::>(); + let max_idx = indexes.iter().max().unwrap(); + routes + .peers_query_routes + .resize_with(max_idx.index() + 1, || Arc::new(QueryTargetQablSet::new())); + + for idx in &indexes { + routes.peers_query_routes[idx.index()] = + compute_query_route(tables, &mut expr, Some(idx.index()), WhatAmI::Peer); + } + } + if tables.whatami == WhatAmI::Peer && !tables.hat.full_net(WhatAmI::Peer) { + routes.client_query_route = Some(compute_query_route( + tables, + &mut expr, + None, + WhatAmI::Client, + )); + routes.peer_query_route = Some(compute_query_route(tables, &mut expr, None, WhatAmI::Peer)); + } + if tables.whatami == WhatAmI::Client { + routes.client_query_route = Some(compute_query_route( + tables, + &mut expr, + None, + WhatAmI::Client, + )); + } + routes +} + +pub(crate) fn compute_query_routes(tables: &mut Tables, res: &mut Arc) { + if res.context.is_some() { + let mut res_mut = res.clone(); + let res_mut = get_mut_unchecked(&mut res_mut); + let mut expr = RoutingExpr::new(res, ""); + if tables.whatami == WhatAmI::Router { + let indexes = tables + .hat + .routers_net + .as_ref() + .unwrap() + .graph + .node_indices() + .collect::>(); + let max_idx = indexes.iter().max().unwrap(); + let routers_query_routes = &mut res_mut.context_mut().routers_query_routes; + routers_query_routes.clear(); + routers_query_routes + .resize_with(max_idx.index() + 1, || Arc::new(QueryTargetQablSet::new())); + + for idx in &indexes { + routers_query_routes[idx.index()] = + compute_query_route(tables, &mut expr, Some(idx.index()), WhatAmI::Router); + } + + res_mut.context_mut().peer_query_route = + Some(compute_query_route(tables, &mut expr, None, WhatAmI::Peer)); + } + if (tables.whatami == WhatAmI::Router || tables.whatami == WhatAmI::Peer) + && tables.hat.full_net(WhatAmI::Peer) + { + let indexes = tables + .hat + .peers_net + .as_ref() + .unwrap() + .graph + .node_indices() + .collect::>(); + let max_idx = indexes.iter().max().unwrap(); + let peers_query_routes = &mut res_mut.context_mut().peers_query_routes; + peers_query_routes.clear(); + peers_query_routes + .resize_with(max_idx.index() + 1, || Arc::new(QueryTargetQablSet::new())); + + for idx in &indexes { + peers_query_routes[idx.index()] = + compute_query_route(tables, &mut expr, Some(idx.index()), WhatAmI::Peer); + } + } + if tables.whatami == WhatAmI::Peer && !tables.hat.full_net(WhatAmI::Peer) { + res_mut.context_mut().client_query_route = Some(compute_query_route( + tables, + &mut expr, + None, + WhatAmI::Client, + )); + res_mut.context_mut().peer_query_route = + Some(compute_query_route(tables, &mut expr, None, WhatAmI::Peer)); + } + if tables.whatami == WhatAmI::Client { + res_mut.context_mut().client_query_route = Some(compute_query_route( + tables, + &mut expr, + None, + WhatAmI::Client, + )); + } + } +} + +pub(crate) fn compute_query_routes_from(tables: &mut Tables, res: &mut Arc) { + compute_query_routes(tables, res); + let res = 
get_mut_unchecked(res); + for child in res.childs.values_mut() { + compute_query_routes_from(tables, child); + } +} + +pub(crate) fn compute_matches_query_routes_( + tables: &Tables, + res: &Arc, +) -> Vec<(Arc, QueryRoutes)> { + let mut routes = vec![]; + if res.context.is_some() { + routes.push((res.clone(), compute_query_routes_(tables, res))); + for match_ in &res.context().matches { + let match_ = match_.upgrade().unwrap(); + if !Arc::ptr_eq(&match_, res) { + let match_routes = compute_query_routes_(tables, &match_); + routes.push((match_, match_routes)); + } + } + } + routes +} + +#[inline] +fn insert_pending_query(outface: &mut Arc, query: Arc) -> RequestId { + let outface_mut = get_mut_unchecked(outface); + outface_mut.next_qid += 1; + let qid = outface_mut.next_qid; + outface_mut.pending_queries.insert(qid, query); + qid +} + +#[inline] +fn should_route( + tables: &Tables, + src_face: &FaceState, + outface: &Arc, + expr: &mut RoutingExpr, +) -> bool { + if src_face.id != outface.id { + let dst_master = tables.whatami != WhatAmI::Router + || outface.whatami != WhatAmI::Peer + || tables.hat.peers_net.is_none() + || tables.zid + == *tables.hat.elect_router( + &tables.zid, + expr.full_expr(), + tables.hat.get_router_links(outface.zid), + ); + + return dst_master + && (src_face.whatami != WhatAmI::Peer + || outface.whatami != WhatAmI::Peer + || tables.hat.full_net(WhatAmI::Peer) + || tables.hat.failover_brokering(src_face.zid, outface.zid)); + } + false +} + +#[inline] +fn compute_final_route( + tables: &Tables, + qabls: &Arc, + src_face: &Arc, + expr: &mut RoutingExpr, + target: &TargetType, + query: Arc, +) -> QueryRoute { + match target { + TargetType::All => { + let mut route = HashMap::new(); + for qabl in qabls.iter() { + if should_route(tables, src_face, &qabl.direction.0, expr) { + #[cfg(feature = "complete_n")] + { + route.entry(qabl.direction.0.id).or_insert_with(|| { + let mut direction = qabl.direction.clone(); + let qid = insert_pending_query(&mut direction.0, query.clone()); + (direction, qid, *target) + }); + } + #[cfg(not(feature = "complete_n"))] + { + route.entry(qabl.direction.0.id).or_insert_with(|| { + let mut direction = qabl.direction.clone(); + let qid = insert_pending_query(&mut direction.0, query.clone()); + (direction, qid) + }); + } + } + } + route + } + TargetType::AllComplete => { + let mut route = HashMap::new(); + for qabl in qabls.iter() { + if qabl.complete > 0 && should_route(tables, src_face, &qabl.direction.0, expr) { + #[cfg(feature = "complete_n")] + { + route.entry(qabl.direction.0.id).or_insert_with(|| { + let mut direction = qabl.direction.clone(); + let qid = insert_pending_query(&mut direction.0, query.clone()); + (direction, qid, *target) + }); + } + #[cfg(not(feature = "complete_n"))] + { + route.entry(qabl.direction.0.id).or_insert_with(|| { + let mut direction = qabl.direction.clone(); + let qid = insert_pending_query(&mut direction.0, query.clone()); + (direction, qid) + }); + } + } + } + route + } + #[cfg(feature = "complete_n")] + TargetType::Complete(n) => { + let mut route = HashMap::new(); + let mut remaining = *n; + if src_face.whatami == WhatAmI::Peer && !tables.full_net(WhatAmI::Peer) { + let source_links = tables + .peers_net + .as_ref() + .map(|net| net.get_links(src_face.zid)) + .unwrap_or_default(); + for qabl in qabls.iter() { + if qabl.direction.0.id != src_face.id + && qabl.complete > 0 + && (qabl.direction.0.whatami != WhatAmI::Peer + || (tables.router_peers_failover_brokering + && Tables::failover_brokering_to( + 
source_links, + qabl.direction.0.zid, + ))) + { + let nb = std::cmp::min(qabl.complete, remaining); + route.entry(qabl.direction.0.id).or_insert_with(|| { + let mut direction = qabl.direction.clone(); + let qid = insert_pending_query(&mut direction.0, query.clone()); + (direction, qid, TargetType::Complete(nb)) + }); + remaining -= nb; + if remaining == 0 { + break; + } + } + } + } else { + for qabl in qabls.iter() { + if qabl.direction.0.id != src_face.id && qabl.complete > 0 { + let nb = std::cmp::min(qabl.complete, remaining); + route.entry(qabl.direction.0.id).or_insert_with(|| { + let mut direction = qabl.direction.clone(); + let qid = insert_pending_query(&mut direction.0, query.clone()); + (direction, qid, TargetType::Complete(nb)) + }); + remaining -= nb; + if remaining == 0 { + break; + } + } + } + } + route + } + TargetType::BestMatching => { + if let Some(qabl) = qabls + .iter() + .find(|qabl| qabl.direction.0.id != src_face.id && qabl.complete > 0) + { + let mut route = HashMap::new(); + #[cfg(feature = "complete_n")] + { + let mut direction = qabl.direction.clone(); + let qid = insert_pending_query(&mut direction.0, query); + route.insert(direction.0.id, (direction, qid, *target)); + } + #[cfg(not(feature = "complete_n"))] + { + let mut direction = qabl.direction.clone(); + let qid = insert_pending_query(&mut direction.0, query); + route.insert(direction.0.id, (direction, qid)); + } + route + } else { + compute_final_route(tables, qabls, src_face, expr, &TargetType::All, query) + } + } + } +} + +#[inline] +fn compute_local_replies( + tables: &Tables, + prefix: &Arc, + suffix: &str, + face: &Arc, +) -> Vec<(WireExpr<'static>, ZBuf)> { + let mut result = vec![]; + // Only the first routing point in the query route + // should return the liveliness tokens + if face.whatami == WhatAmI::Client { + let key_expr = prefix.expr() + suffix; + let key_expr = match OwnedKeyExpr::try_from(key_expr) { + Ok(ke) => ke, + Err(e) => { + log::warn!("Invalid KE reached the system: {}", e); + return result; + } + }; + if key_expr.starts_with(PREFIX_LIVELINESS) { + let res = Resource::get_resource(prefix, suffix); + let matches = res + .as_ref() + .and_then(|res| res.context.as_ref()) + .map(|ctx| Cow::from(&ctx.matches)) + .unwrap_or_else(|| Cow::from(Resource::get_matches(tables, &key_expr))); + for mres in matches.iter() { + let mres = mres.upgrade().unwrap(); + if (mres.context.is_some() + && (!mres.context().router_subs.is_empty() + || !mres.context().peer_subs.is_empty())) + || mres.session_ctxs.values().any(|ctx| ctx.subs.is_some()) + { + result.push((Resource::get_best_key(&mres, "", face.id), ZBuf::default())); + } + } + } + } + result +} + +#[derive(Clone)] +struct QueryCleanup { + tables: Arc, + face: Weak, + qid: RequestId, +} + +#[async_trait] +impl Timed for QueryCleanup { + async fn run(&mut self) { + if let Some(mut face) = self.face.upgrade() { + let tables_lock = zwrite!(self.tables.tables); + if let Some(query) = get_mut_unchecked(&mut face) + .pending_queries + .remove(&self.qid) + { + drop(tables_lock); + log::warn!( + "Didn't receive final reply {}:{} from {}: Timeout!", + query.src_face, + self.qid, + face + ); + finalize_pending_query(query); + } + } + } +} + +pub(crate) fn disable_matches_query_routes(_tables: &mut Tables, res: &mut Arc) { + if res.context.is_some() { + get_mut_unchecked(res).context_mut().valid_query_routes = false; + for match_ in &res.context().matches { + let mut match_ = match_.upgrade().unwrap(); + if !Arc::ptr_eq(&match_, res) { + 
get_mut_unchecked(&mut match_) + .context_mut() + .valid_query_routes = false; + } + } + } +} + +#[inline] +fn get_query_route( + tables: &Tables, + face: &FaceState, + res: &Option>, + expr: &mut RoutingExpr, + routing_context: u64, +) -> Arc { + match tables.whatami { + WhatAmI::Router => match face.whatami { + WhatAmI::Router => { + let routers_net = tables.hat.routers_net.as_ref().unwrap(); + let local_context = routers_net.get_local_context(routing_context, face.link_id); + res.as_ref() + .and_then(|res| res.routers_query_route(local_context)) + .unwrap_or_else(|| { + compute_query_route(tables, expr, Some(local_context), face.whatami) + }) + } + WhatAmI::Peer => { + if tables.hat.full_net(WhatAmI::Peer) { + let peers_net = tables.hat.peers_net.as_ref().unwrap(); + let local_context = peers_net.get_local_context(routing_context, face.link_id); + res.as_ref() + .and_then(|res| res.peers_query_route(local_context)) + .unwrap_or_else(|| { + compute_query_route(tables, expr, Some(local_context), face.whatami) + }) + } else { + res.as_ref() + .and_then(|res| res.peer_query_route()) + .unwrap_or_else(|| compute_query_route(tables, expr, None, face.whatami)) + } + } + _ => res + .as_ref() + .and_then(|res| res.routers_query_route(0)) + .unwrap_or_else(|| compute_query_route(tables, expr, None, face.whatami)), + }, + WhatAmI::Peer => { + if tables.hat.full_net(WhatAmI::Peer) { + match face.whatami { + WhatAmI::Router | WhatAmI::Peer => { + let peers_net = tables.hat.peers_net.as_ref().unwrap(); + let local_context = + peers_net.get_local_context(routing_context, face.link_id); + res.as_ref() + .and_then(|res| res.peers_query_route(local_context)) + .unwrap_or_else(|| { + compute_query_route(tables, expr, Some(local_context), face.whatami) + }) + } + _ => res + .as_ref() + .and_then(|res| res.peers_query_route(0)) + .unwrap_or_else(|| compute_query_route(tables, expr, None, face.whatami)), + } + } else { + res.as_ref() + .and_then(|res| match face.whatami { + WhatAmI::Client => res.client_query_route(), + _ => res.peer_query_route(), + }) + .unwrap_or_else(|| compute_query_route(tables, expr, None, face.whatami)) + } + } + _ => res + .as_ref() + .and_then(|res| res.client_query_route()) + .unwrap_or_else(|| compute_query_route(tables, expr, None, face.whatami)), + } +} + +#[cfg(feature = "stats")] +macro_rules! inc_req_stats { + ( + $face:expr, + $txrx:ident, + $space:ident, + $body:expr + ) => { + paste::paste! { + if let Some(stats) = $face.stats.as_ref() { + use zenoh_buffers::SplitBuffer; + match &$body { + RequestBody::Put(p) => { + stats.[<$txrx _z_put_msgs>].[](1); + stats.[<$txrx _z_put_pl_bytes>].[](p.payload.len()); + } + RequestBody::Del(_) => { + stats.[<$txrx _z_del_msgs>].[](1); + } + RequestBody::Query(q) => { + stats.[<$txrx _z_query_msgs>].[](1); + stats.[<$txrx _z_query_pl_bytes>].[]( + q.ext_body.as_ref().map(|b| b.payload.len()).unwrap_or(0), + ); + } + RequestBody::Pull(_) => (), + } + } + } + }; +} + +#[cfg(feature = "stats")] +macro_rules! inc_res_stats { + ( + $face:expr, + $txrx:ident, + $space:ident, + $body:expr + ) => { + paste::paste! 
{ + if let Some(stats) = $face.stats.as_ref() { + use zenoh_buffers::SplitBuffer; + match &$body { + ResponseBody::Put(p) => { + stats.[<$txrx _z_put_msgs>].[](1); + stats.[<$txrx _z_put_pl_bytes>].[](p.payload.len()); + } + ResponseBody::Reply(r) => { + stats.[<$txrx _z_reply_msgs>].[](1); + stats.[<$txrx _z_reply_pl_bytes>].[](r.payload.len()); + } + ResponseBody::Err(e) => { + stats.[<$txrx _z_reply_msgs>].[](1); + stats.[<$txrx _z_reply_pl_bytes>].[]( + e.ext_body.as_ref().map(|b| b.payload.len()).unwrap_or(0), + ); + } + ResponseBody::Ack(_) => (), + } + } + } + }; +} + +#[allow(clippy::too_many_arguments)] +pub fn route_query( + tables_ref: &Arc, + face: &Arc, + expr: &WireExpr, + qid: RequestId, + target: TargetType, + body: RequestBody, + routing_context: u64, +) { + let rtables = zread!(tables_ref.tables); + match rtables.get_mapping(face, &expr.scope, expr.mapping) { + Some(prefix) => { + log::debug!( + "Route query {}:{} for res {}{}", + face, + qid, + prefix.expr(), + expr.suffix.as_ref(), + ); + let prefix = prefix.clone(); + let mut expr = RoutingExpr::new(&prefix, expr.suffix.as_ref()); + + #[cfg(feature = "stats")] + let admin = expr.full_expr().starts_with("@/"); + #[cfg(feature = "stats")] + if !admin { + inc_req_stats!(face, rx, user, body) + } else { + inc_req_stats!(face, rx, admin, body) + } + + if rtables.whatami != WhatAmI::Router + || face.whatami != WhatAmI::Peer + || rtables.hat.peers_net.is_none() + || rtables.zid + == *rtables.hat.elect_router( + &rtables.zid, + expr.full_expr(), + rtables.hat.get_router_links(face.zid), + ) + { + let res = Resource::get_resource(&prefix, expr.suffix); + let route = get_query_route(&rtables, face, &res, &mut expr, routing_context); + + let query = Arc::new(Query { + src_face: face.clone(), + src_qid: qid, + }); + + let queries_lock = zwrite!(tables_ref.queries_lock); + let route = compute_final_route(&rtables, &route, face, &mut expr, &target, query); + let local_replies = compute_local_replies(&rtables, &prefix, expr.suffix, face); + let zid = rtables.zid; + + drop(queries_lock); + drop(rtables); + + for (expr, payload) in local_replies { + let payload = ResponseBody::Reply(Reply { + timestamp: None, + encoding: Encoding::default(), + ext_sinfo: None, + ext_consolidation: ConsolidationType::default(), + #[cfg(feature = "shared-memory")] + ext_shm: None, + ext_unknown: vec![], + payload, + }); + #[cfg(feature = "stats")] + if !admin { + inc_res_stats!(face, tx, user, payload) + } else { + inc_res_stats!(face, tx, admin, payload) + } + + face.primitives.clone().send_response(Response { + rid: qid, + wire_expr: expr, + payload, + ext_qos: response::ext::QoSType::declare_default(), + ext_tstamp: None, + ext_respid: Some(response::ext::ResponderIdType { + zid, + eid: 0, // TODO + }), + }); + } + + if route.is_empty() { + log::debug!( + "Send final reply {}:{} (no matching queryables or not master)", + face, + qid + ); + face.primitives.clone().send_response_final(ResponseFinal { + rid: qid, + ext_qos: response::ext::QoSType::response_final_default(), + ext_tstamp: None, + }); + } else { + // let timer = tables.timer.clone(); + // let timeout = tables.queries_default_timeout; + #[cfg(feature = "complete_n")] + { + for ((outface, key_expr, context), qid, t) in route.values() { + // timer.add(TimedEvent::once( + // Instant::now() + timeout, + // QueryCleanup { + // tables: tables_ref.clone(), + // face: Arc::downgrade(&outface), + // *qid, + // }, + // )); + #[cfg(feature = "stats")] + if !admin { + inc_req_stats!(outface, tx, user, 
body) + } else { + inc_req_stats!(outface, tx, admin, body) + } + + log::trace!("Propagate query {}:{} to {}", face, qid, outface); + outface.primitives.send_request(Request { + id: *qid, + wire_expr: key_expr.into(), + ext_qos: ext::QoSType::request_default(), // TODO + ext_tstamp: None, + ext_nodeid: ext::NodeIdType { + node_id: context.unwrap_or(0), + }, + ext_target: *t, + ext_budget: None, + ext_timeout: None, + payload: body.clone(), + }); + } + } + + #[cfg(not(feature = "complete_n"))] + { + for ((outface, key_expr, context), qid) in route.values() { + // timer.add(TimedEvent::once( + // Instant::now() + timeout, + // QueryCleanup { + // tables: tables_ref.clone(), + // face: Arc::downgrade(&outface), + // *qid, + // }, + // )); + #[cfg(feature = "stats")] + if !admin { + inc_req_stats!(outface, tx, user, body) + } else { + inc_req_stats!(outface, tx, admin, body) + } + + log::trace!("Propagate query {}:{} to {}", face, qid, outface); + outface.primitives.send_request(Request { + id: *qid, + wire_expr: key_expr.into(), + ext_qos: ext::QoSType::request_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType { + node_id: context.unwrap_or(0), + }, + ext_target: target, + ext_budget: None, + ext_timeout: None, + payload: body.clone(), + }); + } + } + } + } else { + log::debug!("Send final reply {}:{} (not master)", face, qid); + drop(rtables); + face.primitives.clone().send_response_final(ResponseFinal { + rid: qid, + ext_qos: response::ext::QoSType::response_final_default(), + ext_tstamp: None, + }); + } + } + None => { + log::error!( + "Route query with unknown scope {}! Send final reply.", + expr.scope + ); + drop(rtables); + face.primitives.clone().send_response_final(ResponseFinal { + rid: qid, + ext_qos: response::ext::QoSType::response_final_default(), + ext_tstamp: None, + }); + } + } +} + +#[allow(clippy::too_many_arguments)] +pub(crate) fn route_send_response( + tables_ref: &Arc, + face: &mut Arc, + qid: RequestId, + ext_respid: Option, + key_expr: WireExpr, + body: ResponseBody, +) { + let queries_lock = zread!(tables_ref.queries_lock); + #[cfg(feature = "stats")] + let admin = key_expr.as_str().starts_with("@/"); + #[cfg(feature = "stats")] + if !admin { + inc_res_stats!(face, rx, user, body) + } else { + inc_res_stats!(face, rx, admin, body) + } + + match face.pending_queries.get(&qid) { + Some(query) => { + drop(queries_lock); + + #[cfg(feature = "stats")] + if !admin { + inc_res_stats!(query.src_face, tx, user, body) + } else { + inc_res_stats!(query.src_face, tx, admin, body) + } + + query.src_face.primitives.clone().send_response(Response { + rid: query.src_qid, + wire_expr: key_expr.to_owned(), + payload: body, + ext_qos: response::ext::QoSType::response_default(), + ext_tstamp: None, + ext_respid, + }); + } + None => log::warn!( + "Route reply {}:{} from {}: Query nof found!", + face, + qid, + face + ), + } +} + +pub(crate) fn route_send_response_final( + tables_ref: &Arc, + face: &mut Arc, + qid: RequestId, +) { + let queries_lock = zwrite!(tables_ref.queries_lock); + match get_mut_unchecked(face).pending_queries.remove(&qid) { + Some(query) => { + drop(queries_lock); + log::debug!( + "Received final reply {}:{} from {}", + query.src_face, + qid, + face + ); + finalize_pending_query(query); + } + None => log::warn!( + "Route final reply {}:{} from {}: Query nof found!", + face, + qid, + face + ), + } +} + +pub(crate) fn finalize_pending_queries(tables_ref: &TablesLock, face: &mut Arc) { + let queries_lock = zwrite!(tables_ref.queries_lock); + for (_, query) in 
get_mut_unchecked(face).pending_queries.drain() { + finalize_pending_query(query); + } + drop(queries_lock); +} + +pub(crate) fn finalize_pending_query(query: Arc) { + if let Some(query) = Arc::into_inner(query) { + log::debug!("Propagate final reply {}:{}", query.src_face, query.src_qid); + query + .src_face + .primitives + .clone() + .send_response_final(ResponseFinal { + rid: query.src_qid, + ext_qos: response::ext::QoSType::response_final_default(), + ext_tstamp: None, + }); + } +} diff --git a/zenoh/src/net/routing/resource.rs b/zenoh/src/net/routing/dispatcher/resource.rs similarity index 88% rename from zenoh/src/net/routing/resource.rs rename to zenoh/src/net/routing/dispatcher/resource.rs index e26a9217f3..85872e61b6 100644 --- a/zenoh/src/net/routing/resource.rs +++ b/zenoh/src/net/routing/dispatcher/resource.rs @@ -12,7 +12,7 @@ // ZettaScale Zenoh Team, // use super::face::FaceState; -use super::router::{Tables, TablesLock}; +use super::tables::{Tables, TablesLock}; use std::collections::{HashMap, HashSet}; use std::convert::TryInto; use std::hash::{Hash, Hasher}; @@ -33,63 +33,63 @@ use zenoh_protocol::{ }; use zenoh_sync::get_mut_unchecked; -pub(super) type RoutingContext = u16; +pub(crate) type RoutingContext = u16; -pub(super) type Direction = (Arc, WireExpr<'static>, Option); -pub(super) type Route = HashMap; +pub(crate) type Direction = (Arc, WireExpr<'static>, Option); +pub(crate) type Route = HashMap; #[cfg(feature = "complete_n")] -pub(super) type QueryRoute = HashMap; +pub(crate) type QueryRoute = HashMap; #[cfg(not(feature = "complete_n"))] -pub(super) type QueryRoute = HashMap; -pub(super) struct QueryTargetQabl { - pub(super) direction: Direction, - pub(super) complete: u64, - pub(super) distance: f64, +pub(crate) type QueryRoute = HashMap; +pub(crate) struct QueryTargetQabl { + pub(crate) direction: Direction, + pub(crate) complete: u64, + pub(crate) distance: f64, } -pub(super) type QueryTargetQablSet = Vec; -pub(super) type PullCaches = Vec>; - -pub(super) struct SessionContext { - pub(super) face: Arc, - pub(super) local_expr_id: Option, - pub(super) remote_expr_id: Option, - pub(super) subs: Option, - pub(super) qabl: Option, - pub(super) last_values: HashMap, +pub(crate) type QueryTargetQablSet = Vec; +pub(crate) type PullCaches = Vec>; + +pub(crate) struct SessionContext { + pub(crate) face: Arc, + pub(crate) local_expr_id: Option, + pub(crate) remote_expr_id: Option, + pub(crate) subs: Option, + pub(crate) qabl: Option, + pub(crate) last_values: HashMap, } -pub(super) struct DataRoutes { - pub(super) matching_pulls: Option>, - pub(super) routers_data_routes: Vec>, - pub(super) peers_data_routes: Vec>, - pub(super) peer_data_route: Option>, - pub(super) client_data_route: Option>, +pub(crate) struct DataRoutes { + pub(crate) matching_pulls: Option>, + pub(crate) routers_data_routes: Vec>, + pub(crate) peers_data_routes: Vec>, + pub(crate) peer_data_route: Option>, + pub(crate) client_data_route: Option>, } -pub(super) struct QueryRoutes { - pub(super) routers_query_routes: Vec>, - pub(super) peers_query_routes: Vec>, - pub(super) peer_query_route: Option>, - pub(super) client_query_route: Option>, +pub(crate) struct QueryRoutes { + pub(crate) routers_query_routes: Vec>, + pub(crate) peers_query_routes: Vec>, + pub(crate) peer_query_route: Option>, + pub(crate) client_query_route: Option>, } -pub(super) struct ResourceContext { - pub(super) router_subs: HashSet, - pub(super) peer_subs: HashSet, - pub(super) router_qabls: HashMap, - pub(super) peer_qabls: 
HashMap, - pub(super) matches: Vec>, - pub(super) matching_pulls: Arc, - pub(super) valid_data_routes: bool, - pub(super) routers_data_routes: Vec>, - pub(super) peers_data_routes: Vec>, - pub(super) peer_data_route: Option>, - pub(super) client_data_route: Option>, - pub(super) valid_query_routes: bool, - pub(super) routers_query_routes: Vec>, - pub(super) peers_query_routes: Vec>, - pub(super) peer_query_route: Option>, - pub(super) client_query_route: Option>, +pub(crate) struct ResourceContext { + pub(crate) router_subs: HashSet, + pub(crate) peer_subs: HashSet, + pub(crate) router_qabls: HashMap, + pub(crate) peer_qabls: HashMap, + pub(crate) matches: Vec>, + pub(crate) matching_pulls: Arc, + pub(crate) valid_data_routes: bool, + pub(crate) routers_data_routes: Vec>, + pub(crate) peers_data_routes: Vec>, + pub(crate) peer_data_route: Option>, + pub(crate) client_data_route: Option>, + pub(crate) valid_query_routes: bool, + pub(crate) routers_query_routes: Vec>, + pub(crate) peers_query_routes: Vec>, + pub(crate) peer_query_route: Option>, + pub(crate) client_query_route: Option>, } impl ResourceContext { @@ -114,7 +114,7 @@ impl ResourceContext { } } - pub(super) fn update_data_routes(&mut self, data_routes: DataRoutes) { + pub(crate) fn update_data_routes(&mut self, data_routes: DataRoutes) { self.valid_data_routes = true; if let Some(matching_pulls) = data_routes.matching_pulls { self.matching_pulls = matching_pulls; @@ -125,7 +125,7 @@ impl ResourceContext { self.client_data_route = data_routes.client_data_route; } - pub(super) fn update_query_routes(&mut self, query_routes: QueryRoutes) { + pub(crate) fn update_query_routes(&mut self, query_routes: QueryRoutes) { self.valid_query_routes = true; self.routers_query_routes = query_routes.routers_query_routes; self.peers_query_routes = query_routes.peers_query_routes; @@ -135,12 +135,12 @@ impl ResourceContext { } pub struct Resource { - pub(super) parent: Option>, - pub(super) suffix: String, - pub(super) nonwild_prefix: Option<(Arc, String)>, - pub(super) childs: HashMap>, - pub(super) context: Option, - pub(super) session_ctxs: HashMap>, + pub(crate) parent: Option>, + pub(crate) suffix: String, + pub(crate) nonwild_prefix: Option<(Arc, String)>, + pub(crate) childs: HashMap>, + pub(crate) context: Option, + pub(crate) session_ctxs: HashMap>, } impl PartialEq for Resource { @@ -187,12 +187,12 @@ impl Resource { } #[inline(always)] - pub(super) fn context(&self) -> &ResourceContext { + pub(crate) fn context(&self) -> &ResourceContext { self.context.as_ref().unwrap() } #[inline(always)] - pub(super) fn context_mut(&mut self) -> &mut ResourceContext { + pub(crate) fn context_mut(&mut self) -> &mut ResourceContext { self.context.as_mut().unwrap() } @@ -269,7 +269,7 @@ impl Resource { } #[inline(always)] - pub(super) fn routers_query_route(&self, context: usize) -> Option> { + pub(crate) fn routers_query_route(&self, context: usize) -> Option> { match &self.context { Some(ctx) => { if ctx.valid_query_routes { @@ -284,7 +284,7 @@ impl Resource { } #[inline(always)] - pub(super) fn peers_query_route(&self, context: usize) -> Option> { + pub(crate) fn peers_query_route(&self, context: usize) -> Option> { match &self.context { Some(ctx) => { if ctx.valid_query_routes { @@ -299,7 +299,7 @@ impl Resource { } #[inline(always)] - pub(super) fn peer_query_route(&self) -> Option> { + pub(crate) fn peer_query_route(&self) -> Option> { match &self.context { Some(ctx) => { if ctx.valid_query_routes { @@ -313,7 +313,7 @@ impl Resource { } 
#[inline(always)] - pub(super) fn client_query_route(&self) -> Option> { + pub(crate) fn client_query_route(&self) -> Option> { match &self.context { Some(ctx) => { if ctx.valid_query_routes { diff --git a/zenoh/src/net/routing/dispatcher/tables.rs b/zenoh/src/net/routing/dispatcher/tables.rs new file mode 100644 index 0000000000..bc2eb520a4 --- /dev/null +++ b/zenoh/src/net/routing/dispatcher/tables.rs @@ -0,0 +1,316 @@ +use crate::net::routing::hat::HatTables; + +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +pub use super::super::hat::pubsub::*; +pub use super::super::hat::queries::*; +use super::face::FaceState; +pub use super::pubsub::*; +pub use super::queries::*; +pub use super::resource::*; +use std::collections::HashMap; +use std::sync::{Arc, Weak}; +use std::sync::{Mutex, RwLock}; +use std::time::Duration; +use uhlc::HLC; +use zenoh_protocol::core::{ExprId, WhatAmI, ZenohId}; +use zenoh_protocol::network::Mapping; +#[cfg(feature = "stats")] +use zenoh_transport::stats::TransportStats; +use zenoh_transport::Primitives; +// use zenoh_collections::Timer; +use zenoh_sync::get_mut_unchecked; + +pub(crate) struct RoutingExpr<'a> { + pub(crate) prefix: &'a Arc, + pub(crate) suffix: &'a str, + full: Option, +} + +impl<'a> RoutingExpr<'a> { + #[inline] + pub(crate) fn new(prefix: &'a Arc, suffix: &'a str) -> Self { + RoutingExpr { + prefix, + suffix, + full: None, + } + } + + #[inline] + pub(crate) fn full_expr(&mut self) -> &str { + if self.full.is_none() { + self.full = Some(self.prefix.expr() + self.suffix); + } + self.full.as_ref().unwrap() + } +} + +pub struct Tables { + pub(crate) zid: ZenohId, + pub(crate) whatami: WhatAmI, + pub(crate) face_counter: usize, + #[allow(dead_code)] + pub(crate) hlc: Option>, + pub(crate) drop_future_timestamp: bool, + // pub(crate) timer: Timer, + // pub(crate) queries_default_timeout: Duration, + pub(crate) root_res: Arc, + pub(crate) faces: HashMap>, + pub(crate) mcast_groups: Vec>, + pub(crate) mcast_faces: Vec>, + pub(crate) pull_caches_lock: Mutex<()>, + pub(crate) hat: HatTables, +} + +impl Tables { + pub fn new( + zid: ZenohId, + whatami: WhatAmI, + hlc: Option>, + drop_future_timestamp: bool, + router_peers_failover_brokering: bool, + _queries_default_timeout: Duration, + ) -> Self { + Tables { + zid, + whatami, + face_counter: 0, + hlc, + drop_future_timestamp, + // timer: Timer::new(true), + // queries_default_timeout, + root_res: Resource::root(), + faces: HashMap::new(), + mcast_groups: vec![], + mcast_faces: vec![], + pull_caches_lock: Mutex::new(()), + hat: HatTables::new(router_peers_failover_brokering), + } + } + + #[doc(hidden)] + pub fn _get_root(&self) -> &Arc { + &self.root_res + } + + pub fn print(&self) -> String { + Resource::print_tree(&self.root_res) + } + + #[inline] + #[allow(clippy::trivially_copy_pass_by_ref)] + pub(crate) fn get_mapping<'a>( + &'a self, + face: &'a FaceState, + expr_id: &ExprId, + mapping: Mapping, + ) -> Option<&'a Arc> { + match expr_id { + 0 => Some(&self.root_res), + expr_id => face.get_mapping(expr_id, mapping), + } + } + + #[inline] + pub(crate) fn get_face(&self, zid: &ZenohId) -> Option<&Arc> { + 
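
RoutingExpr, introduced in the new dispatcher tables above, defers building the full key expression (prefix expression plus suffix) until a route computation actually asks for it, and then caches the result. A small self-contained analogue of that lazy-concatenation pattern, using plain string slices instead of Arc<Resource>:

    // Minimal analogue of RoutingExpr: the concatenation happens at most once,
    // on the first call to full_expr(), and the cached String is reused after.
    struct LazyExpr<'a> {
        prefix: &'a str,
        suffix: &'a str,
        full: Option<String>,
    }

    impl<'a> LazyExpr<'a> {
        fn full_expr(&mut self) -> &str {
            if self.full.is_none() {
                self.full = Some(format!("{}{}", self.prefix, self.suffix));
            }
            self.full.as_ref().unwrap()
        }
    }

    fn main() {
        let mut expr = LazyExpr { prefix: "demo/example/", suffix: "sensor", full: None };
        assert_eq!(expr.full_expr(), "demo/example/sensor");
        assert_eq!(expr.full_expr(), "demo/example/sensor"); // second call reuses the cache
    }
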
self.faces.values().find(|face| face.zid == *zid) + } + + pub(crate) fn open_net_face( + &mut self, + zid: ZenohId, + whatami: WhatAmI, + #[cfg(feature = "stats")] stats: Arc, + primitives: Arc, + link_id: usize, + ) -> Weak { + let fid = self.face_counter; + self.face_counter += 1; + let mut newface = self + .faces + .entry(fid) + .or_insert_with(|| { + FaceState::new( + fid, + zid, + whatami, + #[cfg(feature = "stats")] + Some(stats), + primitives.clone(), + link_id, + None, + ) + }) + .clone(); + log::debug!("New {}", newface); + + pubsub_new_face(self, &mut newface); + queries_new_face(self, &mut newface); + + Arc::downgrade(&newface) + } + + pub fn open_face( + &mut self, + zid: ZenohId, + whatami: WhatAmI, + primitives: Arc, + ) -> Weak { + let fid = self.face_counter; + self.face_counter += 1; + let mut newface = self + .faces + .entry(fid) + .or_insert_with(|| { + FaceState::new( + fid, + zid, + whatami, + #[cfg(feature = "stats")] + None, + primitives.clone(), + 0, + None, + ) + }) + .clone(); + log::debug!("New {}", newface); + + pubsub_new_face(self, &mut newface); + queries_new_face(self, &mut newface); + + Arc::downgrade(&newface) + } + + fn compute_routes(&mut self, res: &mut Arc) { + compute_data_routes(self, res); + compute_query_routes(self, res); + } + + pub(crate) fn compute_matches_routes(&mut self, res: &mut Arc) { + if res.context.is_some() { + self.compute_routes(res); + + let resclone = res.clone(); + for match_ in &mut get_mut_unchecked(res).context_mut().matches { + let match_ = &mut match_.upgrade().unwrap(); + if !Arc::ptr_eq(match_, &resclone) && match_.context.is_some() { + self.compute_routes(match_); + } + } + } + } +} + +pub fn close_face(tables: &TablesLock, face: &Weak) { + match face.upgrade() { + Some(mut face) => { + log::debug!("Close {}", face); + finalize_pending_queries(tables, &mut face); + + let ctrl_lock = zlock!(tables.ctrl_lock); + let mut wtables = zwrite!(tables.tables); + let mut face_clone = face.clone(); + let face = get_mut_unchecked(&mut face); + for res in face.remote_mappings.values_mut() { + get_mut_unchecked(res).session_ctxs.remove(&face.id); + Resource::clean(res); + } + face.remote_mappings.clear(); + for res in face.local_mappings.values_mut() { + get_mut_unchecked(res).session_ctxs.remove(&face.id); + Resource::clean(res); + } + face.local_mappings.clear(); + + let mut subs_matches = vec![]; + for mut res in face.remote_subs.drain() { + get_mut_unchecked(&mut res).session_ctxs.remove(&face.id); + undeclare_client_subscription(&mut wtables, &mut face_clone, &mut res); + + if res.context.is_some() { + for match_ in &res.context().matches { + let mut match_ = match_.upgrade().unwrap(); + if !Arc::ptr_eq(&match_, &res) { + get_mut_unchecked(&mut match_) + .context_mut() + .valid_data_routes = false; + subs_matches.push(match_); + } + } + get_mut_unchecked(&mut res).context_mut().valid_data_routes = false; + subs_matches.push(res); + } + } + + let mut qabls_matches = vec![]; + for mut res in face.remote_qabls.drain() { + get_mut_unchecked(&mut res).session_ctxs.remove(&face.id); + undeclare_client_queryable(&mut wtables, &mut face_clone, &mut res); + + if res.context.is_some() { + for match_ in &res.context().matches { + let mut match_ = match_.upgrade().unwrap(); + if !Arc::ptr_eq(&match_, &res) { + get_mut_unchecked(&mut match_) + .context_mut() + .valid_query_routes = false; + qabls_matches.push(match_); + } + } + get_mut_unchecked(&mut res).context_mut().valid_query_routes = false; + qabls_matches.push(res); + } + } + 
drop(wtables); + + let mut matches_data_routes = vec![]; + let mut matches_query_routes = vec![]; + let rtables = zread!(tables.tables); + for _match in subs_matches.drain(..) { + matches_data_routes.push((_match.clone(), compute_data_routes_(&rtables, &_match))); + } + for _match in qabls_matches.drain(..) { + matches_query_routes + .push((_match.clone(), compute_query_routes_(&rtables, &_match))); + } + drop(rtables); + + let mut wtables = zwrite!(tables.tables); + for (mut res, data_routes) in matches_data_routes { + get_mut_unchecked(&mut res) + .context_mut() + .update_data_routes(data_routes); + Resource::clean(&mut res); + } + for (mut res, query_routes) in matches_query_routes { + get_mut_unchecked(&mut res) + .context_mut() + .update_query_routes(query_routes); + Resource::clean(&mut res); + } + wtables.faces.remove(&face.id); + drop(wtables); + drop(ctrl_lock); + } + None => log::error!("Face already closed!"), + } +} + +pub struct TablesLock { + pub tables: RwLock, + pub ctrl_lock: Mutex<()>, + pub queries_lock: RwLock<()>, +} diff --git a/zenoh/src/net/routing/hat/mod.rs b/zenoh/src/net/routing/hat/mod.rs new file mode 100644 index 0000000000..905c4ff4fd --- /dev/null +++ b/zenoh/src/net/routing/hat/mod.rs @@ -0,0 +1,195 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +//! ⚠️ WARNING ⚠️ +//! +//! This module is intended for Zenoh's internal use. +//! +//! [Click here for Zenoh's documentation](../zenoh/index.html) +use self::network::Network; +use super::dispatcher::tables::{Resource, TablesLock}; +use async_std::task::JoinHandle; +use std::{ + collections::{hash_map::DefaultHasher, HashSet}, + hash::Hasher, + sync::Arc, +}; +use zenoh_config::{WhatAmI, ZenohId}; + +pub mod network; +pub mod pubsub; +pub mod queries; + +zconfigurable! 
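
close_face above deliberately releases the tables write lock before recomputing the data and query routes of the matching resources, performs that computation under a read lock, and only then takes the write lock back to store the results and remove the face (zread!/zwrite! in the patch are zenoh's helper macros for taking the read/write sides of these locks). A minimal std-only sketch of that lock discipline, where a shared Vec<u32> stands in for the routing tables:

    use std::sync::RwLock;

    fn rebuild_routes(tables: &RwLock<Vec<u32>>) {
        {
            // Mutation under the write lock, then release it.
            let mut wtables = tables.write().unwrap();
            wtables.retain(|v| v % 2 == 0);
        }
        // The heavier "route computation" runs under a read lock, so other
        // readers are not blocked for its whole duration.
        let recomputed: Vec<u32> = {
            let rtables = tables.read().unwrap();
            rtables.iter().map(|v| v * 2).collect()
        };
        // Take the write lock again only to publish the results.
        let mut wtables = tables.write().unwrap();
        *wtables = recomputed;
    }

    fn main() {
        let tables = RwLock::new(vec![1, 2, 3, 4]);
        rebuild_routes(&tables);
        assert_eq!(*tables.read().unwrap(), vec![4, 8]);
    }
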
{ + static ref TREES_COMPUTATION_DELAY: u64 = 100; +} + +pub struct HatTables { + pub(crate) router_subs: HashSet>, + pub(crate) peer_subs: HashSet>, + pub(crate) router_qabls: HashSet>, + pub(crate) peer_qabls: HashSet>, + pub(crate) routers_net: Option, + pub(crate) peers_net: Option, + pub(crate) shared_nodes: Vec, + pub(crate) routers_trees_task: Option>, + pub(crate) peers_trees_task: Option>, + pub(crate) router_peers_failover_brokering: bool, +} + +impl HatTables { + pub fn new(router_peers_failover_brokering: bool) -> Self { + Self { + router_subs: HashSet::new(), + peer_subs: HashSet::new(), + router_qabls: HashSet::new(), + peer_qabls: HashSet::new(), + routers_net: None, + peers_net: None, + shared_nodes: vec![], + routers_trees_task: None, + peers_trees_task: None, + router_peers_failover_brokering, + } + } + + #[inline] + pub(crate) fn get_net(&self, net_type: WhatAmI) -> Option<&Network> { + match net_type { + WhatAmI::Router => self.routers_net.as_ref(), + WhatAmI::Peer => self.peers_net.as_ref(), + _ => None, + } + } + + #[inline] + pub(crate) fn full_net(&self, net_type: WhatAmI) -> bool { + match net_type { + WhatAmI::Router => self + .routers_net + .as_ref() + .map(|net| net.full_linkstate) + .unwrap_or(false), + WhatAmI::Peer => self + .peers_net + .as_ref() + .map(|net| net.full_linkstate) + .unwrap_or(false), + _ => false, + } + } + + #[inline] + pub(crate) fn get_router_links(&self, peer: ZenohId) -> impl Iterator + '_ { + self.peers_net + .as_ref() + .unwrap() + .get_links(peer) + .iter() + .filter(move |nid| { + if let Some(node) = self.routers_net.as_ref().unwrap().get_node(nid) { + node.whatami.unwrap_or(WhatAmI::Router) == WhatAmI::Router + } else { + false + } + }) + } + + #[inline] + pub(crate) fn elect_router<'a>( + &'a self, + self_zid: &'a ZenohId, + key_expr: &str, + mut routers: impl Iterator, + ) -> &'a ZenohId { + match routers.next() { + None => self_zid, + Some(router) => { + let hash = |r: &ZenohId| { + let mut hasher = DefaultHasher::new(); + for b in key_expr.as_bytes() { + hasher.write_u8(*b); + } + for b in &r.to_le_bytes()[..r.size()] { + hasher.write_u8(*b); + } + hasher.finish() + }; + let mut res = router; + let mut h = None; + for router2 in routers { + let h2 = hash(router2); + if h2 > *h.get_or_insert_with(|| hash(res)) { + res = router2; + h = Some(h2); + } + } + res + } + } + } + + #[inline] + pub(crate) fn failover_brokering_to(source_links: &[ZenohId], dest: ZenohId) -> bool { + // if source_links is empty then gossip is probably disabled in source peer + !source_links.is_empty() && !source_links.contains(&dest) + } + + #[inline] + pub(crate) fn failover_brokering(&self, peer1: ZenohId, peer2: ZenohId) -> bool { + self.router_peers_failover_brokering + && self + .peers_net + .as_ref() + .map(|net| HatTables::failover_brokering_to(net.get_links(peer1), peer2)) + .unwrap_or(false) + } + + pub(crate) fn schedule_compute_trees( + &mut self, + tables_ref: Arc, + net_type: WhatAmI, + ) { + log::trace!("Schedule computations"); + if (net_type == WhatAmI::Router && self.routers_trees_task.is_none()) + || (net_type == WhatAmI::Peer && self.peers_trees_task.is_none()) + { + let task = Some(async_std::task::spawn(async move { + async_std::task::sleep(std::time::Duration::from_millis(*TREES_COMPUTATION_DELAY)) + .await; + let mut tables = zwrite!(tables_ref.tables); + + log::trace!("Compute trees"); + let new_childs = match net_type { + WhatAmI::Router => tables.hat.routers_net.as_mut().unwrap().compute_trees(), + _ => 
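
elect_router above is a rendezvous (highest-random-weight) election: each candidate router is scored by hashing the key expression together with its ZenohId, and the candidate with the highest score wins, so every node running the same computation picks the same router without coordination. A simplified self-contained sketch of that idea, where 16-byte arrays stand in for ZenohId and the whole id is hashed rather than only its significant bytes:

    use std::collections::hash_map::DefaultHasher;
    use std::hash::Hasher;

    // Score every candidate against the key expression and keep the highest:
    // rendezvous hashing, the scheme elect_router uses for master election.
    fn elect<'a>(key_expr: &str, candidates: &'a [[u8; 16]]) -> Option<&'a [u8; 16]> {
        let score = |id: &[u8; 16]| {
            let mut hasher = DefaultHasher::new();
            hasher.write(key_expr.as_bytes());
            hasher.write(id);
            hasher.finish()
        };
        candidates.iter().max_by_key(|&id| score(id))
    }

    fn main() {
        let routers = [[1u8; 16], [2u8; 16], [3u8; 16]];
        // Every peer computing this locally agrees on the same winner for a
        // given key expression; different expressions may elect different routers.
        println!("elected: {:?}", elect("demo/example/key", &routers));
    }
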
tables.hat.peers_net.as_mut().unwrap().compute_trees(), + }; + + log::trace!("Compute routes"); + pubsub::pubsub_tree_change(&mut tables, &new_childs, net_type); + queries::queries_tree_change(&mut tables, &new_childs, net_type); + + log::trace!("Computations completed"); + match net_type { + WhatAmI::Router => tables.hat.routers_trees_task = None, + _ => tables.hat.peers_trees_task = None, + }; + })); + match net_type { + WhatAmI::Router => self.routers_trees_task = task, + _ => self.peers_trees_task = task, + }; + } + } +} diff --git a/zenoh/src/net/routing/network.rs b/zenoh/src/net/routing/hat/network.rs similarity index 99% rename from zenoh/src/net/routing/network.rs rename to zenoh/src/net/routing/hat/network.rs index 3af1e0a87c..2c1a9746da 100644 --- a/zenoh/src/net/routing/network.rs +++ b/zenoh/src/net/routing/hat/network.rs @@ -985,7 +985,7 @@ impl Network { } #[inline] - pub(super) fn get_links(&self, node: ZenohId) -> &[ZenohId] { + pub(crate) fn get_links(&self, node: ZenohId) -> &[ZenohId] { self.get_node(&node) .map(|node| &node.links[..]) .unwrap_or_default() @@ -993,7 +993,7 @@ impl Network { } #[inline] -pub(super) fn shared_nodes(net1: &Network, net2: &Network) -> Vec { +pub(crate) fn shared_nodes(net1: &Network, net2: &Network) -> Vec { net1.graph .node_references() .filter_map(|(_, node1)| { diff --git a/zenoh/src/net/routing/pubsub.rs b/zenoh/src/net/routing/hat/pubsub.rs similarity index 58% rename from zenoh/src/net/routing/pubsub.rs rename to zenoh/src/net/routing/hat/pubsub.rs index 3deba60260..a92d8a4640 100644 --- a/zenoh/src/net/routing/pubsub.rs +++ b/zenoh/src/net/routing/hat/pubsub.rs @@ -11,32 +11,23 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::face::FaceState; +use super::super::dispatcher::face::FaceState; +use super::super::dispatcher::pubsub::*; +use super::super::dispatcher::resource::{Resource, RoutingContext, SessionContext}; +use super::super::dispatcher::tables::{Tables, TablesLock}; +use super::super::PREFIX_LIVELINESS; use super::network::Network; -use super::resource::{ - DataRoutes, Direction, PullCaches, Resource, Route, RoutingContext, SessionContext, -}; -use super::router::{RoutingExpr, Tables, TablesLock}; +use super::HatTables; use petgraph::graph::NodeIndex; -use std::borrow::Cow; -use std::collections::{HashMap, HashSet}; -use std::convert::TryFrom; -use std::sync::RwLock; +use std::collections::HashMap; use std::sync::{Arc, RwLockReadGuard}; use zenoh_core::zread; use zenoh_protocol::{ - core::{ - key_expr::{keyexpr, OwnedKeyExpr}, - Reliability, WhatAmI, WireExpr, ZenohId, - }, - network::{ - declare::{ - common::ext::WireExprType, ext, subscriber::ext::SubscriberInfo, Declare, DeclareBody, - DeclareSubscriber, Mode, UndeclareSubscriber, - }, - Push, + core::{key_expr::keyexpr, Reliability, WhatAmI, WireExpr, ZenohId}, + network::declare::{ + common::ext::WireExprType, ext, subscriber::ext::SubscriberInfo, Declare, DeclareBody, + DeclareSubscriber, Mode, UndeclareSubscriber, }, - zenoh::PushBody, }; use zenoh_sync::get_mut_unchecked; @@ -88,7 +79,7 @@ fn propagate_simple_subscription_to( src_face: &mut Arc, full_peer_net: bool, ) { - if (src_face.id != dst_face.id || res.expr().starts_with(super::PREFIX_LIVELINESS)) + if (src_face.id != dst_face.id || res.expr().starts_with(PREFIX_LIVELINESS)) && !dst_face.local_subs.contains(res) && match tables.whatami { WhatAmI::Router => { @@ -98,7 +89,7 @@ fn propagate_simple_subscription_to( dst_face.whatami != WhatAmI::Router && (src_face.whatami != WhatAmI::Peer || 
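
schedule_compute_trees above debounces tree recomputation: the per-network Option<JoinHandle> acts as an "already scheduled" flag, the spawned task sleeps TREES_COMPUTATION_DELAY milliseconds so that a burst of linkstate changes results in a single recomputation, and the task clears the flag once the trees and routes are rebuilt. A rough std::thread sketch of the same debounce pattern (the real code uses async_std tasks and resets the handle through the shared tables):

    use std::sync::{Arc, Mutex};
    use std::thread;
    use std::time::Duration;

    // "Already scheduled" flag shared with the worker so it can clear itself.
    fn schedule(pending: &Arc<Mutex<bool>>, delay_ms: u64) {
        let mut flag = pending.lock().unwrap();
        if !*flag {
            *flag = true;
            let pending = pending.clone();
            thread::spawn(move || {
                thread::sleep(Duration::from_millis(delay_ms));
                println!("recompute trees and routes once for the whole burst");
                *pending.lock().unwrap() = false;
            });
        }
    }

    fn main() {
        let pending = Arc::new(Mutex::new(false));
        for _ in 0..10 {
            schedule(&pending, 100); // ten triggers, one recomputation
        }
        thread::sleep(Duration::from_millis(200));
    }
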
dst_face.whatami != WhatAmI::Peer - || tables.failover_brokering(src_face.zid, dst_face.zid)) + || tables.hat.failover_brokering(src_face.zid, dst_face.zid)) } } WhatAmI::Peer => { @@ -132,7 +123,7 @@ fn propagate_simple_subscription( sub_info: &SubscriberInfo, src_face: &mut Arc, ) { - let full_peer_net = tables.full_net(WhatAmI::Peer); + let full_peer_net = tables.hat.full_net(WhatAmI::Peer); for mut dst_face in tables .faces .values() @@ -158,7 +149,7 @@ fn propagate_sourced_subscription( source: &ZenohId, net_type: WhatAmI, ) { - let net = tables.get_net(net_type).unwrap(); + let net = tables.hat.get_net(net_type).unwrap(); match net.get_idx(source) { Some(tree_sid) => { if net.trees.len() > tree_sid.index() { @@ -207,14 +198,14 @@ fn register_router_subscription( .context_mut() .router_subs .insert(router); - tables.router_subs.insert(res.clone()); + tables.hat.router_subs.insert(res.clone()); } // Propagate subscription to routers propagate_sourced_subscription(tables, res, sub_info, Some(face), &router, WhatAmI::Router); } // Propagate subscription to peers - if tables.full_net(WhatAmI::Peer) && face.whatami != WhatAmI::Peer { + if tables.hat.full_net(WhatAmI::Peer) && face.whatami != WhatAmI::Peer { register_peer_subscription(tables, face, res, sub_info, tables.zid) } @@ -290,7 +281,7 @@ fn register_peer_subscription( { log::debug!("Register peer subscription {} (peer: {})", res.expr(), peer); get_mut_unchecked(res).context_mut().peer_subs.insert(peer); - tables.peer_subs.insert(res.clone()); + tables.hat.peer_subs.insert(res.clone()); } // Propagate subscription to peers @@ -453,7 +444,7 @@ pub fn declare_client_subscription( ); } WhatAmI::Peer => { - if wtables.full_net(WhatAmI::Peer) { + if wtables.hat.full_net(WhatAmI::Peer) { let zid = wtables.zid; register_peer_subscription( &mut wtables, @@ -609,7 +600,7 @@ fn propagate_forget_simple_subscription(tables: &mut Tables, res: &Arc } fn propagate_forget_simple_subscription_to_peers(tables: &mut Tables, res: &Arc) { - if !tables.full_net(WhatAmI::Peer) + if !tables.hat.full_net(WhatAmI::Peer) && res.context().router_subs.len() == 1 && res.context().router_subs.contains(&tables.zid) { @@ -626,7 +617,7 @@ fn propagate_forget_simple_subscription_to_peers(tables: &mut Tables, res: &Arc< && s.subs.is_some() && (s.face.whatami == WhatAmI::Client || (s.face.whatami == WhatAmI::Peer - && tables.failover_brokering(s.face.zid, face.zid))) + && tables.hat.failover_brokering(s.face.zid, face.zid))) }) { let wire_expr = Resource::get_best_key(res, "", face.id); @@ -653,7 +644,7 @@ fn propagate_forget_sourced_subscription( source: &ZenohId, net_type: WhatAmI, ) { - let net = tables.get_net(net_type).unwrap(); + let net = tables.hat.get_net(net_type).unwrap(); match net.get_idx(source) { Some(tree_sid) => { if net.trees.len() > tree_sid.index() { @@ -694,9 +685,9 @@ fn unregister_router_subscription(tables: &mut Tables, res: &mut Arc, .retain(|sub| sub != router); if res.context().router_subs.is_empty() { - tables.router_subs.retain(|sub| !Arc::ptr_eq(sub, res)); + tables.hat.router_subs.retain(|sub| !Arc::ptr_eq(sub, res)); - if tables.full_net(WhatAmI::Peer) { + if tables.hat.full_net(WhatAmI::Peer) { undeclare_peer_subscription(tables, None, res, &tables.zid.clone()); } propagate_forget_simple_subscription(tables, res); @@ -763,7 +754,7 @@ fn unregister_peer_subscription(tables: &mut Tables, res: &mut Arc, pe .retain(|sub| sub != peer); if res.context().peer_subs.is_empty() { - tables.peer_subs.retain(|sub| !Arc::ptr_eq(sub, res)); + 
tables.hat.peer_subs.retain(|sub| !Arc::ptr_eq(sub, res)); if tables.whatami == WhatAmI::Peer { propagate_forget_simple_subscription(tables, res); @@ -849,7 +840,7 @@ pub(crate) fn undeclare_client_subscription( } WhatAmI::Peer => { if client_subs.is_empty() { - if tables.full_net(WhatAmI::Peer) { + if tables.hat.full_net(WhatAmI::Peer) { undeclare_peer_subscription(tables, None, res, &tables.zid.clone()); } else { propagate_forget_simple_subscription(tables, res); @@ -865,8 +856,7 @@ pub(crate) fn undeclare_client_subscription( if client_subs.len() == 1 && !router_subs && !peer_subs { let face = &mut client_subs[0]; if face.local_subs.contains(res) - && !(face.whatami == WhatAmI::Client - && res.expr().starts_with(super::PREFIX_LIVELINESS)) + && !(face.whatami == WhatAmI::Client && res.expr().starts_with(PREFIX_LIVELINESS)) { let wire_expr = Resource::get_best_key(res, "", face.id); face.primitives.send_declare(Declare { @@ -926,7 +916,7 @@ pub(crate) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { match tables.whatami { WhatAmI::Router => { if face.whatami == WhatAmI::Client { - for sub in &tables.router_subs { + for sub in &tables.hat.router_subs { get_mut_unchecked(face).local_subs.insert(sub.clone()); let key_expr = Resource::decl_key(sub, face); face.primitives.send_declare(Declare { @@ -940,15 +930,15 @@ pub(crate) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { }), }); } - } else if face.whatami == WhatAmI::Peer && !tables.full_net(WhatAmI::Peer) { - for sub in &tables.router_subs { + } else if face.whatami == WhatAmI::Peer && !tables.hat.full_net(WhatAmI::Peer) { + for sub in &tables.hat.router_subs { if sub.context.is_some() && (sub.context().router_subs.iter().any(|r| *r != tables.zid) || sub.session_ctxs.values().any(|s| { s.subs.is_some() && (s.face.whatami == WhatAmI::Client || (s.face.whatami == WhatAmI::Peer - && tables.failover_brokering(s.face.zid, face.zid))) + && tables.hat.failover_brokering(s.face.zid, face.zid))) })) { get_mut_unchecked(face).local_subs.insert(sub.clone()); @@ -968,9 +958,9 @@ pub(crate) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { } } WhatAmI::Peer => { - if tables.full_net(WhatAmI::Peer) { + if tables.hat.full_net(WhatAmI::Peer) { if face.whatami == WhatAmI::Client { - for sub in &tables.peer_subs { + for sub in &tables.hat.peer_subs { get_mut_unchecked(face).local_subs.insert(sub.clone()); let key_expr = Resource::decl_key(sub, face); face.primitives.send_declare(Declare { @@ -1031,6 +1021,7 @@ pub(crate) fn pubsub_remove_node(tables: &mut Tables, node: &ZenohId, net_type: match net_type { WhatAmI::Router => { for mut res in tables + .hat .router_subs .iter() .filter(|res| res.context().router_subs.contains(node)) @@ -1050,6 +1041,7 @@ pub(crate) fn pubsub_remove_node(tables: &mut Tables, node: &ZenohId, net_type: } WhatAmI::Peer => { for mut res in tables + .hat .peer_subs .iter() .filter(|res| res.context().peer_subs.contains(node)) @@ -1088,14 +1080,14 @@ pub(crate) fn pubsub_tree_change( // propagate subs to new childs for (tree_sid, tree_childs) in new_childs.iter().enumerate() { if !tree_childs.is_empty() { - let net = tables.get_net(net_type).unwrap(); + let net = tables.hat.get_net(net_type).unwrap(); let tree_idx = NodeIndex::new(tree_sid); if net.graph.contains_node(tree_idx) { let tree_id = net.graph[tree_idx].zid; let subs_res = match net_type { - WhatAmI::Router => &tables.router_subs, - _ => &tables.peer_subs, + WhatAmI::Router => &tables.hat.router_subs, + _ => &tables.hat.peer_subs, }; for res in 
subs_res { @@ -1131,7 +1123,7 @@ pub(crate) fn pubsub_tree_change( pub(crate) fn pubsub_linkstate_change(tables: &mut Tables, zid: &ZenohId, links: &[ZenohId]) { if let Some(src_face) = tables.get_face(zid).cloned() { - if tables.router_peers_failover_brokering + if tables.hat.router_peers_failover_brokering && tables.whatami == WhatAmI::Router && src_face.whatami == WhatAmI::Peer { @@ -1148,9 +1140,10 @@ pub(crate) fn pubsub_linkstate_change(tables: &mut Tables, zid: &ZenohId, links: let dst_face = &mut get_mut_unchecked(ctx).face; if dst_face.whatami == WhatAmI::Peer && src_face.zid != dst_face.zid { if dst_face.local_subs.contains(res) { - let forget = !Tables::failover_brokering_to(links, dst_face.zid) + let forget = !HatTables::failover_brokering_to(links, dst_face.zid) && { let ctx_links = tables + .hat .peers_net .as_ref() .map(|net| net.get_links(dst_face.zid)) @@ -1158,7 +1151,7 @@ pub(crate) fn pubsub_linkstate_change(tables: &mut Tables, zid: &ZenohId, links: res.session_ctxs.values().any(|ctx2| { ctx2.face.whatami == WhatAmI::Peer && ctx2.subs.is_some() - && Tables::failover_brokering_to( + && HatTables::failover_brokering_to( ctx_links, ctx2.face.zid, ) @@ -1180,7 +1173,7 @@ pub(crate) fn pubsub_linkstate_change(tables: &mut Tables, zid: &ZenohId, links: get_mut_unchecked(dst_face).local_subs.remove(res); } - } else if Tables::failover_brokering_to(links, ctx.face.zid) { + } else if HatTables::failover_brokering_to(links, ctx.face.zid) { let dst_face = &mut get_mut_unchecked(ctx).face; get_mut_unchecked(dst_face).local_subs.insert(res.clone()); let key_expr = Resource::decl_key(res, dst_face); @@ -1206,740 +1199,3 @@ pub(crate) fn pubsub_linkstate_change(tables: &mut Tables, zid: &ZenohId, links: } } } - -#[inline] -fn insert_faces_for_subs( - route: &mut Route, - expr: &RoutingExpr, - tables: &Tables, - net: &Network, - source: usize, - subs: &HashSet, -) { - if net.trees.len() > source { - for sub in subs { - if let Some(sub_idx) = net.get_idx(sub) { - if net.trees[source].directions.len() > sub_idx.index() { - if let Some(direction) = net.trees[source].directions[sub_idx.index()] { - if net.graph.contains_node(direction) { - if let Some(face) = tables.get_face(&net.graph[direction].zid) { - route.entry(face.id).or_insert_with(|| { - let key_expr = - Resource::get_best_key(expr.prefix, expr.suffix, face.id); - ( - face.clone(), - key_expr.to_owned(), - if source != 0 { - Some(source as u16) - } else { - None - }, - ) - }); - } - } - } - } - } - } - } else { - log::trace!("Tree for node sid:{} not yet ready", source); - } -} - -fn compute_data_route( - tables: &Tables, - expr: &mut RoutingExpr, - source: Option, - source_type: WhatAmI, -) -> Arc { - let mut route = HashMap::new(); - let key_expr = expr.full_expr(); - if key_expr.ends_with('/') { - return Arc::new(route); - } - log::trace!( - "compute_data_route({}, {:?}, {:?})", - key_expr, - source, - source_type - ); - let key_expr = match OwnedKeyExpr::try_from(key_expr) { - Ok(ke) => ke, - Err(e) => { - log::warn!("Invalid KE reached the system: {}", e); - return Arc::new(route); - } - }; - let res = Resource::get_resource(expr.prefix, expr.suffix); - let matches = res - .as_ref() - .and_then(|res| res.context.as_ref()) - .map(|ctx| Cow::from(&ctx.matches)) - .unwrap_or_else(|| Cow::from(Resource::get_matches(tables, &key_expr))); - - let master = tables.whatami != WhatAmI::Router - || !tables.full_net(WhatAmI::Peer) - || *tables.elect_router(&key_expr, tables.shared_nodes.iter()) == tables.zid; - - for mres in 
matches.iter() { - let mres = mres.upgrade().unwrap(); - if tables.whatami == WhatAmI::Router { - if master || source_type == WhatAmI::Router { - let net = tables.routers_net.as_ref().unwrap(); - let router_source = match source_type { - WhatAmI::Router => source.unwrap(), - _ => net.idx.index(), - }; - insert_faces_for_subs( - &mut route, - expr, - tables, - net, - router_source, - &mres.context().router_subs, - ); - } - - if (master || source_type != WhatAmI::Router) && tables.full_net(WhatAmI::Peer) { - let net = tables.peers_net.as_ref().unwrap(); - let peer_source = match source_type { - WhatAmI::Peer => source.unwrap(), - _ => net.idx.index(), - }; - insert_faces_for_subs( - &mut route, - expr, - tables, - net, - peer_source, - &mres.context().peer_subs, - ); - } - } - - if tables.whatami == WhatAmI::Peer && tables.full_net(WhatAmI::Peer) { - let net = tables.peers_net.as_ref().unwrap(); - let peer_source = match source_type { - WhatAmI::Router | WhatAmI::Peer => source.unwrap(), - _ => net.idx.index(), - }; - insert_faces_for_subs( - &mut route, - expr, - tables, - net, - peer_source, - &mres.context().peer_subs, - ); - } - - if tables.whatami != WhatAmI::Router || master || source_type == WhatAmI::Router { - for (sid, context) in &mres.session_ctxs { - if let Some(subinfo) = &context.subs { - if match tables.whatami { - WhatAmI::Router => context.face.whatami != WhatAmI::Router, - _ => { - source_type == WhatAmI::Client - || context.face.whatami == WhatAmI::Client - } - } && subinfo.mode == Mode::Push - { - route.entry(*sid).or_insert_with(|| { - let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, *sid); - (context.face.clone(), key_expr.to_owned(), None) - }); - } - } - } - } - } - for mcast_group in &tables.mcast_groups { - route.insert( - mcast_group.id, - ( - mcast_group.clone(), - expr.full_expr().to_string().into(), - None, - ), - ); - } - Arc::new(route) -} - -fn compute_matching_pulls(tables: &Tables, expr: &mut RoutingExpr) -> Arc { - let mut pull_caches = vec![]; - let ke = if let Ok(ke) = OwnedKeyExpr::try_from(expr.full_expr()) { - ke - } else { - return Arc::new(pull_caches); - }; - let res = Resource::get_resource(expr.prefix, expr.suffix); - let matches = res - .as_ref() - .and_then(|res| res.context.as_ref()) - .map(|ctx| Cow::from(&ctx.matches)) - .unwrap_or_else(|| Cow::from(Resource::get_matches(tables, &ke))); - - for mres in matches.iter() { - let mres = mres.upgrade().unwrap(); - for context in mres.session_ctxs.values() { - if let Some(subinfo) = &context.subs { - if subinfo.mode == Mode::Pull { - pull_caches.push(context.clone()); - } - } - } - } - Arc::new(pull_caches) -} - -pub(super) fn compute_data_routes_(tables: &Tables, res: &Arc) -> DataRoutes { - let mut routes = DataRoutes { - matching_pulls: None, - routers_data_routes: vec![], - peers_data_routes: vec![], - peer_data_route: None, - client_data_route: None, - }; - let mut expr = RoutingExpr::new(res, ""); - if tables.whatami == WhatAmI::Router { - let indexes = tables - .routers_net - .as_ref() - .unwrap() - .graph - .node_indices() - .collect::>(); - let max_idx = indexes.iter().max().unwrap(); - routes - .routers_data_routes - .resize_with(max_idx.index() + 1, || Arc::new(HashMap::new())); - - for idx in &indexes { - routes.routers_data_routes[idx.index()] = - compute_data_route(tables, &mut expr, Some(idx.index()), WhatAmI::Router); - } - - routes.peer_data_route = Some(compute_data_route(tables, &mut expr, None, WhatAmI::Peer)); - } - if (tables.whatami == WhatAmI::Router || 
tables.whatami == WhatAmI::Peer) - && tables.full_net(WhatAmI::Peer) - { - let indexes = tables - .peers_net - .as_ref() - .unwrap() - .graph - .node_indices() - .collect::>(); - let max_idx = indexes.iter().max().unwrap(); - routes - .peers_data_routes - .resize_with(max_idx.index() + 1, || Arc::new(HashMap::new())); - - for idx in &indexes { - routes.peers_data_routes[idx.index()] = - compute_data_route(tables, &mut expr, Some(idx.index()), WhatAmI::Peer); - } - } - if tables.whatami == WhatAmI::Peer && !tables.full_net(WhatAmI::Peer) { - routes.client_data_route = - Some(compute_data_route(tables, &mut expr, None, WhatAmI::Client)); - routes.peer_data_route = Some(compute_data_route(tables, &mut expr, None, WhatAmI::Peer)); - } - if tables.whatami == WhatAmI::Client { - routes.client_data_route = - Some(compute_data_route(tables, &mut expr, None, WhatAmI::Client)); - } - routes.matching_pulls = Some(compute_matching_pulls(tables, &mut expr)); - routes -} - -pub(crate) fn compute_data_routes(tables: &mut Tables, res: &mut Arc) { - if res.context.is_some() { - let mut res_mut = res.clone(); - let res_mut = get_mut_unchecked(&mut res_mut); - let mut expr = RoutingExpr::new(res, ""); - if tables.whatami == WhatAmI::Router { - let indexes = tables - .routers_net - .as_ref() - .unwrap() - .graph - .node_indices() - .collect::>(); - let max_idx = indexes.iter().max().unwrap(); - let routers_data_routes = &mut res_mut.context_mut().routers_data_routes; - routers_data_routes.clear(); - routers_data_routes.resize_with(max_idx.index() + 1, || Arc::new(HashMap::new())); - - for idx in &indexes { - routers_data_routes[idx.index()] = - compute_data_route(tables, &mut expr, Some(idx.index()), WhatAmI::Router); - } - - res_mut.context_mut().peer_data_route = - Some(compute_data_route(tables, &mut expr, None, WhatAmI::Peer)); - } - if (tables.whatami == WhatAmI::Router || tables.whatami == WhatAmI::Peer) - && tables.full_net(WhatAmI::Peer) - { - let indexes = tables - .peers_net - .as_ref() - .unwrap() - .graph - .node_indices() - .collect::>(); - let max_idx = indexes.iter().max().unwrap(); - let peers_data_routes = &mut res_mut.context_mut().peers_data_routes; - peers_data_routes.clear(); - peers_data_routes.resize_with(max_idx.index() + 1, || Arc::new(HashMap::new())); - - for idx in &indexes { - peers_data_routes[idx.index()] = - compute_data_route(tables, &mut expr, Some(idx.index()), WhatAmI::Peer); - } - } - if tables.whatami == WhatAmI::Peer && !tables.full_net(WhatAmI::Peer) { - res_mut.context_mut().client_data_route = - Some(compute_data_route(tables, &mut expr, None, WhatAmI::Client)); - res_mut.context_mut().peer_data_route = - Some(compute_data_route(tables, &mut expr, None, WhatAmI::Peer)); - } - if tables.whatami == WhatAmI::Client { - res_mut.context_mut().client_data_route = - Some(compute_data_route(tables, &mut expr, None, WhatAmI::Client)); - } - res_mut.context_mut().matching_pulls = compute_matching_pulls(tables, &mut expr); - } -} - -pub(super) fn compute_data_routes_from(tables: &mut Tables, res: &mut Arc) { - compute_data_routes(tables, res); - let res = get_mut_unchecked(res); - for child in res.childs.values_mut() { - compute_data_routes_from(tables, child); - } -} - -pub(super) fn compute_matches_data_routes_<'a>( - tables: &'a Tables, - res: &'a Arc, -) -> Vec<(Arc, DataRoutes)> { - let mut routes = vec![]; - if res.context.is_some() { - routes.push((res.clone(), compute_data_routes_(tables, res))); - for match_ in &res.context().matches { - let match_ = 
match_.upgrade().unwrap(); - if !Arc::ptr_eq(&match_, res) { - let match_routes = compute_data_routes_(tables, &match_); - routes.push((match_, match_routes)); - } - } - } - routes -} - -pub(super) fn disable_matches_data_routes(_tables: &mut Tables, res: &mut Arc) { - if res.context.is_some() { - get_mut_unchecked(res).context_mut().valid_data_routes = false; - for match_ in &res.context().matches { - let mut match_ = match_.upgrade().unwrap(); - if !Arc::ptr_eq(&match_, res) { - get_mut_unchecked(&mut match_) - .context_mut() - .valid_data_routes = false; - } - } - } -} - -macro_rules! treat_timestamp { - ($hlc:expr, $payload:expr, $drop:expr) => { - // if an HLC was configured (via Config.add_timestamp), - // check DataInfo and add a timestamp if there isn't - if let Some(hlc) = $hlc { - if let PushBody::Put(data) = &mut $payload { - if let Some(ref ts) = data.timestamp { - // Timestamp is present; update HLC with it (possibly raising error if delta exceed) - match hlc.update_with_timestamp(ts) { - Ok(()) => (), - Err(e) => { - if $drop { - log::error!( - "Error treating timestamp for received Data ({}). Drop it!", - e - ); - return; - } else { - data.timestamp = Some(hlc.new_timestamp()); - log::error!( - "Error treating timestamp for received Data ({}). Replace timestamp: {:?}", - e, - data.timestamp); - } - } - } - } else { - // Timestamp not present; add one - data.timestamp = Some(hlc.new_timestamp()); - log::trace!("Adding timestamp to DataInfo: {:?}", data.timestamp); - } - } - } - } -} - -#[inline] -fn get_data_route( - tables: &Tables, - face: &FaceState, - res: &Option>, - expr: &mut RoutingExpr, - routing_context: u64, -) -> Arc { - match tables.whatami { - WhatAmI::Router => match face.whatami { - WhatAmI::Router => { - let routers_net = tables.routers_net.as_ref().unwrap(); - let local_context = routers_net.get_local_context(routing_context, face.link_id); - res.as_ref() - .and_then(|res| res.routers_data_route(local_context)) - .unwrap_or_else(|| { - compute_data_route(tables, expr, Some(local_context), face.whatami) - }) - } - WhatAmI::Peer => { - if tables.full_net(WhatAmI::Peer) { - let peers_net = tables.peers_net.as_ref().unwrap(); - let local_context = peers_net.get_local_context(routing_context, face.link_id); - res.as_ref() - .and_then(|res| res.peers_data_route(local_context)) - .unwrap_or_else(|| { - compute_data_route(tables, expr, Some(local_context), face.whatami) - }) - } else { - res.as_ref() - .and_then(|res| res.peer_data_route()) - .unwrap_or_else(|| compute_data_route(tables, expr, None, face.whatami)) - } - } - _ => res - .as_ref() - .and_then(|res| res.routers_data_route(0)) - .unwrap_or_else(|| compute_data_route(tables, expr, None, face.whatami)), - }, - WhatAmI::Peer => { - if tables.full_net(WhatAmI::Peer) { - match face.whatami { - WhatAmI::Router | WhatAmI::Peer => { - let peers_net = tables.peers_net.as_ref().unwrap(); - let local_context = - peers_net.get_local_context(routing_context, face.link_id); - res.as_ref() - .and_then(|res| res.peers_data_route(local_context)) - .unwrap_or_else(|| { - compute_data_route(tables, expr, Some(local_context), face.whatami) - }) - } - _ => res - .as_ref() - .and_then(|res| res.peers_data_route(0)) - .unwrap_or_else(|| compute_data_route(tables, expr, None, face.whatami)), - } - } else { - res.as_ref() - .and_then(|res| match face.whatami { - WhatAmI::Client => res.client_data_route(), - _ => res.peer_data_route(), - }) - .unwrap_or_else(|| compute_data_route(tables, expr, None, face.whatami)) - } - } - _ => 
res - .as_ref() - .and_then(|res| res.client_data_route()) - .unwrap_or_else(|| compute_data_route(tables, expr, None, face.whatami)), - } -} - -#[inline] -fn get_matching_pulls( - tables: &Tables, - res: &Option>, - expr: &mut RoutingExpr, -) -> Arc { - res.as_ref() - .and_then(|res| res.context.as_ref()) - .map(|ctx| ctx.matching_pulls.clone()) - .unwrap_or_else(|| compute_matching_pulls(tables, expr)) -} - -macro_rules! cache_data { - ( - $matching_pulls:expr, - $expr:expr, - $payload:expr - ) => { - for context in $matching_pulls.iter() { - get_mut_unchecked(&mut context.clone()) - .last_values - .insert($expr.full_expr().to_string(), $payload.clone()); - } - }; -} - -#[inline] -fn should_route( - tables: &Tables, - src_face: &FaceState, - outface: &Arc, - expr: &mut RoutingExpr, -) -> bool { - if src_face.id != outface.id - && match (src_face.mcast_group.as_ref(), outface.mcast_group.as_ref()) { - (Some(l), Some(r)) => l != r, - _ => true, - } - { - let dst_master = tables.whatami != WhatAmI::Router - || outface.whatami != WhatAmI::Peer - || tables.peers_net.is_none() - || tables.zid - == *tables.elect_router(expr.full_expr(), tables.get_router_links(outface.zid)); - - return dst_master - && (src_face.whatami != WhatAmI::Peer - || outface.whatami != WhatAmI::Peer - || tables.full_net(WhatAmI::Peer) - || tables.failover_brokering(src_face.zid, outface.zid)); - } - false -} - -#[cfg(feature = "stats")] -macro_rules! inc_stats { - ( - $face:expr, - $txrx:ident, - $space:ident, - $body:expr - ) => { - paste::paste! { - if let Some(stats) = $face.stats.as_ref() { - use zenoh_buffers::SplitBuffer; - match &$body { - PushBody::Put(p) => { - stats.[<$txrx _z_put_msgs>].[](1); - stats.[<$txrx _z_put_pl_bytes>].[](p.payload.len()); - } - PushBody::Del(_) => { - stats.[<$txrx _z_del_msgs>].[](1); - } - } - } - } - }; -} - -#[allow(clippy::too_many_arguments)] -pub fn full_reentrant_route_data( - tables_ref: &RwLock, - face: &FaceState, - expr: &WireExpr, - ext_qos: ext::QoSType, - mut payload: PushBody, - routing_context: u64, -) { - let tables = zread!(tables_ref); - match tables.get_mapping(face, &expr.scope, expr.mapping).cloned() { - Some(prefix) => { - log::trace!( - "Route data for res {}{}", - prefix.expr(), - expr.suffix.as_ref() - ); - let mut expr = RoutingExpr::new(&prefix, expr.suffix.as_ref()); - - #[cfg(feature = "stats")] - let admin = expr.full_expr().starts_with("@/"); - #[cfg(feature = "stats")] - if !admin { - inc_stats!(face, rx, user, payload) - } else { - inc_stats!(face, rx, admin, payload) - } - - if tables.whatami != WhatAmI::Router - || face.whatami != WhatAmI::Peer - || tables.peers_net.is_none() - || tables.zid - == *tables.elect_router(expr.full_expr(), tables.get_router_links(face.zid)) - { - let res = Resource::get_resource(&prefix, expr.suffix); - let route = get_data_route(&tables, face, &res, &mut expr, routing_context); - let matching_pulls = get_matching_pulls(&tables, &res, &mut expr); - - if !(route.is_empty() && matching_pulls.is_empty()) { - treat_timestamp!(&tables.hlc, payload, tables.drop_future_timestamp); - - if route.len() == 1 && matching_pulls.len() == 0 { - let (outface, key_expr, context) = route.values().next().unwrap(); - if should_route(&tables, face, outface, &mut expr) { - drop(tables); - #[cfg(feature = "stats")] - if !admin { - inc_stats!(face, tx, user, payload) - } else { - inc_stats!(face, tx, admin, payload) - } - - outface.primitives.send_push(Push { - wire_expr: key_expr.into(), - ext_qos, - ext_tstamp: None, - ext_nodeid: 
ext::NodeIdType { - node_id: context.unwrap_or(0), - }, - payload, - }) - } - } else { - if !matching_pulls.is_empty() { - let lock = zlock!(tables.pull_caches_lock); - cache_data!(matching_pulls, expr, payload); - drop(lock); - } - - if tables.whatami == WhatAmI::Router { - let route = route - .values() - .filter(|(outface, _key_expr, _context)| { - should_route(&tables, face, outface, &mut expr) - }) - .cloned() - .collect::>(); - - drop(tables); - for (outface, key_expr, context) in route { - #[cfg(feature = "stats")] - if !admin { - inc_stats!(face, tx, user, payload) - } else { - inc_stats!(face, tx, admin, payload) - } - - outface.primitives.send_push(Push { - wire_expr: key_expr, - ext_qos, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType { - node_id: context.unwrap_or(0), - }, - payload: payload.clone(), - }) - } - } else { - drop(tables); - for (outface, key_expr, context) in route.values() { - if face.id != outface.id - && match ( - face.mcast_group.as_ref(), - outface.mcast_group.as_ref(), - ) { - (Some(l), Some(r)) => l != r, - _ => true, - } - { - #[cfg(feature = "stats")] - if !admin { - inc_stats!(face, tx, user, payload) - } else { - inc_stats!(face, tx, admin, payload) - } - - outface.primitives.send_push(Push { - wire_expr: key_expr.into(), - ext_qos, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType { - node_id: context.unwrap_or(0), - }, - payload: payload.clone(), - }) - } - } - } - } - } - } - } - None => { - log::error!("Route data with unknown scope {}!", expr.scope); - } - } -} - -pub fn pull_data(tables_ref: &RwLock, face: &Arc, expr: WireExpr) { - let tables = zread!(tables_ref); - match tables.get_mapping(face, &expr.scope, expr.mapping) { - Some(prefix) => match Resource::get_resource(prefix, expr.suffix.as_ref()) { - Some(mut res) => { - let res = get_mut_unchecked(&mut res); - match res.session_ctxs.get_mut(&face.id) { - Some(ctx) => match &ctx.subs { - Some(_subinfo) => { - // let reliability = subinfo.reliability; - let lock = zlock!(tables.pull_caches_lock); - let route = get_mut_unchecked(ctx) - .last_values - .drain() - .map(|(name, sample)| { - ( - Resource::get_best_key(&tables.root_res, &name, face.id) - .to_owned(), - sample, - ) - }) - .collect::>(); - drop(lock); - drop(tables); - for (key_expr, payload) in route { - face.primitives.send_push(Push { - wire_expr: key_expr, - ext_qos: ext::QoSType::push_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - payload, - }); - } - } - None => { - log::error!( - "Pull data for unknown subscription {} (no info)!", - prefix.expr() + expr.suffix.as_ref() - ); - } - }, - None => { - log::error!( - "Pull data for unknown subscription {} (no context)!", - prefix.expr() + expr.suffix.as_ref() - ); - } - } - } - None => { - log::error!( - "Pull data for unknown subscription {} (no resource)!", - prefix.expr() + expr.suffix.as_ref() - ); - } - }, - None => { - log::error!("Pull data with unknown scope {}!", expr.scope); - } - }; -} diff --git a/zenoh/src/net/routing/queries.rs b/zenoh/src/net/routing/hat/queries.rs similarity index 53% rename from zenoh/src/net/routing/queries.rs rename to zenoh/src/net/routing/hat/queries.rs index c55cfc046c..3e4da6cab8 100644 --- a/zenoh/src/net/routing/queries.rs +++ b/zenoh/src/net/routing/hat/queries.rs @@ -11,46 +11,23 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::face::FaceState; +use super::super::dispatcher::face::FaceState; +use super::super::dispatcher::queries::*; +use super::super::dispatcher::resource::{Resource, RoutingContext, 
SessionContext}; +use super::super::dispatcher::tables::{Tables, TablesLock}; use super::network::Network; -use super::resource::{ - QueryRoute, QueryRoutes, QueryTargetQabl, QueryTargetQablSet, Resource, RoutingContext, - SessionContext, -}; -use super::router::{RoutingExpr, Tables, TablesLock}; -use async_trait::async_trait; -use ordered_float::OrderedFloat; +use super::HatTables; use petgraph::graph::NodeIndex; -use std::borrow::Cow; use std::collections::HashMap; -use std::convert::TryFrom; -use std::sync::{Arc, RwLockReadGuard, Weak}; -use zenoh_buffers::ZBuf; +use std::sync::{Arc, RwLockReadGuard}; use zenoh_protocol::{ - core::{ - key_expr::{ - include::{Includer, DEFAULT_INCLUDER}, - keyexpr, OwnedKeyExpr, - }, - Encoding, WhatAmI, WireExpr, ZenohId, + core::{key_expr::keyexpr, WhatAmI, WireExpr, ZenohId}, + network::declare::{ + common::ext::WireExprType, ext, queryable::ext::QueryableInfo, Declare, DeclareBody, + DeclareQueryable, UndeclareQueryable, }, - network::{ - declare::{ - common::ext::WireExprType, ext, queryable::ext::QueryableInfo, Declare, DeclareBody, - DeclareQueryable, UndeclareQueryable, - }, - request::{ext::TargetType, Request, RequestId}, - response::{self, ext::ResponderIdType, Response, ResponseFinal}, - }, - zenoh::{reply::ext::ConsolidationType, Reply, RequestBody, ResponseBody}, }; use zenoh_sync::get_mut_unchecked; -use zenoh_util::Timed; - -pub(crate) struct Query { - src_face: Arc, - src_qid: RequestId, -} #[cfg(feature = "complete_n")] #[inline] @@ -69,7 +46,7 @@ fn merge_qabl_infos(mut this: QueryableInfo, info: &QueryableInfo) -> QueryableI } fn local_router_qabl_info(tables: &Tables, res: &Arc) -> QueryableInfo { - let info = if tables.full_net(WhatAmI::Peer) { + let info = if tables.hat.full_net(WhatAmI::Peer) { res.context.as_ref().and_then(|ctx| { ctx.peer_qabls.iter().fold(None, |accu, (zid, info)| { if *zid != tables.zid { @@ -157,7 +134,7 @@ fn local_qabl_info(tables: &Tables, res: &Arc, face: &Arc) } else { None }; - if res.context.is_some() && tables.full_net(WhatAmI::Peer) { + if res.context.is_some() && tables.hat.full_net(WhatAmI::Peer) { info = res .context() .peer_qabls @@ -178,7 +155,7 @@ fn local_qabl_info(tables: &Tables, res: &Arc, face: &Arc) .fold(info, |accu, ctx| { if ctx.face.id != face.id && ctx.face.whatami != WhatAmI::Peer || face.whatami != WhatAmI::Peer - || tables.failover_brokering(ctx.face.zid, face.zid) + || tables.hat.failover_brokering(ctx.face.zid, face.zid) { if let Some(info) = ctx.qabl.as_ref() { Some(match accu { @@ -243,7 +220,7 @@ fn propagate_simple_queryable( res: &Arc, src_face: Option<&mut Arc>, ) { - let full_peers_net = tables.full_net(WhatAmI::Peer); + let full_peers_net = tables.hat.full_net(WhatAmI::Peer); let faces = tables.faces.values().cloned(); for mut dst_face in faces { let info = local_qabl_info(tables, res, &dst_face); @@ -259,7 +236,7 @@ fn propagate_simple_queryable( && (src_face.is_none() || src_face.as_ref().unwrap().whatami != WhatAmI::Peer || dst_face.whatami != WhatAmI::Peer - || tables.failover_brokering( + || tables.hat.failover_brokering( src_face.as_ref().unwrap().zid, dst_face.zid, )) @@ -307,7 +284,7 @@ fn propagate_sourced_queryable( source: &ZenohId, net_type: WhatAmI, ) { - let net = tables.get_net(net_type).unwrap(); + let net = tables.hat.get_net(net_type).unwrap(); match net.get_idx(source) { Some(tree_sid) => { if net.trees.len() > tree_sid.index() { @@ -357,7 +334,7 @@ fn register_router_queryable( .context_mut() .router_qabls .insert(router, *qabl_info); - 
tables.router_qabls.insert(res.clone()); + tables.hat.router_qabls.insert(res.clone()); } // Propagate queryable to routers @@ -371,7 +348,7 @@ fn register_router_queryable( ); } - if tables.full_net(WhatAmI::Peer) { + if tables.hat.full_net(WhatAmI::Peer) { // Propagate queryable to peers if face.is_none() || face.as_ref().unwrap().whatami != WhatAmI::Peer { let local_info = local_peer_qabl_info(tables, res); @@ -453,7 +430,7 @@ fn register_peer_queryable( .context_mut() .peer_qabls .insert(peer, *qabl_info); - tables.peer_qabls.insert(res.clone()); + tables.hat.peer_qabls.insert(res.clone()); } // Propagate queryable to peers @@ -607,7 +584,7 @@ pub fn declare_client_queryable( ); } WhatAmI::Peer => { - if wtables.full_net(WhatAmI::Peer) { + if wtables.hat.full_net(WhatAmI::Peer) { let local_details = local_peer_qabl_info(&wtables, &res); let zid = wtables.zid; register_peer_queryable( @@ -735,7 +712,7 @@ fn propagate_forget_simple_queryable(tables: &mut Tables, res: &mut Arc) { - if !tables.full_net(WhatAmI::Peer) + if !tables.hat.full_net(WhatAmI::Peer) && res.context().router_qabls.len() == 1 && res.context().router_qabls.contains_key(&tables.zid) { @@ -752,7 +729,7 @@ fn propagate_forget_simple_queryable_to_peers(tables: &mut Tables, res: &mut Arc && s.qabl.is_some() && (s.face.whatami == WhatAmI::Client || (s.face.whatami == WhatAmI::Peer - && tables.failover_brokering(s.face.zid, face.zid))) + && tables.hat.failover_brokering(s.face.zid, face.zid))) }) { let wire_expr = Resource::get_best_key(res, "", face.id); @@ -779,7 +756,7 @@ fn propagate_forget_sourced_queryable( source: &ZenohId, net_type: WhatAmI, ) { - let net = tables.get_net(net_type).unwrap(); + let net = tables.hat.get_net(net_type).unwrap(); match net.get_idx(source) { Some(tree_sid) => { if net.trees.len() > tree_sid.index() { @@ -820,9 +797,12 @@ fn unregister_router_queryable(tables: &mut Tables, res: &mut Arc, rou .remove(router); if res.context().router_qabls.is_empty() { - tables.router_qabls.retain(|qabl| !Arc::ptr_eq(qabl, res)); + tables + .hat + .router_qabls + .retain(|qabl| !Arc::ptr_eq(qabl, res)); - if tables.full_net(WhatAmI::Peer) { + if tables.hat.full_net(WhatAmI::Peer) { undeclare_peer_queryable(tables, None, res, &tables.zid.clone()); } propagate_forget_simple_queryable(tables, res); @@ -883,7 +863,7 @@ fn unregister_peer_queryable(tables: &mut Tables, res: &mut Arc, peer: get_mut_unchecked(res).context_mut().peer_qabls.remove(peer); if res.context().peer_qabls.is_empty() { - tables.peer_qabls.retain(|qabl| !Arc::ptr_eq(qabl, res)); + tables.hat.peer_qabls.retain(|qabl| !Arc::ptr_eq(qabl, res)); if tables.whatami == WhatAmI::Peer { propagate_forget_simple_queryable(tables, res); @@ -977,7 +957,7 @@ pub(crate) fn undeclare_client_queryable( } } WhatAmI::Peer => { - if tables.full_net(WhatAmI::Peer) { + if tables.hat.full_net(WhatAmI::Peer) { if client_qabls.is_empty() { undeclare_peer_queryable(tables, None, res, &tables.zid.clone()); } else { @@ -1056,7 +1036,7 @@ pub(crate) fn queries_new_face(tables: &mut Tables, face: &mut Arc) { match tables.whatami { WhatAmI::Router => { if face.whatami == WhatAmI::Client { - for qabl in tables.router_qabls.iter() { + for qabl in tables.hat.router_qabls.iter() { if qabl.context.is_some() { let info = local_qabl_info(tables, qabl, face); get_mut_unchecked(face) @@ -1075,15 +1055,15 @@ pub(crate) fn queries_new_face(tables: &mut Tables, face: &mut Arc) { }); } } - } else if face.whatami == WhatAmI::Peer && !tables.full_net(WhatAmI::Peer) { - for qabl in 
tables.router_qabls.iter() { + } else if face.whatami == WhatAmI::Peer && !tables.hat.full_net(WhatAmI::Peer) { + for qabl in tables.hat.router_qabls.iter() { if qabl.context.is_some() && (qabl.context().router_qabls.keys().any(|r| *r != tables.zid) || qabl.session_ctxs.values().any(|s| { s.qabl.is_some() && (s.face.whatami == WhatAmI::Client || (s.face.whatami == WhatAmI::Peer - && tables.failover_brokering(s.face.zid, face.zid))) + && tables.hat.failover_brokering(s.face.zid, face.zid))) })) { let info = local_qabl_info(tables, qabl, face); @@ -1106,9 +1086,9 @@ pub(crate) fn queries_new_face(tables: &mut Tables, face: &mut Arc) { } } WhatAmI::Peer => { - if tables.full_net(WhatAmI::Peer) { + if tables.hat.full_net(WhatAmI::Peer) { if face.whatami == WhatAmI::Client { - for qabl in &tables.peer_qabls { + for qabl in &tables.hat.peer_qabls { if qabl.context.is_some() { let info = local_qabl_info(tables, qabl, face); get_mut_unchecked(face) @@ -1160,7 +1140,7 @@ pub(crate) fn queries_remove_node(tables: &mut Tables, node: &ZenohId, net_type: match net_type { WhatAmI::Router => { let mut qabls = vec![]; - for res in tables.router_qabls.iter() { + for res in tables.hat.router_qabls.iter() { for qabl in res.context().router_qabls.keys() { if qabl == node { qabls.push(res.clone()); @@ -1181,7 +1161,7 @@ pub(crate) fn queries_remove_node(tables: &mut Tables, node: &ZenohId, net_type: } WhatAmI::Peer => { let mut qabls = vec![]; - for res in tables.router_qabls.iter() { + for res in tables.hat.router_qabls.iter() { for qabl in res.context().router_qabls.keys() { if qabl == node { qabls.push(res.clone()); @@ -1217,7 +1197,7 @@ pub(crate) fn queries_remove_node(tables: &mut Tables, node: &ZenohId, net_type: pub(crate) fn queries_linkstate_change(tables: &mut Tables, zid: &ZenohId, links: &[ZenohId]) { if let Some(src_face) = tables.get_face(zid) { - if tables.router_peers_failover_brokering + if tables.hat.router_peers_failover_brokering && tables.whatami == WhatAmI::Router && src_face.whatami == WhatAmI::Peer { @@ -1234,9 +1214,10 @@ pub(crate) fn queries_linkstate_change(tables: &mut Tables, zid: &ZenohId, links let dst_face = &mut get_mut_unchecked(ctx).face; if dst_face.whatami == WhatAmI::Peer && src_face.zid != dst_face.zid { if dst_face.local_qabls.contains_key(res) { - let forget = !Tables::failover_brokering_to(links, dst_face.zid) + let forget = !HatTables::failover_brokering_to(links, dst_face.zid) && { let ctx_links = tables + .hat .peers_net .as_ref() .map(|net| net.get_links(dst_face.zid)) @@ -1244,7 +1225,7 @@ pub(crate) fn queries_linkstate_change(tables: &mut Tables, zid: &ZenohId, links res.session_ctxs.values().any(|ctx2| { ctx2.face.whatami == WhatAmI::Peer && ctx2.qabl.is_some() - && Tables::failover_brokering_to( + && HatTables::failover_brokering_to( ctx_links, ctx2.face.zid, ) @@ -1264,7 +1245,7 @@ pub(crate) fn queries_linkstate_change(tables: &mut Tables, zid: &ZenohId, links get_mut_unchecked(dst_face).local_qabls.remove(res); } - } else if Tables::failover_brokering_to(links, ctx.face.zid) { + } else if HatTables::failover_brokering_to(links, ctx.face.zid) { let dst_face = &mut get_mut_unchecked(ctx).face; let info = local_qabl_info(tables, res, dst_face); get_mut_unchecked(dst_face) @@ -1298,14 +1279,14 @@ pub(crate) fn queries_tree_change( // propagate qabls to new childs for (tree_sid, tree_childs) in new_childs.iter().enumerate() { if !tree_childs.is_empty() { - let net = tables.get_net(net_type).unwrap(); + let net = tables.hat.get_net(net_type).unwrap(); let 
tree_idx = NodeIndex::new(tree_sid); if net.graph.contains_node(tree_idx) { let tree_id = net.graph[tree_idx].zid; let qabls_res = match net_type { - WhatAmI::Router => &tables.router_qabls, - _ => &tables.peer_qabls, + WhatAmI::Router => &tables.hat.router_qabls, + _ => &tables.hat.peer_qabls, }; for res in qabls_res { @@ -1332,1015 +1313,3 @@ pub(crate) fn queries_tree_change( // recompute routes compute_query_routes_from(tables, &mut tables.root_res.clone()); } - -#[inline] -#[allow(clippy::too_many_arguments)] -fn insert_target_for_qabls( - route: &mut QueryTargetQablSet, - expr: &mut RoutingExpr, - tables: &Tables, - net: &Network, - source: usize, - qabls: &HashMap, - complete: bool, -) { - if net.trees.len() > source { - for (qabl, qabl_info) in qabls { - if let Some(qabl_idx) = net.get_idx(qabl) { - if net.trees[source].directions.len() > qabl_idx.index() { - if let Some(direction) = net.trees[source].directions[qabl_idx.index()] { - if net.graph.contains_node(direction) { - if let Some(face) = tables.get_face(&net.graph[direction].zid) { - if net.distances.len() > qabl_idx.index() { - let key_expr = - Resource::get_best_key(expr.prefix, expr.suffix, face.id); - route.push(QueryTargetQabl { - direction: ( - face.clone(), - key_expr.to_owned(), - if source != 0 { - Some(source as u16) - } else { - None - }, - ), - complete: if complete { - qabl_info.complete as u64 - } else { - 0 - }, - distance: net.distances[qabl_idx.index()], - }); - } - } - } - } - } - } - } - } else { - log::trace!("Tree for node sid:{} not yet ready", source); - } -} - -lazy_static::lazy_static! { - static ref EMPTY_ROUTE: Arc = Arc::new(Vec::new()); -} -fn compute_query_route( - tables: &Tables, - expr: &mut RoutingExpr, - source: Option, - source_type: WhatAmI, -) -> Arc { - let mut route = QueryTargetQablSet::new(); - let key_expr = expr.full_expr(); - if key_expr.ends_with('/') { - return EMPTY_ROUTE.clone(); - } - log::trace!( - "compute_query_route({}, {:?}, {:?})", - key_expr, - source, - source_type - ); - let key_expr = match OwnedKeyExpr::try_from(key_expr) { - Ok(ke) => ke, - Err(e) => { - log::warn!("Invalid KE reached the system: {}", e); - return EMPTY_ROUTE.clone(); - } - }; - let res = Resource::get_resource(expr.prefix, expr.suffix); - let matches = res - .as_ref() - .and_then(|res| res.context.as_ref()) - .map(|ctx| Cow::from(&ctx.matches)) - .unwrap_or_else(|| Cow::from(Resource::get_matches(tables, &key_expr))); - - let master = tables.whatami != WhatAmI::Router - || !tables.full_net(WhatAmI::Peer) - || *tables.elect_router(&key_expr, tables.shared_nodes.iter()) == tables.zid; - - for mres in matches.iter() { - let mres = mres.upgrade().unwrap(); - let complete = DEFAULT_INCLUDER.includes(mres.expr().as_bytes(), key_expr.as_bytes()); - if tables.whatami == WhatAmI::Router { - if master || source_type == WhatAmI::Router { - let net = tables.routers_net.as_ref().unwrap(); - let router_source = match source_type { - WhatAmI::Router => source.unwrap(), - _ => net.idx.index(), - }; - insert_target_for_qabls( - &mut route, - expr, - tables, - net, - router_source, - &mres.context().router_qabls, - complete, - ); - } - - if (master || source_type != WhatAmI::Router) && tables.full_net(WhatAmI::Peer) { - let net = tables.peers_net.as_ref().unwrap(); - let peer_source = match source_type { - WhatAmI::Peer => source.unwrap(), - _ => net.idx.index(), - }; - insert_target_for_qabls( - &mut route, - expr, - tables, - net, - peer_source, - &mres.context().peer_qabls, - complete, - ); - } - } - - if 
tables.whatami == WhatAmI::Peer && tables.full_net(WhatAmI::Peer) { - let net = tables.peers_net.as_ref().unwrap(); - let peer_source = match source_type { - WhatAmI::Router | WhatAmI::Peer => source.unwrap(), - _ => net.idx.index(), - }; - insert_target_for_qabls( - &mut route, - expr, - tables, - net, - peer_source, - &mres.context().peer_qabls, - complete, - ); - } - - if tables.whatami != WhatAmI::Router || master || source_type == WhatAmI::Router { - for (sid, context) in &mres.session_ctxs { - if match tables.whatami { - WhatAmI::Router => context.face.whatami != WhatAmI::Router, - _ => source_type == WhatAmI::Client || context.face.whatami == WhatAmI::Client, - } { - let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, *sid); - if let Some(qabl_info) = context.qabl.as_ref() { - route.push(QueryTargetQabl { - direction: (context.face.clone(), key_expr.to_owned(), None), - complete: if complete { - qabl_info.complete as u64 - } else { - 0 - }, - distance: 0.5, - }); - } - } - } - } - } - route.sort_by_key(|qabl| OrderedFloat(qabl.distance)); - Arc::new(route) -} - -pub(super) fn compute_query_routes_(tables: &Tables, res: &Arc) -> QueryRoutes { - let mut routes = QueryRoutes { - routers_query_routes: vec![], - peers_query_routes: vec![], - peer_query_route: None, - client_query_route: None, - }; - let mut expr = RoutingExpr::new(res, ""); - if tables.whatami == WhatAmI::Router { - let indexes = tables - .routers_net - .as_ref() - .unwrap() - .graph - .node_indices() - .collect::>(); - let max_idx = indexes.iter().max().unwrap(); - routes - .routers_query_routes - .resize_with(max_idx.index() + 1, || Arc::new(QueryTargetQablSet::new())); - - for idx in &indexes { - routes.routers_query_routes[idx.index()] = - compute_query_route(tables, &mut expr, Some(idx.index()), WhatAmI::Router); - } - - routes.peer_query_route = Some(compute_query_route(tables, &mut expr, None, WhatAmI::Peer)); - } - if (tables.whatami == WhatAmI::Router || tables.whatami == WhatAmI::Peer) - && tables.full_net(WhatAmI::Peer) - { - let indexes = tables - .peers_net - .as_ref() - .unwrap() - .graph - .node_indices() - .collect::>(); - let max_idx = indexes.iter().max().unwrap(); - routes - .peers_query_routes - .resize_with(max_idx.index() + 1, || Arc::new(QueryTargetQablSet::new())); - - for idx in &indexes { - routes.peers_query_routes[idx.index()] = - compute_query_route(tables, &mut expr, Some(idx.index()), WhatAmI::Peer); - } - } - if tables.whatami == WhatAmI::Peer && !tables.full_net(WhatAmI::Peer) { - routes.client_query_route = Some(compute_query_route( - tables, - &mut expr, - None, - WhatAmI::Client, - )); - routes.peer_query_route = Some(compute_query_route(tables, &mut expr, None, WhatAmI::Peer)); - } - if tables.whatami == WhatAmI::Client { - routes.client_query_route = Some(compute_query_route( - tables, - &mut expr, - None, - WhatAmI::Client, - )); - } - routes -} - -pub(crate) fn compute_query_routes(tables: &mut Tables, res: &mut Arc) { - if res.context.is_some() { - let mut res_mut = res.clone(); - let res_mut = get_mut_unchecked(&mut res_mut); - let mut expr = RoutingExpr::new(res, ""); - if tables.whatami == WhatAmI::Router { - let indexes = tables - .routers_net - .as_ref() - .unwrap() - .graph - .node_indices() - .collect::>(); - let max_idx = indexes.iter().max().unwrap(); - let routers_query_routes = &mut res_mut.context_mut().routers_query_routes; - routers_query_routes.clear(); - routers_query_routes - .resize_with(max_idx.index() + 1, || Arc::new(QueryTargetQablSet::new())); - 
- for idx in &indexes { - routers_query_routes[idx.index()] = - compute_query_route(tables, &mut expr, Some(idx.index()), WhatAmI::Router); - } - - res_mut.context_mut().peer_query_route = - Some(compute_query_route(tables, &mut expr, None, WhatAmI::Peer)); - } - if (tables.whatami == WhatAmI::Router || tables.whatami == WhatAmI::Peer) - && tables.full_net(WhatAmI::Peer) - { - let indexes = tables - .peers_net - .as_ref() - .unwrap() - .graph - .node_indices() - .collect::>(); - let max_idx = indexes.iter().max().unwrap(); - let peers_query_routes = &mut res_mut.context_mut().peers_query_routes; - peers_query_routes.clear(); - peers_query_routes - .resize_with(max_idx.index() + 1, || Arc::new(QueryTargetQablSet::new())); - - for idx in &indexes { - peers_query_routes[idx.index()] = - compute_query_route(tables, &mut expr, Some(idx.index()), WhatAmI::Peer); - } - } - if tables.whatami == WhatAmI::Peer && !tables.full_net(WhatAmI::Peer) { - res_mut.context_mut().client_query_route = Some(compute_query_route( - tables, - &mut expr, - None, - WhatAmI::Client, - )); - res_mut.context_mut().peer_query_route = - Some(compute_query_route(tables, &mut expr, None, WhatAmI::Peer)); - } - if tables.whatami == WhatAmI::Client { - res_mut.context_mut().client_query_route = Some(compute_query_route( - tables, - &mut expr, - None, - WhatAmI::Client, - )); - } - } -} - -fn compute_query_routes_from(tables: &mut Tables, res: &mut Arc) { - compute_query_routes(tables, res); - let res = get_mut_unchecked(res); - for child in res.childs.values_mut() { - compute_query_routes_from(tables, child); - } -} - -pub(super) fn compute_matches_query_routes_( - tables: &Tables, - res: &Arc, -) -> Vec<(Arc, QueryRoutes)> { - let mut routes = vec![]; - if res.context.is_some() { - routes.push((res.clone(), compute_query_routes_(tables, res))); - for match_ in &res.context().matches { - let match_ = match_.upgrade().unwrap(); - if !Arc::ptr_eq(&match_, res) { - let match_routes = compute_query_routes_(tables, &match_); - routes.push((match_, match_routes)); - } - } - } - routes -} - -#[inline] -fn insert_pending_query(outface: &mut Arc, query: Arc) -> RequestId { - let outface_mut = get_mut_unchecked(outface); - outface_mut.next_qid += 1; - let qid = outface_mut.next_qid; - outface_mut.pending_queries.insert(qid, query); - qid -} - -#[inline] -fn should_route( - tables: &Tables, - src_face: &FaceState, - outface: &Arc, - expr: &mut RoutingExpr, -) -> bool { - if src_face.id != outface.id { - let dst_master = tables.whatami != WhatAmI::Router - || outface.whatami != WhatAmI::Peer - || tables.peers_net.is_none() - || tables.zid - == *tables.elect_router(expr.full_expr(), tables.get_router_links(outface.zid)); - - return dst_master - && (src_face.whatami != WhatAmI::Peer - || outface.whatami != WhatAmI::Peer - || tables.full_net(WhatAmI::Peer) - || tables.failover_brokering(src_face.zid, outface.zid)); - } - false -} - -#[inline] -fn compute_final_route( - tables: &Tables, - qabls: &Arc, - src_face: &Arc, - expr: &mut RoutingExpr, - target: &TargetType, - query: Arc, -) -> QueryRoute { - match target { - TargetType::All => { - let mut route = HashMap::new(); - for qabl in qabls.iter() { - if should_route(tables, src_face, &qabl.direction.0, expr) { - #[cfg(feature = "complete_n")] - { - route.entry(qabl.direction.0.id).or_insert_with(|| { - let mut direction = qabl.direction.clone(); - let qid = insert_pending_query(&mut direction.0, query.clone()); - (direction, qid, *target) - }); - } - #[cfg(not(feature = "complete_n"))] - 
{ - route.entry(qabl.direction.0.id).or_insert_with(|| { - let mut direction = qabl.direction.clone(); - let qid = insert_pending_query(&mut direction.0, query.clone()); - (direction, qid) - }); - } - } - } - route - } - TargetType::AllComplete => { - let mut route = HashMap::new(); - for qabl in qabls.iter() { - if qabl.complete > 0 && should_route(tables, src_face, &qabl.direction.0, expr) { - #[cfg(feature = "complete_n")] - { - route.entry(qabl.direction.0.id).or_insert_with(|| { - let mut direction = qabl.direction.clone(); - let qid = insert_pending_query(&mut direction.0, query.clone()); - (direction, qid, *target) - }); - } - #[cfg(not(feature = "complete_n"))] - { - route.entry(qabl.direction.0.id).or_insert_with(|| { - let mut direction = qabl.direction.clone(); - let qid = insert_pending_query(&mut direction.0, query.clone()); - (direction, qid) - }); - } - } - } - route - } - #[cfg(feature = "complete_n")] - TargetType::Complete(n) => { - let mut route = HashMap::new(); - let mut remaining = *n; - if src_face.whatami == WhatAmI::Peer && !tables.full_net(WhatAmI::Peer) { - let source_links = tables - .peers_net - .as_ref() - .map(|net| net.get_links(src_face.zid)) - .unwrap_or_default(); - for qabl in qabls.iter() { - if qabl.direction.0.id != src_face.id - && qabl.complete > 0 - && (qabl.direction.0.whatami != WhatAmI::Peer - || (tables.router_peers_failover_brokering - && Tables::failover_brokering_to( - source_links, - qabl.direction.0.zid, - ))) - { - let nb = std::cmp::min(qabl.complete, remaining); - route.entry(qabl.direction.0.id).or_insert_with(|| { - let mut direction = qabl.direction.clone(); - let qid = insert_pending_query(&mut direction.0, query.clone()); - (direction, qid, TargetType::Complete(nb)) - }); - remaining -= nb; - if remaining == 0 { - break; - } - } - } - } else { - for qabl in qabls.iter() { - if qabl.direction.0.id != src_face.id && qabl.complete > 0 { - let nb = std::cmp::min(qabl.complete, remaining); - route.entry(qabl.direction.0.id).or_insert_with(|| { - let mut direction = qabl.direction.clone(); - let qid = insert_pending_query(&mut direction.0, query.clone()); - (direction, qid, TargetType::Complete(nb)) - }); - remaining -= nb; - if remaining == 0 { - break; - } - } - } - } - route - } - TargetType::BestMatching => { - if let Some(qabl) = qabls - .iter() - .find(|qabl| qabl.direction.0.id != src_face.id && qabl.complete > 0) - { - let mut route = HashMap::new(); - #[cfg(feature = "complete_n")] - { - let mut direction = qabl.direction.clone(); - let qid = insert_pending_query(&mut direction.0, query); - route.insert(direction.0.id, (direction, qid, *target)); - } - #[cfg(not(feature = "complete_n"))] - { - let mut direction = qabl.direction.clone(); - let qid = insert_pending_query(&mut direction.0, query); - route.insert(direction.0.id, (direction, qid)); - } - route - } else { - compute_final_route(tables, qabls, src_face, expr, &TargetType::All, query) - } - } - } -} - -#[inline] -fn compute_local_replies( - tables: &Tables, - prefix: &Arc, - suffix: &str, - face: &Arc, -) -> Vec<(WireExpr<'static>, ZBuf)> { - let mut result = vec![]; - // Only the first routing point in the query route - // should return the liveliness tokens - if face.whatami == WhatAmI::Client { - let key_expr = prefix.expr() + suffix; - let key_expr = match OwnedKeyExpr::try_from(key_expr) { - Ok(ke) => ke, - Err(e) => { - log::warn!("Invalid KE reached the system: {}", e); - return result; - } - }; - if key_expr.starts_with(super::PREFIX_LIVELINESS) { - let res = 
Resource::get_resource(prefix, suffix); - let matches = res - .as_ref() - .and_then(|res| res.context.as_ref()) - .map(|ctx| Cow::from(&ctx.matches)) - .unwrap_or_else(|| Cow::from(Resource::get_matches(tables, &key_expr))); - for mres in matches.iter() { - let mres = mres.upgrade().unwrap(); - if (mres.context.is_some() - && (!mres.context().router_subs.is_empty() - || !mres.context().peer_subs.is_empty())) - || mres.session_ctxs.values().any(|ctx| ctx.subs.is_some()) - { - result.push((Resource::get_best_key(&mres, "", face.id), ZBuf::default())); - } - } - } - } - result -} - -#[derive(Clone)] -struct QueryCleanup { - tables: Arc, - face: Weak, - qid: RequestId, -} - -#[async_trait] -impl Timed for QueryCleanup { - async fn run(&mut self) { - if let Some(mut face) = self.face.upgrade() { - let tables_lock = zwrite!(self.tables.tables); - if let Some(query) = get_mut_unchecked(&mut face) - .pending_queries - .remove(&self.qid) - { - drop(tables_lock); - log::warn!( - "Didn't receive final reply {}:{} from {}: Timeout!", - query.src_face, - self.qid, - face - ); - finalize_pending_query(query); - } - } - } -} - -pub(super) fn disable_matches_query_routes(_tables: &mut Tables, res: &mut Arc) { - if res.context.is_some() { - get_mut_unchecked(res).context_mut().valid_query_routes = false; - for match_ in &res.context().matches { - let mut match_ = match_.upgrade().unwrap(); - if !Arc::ptr_eq(&match_, res) { - get_mut_unchecked(&mut match_) - .context_mut() - .valid_query_routes = false; - } - } - } -} - -#[inline] -fn get_query_route( - tables: &Tables, - face: &FaceState, - res: &Option>, - expr: &mut RoutingExpr, - routing_context: u64, -) -> Arc { - match tables.whatami { - WhatAmI::Router => match face.whatami { - WhatAmI::Router => { - let routers_net = tables.routers_net.as_ref().unwrap(); - let local_context = routers_net.get_local_context(routing_context, face.link_id); - res.as_ref() - .and_then(|res| res.routers_query_route(local_context)) - .unwrap_or_else(|| { - compute_query_route(tables, expr, Some(local_context), face.whatami) - }) - } - WhatAmI::Peer => { - if tables.full_net(WhatAmI::Peer) { - let peers_net = tables.peers_net.as_ref().unwrap(); - let local_context = peers_net.get_local_context(routing_context, face.link_id); - res.as_ref() - .and_then(|res| res.peers_query_route(local_context)) - .unwrap_or_else(|| { - compute_query_route(tables, expr, Some(local_context), face.whatami) - }) - } else { - res.as_ref() - .and_then(|res| res.peer_query_route()) - .unwrap_or_else(|| compute_query_route(tables, expr, None, face.whatami)) - } - } - _ => res - .as_ref() - .and_then(|res| res.routers_query_route(0)) - .unwrap_or_else(|| compute_query_route(tables, expr, None, face.whatami)), - }, - WhatAmI::Peer => { - if tables.full_net(WhatAmI::Peer) { - match face.whatami { - WhatAmI::Router | WhatAmI::Peer => { - let peers_net = tables.peers_net.as_ref().unwrap(); - let local_context = - peers_net.get_local_context(routing_context, face.link_id); - res.as_ref() - .and_then(|res| res.peers_query_route(local_context)) - .unwrap_or_else(|| { - compute_query_route(tables, expr, Some(local_context), face.whatami) - }) - } - _ => res - .as_ref() - .and_then(|res| res.peers_query_route(0)) - .unwrap_or_else(|| compute_query_route(tables, expr, None, face.whatami)), - } - } else { - res.as_ref() - .and_then(|res| match face.whatami { - WhatAmI::Client => res.client_query_route(), - _ => res.peer_query_route(), - }) - .unwrap_or_else(|| compute_query_route(tables, expr, None, 
face.whatami)) - } - } - _ => res - .as_ref() - .and_then(|res| res.client_query_route()) - .unwrap_or_else(|| compute_query_route(tables, expr, None, face.whatami)), - } -} - -#[cfg(feature = "stats")] -macro_rules! inc_req_stats { - ( - $face:expr, - $txrx:ident, - $space:ident, - $body:expr - ) => { - paste::paste! { - if let Some(stats) = $face.stats.as_ref() { - use zenoh_buffers::SplitBuffer; - match &$body { - RequestBody::Put(p) => { - stats.[<$txrx _z_put_msgs>].[](1); - stats.[<$txrx _z_put_pl_bytes>].[](p.payload.len()); - } - RequestBody::Del(_) => { - stats.[<$txrx _z_del_msgs>].[](1); - } - RequestBody::Query(q) => { - stats.[<$txrx _z_query_msgs>].[](1); - stats.[<$txrx _z_query_pl_bytes>].[]( - q.ext_body.as_ref().map(|b| b.payload.len()).unwrap_or(0), - ); - } - RequestBody::Pull(_) => (), - } - } - } - }; -} - -#[cfg(feature = "stats")] -macro_rules! inc_res_stats { - ( - $face:expr, - $txrx:ident, - $space:ident, - $body:expr - ) => { - paste::paste! { - if let Some(stats) = $face.stats.as_ref() { - use zenoh_buffers::SplitBuffer; - match &$body { - ResponseBody::Put(p) => { - stats.[<$txrx _z_put_msgs>].[](1); - stats.[<$txrx _z_put_pl_bytes>].[](p.payload.len()); - } - ResponseBody::Reply(r) => { - stats.[<$txrx _z_reply_msgs>].[](1); - stats.[<$txrx _z_reply_pl_bytes>].[](r.payload.len()); - } - ResponseBody::Err(e) => { - stats.[<$txrx _z_reply_msgs>].[](1); - stats.[<$txrx _z_reply_pl_bytes>].[]( - e.ext_body.as_ref().map(|b| b.payload.len()).unwrap_or(0), - ); - } - ResponseBody::Ack(_) => (), - } - } - } - }; -} - -#[allow(clippy::too_many_arguments)] -pub fn route_query( - tables_ref: &Arc, - face: &Arc, - expr: &WireExpr, - qid: RequestId, - target: TargetType, - body: RequestBody, - routing_context: u64, -) { - let rtables = zread!(tables_ref.tables); - match rtables.get_mapping(face, &expr.scope, expr.mapping) { - Some(prefix) => { - log::debug!( - "Route query {}:{} for res {}{}", - face, - qid, - prefix.expr(), - expr.suffix.as_ref(), - ); - let prefix = prefix.clone(); - let mut expr = RoutingExpr::new(&prefix, expr.suffix.as_ref()); - - #[cfg(feature = "stats")] - let admin = expr.full_expr().starts_with("@/"); - #[cfg(feature = "stats")] - if !admin { - inc_req_stats!(face, rx, user, body) - } else { - inc_req_stats!(face, rx, admin, body) - } - - if rtables.whatami != WhatAmI::Router - || face.whatami != WhatAmI::Peer - || rtables.peers_net.is_none() - || rtables.zid - == *rtables.elect_router(expr.full_expr(), rtables.get_router_links(face.zid)) - { - let res = Resource::get_resource(&prefix, expr.suffix); - let route = get_query_route(&rtables, face, &res, &mut expr, routing_context); - - let query = Arc::new(Query { - src_face: face.clone(), - src_qid: qid, - }); - - let queries_lock = zwrite!(tables_ref.queries_lock); - let route = compute_final_route(&rtables, &route, face, &mut expr, &target, query); - let local_replies = compute_local_replies(&rtables, &prefix, expr.suffix, face); - let zid = rtables.zid; - - drop(queries_lock); - drop(rtables); - - for (expr, payload) in local_replies { - let payload = ResponseBody::Reply(Reply { - timestamp: None, - encoding: Encoding::default(), - ext_sinfo: None, - ext_consolidation: ConsolidationType::default(), - #[cfg(feature = "shared-memory")] - ext_shm: None, - ext_unknown: vec![], - payload, - }); - #[cfg(feature = "stats")] - if !admin { - inc_res_stats!(face, tx, user, payload) - } else { - inc_res_stats!(face, tx, admin, payload) - } - - face.primitives.clone().send_response(Response { - rid: qid, - 
wire_expr: expr, - payload, - ext_qos: response::ext::QoSType::declare_default(), - ext_tstamp: None, - ext_respid: Some(response::ext::ResponderIdType { - zid, - eid: 0, // TODO - }), - }); - } - - if route.is_empty() { - log::debug!( - "Send final reply {}:{} (no matching queryables or not master)", - face, - qid - ); - face.primitives.clone().send_response_final(ResponseFinal { - rid: qid, - ext_qos: response::ext::QoSType::response_final_default(), - ext_tstamp: None, - }); - } else { - // let timer = tables.timer.clone(); - // let timeout = tables.queries_default_timeout; - #[cfg(feature = "complete_n")] - { - for ((outface, key_expr, context), qid, t) in route.values() { - // timer.add(TimedEvent::once( - // Instant::now() + timeout, - // QueryCleanup { - // tables: tables_ref.clone(), - // face: Arc::downgrade(&outface), - // *qid, - // }, - // )); - #[cfg(feature = "stats")] - if !admin { - inc_req_stats!(outface, tx, user, body) - } else { - inc_req_stats!(outface, tx, admin, body) - } - - log::trace!("Propagate query {}:{} to {}", face, qid, outface); - outface.primitives.send_request(Request { - id: *qid, - wire_expr: key_expr.into(), - ext_qos: ext::QoSType::request_default(), // TODO - ext_tstamp: None, - ext_nodeid: ext::NodeIdType { - node_id: context.unwrap_or(0), - }, - ext_target: *t, - ext_budget: None, - ext_timeout: None, - payload: body.clone(), - }); - } - } - - #[cfg(not(feature = "complete_n"))] - { - for ((outface, key_expr, context), qid) in route.values() { - // timer.add(TimedEvent::once( - // Instant::now() + timeout, - // QueryCleanup { - // tables: tables_ref.clone(), - // face: Arc::downgrade(&outface), - // *qid, - // }, - // )); - #[cfg(feature = "stats")] - if !admin { - inc_req_stats!(outface, tx, user, body) - } else { - inc_req_stats!(outface, tx, admin, body) - } - - log::trace!("Propagate query {}:{} to {}", face, qid, outface); - outface.primitives.send_request(Request { - id: *qid, - wire_expr: key_expr.into(), - ext_qos: ext::QoSType::request_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType { - node_id: context.unwrap_or(0), - }, - ext_target: target, - ext_budget: None, - ext_timeout: None, - payload: body.clone(), - }); - } - } - } - } else { - log::debug!("Send final reply {}:{} (not master)", face, qid); - drop(rtables); - face.primitives.clone().send_response_final(ResponseFinal { - rid: qid, - ext_qos: response::ext::QoSType::response_final_default(), - ext_tstamp: None, - }); - } - } - None => { - log::error!( - "Route query with unknown scope {}! 
Send final reply.", - expr.scope - ); - drop(rtables); - face.primitives.clone().send_response_final(ResponseFinal { - rid: qid, - ext_qos: response::ext::QoSType::response_final_default(), - ext_tstamp: None, - }); - } - } -} - -#[allow(clippy::too_many_arguments)] -pub(crate) fn route_send_response( - tables_ref: &Arc, - face: &mut Arc, - qid: RequestId, - ext_respid: Option, - key_expr: WireExpr, - body: ResponseBody, -) { - let queries_lock = zread!(tables_ref.queries_lock); - #[cfg(feature = "stats")] - let admin = key_expr.as_str().starts_with("@/"); - #[cfg(feature = "stats")] - if !admin { - inc_res_stats!(face, rx, user, body) - } else { - inc_res_stats!(face, rx, admin, body) - } - - match face.pending_queries.get(&qid) { - Some(query) => { - drop(queries_lock); - - #[cfg(feature = "stats")] - if !admin { - inc_res_stats!(query.src_face, tx, user, body) - } else { - inc_res_stats!(query.src_face, tx, admin, body) - } - - query.src_face.primitives.clone().send_response(Response { - rid: query.src_qid, - wire_expr: key_expr.to_owned(), - payload: body, - ext_qos: response::ext::QoSType::response_default(), - ext_tstamp: None, - ext_respid, - }); - } - None => log::warn!( - "Route reply {}:{} from {}: Query nof found!", - face, - qid, - face - ), - } -} - -pub(crate) fn route_send_response_final( - tables_ref: &Arc, - face: &mut Arc, - qid: RequestId, -) { - let queries_lock = zwrite!(tables_ref.queries_lock); - match get_mut_unchecked(face).pending_queries.remove(&qid) { - Some(query) => { - drop(queries_lock); - log::debug!( - "Received final reply {}:{} from {}", - query.src_face, - qid, - face - ); - finalize_pending_query(query); - } - None => log::warn!( - "Route final reply {}:{} from {}: Query nof found!", - face, - qid, - face - ), - } -} - -pub(crate) fn finalize_pending_queries(tables_ref: &TablesLock, face: &mut Arc) { - let queries_lock = zwrite!(tables_ref.queries_lock); - for (_, query) in get_mut_unchecked(face).pending_queries.drain() { - finalize_pending_query(query); - } - drop(queries_lock); -} - -pub(crate) fn finalize_pending_query(query: Arc) { - if let Some(query) = Arc::into_inner(query) { - log::debug!("Propagate final reply {}:{}", query.src_face, query.src_qid); - query - .src_face - .primitives - .clone() - .send_response_final(ResponseFinal { - rid: query.src_qid, - ext_qos: response::ext::QoSType::response_final_default(), - ext_tstamp: None, - }); - } -} diff --git a/zenoh/src/net/routing/mod.rs b/zenoh/src/net/routing/mod.rs index 886b0b50dc..c0da3bc0a0 100644 --- a/zenoh/src/net/routing/mod.rs +++ b/zenoh/src/net/routing/mod.rs @@ -17,11 +17,8 @@ //! This module is intended for Zenoh's internal use. //! //! 
[Click here for Zenoh's documentation](../zenoh/index.html) -pub mod face; -pub mod network; -pub mod pubsub; -pub mod queries; -pub mod resource; +pub mod dispatcher; +pub mod hat; pub mod router; use super::runtime; diff --git a/zenoh/src/net/routing/router.rs b/zenoh/src/net/routing/router.rs index dbf687ba79..06745461e5 100644 --- a/zenoh/src/net/routing/router.rs +++ b/zenoh/src/net/routing/router.rs @@ -11,472 +11,35 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::face::{Face, FaceState}; -use super::network::{shared_nodes, Network}; -pub use super::pubsub::*; -pub use super::queries::*; -pub use super::resource::*; +use super::dispatcher::face::{Face, FaceState}; +pub use super::dispatcher::pubsub::*; +pub use super::dispatcher::queries::*; +pub use super::dispatcher::resource::*; +use super::dispatcher::tables::Tables; +use super::dispatcher::tables::TablesLock; +use super::hat::network::{shared_nodes, Network}; +pub use super::hat::pubsub::*; +pub use super::hat::queries::*; use super::runtime::Runtime; use crate::net::codec::Zenoh080Routing; use crate::net::protocol::linkstate::LinkStateList; -use async_std::task::JoinHandle; use std::any::Any; -use std::collections::hash_map::DefaultHasher; -use std::collections::{HashMap, HashSet}; -use std::hash::Hasher; use std::str::FromStr; -use std::sync::{Arc, Weak}; +use std::sync::Arc; use std::sync::{Mutex, RwLock}; use std::time::Duration; use uhlc::HLC; use zenoh_link::Link; use zenoh_protocol::common::ZExtBody; -use zenoh_protocol::core::{ExprId, WhatAmI, WhatAmIMatcher, ZenohId}; +use zenoh_protocol::core::{WhatAmI, WhatAmIMatcher, ZenohId}; use zenoh_protocol::network::oam::id::OAM_LINKSTATE; -use zenoh_protocol::network::{Mapping, NetworkBody, NetworkMessage}; -#[cfg(feature = "stats")] -use zenoh_transport::stats::TransportStats; +use zenoh_protocol::network::{NetworkBody, NetworkMessage}; use zenoh_transport::{ DeMux, DummyPrimitives, McastMux, Mux, Primitives, TransportMulticast, TransportPeer, TransportPeerEventHandler, TransportUnicast, }; // use zenoh_collections::Timer; -use zenoh_core::zconfigurable; use zenoh_result::ZResult; -use zenoh_sync::get_mut_unchecked; - -zconfigurable! 
{ - static ref TREES_COMPUTATION_DELAY: u64 = 100; -} - -pub(crate) struct RoutingExpr<'a> { - pub(crate) prefix: &'a Arc, - pub(crate) suffix: &'a str, - full: Option, -} - -impl<'a> RoutingExpr<'a> { - #[inline] - pub(crate) fn new(prefix: &'a Arc, suffix: &'a str) -> Self { - RoutingExpr { - prefix, - suffix, - full: None, - } - } - - #[inline] - pub(crate) fn full_expr(&mut self) -> &str { - if self.full.is_none() { - self.full = Some(self.prefix.expr() + self.suffix); - } - self.full.as_ref().unwrap() - } -} - -pub struct Tables { - pub(crate) zid: ZenohId, - pub(crate) whatami: WhatAmI, - face_counter: usize, - #[allow(dead_code)] - pub(crate) hlc: Option>, - pub(crate) drop_future_timestamp: bool, - pub(crate) router_peers_failover_brokering: bool, - // pub(crate) timer: Timer, - // pub(crate) queries_default_timeout: Duration, - pub(crate) root_res: Arc, - pub(crate) faces: HashMap>, - pub(crate) mcast_groups: Vec>, - pub(crate) mcast_faces: Vec>, - pub(crate) pull_caches_lock: Mutex<()>, - pub(crate) router_subs: HashSet>, - pub(crate) peer_subs: HashSet>, - pub(crate) router_qabls: HashSet>, - pub(crate) peer_qabls: HashSet>, - pub(crate) routers_net: Option, - pub(crate) peers_net: Option, - pub(crate) shared_nodes: Vec, - pub(crate) routers_trees_task: Option>, - pub(crate) peers_trees_task: Option>, -} - -impl Tables { - pub fn new( - zid: ZenohId, - whatami: WhatAmI, - hlc: Option>, - drop_future_timestamp: bool, - router_peers_failover_brokering: bool, - _queries_default_timeout: Duration, - ) -> Self { - Tables { - zid, - whatami, - face_counter: 0, - hlc, - drop_future_timestamp, - router_peers_failover_brokering, - // timer: Timer::new(true), - // queries_default_timeout, - root_res: Resource::root(), - faces: HashMap::new(), - mcast_groups: vec![], - mcast_faces: vec![], - pull_caches_lock: Mutex::new(()), - router_subs: HashSet::new(), - peer_subs: HashSet::new(), - router_qabls: HashSet::new(), - peer_qabls: HashSet::new(), - routers_net: None, - peers_net: None, - shared_nodes: vec![], - routers_trees_task: None, - peers_trees_task: None, - } - } - - #[doc(hidden)] - pub fn _get_root(&self) -> &Arc { - &self.root_res - } - - pub fn print(&self) -> String { - Resource::print_tree(&self.root_res) - } - - #[inline] - #[allow(clippy::trivially_copy_pass_by_ref)] - pub(crate) fn get_mapping<'a>( - &'a self, - face: &'a FaceState, - expr_id: &ExprId, - mapping: Mapping, - ) -> Option<&'a Arc> { - match expr_id { - 0 => Some(&self.root_res), - expr_id => face.get_mapping(expr_id, mapping), - } - } - - #[inline] - pub(crate) fn get_net(&self, net_type: WhatAmI) -> Option<&Network> { - match net_type { - WhatAmI::Router => self.routers_net.as_ref(), - WhatAmI::Peer => self.peers_net.as_ref(), - _ => None, - } - } - - #[inline] - pub(crate) fn full_net(&self, net_type: WhatAmI) -> bool { - match net_type { - WhatAmI::Router => self - .routers_net - .as_ref() - .map(|net| net.full_linkstate) - .unwrap_or(false), - WhatAmI::Peer => self - .peers_net - .as_ref() - .map(|net| net.full_linkstate) - .unwrap_or(false), - _ => false, - } - } - - #[inline] - pub(crate) fn get_face(&self, zid: &ZenohId) -> Option<&Arc> { - self.faces.values().find(|face| face.zid == *zid) - } - - #[inline] - pub(crate) fn get_router_links(&self, peer: ZenohId) -> impl Iterator + '_ { - self.peers_net - .as_ref() - .unwrap() - .get_links(peer) - .iter() - .filter(move |nid| { - if let Some(node) = self.routers_net.as_ref().unwrap().get_node(nid) { - node.whatami.unwrap_or(WhatAmI::Router) == 
WhatAmI::Router - } else { - false - } - }) - } - - #[inline] - pub(crate) fn elect_router<'a>( - &'a self, - key_expr: &str, - mut routers: impl Iterator, - ) -> &'a ZenohId { - match routers.next() { - None => &self.zid, - Some(router) => { - let hash = |r: &ZenohId| { - let mut hasher = DefaultHasher::new(); - for b in key_expr.as_bytes() { - hasher.write_u8(*b); - } - for b in &r.to_le_bytes()[..r.size()] { - hasher.write_u8(*b); - } - hasher.finish() - }; - let mut res = router; - let mut h = None; - for router2 in routers { - let h2 = hash(router2); - if h2 > *h.get_or_insert_with(|| hash(res)) { - res = router2; - h = Some(h2); - } - } - res - } - } - } - - #[inline] - pub(crate) fn failover_brokering_to(source_links: &[ZenohId], dest: ZenohId) -> bool { - // if source_links is empty then gossip is probably disabled in source peer - !source_links.is_empty() && !source_links.contains(&dest) - } - - #[inline] - pub(crate) fn failover_brokering(&self, peer1: ZenohId, peer2: ZenohId) -> bool { - self.router_peers_failover_brokering - && self - .peers_net - .as_ref() - .map(|net| Tables::failover_brokering_to(net.get_links(peer1), peer2)) - .unwrap_or(false) - } - - fn open_net_face( - &mut self, - zid: ZenohId, - whatami: WhatAmI, - #[cfg(feature = "stats")] stats: Arc, - primitives: Arc, - link_id: usize, - ) -> Weak { - let fid = self.face_counter; - self.face_counter += 1; - let mut newface = self - .faces - .entry(fid) - .or_insert_with(|| { - FaceState::new( - fid, - zid, - whatami, - #[cfg(feature = "stats")] - Some(stats), - primitives.clone(), - link_id, - None, - ) - }) - .clone(); - log::debug!("New {}", newface); - - pubsub_new_face(self, &mut newface); - queries_new_face(self, &mut newface); - - Arc::downgrade(&newface) - } - - pub fn open_face( - &mut self, - zid: ZenohId, - whatami: WhatAmI, - primitives: Arc, - ) -> Weak { - let fid = self.face_counter; - self.face_counter += 1; - let mut newface = self - .faces - .entry(fid) - .or_insert_with(|| { - FaceState::new( - fid, - zid, - whatami, - #[cfg(feature = "stats")] - None, - primitives.clone(), - 0, - None, - ) - }) - .clone(); - log::debug!("New {}", newface); - - pubsub_new_face(self, &mut newface); - queries_new_face(self, &mut newface); - - Arc::downgrade(&newface) - } - - fn compute_routes(&mut self, res: &mut Arc) { - compute_data_routes(self, res); - compute_query_routes(self, res); - } - - pub(crate) fn compute_matches_routes(&mut self, res: &mut Arc) { - if res.context.is_some() { - self.compute_routes(res); - - let resclone = res.clone(); - for match_ in &mut get_mut_unchecked(res).context_mut().matches { - let match_ = &mut match_.upgrade().unwrap(); - if !Arc::ptr_eq(match_, &resclone) && match_.context.is_some() { - self.compute_routes(match_); - } - } - } - } - - pub(crate) fn schedule_compute_trees( - &mut self, - tables_ref: Arc, - net_type: WhatAmI, - ) { - log::trace!("Schedule computations"); - if (net_type == WhatAmI::Router && self.routers_trees_task.is_none()) - || (net_type == WhatAmI::Peer && self.peers_trees_task.is_none()) - { - let task = Some(async_std::task::spawn(async move { - async_std::task::sleep(std::time::Duration::from_millis(*TREES_COMPUTATION_DELAY)) - .await; - let mut tables = zwrite!(tables_ref.tables); - - log::trace!("Compute trees"); - let new_childs = match net_type { - WhatAmI::Router => tables.routers_net.as_mut().unwrap().compute_trees(), - _ => tables.peers_net.as_mut().unwrap().compute_trees(), - }; - - log::trace!("Compute routes"); - pubsub_tree_change(&mut tables, 
&new_childs, net_type); - queries_tree_change(&mut tables, &new_childs, net_type); - - log::trace!("Computations completed"); - match net_type { - WhatAmI::Router => tables.routers_trees_task = None, - _ => tables.peers_trees_task = None, - }; - })); - match net_type { - WhatAmI::Router => self.routers_trees_task = task, - _ => self.peers_trees_task = task, - }; - } - } -} - -pub fn close_face(tables: &TablesLock, face: &Weak) { - match face.upgrade() { - Some(mut face) => { - log::debug!("Close {}", face); - finalize_pending_queries(tables, &mut face); - - let ctrl_lock = zlock!(tables.ctrl_lock); - let mut wtables = zwrite!(tables.tables); - let mut face_clone = face.clone(); - let face = get_mut_unchecked(&mut face); - for res in face.remote_mappings.values_mut() { - get_mut_unchecked(res).session_ctxs.remove(&face.id); - Resource::clean(res); - } - face.remote_mappings.clear(); - for res in face.local_mappings.values_mut() { - get_mut_unchecked(res).session_ctxs.remove(&face.id); - Resource::clean(res); - } - face.local_mappings.clear(); - - let mut subs_matches = vec![]; - for mut res in face.remote_subs.drain() { - get_mut_unchecked(&mut res).session_ctxs.remove(&face.id); - undeclare_client_subscription(&mut wtables, &mut face_clone, &mut res); - - if res.context.is_some() { - for match_ in &res.context().matches { - let mut match_ = match_.upgrade().unwrap(); - if !Arc::ptr_eq(&match_, &res) { - get_mut_unchecked(&mut match_) - .context_mut() - .valid_data_routes = false; - subs_matches.push(match_); - } - } - get_mut_unchecked(&mut res).context_mut().valid_data_routes = false; - subs_matches.push(res); - } - } - - let mut qabls_matches = vec![]; - for mut res in face.remote_qabls.drain() { - get_mut_unchecked(&mut res).session_ctxs.remove(&face.id); - undeclare_client_queryable(&mut wtables, &mut face_clone, &mut res); - - if res.context.is_some() { - for match_ in &res.context().matches { - let mut match_ = match_.upgrade().unwrap(); - if !Arc::ptr_eq(&match_, &res) { - get_mut_unchecked(&mut match_) - .context_mut() - .valid_query_routes = false; - qabls_matches.push(match_); - } - } - get_mut_unchecked(&mut res).context_mut().valid_query_routes = false; - qabls_matches.push(res); - } - } - drop(wtables); - - let mut matches_data_routes = vec![]; - let mut matches_query_routes = vec![]; - let rtables = zread!(tables.tables); - for _match in subs_matches.drain(..) { - matches_data_routes.push((_match.clone(), compute_data_routes_(&rtables, &_match))); - } - for _match in qabls_matches.drain(..) 
{ - matches_query_routes - .push((_match.clone(), compute_query_routes_(&rtables, &_match))); - } - drop(rtables); - - let mut wtables = zwrite!(tables.tables); - for (mut res, data_routes) in matches_data_routes { - get_mut_unchecked(&mut res) - .context_mut() - .update_data_routes(data_routes); - Resource::clean(&mut res); - } - for (mut res, query_routes) in matches_query_routes { - get_mut_unchecked(&mut res) - .context_mut() - .update_query_routes(query_routes); - Resource::clean(&mut res); - } - wtables.faces.remove(&face.id); - drop(wtables); - drop(ctrl_lock); - } - None => log::error!("Face already closed!"), - } -} - -pub struct TablesLock { - pub tables: RwLock, - pub ctrl_lock: Mutex<()>, - pub queries_lock: RwLock<()>, -} pub struct Router { whatami: WhatAmI, @@ -522,7 +85,7 @@ impl Router { ) { let mut tables = zwrite!(self.tables.tables); if router_full_linkstate | gossip { - tables.routers_net = Some(Network::new( + tables.hat.routers_net = Some(Network::new( "[Routers network]".to_string(), tables.zid, runtime.clone(), @@ -534,7 +97,7 @@ impl Router { )); } if peer_full_linkstate | gossip { - tables.peers_net = Some(Network::new( + tables.hat.peers_net = Some(Network::new( "[Peers network]".to_string(), tables.zid, runtime, @@ -546,9 +109,9 @@ impl Router { )); } if router_full_linkstate && peer_full_linkstate { - tables.shared_nodes = shared_nodes( - tables.routers_net.as_ref().unwrap(), - tables.peers_net.as_ref().unwrap(), + tables.hat.shared_nodes = shared_nodes( + tables.hat.routers_net.as_ref().unwrap(), + tables.hat.peers_net.as_ref().unwrap(), ); } } @@ -581,6 +144,7 @@ impl Router { let link_id = match (self.whatami, whatami) { (WhatAmI::Router, WhatAmI::Router) => tables + .hat .routers_net .as_mut() .unwrap() @@ -588,7 +152,7 @@ impl Router { (WhatAmI::Router, WhatAmI::Peer) | (WhatAmI::Peer, WhatAmI::Router) | (WhatAmI::Peer, WhatAmI::Peer) => { - if let Some(net) = tables.peers_net.as_mut() { + if let Some(net) = tables.hat.peers_net.as_mut() { net.add_link(transport.clone()) } else { 0 @@ -597,10 +161,10 @@ impl Router { _ => 0, }; - if tables.full_net(WhatAmI::Router) && tables.full_net(WhatAmI::Peer) { - tables.shared_nodes = shared_nodes( - tables.routers_net.as_ref().unwrap(), - tables.peers_net.as_ref().unwrap(), + if tables.hat.full_net(WhatAmI::Router) && tables.hat.full_net(WhatAmI::Peer) { + tables.hat.shared_nodes = shared_nodes( + tables.hat.routers_net.as_ref().unwrap(), + tables.hat.peers_net.as_ref().unwrap(), ); } @@ -625,13 +189,17 @@ impl Router { match (self.whatami, whatami) { (WhatAmI::Router, WhatAmI::Router) => { - tables.schedule_compute_trees(self.tables.clone(), WhatAmI::Router); + tables + .hat + .schedule_compute_trees(self.tables.clone(), WhatAmI::Router); } (WhatAmI::Router, WhatAmI::Peer) | (WhatAmI::Peer, WhatAmI::Router) | (WhatAmI::Peer, WhatAmI::Peer) => { - if tables.full_net(WhatAmI::Peer) { - tables.schedule_compute_trees(self.tables.clone(), WhatAmI::Peer); + if tables.hat.full_net(WhatAmI::Peer) { + tables + .hat + .schedule_compute_trees(self.tables.clone(), WhatAmI::Peer); } } _ => (), @@ -730,6 +298,7 @@ impl TransportPeerEventHandler for LinkStateInterceptor { match (tables.whatami, whatami) { (WhatAmI::Router, WhatAmI::Router) => { for (_, removed_node) in tables + .hat .routers_net .as_mut() .unwrap() @@ -748,14 +317,14 @@ impl TransportPeerEventHandler for LinkStateInterceptor { ); } - if tables.full_net(WhatAmI::Peer) { - tables.shared_nodes = shared_nodes( - tables.routers_net.as_ref().unwrap(), - 
tables.peers_net.as_ref().unwrap(), + if tables.hat.full_net(WhatAmI::Peer) { + tables.hat.shared_nodes = shared_nodes( + tables.hat.routers_net.as_ref().unwrap(), + tables.hat.peers_net.as_ref().unwrap(), ); } - tables.schedule_compute_trees( + tables.hat.schedule_compute_trees( self.tables.clone(), WhatAmI::Router, ); @@ -763,9 +332,9 @@ impl TransportPeerEventHandler for LinkStateInterceptor { (WhatAmI::Router, WhatAmI::Peer) | (WhatAmI::Peer, WhatAmI::Router) | (WhatAmI::Peer, WhatAmI::Peer) => { - if let Some(net) = tables.peers_net.as_mut() { + if let Some(net) = tables.hat.peers_net.as_mut() { let changes = net.link_states(list.link_states, zid); - if tables.full_net(WhatAmI::Peer) { + if tables.hat.full_net(WhatAmI::Peer) { for (_, removed_node) in changes.removed_nodes { pubsub_remove_node( &mut tables, @@ -780,13 +349,13 @@ impl TransportPeerEventHandler for LinkStateInterceptor { } if tables.whatami == WhatAmI::Router { - tables.shared_nodes = shared_nodes( - tables.routers_net.as_ref().unwrap(), - tables.peers_net.as_ref().unwrap(), + tables.hat.shared_nodes = shared_nodes( + tables.hat.routers_net.as_ref().unwrap(), + tables.hat.peers_net.as_ref().unwrap(), ); } - tables.schedule_compute_trees( + tables.hat.schedule_compute_trees( self.tables.clone(), WhatAmI::Peer, ); @@ -834,41 +403,45 @@ impl TransportPeerEventHandler for LinkStateInterceptor { match (tables.whatami, whatami) { (WhatAmI::Router, WhatAmI::Router) => { for (_, removed_node) in - tables.routers_net.as_mut().unwrap().remove_link(&zid) + tables.hat.routers_net.as_mut().unwrap().remove_link(&zid) { pubsub_remove_node(&mut tables, &removed_node.zid, WhatAmI::Router); queries_remove_node(&mut tables, &removed_node.zid, WhatAmI::Router); } - if tables.full_net(WhatAmI::Peer) { - tables.shared_nodes = shared_nodes( - tables.routers_net.as_ref().unwrap(), - tables.peers_net.as_ref().unwrap(), + if tables.hat.full_net(WhatAmI::Peer) { + tables.hat.shared_nodes = shared_nodes( + tables.hat.routers_net.as_ref().unwrap(), + tables.hat.peers_net.as_ref().unwrap(), ); } - tables.schedule_compute_trees(tables_ref.clone(), WhatAmI::Router); + tables + .hat + .schedule_compute_trees(tables_ref.clone(), WhatAmI::Router); } (WhatAmI::Router, WhatAmI::Peer) | (WhatAmI::Peer, WhatAmI::Router) | (WhatAmI::Peer, WhatAmI::Peer) => { - if tables.full_net(WhatAmI::Peer) { + if tables.hat.full_net(WhatAmI::Peer) { for (_, removed_node) in - tables.peers_net.as_mut().unwrap().remove_link(&zid) + tables.hat.peers_net.as_mut().unwrap().remove_link(&zid) { pubsub_remove_node(&mut tables, &removed_node.zid, WhatAmI::Peer); queries_remove_node(&mut tables, &removed_node.zid, WhatAmI::Peer); } if tables.whatami == WhatAmI::Router { - tables.shared_nodes = shared_nodes( - tables.routers_net.as_ref().unwrap(), - tables.peers_net.as_ref().unwrap(), + tables.hat.shared_nodes = shared_nodes( + tables.hat.routers_net.as_ref().unwrap(), + tables.hat.peers_net.as_ref().unwrap(), ); } - tables.schedule_compute_trees(tables_ref.clone(), WhatAmI::Peer); - } else if let Some(net) = tables.peers_net.as_mut() { + tables + .hat + .schedule_compute_trees(tables_ref.clone(), WhatAmI::Peer); + } else if let Some(net) = tables.hat.peers_net.as_mut() { net.remove_link(&zid); } } diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs index 0eb099a098..f2396eec86 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -10,7 +10,7 @@ // // Contributors: // ZettaScale Zenoh Team, -use 
super::routing::face::Face; +use super::routing::dispatcher::face::Face; use super::Runtime; use crate::key_expr::KeyExpr; use crate::plugins::sealed as plugins; @@ -535,6 +535,7 @@ fn routers_linkstate_data(context: &AdminContext, query: Query) { reply_key, Value::from( tables + .hat .routers_net .as_ref() .map(|net| net.dot()) @@ -562,6 +563,7 @@ fn peers_linkstate_data(context: &AdminContext, query: Query) { reply_key, Value::from( tables + .hat .peers_net .as_ref() .map(|net| net.dot()) @@ -579,7 +581,7 @@ fn peers_linkstate_data(context: &AdminContext, query: Query) { fn subscribers_data(context: &AdminContext, query: Query) { let tables = zread!(context.runtime.router.tables.tables); - for sub in tables.router_subs.iter() { + for sub in tables.hat.router_subs.iter() { let key = KeyExpr::try_from(format!( "@/router/{}/subscriber/{}", context.zid_str, @@ -596,7 +598,7 @@ fn subscribers_data(context: &AdminContext, query: Query) { fn queryables_data(context: &AdminContext, query: Query) { let tables = zread!(context.runtime.router.tables.tables); - for qabl in tables.router_qabls.iter() { + for qabl in tables.hat.router_qabls.iter() { let key = KeyExpr::try_from(format!( "@/router/{}/queryable/{}", context.zid_str, diff --git a/zenoh/src/net/runtime/mod.rs b/zenoh/src/net/runtime/mod.rs index 92d369e998..7ca9297aa7 100644 --- a/zenoh/src/net/runtime/mod.rs +++ b/zenoh/src/net/runtime/mod.rs @@ -21,8 +21,8 @@ mod adminspace; pub mod orchestrator; use super::routing; -use super::routing::face::Face; -use super::routing::pubsub::full_reentrant_route_data; +use super::routing::dispatcher::face::Face; +use super::routing::dispatcher::pubsub::full_reentrant_route_data; use super::routing::router::{LinkStateInterceptor, Router}; use crate::config::{unwrap_or_default, Config, ModeDependent, Notifier}; use crate::GIT_VERSION; diff --git a/zenoh/src/net/tests/tables.rs b/zenoh/src/net/tests/tables.rs index 5dadf8d8a9..94f7856098 100644 --- a/zenoh/src/net/tests/tables.rs +++ b/zenoh/src/net/tests/tables.rs @@ -1,752 +1,752 @@ -// -// Copyright (c) 2023 ZettaScale Technology -// -// This program and the accompanying materials are made available under the -// terms of the Eclipse Public License 2.0 which is available at -// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 -// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
-// -// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 -// -// Contributors: -// ZettaScale Zenoh Team, -// -use crate::net::routing::router::{self, *}; -use std::convert::{TryFrom, TryInto}; -use std::sync::{Arc, Mutex, RwLock}; -use std::time::Duration; -use uhlc::HLC; -use zenoh_buffers::ZBuf; -use zenoh_config::defaults::queries_default_timeout; -use zenoh_core::zlock; -use zenoh_protocol::core::Encoding; -use zenoh_protocol::core::{ - key_expr::keyexpr, ExprId, Reliability, WhatAmI, WireExpr, ZenohId, EMPTY_EXPR_ID, -}; -use zenoh_protocol::network::declare::subscriber::ext::SubscriberInfo; -use zenoh_protocol::network::declare::Mode; -use zenoh_protocol::network::{ext, Declare, DeclareBody, DeclareKeyExpr}; -use zenoh_protocol::zenoh::{PushBody, Put}; -use zenoh_transport::{DummyPrimitives, Primitives}; - -#[test] -fn base_test() { - let tables = TablesLock { - tables: RwLock::new(Tables::new( - ZenohId::try_from([1]).unwrap(), - WhatAmI::Client, - Some(Arc::new(HLC::default())), - false, - true, - Duration::from_millis(queries_default_timeout), - )), - ctrl_lock: Mutex::new(()), - queries_lock: RwLock::new(()), - }; - - let primitives = Arc::new(DummyPrimitives::new()); - let face = zwrite!(tables.tables).open_face( - ZenohId::try_from([1]).unwrap(), - WhatAmI::Client, - primitives, - ); - register_expr( - &tables, - &mut face.upgrade().unwrap(), - 1, - &"one/two/three".into(), - ); - register_expr( - &tables, - &mut face.upgrade().unwrap(), - 2, - &"one/deux/trois".into(), - ); - - let sub_info = SubscriberInfo { - reliability: Reliability::Reliable, - mode: Mode::Push, - }; - declare_client_subscription( - &tables, - zread!(tables.tables), - &mut face.upgrade().unwrap(), - &WireExpr::from(1).with_suffix("four/five"), - &sub_info, - ); - - Tables::print(&zread!(tables.tables)); -} - -#[test] -fn match_test() { - let key_exprs = [ - "**", - "a", - "a/b", - "*", - "a/*", - "a/b$*", - "abc", - "xx", - "ab$*", - "abcd", - "ab$*d", - "ab", - "ab/*", - "a/*/c/*/e", - "a/b/c/d/e", - "a/$*b/c/$*d/e", - "a/xb/c/xd/e", - "a/c/e", - "a/b/c/d/x/e", - "ab$*cd", - "abxxcxxd", - "abxxcxxcd", - "abxxcxxcdx", - "a/b/c", - "ab/**", - "**/xyz", - "a/b/xyz/d/e/f/xyz", - "**/xyz$*xyz", - "a/b/xyz/d/e/f/xyz", - "a/**/c/**/e", - "a/b/b/b/c/d/d/d/e", - "a/**/c/*/e/*", - "a/b/b/b/c/d/d/c/d/e/f", - "a/**/c/*/e/*", - "x/abc", - "x/*", - "x/abc$*", - "x/$*abc", - "x/a$*", - "x/a$*de", - "x/abc$*de", - "x/a$*d$*e", - "x/a$*e", - "x/a$*c$*e", - "x/ade", - "x/c$*", - "x/$*d", - "x/$*e", - ] - .map(|s| keyexpr::new(s).unwrap()); - - let tables = TablesLock { - tables: RwLock::new(Tables::new( - ZenohId::try_from([1]).unwrap(), - WhatAmI::Client, - Some(Arc::new(HLC::default())), - false, - true, - Duration::from_millis(queries_default_timeout), - )), - ctrl_lock: Mutex::new(()), - queries_lock: RwLock::new(()), - }; - let primitives = Arc::new(DummyPrimitives::new()); - let face = zwrite!(tables.tables).open_face( - ZenohId::try_from([1]).unwrap(), - WhatAmI::Client, - primitives, - ); - for (i, key_expr) in key_exprs.iter().enumerate() { - register_expr( - &tables, - &mut face.upgrade().unwrap(), - i.try_into().unwrap(), - &(*key_expr).into(), - ); - } - - for key_expr1 in key_exprs.iter() { - let res_matches = Resource::get_matches(&zread!(tables.tables), key_expr1); - dbg!(res_matches.len()); - for key_expr2 in key_exprs.iter() { - if res_matches - .iter() - .map(|m| m.upgrade().unwrap().expr()) - .any(|x| x.as_str() == key_expr2.as_str()) - { - assert!(dbg!(dbg!(key_expr1).intersects(dbg!(key_expr2)))); - } 
else { - assert!(!dbg!(dbg!(key_expr1).intersects(dbg!(key_expr2)))); - } - } - } -} - -#[test] -fn clean_test() { - let tables = TablesLock { - tables: RwLock::new(Tables::new( - ZenohId::try_from([1]).unwrap(), - WhatAmI::Client, - Some(Arc::new(HLC::default())), - false, - true, - Duration::from_millis(queries_default_timeout), - )), - ctrl_lock: Mutex::new(()), - queries_lock: RwLock::new(()), - }; - - let primitives = Arc::new(DummyPrimitives::new()); - let face0 = zwrite!(tables.tables).open_face( - ZenohId::try_from([1]).unwrap(), - WhatAmI::Client, - primitives, - ); - assert!(face0.upgrade().is_some()); - - // -------------- - register_expr(&tables, &mut face0.upgrade().unwrap(), 1, &"todrop1".into()); - let optres1 = Resource::get_resource(zread!(tables.tables)._get_root(), "todrop1") - .map(|res| Arc::downgrade(&res)); - assert!(optres1.is_some()); - let res1 = optres1.unwrap(); - assert!(res1.upgrade().is_some()); - - register_expr( - &tables, - &mut face0.upgrade().unwrap(), - 2, - &"todrop1/todrop11".into(), - ); - let optres2 = Resource::get_resource(zread!(tables.tables)._get_root(), "todrop1/todrop11") - .map(|res| Arc::downgrade(&res)); - assert!(optres2.is_some()); - let res2 = optres2.unwrap(); - assert!(res2.upgrade().is_some()); - - register_expr(&tables, &mut face0.upgrade().unwrap(), 3, &"**".into()); - let optres3 = Resource::get_resource(zread!(tables.tables)._get_root(), "**") - .map(|res| Arc::downgrade(&res)); - assert!(optres3.is_some()); - let res3 = optres3.unwrap(); - assert!(res3.upgrade().is_some()); - - unregister_expr(&tables, &mut face0.upgrade().unwrap(), 1); - assert!(res1.upgrade().is_some()); - assert!(res2.upgrade().is_some()); - assert!(res3.upgrade().is_some()); - - unregister_expr(&tables, &mut face0.upgrade().unwrap(), 2); - assert!(res1.upgrade().is_none()); - assert!(res2.upgrade().is_none()); - assert!(res3.upgrade().is_some()); - - unregister_expr(&tables, &mut face0.upgrade().unwrap(), 3); - assert!(res1.upgrade().is_none()); - assert!(res2.upgrade().is_none()); - assert!(res3.upgrade().is_none()); - - // -------------- - register_expr(&tables, &mut face0.upgrade().unwrap(), 1, &"todrop1".into()); - let optres1 = Resource::get_resource(zread!(tables.tables)._get_root(), "todrop1") - .map(|res| Arc::downgrade(&res)); - assert!(optres1.is_some()); - let res1 = optres1.unwrap(); - assert!(res1.upgrade().is_some()); - - let sub_info = SubscriberInfo { - reliability: Reliability::Reliable, - mode: Mode::Push, - }; - - declare_client_subscription( - &tables, - zread!(tables.tables), - &mut face0.upgrade().unwrap(), - &"todrop1/todrop11".into(), - &sub_info, - ); - let optres2 = Resource::get_resource(zread!(tables.tables)._get_root(), "todrop1/todrop11") - .map(|res| Arc::downgrade(&res)); - assert!(optres2.is_some()); - let res2 = optres2.unwrap(); - assert!(res2.upgrade().is_some()); - - declare_client_subscription( - &tables, - zread!(tables.tables), - &mut face0.upgrade().unwrap(), - &WireExpr::from(1).with_suffix("/todrop12"), - &sub_info, - ); - let optres3 = Resource::get_resource(zread!(tables.tables)._get_root(), "todrop1/todrop12") - .map(|res| Arc::downgrade(&res)); - assert!(optres3.is_some()); - let res3 = optres3.unwrap(); - assert!(res3.upgrade().is_some()); - - forget_client_subscription( - &tables, - zread!(tables.tables), - &mut face0.upgrade().unwrap(), - &WireExpr::from(1).with_suffix("/todrop12"), - ); - assert!(res1.upgrade().is_some()); - assert!(res2.upgrade().is_some()); - assert!(res3.upgrade().is_none()); - - 
forget_client_subscription( - &tables, - zread!(tables.tables), - &mut face0.upgrade().unwrap(), - &"todrop1/todrop11".into(), - ); - assert!(res1.upgrade().is_some()); - assert!(res2.upgrade().is_none()); - assert!(res3.upgrade().is_none()); - - unregister_expr(&tables, &mut face0.upgrade().unwrap(), 1); - assert!(res1.upgrade().is_none()); - assert!(res2.upgrade().is_none()); - assert!(res3.upgrade().is_none()); - - // -------------- - register_expr(&tables, &mut face0.upgrade().unwrap(), 2, &"todrop3".into()); - declare_client_subscription( - &tables, - zread!(tables.tables), - &mut face0.upgrade().unwrap(), - &"todrop3".into(), - &sub_info, - ); - let optres1 = Resource::get_resource(zread!(tables.tables)._get_root(), "todrop3") - .map(|res| Arc::downgrade(&res)); - assert!(optres1.is_some()); - let res1 = optres1.unwrap(); - assert!(res1.upgrade().is_some()); - - forget_client_subscription( - &tables, - zread!(tables.tables), - &mut face0.upgrade().unwrap(), - &"todrop3".into(), - ); - assert!(res1.upgrade().is_some()); - - unregister_expr(&tables, &mut face0.upgrade().unwrap(), 2); - assert!(res1.upgrade().is_none()); - - // -------------- - register_expr(&tables, &mut face0.upgrade().unwrap(), 3, &"todrop4".into()); - register_expr(&tables, &mut face0.upgrade().unwrap(), 4, &"todrop5".into()); - declare_client_subscription( - &tables, - zread!(tables.tables), - &mut face0.upgrade().unwrap(), - &"todrop5".into(), - &sub_info, - ); - declare_client_subscription( - &tables, - zread!(tables.tables), - &mut face0.upgrade().unwrap(), - &"todrop6".into(), - &sub_info, - ); - - let optres1 = Resource::get_resource(zread!(tables.tables)._get_root(), "todrop4") - .map(|res| Arc::downgrade(&res)); - assert!(optres1.is_some()); - let res1 = optres1.unwrap(); - let optres2 = Resource::get_resource(zread!(tables.tables)._get_root(), "todrop5") - .map(|res| Arc::downgrade(&res)); - assert!(optres2.is_some()); - let res2 = optres2.unwrap(); - let optres3 = Resource::get_resource(zread!(tables.tables)._get_root(), "todrop6") - .map(|res| Arc::downgrade(&res)); - assert!(optres3.is_some()); - let res3 = optres3.unwrap(); - - assert!(res1.upgrade().is_some()); - assert!(res2.upgrade().is_some()); - assert!(res3.upgrade().is_some()); - - router::close_face(&tables, &face0); - assert!(face0.upgrade().is_none()); - assert!(res1.upgrade().is_none()); - assert!(res2.upgrade().is_none()); - assert!(res3.upgrade().is_none()); -} - -pub struct ClientPrimitives { - data: std::sync::Mutex>>, - mapping: std::sync::Mutex>, -} - -impl ClientPrimitives { - pub fn new() -> ClientPrimitives { - ClientPrimitives { - data: std::sync::Mutex::new(None), - mapping: std::sync::Mutex::new(std::collections::HashMap::new()), - } - } - - pub fn clear_data(&self) { - *self.data.lock().unwrap() = None; - } -} - -impl Default for ClientPrimitives { - fn default() -> Self { - Self::new() - } -} - -impl ClientPrimitives { - fn get_name(&self, key_expr: &WireExpr) -> String { - let mapping = self.mapping.lock().unwrap(); - let (scope, suffix) = key_expr.as_id_and_suffix(); - if scope == EMPTY_EXPR_ID { - suffix.to_string() - } else if suffix.is_empty() { - mapping.get(&scope).unwrap().clone() - } else { - format!("{}{}", mapping.get(&scope).unwrap(), suffix) - } - } - - fn get_last_name(&self) -> Option { - self.data - .lock() - .unwrap() - .as_ref() - .map(|data| self.get_name(data)) - } - - #[allow(dead_code)] - fn get_last_key(&self) -> Option { - self.data.lock().unwrap().as_ref().cloned() - } -} - -impl Primitives for 
ClientPrimitives { - fn send_declare(&self, msg: zenoh_protocol::network::Declare) { - match msg.body { - DeclareBody::DeclareKeyExpr(d) => { - let name = self.get_name(&d.wire_expr); - zlock!(self.mapping).insert(d.id, name); - } - DeclareBody::UndeclareKeyExpr(u) => { - zlock!(self.mapping).remove(&u.id); - } - _ => (), - } - } - - fn send_push(&self, msg: zenoh_protocol::network::Push) { - *zlock!(self.data) = Some(msg.wire_expr.to_owned()); - } - - fn send_request(&self, _msg: zenoh_protocol::network::Request) {} - - fn send_response(&self, _msg: zenoh_protocol::network::Response) {} - - fn send_response_final(&self, _msg: zenoh_protocol::network::ResponseFinal) {} - - fn send_close(&self) {} -} - -#[test] -fn client_test() { - let tables = TablesLock { - tables: RwLock::new(Tables::new( - ZenohId::try_from([1]).unwrap(), - WhatAmI::Client, - Some(Arc::new(HLC::default())), - false, - true, - Duration::from_millis(queries_default_timeout), - )), - ctrl_lock: Mutex::new(()), - queries_lock: RwLock::new(()), - }; - - let sub_info = SubscriberInfo { - reliability: Reliability::Reliable, - mode: Mode::Push, - }; - - let primitives0 = Arc::new(ClientPrimitives::new()); - - let face0 = zwrite!(tables.tables).open_face( - ZenohId::try_from([1]).unwrap(), - WhatAmI::Client, - primitives0.clone(), - ); - register_expr( - &tables, - &mut face0.upgrade().unwrap(), - 11, - &"test/client".into(), - ); - primitives0.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareKeyExpr(DeclareKeyExpr { - id: 11, - wire_expr: "test/client".into(), - }), - }); - declare_client_subscription( - &tables, - zread!(tables.tables), - &mut face0.upgrade().unwrap(), - &WireExpr::from(11).with_suffix("/**"), - &sub_info, - ); - register_expr( - &tables, - &mut face0.upgrade().unwrap(), - 12, - &WireExpr::from(11).with_suffix("/z1_pub1"), - ); - primitives0.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareKeyExpr(DeclareKeyExpr { - id: 12, - wire_expr: WireExpr::from(11).with_suffix("/z1_pub1"), - }), - }); - - let primitives1 = Arc::new(ClientPrimitives::new()); - let face1 = zwrite!(tables.tables).open_face( - ZenohId::try_from([1]).unwrap(), - WhatAmI::Client, - primitives1.clone(), - ); - register_expr( - &tables, - &mut face1.upgrade().unwrap(), - 21, - &"test/client".into(), - ); - primitives1.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareKeyExpr(DeclareKeyExpr { - id: 21, - wire_expr: "test/client".into(), - }), - }); - declare_client_subscription( - &tables, - zread!(tables.tables), - &mut face1.upgrade().unwrap(), - &WireExpr::from(21).with_suffix("/**"), - &sub_info, - ); - register_expr( - &tables, - &mut face1.upgrade().unwrap(), - 22, - &WireExpr::from(21).with_suffix("/z2_pub1"), - ); - primitives1.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareKeyExpr(DeclareKeyExpr { - id: 22, - wire_expr: WireExpr::from(21).with_suffix("/z2_pub1"), - }), - }); - - let primitives2 = Arc::new(ClientPrimitives::new()); - let face2 = zwrite!(tables.tables).open_face( - ZenohId::try_from([1]).unwrap(), - WhatAmI::Client, - primitives2.clone(), - ); - register_expr( - &tables, - &mut 
face2.upgrade().unwrap(), - 31, - &"test/client".into(), - ); - primitives2.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareKeyExpr(DeclareKeyExpr { - id: 31, - wire_expr: "test/client".into(), - }), - }); - declare_client_subscription( - &tables, - zread!(tables.tables), - &mut face2.upgrade().unwrap(), - &WireExpr::from(31).with_suffix("/**"), - &sub_info, - ); - - primitives0.clear_data(); - primitives1.clear_data(); - primitives2.clear_data(); - - full_reentrant_route_data( - &tables.tables, - &face0.upgrade().unwrap(), - &"test/client/z1_wr1".into(), - ext::QoSType::default(), - PushBody::Put(Put { - timestamp: None, - encoding: Encoding::default(), - ext_sinfo: None, - #[cfg(feature = "shared-memory")] - ext_shm: None, - ext_unknown: vec![], - payload: ZBuf::empty(), - }), - 0, - ); - - // functionnal check - assert!(primitives1.get_last_name().is_some()); - assert_eq!(primitives1.get_last_name().unwrap(), "test/client/z1_wr1"); - // mapping strategy check - // assert_eq!(primitives1.get_last_key().unwrap(), KeyExpr::IdWithSuffix(21, "/z1_wr1".to_string())); - - // functionnal check - assert!(primitives2.get_last_name().is_some()); - assert_eq!(primitives2.get_last_name().unwrap(), "test/client/z1_wr1"); - // mapping strategy check - // assert_eq!(primitives2.get_last_key().unwrap(), KeyExpr::IdWithSuffix(31, "/z1_wr1".to_string())); - - primitives0.clear_data(); - primitives1.clear_data(); - primitives2.clear_data(); - full_reentrant_route_data( - &tables.tables, - &face0.upgrade().unwrap(), - &WireExpr::from(11).with_suffix("/z1_wr2"), - ext::QoSType::default(), - PushBody::Put(Put { - timestamp: None, - encoding: Encoding::default(), - ext_sinfo: None, - #[cfg(feature = "shared-memory")] - ext_shm: None, - ext_unknown: vec![], - payload: ZBuf::empty(), - }), - 0, - ); - - // functionnal check - assert!(primitives1.get_last_name().is_some()); - assert_eq!(primitives1.get_last_name().unwrap(), "test/client/z1_wr2"); - // mapping strategy check - // assert_eq!(primitives1.get_last_key().unwrap(), KeyExpr::IdWithSuffix(21, "/z1_wr2".to_string())); - - // functionnal check - assert!(primitives2.get_last_name().is_some()); - assert_eq!(primitives2.get_last_name().unwrap(), "test/client/z1_wr2"); - // mapping strategy check - // assert_eq!(primitives2.get_last_key().unwrap(), KeyExpr::IdWithSuffix(31, "/z1_wr2".to_string())); - - primitives0.clear_data(); - primitives1.clear_data(); - primitives2.clear_data(); - full_reentrant_route_data( - &tables.tables, - &face1.upgrade().unwrap(), - &"test/client/**".into(), - ext::QoSType::default(), - PushBody::Put(Put { - timestamp: None, - encoding: Encoding::default(), - ext_sinfo: None, - #[cfg(feature = "shared-memory")] - ext_shm: None, - ext_unknown: vec![], - payload: ZBuf::empty(), - }), - 0, - ); - - // functionnal check - assert!(primitives0.get_last_name().is_some()); - assert_eq!(primitives0.get_last_name().unwrap(), "test/client/**"); - // mapping strategy check - // assert_eq!(primitives1.get_last_key().unwrap(), KeyExpr::IdWithSuffix(11, "/**".to_string())); - - // functionnal check - assert!(primitives2.get_last_name().is_some()); - assert_eq!(primitives2.get_last_name().unwrap(), "test/client/**"); - // mapping strategy check - // assert_eq!(primitives2.get_last_key().unwrap(), KeyExpr::IdWithSuffix(31, "/**".to_string())); - - primitives0.clear_data(); - primitives1.clear_data(); - primitives2.clear_data(); - 
full_reentrant_route_data( - &tables.tables, - &face0.upgrade().unwrap(), - &12.into(), - ext::QoSType::default(), - PushBody::Put(Put { - timestamp: None, - encoding: Encoding::default(), - ext_sinfo: None, - #[cfg(feature = "shared-memory")] - ext_shm: None, - ext_unknown: vec![], - payload: ZBuf::empty(), - }), - 0, - ); - - // functionnal check - assert!(primitives1.get_last_name().is_some()); - assert_eq!(primitives1.get_last_name().unwrap(), "test/client/z1_pub1"); - // mapping strategy check - // assert_eq!(primitives1.get_last_key().unwrap(), KeyExpr::IdWithSuffix(21, "/z1_pub1".to_string())); - - // functionnal check - assert!(primitives2.get_last_name().is_some()); - assert_eq!(primitives2.get_last_name().unwrap(), "test/client/z1_pub1"); - // mapping strategy check - // assert_eq!(primitives2.get_last_key().unwrap(), KeyExpr::IdWithSuffix(31, "/z1_pub1".to_string())); - - primitives0.clear_data(); - primitives1.clear_data(); - primitives2.clear_data(); - full_reentrant_route_data( - &tables.tables, - &face1.upgrade().unwrap(), - &22.into(), - ext::QoSType::default(), - PushBody::Put(Put { - timestamp: None, - encoding: Encoding::default(), - ext_sinfo: None, - #[cfg(feature = "shared-memory")] - ext_shm: None, - ext_unknown: vec![], - payload: ZBuf::empty(), - }), - 0, - ); - - // functionnal check - assert!(primitives0.get_last_name().is_some()); - assert_eq!(primitives0.get_last_name().unwrap(), "test/client/z2_pub1"); - // mapping strategy check - // assert_eq!(primitives1.get_last_key().unwrap(), KeyExpr::IdWithSuffix(11, "/z2_pub1".to_string())); - - // functionnal check - assert!(primitives2.get_last_name().is_some()); - assert_eq!(primitives2.get_last_name().unwrap(), "test/client/z2_pub1"); - // mapping strategy check - // assert_eq!(primitives2.get_last_key().unwrap(), KeyExpr::IdWithSuffix(31, "/z2_pub1".to_string())); -} +// // +// // Copyright (c) 2023 ZettaScale Technology +// // +// // This program and the accompanying materials are made available under the +// // terms of the Eclipse Public License 2.0 which is available at +// // http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// // which is available at https://www.apache.org/licenses/LICENSE-2.0. 
+// // +// // SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// // +// // Contributors: +// // ZettaScale Zenoh Team, +// // +// use crate::net::routing::router::{self, *}; +// use std::convert::{TryFrom, TryInto}; +// use std::sync::{Arc, Mutex, RwLock}; +// use std::time::Duration; +// use uhlc::HLC; +// use zenoh_buffers::ZBuf; +// use zenoh_config::defaults::queries_default_timeout; +// use zenoh_core::zlock; +// use zenoh_protocol::core::Encoding; +// use zenoh_protocol::core::{ +// key_expr::keyexpr, ExprId, Reliability, WhatAmI, WireExpr, ZenohId, EMPTY_EXPR_ID, +// }; +// use zenoh_protocol::network::declare::subscriber::ext::SubscriberInfo; +// use zenoh_protocol::network::declare::Mode; +// use zenoh_protocol::network::{ext, Declare, DeclareBody, DeclareKeyExpr}; +// use zenoh_protocol::zenoh::{PushBody, Put}; +// use zenoh_transport::{DummyPrimitives, Primitives}; + +// #[test] +// fn base_test() { +// let tables = TablesLock { +// tables: RwLock::new(Tables::new( +// ZenohId::try_from([1]).unwrap(), +// WhatAmI::Client, +// Some(Arc::new(HLC::default())), +// false, +// true, +// Duration::from_millis(queries_default_timeout), +// )), +// ctrl_lock: Mutex::new(()), +// queries_lock: RwLock::new(()), +// }; + +// let primitives = Arc::new(DummyPrimitives::new()); +// let face = zwrite!(tables.tables).open_face( +// ZenohId::try_from([1]).unwrap(), +// WhatAmI::Client, +// primitives, +// ); +// register_expr( +// &tables, +// &mut face.upgrade().unwrap(), +// 1, +// &"one/two/three".into(), +// ); +// register_expr( +// &tables, +// &mut face.upgrade().unwrap(), +// 2, +// &"one/deux/trois".into(), +// ); + +// let sub_info = SubscriberInfo { +// reliability: Reliability::Reliable, +// mode: Mode::Push, +// }; +// declare_client_subscription( +// &tables, +// zread!(tables.tables), +// &mut face.upgrade().unwrap(), +// &WireExpr::from(1).with_suffix("four/five"), +// &sub_info, +// ); + +// Tables::print(&zread!(tables.tables)); +// } + +// #[test] +// fn match_test() { +// let key_exprs = [ +// "**", +// "a", +// "a/b", +// "*", +// "a/*", +// "a/b$*", +// "abc", +// "xx", +// "ab$*", +// "abcd", +// "ab$*d", +// "ab", +// "ab/*", +// "a/*/c/*/e", +// "a/b/c/d/e", +// "a/$*b/c/$*d/e", +// "a/xb/c/xd/e", +// "a/c/e", +// "a/b/c/d/x/e", +// "ab$*cd", +// "abxxcxxd", +// "abxxcxxcd", +// "abxxcxxcdx", +// "a/b/c", +// "ab/**", +// "**/xyz", +// "a/b/xyz/d/e/f/xyz", +// "**/xyz$*xyz", +// "a/b/xyz/d/e/f/xyz", +// "a/**/c/**/e", +// "a/b/b/b/c/d/d/d/e", +// "a/**/c/*/e/*", +// "a/b/b/b/c/d/d/c/d/e/f", +// "a/**/c/*/e/*", +// "x/abc", +// "x/*", +// "x/abc$*", +// "x/$*abc", +// "x/a$*", +// "x/a$*de", +// "x/abc$*de", +// "x/a$*d$*e", +// "x/a$*e", +// "x/a$*c$*e", +// "x/ade", +// "x/c$*", +// "x/$*d", +// "x/$*e", +// ] +// .map(|s| keyexpr::new(s).unwrap()); + +// let tables = TablesLock { +// tables: RwLock::new(Tables::new( +// ZenohId::try_from([1]).unwrap(), +// WhatAmI::Client, +// Some(Arc::new(HLC::default())), +// false, +// true, +// Duration::from_millis(queries_default_timeout), +// )), +// ctrl_lock: Mutex::new(()), +// queries_lock: RwLock::new(()), +// }; +// let primitives = Arc::new(DummyPrimitives::new()); +// let face = zwrite!(tables.tables).open_face( +// ZenohId::try_from([1]).unwrap(), +// WhatAmI::Client, +// primitives, +// ); +// for (i, key_expr) in key_exprs.iter().enumerate() { +// register_expr( +// &tables, +// &mut face.upgrade().unwrap(), +// i.try_into().unwrap(), +// &(*key_expr).into(), +// ); +// } + +// for key_expr1 in key_exprs.iter() { +// 
let res_matches = Resource::get_matches(&zread!(tables.tables), key_expr1); +// dbg!(res_matches.len()); +// for key_expr2 in key_exprs.iter() { +// if res_matches +// .iter() +// .map(|m| m.upgrade().unwrap().expr()) +// .any(|x| x.as_str() == key_expr2.as_str()) +// { +// assert!(dbg!(dbg!(key_expr1).intersects(dbg!(key_expr2)))); +// } else { +// assert!(!dbg!(dbg!(key_expr1).intersects(dbg!(key_expr2)))); +// } +// } +// } +// } + +// #[test] +// fn clean_test() { +// let tables = TablesLock { +// tables: RwLock::new(Tables::new( +// ZenohId::try_from([1]).unwrap(), +// WhatAmI::Client, +// Some(Arc::new(HLC::default())), +// false, +// true, +// Duration::from_millis(queries_default_timeout), +// )), +// ctrl_lock: Mutex::new(()), +// queries_lock: RwLock::new(()), +// }; + +// let primitives = Arc::new(DummyPrimitives::new()); +// let face0 = zwrite!(tables.tables).open_face( +// ZenohId::try_from([1]).unwrap(), +// WhatAmI::Client, +// primitives, +// ); +// assert!(face0.upgrade().is_some()); + +// // -------------- +// register_expr(&tables, &mut face0.upgrade().unwrap(), 1, &"todrop1".into()); +// let optres1 = Resource::get_resource(zread!(tables.tables)._get_root(), "todrop1") +// .map(|res| Arc::downgrade(&res)); +// assert!(optres1.is_some()); +// let res1 = optres1.unwrap(); +// assert!(res1.upgrade().is_some()); + +// register_expr( +// &tables, +// &mut face0.upgrade().unwrap(), +// 2, +// &"todrop1/todrop11".into(), +// ); +// let optres2 = Resource::get_resource(zread!(tables.tables)._get_root(), "todrop1/todrop11") +// .map(|res| Arc::downgrade(&res)); +// assert!(optres2.is_some()); +// let res2 = optres2.unwrap(); +// assert!(res2.upgrade().is_some()); + +// register_expr(&tables, &mut face0.upgrade().unwrap(), 3, &"**".into()); +// let optres3 = Resource::get_resource(zread!(tables.tables)._get_root(), "**") +// .map(|res| Arc::downgrade(&res)); +// assert!(optres3.is_some()); +// let res3 = optres3.unwrap(); +// assert!(res3.upgrade().is_some()); + +// unregister_expr(&tables, &mut face0.upgrade().unwrap(), 1); +// assert!(res1.upgrade().is_some()); +// assert!(res2.upgrade().is_some()); +// assert!(res3.upgrade().is_some()); + +// unregister_expr(&tables, &mut face0.upgrade().unwrap(), 2); +// assert!(res1.upgrade().is_none()); +// assert!(res2.upgrade().is_none()); +// assert!(res3.upgrade().is_some()); + +// unregister_expr(&tables, &mut face0.upgrade().unwrap(), 3); +// assert!(res1.upgrade().is_none()); +// assert!(res2.upgrade().is_none()); +// assert!(res3.upgrade().is_none()); + +// // -------------- +// register_expr(&tables, &mut face0.upgrade().unwrap(), 1, &"todrop1".into()); +// let optres1 = Resource::get_resource(zread!(tables.tables)._get_root(), "todrop1") +// .map(|res| Arc::downgrade(&res)); +// assert!(optres1.is_some()); +// let res1 = optres1.unwrap(); +// assert!(res1.upgrade().is_some()); + +// let sub_info = SubscriberInfo { +// reliability: Reliability::Reliable, +// mode: Mode::Push, +// }; + +// declare_client_subscription( +// &tables, +// zread!(tables.tables), +// &mut face0.upgrade().unwrap(), +// &"todrop1/todrop11".into(), +// &sub_info, +// ); +// let optres2 = Resource::get_resource(zread!(tables.tables)._get_root(), "todrop1/todrop11") +// .map(|res| Arc::downgrade(&res)); +// assert!(optres2.is_some()); +// let res2 = optres2.unwrap(); +// assert!(res2.upgrade().is_some()); + +// declare_client_subscription( +// &tables, +// zread!(tables.tables), +// &mut face0.upgrade().unwrap(), +// 
&WireExpr::from(1).with_suffix("/todrop12"), +// &sub_info, +// ); +// let optres3 = Resource::get_resource(zread!(tables.tables)._get_root(), "todrop1/todrop12") +// .map(|res| Arc::downgrade(&res)); +// assert!(optres3.is_some()); +// let res3 = optres3.unwrap(); +// assert!(res3.upgrade().is_some()); + +// forget_client_subscription( +// &tables, +// zread!(tables.tables), +// &mut face0.upgrade().unwrap(), +// &WireExpr::from(1).with_suffix("/todrop12"), +// ); +// assert!(res1.upgrade().is_some()); +// assert!(res2.upgrade().is_some()); +// assert!(res3.upgrade().is_none()); + +// forget_client_subscription( +// &tables, +// zread!(tables.tables), +// &mut face0.upgrade().unwrap(), +// &"todrop1/todrop11".into(), +// ); +// assert!(res1.upgrade().is_some()); +// assert!(res2.upgrade().is_none()); +// assert!(res3.upgrade().is_none()); + +// unregister_expr(&tables, &mut face0.upgrade().unwrap(), 1); +// assert!(res1.upgrade().is_none()); +// assert!(res2.upgrade().is_none()); +// assert!(res3.upgrade().is_none()); + +// // -------------- +// register_expr(&tables, &mut face0.upgrade().unwrap(), 2, &"todrop3".into()); +// declare_client_subscription( +// &tables, +// zread!(tables.tables), +// &mut face0.upgrade().unwrap(), +// &"todrop3".into(), +// &sub_info, +// ); +// let optres1 = Resource::get_resource(zread!(tables.tables)._get_root(), "todrop3") +// .map(|res| Arc::downgrade(&res)); +// assert!(optres1.is_some()); +// let res1 = optres1.unwrap(); +// assert!(res1.upgrade().is_some()); + +// forget_client_subscription( +// &tables, +// zread!(tables.tables), +// &mut face0.upgrade().unwrap(), +// &"todrop3".into(), +// ); +// assert!(res1.upgrade().is_some()); + +// unregister_expr(&tables, &mut face0.upgrade().unwrap(), 2); +// assert!(res1.upgrade().is_none()); + +// // -------------- +// register_expr(&tables, &mut face0.upgrade().unwrap(), 3, &"todrop4".into()); +// register_expr(&tables, &mut face0.upgrade().unwrap(), 4, &"todrop5".into()); +// declare_client_subscription( +// &tables, +// zread!(tables.tables), +// &mut face0.upgrade().unwrap(), +// &"todrop5".into(), +// &sub_info, +// ); +// declare_client_subscription( +// &tables, +// zread!(tables.tables), +// &mut face0.upgrade().unwrap(), +// &"todrop6".into(), +// &sub_info, +// ); + +// let optres1 = Resource::get_resource(zread!(tables.tables)._get_root(), "todrop4") +// .map(|res| Arc::downgrade(&res)); +// assert!(optres1.is_some()); +// let res1 = optres1.unwrap(); +// let optres2 = Resource::get_resource(zread!(tables.tables)._get_root(), "todrop5") +// .map(|res| Arc::downgrade(&res)); +// assert!(optres2.is_some()); +// let res2 = optres2.unwrap(); +// let optres3 = Resource::get_resource(zread!(tables.tables)._get_root(), "todrop6") +// .map(|res| Arc::downgrade(&res)); +// assert!(optres3.is_some()); +// let res3 = optres3.unwrap(); + +// assert!(res1.upgrade().is_some()); +// assert!(res2.upgrade().is_some()); +// assert!(res3.upgrade().is_some()); + +// tables::close_face(&tables, &face0); +// assert!(face0.upgrade().is_none()); +// assert!(res1.upgrade().is_none()); +// assert!(res2.upgrade().is_none()); +// assert!(res3.upgrade().is_none()); +// } + +// pub struct ClientPrimitives { +// data: std::sync::Mutex>>, +// mapping: std::sync::Mutex>, +// } + +// impl ClientPrimitives { +// pub fn new() -> ClientPrimitives { +// ClientPrimitives { +// data: std::sync::Mutex::new(None), +// mapping: std::sync::Mutex::new(std::collections::HashMap::new()), +// } +// } + +// pub fn clear_data(&self) { +// 
*self.data.lock().unwrap() = None; +// } +// } + +// impl Default for ClientPrimitives { +// fn default() -> Self { +// Self::new() +// } +// } + +// impl ClientPrimitives { +// fn get_name(&self, key_expr: &WireExpr) -> String { +// let mapping = self.mapping.lock().unwrap(); +// let (scope, suffix) = key_expr.as_id_and_suffix(); +// if scope == EMPTY_EXPR_ID { +// suffix.to_string() +// } else if suffix.is_empty() { +// mapping.get(&scope).unwrap().clone() +// } else { +// format!("{}{}", mapping.get(&scope).unwrap(), suffix) +// } +// } + +// fn get_last_name(&self) -> Option { +// self.data +// .lock() +// .unwrap() +// .as_ref() +// .map(|data| self.get_name(data)) +// } + +// #[allow(dead_code)] +// fn get_last_key(&self) -> Option { +// self.data.lock().unwrap().as_ref().cloned() +// } +// } + +// impl Primitives for ClientPrimitives { +// fn send_declare(&self, msg: zenoh_protocol::network::Declare) { +// match msg.body { +// DeclareBody::DeclareKeyExpr(d) => { +// let name = self.get_name(&d.wire_expr); +// zlock!(self.mapping).insert(d.id, name); +// } +// DeclareBody::UndeclareKeyExpr(u) => { +// zlock!(self.mapping).remove(&u.id); +// } +// _ => (), +// } +// } + +// fn send_push(&self, msg: zenoh_protocol::network::Push) { +// *zlock!(self.data) = Some(msg.wire_expr.to_owned()); +// } + +// fn send_request(&self, _msg: zenoh_protocol::network::Request) {} + +// fn send_response(&self, _msg: zenoh_protocol::network::Response) {} + +// fn send_response_final(&self, _msg: zenoh_protocol::network::ResponseFinal) {} + +// fn send_close(&self) {} +// } + +// #[test] +// fn client_test() { +// let tables = TablesLock { +// tables: RwLock::new(Tables::new( +// ZenohId::try_from([1]).unwrap(), +// WhatAmI::Client, +// Some(Arc::new(HLC::default())), +// false, +// true, +// Duration::from_millis(queries_default_timeout), +// )), +// ctrl_lock: Mutex::new(()), +// queries_lock: RwLock::new(()), +// }; + +// let sub_info = SubscriberInfo { +// reliability: Reliability::Reliable, +// mode: Mode::Push, +// }; + +// let primitives0 = Arc::new(ClientPrimitives::new()); + +// let face0 = zwrite!(tables.tables).open_face( +// ZenohId::try_from([1]).unwrap(), +// WhatAmI::Client, +// primitives0.clone(), +// ); +// register_expr( +// &tables, +// &mut face0.upgrade().unwrap(), +// 11, +// &"test/client".into(), +// ); +// primitives0.send_declare(Declare { +// ext_qos: ext::QoSType::declare_default(), +// ext_tstamp: None, +// ext_nodeid: ext::NodeIdType::default(), +// body: DeclareBody::DeclareKeyExpr(DeclareKeyExpr { +// id: 11, +// wire_expr: "test/client".into(), +// }), +// }); +// declare_client_subscription( +// &tables, +// zread!(tables.tables), +// &mut face0.upgrade().unwrap(), +// &WireExpr::from(11).with_suffix("/**"), +// &sub_info, +// ); +// register_expr( +// &tables, +// &mut face0.upgrade().unwrap(), +// 12, +// &WireExpr::from(11).with_suffix("/z1_pub1"), +// ); +// primitives0.send_declare(Declare { +// ext_qos: ext::QoSType::declare_default(), +// ext_tstamp: None, +// ext_nodeid: ext::NodeIdType::default(), +// body: DeclareBody::DeclareKeyExpr(DeclareKeyExpr { +// id: 12, +// wire_expr: WireExpr::from(11).with_suffix("/z1_pub1"), +// }), +// }); + +// let primitives1 = Arc::new(ClientPrimitives::new()); +// let face1 = zwrite!(tables.tables).open_face( +// ZenohId::try_from([1]).unwrap(), +// WhatAmI::Client, +// primitives1.clone(), +// ); +// register_expr( +// &tables, +// &mut face1.upgrade().unwrap(), +// 21, +// &"test/client".into(), +// ); +// 
primitives1.send_declare(Declare { +// ext_qos: ext::QoSType::declare_default(), +// ext_tstamp: None, +// ext_nodeid: ext::NodeIdType::default(), +// body: DeclareBody::DeclareKeyExpr(DeclareKeyExpr { +// id: 21, +// wire_expr: "test/client".into(), +// }), +// }); +// declare_client_subscription( +// &tables, +// zread!(tables.tables), +// &mut face1.upgrade().unwrap(), +// &WireExpr::from(21).with_suffix("/**"), +// &sub_info, +// ); +// register_expr( +// &tables, +// &mut face1.upgrade().unwrap(), +// 22, +// &WireExpr::from(21).with_suffix("/z2_pub1"), +// ); +// primitives1.send_declare(Declare { +// ext_qos: ext::QoSType::declare_default(), +// ext_tstamp: None, +// ext_nodeid: ext::NodeIdType::default(), +// body: DeclareBody::DeclareKeyExpr(DeclareKeyExpr { +// id: 22, +// wire_expr: WireExpr::from(21).with_suffix("/z2_pub1"), +// }), +// }); + +// let primitives2 = Arc::new(ClientPrimitives::new()); +// let face2 = zwrite!(tables.tables).open_face( +// ZenohId::try_from([1]).unwrap(), +// WhatAmI::Client, +// primitives2.clone(), +// ); +// register_expr( +// &tables, +// &mut face2.upgrade().unwrap(), +// 31, +// &"test/client".into(), +// ); +// primitives2.send_declare(Declare { +// ext_qos: ext::QoSType::declare_default(), +// ext_tstamp: None, +// ext_nodeid: ext::NodeIdType::default(), +// body: DeclareBody::DeclareKeyExpr(DeclareKeyExpr { +// id: 31, +// wire_expr: "test/client".into(), +// }), +// }); +// declare_client_subscription( +// &tables, +// zread!(tables.tables), +// &mut face2.upgrade().unwrap(), +// &WireExpr::from(31).with_suffix("/**"), +// &sub_info, +// ); + +// primitives0.clear_data(); +// primitives1.clear_data(); +// primitives2.clear_data(); + +// full_reentrant_route_data( +// &tables.tables, +// &face0.upgrade().unwrap(), +// &"test/client/z1_wr1".into(), +// ext::QoSType::default(), +// PushBody::Put(Put { +// timestamp: None, +// encoding: Encoding::default(), +// ext_sinfo: None, +// #[cfg(feature = "shared-memory")] +// ext_shm: None, +// ext_unknown: vec![], +// payload: ZBuf::empty(), +// }), +// 0, +// ); + +// // functionnal check +// assert!(primitives1.get_last_name().is_some()); +// assert_eq!(primitives1.get_last_name().unwrap(), "test/client/z1_wr1"); +// // mapping strategy check +// // assert_eq!(primitives1.get_last_key().unwrap(), KeyExpr::IdWithSuffix(21, "/z1_wr1".to_string())); + +// // functionnal check +// assert!(primitives2.get_last_name().is_some()); +// assert_eq!(primitives2.get_last_name().unwrap(), "test/client/z1_wr1"); +// // mapping strategy check +// // assert_eq!(primitives2.get_last_key().unwrap(), KeyExpr::IdWithSuffix(31, "/z1_wr1".to_string())); + +// primitives0.clear_data(); +// primitives1.clear_data(); +// primitives2.clear_data(); +// full_reentrant_route_data( +// &tables.tables, +// &face0.upgrade().unwrap(), +// &WireExpr::from(11).with_suffix("/z1_wr2"), +// ext::QoSType::default(), +// PushBody::Put(Put { +// timestamp: None, +// encoding: Encoding::default(), +// ext_sinfo: None, +// #[cfg(feature = "shared-memory")] +// ext_shm: None, +// ext_unknown: vec![], +// payload: ZBuf::empty(), +// }), +// 0, +// ); + +// // functionnal check +// assert!(primitives1.get_last_name().is_some()); +// assert_eq!(primitives1.get_last_name().unwrap(), "test/client/z1_wr2"); +// // mapping strategy check +// // assert_eq!(primitives1.get_last_key().unwrap(), KeyExpr::IdWithSuffix(21, "/z1_wr2".to_string())); + +// // functionnal check +// assert!(primitives2.get_last_name().is_some()); +// 
assert_eq!(primitives2.get_last_name().unwrap(), "test/client/z1_wr2"); +// // mapping strategy check +// // assert_eq!(primitives2.get_last_key().unwrap(), KeyExpr::IdWithSuffix(31, "/z1_wr2".to_string())); + +// primitives0.clear_data(); +// primitives1.clear_data(); +// primitives2.clear_data(); +// full_reentrant_route_data( +// &tables.tables, +// &face1.upgrade().unwrap(), +// &"test/client/**".into(), +// ext::QoSType::default(), +// PushBody::Put(Put { +// timestamp: None, +// encoding: Encoding::default(), +// ext_sinfo: None, +// #[cfg(feature = "shared-memory")] +// ext_shm: None, +// ext_unknown: vec![], +// payload: ZBuf::empty(), +// }), +// 0, +// ); + +// // functionnal check +// assert!(primitives0.get_last_name().is_some()); +// assert_eq!(primitives0.get_last_name().unwrap(), "test/client/**"); +// // mapping strategy check +// // assert_eq!(primitives1.get_last_key().unwrap(), KeyExpr::IdWithSuffix(11, "/**".to_string())); + +// // functionnal check +// assert!(primitives2.get_last_name().is_some()); +// assert_eq!(primitives2.get_last_name().unwrap(), "test/client/**"); +// // mapping strategy check +// // assert_eq!(primitives2.get_last_key().unwrap(), KeyExpr::IdWithSuffix(31, "/**".to_string())); + +// primitives0.clear_data(); +// primitives1.clear_data(); +// primitives2.clear_data(); +// full_reentrant_route_data( +// &tables.tables, +// &face0.upgrade().unwrap(), +// &12.into(), +// ext::QoSType::default(), +// PushBody::Put(Put { +// timestamp: None, +// encoding: Encoding::default(), +// ext_sinfo: None, +// #[cfg(feature = "shared-memory")] +// ext_shm: None, +// ext_unknown: vec![], +// payload: ZBuf::empty(), +// }), +// 0, +// ); + +// // functionnal check +// assert!(primitives1.get_last_name().is_some()); +// assert_eq!(primitives1.get_last_name().unwrap(), "test/client/z1_pub1"); +// // mapping strategy check +// // assert_eq!(primitives1.get_last_key().unwrap(), KeyExpr::IdWithSuffix(21, "/z1_pub1".to_string())); + +// // functionnal check +// assert!(primitives2.get_last_name().is_some()); +// assert_eq!(primitives2.get_last_name().unwrap(), "test/client/z1_pub1"); +// // mapping strategy check +// // assert_eq!(primitives2.get_last_key().unwrap(), KeyExpr::IdWithSuffix(31, "/z1_pub1".to_string())); + +// primitives0.clear_data(); +// primitives1.clear_data(); +// primitives2.clear_data(); +// full_reentrant_route_data( +// &tables.tables, +// &face1.upgrade().unwrap(), +// &22.into(), +// ext::QoSType::default(), +// PushBody::Put(Put { +// timestamp: None, +// encoding: Encoding::default(), +// ext_sinfo: None, +// #[cfg(feature = "shared-memory")] +// ext_shm: None, +// ext_unknown: vec![], +// payload: ZBuf::empty(), +// }), +// 0, +// ); + +// // functionnal check +// assert!(primitives0.get_last_name().is_some()); +// assert_eq!(primitives0.get_last_name().unwrap(), "test/client/z2_pub1"); +// // mapping strategy check +// // assert_eq!(primitives1.get_last_key().unwrap(), KeyExpr::IdWithSuffix(11, "/z2_pub1".to_string())); + +// // functionnal check +// assert!(primitives2.get_last_name().is_some()); +// assert_eq!(primitives2.get_last_name().unwrap(), "test/client/z2_pub1"); +// // mapping strategy check +// // assert_eq!(primitives2.get_last_key().unwrap(), KeyExpr::IdWithSuffix(31, "/z2_pub1".to_string())); +// } diff --git a/zenoh/src/session.rs b/zenoh/src/session.rs index 744f21965f..29a87c24d3 100644 --- a/zenoh/src/session.rs +++ b/zenoh/src/session.rs @@ -20,7 +20,7 @@ use crate::info::*; use crate::key_expr::KeyExprInner; 
#[zenoh_macros::unstable] use crate::liveliness::{Liveliness, LivelinessTokenState}; -use crate::net::routing::face::Face; +use crate::net::routing::dispatcher::face::Face; use crate::net::runtime::Runtime; use crate::net::transport::Primitives; use crate::prelude::Locality; From ec960ea6c7ad9a4748c1f66bf60a1497b1b1e45e Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Thu, 2 Nov 2023 12:13:56 +0100 Subject: [PATCH 002/122] Code move --- zenoh/src/net/routing/dispatcher/face.rs | 19 +- zenoh/src/net/routing/dispatcher/pubsub.rs | 169 +---------- zenoh/src/net/routing/dispatcher/queries.rs | 235 +-------------- zenoh/src/net/routing/dispatcher/resource.rs | 15 +- zenoh/src/net/routing/dispatcher/tables.rs | 86 +----- zenoh/src/net/routing/hat/mod.rs | 136 ++++++++- zenoh/src/net/routing/hat/pubsub.rs | 255 +++++++++++++--- zenoh/src/net/routing/hat/queries.rs | 302 +++++++++++++++++-- 8 files changed, 647 insertions(+), 570 deletions(-) diff --git a/zenoh/src/net/routing/dispatcher/face.rs b/zenoh/src/net/routing/dispatcher/face.rs index 17bf398bc1..c2cc6dea69 100644 --- a/zenoh/src/net/routing/dispatcher/face.rs +++ b/zenoh/src/net/routing/dispatcher/face.rs @@ -1,3 +1,5 @@ +use crate::net::routing::hat::HatFace; + // // Copyright (c) 2023 ZettaScale Technology // @@ -14,16 +16,13 @@ use super::super::router::*; use super::tables::{Tables, TablesLock}; use super::{resource::*, tables}; -use std::collections::{HashMap, HashSet}; +use std::collections::HashMap; use std::fmt; use std::sync::Arc; use zenoh_protocol::zenoh::RequestBody; use zenoh_protocol::{ core::{ExprId, WhatAmI, ZenohId}, - network::{ - declare::queryable::ext::QueryableInfo, Mapping, Push, Request, RequestId, Response, - ResponseFinal, - }, + network::{Mapping, Push, Request, RequestId, Response, ResponseFinal}, }; #[cfg(feature = "stats")] use zenoh_transport::stats::TransportStats; @@ -39,13 +38,10 @@ pub struct FaceState { pub(crate) link_id: usize, pub(crate) local_mappings: HashMap>, pub(crate) remote_mappings: HashMap>, - pub(crate) local_subs: HashSet>, - pub(crate) remote_subs: HashSet>, - pub(crate) local_qabls: HashMap, QueryableInfo>, - pub(crate) remote_qabls: HashSet>, pub(crate) next_qid: RequestId, pub(crate) pending_queries: HashMap>, pub(crate) mcast_group: Option, + pub(crate) hat: HatFace, } impl FaceState { @@ -68,13 +64,10 @@ impl FaceState { link_id, local_mappings: HashMap::new(), remote_mappings: HashMap::new(), - local_subs: HashSet::new(), - remote_subs: HashSet::new(), - local_qabls: HashMap::new(), - remote_qabls: HashSet::new(), next_qid: 0, pending_queries: HashMap::new(), mcast_group, + hat: HatFace::new(), }) } diff --git a/zenoh/src/net/routing/dispatcher/pubsub.rs b/zenoh/src/net/routing/dispatcher/pubsub.rs index 277ed45843..fa861470fc 100644 --- a/zenoh/src/net/routing/dispatcher/pubsub.rs +++ b/zenoh/src/net/routing/dispatcher/pubsub.rs @@ -11,19 +11,19 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::super::hat::network::Network; +use super::super::hat::pubsub::compute_data_route; use super::face::FaceState; use super::resource::{DataRoutes, Direction, PullCaches, Resource, Route}; use super::tables::{RoutingExpr, Tables}; use petgraph::graph::NodeIndex; use std::borrow::Cow; -use std::collections::{HashMap, HashSet}; +use std::collections::HashMap; use std::convert::TryFrom; use std::sync::Arc; use std::sync::RwLock; use zenoh_core::zread; use zenoh_protocol::{ - core::{key_expr::OwnedKeyExpr, WhatAmI, WireExpr, ZenohId}, + core::{key_expr::OwnedKeyExpr, WhatAmI, 
WireExpr}, network::{ declare::{ext, Mode}, Push, @@ -32,169 +32,6 @@ use zenoh_protocol::{ }; use zenoh_sync::get_mut_unchecked; -#[inline] -fn insert_faces_for_subs( - route: &mut Route, - expr: &RoutingExpr, - tables: &Tables, - net: &Network, - source: usize, - subs: &HashSet, -) { - if net.trees.len() > source { - for sub in subs { - if let Some(sub_idx) = net.get_idx(sub) { - if net.trees[source].directions.len() > sub_idx.index() { - if let Some(direction) = net.trees[source].directions[sub_idx.index()] { - if net.graph.contains_node(direction) { - if let Some(face) = tables.get_face(&net.graph[direction].zid) { - route.entry(face.id).or_insert_with(|| { - let key_expr = - Resource::get_best_key(expr.prefix, expr.suffix, face.id); - ( - face.clone(), - key_expr.to_owned(), - if source != 0 { - Some(source as u16) - } else { - None - }, - ) - }); - } - } - } - } - } - } - } else { - log::trace!("Tree for node sid:{} not yet ready", source); - } -} - -fn compute_data_route( - tables: &Tables, - expr: &mut RoutingExpr, - source: Option, - source_type: WhatAmI, -) -> Arc { - let mut route = HashMap::new(); - let key_expr = expr.full_expr(); - if key_expr.ends_with('/') { - return Arc::new(route); - } - log::trace!( - "compute_data_route({}, {:?}, {:?})", - key_expr, - source, - source_type - ); - let key_expr = match OwnedKeyExpr::try_from(key_expr) { - Ok(ke) => ke, - Err(e) => { - log::warn!("Invalid KE reached the system: {}", e); - return Arc::new(route); - } - }; - let res = Resource::get_resource(expr.prefix, expr.suffix); - let matches = res - .as_ref() - .and_then(|res| res.context.as_ref()) - .map(|ctx| Cow::from(&ctx.matches)) - .unwrap_or_else(|| Cow::from(Resource::get_matches(tables, &key_expr))); - - let master = tables.whatami != WhatAmI::Router - || !tables.hat.full_net(WhatAmI::Peer) - || *tables - .hat - .elect_router(&tables.zid, &key_expr, tables.hat.shared_nodes.iter()) - == tables.zid; - - for mres in matches.iter() { - let mres = mres.upgrade().unwrap(); - if tables.whatami == WhatAmI::Router { - if master || source_type == WhatAmI::Router { - let net = tables.hat.routers_net.as_ref().unwrap(); - let router_source = match source_type { - WhatAmI::Router => source.unwrap(), - _ => net.idx.index(), - }; - insert_faces_for_subs( - &mut route, - expr, - tables, - net, - router_source, - &mres.context().router_subs, - ); - } - - if (master || source_type != WhatAmI::Router) && tables.hat.full_net(WhatAmI::Peer) { - let net = tables.hat.peers_net.as_ref().unwrap(); - let peer_source = match source_type { - WhatAmI::Peer => source.unwrap(), - _ => net.idx.index(), - }; - insert_faces_for_subs( - &mut route, - expr, - tables, - net, - peer_source, - &mres.context().peer_subs, - ); - } - } - - if tables.whatami == WhatAmI::Peer && tables.hat.full_net(WhatAmI::Peer) { - let net = tables.hat.peers_net.as_ref().unwrap(); - let peer_source = match source_type { - WhatAmI::Router | WhatAmI::Peer => source.unwrap(), - _ => net.idx.index(), - }; - insert_faces_for_subs( - &mut route, - expr, - tables, - net, - peer_source, - &mres.context().peer_subs, - ); - } - - if tables.whatami != WhatAmI::Router || master || source_type == WhatAmI::Router { - for (sid, context) in &mres.session_ctxs { - if let Some(subinfo) = &context.subs { - if match tables.whatami { - WhatAmI::Router => context.face.whatami != WhatAmI::Router, - _ => { - source_type == WhatAmI::Client - || context.face.whatami == WhatAmI::Client - } - } && subinfo.mode == Mode::Push - { - 
route.entry(*sid).or_insert_with(|| { - let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, *sid); - (context.face.clone(), key_expr.to_owned(), None) - }); - } - } - } - } - } - for mcast_group in &tables.mcast_groups { - route.insert( - mcast_group.id, - ( - mcast_group.clone(), - expr.full_expr().to_string().into(), - None, - ), - ); - } - Arc::new(route) -} - fn compute_matching_pulls(tables: &Tables, expr: &mut RoutingExpr) -> Arc { let mut pull_caches = vec![]; let ke = if let Ok(ke) = OwnedKeyExpr::try_from(expr.full_expr()) { diff --git a/zenoh/src/net/routing/dispatcher/queries.rs b/zenoh/src/net/routing/dispatcher/queries.rs index 3d7264dc12..509ec80300 100644 --- a/zenoh/src/net/routing/dispatcher/queries.rs +++ b/zenoh/src/net/routing/dispatcher/queries.rs @@ -1,5 +1,3 @@ -use crate::net::routing::PREFIX_LIVELINESS; - // // Copyright (c) 2023 ZettaScale Technology // @@ -13,28 +11,19 @@ use crate::net::routing::PREFIX_LIVELINESS; // Contributors: // ZettaScale Zenoh Team, // -use super::super::hat::network::Network; +use super::super::hat::queries::compute_local_replies; +use super::super::hat::queries::compute_query_route; use super::face::FaceState; -use super::resource::{QueryRoute, QueryRoutes, QueryTargetQabl, QueryTargetQablSet, Resource}; +use super::resource::{QueryRoute, QueryRoutes, QueryTargetQablSet, Resource}; use super::tables::{RoutingExpr, Tables, TablesLock}; use async_trait::async_trait; -use ordered_float::OrderedFloat; use petgraph::graph::NodeIndex; -use std::borrow::Cow; use std::collections::HashMap; -use std::convert::TryFrom; use std::sync::{Arc, Weak}; -use zenoh_buffers::ZBuf; use zenoh_protocol::{ - core::{ - key_expr::{ - include::{Includer, DEFAULT_INCLUDER}, - OwnedKeyExpr, - }, - Encoding, WhatAmI, WireExpr, ZenohId, - }, + core::{Encoding, WhatAmI, WireExpr}, network::{ - declare::{ext, queryable::ext::QueryableInfo}, + declare::ext, request::{ext::TargetType, Request, RequestId}, response::{self, ext::ResponderIdType, Response, ResponseFinal}, }, @@ -48,179 +37,6 @@ pub(crate) struct Query { src_qid: RequestId, } -#[inline] -#[allow(clippy::too_many_arguments)] -fn insert_target_for_qabls( - route: &mut QueryTargetQablSet, - expr: &mut RoutingExpr, - tables: &Tables, - net: &Network, - source: usize, - qabls: &HashMap, - complete: bool, -) { - if net.trees.len() > source { - for (qabl, qabl_info) in qabls { - if let Some(qabl_idx) = net.get_idx(qabl) { - if net.trees[source].directions.len() > qabl_idx.index() { - if let Some(direction) = net.trees[source].directions[qabl_idx.index()] { - if net.graph.contains_node(direction) { - if let Some(face) = tables.get_face(&net.graph[direction].zid) { - if net.distances.len() > qabl_idx.index() { - let key_expr = - Resource::get_best_key(expr.prefix, expr.suffix, face.id); - route.push(QueryTargetQabl { - direction: ( - face.clone(), - key_expr.to_owned(), - if source != 0 { - Some(source as u16) - } else { - None - }, - ), - complete: if complete { - qabl_info.complete as u64 - } else { - 0 - }, - distance: net.distances[qabl_idx.index()], - }); - } - } - } - } - } - } - } - } else { - log::trace!("Tree for node sid:{} not yet ready", source); - } -} - -lazy_static::lazy_static! 
{ - static ref EMPTY_ROUTE: Arc = Arc::new(Vec::new()); -} -fn compute_query_route( - tables: &Tables, - expr: &mut RoutingExpr, - source: Option, - source_type: WhatAmI, -) -> Arc { - let mut route = QueryTargetQablSet::new(); - let key_expr = expr.full_expr(); - if key_expr.ends_with('/') { - return EMPTY_ROUTE.clone(); - } - log::trace!( - "compute_query_route({}, {:?}, {:?})", - key_expr, - source, - source_type - ); - let key_expr = match OwnedKeyExpr::try_from(key_expr) { - Ok(ke) => ke, - Err(e) => { - log::warn!("Invalid KE reached the system: {}", e); - return EMPTY_ROUTE.clone(); - } - }; - let res = Resource::get_resource(expr.prefix, expr.suffix); - let matches = res - .as_ref() - .and_then(|res| res.context.as_ref()) - .map(|ctx| Cow::from(&ctx.matches)) - .unwrap_or_else(|| Cow::from(Resource::get_matches(tables, &key_expr))); - - let master = tables.whatami != WhatAmI::Router - || !tables.hat.full_net(WhatAmI::Peer) - || *tables - .hat - .elect_router(&tables.zid, &key_expr, tables.hat.shared_nodes.iter()) - == tables.zid; - - for mres in matches.iter() { - let mres = mres.upgrade().unwrap(); - let complete = DEFAULT_INCLUDER.includes(mres.expr().as_bytes(), key_expr.as_bytes()); - if tables.whatami == WhatAmI::Router { - if master || source_type == WhatAmI::Router { - let net = tables.hat.routers_net.as_ref().unwrap(); - let router_source = match source_type { - WhatAmI::Router => source.unwrap(), - _ => net.idx.index(), - }; - insert_target_for_qabls( - &mut route, - expr, - tables, - net, - router_source, - &mres.context().router_qabls, - complete, - ); - } - - if (master || source_type != WhatAmI::Router) && tables.hat.full_net(WhatAmI::Peer) { - let net = tables.hat.peers_net.as_ref().unwrap(); - let peer_source = match source_type { - WhatAmI::Peer => source.unwrap(), - _ => net.idx.index(), - }; - insert_target_for_qabls( - &mut route, - expr, - tables, - net, - peer_source, - &mres.context().peer_qabls, - complete, - ); - } - } - - if tables.whatami == WhatAmI::Peer && tables.hat.full_net(WhatAmI::Peer) { - let net = tables.hat.peers_net.as_ref().unwrap(); - let peer_source = match source_type { - WhatAmI::Router | WhatAmI::Peer => source.unwrap(), - _ => net.idx.index(), - }; - insert_target_for_qabls( - &mut route, - expr, - tables, - net, - peer_source, - &mres.context().peer_qabls, - complete, - ); - } - - if tables.whatami != WhatAmI::Router || master || source_type == WhatAmI::Router { - for (sid, context) in &mres.session_ctxs { - if match tables.whatami { - WhatAmI::Router => context.face.whatami != WhatAmI::Router, - _ => source_type == WhatAmI::Client || context.face.whatami == WhatAmI::Client, - } { - let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, *sid); - if let Some(qabl_info) = context.qabl.as_ref() { - route.push(QueryTargetQabl { - direction: (context.face.clone(), key_expr.to_owned(), None), - complete: if complete { - qabl_info.complete as u64 - } else { - 0 - }, - distance: 0.5, - }); - } - } - } - } - } - route.sort_by_key(|qabl| OrderedFloat(qabl.distance)); - Arc::new(route) -} - pub(crate) fn compute_query_routes_(tables: &Tables, res: &Arc) -> QueryRoutes { let mut routes = QueryRoutes { routers_query_routes: vec![], @@ -558,47 +374,6 @@ fn compute_final_route( } } -#[inline] -fn compute_local_replies( - tables: &Tables, - prefix: &Arc, - suffix: &str, - face: &Arc, -) -> Vec<(WireExpr<'static>, ZBuf)> { - let mut result = vec![]; - // Only the first routing point in the query route - // should return the liveliness tokens - 
if face.whatami == WhatAmI::Client { - let key_expr = prefix.expr() + suffix; - let key_expr = match OwnedKeyExpr::try_from(key_expr) { - Ok(ke) => ke, - Err(e) => { - log::warn!("Invalid KE reached the system: {}", e); - return result; - } - }; - if key_expr.starts_with(PREFIX_LIVELINESS) { - let res = Resource::get_resource(prefix, suffix); - let matches = res - .as_ref() - .and_then(|res| res.context.as_ref()) - .map(|ctx| Cow::from(&ctx.matches)) - .unwrap_or_else(|| Cow::from(Resource::get_matches(tables, &key_expr))); - for mres in matches.iter() { - let mres = mres.upgrade().unwrap(); - if (mres.context.is_some() - && (!mres.context().router_subs.is_empty() - || !mres.context().peer_subs.is_empty())) - || mres.session_ctxs.values().any(|ctx| ctx.subs.is_some()) - { - result.push((Resource::get_best_key(&mres, "", face.id), ZBuf::default())); - } - } - } - } - result -} - #[derive(Clone)] struct QueryCleanup { tables: Arc, diff --git a/zenoh/src/net/routing/dispatcher/resource.rs b/zenoh/src/net/routing/dispatcher/resource.rs index 85872e61b6..da36373df0 100644 --- a/zenoh/src/net/routing/dispatcher/resource.rs +++ b/zenoh/src/net/routing/dispatcher/resource.rs @@ -13,7 +13,8 @@ // use super::face::FaceState; use super::tables::{Tables, TablesLock}; -use std::collections::{HashMap, HashSet}; +use crate::net::routing::hat::HatContext; +use std::collections::HashMap; use std::convert::TryInto; use std::hash::{Hash, Hasher}; use std::sync::{Arc, Weak}; @@ -22,7 +23,7 @@ use zenoh_protocol::network::request::ext::TargetType; use zenoh_protocol::network::RequestId; use zenoh_protocol::zenoh::PushBody; use zenoh_protocol::{ - core::{key_expr::keyexpr, ExprId, WireExpr, ZenohId}, + core::{key_expr::keyexpr, ExprId, WireExpr}, network::{ declare::{ ext, queryable::ext::QueryableInfo, subscriber::ext::SubscriberInfo, Declare, @@ -74,12 +75,9 @@ pub(crate) struct QueryRoutes { } pub(crate) struct ResourceContext { - pub(crate) router_subs: HashSet, - pub(crate) peer_subs: HashSet, - pub(crate) router_qabls: HashMap, - pub(crate) peer_qabls: HashMap, pub(crate) matches: Vec>, pub(crate) matching_pulls: Arc, + pub(crate) hat: HatContext, pub(crate) valid_data_routes: bool, pub(crate) routers_data_routes: Vec>, pub(crate) peers_data_routes: Vec>, @@ -95,12 +93,9 @@ pub(crate) struct ResourceContext { impl ResourceContext { fn new() -> ResourceContext { ResourceContext { - router_subs: HashSet::new(), - peer_subs: HashSet::new(), - router_qabls: HashMap::new(), - peer_qabls: HashMap::new(), matches: Vec::new(), matching_pulls: Arc::new(Vec::new()), + hat: HatContext::new(), valid_data_routes: false, routers_data_routes: Vec::new(), peers_data_routes: Vec::new(), diff --git a/zenoh/src/net/routing/dispatcher/tables.rs b/zenoh/src/net/routing/dispatcher/tables.rs index bc2eb520a4..20ec1deec6 100644 --- a/zenoh/src/net/routing/dispatcher/tables.rs +++ b/zenoh/src/net/routing/dispatcher/tables.rs @@ -219,91 +219,7 @@ pub fn close_face(tables: &TablesLock, face: &Weak) { Some(mut face) => { log::debug!("Close {}", face); finalize_pending_queries(tables, &mut face); - - let ctrl_lock = zlock!(tables.ctrl_lock); - let mut wtables = zwrite!(tables.tables); - let mut face_clone = face.clone(); - let face = get_mut_unchecked(&mut face); - for res in face.remote_mappings.values_mut() { - get_mut_unchecked(res).session_ctxs.remove(&face.id); - Resource::clean(res); - } - face.remote_mappings.clear(); - for res in face.local_mappings.values_mut() { - get_mut_unchecked(res).session_ctxs.remove(&face.id); - 
Resource::clean(res); - } - face.local_mappings.clear(); - - let mut subs_matches = vec![]; - for mut res in face.remote_subs.drain() { - get_mut_unchecked(&mut res).session_ctxs.remove(&face.id); - undeclare_client_subscription(&mut wtables, &mut face_clone, &mut res); - - if res.context.is_some() { - for match_ in &res.context().matches { - let mut match_ = match_.upgrade().unwrap(); - if !Arc::ptr_eq(&match_, &res) { - get_mut_unchecked(&mut match_) - .context_mut() - .valid_data_routes = false; - subs_matches.push(match_); - } - } - get_mut_unchecked(&mut res).context_mut().valid_data_routes = false; - subs_matches.push(res); - } - } - - let mut qabls_matches = vec![]; - for mut res in face.remote_qabls.drain() { - get_mut_unchecked(&mut res).session_ctxs.remove(&face.id); - undeclare_client_queryable(&mut wtables, &mut face_clone, &mut res); - - if res.context.is_some() { - for match_ in &res.context().matches { - let mut match_ = match_.upgrade().unwrap(); - if !Arc::ptr_eq(&match_, &res) { - get_mut_unchecked(&mut match_) - .context_mut() - .valid_query_routes = false; - qabls_matches.push(match_); - } - } - get_mut_unchecked(&mut res).context_mut().valid_query_routes = false; - qabls_matches.push(res); - } - } - drop(wtables); - - let mut matches_data_routes = vec![]; - let mut matches_query_routes = vec![]; - let rtables = zread!(tables.tables); - for _match in subs_matches.drain(..) { - matches_data_routes.push((_match.clone(), compute_data_routes_(&rtables, &_match))); - } - for _match in qabls_matches.drain(..) { - matches_query_routes - .push((_match.clone(), compute_query_routes_(&rtables, &_match))); - } - drop(rtables); - - let mut wtables = zwrite!(tables.tables); - for (mut res, data_routes) in matches_data_routes { - get_mut_unchecked(&mut res) - .context_mut() - .update_data_routes(data_routes); - Resource::clean(&mut res); - } - for (mut res, query_routes) in matches_query_routes { - get_mut_unchecked(&mut res) - .context_mut() - .update_query_routes(query_routes); - Resource::clean(&mut res); - } - wtables.faces.remove(&face.id); - drop(wtables); - drop(ctrl_lock); + super::super::hat::close_face(tables, &mut face); } None => log::error!("Face already closed!"), } diff --git a/zenoh/src/net/routing/hat/mod.rs b/zenoh/src/net/routing/hat/mod.rs index 905c4ff4fd..a36aaa8331 100644 --- a/zenoh/src/net/routing/hat/mod.rs +++ b/zenoh/src/net/routing/hat/mod.rs @@ -17,15 +17,23 @@ //! This module is intended for Zenoh's internal use. //! //! 
[Click here for Zenoh's documentation](../zenoh/index.html) -use self::network::Network; -use super::dispatcher::tables::{Resource, TablesLock}; +use self::{ + network::Network, pubsub::undeclare_client_subscription, queries::undeclare_client_queryable, +}; +use super::dispatcher::{ + face::FaceState, + queries::compute_query_routes_, + tables::{compute_data_routes_, Resource, TablesLock}, +}; use async_std::task::JoinHandle; use std::{ - collections::{hash_map::DefaultHasher, HashSet}, + collections::{hash_map::DefaultHasher, HashMap, HashSet}, hash::Hasher, sync::Arc, }; use zenoh_config::{WhatAmI, ZenohId}; +use zenoh_protocol::network::declare::queryable::ext::QueryableInfo; +use zenoh_sync::get_mut_unchecked; pub mod network; pub mod pubsub; @@ -193,3 +201,125 @@ impl HatTables { } } } + +pub(crate) struct HatContext { + router_subs: HashSet, + peer_subs: HashSet, + router_qabls: HashMap, + peer_qabls: HashMap, +} + +impl HatContext { + pub fn new() -> Self { + Self { + router_subs: HashSet::new(), + peer_subs: HashSet::new(), + router_qabls: HashMap::new(), + peer_qabls: HashMap::new(), + } + } +} + +pub(crate) struct HatFace { + pub(crate) local_subs: HashSet>, + pub(crate) remote_subs: HashSet>, + pub(crate) local_qabls: HashMap, QueryableInfo>, + pub(crate) remote_qabls: HashSet>, +} + +impl HatFace { + pub fn new() -> Self { + Self { + local_subs: HashSet::new(), + remote_subs: HashSet::new(), + local_qabls: HashMap::new(), + remote_qabls: HashSet::new(), + } + } +} + +pub(crate) fn close_face(tables: &TablesLock, face: &mut Arc) { + let ctrl_lock = zlock!(tables.ctrl_lock); + let mut wtables = zwrite!(tables.tables); + let mut face_clone = face.clone(); + let face = get_mut_unchecked(face); + for res in face.remote_mappings.values_mut() { + get_mut_unchecked(res).session_ctxs.remove(&face.id); + Resource::clean(res); + } + face.remote_mappings.clear(); + for res in face.local_mappings.values_mut() { + get_mut_unchecked(res).session_ctxs.remove(&face.id); + Resource::clean(res); + } + face.local_mappings.clear(); + + let mut subs_matches = vec![]; + for mut res in face.hat.remote_subs.drain() { + get_mut_unchecked(&mut res).session_ctxs.remove(&face.id); + undeclare_client_subscription(&mut wtables, &mut face_clone, &mut res); + + if res.context.is_some() { + for match_ in &res.context().matches { + let mut match_ = match_.upgrade().unwrap(); + if !Arc::ptr_eq(&match_, &res) { + get_mut_unchecked(&mut match_) + .context_mut() + .valid_data_routes = false; + subs_matches.push(match_); + } + } + get_mut_unchecked(&mut res).context_mut().valid_data_routes = false; + subs_matches.push(res); + } + } + + let mut qabls_matches = vec![]; + for mut res in face.hat.remote_qabls.drain() { + get_mut_unchecked(&mut res).session_ctxs.remove(&face.id); + undeclare_client_queryable(&mut wtables, &mut face_clone, &mut res); + + if res.context.is_some() { + for match_ in &res.context().matches { + let mut match_ = match_.upgrade().unwrap(); + if !Arc::ptr_eq(&match_, &res) { + get_mut_unchecked(&mut match_) + .context_mut() + .valid_query_routes = false; + qabls_matches.push(match_); + } + } + get_mut_unchecked(&mut res).context_mut().valid_query_routes = false; + qabls_matches.push(res); + } + } + drop(wtables); + + let mut matches_data_routes = vec![]; + let mut matches_query_routes = vec![]; + let rtables = zread!(tables.tables); + for _match in subs_matches.drain(..) 
{ + matches_data_routes.push((_match.clone(), compute_data_routes_(&rtables, &_match))); + } + for _match in qabls_matches.drain(..) { + matches_query_routes.push((_match.clone(), compute_query_routes_(&rtables, &_match))); + } + drop(rtables); + + let mut wtables = zwrite!(tables.tables); + for (mut res, data_routes) in matches_data_routes { + get_mut_unchecked(&mut res) + .context_mut() + .update_data_routes(data_routes); + Resource::clean(&mut res); + } + for (mut res, query_routes) in matches_query_routes { + get_mut_unchecked(&mut res) + .context_mut() + .update_query_routes(query_routes); + Resource::clean(&mut res); + } + wtables.faces.remove(&face.id); + drop(wtables); + drop(ctrl_lock); +} diff --git a/zenoh/src/net/routing/hat/pubsub.rs b/zenoh/src/net/routing/hat/pubsub.rs index a92d8a4640..650c9fe849 100644 --- a/zenoh/src/net/routing/hat/pubsub.rs +++ b/zenoh/src/net/routing/hat/pubsub.rs @@ -1,3 +1,5 @@ +use crate::net::routing::dispatcher::tables::{Route, RoutingExpr}; + // // Copyright (c) 2023 ZettaScale Technology // @@ -19,9 +21,11 @@ use super::super::PREFIX_LIVELINESS; use super::network::Network; use super::HatTables; use petgraph::graph::NodeIndex; -use std::collections::HashMap; +use std::borrow::Cow; +use std::collections::{HashMap, HashSet}; use std::sync::{Arc, RwLockReadGuard}; use zenoh_core::zread; +use zenoh_protocol::core::key_expr::OwnedKeyExpr; use zenoh_protocol::{ core::{key_expr::keyexpr, Reliability, WhatAmI, WireExpr, ZenohId}, network::declare::{ @@ -80,7 +84,7 @@ fn propagate_simple_subscription_to( full_peer_net: bool, ) { if (src_face.id != dst_face.id || res.expr().starts_with(PREFIX_LIVELINESS)) - && !dst_face.local_subs.contains(res) + && !dst_face.hat.local_subs.contains(res) && match tables.whatami { WhatAmI::Router => { if full_peer_net { @@ -102,7 +106,10 @@ fn propagate_simple_subscription_to( _ => src_face.whatami == WhatAmI::Client || dst_face.whatami == WhatAmI::Client, } { - get_mut_unchecked(dst_face).local_subs.insert(res.clone()); + get_mut_unchecked(dst_face) + .hat + .local_subs + .insert(res.clone()); let key_expr = Resource::decl_key(res, dst_face); dst_face.primitives.send_declare(Declare { ext_qos: ext::QoSType::declare_default(), @@ -186,7 +193,7 @@ fn register_router_subscription( sub_info: &SubscriberInfo, router: ZenohId, ) { - if !res.context().router_subs.contains(&router) { + if !res.context().hat.router_subs.contains(&router) { // Register router subscription { log::debug!( @@ -196,6 +203,7 @@ fn register_router_subscription( ); get_mut_unchecked(res) .context_mut() + .hat .router_subs .insert(router); tables.hat.router_subs.insert(res.clone()); @@ -276,11 +284,15 @@ fn register_peer_subscription( sub_info: &SubscriberInfo, peer: ZenohId, ) { - if !res.context().peer_subs.contains(&peer) { + if !res.context().hat.peer_subs.contains(&peer) { // Register peer subscription { log::debug!("Register peer subscription {} (peer: {})", res.expr(), peer); - get_mut_unchecked(res).context_mut().peer_subs.insert(peer); + get_mut_unchecked(res) + .context_mut() + .hat + .peer_subs + .insert(peer); tables.hat.peer_subs.insert(res.clone()); } @@ -392,7 +404,7 @@ fn register_client_subscription( } } } - get_mut_unchecked(face).remote_subs.insert(res.clone()); + get_mut_unchecked(face).hat.remote_subs.insert(res.clone()); } pub fn declare_client_subscription( @@ -515,6 +527,7 @@ fn remote_router_subs(tables: &Tables, res: &Arc) -> bool { res.context.is_some() && res .context() + .hat .router_subs .iter() .any(|peer| peer != 
&tables.zid) @@ -525,6 +538,7 @@ fn remote_peer_subs(tables: &Tables, res: &Arc) -> bool { res.context.is_some() && res .context() + .hat .peer_subs .iter() .any(|peer| peer != &tables.zid) @@ -583,7 +597,7 @@ fn send_forget_sourced_subscription_to_net_childs( fn propagate_forget_simple_subscription(tables: &mut Tables, res: &Arc) { for face in tables.faces.values_mut() { - if face.local_subs.contains(res) { + if face.hat.local_subs.contains(res) { let wire_expr = Resource::get_best_key(res, "", face.id); face.primitives.send_declare(Declare { ext_qos: ext::QoSType::declare_default(), @@ -594,15 +608,15 @@ fn propagate_forget_simple_subscription(tables: &mut Tables, res: &Arc ext_wire_expr: WireExprType { wire_expr }, }), }); - get_mut_unchecked(face).local_subs.remove(res); + get_mut_unchecked(face).hat.local_subs.remove(res); } } } fn propagate_forget_simple_subscription_to_peers(tables: &mut Tables, res: &Arc) { if !tables.hat.full_net(WhatAmI::Peer) - && res.context().router_subs.len() == 1 - && res.context().router_subs.contains(&tables.zid) + && res.context().hat.router_subs.len() == 1 + && res.context().hat.router_subs.contains(&tables.zid) { for mut face in tables .faces @@ -611,7 +625,7 @@ fn propagate_forget_simple_subscription_to_peers(tables: &mut Tables, res: &Arc< .collect::>>() { if face.whatami == WhatAmI::Peer - && face.local_subs.contains(res) + && face.hat.local_subs.contains(res) && !res.session_ctxs.values().any(|s| { face.zid != s.face.zid && s.subs.is_some() @@ -631,7 +645,7 @@ fn propagate_forget_simple_subscription_to_peers(tables: &mut Tables, res: &Arc< }), }); - get_mut_unchecked(&mut face).local_subs.remove(res); + get_mut_unchecked(&mut face).hat.local_subs.remove(res); } } } @@ -681,10 +695,11 @@ fn unregister_router_subscription(tables: &mut Tables, res: &mut Arc, ); get_mut_unchecked(res) .context_mut() + .hat .router_subs .retain(|sub| sub != router); - if res.context().router_subs.is_empty() { + if res.context().hat.router_subs.is_empty() { tables.hat.router_subs.retain(|sub| !Arc::ptr_eq(sub, res)); if tables.hat.full_net(WhatAmI::Peer) { @@ -702,7 +717,7 @@ fn undeclare_router_subscription( res: &mut Arc, router: &ZenohId, ) { - if res.context().router_subs.contains(router) { + if res.context().hat.router_subs.contains(router) { unregister_router_subscription(tables, res, router); propagate_forget_sourced_subscription(tables, res, face, router, WhatAmI::Router); } @@ -750,10 +765,11 @@ fn unregister_peer_subscription(tables: &mut Tables, res: &mut Arc, pe ); get_mut_unchecked(res) .context_mut() + .hat .peer_subs .retain(|sub| sub != peer); - if res.context().peer_subs.is_empty() { + if res.context().hat.peer_subs.is_empty() { tables.hat.peer_subs.retain(|sub| !Arc::ptr_eq(sub, res)); if tables.whatami == WhatAmI::Peer { @@ -768,7 +784,7 @@ fn undeclare_peer_subscription( res: &mut Arc, peer: &ZenohId, ) { - if res.context().peer_subs.contains(peer) { + if res.context().hat.peer_subs.contains(peer) { unregister_peer_subscription(tables, res, peer); propagate_forget_sourced_subscription(tables, res, face, peer, WhatAmI::Peer); } @@ -825,7 +841,7 @@ pub(crate) fn undeclare_client_subscription( if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) { get_mut_unchecked(ctx).subs = None; } - get_mut_unchecked(face).remote_subs.remove(res); + get_mut_unchecked(face).hat.remote_subs.remove(res); let mut client_subs = client_subs(res); let router_subs = remote_router_subs(tables, res); @@ -855,7 +871,7 @@ pub(crate) fn 
undeclare_client_subscription( } if client_subs.len() == 1 && !router_subs && !peer_subs { let face = &mut client_subs[0]; - if face.local_subs.contains(res) + if face.hat.local_subs.contains(res) && !(face.whatami == WhatAmI::Client && res.expr().starts_with(PREFIX_LIVELINESS)) { let wire_expr = Resource::get_best_key(res, "", face.id); @@ -869,7 +885,7 @@ pub(crate) fn undeclare_client_subscription( }), }); - get_mut_unchecked(face).local_subs.remove(res); + get_mut_unchecked(face).hat.local_subs.remove(res); } } } @@ -917,7 +933,7 @@ pub(crate) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { WhatAmI::Router => { if face.whatami == WhatAmI::Client { for sub in &tables.hat.router_subs { - get_mut_unchecked(face).local_subs.insert(sub.clone()); + get_mut_unchecked(face).hat.local_subs.insert(sub.clone()); let key_expr = Resource::decl_key(sub, face); face.primitives.send_declare(Declare { ext_qos: ext::QoSType::declare_default(), @@ -933,7 +949,12 @@ pub(crate) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { } else if face.whatami == WhatAmI::Peer && !tables.hat.full_net(WhatAmI::Peer) { for sub in &tables.hat.router_subs { if sub.context.is_some() - && (sub.context().router_subs.iter().any(|r| *r != tables.zid) + && (sub + .context() + .hat + .router_subs + .iter() + .any(|r| *r != tables.zid) || sub.session_ctxs.values().any(|s| { s.subs.is_some() && (s.face.whatami == WhatAmI::Client @@ -941,7 +962,7 @@ pub(crate) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { && tables.hat.failover_brokering(s.face.zid, face.zid))) })) { - get_mut_unchecked(face).local_subs.insert(sub.clone()); + get_mut_unchecked(face).hat.local_subs.insert(sub.clone()); let key_expr = Resource::decl_key(sub, face); face.primitives.send_declare(Declare { ext_qos: ext::QoSType::declare_default(), @@ -961,7 +982,7 @@ pub(crate) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { if tables.hat.full_net(WhatAmI::Peer) { if face.whatami == WhatAmI::Client { for sub in &tables.hat.peer_subs { - get_mut_unchecked(face).local_subs.insert(sub.clone()); + get_mut_unchecked(face).hat.local_subs.insert(sub.clone()); let key_expr = Resource::decl_key(sub, face); face.primitives.send_declare(Declare { ext_qos: ext::QoSType::declare_default(), @@ -982,7 +1003,7 @@ pub(crate) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { .cloned() .collect::>>() { - for sub in &src_face.remote_subs { + for sub in &src_face.hat.remote_subs { propagate_simple_subscription_to( tables, face, @@ -1002,7 +1023,7 @@ pub(crate) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { .cloned() .collect::>>() { - for sub in &src_face.remote_subs { + for sub in &src_face.hat.remote_subs { propagate_simple_subscription_to( tables, face, @@ -1024,7 +1045,7 @@ pub(crate) fn pubsub_remove_node(tables: &mut Tables, node: &ZenohId, net_type: .hat .router_subs .iter() - .filter(|res| res.context().router_subs.contains(node)) + .filter(|res| res.context().hat.router_subs.contains(node)) .cloned() .collect::>>() { @@ -1044,7 +1065,7 @@ pub(crate) fn pubsub_remove_node(tables: &mut Tables, node: &ZenohId, net_type: .hat .peer_subs .iter() - .filter(|res| res.context().peer_subs.contains(node)) + .filter(|res| res.context().hat.peer_subs.contains(node)) .cloned() .collect::>>() { @@ -1092,8 +1113,8 @@ pub(crate) fn pubsub_tree_change( for res in subs_res { let subs = match net_type { - WhatAmI::Router => &res.context().router_subs, - _ => &res.context().peer_subs, + WhatAmI::Router => &res.context().hat.router_subs, + _ 
=> &res.context().hat.peer_subs, }; for sub in subs { if *sub == tree_id { @@ -1127,7 +1148,7 @@ pub(crate) fn pubsub_linkstate_change(tables: &mut Tables, zid: &ZenohId, links: && tables.whatami == WhatAmI::Router && src_face.whatami == WhatAmI::Peer { - for res in &src_face.remote_subs { + for res in &src_face.hat.remote_subs { let client_subs = res .session_ctxs .values() @@ -1139,7 +1160,7 @@ pub(crate) fn pubsub_linkstate_change(tables: &mut Tables, zid: &ZenohId, links: { let dst_face = &mut get_mut_unchecked(ctx).face; if dst_face.whatami == WhatAmI::Peer && src_face.zid != dst_face.zid { - if dst_face.local_subs.contains(res) { + if dst_face.hat.local_subs.contains(res) { let forget = !HatTables::failover_brokering_to(links, dst_face.zid) && { let ctx_links = tables @@ -1171,11 +1192,14 @@ pub(crate) fn pubsub_linkstate_change(tables: &mut Tables, zid: &ZenohId, links: ), }); - get_mut_unchecked(dst_face).local_subs.remove(res); + get_mut_unchecked(dst_face).hat.local_subs.remove(res); } } else if HatTables::failover_brokering_to(links, ctx.face.zid) { let dst_face = &mut get_mut_unchecked(ctx).face; - get_mut_unchecked(dst_face).local_subs.insert(res.clone()); + get_mut_unchecked(dst_face) + .hat + .local_subs + .insert(res.clone()); let key_expr = Resource::decl_key(res, dst_face); let sub_info = SubscriberInfo { reliability: Reliability::Reliable, // TODO @@ -1199,3 +1223,166 @@ pub(crate) fn pubsub_linkstate_change(tables: &mut Tables, zid: &ZenohId, links: } } } + +#[inline] +fn insert_faces_for_subs( + route: &mut Route, + expr: &RoutingExpr, + tables: &Tables, + net: &Network, + source: usize, + subs: &HashSet, +) { + if net.trees.len() > source { + for sub in subs { + if let Some(sub_idx) = net.get_idx(sub) { + if net.trees[source].directions.len() > sub_idx.index() { + if let Some(direction) = net.trees[source].directions[sub_idx.index()] { + if net.graph.contains_node(direction) { + if let Some(face) = tables.get_face(&net.graph[direction].zid) { + route.entry(face.id).or_insert_with(|| { + let key_expr = + Resource::get_best_key(expr.prefix, expr.suffix, face.id); + ( + face.clone(), + key_expr.to_owned(), + if source != 0 { + Some(source as u16) + } else { + None + }, + ) + }); + } + } + } + } + } + } + } else { + log::trace!("Tree for node sid:{} not yet ready", source); + } +} + +pub(crate) fn compute_data_route( + tables: &Tables, + expr: &mut RoutingExpr, + source: Option, + source_type: WhatAmI, +) -> Arc { + let mut route = HashMap::new(); + let key_expr = expr.full_expr(); + if key_expr.ends_with('/') { + return Arc::new(route); + } + log::trace!( + "compute_data_route({}, {:?}, {:?})", + key_expr, + source, + source_type + ); + let key_expr = match OwnedKeyExpr::try_from(key_expr) { + Ok(ke) => ke, + Err(e) => { + log::warn!("Invalid KE reached the system: {}", e); + return Arc::new(route); + } + }; + let res = Resource::get_resource(expr.prefix, expr.suffix); + let matches = res + .as_ref() + .and_then(|res| res.context.as_ref()) + .map(|ctx| Cow::from(&ctx.matches)) + .unwrap_or_else(|| Cow::from(Resource::get_matches(tables, &key_expr))); + + let master = tables.whatami != WhatAmI::Router + || !tables.hat.full_net(WhatAmI::Peer) + || *tables + .hat + .elect_router(&tables.zid, &key_expr, tables.hat.shared_nodes.iter()) + == tables.zid; + + for mres in matches.iter() { + let mres = mres.upgrade().unwrap(); + if tables.whatami == WhatAmI::Router { + if master || source_type == WhatAmI::Router { + let net = tables.hat.routers_net.as_ref().unwrap(); + let 
router_source = match source_type { + WhatAmI::Router => source.unwrap(), + _ => net.idx.index(), + }; + insert_faces_for_subs( + &mut route, + expr, + tables, + net, + router_source, + &mres.context().hat.router_subs, + ); + } + + if (master || source_type != WhatAmI::Router) && tables.hat.full_net(WhatAmI::Peer) { + let net = tables.hat.peers_net.as_ref().unwrap(); + let peer_source = match source_type { + WhatAmI::Peer => source.unwrap(), + _ => net.idx.index(), + }; + insert_faces_for_subs( + &mut route, + expr, + tables, + net, + peer_source, + &mres.context().hat.peer_subs, + ); + } + } + + if tables.whatami == WhatAmI::Peer && tables.hat.full_net(WhatAmI::Peer) { + let net = tables.hat.peers_net.as_ref().unwrap(); + let peer_source = match source_type { + WhatAmI::Router | WhatAmI::Peer => source.unwrap(), + _ => net.idx.index(), + }; + insert_faces_for_subs( + &mut route, + expr, + tables, + net, + peer_source, + &mres.context().hat.peer_subs, + ); + } + + if tables.whatami != WhatAmI::Router || master || source_type == WhatAmI::Router { + for (sid, context) in &mres.session_ctxs { + if let Some(subinfo) = &context.subs { + if match tables.whatami { + WhatAmI::Router => context.face.whatami != WhatAmI::Router, + _ => { + source_type == WhatAmI::Client + || context.face.whatami == WhatAmI::Client + } + } && subinfo.mode == Mode::Push + { + route.entry(*sid).or_insert_with(|| { + let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, *sid); + (context.face.clone(), key_expr.to_owned(), None) + }); + } + } + } + } + } + for mcast_group in &tables.mcast_groups { + route.insert( + mcast_group.id, + ( + mcast_group.clone(), + expr.full_expr().to_string().into(), + None, + ), + ); + } + Arc::new(route) +} diff --git a/zenoh/src/net/routing/hat/queries.rs b/zenoh/src/net/routing/hat/queries.rs index 3e4da6cab8..514bff6d11 100644 --- a/zenoh/src/net/routing/hat/queries.rs +++ b/zenoh/src/net/routing/hat/queries.rs @@ -1,3 +1,6 @@ +use crate::net::routing::dispatcher::tables::{QueryTargetQabl, QueryTargetQablSet, RoutingExpr}; +use crate::net::routing::PREFIX_LIVELINESS; + // // Copyright (c) 2023 ZettaScale Technology // @@ -17,9 +20,14 @@ use super::super::dispatcher::resource::{Resource, RoutingContext, SessionContex use super::super::dispatcher::tables::{Tables, TablesLock}; use super::network::Network; use super::HatTables; +use ordered_float::OrderedFloat; use petgraph::graph::NodeIndex; +use std::borrow::Cow; use std::collections::HashMap; use std::sync::{Arc, RwLockReadGuard}; +use zenoh_buffers::ZBuf; +use zenoh_protocol::core::key_expr::include::{Includer, DEFAULT_INCLUDER}; +use zenoh_protocol::core::key_expr::OwnedKeyExpr; use zenoh_protocol::{ core::{key_expr::keyexpr, WhatAmI, WireExpr, ZenohId}, network::declare::{ @@ -48,7 +56,7 @@ fn merge_qabl_infos(mut this: QueryableInfo, info: &QueryableInfo) -> QueryableI fn local_router_qabl_info(tables: &Tables, res: &Arc) -> QueryableInfo { let info = if tables.hat.full_net(WhatAmI::Peer) { res.context.as_ref().and_then(|ctx| { - ctx.peer_qabls.iter().fold(None, |accu, (zid, info)| { + ctx.hat.peer_qabls.iter().fold(None, |accu, (zid, info)| { if *zid != tables.zid { Some(match accu { Some(accu) => merge_qabl_infos(accu, info), @@ -83,6 +91,7 @@ fn local_router_qabl_info(tables: &Tables, res: &Arc) -> QueryableInfo fn local_peer_qabl_info(tables: &Tables, res: &Arc) -> QueryableInfo { let info = if tables.whatami == WhatAmI::Router && res.context.is_some() { res.context() + .hat .router_qabls .iter() .fold(None, |accu, (zid, 
info)| { @@ -119,6 +128,7 @@ fn local_peer_qabl_info(tables: &Tables, res: &Arc) -> QueryableInfo { fn local_qabl_info(tables: &Tables, res: &Arc, face: &Arc) -> QueryableInfo { let mut info = if tables.whatami == WhatAmI::Router && res.context.is_some() { res.context() + .hat .router_qabls .iter() .fold(None, |accu, (zid, info)| { @@ -137,6 +147,7 @@ fn local_qabl_info(tables: &Tables, res: &Arc, face: &Arc) if res.context.is_some() && tables.hat.full_net(WhatAmI::Peer) { info = res .context() + .hat .peer_qabls .iter() .fold(info, |accu, (zid, info)| { @@ -224,7 +235,7 @@ fn propagate_simple_queryable( let faces = tables.faces.values().cloned(); for mut dst_face in faces { let info = local_qabl_info(tables, res, &dst_face); - let current_info = dst_face.local_qabls.get(res); + let current_info = dst_face.hat.local_qabls.get(res); if (src_face.is_none() || src_face.as_ref().unwrap().id != dst_face.id) && (current_info.is_none() || *current_info.unwrap() != info) && match tables.whatami { @@ -259,6 +270,7 @@ fn propagate_simple_queryable( } { get_mut_unchecked(&mut dst_face) + .hat .local_qabls .insert(res.clone(), info); let key_expr = Resource::decl_key(res, &mut dst_face); @@ -321,7 +333,7 @@ fn register_router_queryable( qabl_info: &QueryableInfo, router: ZenohId, ) { - let current_info = res.context().router_qabls.get(&router); + let current_info = res.context().hat.router_qabls.get(&router); if current_info.is_none() || current_info.unwrap() != qabl_info { // Register router queryable { @@ -332,6 +344,7 @@ fn register_router_queryable( ); get_mut_unchecked(res) .context_mut() + .hat .router_qabls .insert(router, *qabl_info); tables.hat.router_qabls.insert(res.clone()); @@ -421,13 +434,14 @@ fn register_peer_queryable( qabl_info: &QueryableInfo, peer: ZenohId, ) { - let current_info = res.context().peer_qabls.get(&peer); + let current_info = res.context().hat.peer_qabls.get(&peer); if current_info.is_none() || current_info.unwrap() != qabl_info { // Register peer queryable { log::debug!("Register peer queryable {} (peer: {})", res.expr(), peer,); get_mut_unchecked(res) .context_mut() + .hat .peer_qabls .insert(peer, *qabl_info); tables.hat.peer_qabls.insert(res.clone()); @@ -532,7 +546,7 @@ fn register_client_queryable( })) .qabl = Some(*qabl_info); } - get_mut_unchecked(face).remote_qabls.insert(res.clone()); + get_mut_unchecked(face).hat.remote_qabls.insert(res.clone()); } pub fn declare_client_queryable( @@ -626,6 +640,7 @@ fn remote_router_qabls(tables: &Tables, res: &Arc) -> bool { res.context.is_some() && res .context() + .hat .router_qabls .keys() .any(|router| router != &tables.zid) @@ -636,6 +651,7 @@ fn remote_peer_qabls(tables: &Tables, res: &Arc) -> bool { res.context.is_some() && res .context() + .hat .peer_qabls .keys() .any(|peer| peer != &tables.zid) @@ -694,7 +710,7 @@ fn send_forget_sourced_queryable_to_net_childs( fn propagate_forget_simple_queryable(tables: &mut Tables, res: &mut Arc) { for face in tables.faces.values_mut() { - if face.local_qabls.contains_key(res) { + if face.hat.local_qabls.contains_key(res) { let wire_expr = Resource::get_best_key(res, "", face.id); face.primitives.send_declare(Declare { ext_qos: ext::QoSType::declare_default(), @@ -706,15 +722,15 @@ fn propagate_forget_simple_queryable(tables: &mut Tables, res: &mut Arc) { if !tables.hat.full_net(WhatAmI::Peer) - && res.context().router_qabls.len() == 1 - && res.context().router_qabls.contains_key(&tables.zid) + && res.context().hat.router_qabls.len() == 1 + && 
res.context().hat.router_qabls.contains_key(&tables.zid) { for mut face in tables .faces @@ -723,7 +739,7 @@ fn propagate_forget_simple_queryable_to_peers(tables: &mut Tables, res: &mut Arc .collect::>>() { if face.whatami == WhatAmI::Peer - && face.local_qabls.contains_key(res) + && face.hat.local_qabls.contains_key(res) && !res.session_ctxs.values().any(|s| { face.zid != s.face.zid && s.qabl.is_some() @@ -743,7 +759,7 @@ fn propagate_forget_simple_queryable_to_peers(tables: &mut Tables, res: &mut Arc }), }); - get_mut_unchecked(&mut face).local_qabls.remove(res); + get_mut_unchecked(&mut face).hat.local_qabls.remove(res); } } } @@ -793,10 +809,11 @@ fn unregister_router_queryable(tables: &mut Tables, res: &mut Arc, rou ); get_mut_unchecked(res) .context_mut() + .hat .router_qabls .remove(router); - if res.context().router_qabls.is_empty() { + if res.context().hat.router_qabls.is_empty() { tables .hat .router_qabls @@ -817,7 +834,7 @@ fn undeclare_router_queryable( res: &mut Arc, router: &ZenohId, ) { - if res.context().router_qabls.contains_key(router) { + if res.context().hat.router_qabls.contains_key(router) { unregister_router_queryable(tables, res, router); propagate_forget_sourced_queryable(tables, res, face, router, WhatAmI::Router); } @@ -860,9 +877,13 @@ pub fn forget_router_queryable( fn unregister_peer_queryable(tables: &mut Tables, res: &mut Arc, peer: &ZenohId) { log::debug!("Unregister peer queryable {} (peer: {})", res.expr(), peer,); - get_mut_unchecked(res).context_mut().peer_qabls.remove(peer); + get_mut_unchecked(res) + .context_mut() + .hat + .peer_qabls + .remove(peer); - if res.context().peer_qabls.is_empty() { + if res.context().hat.peer_qabls.is_empty() { tables.hat.peer_qabls.retain(|qabl| !Arc::ptr_eq(qabl, res)); if tables.whatami == WhatAmI::Peer { @@ -877,7 +898,7 @@ fn undeclare_peer_queryable( res: &mut Arc, peer: &ZenohId, ) { - if res.context().peer_qabls.contains_key(peer) { + if res.context().hat.peer_qabls.contains_key(peer) { unregister_peer_queryable(tables, res, peer); propagate_forget_sourced_queryable(tables, res, face, peer, WhatAmI::Peer); } @@ -938,7 +959,7 @@ pub(crate) fn undeclare_client_queryable( if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) { get_mut_unchecked(ctx).qabl = None; if ctx.qabl.is_none() { - get_mut_unchecked(face).remote_qabls.remove(res); + get_mut_unchecked(face).hat.remote_qabls.remove(res); } } @@ -981,7 +1002,7 @@ pub(crate) fn undeclare_client_queryable( if client_qabls.len() == 1 && !router_qabls && !peer_qabls { let face = &mut client_qabls[0]; - if face.local_qabls.contains_key(res) { + if face.hat.local_qabls.contains_key(res) { let wire_expr = Resource::get_best_key(res, "", face.id); face.primitives.send_declare(Declare { ext_qos: ext::QoSType::declare_default(), @@ -993,7 +1014,7 @@ pub(crate) fn undeclare_client_queryable( }), }); - get_mut_unchecked(face).local_qabls.remove(res); + get_mut_unchecked(face).hat.local_qabls.remove(res); } } } @@ -1040,6 +1061,7 @@ pub(crate) fn queries_new_face(tables: &mut Tables, face: &mut Arc) { if qabl.context.is_some() { let info = local_qabl_info(tables, qabl, face); get_mut_unchecked(face) + .hat .local_qabls .insert(qabl.clone(), info); let key_expr = Resource::decl_key(qabl, face); @@ -1058,7 +1080,12 @@ pub(crate) fn queries_new_face(tables: &mut Tables, face: &mut Arc) { } else if face.whatami == WhatAmI::Peer && !tables.hat.full_net(WhatAmI::Peer) { for qabl in tables.hat.router_qabls.iter() { if qabl.context.is_some() - && 
(qabl.context().router_qabls.keys().any(|r| *r != tables.zid) + && (qabl + .context() + .hat + .router_qabls + .keys() + .any(|r| *r != tables.zid) || qabl.session_ctxs.values().any(|s| { s.qabl.is_some() && (s.face.whatami == WhatAmI::Client @@ -1068,6 +1095,7 @@ pub(crate) fn queries_new_face(tables: &mut Tables, face: &mut Arc) { { let info = local_qabl_info(tables, qabl, face); get_mut_unchecked(face) + .hat .local_qabls .insert(qabl.clone(), info); let key_expr = Resource::decl_key(qabl, face); @@ -1092,6 +1120,7 @@ pub(crate) fn queries_new_face(tables: &mut Tables, face: &mut Arc) { if qabl.context.is_some() { let info = local_qabl_info(tables, qabl, face); get_mut_unchecked(face) + .hat .local_qabls .insert(qabl.clone(), info); let key_expr = Resource::decl_key(qabl, face); @@ -1115,7 +1144,7 @@ pub(crate) fn queries_new_face(tables: &mut Tables, face: &mut Arc) { .cloned() .collect::>>() { - for qabl in face.remote_qabls.iter() { + for qabl in face.hat.remote_qabls.iter() { propagate_simple_queryable(tables, qabl, Some(&mut face.clone())); } } @@ -1128,7 +1157,7 @@ pub(crate) fn queries_new_face(tables: &mut Tables, face: &mut Arc) { .cloned() .collect::>>() { - for qabl in face.remote_qabls.iter() { + for qabl in face.hat.remote_qabls.iter() { propagate_simple_queryable(tables, qabl, Some(&mut face.clone())); } } @@ -1141,7 +1170,7 @@ pub(crate) fn queries_remove_node(tables: &mut Tables, node: &ZenohId, net_type: WhatAmI::Router => { let mut qabls = vec![]; for res in tables.hat.router_qabls.iter() { - for qabl in res.context().router_qabls.keys() { + for qabl in res.context().hat.router_qabls.keys() { if qabl == node { qabls.push(res.clone()); } @@ -1162,7 +1191,7 @@ pub(crate) fn queries_remove_node(tables: &mut Tables, node: &ZenohId, net_type: WhatAmI::Peer => { let mut qabls = vec![]; for res in tables.hat.router_qabls.iter() { - for qabl in res.context().router_qabls.keys() { + for qabl in res.context().hat.router_qabls.keys() { if qabl == node { qabls.push(res.clone()); } @@ -1201,7 +1230,7 @@ pub(crate) fn queries_linkstate_change(tables: &mut Tables, zid: &ZenohId, links && tables.whatami == WhatAmI::Router && src_face.whatami == WhatAmI::Peer { - for res in &src_face.remote_qabls { + for res in &src_face.hat.remote_qabls { let client_qabls = res .session_ctxs .values() @@ -1213,7 +1242,7 @@ pub(crate) fn queries_linkstate_change(tables: &mut Tables, zid: &ZenohId, links { let dst_face = &mut get_mut_unchecked(ctx).face; if dst_face.whatami == WhatAmI::Peer && src_face.zid != dst_face.zid { - if dst_face.local_qabls.contains_key(res) { + if dst_face.hat.local_qabls.contains_key(res) { let forget = !HatTables::failover_brokering_to(links, dst_face.zid) && { let ctx_links = tables @@ -1243,12 +1272,13 @@ pub(crate) fn queries_linkstate_change(tables: &mut Tables, zid: &ZenohId, links }), }); - get_mut_unchecked(dst_face).local_qabls.remove(res); + get_mut_unchecked(dst_face).hat.local_qabls.remove(res); } } else if HatTables::failover_brokering_to(links, ctx.face.zid) { let dst_face = &mut get_mut_unchecked(ctx).face; let info = local_qabl_info(tables, res, dst_face); get_mut_unchecked(dst_face) + .hat .local_qabls .insert(res.clone(), info); let key_expr = Resource::decl_key(res, dst_face); @@ -1291,8 +1321,8 @@ pub(crate) fn queries_tree_change( for res in qabls_res { let qabls = match net_type { - WhatAmI::Router => &res.context().router_qabls, - _ => &res.context().peer_qabls, + WhatAmI::Router => &res.context().hat.router_qabls, + _ => &res.context().hat.peer_qabls, 
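
The maps being matched on here (`router_qabls`, `peer_qabls`, and their subscriber counterparts) are the per-node state that this series moves under the resource's hat context and reaches through `res.context().hat.*`. A reduced, self-contained sketch of that layout, using simplified stand-in types rather than the real zenoh definitions, is:

    use std::collections::{HashMap, HashSet};

    // Stand-ins for the zenoh types; the real ones carry more fields.
    type ZenohId = u64;
    #[allow(dead_code)]
    #[derive(Clone, Copy, PartialEq)]
    struct QueryableInfo {
        complete: u8,
        distance: u32,
    }

    // Hat-specific state attached to a resource, as declared in hat/mod.rs:
    // which routers/peers subscribe to or serve queries on this resource.
    #[allow(dead_code)]
    struct HatContext {
        router_subs: HashSet<ZenohId>,
        peer_subs: HashSet<ZenohId>,
        router_qabls: HashMap<ZenohId, QueryableInfo>,
        peer_qabls: HashMap<ZenohId, QueryableInfo>,
    }

    // Dispatcher-side resource context keeps the generic flags and nests
    // the hat state behind a `hat` field, mirroring `res.context().hat.*`.
    #[allow(dead_code)]
    struct ResourceContext {
        valid_data_routes: bool,
        valid_query_routes: bool,
        hat: HatContext,
    }

    impl ResourceContext {
        // Same shape as the `remote_router_qabls` check in hat/queries.rs:
        // does any router other than the local one offer a queryable here?
        fn remote_router_qabls(&self, local: ZenohId) -> bool {
            self.hat.router_qabls.keys().any(|zid| *zid != local)
        }
    }

    fn main() {
        let ctx = ResourceContext {
            valid_data_routes: false,
            valid_query_routes: false,
            hat: HatContext {
                router_subs: HashSet::new(),
                peer_subs: HashSet::new(),
                router_qabls: HashMap::from([(42, QueryableInfo { complete: 1, distance: 0 })]),
                peer_qabls: HashMap::new(),
            },
        };
        // Seen from node 1, the queryable registered by router 42 is remote.
        assert!(ctx.remote_router_qabls(1));
    }

The `close_face` logic earlier in this patch walks exactly these collections, draining `face.hat.remote_subs` and `face.hat.remote_qabls` before recomputing the data and query routes of the affected resources.
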
}; if let Some(qabl_info) = qabls.get(&tree_id) { send_sourced_queryable_to_net_childs( @@ -1313,3 +1343,217 @@ pub(crate) fn queries_tree_change( // recompute routes compute_query_routes_from(tables, &mut tables.root_res.clone()); } + +#[inline] +#[allow(clippy::too_many_arguments)] +fn insert_target_for_qabls( + route: &mut QueryTargetQablSet, + expr: &mut RoutingExpr, + tables: &Tables, + net: &Network, + source: usize, + qabls: &HashMap, + complete: bool, +) { + if net.trees.len() > source { + for (qabl, qabl_info) in qabls { + if let Some(qabl_idx) = net.get_idx(qabl) { + if net.trees[source].directions.len() > qabl_idx.index() { + if let Some(direction) = net.trees[source].directions[qabl_idx.index()] { + if net.graph.contains_node(direction) { + if let Some(face) = tables.get_face(&net.graph[direction].zid) { + if net.distances.len() > qabl_idx.index() { + let key_expr = + Resource::get_best_key(expr.prefix, expr.suffix, face.id); + route.push(QueryTargetQabl { + direction: ( + face.clone(), + key_expr.to_owned(), + if source != 0 { + Some(source as u16) + } else { + None + }, + ), + complete: if complete { + qabl_info.complete as u64 + } else { + 0 + }, + distance: net.distances[qabl_idx.index()], + }); + } + } + } + } + } + } + } + } else { + log::trace!("Tree for node sid:{} not yet ready", source); + } +} + +lazy_static::lazy_static! { + static ref EMPTY_ROUTE: Arc = Arc::new(Vec::new()); +} +pub(crate) fn compute_query_route( + tables: &Tables, + expr: &mut RoutingExpr, + source: Option, + source_type: WhatAmI, +) -> Arc { + let mut route = QueryTargetQablSet::new(); + let key_expr = expr.full_expr(); + if key_expr.ends_with('/') { + return EMPTY_ROUTE.clone(); + } + log::trace!( + "compute_query_route({}, {:?}, {:?})", + key_expr, + source, + source_type + ); + let key_expr = match OwnedKeyExpr::try_from(key_expr) { + Ok(ke) => ke, + Err(e) => { + log::warn!("Invalid KE reached the system: {}", e); + return EMPTY_ROUTE.clone(); + } + }; + let res = Resource::get_resource(expr.prefix, expr.suffix); + let matches = res + .as_ref() + .and_then(|res| res.context.as_ref()) + .map(|ctx| Cow::from(&ctx.matches)) + .unwrap_or_else(|| Cow::from(Resource::get_matches(tables, &key_expr))); + + let master = tables.whatami != WhatAmI::Router + || !tables.hat.full_net(WhatAmI::Peer) + || *tables + .hat + .elect_router(&tables.zid, &key_expr, tables.hat.shared_nodes.iter()) + == tables.zid; + + for mres in matches.iter() { + let mres = mres.upgrade().unwrap(); + let complete = DEFAULT_INCLUDER.includes(mres.expr().as_bytes(), key_expr.as_bytes()); + if tables.whatami == WhatAmI::Router { + if master || source_type == WhatAmI::Router { + let net = tables.hat.routers_net.as_ref().unwrap(); + let router_source = match source_type { + WhatAmI::Router => source.unwrap(), + _ => net.idx.index(), + }; + insert_target_for_qabls( + &mut route, + expr, + tables, + net, + router_source, + &mres.context().hat.router_qabls, + complete, + ); + } + + if (master || source_type != WhatAmI::Router) && tables.hat.full_net(WhatAmI::Peer) { + let net = tables.hat.peers_net.as_ref().unwrap(); + let peer_source = match source_type { + WhatAmI::Peer => source.unwrap(), + _ => net.idx.index(), + }; + insert_target_for_qabls( + &mut route, + expr, + tables, + net, + peer_source, + &mres.context().hat.peer_qabls, + complete, + ); + } + } + + if tables.whatami == WhatAmI::Peer && tables.hat.full_net(WhatAmI::Peer) { + let net = tables.hat.peers_net.as_ref().unwrap(); + let peer_source = match source_type { + 
WhatAmI::Router | WhatAmI::Peer => source.unwrap(), + _ => net.idx.index(), + }; + insert_target_for_qabls( + &mut route, + expr, + tables, + net, + peer_source, + &mres.context().hat.peer_qabls, + complete, + ); + } + + if tables.whatami != WhatAmI::Router || master || source_type == WhatAmI::Router { + for (sid, context) in &mres.session_ctxs { + if match tables.whatami { + WhatAmI::Router => context.face.whatami != WhatAmI::Router, + _ => source_type == WhatAmI::Client || context.face.whatami == WhatAmI::Client, + } { + let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, *sid); + if let Some(qabl_info) = context.qabl.as_ref() { + route.push(QueryTargetQabl { + direction: (context.face.clone(), key_expr.to_owned(), None), + complete: if complete { + qabl_info.complete as u64 + } else { + 0 + }, + distance: 0.5, + }); + } + } + } + } + } + route.sort_by_key(|qabl| OrderedFloat(qabl.distance)); + Arc::new(route) +} + +#[inline] +pub(crate) fn compute_local_replies( + tables: &Tables, + prefix: &Arc, + suffix: &str, + face: &Arc, +) -> Vec<(WireExpr<'static>, ZBuf)> { + let mut result = vec![]; + // Only the first routing point in the query route + // should return the liveliness tokens + if face.whatami == WhatAmI::Client { + let key_expr = prefix.expr() + suffix; + let key_expr = match OwnedKeyExpr::try_from(key_expr) { + Ok(ke) => ke, + Err(e) => { + log::warn!("Invalid KE reached the system: {}", e); + return result; + } + }; + if key_expr.starts_with(PREFIX_LIVELINESS) { + let res = Resource::get_resource(prefix, suffix); + let matches = res + .as_ref() + .and_then(|res| res.context.as_ref()) + .map(|ctx| Cow::from(&ctx.matches)) + .unwrap_or_else(|| Cow::from(Resource::get_matches(tables, &key_expr))); + for mres in matches.iter() { + let mres = mres.upgrade().unwrap(); + if (mres.context.is_some() + && (!mres.context().hat.router_subs.is_empty() + || !mres.context().hat.peer_subs.is_empty())) + || mres.session_ctxs.values().any(|ctx| ctx.subs.is_some()) + { + result.push((Resource::get_best_key(&mres, "", face.id), ZBuf::default())); + } + } + } + } + result +} From 939daa485b29187bab880be3920afe2d76713974 Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Thu, 2 Nov 2023 17:04:02 +0100 Subject: [PATCH 003/122] Use RoutingContext type --- zenoh/src/net/routing/dispatcher/face.rs | 4 +- zenoh/src/net/routing/dispatcher/pubsub.rs | 152 +++++++++++++------ zenoh/src/net/routing/dispatcher/queries.rs | 123 ++++++++++----- zenoh/src/net/routing/dispatcher/resource.rs | 32 ++-- zenoh/src/net/routing/hat/network.rs | 9 +- zenoh/src/net/routing/hat/pubsub.rs | 47 +++--- zenoh/src/net/routing/hat/queries.rs | 58 ++++--- zenoh/src/net/runtime/mod.rs | 2 +- 8 files changed, 273 insertions(+), 154 deletions(-) diff --git a/zenoh/src/net/routing/dispatcher/face.rs b/zenoh/src/net/routing/dispatcher/face.rs index c2cc6dea69..945989ce28 100644 --- a/zenoh/src/net/routing/dispatcher/face.rs +++ b/zenoh/src/net/routing/dispatcher/face.rs @@ -395,7 +395,7 @@ impl Primitives for Face { &msg.wire_expr, msg.ext_qos, msg.payload, - msg.ext_nodeid.node_id as u64, + msg.ext_nodeid.node_id, ); } @@ -411,7 +411,7 @@ impl Primitives for Face { msg.ext_target, // consolidation, msg.payload, - msg.ext_nodeid.node_id as u64, + msg.ext_nodeid.node_id, ); } RequestBody::Pull(_) => { diff --git a/zenoh/src/net/routing/dispatcher/pubsub.rs b/zenoh/src/net/routing/dispatcher/pubsub.rs index fa861470fc..c6489448cf 100644 --- a/zenoh/src/net/routing/dispatcher/pubsub.rs +++ 
b/zenoh/src/net/routing/dispatcher/pubsub.rs @@ -14,7 +14,7 @@ use super::super::hat::pubsub::compute_data_route; use super::face::FaceState; use super::resource::{DataRoutes, Direction, PullCaches, Resource, Route}; -use super::tables::{RoutingExpr, Tables}; +use super::tables::{RoutingContext, RoutingExpr, Tables}; use petgraph::graph::NodeIndex; use std::borrow::Cow; use std::collections::HashMap; @@ -83,11 +83,20 @@ pub(crate) fn compute_data_routes_(tables: &Tables, res: &Arc) -> Data .resize_with(max_idx.index() + 1, || Arc::new(HashMap::new())); for idx in &indexes { - routes.routers_data_routes[idx.index()] = - compute_data_route(tables, &mut expr, Some(idx.index()), WhatAmI::Router); + routes.routers_data_routes[idx.index()] = compute_data_route( + tables, + &mut expr, + idx.index() as RoutingContext, + WhatAmI::Router, + ); } - routes.peer_data_route = Some(compute_data_route(tables, &mut expr, None, WhatAmI::Peer)); + routes.peer_data_route = Some(compute_data_route( + tables, + &mut expr, + RoutingContext::default(), + WhatAmI::Peer, + )); } if (tables.whatami == WhatAmI::Router || tables.whatami == WhatAmI::Peer) && tables.hat.full_net(WhatAmI::Peer) @@ -106,18 +115,35 @@ pub(crate) fn compute_data_routes_(tables: &Tables, res: &Arc) -> Data .resize_with(max_idx.index() + 1, || Arc::new(HashMap::new())); for idx in &indexes { - routes.peers_data_routes[idx.index()] = - compute_data_route(tables, &mut expr, Some(idx.index()), WhatAmI::Peer); + routes.peers_data_routes[idx.index()] = compute_data_route( + tables, + &mut expr, + idx.index() as RoutingContext, + WhatAmI::Peer, + ); } } if tables.whatami == WhatAmI::Peer && !tables.hat.full_net(WhatAmI::Peer) { - routes.client_data_route = - Some(compute_data_route(tables, &mut expr, None, WhatAmI::Client)); - routes.peer_data_route = Some(compute_data_route(tables, &mut expr, None, WhatAmI::Peer)); + routes.client_data_route = Some(compute_data_route( + tables, + &mut expr, + RoutingContext::default(), + WhatAmI::Client, + )); + routes.peer_data_route = Some(compute_data_route( + tables, + &mut expr, + RoutingContext::default(), + WhatAmI::Peer, + )); } if tables.whatami == WhatAmI::Client { - routes.client_data_route = - Some(compute_data_route(tables, &mut expr, None, WhatAmI::Client)); + routes.client_data_route = Some(compute_data_route( + tables, + &mut expr, + RoutingContext::default(), + WhatAmI::Client, + )); } routes.matching_pulls = Some(compute_matching_pulls(tables, &mut expr)); routes @@ -143,12 +169,20 @@ pub(crate) fn compute_data_routes(tables: &mut Tables, res: &mut Arc) routers_data_routes.resize_with(max_idx.index() + 1, || Arc::new(HashMap::new())); for idx in &indexes { - routers_data_routes[idx.index()] = - compute_data_route(tables, &mut expr, Some(idx.index()), WhatAmI::Router); + routers_data_routes[idx.index()] = compute_data_route( + tables, + &mut expr, + idx.index() as RoutingContext, + WhatAmI::Router, + ); } - res_mut.context_mut().peer_data_route = - Some(compute_data_route(tables, &mut expr, None, WhatAmI::Peer)); + res_mut.context_mut().peer_data_route = Some(compute_data_route( + tables, + &mut expr, + RoutingContext::default(), + WhatAmI::Peer, + )); } if (tables.whatami == WhatAmI::Router || tables.whatami == WhatAmI::Peer) && tables.hat.full_net(WhatAmI::Peer) @@ -167,19 +201,35 @@ pub(crate) fn compute_data_routes(tables: &mut Tables, res: &mut Arc) peers_data_routes.resize_with(max_idx.index() + 1, || Arc::new(HashMap::new())); for idx in &indexes { - peers_data_routes[idx.index()] = - 
compute_data_route(tables, &mut expr, Some(idx.index()), WhatAmI::Peer); + peers_data_routes[idx.index()] = compute_data_route( + tables, + &mut expr, + idx.index() as RoutingContext, + WhatAmI::Peer, + ); } } if tables.whatami == WhatAmI::Peer && !tables.hat.full_net(WhatAmI::Peer) { - res_mut.context_mut().client_data_route = - Some(compute_data_route(tables, &mut expr, None, WhatAmI::Client)); - res_mut.context_mut().peer_data_route = - Some(compute_data_route(tables, &mut expr, None, WhatAmI::Peer)); + res_mut.context_mut().client_data_route = Some(compute_data_route( + tables, + &mut expr, + RoutingContext::default(), + WhatAmI::Client, + )); + res_mut.context_mut().peer_data_route = Some(compute_data_route( + tables, + &mut expr, + RoutingContext::default(), + WhatAmI::Peer, + )); } if tables.whatami == WhatAmI::Client { - res_mut.context_mut().client_data_route = - Some(compute_data_route(tables, &mut expr, None, WhatAmI::Client)); + res_mut.context_mut().client_data_route = Some(compute_data_route( + tables, + &mut expr, + RoutingContext::default(), + WhatAmI::Client, + )); } res_mut.context_mut().matching_pulls = compute_matching_pulls(tables, &mut expr); } @@ -267,7 +317,7 @@ fn get_data_route( face: &FaceState, res: &Option>, expr: &mut RoutingExpr, - routing_context: u64, + routing_context: RoutingContext, ) -> Arc { match tables.whatami { WhatAmI::Router => match face.whatami { @@ -277,7 +327,7 @@ fn get_data_route( res.as_ref() .and_then(|res| res.routers_data_route(local_context)) .unwrap_or_else(|| { - compute_data_route(tables, expr, Some(local_context), face.whatami) + compute_data_route(tables, expr, local_context, face.whatami) }) } WhatAmI::Peer => { @@ -287,18 +337,27 @@ fn get_data_route( res.as_ref() .and_then(|res| res.peers_data_route(local_context)) .unwrap_or_else(|| { - compute_data_route(tables, expr, Some(local_context), face.whatami) + compute_data_route(tables, expr, local_context, face.whatami) }) } else { res.as_ref() .and_then(|res| res.peer_data_route()) - .unwrap_or_else(|| compute_data_route(tables, expr, None, face.whatami)) + .unwrap_or_else(|| { + compute_data_route( + tables, + expr, + RoutingContext::default(), + face.whatami, + ) + }) } } _ => res .as_ref() - .and_then(|res| res.routers_data_route(0)) - .unwrap_or_else(|| compute_data_route(tables, expr, None, face.whatami)), + .and_then(|res| res.routers_data_route(RoutingContext::default())) + .unwrap_or_else(|| { + compute_data_route(tables, expr, RoutingContext::default(), face.whatami) + }), }, WhatAmI::Peer => { if tables.hat.full_net(WhatAmI::Peer) { @@ -310,13 +369,20 @@ fn get_data_route( res.as_ref() .and_then(|res| res.peers_data_route(local_context)) .unwrap_or_else(|| { - compute_data_route(tables, expr, Some(local_context), face.whatami) + compute_data_route(tables, expr, local_context, face.whatami) }) } _ => res .as_ref() - .and_then(|res| res.peers_data_route(0)) - .unwrap_or_else(|| compute_data_route(tables, expr, None, face.whatami)), + .and_then(|res| res.peers_data_route(RoutingContext::default())) + .unwrap_or_else(|| { + compute_data_route( + tables, + expr, + RoutingContext::default(), + face.whatami, + ) + }), } } else { res.as_ref() @@ -324,13 +390,17 @@ fn get_data_route( WhatAmI::Client => res.client_data_route(), _ => res.peer_data_route(), }) - .unwrap_or_else(|| compute_data_route(tables, expr, None, face.whatami)) + .unwrap_or_else(|| { + compute_data_route(tables, expr, RoutingContext::default(), face.whatami) + }) } } _ => res .as_ref() .and_then(|res| 
res.client_data_route()) - .unwrap_or_else(|| compute_data_route(tables, expr, None, face.whatami)), + .unwrap_or_else(|| { + compute_data_route(tables, expr, RoutingContext::default(), face.whatami) + }), } } @@ -424,7 +494,7 @@ pub fn full_reentrant_route_data( expr: &WireExpr, ext_qos: ext::QoSType, mut payload: PushBody, - routing_context: u64, + routing_context: RoutingContext, ) { let tables = zread!(tables_ref); match tables.get_mapping(face, &expr.scope, expr.mapping).cloned() { @@ -477,9 +547,7 @@ pub fn full_reentrant_route_data( wire_expr: key_expr.into(), ext_qos, ext_tstamp: None, - ext_nodeid: ext::NodeIdType { - node_id: context.unwrap_or(0), - }, + ext_nodeid: ext::NodeIdType { node_id: *context }, payload, }) } @@ -512,9 +580,7 @@ pub fn full_reentrant_route_data( wire_expr: key_expr, ext_qos, ext_tstamp: None, - ext_nodeid: ext::NodeIdType { - node_id: context.unwrap_or(0), - }, + ext_nodeid: ext::NodeIdType { node_id: context }, payload: payload.clone(), }) } @@ -541,9 +607,7 @@ pub fn full_reentrant_route_data( wire_expr: key_expr.into(), ext_qos, ext_tstamp: None, - ext_nodeid: ext::NodeIdType { - node_id: context.unwrap_or(0), - }, + ext_nodeid: ext::NodeIdType { node_id: *context }, payload: payload.clone(), }) } diff --git a/zenoh/src/net/routing/dispatcher/queries.rs b/zenoh/src/net/routing/dispatcher/queries.rs index 509ec80300..13cabfe666 100644 --- a/zenoh/src/net/routing/dispatcher/queries.rs +++ b/zenoh/src/net/routing/dispatcher/queries.rs @@ -15,6 +15,7 @@ use super::super::hat::queries::compute_local_replies; use super::super::hat::queries::compute_query_route; use super::face::FaceState; use super::resource::{QueryRoute, QueryRoutes, QueryTargetQablSet, Resource}; +use super::tables::RoutingContext; use super::tables::{RoutingExpr, Tables, TablesLock}; use async_trait::async_trait; use petgraph::graph::NodeIndex; @@ -60,11 +61,20 @@ pub(crate) fn compute_query_routes_(tables: &Tables, res: &Arc) -> Que .resize_with(max_idx.index() + 1, || Arc::new(QueryTargetQablSet::new())); for idx in &indexes { - routes.routers_query_routes[idx.index()] = - compute_query_route(tables, &mut expr, Some(idx.index()), WhatAmI::Router); + routes.routers_query_routes[idx.index()] = compute_query_route( + tables, + &mut expr, + idx.index() as RoutingContext, + WhatAmI::Router, + ); } - routes.peer_query_route = Some(compute_query_route(tables, &mut expr, None, WhatAmI::Peer)); + routes.peer_query_route = Some(compute_query_route( + tables, + &mut expr, + RoutingContext::default(), + WhatAmI::Peer, + )); } if (tables.whatami == WhatAmI::Router || tables.whatami == WhatAmI::Peer) && tables.hat.full_net(WhatAmI::Peer) @@ -83,24 +93,33 @@ pub(crate) fn compute_query_routes_(tables: &Tables, res: &Arc) -> Que .resize_with(max_idx.index() + 1, || Arc::new(QueryTargetQablSet::new())); for idx in &indexes { - routes.peers_query_routes[idx.index()] = - compute_query_route(tables, &mut expr, Some(idx.index()), WhatAmI::Peer); + routes.peers_query_routes[idx.index()] = compute_query_route( + tables, + &mut expr, + idx.index() as RoutingContext, + WhatAmI::Peer, + ); } } if tables.whatami == WhatAmI::Peer && !tables.hat.full_net(WhatAmI::Peer) { routes.client_query_route = Some(compute_query_route( tables, &mut expr, - None, + RoutingContext::default(), WhatAmI::Client, )); - routes.peer_query_route = Some(compute_query_route(tables, &mut expr, None, WhatAmI::Peer)); + routes.peer_query_route = Some(compute_query_route( + tables, + &mut expr, + RoutingContext::default(), + WhatAmI::Peer, 
+ )); } if tables.whatami == WhatAmI::Client { routes.client_query_route = Some(compute_query_route( tables, &mut expr, - None, + RoutingContext::default(), WhatAmI::Client, )); } @@ -128,12 +147,20 @@ pub(crate) fn compute_query_routes(tables: &mut Tables, res: &mut Arc) .resize_with(max_idx.index() + 1, || Arc::new(QueryTargetQablSet::new())); for idx in &indexes { - routers_query_routes[idx.index()] = - compute_query_route(tables, &mut expr, Some(idx.index()), WhatAmI::Router); + routers_query_routes[idx.index()] = compute_query_route( + tables, + &mut expr, + idx.index() as RoutingContext, + WhatAmI::Router, + ); } - res_mut.context_mut().peer_query_route = - Some(compute_query_route(tables, &mut expr, None, WhatAmI::Peer)); + res_mut.context_mut().peer_query_route = Some(compute_query_route( + tables, + &mut expr, + RoutingContext::default(), + WhatAmI::Peer, + )); } if (tables.whatami == WhatAmI::Router || tables.whatami == WhatAmI::Peer) && tables.hat.full_net(WhatAmI::Peer) @@ -153,25 +180,33 @@ pub(crate) fn compute_query_routes(tables: &mut Tables, res: &mut Arc) .resize_with(max_idx.index() + 1, || Arc::new(QueryTargetQablSet::new())); for idx in &indexes { - peers_query_routes[idx.index()] = - compute_query_route(tables, &mut expr, Some(idx.index()), WhatAmI::Peer); + peers_query_routes[idx.index()] = compute_query_route( + tables, + &mut expr, + idx.index() as RoutingContext, + WhatAmI::Peer, + ); } } if tables.whatami == WhatAmI::Peer && !tables.hat.full_net(WhatAmI::Peer) { res_mut.context_mut().client_query_route = Some(compute_query_route( tables, &mut expr, - None, + RoutingContext::default(), WhatAmI::Client, )); - res_mut.context_mut().peer_query_route = - Some(compute_query_route(tables, &mut expr, None, WhatAmI::Peer)); + res_mut.context_mut().peer_query_route = Some(compute_query_route( + tables, + &mut expr, + RoutingContext::default(), + WhatAmI::Peer, + )); } if tables.whatami == WhatAmI::Client { res_mut.context_mut().client_query_route = Some(compute_query_route( tables, &mut expr, - None, + RoutingContext::default(), WhatAmI::Client, )); } @@ -423,7 +458,7 @@ fn get_query_route( face: &FaceState, res: &Option>, expr: &mut RoutingExpr, - routing_context: u64, + routing_context: RoutingContext, ) -> Arc { match tables.whatami { WhatAmI::Router => match face.whatami { @@ -433,7 +468,7 @@ fn get_query_route( res.as_ref() .and_then(|res| res.routers_query_route(local_context)) .unwrap_or_else(|| { - compute_query_route(tables, expr, Some(local_context), face.whatami) + compute_query_route(tables, expr, local_context, face.whatami) }) } WhatAmI::Peer => { @@ -443,18 +478,27 @@ fn get_query_route( res.as_ref() .and_then(|res| res.peers_query_route(local_context)) .unwrap_or_else(|| { - compute_query_route(tables, expr, Some(local_context), face.whatami) + compute_query_route(tables, expr, local_context, face.whatami) }) } else { res.as_ref() .and_then(|res| res.peer_query_route()) - .unwrap_or_else(|| compute_query_route(tables, expr, None, face.whatami)) + .unwrap_or_else(|| { + compute_query_route( + tables, + expr, + RoutingContext::default(), + face.whatami, + ) + }) } } _ => res .as_ref() - .and_then(|res| res.routers_query_route(0)) - .unwrap_or_else(|| compute_query_route(tables, expr, None, face.whatami)), + .and_then(|res| res.routers_query_route(RoutingContext::default())) + .unwrap_or_else(|| { + compute_query_route(tables, expr, RoutingContext::default(), face.whatami) + }), }, WhatAmI::Peer => { if tables.hat.full_net(WhatAmI::Peer) { @@ -466,13 +510,20 
@@ fn get_query_route( res.as_ref() .and_then(|res| res.peers_query_route(local_context)) .unwrap_or_else(|| { - compute_query_route(tables, expr, Some(local_context), face.whatami) + compute_query_route(tables, expr, local_context, face.whatami) }) } _ => res .as_ref() - .and_then(|res| res.peers_query_route(0)) - .unwrap_or_else(|| compute_query_route(tables, expr, None, face.whatami)), + .and_then(|res| res.peers_query_route(RoutingContext::default())) + .unwrap_or_else(|| { + compute_query_route( + tables, + expr, + RoutingContext::default(), + face.whatami, + ) + }), } } else { res.as_ref() @@ -480,13 +531,17 @@ fn get_query_route( WhatAmI::Client => res.client_query_route(), _ => res.peer_query_route(), }) - .unwrap_or_else(|| compute_query_route(tables, expr, None, face.whatami)) + .unwrap_or_else(|| { + compute_query_route(tables, expr, RoutingContext::default(), face.whatami) + }) } } _ => res .as_ref() .and_then(|res| res.client_query_route()) - .unwrap_or_else(|| compute_query_route(tables, expr, None, face.whatami)), + .unwrap_or_else(|| { + compute_query_route(tables, expr, RoutingContext::default(), face.whatami) + }), } } @@ -563,7 +618,7 @@ pub fn route_query( qid: RequestId, target: TargetType, body: RequestBody, - routing_context: u64, + routing_context: RoutingContext, ) { let rtables = zread!(tables_ref.tables); match rtables.get_mapping(face, &expr.scope, expr.mapping) { @@ -682,9 +737,7 @@ pub fn route_query( wire_expr: key_expr.into(), ext_qos: ext::QoSType::request_default(), // TODO ext_tstamp: None, - ext_nodeid: ext::NodeIdType { - node_id: context.unwrap_or(0), - }, + ext_nodeid: ext::NodeIdType { node_id: *context }, ext_target: *t, ext_budget: None, ext_timeout: None, @@ -717,9 +770,7 @@ pub fn route_query( wire_expr: key_expr.into(), ext_qos: ext::QoSType::request_default(), ext_tstamp: None, - ext_nodeid: ext::NodeIdType { - node_id: context.unwrap_or(0), - }, + ext_nodeid: ext::NodeIdType { node_id: *context }, ext_target: target, ext_budget: None, ext_timeout: None, diff --git a/zenoh/src/net/routing/dispatcher/resource.rs b/zenoh/src/net/routing/dispatcher/resource.rs index da36373df0..4569706d3d 100644 --- a/zenoh/src/net/routing/dispatcher/resource.rs +++ b/zenoh/src/net/routing/dispatcher/resource.rs @@ -36,7 +36,7 @@ use zenoh_sync::get_mut_unchecked; pub(crate) type RoutingContext = u16; -pub(crate) type Direction = (Arc, WireExpr<'static>, Option); +pub(crate) type Direction = (Arc, WireExpr<'static>, RoutingContext); pub(crate) type Route = HashMap; #[cfg(feature = "complete_n")] pub(crate) type QueryRoute = HashMap; @@ -205,12 +205,12 @@ impl Resource { } #[inline(always)] - pub fn routers_data_route(&self, context: usize) -> Option> { + pub fn routers_data_route(&self, context: RoutingContext) -> Option> { match &self.context { Some(ctx) => { if ctx.valid_data_routes { - (ctx.routers_data_routes.len() > context) - .then(|| ctx.routers_data_routes[context].clone()) + (ctx.routers_data_routes.len() > context as usize) + .then(|| ctx.routers_data_routes[context as usize].clone()) } else { None } @@ -221,12 +221,12 @@ impl Resource { } #[inline(always)] - pub fn peers_data_route(&self, context: usize) -> Option> { + pub fn peers_data_route(&self, context: RoutingContext) -> Option> { match &self.context { Some(ctx) => { if ctx.valid_data_routes { - (ctx.peers_data_routes.len() > context) - .then(|| ctx.peers_data_routes[context].clone()) + (ctx.peers_data_routes.len() > context as usize) + .then(|| ctx.peers_data_routes[context as usize].clone()) } 
else { None } @@ -264,12 +264,15 @@ impl Resource { } #[inline(always)] - pub(crate) fn routers_query_route(&self, context: usize) -> Option> { + pub(crate) fn routers_query_route( + &self, + context: RoutingContext, + ) -> Option> { match &self.context { Some(ctx) => { if ctx.valid_query_routes { - (ctx.routers_query_routes.len() > context) - .then(|| ctx.routers_query_routes[context].clone()) + (ctx.routers_query_routes.len() > context as usize) + .then(|| ctx.routers_query_routes[context as usize].clone()) } else { None } @@ -279,12 +282,15 @@ impl Resource { } #[inline(always)] - pub(crate) fn peers_query_route(&self, context: usize) -> Option> { + pub(crate) fn peers_query_route( + &self, + context: RoutingContext, + ) -> Option> { match &self.context { Some(ctx) => { if ctx.valid_query_routes { - (ctx.peers_query_routes.len() > context) - .then(|| ctx.peers_query_routes[context].clone()) + (ctx.peers_query_routes.len() > context as usize) + .then(|| ctx.peers_query_routes[context as usize].clone()) } else { None } diff --git a/zenoh/src/net/routing/hat/network.rs b/zenoh/src/net/routing/hat/network.rs index 2c1a9746da..94f241bbcf 100644 --- a/zenoh/src/net/routing/hat/network.rs +++ b/zenoh/src/net/routing/hat/network.rs @@ -13,6 +13,7 @@ // use crate::net::codec::Zenoh080Routing; use crate::net::protocol::linkstate::{LinkState, LinkStateList}; +use crate::net::routing::dispatcher::tables::RoutingContext; use crate::net::runtime::Runtime; use async_std::task; use petgraph::graph::NodeIndex; @@ -190,9 +191,13 @@ impl Network { } #[inline] - pub(crate) fn get_local_context(&self, context: u64, link_id: usize) -> usize { + pub(crate) fn get_local_context( + &self, + context: RoutingContext, + link_id: usize, + ) -> RoutingContext { match self.get_link(link_id) { - Some(link) => match link.get_local_psid(&context) { + Some(link) => match link.get_local_psid(&(context as u64)) { Some(psid) => (*psid).try_into().unwrap_or(0), None => { log::error!( diff --git a/zenoh/src/net/routing/hat/pubsub.rs b/zenoh/src/net/routing/hat/pubsub.rs index 650c9fe849..d511ac210e 100644 --- a/zenoh/src/net/routing/hat/pubsub.rs +++ b/zenoh/src/net/routing/hat/pubsub.rs @@ -167,7 +167,7 @@ fn propagate_sourced_subscription( res, src_face, sub_info, - Some(tree_sid.index() as u16), + Some(tree_sid.index() as RoutingContext), ); } else { log::trace!( @@ -668,7 +668,7 @@ fn propagate_forget_sourced_subscription( &net.trees[tree_sid.index()].childs, res, src_face, - Some(tree_sid.index() as u16), + Some(tree_sid.index() as RoutingContext), ); } else { log::trace!( @@ -1129,7 +1129,7 @@ pub(crate) fn pubsub_tree_change( res, None, &sub_info, - Some(tree_sid as u16), + Some(tree_sid as RoutingContext), ); } } @@ -1230,28 +1230,21 @@ fn insert_faces_for_subs( expr: &RoutingExpr, tables: &Tables, net: &Network, - source: usize, + source: RoutingContext, subs: &HashSet, ) { - if net.trees.len() > source { + if net.trees.len() > source as usize { for sub in subs { if let Some(sub_idx) = net.get_idx(sub) { - if net.trees[source].directions.len() > sub_idx.index() { - if let Some(direction) = net.trees[source].directions[sub_idx.index()] { + if net.trees[source as usize].directions.len() > sub_idx.index() { + if let Some(direction) = net.trees[source as usize].directions[sub_idx.index()] + { if net.graph.contains_node(direction) { if let Some(face) = tables.get_face(&net.graph[direction].zid) { route.entry(face.id).or_insert_with(|| { let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, face.id); - ( - 
face.clone(), - key_expr.to_owned(), - if source != 0 { - Some(source as u16) - } else { - None - }, - ) + (face.clone(), key_expr.to_owned(), source) }); } } @@ -1267,7 +1260,7 @@ fn insert_faces_for_subs( pub(crate) fn compute_data_route( tables: &Tables, expr: &mut RoutingExpr, - source: Option, + source: RoutingContext, source_type: WhatAmI, ) -> Arc { let mut route = HashMap::new(); @@ -1308,8 +1301,8 @@ pub(crate) fn compute_data_route( if master || source_type == WhatAmI::Router { let net = tables.hat.routers_net.as_ref().unwrap(); let router_source = match source_type { - WhatAmI::Router => source.unwrap(), - _ => net.idx.index(), + WhatAmI::Router => source, + _ => net.idx.index() as RoutingContext, }; insert_faces_for_subs( &mut route, @@ -1324,8 +1317,8 @@ pub(crate) fn compute_data_route( if (master || source_type != WhatAmI::Router) && tables.hat.full_net(WhatAmI::Peer) { let net = tables.hat.peers_net.as_ref().unwrap(); let peer_source = match source_type { - WhatAmI::Peer => source.unwrap(), - _ => net.idx.index(), + WhatAmI::Peer => source, + _ => net.idx.index() as RoutingContext, }; insert_faces_for_subs( &mut route, @@ -1341,8 +1334,8 @@ pub(crate) fn compute_data_route( if tables.whatami == WhatAmI::Peer && tables.hat.full_net(WhatAmI::Peer) { let net = tables.hat.peers_net.as_ref().unwrap(); let peer_source = match source_type { - WhatAmI::Router | WhatAmI::Peer => source.unwrap(), - _ => net.idx.index(), + WhatAmI::Router | WhatAmI::Peer => source, + _ => net.idx.index() as RoutingContext, }; insert_faces_for_subs( &mut route, @@ -1367,7 +1360,11 @@ pub(crate) fn compute_data_route( { route.entry(*sid).or_insert_with(|| { let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, *sid); - (context.face.clone(), key_expr.to_owned(), None) + ( + context.face.clone(), + key_expr.to_owned(), + RoutingContext::default(), + ) }); } } @@ -1380,7 +1377,7 @@ pub(crate) fn compute_data_route( ( mcast_group.clone(), expr.full_expr().to_string().into(), - None, + RoutingContext::default(), ), ); } diff --git a/zenoh/src/net/routing/hat/queries.rs b/zenoh/src/net/routing/hat/queries.rs index 514bff6d11..876bb287a6 100644 --- a/zenoh/src/net/routing/hat/queries.rs +++ b/zenoh/src/net/routing/hat/queries.rs @@ -1,6 +1,3 @@ -use crate::net::routing::dispatcher::tables::{QueryTargetQabl, QueryTargetQablSet, RoutingExpr}; -use crate::net::routing::PREFIX_LIVELINESS; - // // Copyright (c) 2023 ZettaScale Technology // @@ -20,6 +17,8 @@ use super::super::dispatcher::resource::{Resource, RoutingContext, SessionContex use super::super::dispatcher::tables::{Tables, TablesLock}; use super::network::Network; use super::HatTables; +use crate::net::routing::dispatcher::tables::{QueryTargetQabl, QueryTargetQablSet, RoutingExpr}; +use crate::net::routing::PREFIX_LIVELINESS; use ordered_float::OrderedFloat; use petgraph::graph::NodeIndex; use std::borrow::Cow; @@ -195,7 +194,7 @@ fn send_sourced_queryable_to_net_childs( res: &Arc, qabl_info: &QueryableInfo, src_face: Option<&mut Arc>, - routing_context: Option, + routing_context: RoutingContext, ) { for child in childs { if net.graph.contains_node(*child) { @@ -210,7 +209,7 @@ fn send_sourced_queryable_to_net_childs( ext_qos: ext::QoSType::declare_default(), ext_tstamp: None, ext_nodeid: ext::NodeIdType { - node_id: routing_context.unwrap_or(0), + node_id: routing_context, }, body: DeclareBody::DeclareQueryable(DeclareQueryable { id: 0, // TODO @@ -307,7 +306,7 @@ fn propagate_sourced_queryable( res, qabl_info, src_face, - 
Some(tree_sid.index() as u16), + tree_sid.index() as RoutingContext, ); } else { log::trace!( @@ -678,7 +677,7 @@ fn send_forget_sourced_queryable_to_net_childs( childs: &[NodeIndex], res: &Arc, src_face: Option<&Arc>, - routing_context: Option, + routing_context: RoutingContext, ) { for child in childs { if net.graph.contains_node(*child) { @@ -693,7 +692,7 @@ fn send_forget_sourced_queryable_to_net_childs( ext_qos: ext::QoSType::declare_default(), ext_tstamp: None, ext_nodeid: ext::NodeIdType { - node_id: routing_context.unwrap_or(0), + node_id: routing_context, }, body: DeclareBody::UndeclareQueryable(UndeclareQueryable { id: 0, // TODO @@ -782,7 +781,7 @@ fn propagate_forget_sourced_queryable( &net.trees[tree_sid.index()].childs, res, src_face, - Some(tree_sid.index() as u16), + tree_sid.index() as RoutingContext, ); } else { log::trace!( @@ -1332,7 +1331,7 @@ pub(crate) fn queries_tree_change( res, qabl_info, None, - Some(tree_sid as u16), + tree_sid as RoutingContext, ); } } @@ -1351,30 +1350,23 @@ fn insert_target_for_qabls( expr: &mut RoutingExpr, tables: &Tables, net: &Network, - source: usize, + source: RoutingContext, qabls: &HashMap, complete: bool, ) { - if net.trees.len() > source { + if net.trees.len() > source as usize { for (qabl, qabl_info) in qabls { if let Some(qabl_idx) = net.get_idx(qabl) { - if net.trees[source].directions.len() > qabl_idx.index() { - if let Some(direction) = net.trees[source].directions[qabl_idx.index()] { + if net.trees[source as usize].directions.len() > qabl_idx.index() { + if let Some(direction) = net.trees[source as usize].directions[qabl_idx.index()] + { if net.graph.contains_node(direction) { if let Some(face) = tables.get_face(&net.graph[direction].zid) { if net.distances.len() > qabl_idx.index() { let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, face.id); route.push(QueryTargetQabl { - direction: ( - face.clone(), - key_expr.to_owned(), - if source != 0 { - Some(source as u16) - } else { - None - }, - ), + direction: (face.clone(), key_expr.to_owned(), source), complete: if complete { qabl_info.complete as u64 } else { @@ -1400,7 +1392,7 @@ lazy_static::lazy_static! 
{ pub(crate) fn compute_query_route( tables: &Tables, expr: &mut RoutingExpr, - source: Option, + source: RoutingContext, source_type: WhatAmI, ) -> Arc { let mut route = QueryTargetQablSet::new(); @@ -1442,8 +1434,8 @@ pub(crate) fn compute_query_route( if master || source_type == WhatAmI::Router { let net = tables.hat.routers_net.as_ref().unwrap(); let router_source = match source_type { - WhatAmI::Router => source.unwrap(), - _ => net.idx.index(), + WhatAmI::Router => source, + _ => net.idx.index() as RoutingContext, }; insert_target_for_qabls( &mut route, @@ -1459,8 +1451,8 @@ pub(crate) fn compute_query_route( if (master || source_type != WhatAmI::Router) && tables.hat.full_net(WhatAmI::Peer) { let net = tables.hat.peers_net.as_ref().unwrap(); let peer_source = match source_type { - WhatAmI::Peer => source.unwrap(), - _ => net.idx.index(), + WhatAmI::Peer => source, + _ => net.idx.index() as RoutingContext, }; insert_target_for_qabls( &mut route, @@ -1477,8 +1469,8 @@ pub(crate) fn compute_query_route( if tables.whatami == WhatAmI::Peer && tables.hat.full_net(WhatAmI::Peer) { let net = tables.hat.peers_net.as_ref().unwrap(); let peer_source = match source_type { - WhatAmI::Router | WhatAmI::Peer => source.unwrap(), - _ => net.idx.index(), + WhatAmI::Router | WhatAmI::Peer => source, + _ => net.idx.index() as RoutingContext, }; insert_target_for_qabls( &mut route, @@ -1500,7 +1492,11 @@ pub(crate) fn compute_query_route( let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, *sid); if let Some(qabl_info) = context.qabl.as_ref() { route.push(QueryTargetQabl { - direction: (context.face.clone(), key_expr.to_owned(), None), + direction: ( + context.face.clone(), + key_expr.to_owned(), + RoutingContext::default(), + ), complete: if complete { qabl_info.complete as u64 } else { diff --git a/zenoh/src/net/runtime/mod.rs b/zenoh/src/net/runtime/mod.rs index 7ca9297aa7..acacd4c64a 100644 --- a/zenoh/src/net/runtime/mod.rs +++ b/zenoh/src/net/runtime/mod.rs @@ -287,7 +287,7 @@ impl TransportPeerEventHandler for RuntimeSession { &data.wire_expr, data.ext_qos, data.payload, - data.ext_nodeid.node_id.into(), + data.ext_nodeid.node_id, ); return Ok(()); } From 6e1c124db9f95b0ac5719139a6ebda3e30837a08 Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Tue, 7 Nov 2023 16:00:52 +0100 Subject: [PATCH 004/122] Structs split --- zenoh/src/net/routing/dispatcher/face.rs | 289 ++--------- zenoh/src/net/routing/dispatcher/pubsub.rs | 456 ++++------------ zenoh/src/net/routing/dispatcher/queries.rs | 413 ++++----------- zenoh/src/net/routing/dispatcher/resource.rs | 131 ++--- zenoh/src/net/routing/dispatcher/tables.rs | 5 +- zenoh/src/net/routing/hat/mod.rs | 467 ++++++++++++++++- zenoh/src/net/routing/hat/network.rs | 12 +- zenoh/src/net/routing/hat/pubsub.rs | 518 ++++++++++++++----- zenoh/src/net/routing/hat/queries.rs | 475 ++++++++++++----- zenoh/src/net/routing/router.rs | 285 +--------- zenoh/src/net/runtime/adminspace.rs | 164 +++--- 11 files changed, 1595 insertions(+), 1620 deletions(-) diff --git a/zenoh/src/net/routing/dispatcher/face.rs b/zenoh/src/net/routing/dispatcher/face.rs index 945989ce28..11f4b2d17a 100644 --- a/zenoh/src/net/routing/dispatcher/face.rs +++ b/zenoh/src/net/routing/dispatcher/face.rs @@ -14,8 +14,9 @@ use crate::net::routing::hat::HatFace; // ZettaScale Zenoh Team, // use super::super::router::*; -use super::tables::{Tables, TablesLock}; +use super::tables::TablesLock; use super::{resource::*, tables}; +use std::any::Any; use std::collections::HashMap; use 
std::fmt; use std::sync::Arc; @@ -41,7 +42,7 @@ pub struct FaceState { pub(crate) next_qid: RequestId, pub(crate) pending_queries: HashMap>, pub(crate) mcast_group: Option, - pub(crate) hat: HatFace, + pub(crate) hat: Box, } impl FaceState { @@ -67,7 +68,7 @@ impl FaceState { next_qid: 0, pending_queries: HashMap::new(), mcast_group, - hat: HatFace::new(), + hat: Box::new(HatFace::new()), }) } @@ -91,62 +92,6 @@ impl FaceState { } id } - - pub(crate) fn get_router(&self, tables: &Tables, nodeid: &u64) -> Option { - match tables - .hat - .routers_net - .as_ref() - .unwrap() - .get_link(self.link_id) - { - Some(link) => match link.get_zid(nodeid) { - Some(router) => Some(*router), - None => { - log::error!( - "Received router declaration with unknown routing context id {}", - nodeid - ); - None - } - }, - None => { - log::error!( - "Could not find corresponding link in routers network for {}", - self - ); - None - } - } - } - - pub(crate) fn get_peer(&self, tables: &Tables, nodeid: &u64) -> Option { - match tables - .hat - .peers_net - .as_ref() - .unwrap() - .get_link(self.link_id) - { - Some(link) => match link.get_zid(nodeid) { - Some(router) => Some(*router), - None => { - log::error!( - "Received peer declaration with unknown routing context id {}", - nodeid - ); - None - } - }, - None => { - log::error!( - "Could not find corresponding link in peers network for {}", - self - ); - None - } - } - } } impl fmt::Display for FaceState { @@ -172,212 +117,38 @@ impl Primitives for Face { unregister_expr(&self.tables, &mut self.state.clone(), m.id); } zenoh_protocol::network::DeclareBody::DeclareSubscriber(m) => { - let rtables = zread!(self.tables.tables); - match (rtables.whatami, self.state.whatami) { - (WhatAmI::Router, WhatAmI::Router) => { - if let Some(router) = self - .state - .get_router(&rtables, &(msg.ext_nodeid.node_id as u64)) - { - declare_router_subscription( - &self.tables, - rtables, - &mut self.state.clone(), - &m.wire_expr, - &m.ext_info, - router, - ) - } - } - (WhatAmI::Router, WhatAmI::Peer) - | (WhatAmI::Peer, WhatAmI::Router) - | (WhatAmI::Peer, WhatAmI::Peer) => { - if rtables.hat.full_net(WhatAmI::Peer) { - if let Some(peer) = self - .state - .get_peer(&rtables, &(msg.ext_nodeid.node_id as u64)) - { - declare_peer_subscription( - &self.tables, - rtables, - &mut self.state.clone(), - &m.wire_expr, - &m.ext_info, - peer, - ) - } - } else { - declare_client_subscription( - &self.tables, - rtables, - &mut self.state.clone(), - &m.wire_expr, - &m.ext_info, - ) - } - } - _ => declare_client_subscription( - &self.tables, - rtables, - &mut self.state.clone(), - &m.wire_expr, - &m.ext_info, - ), - } + declare_subscription( + &self.tables, + &mut self.state.clone(), + &m.wire_expr, + &m.ext_info, + msg.ext_nodeid.node_id, + ); } zenoh_protocol::network::DeclareBody::UndeclareSubscriber(m) => { - let rtables = zread!(self.tables.tables); - match (rtables.whatami, self.state.whatami) { - (WhatAmI::Router, WhatAmI::Router) => { - if let Some(router) = self - .state - .get_router(&rtables, &(msg.ext_nodeid.node_id as u64)) - { - forget_router_subscription( - &self.tables, - rtables, - &mut self.state.clone(), - &m.ext_wire_expr.wire_expr, - &router, - ) - } - } - (WhatAmI::Router, WhatAmI::Peer) - | (WhatAmI::Peer, WhatAmI::Router) - | (WhatAmI::Peer, WhatAmI::Peer) => { - if rtables.hat.full_net(WhatAmI::Peer) { - if let Some(peer) = self - .state - .get_peer(&rtables, &(msg.ext_nodeid.node_id as u64)) - { - forget_peer_subscription( - &self.tables, - rtables, - &mut 
self.state.clone(), - &m.ext_wire_expr.wire_expr, - &peer, - ) - } - } else { - forget_client_subscription( - &self.tables, - rtables, - &mut self.state.clone(), - &m.ext_wire_expr.wire_expr, - ) - } - } - _ => forget_client_subscription( - &self.tables, - rtables, - &mut self.state.clone(), - &m.ext_wire_expr.wire_expr, - ), - } + forget_subscription( + &self.tables, + &mut self.state.clone(), + &m.ext_wire_expr.wire_expr, + msg.ext_nodeid.node_id, + ); } zenoh_protocol::network::DeclareBody::DeclareQueryable(m) => { - let rtables = zread!(self.tables.tables); - match (rtables.whatami, self.state.whatami) { - (WhatAmI::Router, WhatAmI::Router) => { - if let Some(router) = self - .state - .get_router(&rtables, &(msg.ext_nodeid.node_id as u64)) - { - declare_router_queryable( - &self.tables, - rtables, - &mut self.state.clone(), - &m.wire_expr, - &m.ext_info, - router, - ) - } - } - (WhatAmI::Router, WhatAmI::Peer) - | (WhatAmI::Peer, WhatAmI::Router) - | (WhatAmI::Peer, WhatAmI::Peer) => { - if rtables.hat.full_net(WhatAmI::Peer) { - if let Some(peer) = self - .state - .get_peer(&rtables, &(msg.ext_nodeid.node_id as u64)) - { - declare_peer_queryable( - &self.tables, - rtables, - &mut self.state.clone(), - &m.wire_expr, - &m.ext_info, - peer, - ) - } - } else { - declare_client_queryable( - &self.tables, - rtables, - &mut self.state.clone(), - &m.wire_expr, - &m.ext_info, - ) - } - } - _ => declare_client_queryable( - &self.tables, - rtables, - &mut self.state.clone(), - &m.wire_expr, - &m.ext_info, - ), - } + declare_queryable( + &self.tables, + &mut self.state.clone(), + &m.wire_expr, + &m.ext_info, + msg.ext_nodeid.node_id, + ); } zenoh_protocol::network::DeclareBody::UndeclareQueryable(m) => { - let rtables = zread!(self.tables.tables); - match (rtables.whatami, self.state.whatami) { - (WhatAmI::Router, WhatAmI::Router) => { - if let Some(router) = self - .state - .get_router(&rtables, &(msg.ext_nodeid.node_id as u64)) - { - forget_router_queryable( - &self.tables, - rtables, - &mut self.state.clone(), - &m.ext_wire_expr.wire_expr, - &router, - ) - } - } - (WhatAmI::Router, WhatAmI::Peer) - | (WhatAmI::Peer, WhatAmI::Router) - | (WhatAmI::Peer, WhatAmI::Peer) => { - if rtables.hat.full_net(WhatAmI::Peer) { - if let Some(peer) = self - .state - .get_peer(&rtables, &(msg.ext_nodeid.node_id as u64)) - { - forget_peer_queryable( - &self.tables, - rtables, - &mut self.state.clone(), - &m.ext_wire_expr.wire_expr, - &peer, - ) - } - } else { - forget_client_queryable( - &self.tables, - rtables, - &mut self.state.clone(), - &m.ext_wire_expr.wire_expr, - ) - } - } - _ => forget_client_queryable( - &self.tables, - rtables, - &mut self.state.clone(), - &m.ext_wire_expr.wire_expr, - ), - } + forget_queryable( + &self.tables, + &mut self.state.clone(), + &m.ext_wire_expr.wire_expr, + msg.ext_nodeid.node_id, + ); } zenoh_protocol::network::DeclareBody::DeclareToken(_m) => todo!(), zenoh_protocol::network::DeclareBody::UndeclareToken(_m) => todo!(), diff --git a/zenoh/src/net/routing/dispatcher/pubsub.rs b/zenoh/src/net/routing/dispatcher/pubsub.rs index c6489448cf..97529b8f69 100644 --- a/zenoh/src/net/routing/dispatcher/pubsub.rs +++ b/zenoh/src/net/routing/dispatcher/pubsub.rs @@ -11,230 +11,22 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::super::hat::pubsub::compute_data_route; +use super::super::hat::pubsub::{compute_data_route, compute_data_routes, compute_data_routes_}; use super::face::FaceState; -use super::resource::{DataRoutes, Direction, PullCaches, Resource, Route}; -use 
super::tables::{RoutingContext, RoutingExpr, Tables}; -use petgraph::graph::NodeIndex; -use std::borrow::Cow; -use std::collections::HashMap; -use std::convert::TryFrom; +use super::resource::{DataRoutes, Direction, PullCaches, Resource}; +use super::tables::{compute_matching_pulls, RoutingContext, RoutingExpr, Tables}; +use crate::net::routing::hat::map_routing_context; +use crate::net::routing::hat::pubsub::{egress_filter, ingress_filter}; use std::sync::Arc; use std::sync::RwLock; use zenoh_core::zread; use zenoh_protocol::{ - core::{key_expr::OwnedKeyExpr, WhatAmI, WireExpr}, - network::{ - declare::{ext, Mode}, - Push, - }, + core::{WhatAmI, WireExpr}, + network::{declare::ext, Push}, zenoh::PushBody, }; use zenoh_sync::get_mut_unchecked; -fn compute_matching_pulls(tables: &Tables, expr: &mut RoutingExpr) -> Arc { - let mut pull_caches = vec![]; - let ke = if let Ok(ke) = OwnedKeyExpr::try_from(expr.full_expr()) { - ke - } else { - return Arc::new(pull_caches); - }; - let res = Resource::get_resource(expr.prefix, expr.suffix); - let matches = res - .as_ref() - .and_then(|res| res.context.as_ref()) - .map(|ctx| Cow::from(&ctx.matches)) - .unwrap_or_else(|| Cow::from(Resource::get_matches(tables, &ke))); - - for mres in matches.iter() { - let mres = mres.upgrade().unwrap(); - for context in mres.session_ctxs.values() { - if let Some(subinfo) = &context.subs { - if subinfo.mode == Mode::Pull { - pull_caches.push(context.clone()); - } - } - } - } - Arc::new(pull_caches) -} - -pub(crate) fn compute_data_routes_(tables: &Tables, res: &Arc) -> DataRoutes { - let mut routes = DataRoutes { - matching_pulls: None, - routers_data_routes: vec![], - peers_data_routes: vec![], - peer_data_route: None, - client_data_route: None, - }; - let mut expr = RoutingExpr::new(res, ""); - if tables.whatami == WhatAmI::Router { - let indexes = tables - .hat - .routers_net - .as_ref() - .unwrap() - .graph - .node_indices() - .collect::>(); - let max_idx = indexes.iter().max().unwrap(); - routes - .routers_data_routes - .resize_with(max_idx.index() + 1, || Arc::new(HashMap::new())); - - for idx in &indexes { - routes.routers_data_routes[idx.index()] = compute_data_route( - tables, - &mut expr, - idx.index() as RoutingContext, - WhatAmI::Router, - ); - } - - routes.peer_data_route = Some(compute_data_route( - tables, - &mut expr, - RoutingContext::default(), - WhatAmI::Peer, - )); - } - if (tables.whatami == WhatAmI::Router || tables.whatami == WhatAmI::Peer) - && tables.hat.full_net(WhatAmI::Peer) - { - let indexes = tables - .hat - .peers_net - .as_ref() - .unwrap() - .graph - .node_indices() - .collect::>(); - let max_idx = indexes.iter().max().unwrap(); - routes - .peers_data_routes - .resize_with(max_idx.index() + 1, || Arc::new(HashMap::new())); - - for idx in &indexes { - routes.peers_data_routes[idx.index()] = compute_data_route( - tables, - &mut expr, - idx.index() as RoutingContext, - WhatAmI::Peer, - ); - } - } - if tables.whatami == WhatAmI::Peer && !tables.hat.full_net(WhatAmI::Peer) { - routes.client_data_route = Some(compute_data_route( - tables, - &mut expr, - RoutingContext::default(), - WhatAmI::Client, - )); - routes.peer_data_route = Some(compute_data_route( - tables, - &mut expr, - RoutingContext::default(), - WhatAmI::Peer, - )); - } - if tables.whatami == WhatAmI::Client { - routes.client_data_route = Some(compute_data_route( - tables, - &mut expr, - RoutingContext::default(), - WhatAmI::Client, - )); - } - routes.matching_pulls = Some(compute_matching_pulls(tables, &mut expr)); - routes -} 
- -pub(crate) fn compute_data_routes(tables: &mut Tables, res: &mut Arc) { - if res.context.is_some() { - let mut res_mut = res.clone(); - let res_mut = get_mut_unchecked(&mut res_mut); - let mut expr = RoutingExpr::new(res, ""); - if tables.whatami == WhatAmI::Router { - let indexes = tables - .hat - .routers_net - .as_ref() - .unwrap() - .graph - .node_indices() - .collect::>(); - let max_idx = indexes.iter().max().unwrap(); - let routers_data_routes = &mut res_mut.context_mut().routers_data_routes; - routers_data_routes.clear(); - routers_data_routes.resize_with(max_idx.index() + 1, || Arc::new(HashMap::new())); - - for idx in &indexes { - routers_data_routes[idx.index()] = compute_data_route( - tables, - &mut expr, - idx.index() as RoutingContext, - WhatAmI::Router, - ); - } - - res_mut.context_mut().peer_data_route = Some(compute_data_route( - tables, - &mut expr, - RoutingContext::default(), - WhatAmI::Peer, - )); - } - if (tables.whatami == WhatAmI::Router || tables.whatami == WhatAmI::Peer) - && tables.hat.full_net(WhatAmI::Peer) - { - let indexes = tables - .hat - .peers_net - .as_ref() - .unwrap() - .graph - .node_indices() - .collect::>(); - let max_idx = indexes.iter().max().unwrap(); - let peers_data_routes = &mut res_mut.context_mut().peers_data_routes; - peers_data_routes.clear(); - peers_data_routes.resize_with(max_idx.index() + 1, || Arc::new(HashMap::new())); - - for idx in &indexes { - peers_data_routes[idx.index()] = compute_data_route( - tables, - &mut expr, - idx.index() as RoutingContext, - WhatAmI::Peer, - ); - } - } - if tables.whatami == WhatAmI::Peer && !tables.hat.full_net(WhatAmI::Peer) { - res_mut.context_mut().client_data_route = Some(compute_data_route( - tables, - &mut expr, - RoutingContext::default(), - WhatAmI::Client, - )); - res_mut.context_mut().peer_data_route = Some(compute_data_route( - tables, - &mut expr, - RoutingContext::default(), - WhatAmI::Peer, - )); - } - if tables.whatami == WhatAmI::Client { - res_mut.context_mut().client_data_route = Some(compute_data_route( - tables, - &mut expr, - RoutingContext::default(), - WhatAmI::Client, - )); - } - res_mut.context_mut().matching_pulls = compute_matching_pulls(tables, &mut expr); - } -} - pub(crate) fn compute_data_routes_from(tables: &mut Tables, res: &mut Arc) { compute_data_routes(tables, res); let res = get_mut_unchecked(res); @@ -311,98 +103,95 @@ macro_rules! 
treat_timestamp { } } -#[inline] -fn get_data_route( - tables: &Tables, - face: &FaceState, - res: &Option>, - expr: &mut RoutingExpr, - routing_context: RoutingContext, -) -> Arc { - match tables.whatami { - WhatAmI::Router => match face.whatami { - WhatAmI::Router => { - let routers_net = tables.hat.routers_net.as_ref().unwrap(); - let local_context = routers_net.get_local_context(routing_context, face.link_id); - res.as_ref() - .and_then(|res| res.routers_data_route(local_context)) - .unwrap_or_else(|| { - compute_data_route(tables, expr, local_context, face.whatami) - }) - } - WhatAmI::Peer => { - if tables.hat.full_net(WhatAmI::Peer) { - let peers_net = tables.hat.peers_net.as_ref().unwrap(); - let local_context = peers_net.get_local_context(routing_context, face.link_id); - res.as_ref() - .and_then(|res| res.peers_data_route(local_context)) - .unwrap_or_else(|| { - compute_data_route(tables, expr, local_context, face.whatami) - }) - } else { - res.as_ref() - .and_then(|res| res.peer_data_route()) - .unwrap_or_else(|| { - compute_data_route( - tables, - expr, - RoutingContext::default(), - face.whatami, - ) - }) - } - } - _ => res - .as_ref() - .and_then(|res| res.routers_data_route(RoutingContext::default())) - .unwrap_or_else(|| { - compute_data_route(tables, expr, RoutingContext::default(), face.whatami) - }), - }, - WhatAmI::Peer => { - if tables.hat.full_net(WhatAmI::Peer) { - match face.whatami { - WhatAmI::Router | WhatAmI::Peer => { - let peers_net = tables.hat.peers_net.as_ref().unwrap(); - let local_context = - peers_net.get_local_context(routing_context, face.link_id); - res.as_ref() - .and_then(|res| res.peers_data_route(local_context)) - .unwrap_or_else(|| { - compute_data_route(tables, expr, local_context, face.whatami) - }) - } - _ => res - .as_ref() - .and_then(|res| res.peers_data_route(RoutingContext::default())) - .unwrap_or_else(|| { - compute_data_route( - tables, - expr, - RoutingContext::default(), - face.whatami, - ) - }), - } - } else { - res.as_ref() - .and_then(|res| match face.whatami { - WhatAmI::Client => res.client_data_route(), - _ => res.peer_data_route(), - }) - .unwrap_or_else(|| { - compute_data_route(tables, expr, RoutingContext::default(), face.whatami) - }) - } - } - _ => res - .as_ref() - .and_then(|res| res.client_data_route()) - .unwrap_or_else(|| { - compute_data_route(tables, expr, RoutingContext::default(), face.whatami) - }), - } -} +// #[inline] +// fn get_data_route( +// tables: &Tables, +// face: &FaceState, +// res: &Option>, +// expr: &mut RoutingExpr, +// routing_context: RoutingContext, +// ) -> Arc { +// let local_context = map_routing_context(tables, face, routing_context); +// match tables.whatami { +// WhatAmI::Router => match face.whatami { +// WhatAmI::Router => { +// res.as_ref() +// .and_then(|res| res.routers_data_route(local_context)) +// .unwrap_or_else(|| { +// compute_data_route(tables, expr, local_context, face.whatami) +// }) +// } +// WhatAmI::Peer => { +// if tables.hat.full_net(WhatAmI::Peer) { +// res.as_ref() +// .and_then(|res| res.peers_data_route(local_context)) +// .unwrap_or_else(|| { +// compute_data_route(tables, expr, local_context, face.whatami) +// }) +// } else { +// res.as_ref() +// .and_then(|res| res.peer_data_route()) +// .unwrap_or_else(|| { +// compute_data_route( +// tables, +// expr, +// RoutingContext::default(), +// face.whatami, +// ) +// }) +// } +// } +// _ => res +// .as_ref() +// .and_then(|res| res.routers_data_route(RoutingContext::default())) +// .unwrap_or_else(|| { +// 
compute_data_route(tables, expr, RoutingContext::default(), face.whatami) +// }), +// }, +// WhatAmI::Peer => { +// if tables.hat.full_net(WhatAmI::Peer) { +// match face.whatami { +// WhatAmI::Router | WhatAmI::Peer => { +// let peers_net = tables.hat.peers_net.as_ref().unwrap(); +// let local_context = +// peers_net.get_local_context(routing_context, face.link_id); +// res.as_ref() +// .and_then(|res| res.peers_data_route(local_context)) +// .unwrap_or_else(|| { +// compute_data_route(tables, expr, local_context, face.whatami) +// }) +// } +// _ => res +// .as_ref() +// .and_then(|res| res.peers_data_route(RoutingContext::default())) +// .unwrap_or_else(|| { +// compute_data_route( +// tables, +// expr, +// RoutingContext::default(), +// face.whatami, +// ) +// }), +// } +// } else { +// res.as_ref() +// .and_then(|res| match face.whatami { +// WhatAmI::Client => res.client_data_route(), +// _ => res.peer_data_route(), +// }) +// .unwrap_or_else(|| { +// compute_data_route(tables, expr, RoutingContext::default(), face.whatami) +// }) +// } +// } +// _ => res +// .as_ref() +// .and_then(|res| res.client_data_route()) +// .unwrap_or_else(|| { +// compute_data_route(tables, expr, RoutingContext::default(), face.whatami) +// }), +// } +// } #[inline] fn get_matching_pulls( @@ -430,38 +219,6 @@ macro_rules! cache_data { }; } -#[inline] -fn should_route( - tables: &Tables, - src_face: &FaceState, - outface: &Arc, - expr: &mut RoutingExpr, -) -> bool { - if src_face.id != outface.id - && match (src_face.mcast_group.as_ref(), outface.mcast_group.as_ref()) { - (Some(l), Some(r)) => l != r, - _ => true, - } - { - let dst_master = tables.whatami != WhatAmI::Router - || outface.whatami != WhatAmI::Peer - || tables.hat.peers_net.is_none() - || tables.zid - == *tables.hat.elect_router( - &tables.zid, - expr.full_expr(), - tables.hat.get_router_links(outface.zid), - ); - - return dst_master - && (src_face.whatami != WhatAmI::Peer - || outface.whatami != WhatAmI::Peer - || tables.hat.full_net(WhatAmI::Peer) - || tables.hat.failover_brokering(src_face.zid, outface.zid)); - } - false -} - #[cfg(feature = "stats")] macro_rules! 
inc_stats { ( @@ -515,18 +272,13 @@ pub fn full_reentrant_route_data( inc_stats!(face, rx, admin, payload) } - if tables.whatami != WhatAmI::Router - || face.whatami != WhatAmI::Peer - || tables.hat.peers_net.is_none() - || tables.zid - == *tables.hat.elect_router( - &tables.zid, - expr.full_expr(), - tables.hat.get_router_links(face.zid), - ) - { + if ingress_filter(&tables, face, &mut expr) { let res = Resource::get_resource(&prefix, expr.suffix); - let route = get_data_route(&tables, face, &res, &mut expr, routing_context); + + // let route = get_data_route(&tables, face, &res, &mut expr, routing_context); + let local_context = map_routing_context(&tables, face, routing_context); + let route = compute_data_route(&tables, &mut expr, local_context, face.whatami); + let matching_pulls = get_matching_pulls(&tables, &res, &mut expr); if !(route.is_empty() && matching_pulls.is_empty()) { @@ -534,7 +286,7 @@ pub fn full_reentrant_route_data( if route.len() == 1 && matching_pulls.len() == 0 { let (outface, key_expr, context) = route.values().next().unwrap(); - if should_route(&tables, face, outface, &mut expr) { + if egress_filter(&tables, face, outface, &mut expr) { drop(tables); #[cfg(feature = "stats")] if !admin { @@ -562,7 +314,7 @@ pub fn full_reentrant_route_data( let route = route .values() .filter(|(outface, _key_expr, _context)| { - should_route(&tables, face, outface, &mut expr) + egress_filter(&tables, face, outface, &mut expr) }) .cloned() .collect::>(); diff --git a/zenoh/src/net/routing/dispatcher/queries.rs b/zenoh/src/net/routing/dispatcher/queries.rs index 13cabfe666..856fec3e3c 100644 --- a/zenoh/src/net/routing/dispatcher/queries.rs +++ b/zenoh/src/net/routing/dispatcher/queries.rs @@ -1,3 +1,5 @@ +use crate::net::routing::hat::pubsub::ingress_filter; + // // Copyright (c) 2023 ZettaScale Technology // @@ -13,16 +15,18 @@ // use super::super::hat::queries::compute_local_replies; use super::super::hat::queries::compute_query_route; +use super::super::hat::queries::compute_query_routes; +use super::super::hat::queries::compute_query_routes_; use super::face::FaceState; use super::resource::{QueryRoute, QueryRoutes, QueryTargetQablSet, Resource}; +use super::tables::egress_filter; use super::tables::RoutingContext; use super::tables::{RoutingExpr, Tables, TablesLock}; use async_trait::async_trait; -use petgraph::graph::NodeIndex; use std::collections::HashMap; use std::sync::{Arc, Weak}; use zenoh_protocol::{ - core::{Encoding, WhatAmI, WireExpr}, + core::{Encoding, WireExpr}, network::{ declare::ext, request::{ext::TargetType, Request, RequestId}, @@ -38,181 +42,6 @@ pub(crate) struct Query { src_qid: RequestId, } -pub(crate) fn compute_query_routes_(tables: &Tables, res: &Arc) -> QueryRoutes { - let mut routes = QueryRoutes { - routers_query_routes: vec![], - peers_query_routes: vec![], - peer_query_route: None, - client_query_route: None, - }; - let mut expr = RoutingExpr::new(res, ""); - if tables.whatami == WhatAmI::Router { - let indexes = tables - .hat - .routers_net - .as_ref() - .unwrap() - .graph - .node_indices() - .collect::>(); - let max_idx = indexes.iter().max().unwrap(); - routes - .routers_query_routes - .resize_with(max_idx.index() + 1, || Arc::new(QueryTargetQablSet::new())); - - for idx in &indexes { - routes.routers_query_routes[idx.index()] = compute_query_route( - tables, - &mut expr, - idx.index() as RoutingContext, - WhatAmI::Router, - ); - } - - routes.peer_query_route = Some(compute_query_route( - tables, - &mut expr, - RoutingContext::default(), - 
WhatAmI::Peer, - )); - } - if (tables.whatami == WhatAmI::Router || tables.whatami == WhatAmI::Peer) - && tables.hat.full_net(WhatAmI::Peer) - { - let indexes = tables - .hat - .peers_net - .as_ref() - .unwrap() - .graph - .node_indices() - .collect::>(); - let max_idx = indexes.iter().max().unwrap(); - routes - .peers_query_routes - .resize_with(max_idx.index() + 1, || Arc::new(QueryTargetQablSet::new())); - - for idx in &indexes { - routes.peers_query_routes[idx.index()] = compute_query_route( - tables, - &mut expr, - idx.index() as RoutingContext, - WhatAmI::Peer, - ); - } - } - if tables.whatami == WhatAmI::Peer && !tables.hat.full_net(WhatAmI::Peer) { - routes.client_query_route = Some(compute_query_route( - tables, - &mut expr, - RoutingContext::default(), - WhatAmI::Client, - )); - routes.peer_query_route = Some(compute_query_route( - tables, - &mut expr, - RoutingContext::default(), - WhatAmI::Peer, - )); - } - if tables.whatami == WhatAmI::Client { - routes.client_query_route = Some(compute_query_route( - tables, - &mut expr, - RoutingContext::default(), - WhatAmI::Client, - )); - } - routes -} - -pub(crate) fn compute_query_routes(tables: &mut Tables, res: &mut Arc) { - if res.context.is_some() { - let mut res_mut = res.clone(); - let res_mut = get_mut_unchecked(&mut res_mut); - let mut expr = RoutingExpr::new(res, ""); - if tables.whatami == WhatAmI::Router { - let indexes = tables - .hat - .routers_net - .as_ref() - .unwrap() - .graph - .node_indices() - .collect::>(); - let max_idx = indexes.iter().max().unwrap(); - let routers_query_routes = &mut res_mut.context_mut().routers_query_routes; - routers_query_routes.clear(); - routers_query_routes - .resize_with(max_idx.index() + 1, || Arc::new(QueryTargetQablSet::new())); - - for idx in &indexes { - routers_query_routes[idx.index()] = compute_query_route( - tables, - &mut expr, - idx.index() as RoutingContext, - WhatAmI::Router, - ); - } - - res_mut.context_mut().peer_query_route = Some(compute_query_route( - tables, - &mut expr, - RoutingContext::default(), - WhatAmI::Peer, - )); - } - if (tables.whatami == WhatAmI::Router || tables.whatami == WhatAmI::Peer) - && tables.hat.full_net(WhatAmI::Peer) - { - let indexes = tables - .hat - .peers_net - .as_ref() - .unwrap() - .graph - .node_indices() - .collect::>(); - let max_idx = indexes.iter().max().unwrap(); - let peers_query_routes = &mut res_mut.context_mut().peers_query_routes; - peers_query_routes.clear(); - peers_query_routes - .resize_with(max_idx.index() + 1, || Arc::new(QueryTargetQablSet::new())); - - for idx in &indexes { - peers_query_routes[idx.index()] = compute_query_route( - tables, - &mut expr, - idx.index() as RoutingContext, - WhatAmI::Peer, - ); - } - } - if tables.whatami == WhatAmI::Peer && !tables.hat.full_net(WhatAmI::Peer) { - res_mut.context_mut().client_query_route = Some(compute_query_route( - tables, - &mut expr, - RoutingContext::default(), - WhatAmI::Client, - )); - res_mut.context_mut().peer_query_route = Some(compute_query_route( - tables, - &mut expr, - RoutingContext::default(), - WhatAmI::Peer, - )); - } - if tables.whatami == WhatAmI::Client { - res_mut.context_mut().client_query_route = Some(compute_query_route( - tables, - &mut expr, - RoutingContext::default(), - WhatAmI::Client, - )); - } - } -} - pub(crate) fn compute_query_routes_from(tables: &mut Tables, res: &mut Arc) { compute_query_routes(tables, res); let res = get_mut_unchecked(res); @@ -248,33 +77,6 @@ fn insert_pending_query(outface: &mut Arc, query: Arc) -> Requ qid } -#[inline] 
-fn should_route( - tables: &Tables, - src_face: &FaceState, - outface: &Arc, - expr: &mut RoutingExpr, -) -> bool { - if src_face.id != outface.id { - let dst_master = tables.whatami != WhatAmI::Router - || outface.whatami != WhatAmI::Peer - || tables.hat.peers_net.is_none() - || tables.zid - == *tables.hat.elect_router( - &tables.zid, - expr.full_expr(), - tables.hat.get_router_links(outface.zid), - ); - - return dst_master - && (src_face.whatami != WhatAmI::Peer - || outface.whatami != WhatAmI::Peer - || tables.hat.full_net(WhatAmI::Peer) - || tables.hat.failover_brokering(src_face.zid, outface.zid)); - } - false -} - #[inline] fn compute_final_route( tables: &Tables, @@ -288,7 +90,7 @@ fn compute_final_route( TargetType::All => { let mut route = HashMap::new(); for qabl in qabls.iter() { - if should_route(tables, src_face, &qabl.direction.0, expr) { + if egress_filter(tables, src_face, &qabl.direction.0, expr) { #[cfg(feature = "complete_n")] { route.entry(qabl.direction.0.id).or_insert_with(|| { @@ -312,7 +114,7 @@ fn compute_final_route( TargetType::AllComplete => { let mut route = HashMap::new(); for qabl in qabls.iter() { - if qabl.complete > 0 && should_route(tables, src_face, &qabl.direction.0, expr) { + if qabl.complete > 0 && egress_filter(tables, src_face, &qabl.direction.0, expr) { #[cfg(feature = "complete_n")] { route.entry(qabl.direction.0.id).or_insert_with(|| { @@ -452,98 +254,98 @@ pub(crate) fn disable_matches_query_routes(_tables: &mut Tables, res: &mut Arc>, - expr: &mut RoutingExpr, - routing_context: RoutingContext, -) -> Arc { - match tables.whatami { - WhatAmI::Router => match face.whatami { - WhatAmI::Router => { - let routers_net = tables.hat.routers_net.as_ref().unwrap(); - let local_context = routers_net.get_local_context(routing_context, face.link_id); - res.as_ref() - .and_then(|res| res.routers_query_route(local_context)) - .unwrap_or_else(|| { - compute_query_route(tables, expr, local_context, face.whatami) - }) - } - WhatAmI::Peer => { - if tables.hat.full_net(WhatAmI::Peer) { - let peers_net = tables.hat.peers_net.as_ref().unwrap(); - let local_context = peers_net.get_local_context(routing_context, face.link_id); - res.as_ref() - .and_then(|res| res.peers_query_route(local_context)) - .unwrap_or_else(|| { - compute_query_route(tables, expr, local_context, face.whatami) - }) - } else { - res.as_ref() - .and_then(|res| res.peer_query_route()) - .unwrap_or_else(|| { - compute_query_route( - tables, - expr, - RoutingContext::default(), - face.whatami, - ) - }) - } - } - _ => res - .as_ref() - .and_then(|res| res.routers_query_route(RoutingContext::default())) - .unwrap_or_else(|| { - compute_query_route(tables, expr, RoutingContext::default(), face.whatami) - }), - }, - WhatAmI::Peer => { - if tables.hat.full_net(WhatAmI::Peer) { - match face.whatami { - WhatAmI::Router | WhatAmI::Peer => { - let peers_net = tables.hat.peers_net.as_ref().unwrap(); - let local_context = - peers_net.get_local_context(routing_context, face.link_id); - res.as_ref() - .and_then(|res| res.peers_query_route(local_context)) - .unwrap_or_else(|| { - compute_query_route(tables, expr, local_context, face.whatami) - }) - } - _ => res - .as_ref() - .and_then(|res| res.peers_query_route(RoutingContext::default())) - .unwrap_or_else(|| { - compute_query_route( - tables, - expr, - RoutingContext::default(), - face.whatami, - ) - }), - } - } else { - res.as_ref() - .and_then(|res| match face.whatami { - WhatAmI::Client => res.client_query_route(), - _ => res.peer_query_route(), - }) - 
.unwrap_or_else(|| { - compute_query_route(tables, expr, RoutingContext::default(), face.whatami) - }) - } - } - _ => res - .as_ref() - .and_then(|res| res.client_query_route()) - .unwrap_or_else(|| { - compute_query_route(tables, expr, RoutingContext::default(), face.whatami) - }), - } -} +// #[inline] +// fn get_query_route( +// tables: &Tables, +// face: &FaceState, +// res: &Option>, +// expr: &mut RoutingExpr, +// routing_context: RoutingContext, +// ) -> Arc { +// match tables.whatami { +// WhatAmI::Router => match face.whatami { +// WhatAmI::Router => { +// let routers_net = tables.hat.routers_net.as_ref().unwrap(); +// let local_context = routers_net.get_local_context(routing_context, face.link_id); +// res.as_ref() +// .and_then(|res| res.routers_query_route(local_context)) +// .unwrap_or_else(|| { +// compute_query_route(tables, expr, local_context, face.whatami) +// }) +// } +// WhatAmI::Peer => { +// if tables.hat.full_net(WhatAmI::Peer) { +// let peers_net = tables.hat.peers_net.as_ref().unwrap(); +// let local_context = peers_net.get_local_context(routing_context, face.link_id); +// res.as_ref() +// .and_then(|res| res.peers_query_route(local_context)) +// .unwrap_or_else(|| { +// compute_query_route(tables, expr, local_context, face.whatami) +// }) +// } else { +// res.as_ref() +// .and_then(|res| res.peer_query_route()) +// .unwrap_or_else(|| { +// compute_query_route( +// tables, +// expr, +// RoutingContext::default(), +// face.whatami, +// ) +// }) +// } +// } +// _ => res +// .as_ref() +// .and_then(|res| res.routers_query_route(RoutingContext::default())) +// .unwrap_or_else(|| { +// compute_query_route(tables, expr, RoutingContext::default(), face.whatami) +// }), +// }, +// WhatAmI::Peer => { +// if tables.hat.full_net(WhatAmI::Peer) { +// match face.whatami { +// WhatAmI::Router | WhatAmI::Peer => { +// let peers_net = tables.hat.peers_net.as_ref().unwrap(); +// let local_context = +// peers_net.get_local_context(routing_context, face.link_id); +// res.as_ref() +// .and_then(|res| res.peers_query_route(local_context)) +// .unwrap_or_else(|| { +// compute_query_route(tables, expr, local_context, face.whatami) +// }) +// } +// _ => res +// .as_ref() +// .and_then(|res| res.peers_query_route(RoutingContext::default())) +// .unwrap_or_else(|| { +// compute_query_route( +// tables, +// expr, +// RoutingContext::default(), +// face.whatami, +// ) +// }), +// } +// } else { +// res.as_ref() +// .and_then(|res| match face.whatami { +// WhatAmI::Client => res.client_query_route(), +// _ => res.peer_query_route(), +// }) +// .unwrap_or_else(|| { +// compute_query_route(tables, expr, RoutingContext::default(), face.whatami) +// }) +// } +// } +// _ => res +// .as_ref() +// .and_then(|res| res.client_query_route()) +// .unwrap_or_else(|| { +// compute_query_route(tables, expr, RoutingContext::default(), face.whatami) +// }), +// } +// } #[cfg(feature = "stats")] macro_rules! 
inc_req_stats { @@ -642,18 +444,9 @@ pub fn route_query( inc_req_stats!(face, rx, admin, body) } - if rtables.whatami != WhatAmI::Router - || face.whatami != WhatAmI::Peer - || rtables.hat.peers_net.is_none() - || rtables.zid - == *rtables.hat.elect_router( - &rtables.zid, - expr.full_expr(), - rtables.hat.get_router_links(face.zid), - ) - { - let res = Resource::get_resource(&prefix, expr.suffix); - let route = get_query_route(&rtables, face, &res, &mut expr, routing_context); + if ingress_filter(&rtables, face, &mut expr) { + // let res = Resource::get_resource(&prefix, expr.suffix); + let route = compute_query_route(&rtables, &mut expr, routing_context, face.whatami); let query = Arc::new(Query { src_face: face.clone(), diff --git a/zenoh/src/net/routing/dispatcher/resource.rs b/zenoh/src/net/routing/dispatcher/resource.rs index 4569706d3d..41ffaf7a52 100644 --- a/zenoh/src/net/routing/dispatcher/resource.rs +++ b/zenoh/src/net/routing/dispatcher/resource.rs @@ -14,6 +14,7 @@ use super::face::FaceState; use super::tables::{Tables, TablesLock}; use crate::net::routing::hat::HatContext; +use std::any::Any; use std::collections::HashMap; use std::convert::TryInto; use std::hash::{Hash, Hasher}; @@ -77,7 +78,7 @@ pub(crate) struct QueryRoutes { pub(crate) struct ResourceContext { pub(crate) matches: Vec>, pub(crate) matching_pulls: Arc, - pub(crate) hat: HatContext, + pub(crate) hat: Box, pub(crate) valid_data_routes: bool, pub(crate) routers_data_routes: Vec>, pub(crate) peers_data_routes: Vec>, @@ -95,7 +96,7 @@ impl ResourceContext { ResourceContext { matches: Vec::new(), matching_pulls: Arc::new(Vec::new()), - hat: HatContext::new(), + hat: Box::new(HatContext::new()), valid_data_routes: false, routers_data_routes: Vec::new(), peers_data_routes: Vec::new(), @@ -263,69 +264,69 @@ impl Resource { } } - #[inline(always)] - pub(crate) fn routers_query_route( - &self, - context: RoutingContext, - ) -> Option> { - match &self.context { - Some(ctx) => { - if ctx.valid_query_routes { - (ctx.routers_query_routes.len() > context as usize) - .then(|| ctx.routers_query_routes[context as usize].clone()) - } else { - None - } - } - None => None, - } - } - - #[inline(always)] - pub(crate) fn peers_query_route( - &self, - context: RoutingContext, - ) -> Option> { - match &self.context { - Some(ctx) => { - if ctx.valid_query_routes { - (ctx.peers_query_routes.len() > context as usize) - .then(|| ctx.peers_query_routes[context as usize].clone()) - } else { - None - } - } - None => None, - } - } - - #[inline(always)] - pub(crate) fn peer_query_route(&self) -> Option> { - match &self.context { - Some(ctx) => { - if ctx.valid_query_routes { - ctx.peer_query_route.clone() - } else { - None - } - } - None => None, - } - } - - #[inline(always)] - pub(crate) fn client_query_route(&self) -> Option> { - match &self.context { - Some(ctx) => { - if ctx.valid_query_routes { - ctx.client_query_route.clone() - } else { - None - } - } - None => None, - } - } + // #[inline(always)] + // pub(crate) fn routers_query_route( + // &self, + // context: RoutingContext, + // ) -> Option> { + // match &self.context { + // Some(ctx) => { + // if ctx.valid_query_routes { + // (ctx.routers_query_routes.len() > context as usize) + // .then(|| ctx.routers_query_routes[context as usize].clone()) + // } else { + // None + // } + // } + // None => None, + // } + // } + + // #[inline(always)] + // pub(crate) fn peers_query_route( + // &self, + // context: RoutingContext, + // ) -> Option> { + // match &self.context { + // Some(ctx) => { 
+ // if ctx.valid_query_routes { + // (ctx.peers_query_routes.len() > context as usize) + // .then(|| ctx.peers_query_routes[context as usize].clone()) + // } else { + // None + // } + // } + // None => None, + // } + // } + + // #[inline(always)] + // pub(crate) fn peer_query_route(&self) -> Option> { + // match &self.context { + // Some(ctx) => { + // if ctx.valid_query_routes { + // ctx.peer_query_route.clone() + // } else { + // None + // } + // } + // None => None, + // } + // } + + // #[inline(always)] + // pub(crate) fn client_query_route(&self) -> Option> { + // match &self.context { + // Some(ctx) => { + // if ctx.valid_query_routes { + // ctx.client_query_route.clone() + // } else { + // None + // } + // } + // None => None, + // } + // } pub fn root() -> Arc { Arc::new(Resource { diff --git a/zenoh/src/net/routing/dispatcher/tables.rs b/zenoh/src/net/routing/dispatcher/tables.rs index 20ec1deec6..ed84f46c0f 100644 --- a/zenoh/src/net/routing/dispatcher/tables.rs +++ b/zenoh/src/net/routing/dispatcher/tables.rs @@ -19,6 +19,7 @@ use super::face::FaceState; pub use super::pubsub::*; pub use super::queries::*; pub use super::resource::*; +use std::any::Any; use std::collections::HashMap; use std::sync::{Arc, Weak}; use std::sync::{Mutex, RwLock}; @@ -71,7 +72,7 @@ pub struct Tables { pub(crate) mcast_groups: Vec>, pub(crate) mcast_faces: Vec>, pub(crate) pull_caches_lock: Mutex<()>, - pub(crate) hat: HatTables, + pub(crate) hat: Box, } impl Tables { @@ -96,7 +97,7 @@ impl Tables { mcast_groups: vec![], mcast_faces: vec![], pull_caches_lock: Mutex::new(()), - hat: HatTables::new(router_peers_failover_brokering), + hat: Box::new(HatTables::new(router_peers_failover_brokering)), } } diff --git a/zenoh/src/net/routing/hat/mod.rs b/zenoh/src/net/routing/hat/mod.rs index a36aaa8331..a54837f5ab 100644 --- a/zenoh/src/net/routing/hat/mod.rs +++ b/zenoh/src/net/routing/hat/mod.rs @@ -18,12 +18,26 @@ //! //! 
[Click here for Zenoh's documentation](../zenoh/index.html) use self::{ - network::Network, pubsub::undeclare_client_subscription, queries::undeclare_client_queryable, + network::Network, + pubsub::undeclare_client_subscription, + queries::{compute_query_routes_, undeclare_client_queryable}, }; use super::dispatcher::{ face::FaceState, - queries::compute_query_routes_, - tables::{compute_data_routes_, Resource, TablesLock}, + tables::{compute_data_routes_, Resource, RoutingContext, Tables, TablesLock}, +}; +use crate::{ + hat, hat_mut, + net::{ + codec::Zenoh080Routing, + protocol::linkstate::LinkStateList, + routing::hat::{ + network::shared_nodes, + pubsub::{pubsub_linkstate_change, pubsub_remove_node}, + queries::{queries_linkstate_change, queries_remove_node}, + }, + }, + runtime::Runtime, }; use async_std::task::JoinHandle; use std::{ @@ -31,9 +45,14 @@ use std::{ hash::Hasher, sync::Arc, }; -use zenoh_config::{WhatAmI, ZenohId}; -use zenoh_protocol::network::declare::queryable::ext::QueryableInfo; +use zenoh_config::{WhatAmI, WhatAmIMatcher, ZenohId}; +use zenoh_protocol::{ + common::ZExtBody, + network::{declare::queryable::ext::QueryableInfo, oam::id::OAM_LINKSTATE, Oam}, +}; +use zenoh_result::ZResult; use zenoh_sync::get_mut_unchecked; +use zenoh_transport::{Mux, TransportUnicast}; pub mod network; pub mod pubsub; @@ -164,11 +183,7 @@ impl HatTables { .unwrap_or(false) } - pub(crate) fn schedule_compute_trees( - &mut self, - tables_ref: Arc, - net_type: WhatAmI, - ) { + fn schedule_compute_trees_(&mut self, tables_ref: Arc, net_type: WhatAmI) { log::trace!("Schedule computations"); if (net_type == WhatAmI::Router && self.routers_trees_task.is_none()) || (net_type == WhatAmI::Peer && self.peers_trees_task.is_none()) @@ -180,8 +195,12 @@ impl HatTables { log::trace!("Compute trees"); let new_childs = match net_type { - WhatAmI::Router => tables.hat.routers_net.as_mut().unwrap().compute_trees(), - _ => tables.hat.peers_net.as_mut().unwrap().compute_trees(), + WhatAmI::Router => hat_mut!(tables) + .routers_net + .as_mut() + .unwrap() + .compute_trees(), + _ => hat_mut!(tables).peers_net.as_mut().unwrap().compute_trees(), }; log::trace!("Compute routes"); @@ -190,8 +209,8 @@ impl HatTables { log::trace!("Computations completed"); match net_type { - WhatAmI::Router => tables.hat.routers_trees_task = None, - _ => tables.hat.peers_trees_task = None, + WhatAmI::Router => hat_mut!(tables).routers_trees_task = None, + _ => hat_mut!(tables).peers_trees_task = None, }; })); match net_type { @@ -255,7 +274,13 @@ pub(crate) fn close_face(tables: &TablesLock, face: &mut Arc) { face.local_mappings.clear(); let mut subs_matches = vec![]; - for mut res in face.hat.remote_subs.drain() { + for mut res in face + .hat + .downcast_mut::() + .unwrap() + .remote_subs + .drain() + { get_mut_unchecked(&mut res).session_ctxs.remove(&face.id); undeclare_client_subscription(&mut wtables, &mut face_clone, &mut res); @@ -275,7 +300,13 @@ pub(crate) fn close_face(tables: &TablesLock, face: &mut Arc) { } let mut qabls_matches = vec![]; - for mut res in face.hat.remote_qabls.drain() { + for mut res in face + .hat + .downcast_mut::() + .unwrap() + .remote_qabls + .drain() + { get_mut_unchecked(&mut res).session_ctxs.remove(&face.id); undeclare_client_queryable(&mut wtables, &mut face_clone, &mut res); @@ -323,3 +354,407 @@ pub(crate) fn close_face(tables: &TablesLock, face: &mut Arc) { drop(wtables); drop(ctrl_lock); } + +#[allow(clippy::too_many_arguments)] +pub(crate) fn init( + tables: &Arc, + runtime: Runtime, + 
router_full_linkstate: bool, + peer_full_linkstate: bool, + router_peers_failover_brokering: bool, + gossip: bool, + gossip_multihop: bool, + autoconnect: WhatAmIMatcher, +) { + let mut tables = zwrite!(tables.tables); + if router_full_linkstate | gossip { + hat_mut!(tables).routers_net = Some(Network::new( + "[Routers network]".to_string(), + tables.zid, + runtime.clone(), + router_full_linkstate, + router_peers_failover_brokering, + gossip, + gossip_multihop, + autoconnect, + )); + } + if peer_full_linkstate | gossip { + hat_mut!(tables).peers_net = Some(Network::new( + "[Peers network]".to_string(), + tables.zid, + runtime, + peer_full_linkstate, + router_peers_failover_brokering, + gossip, + gossip_multihop, + autoconnect, + )); + } + if router_full_linkstate && peer_full_linkstate { + hat_mut!(tables).shared_nodes = shared_nodes( + hat!(tables).routers_net.as_ref().unwrap(), + hat!(tables).peers_net.as_ref().unwrap(), + ); + } +} + +pub(crate) fn new_transport_unicast( + tables_ref: &Arc, + transport: TransportUnicast, +) -> ZResult> { + let ctrl_lock = zlock!(tables_ref.ctrl_lock); + let mut tables = zwrite!(tables_ref.tables); + let whatami = transport.get_whatami()?; + + let link_id = match (tables.whatami, whatami) { + (WhatAmI::Router, WhatAmI::Router) => hat_mut!(tables) + .routers_net + .as_mut() + .unwrap() + .add_link(transport.clone()), + (WhatAmI::Router, WhatAmI::Peer) + | (WhatAmI::Peer, WhatAmI::Router) + | (WhatAmI::Peer, WhatAmI::Peer) => { + if let Some(net) = hat_mut!(tables).peers_net.as_mut() { + net.add_link(transport.clone()) + } else { + 0 + } + } + _ => 0, + }; + + if hat!(tables).full_net(WhatAmI::Router) && hat!(tables).full_net(WhatAmI::Peer) { + hat_mut!(tables).shared_nodes = shared_nodes( + hat!(tables).routers_net.as_ref().unwrap(), + hat!(tables).peers_net.as_ref().unwrap(), + ); + } + + let face = tables + .open_net_face( + transport.get_zid()?, + transport.get_whatami()?, + #[cfg(feature = "stats")] + transport.get_stats()?, + Arc::new(Mux::new(transport)), + link_id, + ) + .upgrade() + .unwrap(); + + match (tables.whatami, whatami) { + (WhatAmI::Router, WhatAmI::Router) => { + hat_mut!(tables).schedule_compute_trees_(tables_ref.clone(), WhatAmI::Router); + } + (WhatAmI::Router, WhatAmI::Peer) + | (WhatAmI::Peer, WhatAmI::Router) + | (WhatAmI::Peer, WhatAmI::Peer) => { + if hat_mut!(tables).full_net(WhatAmI::Peer) { + hat_mut!(tables).schedule_compute_trees_(tables_ref.clone(), WhatAmI::Peer); + } + } + _ => (), + } + drop(tables); + drop(ctrl_lock); + Ok(face) +} + +pub(crate) fn handle_oam( + tables_ref: &Arc, + oam: Oam, + transport: &TransportUnicast, +) -> ZResult<()> { + if oam.id == OAM_LINKSTATE { + if let ZExtBody::ZBuf(buf) = oam.body { + if let Ok(zid) = transport.get_zid() { + use zenoh_buffers::reader::HasReader; + use zenoh_codec::RCodec; + let codec = Zenoh080Routing::new(); + let mut reader = buf.reader(); + let list: LinkStateList = codec.read(&mut reader).unwrap(); + + let ctrl_lock = zlock!(tables_ref.ctrl_lock); + let mut tables = zwrite!(tables_ref.tables); + let whatami = transport.get_whatami()?; + match (tables.whatami, whatami) { + (WhatAmI::Router, WhatAmI::Router) => { + for (_, removed_node) in hat_mut!(tables) + .routers_net + .as_mut() + .unwrap() + .link_states(list.link_states, zid) + .removed_nodes + { + pubsub_remove_node(&mut tables, &removed_node.zid, WhatAmI::Router); + queries_remove_node(&mut tables, &removed_node.zid, WhatAmI::Router); + } + + if hat!(tables).full_net(WhatAmI::Peer) { + 
hat_mut!(tables).shared_nodes = shared_nodes( + hat!(tables).routers_net.as_ref().unwrap(), + hat!(tables).peers_net.as_ref().unwrap(), + ); + } + + hat_mut!(tables) + .schedule_compute_trees_(tables_ref.clone(), WhatAmI::Router); + } + (WhatAmI::Router, WhatAmI::Peer) + | (WhatAmI::Peer, WhatAmI::Router) + | (WhatAmI::Peer, WhatAmI::Peer) => { + if let Some(net) = hat_mut!(tables).peers_net.as_mut() { + let changes = net.link_states(list.link_states, zid); + if hat!(tables).full_net(WhatAmI::Peer) { + for (_, removed_node) in changes.removed_nodes { + pubsub_remove_node( + &mut tables, + &removed_node.zid, + WhatAmI::Peer, + ); + queries_remove_node( + &mut tables, + &removed_node.zid, + WhatAmI::Peer, + ); + } + + if tables.whatami == WhatAmI::Router { + hat_mut!(tables).shared_nodes = shared_nodes( + hat!(tables).routers_net.as_ref().unwrap(), + hat!(tables).peers_net.as_ref().unwrap(), + ); + } + + hat_mut!(tables) + .schedule_compute_trees_(tables_ref.clone(), WhatAmI::Peer); + } else { + for (_, updated_node) in changes.updated_nodes { + pubsub_linkstate_change( + &mut tables, + &updated_node.zid, + &updated_node.links, + ); + queries_linkstate_change( + &mut tables, + &updated_node.zid, + &updated_node.links, + ); + } + } + } + } + _ => (), + }; + drop(tables); + drop(ctrl_lock); + } + } + } + + Ok(()) +} + +pub(crate) fn closing(tables_ref: &Arc, transport: &TransportUnicast) -> ZResult<()> { + match (transport.get_zid(), transport.get_whatami()) { + (Ok(zid), Ok(whatami)) => { + let ctrl_lock = zlock!(tables_ref.ctrl_lock); + let mut tables = zwrite!(tables_ref.tables); + match (tables.whatami, whatami) { + (WhatAmI::Router, WhatAmI::Router) => { + for (_, removed_node) in hat_mut!(tables) + .routers_net + .as_mut() + .unwrap() + .remove_link(&zid) + { + pubsub_remove_node(&mut tables, &removed_node.zid, WhatAmI::Router); + queries_remove_node(&mut tables, &removed_node.zid, WhatAmI::Router); + } + + if hat!(tables).full_net(WhatAmI::Peer) { + hat_mut!(tables).shared_nodes = shared_nodes( + hat!(tables).routers_net.as_ref().unwrap(), + hat!(tables).peers_net.as_ref().unwrap(), + ); + } + + hat_mut!(tables).schedule_compute_trees_(tables_ref.clone(), WhatAmI::Router); + } + (WhatAmI::Router, WhatAmI::Peer) + | (WhatAmI::Peer, WhatAmI::Router) + | (WhatAmI::Peer, WhatAmI::Peer) => { + if hat!(tables).full_net(WhatAmI::Peer) { + for (_, removed_node) in hat_mut!(tables) + .peers_net + .as_mut() + .unwrap() + .remove_link(&zid) + { + pubsub_remove_node(&mut tables, &removed_node.zid, WhatAmI::Peer); + queries_remove_node(&mut tables, &removed_node.zid, WhatAmI::Peer); + } + + if tables.whatami == WhatAmI::Router { + hat_mut!(tables).shared_nodes = shared_nodes( + hat!(tables).routers_net.as_ref().unwrap(), + hat!(tables).peers_net.as_ref().unwrap(), + ); + } + + hat_mut!(tables).schedule_compute_trees_(tables_ref.clone(), WhatAmI::Peer); + } else if let Some(net) = hat_mut!(tables).peers_net.as_mut() { + net.remove_link(&zid); + } + } + _ => (), + }; + drop(tables); + drop(ctrl_lock); + } + (_, _) => log::error!("Closed transport in session closing!"), + } + Ok(()) +} + +pub(crate) fn map_routing_context( + tables: &Tables, + face: &FaceState, + routing_context: RoutingContext, +) -> RoutingContext { + match tables.whatami { + WhatAmI::Router => match face.whatami { + WhatAmI::Router => hat!(tables) + .routers_net + .as_ref() + .unwrap() + .get_local_context(routing_context, face.link_id), + WhatAmI::Peer => { + if hat!(tables).full_net(WhatAmI::Peer) { + hat!(tables) + .peers_net + 
.as_ref() + .unwrap() + .get_local_context(routing_context, face.link_id) + } else { + 0 + } + } + _ => 0, + }, + WhatAmI::Peer => { + if hat!(tables).full_net(WhatAmI::Peer) { + hat!(tables) + .peers_net + .as_ref() + .unwrap() + .get_local_context(routing_context, face.link_id) + } else { + 0 + } + } + _ => 0, + } +} + +#[macro_export] +macro_rules! hat { + ($t:expr) => { + $t.hat.downcast_ref::().unwrap() + }; +} + +#[macro_export] +macro_rules! hat_mut { + ($t:expr) => { + $t.hat.downcast_mut::().unwrap() + }; +} + +#[macro_export] +macro_rules! res_hat { + ($r:expr) => { + $r.context().hat.downcast_ref::().unwrap() + }; +} + +#[macro_export] +macro_rules! res_hat_mut { + ($r:expr) => { + get_mut_unchecked($r) + .context_mut() + .hat + .downcast_mut::() + .unwrap() + }; +} + +#[macro_export] +macro_rules! face_hat { + ($f:expr) => { + $f.hat.downcast_ref::().unwrap() + }; +} + +#[macro_export] +macro_rules! face_hat_mut { + ($f:expr) => { + get_mut_unchecked($f).hat.downcast_mut::().unwrap() + }; +} + +fn get_router(tables: &Tables, face: &Arc, nodeid: RoutingContext) -> Option { + match hat!(tables) + .routers_net + .as_ref() + .unwrap() + .get_link(face.link_id) + { + Some(link) => match link.get_zid(&(nodeid as u64)) { + Some(router) => Some(*router), + None => { + log::error!( + "Received router declaration with unknown routing context id {}", + nodeid + ); + None + } + }, + None => { + log::error!( + "Could not find corresponding link in routers network for {}", + face + ); + None + } + } +} + +fn get_peer(tables: &Tables, face: &Arc, nodeid: RoutingContext) -> Option { + match hat!(tables) + .peers_net + .as_ref() + .unwrap() + .get_link(face.link_id) + { + Some(link) => match link.get_zid(&(nodeid as u64)) { + Some(router) => Some(*router), + None => { + log::error!( + "Received peer declaration with unknown routing context id {}", + nodeid + ); + None + } + }, + None => { + log::error!( + "Could not find corresponding link in peers network for {}", + face + ); + None + } + } +} diff --git a/zenoh/src/net/routing/hat/network.rs b/zenoh/src/net/routing/hat/network.rs index 94f241bbcf..43f94e4f9d 100644 --- a/zenoh/src/net/routing/hat/network.rs +++ b/zenoh/src/net/routing/hat/network.rs @@ -161,12 +161,12 @@ impl Network { } //noinspection ALL - pub(crate) fn dot(&self) -> String { - std::format!( - "{:?}", - petgraph::dot::Dot::with_config(&self.graph, &[petgraph::dot::Config::EdgeNoLabel]) - ) - } + // pub(crate) fn dot(&self) -> String { + // std::format!( + // "{:?}", + // petgraph::dot::Dot::with_config(&self.graph, &[petgraph::dot::Config::EdgeNoLabel]) + // ) + // } #[inline] pub(crate) fn get_node(&self, zid: &ZenohId) -> Option<&Node> { diff --git a/zenoh/src/net/routing/hat/pubsub.rs b/zenoh/src/net/routing/hat/pubsub.rs index d511ac210e..c6eda3adda 100644 --- a/zenoh/src/net/routing/hat/pubsub.rs +++ b/zenoh/src/net/routing/hat/pubsub.rs @@ -1,5 +1,3 @@ -use crate::net::routing::dispatcher::tables::{Route, RoutingExpr}; - // // Copyright (c) 2023 ZettaScale Technology // @@ -19,7 +17,9 @@ use super::super::dispatcher::resource::{Resource, RoutingContext, SessionContex use super::super::dispatcher::tables::{Tables, TablesLock}; use super::super::PREFIX_LIVELINESS; use super::network::Network; -use super::HatTables; +use super::{get_peer, get_router, HatContext, HatFace, HatTables}; +use crate::net::routing::dispatcher::tables::{DataRoutes, PullCaches, Route, RoutingExpr}; +use crate::{face_hat, face_hat_mut, hat, hat_mut, res_hat, res_hat_mut}; use 
petgraph::graph::NodeIndex; use std::borrow::Cow; use std::collections::{HashMap, HashSet}; @@ -43,7 +43,7 @@ fn send_sourced_subscription_to_net_childs( res: &Arc, src_face: Option<&Arc>, sub_info: &SubscriberInfo, - routing_context: Option, + routing_context: RoutingContext, ) { for child in childs { if net.graph.contains_node(*child) { @@ -58,7 +58,7 @@ fn send_sourced_subscription_to_net_childs( ext_qos: ext::QoSType::declare_default(), ext_tstamp: None, ext_nodeid: ext::NodeIdType { - node_id: routing_context.unwrap_or(0), + node_id: routing_context, }, body: DeclareBody::DeclareSubscriber(DeclareSubscriber { id: 0, // TODO @@ -84,7 +84,7 @@ fn propagate_simple_subscription_to( full_peer_net: bool, ) { if (src_face.id != dst_face.id || res.expr().starts_with(PREFIX_LIVELINESS)) - && !dst_face.hat.local_subs.contains(res) + && !face_hat!(dst_face).local_subs.contains(res) && match tables.whatami { WhatAmI::Router => { if full_peer_net { @@ -93,7 +93,7 @@ fn propagate_simple_subscription_to( dst_face.whatami != WhatAmI::Router && (src_face.whatami != WhatAmI::Peer || dst_face.whatami != WhatAmI::Peer - || tables.hat.failover_brokering(src_face.zid, dst_face.zid)) + || hat!(tables).failover_brokering(src_face.zid, dst_face.zid)) } } WhatAmI::Peer => { @@ -106,10 +106,7 @@ fn propagate_simple_subscription_to( _ => src_face.whatami == WhatAmI::Client || dst_face.whatami == WhatAmI::Client, } { - get_mut_unchecked(dst_face) - .hat - .local_subs - .insert(res.clone()); + face_hat_mut!(dst_face).local_subs.insert(res.clone()); let key_expr = Resource::decl_key(res, dst_face); dst_face.primitives.send_declare(Declare { ext_qos: ext::QoSType::declare_default(), @@ -130,7 +127,7 @@ fn propagate_simple_subscription( sub_info: &SubscriberInfo, src_face: &mut Arc, ) { - let full_peer_net = tables.hat.full_net(WhatAmI::Peer); + let full_peer_net = hat!(tables).full_net(WhatAmI::Peer); for mut dst_face in tables .faces .values() @@ -156,7 +153,7 @@ fn propagate_sourced_subscription( source: &ZenohId, net_type: WhatAmI, ) { - let net = tables.hat.get_net(net_type).unwrap(); + let net = hat!(tables).get_net(net_type).unwrap(); match net.get_idx(source) { Some(tree_sid) => { if net.trees.len() > tree_sid.index() { @@ -167,7 +164,7 @@ fn propagate_sourced_subscription( res, src_face, sub_info, - Some(tree_sid.index() as RoutingContext), + tree_sid.index() as RoutingContext, ); } else { log::trace!( @@ -193,7 +190,7 @@ fn register_router_subscription( sub_info: &SubscriberInfo, router: ZenohId, ) { - if !res.context().hat.router_subs.contains(&router) { + if !res_hat!(res).router_subs.contains(&router) { // Register router subscription { log::debug!( @@ -201,19 +198,15 @@ fn register_router_subscription( res.expr(), router ); - get_mut_unchecked(res) - .context_mut() - .hat - .router_subs - .insert(router); - tables.hat.router_subs.insert(res.clone()); + res_hat_mut!(res).router_subs.insert(router); + hat_mut!(tables).router_subs.insert(res.clone()); } // Propagate subscription to routers propagate_sourced_subscription(tables, res, sub_info, Some(face), &router, WhatAmI::Router); } // Propagate subscription to peers - if tables.hat.full_net(WhatAmI::Peer) && face.whatami != WhatAmI::Peer { + if hat!(tables).full_net(WhatAmI::Peer) && face.whatami != WhatAmI::Peer { register_peer_subscription(tables, face, res, sub_info, tables.zid) } @@ -221,7 +214,7 @@ fn register_router_subscription( propagate_simple_subscription(tables, res, sub_info, face); } -pub fn declare_router_subscription( +fn 
declare_router_subscription( tables: &TablesLock, rtables: RwLockReadGuard, face: &mut Arc, @@ -284,16 +277,12 @@ fn register_peer_subscription( sub_info: &SubscriberInfo, peer: ZenohId, ) { - if !res.context().hat.peer_subs.contains(&peer) { + if !res_hat!(res).peer_subs.contains(&peer) { // Register peer subscription { log::debug!("Register peer subscription {} (peer: {})", res.expr(), peer); - get_mut_unchecked(res) - .context_mut() - .hat - .peer_subs - .insert(peer); - tables.hat.peer_subs.insert(res.clone()); + res_hat_mut!(res).peer_subs.insert(peer); + hat_mut!(tables).peer_subs.insert(res.clone()); } // Propagate subscription to peers @@ -306,7 +295,7 @@ fn register_peer_subscription( } } -pub fn declare_peer_subscription( +fn declare_peer_subscription( tables: &TablesLock, rtables: RwLockReadGuard, face: &mut Arc, @@ -404,10 +393,10 @@ fn register_client_subscription( } } } - get_mut_unchecked(face).hat.remote_subs.insert(res.clone()); + face_hat_mut!(face).remote_subs.insert(res.clone()); } -pub fn declare_client_subscription( +fn declare_client_subscription( tables: &TablesLock, rtables: RwLockReadGuard, face: &mut Arc, @@ -456,7 +445,7 @@ pub fn declare_client_subscription( ); } WhatAmI::Peer => { - if wtables.hat.full_net(WhatAmI::Peer) { + if hat!(wtables).full_net(WhatAmI::Peer) { let zid = wtables.zid; register_peer_subscription( &mut wtables, @@ -522,12 +511,39 @@ pub fn declare_client_subscription( } } +pub fn declare_subscription( + tables: &TablesLock, + face: &mut Arc, + expr: &WireExpr, + sub_info: &SubscriberInfo, + node_id: RoutingContext, +) { + let rtables = zread!(tables.tables); + match (rtables.whatami, face.whatami) { + (WhatAmI::Router, WhatAmI::Router) => { + if let Some(router) = get_router(&rtables, face, node_id) { + declare_router_subscription(tables, rtables, face, expr, sub_info, router) + } + } + (WhatAmI::Router, WhatAmI::Peer) + | (WhatAmI::Peer, WhatAmI::Router) + | (WhatAmI::Peer, WhatAmI::Peer) => { + if hat!(rtables).full_net(WhatAmI::Peer) { + if let Some(peer) = get_peer(&rtables, face, node_id) { + declare_peer_subscription(tables, rtables, face, expr, sub_info, peer) + } + } else { + declare_client_subscription(tables, rtables, face, expr, sub_info) + } + } + _ => declare_client_subscription(tables, rtables, face, expr, sub_info), + } +} + #[inline] fn remote_router_subs(tables: &Tables, res: &Arc) -> bool { res.context.is_some() - && res - .context() - .hat + && res_hat!(res) .router_subs .iter() .any(|peer| peer != &tables.zid) @@ -536,9 +552,7 @@ fn remote_router_subs(tables: &Tables, res: &Arc) -> bool { #[inline] fn remote_peer_subs(tables: &Tables, res: &Arc) -> bool { res.context.is_some() - && res - .context() - .hat + && res_hat!(res) .peer_subs .iter() .any(|peer| peer != &tables.zid) @@ -597,7 +611,7 @@ fn send_forget_sourced_subscription_to_net_childs( fn propagate_forget_simple_subscription(tables: &mut Tables, res: &Arc) { for face in tables.faces.values_mut() { - if face.hat.local_subs.contains(res) { + if face_hat!(face).local_subs.contains(res) { let wire_expr = Resource::get_best_key(res, "", face.id); face.primitives.send_declare(Declare { ext_qos: ext::QoSType::declare_default(), @@ -608,15 +622,15 @@ fn propagate_forget_simple_subscription(tables: &mut Tables, res: &Arc ext_wire_expr: WireExprType { wire_expr }, }), }); - get_mut_unchecked(face).hat.local_subs.remove(res); + face_hat_mut!(face).local_subs.remove(res); } } } fn propagate_forget_simple_subscription_to_peers(tables: &mut Tables, res: &Arc) { - if 
!tables.hat.full_net(WhatAmI::Peer) - && res.context().hat.router_subs.len() == 1 - && res.context().hat.router_subs.contains(&tables.zid) + if !hat!(tables).full_net(WhatAmI::Peer) + && res_hat!(res).router_subs.len() == 1 + && res_hat!(res).router_subs.contains(&tables.zid) { for mut face in tables .faces @@ -625,13 +639,13 @@ fn propagate_forget_simple_subscription_to_peers(tables: &mut Tables, res: &Arc< .collect::>>() { if face.whatami == WhatAmI::Peer - && face.hat.local_subs.contains(res) + && face_hat!(face).local_subs.contains(res) && !res.session_ctxs.values().any(|s| { face.zid != s.face.zid && s.subs.is_some() && (s.face.whatami == WhatAmI::Client || (s.face.whatami == WhatAmI::Peer - && tables.hat.failover_brokering(s.face.zid, face.zid))) + && hat!(tables).failover_brokering(s.face.zid, face.zid))) }) { let wire_expr = Resource::get_best_key(res, "", face.id); @@ -645,7 +659,7 @@ fn propagate_forget_simple_subscription_to_peers(tables: &mut Tables, res: &Arc< }), }); - get_mut_unchecked(&mut face).hat.local_subs.remove(res); + face_hat_mut!(&mut face).local_subs.remove(res); } } } @@ -658,7 +672,7 @@ fn propagate_forget_sourced_subscription( source: &ZenohId, net_type: WhatAmI, ) { - let net = tables.hat.get_net(net_type).unwrap(); + let net = hat!(tables).get_net(net_type).unwrap(); match net.get_idx(source) { Some(tree_sid) => { if net.trees.len() > tree_sid.index() { @@ -693,16 +707,14 @@ fn unregister_router_subscription(tables: &mut Tables, res: &mut Arc, res.expr(), router ); - get_mut_unchecked(res) - .context_mut() - .hat - .router_subs - .retain(|sub| sub != router); + res_hat_mut!(res).router_subs.retain(|sub| sub != router); - if res.context().hat.router_subs.is_empty() { - tables.hat.router_subs.retain(|sub| !Arc::ptr_eq(sub, res)); + if res_hat!(res).router_subs.is_empty() { + hat_mut!(tables) + .router_subs + .retain(|sub| !Arc::ptr_eq(sub, res)); - if tables.hat.full_net(WhatAmI::Peer) { + if hat_mut!(tables).full_net(WhatAmI::Peer) { undeclare_peer_subscription(tables, None, res, &tables.zid.clone()); } propagate_forget_simple_subscription(tables, res); @@ -717,13 +729,13 @@ fn undeclare_router_subscription( res: &mut Arc, router: &ZenohId, ) { - if res.context().hat.router_subs.contains(router) { + if res_hat!(res).router_subs.contains(router) { unregister_router_subscription(tables, res, router); propagate_forget_sourced_subscription(tables, res, face, router, WhatAmI::Router); } } -pub fn forget_router_subscription( +fn forget_router_subscription( tables: &TablesLock, rtables: RwLockReadGuard, face: &mut Arc, @@ -763,14 +775,12 @@ fn unregister_peer_subscription(tables: &mut Tables, res: &mut Arc, pe res.expr(), peer ); - get_mut_unchecked(res) - .context_mut() - .hat - .peer_subs - .retain(|sub| sub != peer); + res_hat_mut!(res).peer_subs.retain(|sub| sub != peer); - if res.context().hat.peer_subs.is_empty() { - tables.hat.peer_subs.retain(|sub| !Arc::ptr_eq(sub, res)); + if res_hat!(res).peer_subs.is_empty() { + hat_mut!(tables) + .peer_subs + .retain(|sub| !Arc::ptr_eq(sub, res)); if tables.whatami == WhatAmI::Peer { propagate_forget_simple_subscription(tables, res); @@ -784,13 +794,13 @@ fn undeclare_peer_subscription( res: &mut Arc, peer: &ZenohId, ) { - if res.context().hat.peer_subs.contains(peer) { + if res_hat!(res).peer_subs.contains(peer) { unregister_peer_subscription(tables, res, peer); propagate_forget_sourced_subscription(tables, res, face, peer, WhatAmI::Peer); } } -pub fn forget_peer_subscription( +fn forget_peer_subscription( tables: 
&TablesLock, rtables: RwLockReadGuard, face: &mut Arc, @@ -841,7 +851,7 @@ pub(crate) fn undeclare_client_subscription( if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) { get_mut_unchecked(ctx).subs = None; } - get_mut_unchecked(face).hat.remote_subs.remove(res); + face_hat_mut!(face).remote_subs.remove(res); let mut client_subs = client_subs(res); let router_subs = remote_router_subs(tables, res); @@ -856,7 +866,7 @@ pub(crate) fn undeclare_client_subscription( } WhatAmI::Peer => { if client_subs.is_empty() { - if tables.hat.full_net(WhatAmI::Peer) { + if hat!(tables).full_net(WhatAmI::Peer) { undeclare_peer_subscription(tables, None, res, &tables.zid.clone()); } else { propagate_forget_simple_subscription(tables, res); @@ -871,7 +881,7 @@ pub(crate) fn undeclare_client_subscription( } if client_subs.len() == 1 && !router_subs && !peer_subs { let face = &mut client_subs[0]; - if face.hat.local_subs.contains(res) + if face_hat!(face).local_subs.contains(res) && !(face.whatami == WhatAmI::Client && res.expr().starts_with(PREFIX_LIVELINESS)) { let wire_expr = Resource::get_best_key(res, "", face.id); @@ -885,12 +895,12 @@ pub(crate) fn undeclare_client_subscription( }), }); - get_mut_unchecked(face).hat.local_subs.remove(res); + face_hat_mut!(face).local_subs.remove(res); } } } -pub fn forget_client_subscription( +fn forget_client_subscription( tables: &TablesLock, rtables: RwLockReadGuard, face: &mut Arc, @@ -924,6 +934,34 @@ pub fn forget_client_subscription( } } +pub fn forget_subscription( + tables: &TablesLock, + face: &mut Arc, + expr: &WireExpr, + node_id: RoutingContext, +) { + let rtables = zread!(tables.tables); + match (rtables.whatami, face.whatami) { + (WhatAmI::Router, WhatAmI::Router) => { + if let Some(router) = get_router(&rtables, face, node_id) { + forget_router_subscription(tables, rtables, face, expr, &router) + } + } + (WhatAmI::Router, WhatAmI::Peer) + | (WhatAmI::Peer, WhatAmI::Router) + | (WhatAmI::Peer, WhatAmI::Peer) => { + if hat!(rtables).full_net(WhatAmI::Peer) { + if let Some(peer) = get_peer(&rtables, face, node_id) { + forget_peer_subscription(tables, rtables, face, expr, &peer) + } + } else { + forget_client_subscription(tables, rtables, face, expr) + } + } + _ => forget_client_subscription(tables, rtables, face, expr), + } +} + pub(crate) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { let sub_info = SubscriberInfo { reliability: Reliability::Reliable, // @TODO @@ -932,8 +970,8 @@ pub(crate) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { match tables.whatami { WhatAmI::Router => { if face.whatami == WhatAmI::Client { - for sub in &tables.hat.router_subs { - get_mut_unchecked(face).hat.local_subs.insert(sub.clone()); + for sub in &hat!(tables).router_subs { + face_hat_mut!(face).local_subs.insert(sub.clone()); let key_expr = Resource::decl_key(sub, face); face.primitives.send_declare(Declare { ext_qos: ext::QoSType::declare_default(), @@ -946,23 +984,19 @@ pub(crate) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { }), }); } - } else if face.whatami == WhatAmI::Peer && !tables.hat.full_net(WhatAmI::Peer) { - for sub in &tables.hat.router_subs { + } else if face.whatami == WhatAmI::Peer && !hat!(tables).full_net(WhatAmI::Peer) { + for sub in &hat!(tables).router_subs { if sub.context.is_some() - && (sub - .context() - .hat - .router_subs - .iter() - .any(|r| *r != tables.zid) + && (res_hat!(sub).router_subs.iter().any(|r| *r != tables.zid) || sub.session_ctxs.values().any(|s| { s.subs.is_some() && 
(s.face.whatami == WhatAmI::Client || (s.face.whatami == WhatAmI::Peer - && tables.hat.failover_brokering(s.face.zid, face.zid))) + && hat!(tables) + .failover_brokering(s.face.zid, face.zid))) })) { - get_mut_unchecked(face).hat.local_subs.insert(sub.clone()); + face_hat_mut!(face).local_subs.insert(sub.clone()); let key_expr = Resource::decl_key(sub, face); face.primitives.send_declare(Declare { ext_qos: ext::QoSType::declare_default(), @@ -979,10 +1013,10 @@ pub(crate) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { } } WhatAmI::Peer => { - if tables.hat.full_net(WhatAmI::Peer) { + if hat!(tables).full_net(WhatAmI::Peer) { if face.whatami == WhatAmI::Client { - for sub in &tables.hat.peer_subs { - get_mut_unchecked(face).hat.local_subs.insert(sub.clone()); + for sub in &hat!(tables).peer_subs { + face_hat_mut!(face).local_subs.insert(sub.clone()); let key_expr = Resource::decl_key(sub, face); face.primitives.send_declare(Declare { ext_qos: ext::QoSType::declare_default(), @@ -1003,7 +1037,7 @@ pub(crate) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { .cloned() .collect::>>() { - for sub in &src_face.hat.remote_subs { + for sub in &face_hat!(src_face).remote_subs { propagate_simple_subscription_to( tables, face, @@ -1023,7 +1057,7 @@ pub(crate) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { .cloned() .collect::>>() { - for sub in &src_face.hat.remote_subs { + for sub in &face_hat!(src_face).remote_subs { propagate_simple_subscription_to( tables, face, @@ -1041,11 +1075,10 @@ pub(crate) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { pub(crate) fn pubsub_remove_node(tables: &mut Tables, node: &ZenohId, net_type: WhatAmI) { match net_type { WhatAmI::Router => { - for mut res in tables - .hat + for mut res in hat!(tables) .router_subs .iter() - .filter(|res| res.context().hat.router_subs.contains(node)) + .filter(|res| res_hat!(res).router_subs.contains(node)) .cloned() .collect::>>() { @@ -1061,11 +1094,10 @@ pub(crate) fn pubsub_remove_node(tables: &mut Tables, node: &ZenohId, net_type: } } WhatAmI::Peer => { - for mut res in tables - .hat + for mut res in hat!(tables) .peer_subs .iter() - .filter(|res| res.context().hat.peer_subs.contains(node)) + .filter(|res| res_hat!(res).peer_subs.contains(node)) .cloned() .collect::>>() { @@ -1101,20 +1133,20 @@ pub(crate) fn pubsub_tree_change( // propagate subs to new childs for (tree_sid, tree_childs) in new_childs.iter().enumerate() { if !tree_childs.is_empty() { - let net = tables.hat.get_net(net_type).unwrap(); + let net = hat!(tables).get_net(net_type).unwrap(); let tree_idx = NodeIndex::new(tree_sid); if net.graph.contains_node(tree_idx) { let tree_id = net.graph[tree_idx].zid; let subs_res = match net_type { - WhatAmI::Router => &tables.hat.router_subs, - _ => &tables.hat.peer_subs, + WhatAmI::Router => &hat!(tables).router_subs, + _ => &hat!(tables).peer_subs, }; for res in subs_res { let subs = match net_type { - WhatAmI::Router => &res.context().hat.router_subs, - _ => &res.context().hat.peer_subs, + WhatAmI::Router => &res_hat!(res).router_subs, + _ => &res_hat!(res).peer_subs, }; for sub in subs { if *sub == tree_id { @@ -1129,7 +1161,7 @@ pub(crate) fn pubsub_tree_change( res, None, &sub_info, - Some(tree_sid as RoutingContext), + tree_sid as RoutingContext, ); } } @@ -1144,11 +1176,11 @@ pub(crate) fn pubsub_tree_change( pub(crate) fn pubsub_linkstate_change(tables: &mut Tables, zid: &ZenohId, links: &[ZenohId]) { if let Some(src_face) = tables.get_face(zid).cloned() { - if 
tables.hat.router_peers_failover_brokering + if hat!(tables).router_peers_failover_brokering && tables.whatami == WhatAmI::Router && src_face.whatami == WhatAmI::Peer { - for res in &src_face.hat.remote_subs { + for res in &face_hat!(src_face).remote_subs { let client_subs = res .session_ctxs .values() @@ -1160,11 +1192,10 @@ pub(crate) fn pubsub_linkstate_change(tables: &mut Tables, zid: &ZenohId, links: { let dst_face = &mut get_mut_unchecked(ctx).face; if dst_face.whatami == WhatAmI::Peer && src_face.zid != dst_face.zid { - if dst_face.hat.local_subs.contains(res) { + if face_hat!(dst_face).local_subs.contains(res) { let forget = !HatTables::failover_brokering_to(links, dst_face.zid) && { - let ctx_links = tables - .hat + let ctx_links = hat!(tables) .peers_net .as_ref() .map(|net| net.get_links(dst_face.zid)) @@ -1192,14 +1223,11 @@ pub(crate) fn pubsub_linkstate_change(tables: &mut Tables, zid: &ZenohId, links: ), }); - get_mut_unchecked(dst_face).hat.local_subs.remove(res); + face_hat_mut!(dst_face).local_subs.remove(res); } } else if HatTables::failover_brokering_to(links, ctx.face.zid) { let dst_face = &mut get_mut_unchecked(ctx).face; - get_mut_unchecked(dst_face) - .hat - .local_subs - .insert(res.clone()); + face_hat_mut!(dst_face).local_subs.insert(res.clone()); let key_expr = Resource::decl_key(res, dst_face); let sub_info = SubscriberInfo { reliability: Reliability::Reliable, // TODO @@ -1289,17 +1317,15 @@ pub(crate) fn compute_data_route( .unwrap_or_else(|| Cow::from(Resource::get_matches(tables, &key_expr))); let master = tables.whatami != WhatAmI::Router - || !tables.hat.full_net(WhatAmI::Peer) - || *tables - .hat - .elect_router(&tables.zid, &key_expr, tables.hat.shared_nodes.iter()) + || !hat!(tables).full_net(WhatAmI::Peer) + || *hat!(tables).elect_router(&tables.zid, &key_expr, hat!(tables).shared_nodes.iter()) == tables.zid; for mres in matches.iter() { let mres = mres.upgrade().unwrap(); if tables.whatami == WhatAmI::Router { if master || source_type == WhatAmI::Router { - let net = tables.hat.routers_net.as_ref().unwrap(); + let net = hat!(tables).routers_net.as_ref().unwrap(); let router_source = match source_type { WhatAmI::Router => source, _ => net.idx.index() as RoutingContext, @@ -1310,12 +1336,12 @@ pub(crate) fn compute_data_route( tables, net, router_source, - &mres.context().hat.router_subs, + &res_hat!(mres).router_subs, ); } - if (master || source_type != WhatAmI::Router) && tables.hat.full_net(WhatAmI::Peer) { - let net = tables.hat.peers_net.as_ref().unwrap(); + if (master || source_type != WhatAmI::Router) && hat!(tables).full_net(WhatAmI::Peer) { + let net = hat!(tables).peers_net.as_ref().unwrap(); let peer_source = match source_type { WhatAmI::Peer => source, _ => net.idx.index() as RoutingContext, @@ -1326,13 +1352,13 @@ pub(crate) fn compute_data_route( tables, net, peer_source, - &mres.context().hat.peer_subs, + &res_hat!(mres).peer_subs, ); } } - if tables.whatami == WhatAmI::Peer && tables.hat.full_net(WhatAmI::Peer) { - let net = tables.hat.peers_net.as_ref().unwrap(); + if tables.whatami == WhatAmI::Peer && hat!(tables).full_net(WhatAmI::Peer) { + let net = hat!(tables).peers_net.as_ref().unwrap(); let peer_source = match source_type { WhatAmI::Router | WhatAmI::Peer => source, _ => net.idx.index() as RoutingContext, @@ -1343,7 +1369,7 @@ pub(crate) fn compute_data_route( tables, net, peer_source, - &mres.context().hat.peer_subs, + &res_hat!(mres).peer_subs, ); } @@ -1383,3 +1409,247 @@ pub(crate) fn compute_data_route( } Arc::new(route) } 
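// A minimal sketch of the pattern behind the `hat!` / `hat_mut!` / `res_hat!` /
// `face_hat!` macros introduced by this patch: they recover hat-specific state
// through standard `std::any::Any` downcasting. The `HatState` / `Owner` types
// below are hypothetical stand-ins for illustration only, not the real
// `HatTables` / `Tables` definitions from the zenoh sources.
use std::any::Any;

struct HatState {
    peer_subs: Vec<String>,
}

struct Owner {
    // type-erased, hat-specific state, analogous in shape to a `hat` field
    hat: Box<dyn Any + Send + Sync>,
}

fn peer_sub_count(owner: &Owner) -> usize {
    // same shape as `hat!($t)`: recover the concrete type from the erased box
    owner.hat.downcast_ref::<HatState>().unwrap().peer_subs.len()
}

fn main() {
    let owner = Owner {
        hat: Box::new(HatState {
            peer_subs: vec!["demo/**".to_string()],
        }),
    };
    assert_eq!(peer_sub_count(&owner), 1);
}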
+ +pub(crate) fn compute_matching_pulls(tables: &Tables, expr: &mut RoutingExpr) -> Arc { + let mut pull_caches = vec![]; + let ke = if let Ok(ke) = OwnedKeyExpr::try_from(expr.full_expr()) { + ke + } else { + return Arc::new(pull_caches); + }; + let res = Resource::get_resource(expr.prefix, expr.suffix); + let matches = res + .as_ref() + .and_then(|res| res.context.as_ref()) + .map(|ctx| Cow::from(&ctx.matches)) + .unwrap_or_else(|| Cow::from(Resource::get_matches(tables, &ke))); + + for mres in matches.iter() { + let mres = mres.upgrade().unwrap(); + for context in mres.session_ctxs.values() { + if let Some(subinfo) = &context.subs { + if subinfo.mode == Mode::Pull { + pull_caches.push(context.clone()); + } + } + } + } + Arc::new(pull_caches) +} + +pub(crate) fn compute_data_routes_(tables: &Tables, res: &Arc) -> DataRoutes { + let mut routes = DataRoutes { + matching_pulls: None, + routers_data_routes: vec![], + peers_data_routes: vec![], + peer_data_route: None, + client_data_route: None, + }; + let mut expr = RoutingExpr::new(res, ""); + if tables.whatami == WhatAmI::Router { + let indexes = hat!(tables) + .routers_net + .as_ref() + .unwrap() + .graph + .node_indices() + .collect::>(); + let max_idx = indexes.iter().max().unwrap(); + routes + .routers_data_routes + .resize_with(max_idx.index() + 1, || Arc::new(HashMap::new())); + + for idx in &indexes { + routes.routers_data_routes[idx.index()] = compute_data_route( + tables, + &mut expr, + idx.index() as RoutingContext, + WhatAmI::Router, + ); + } + + routes.peer_data_route = Some(compute_data_route( + tables, + &mut expr, + RoutingContext::default(), + WhatAmI::Peer, + )); + } + if (tables.whatami == WhatAmI::Router || tables.whatami == WhatAmI::Peer) + && hat!(tables).full_net(WhatAmI::Peer) + { + let indexes = hat!(tables) + .peers_net + .as_ref() + .unwrap() + .graph + .node_indices() + .collect::>(); + let max_idx = indexes.iter().max().unwrap(); + routes + .peers_data_routes + .resize_with(max_idx.index() + 1, || Arc::new(HashMap::new())); + + for idx in &indexes { + routes.peers_data_routes[idx.index()] = compute_data_route( + tables, + &mut expr, + idx.index() as RoutingContext, + WhatAmI::Peer, + ); + } + } + if tables.whatami == WhatAmI::Peer && !hat!(tables).full_net(WhatAmI::Peer) { + routes.client_data_route = Some(compute_data_route( + tables, + &mut expr, + RoutingContext::default(), + WhatAmI::Client, + )); + routes.peer_data_route = Some(compute_data_route( + tables, + &mut expr, + RoutingContext::default(), + WhatAmI::Peer, + )); + } + if tables.whatami == WhatAmI::Client { + routes.client_data_route = Some(compute_data_route( + tables, + &mut expr, + RoutingContext::default(), + WhatAmI::Client, + )); + } + routes.matching_pulls = Some(compute_matching_pulls(tables, &mut expr)); + routes +} + +pub(crate) fn compute_data_routes(tables: &mut Tables, res: &mut Arc) { + if res.context.is_some() { + let mut res_mut = res.clone(); + let res_mut = get_mut_unchecked(&mut res_mut); + let mut expr = RoutingExpr::new(res, ""); + if tables.whatami == WhatAmI::Router { + let indexes = hat!(tables) + .routers_net + .as_ref() + .unwrap() + .graph + .node_indices() + .collect::>(); + let max_idx = indexes.iter().max().unwrap(); + let routers_data_routes = &mut res_mut.context_mut().routers_data_routes; + routers_data_routes.clear(); + routers_data_routes.resize_with(max_idx.index() + 1, || Arc::new(HashMap::new())); + + for idx in &indexes { + routers_data_routes[idx.index()] = compute_data_route( + tables, + &mut expr, + 
idx.index() as RoutingContext, + WhatAmI::Router, + ); + } + + res_mut.context_mut().peer_data_route = Some(compute_data_route( + tables, + &mut expr, + RoutingContext::default(), + WhatAmI::Peer, + )); + } + if (tables.whatami == WhatAmI::Router || tables.whatami == WhatAmI::Peer) + && hat!(tables).full_net(WhatAmI::Peer) + { + let indexes = hat!(tables) + .peers_net + .as_ref() + .unwrap() + .graph + .node_indices() + .collect::>(); + let max_idx = indexes.iter().max().unwrap(); + let peers_data_routes = &mut res_mut.context_mut().peers_data_routes; + peers_data_routes.clear(); + peers_data_routes.resize_with(max_idx.index() + 1, || Arc::new(HashMap::new())); + + for idx in &indexes { + peers_data_routes[idx.index()] = compute_data_route( + tables, + &mut expr, + idx.index() as RoutingContext, + WhatAmI::Peer, + ); + } + } + if tables.whatami == WhatAmI::Peer && !hat!(tables).full_net(WhatAmI::Peer) { + res_mut.context_mut().client_data_route = Some(compute_data_route( + tables, + &mut expr, + RoutingContext::default(), + WhatAmI::Client, + )); + res_mut.context_mut().peer_data_route = Some(compute_data_route( + tables, + &mut expr, + RoutingContext::default(), + WhatAmI::Peer, + )); + } + if tables.whatami == WhatAmI::Client { + res_mut.context_mut().client_data_route = Some(compute_data_route( + tables, + &mut expr, + RoutingContext::default(), + WhatAmI::Client, + )); + } + res_mut.context_mut().matching_pulls = compute_matching_pulls(tables, &mut expr); + } +} + +#[inline] +pub(crate) fn ingress_filter(tables: &Tables, face: &FaceState, expr: &mut RoutingExpr) -> bool { + tables.whatami != WhatAmI::Router + || face.whatami != WhatAmI::Peer + || hat!(tables).peers_net.is_none() + || tables.zid + == *hat!(tables).elect_router( + &tables.zid, + expr.full_expr(), + hat!(tables).get_router_links(face.zid), + ) +} + +#[inline] +pub(crate) fn egress_filter( + tables: &Tables, + src_face: &FaceState, + out_face: &Arc, + expr: &mut RoutingExpr, +) -> bool { + if src_face.id != out_face.id + && match (src_face.mcast_group.as_ref(), out_face.mcast_group.as_ref()) { + (Some(l), Some(r)) => l != r, + _ => true, + } + { + let dst_master = tables.whatami != WhatAmI::Router + || out_face.whatami != WhatAmI::Peer + || hat!(tables).peers_net.is_none() + || tables.zid + == *hat!(tables).elect_router( + &tables.zid, + expr.full_expr(), + hat!(tables).get_router_links(out_face.zid), + ); + + return dst_master + && (src_face.whatami != WhatAmI::Peer + || out_face.whatami != WhatAmI::Peer + || hat!(tables).full_net(WhatAmI::Peer) + || hat!(tables).failover_brokering(src_face.zid, out_face.zid)); + } + false +} diff --git a/zenoh/src/net/routing/hat/queries.rs b/zenoh/src/net/routing/hat/queries.rs index 876bb287a6..dd2d3519b4 100644 --- a/zenoh/src/net/routing/hat/queries.rs +++ b/zenoh/src/net/routing/hat/queries.rs @@ -16,9 +16,12 @@ use super::super::dispatcher::queries::*; use super::super::dispatcher::resource::{Resource, RoutingContext, SessionContext}; use super::super::dispatcher::tables::{Tables, TablesLock}; use super::network::Network; -use super::HatTables; -use crate::net::routing::dispatcher::tables::{QueryTargetQabl, QueryTargetQablSet, RoutingExpr}; +use super::{get_peer, get_router, HatContext, HatFace, HatTables}; +use crate::net::routing::dispatcher::tables::{ + QueryRoutes, QueryTargetQabl, QueryTargetQablSet, RoutingExpr, +}; use crate::net::routing::PREFIX_LIVELINESS; +use crate::{face_hat, face_hat_mut, hat, hat_mut, res_hat, res_hat_mut}; use ordered_float::OrderedFloat; use 
petgraph::graph::NodeIndex; use std::borrow::Cow; @@ -53,18 +56,21 @@ fn merge_qabl_infos(mut this: QueryableInfo, info: &QueryableInfo) -> QueryableI } fn local_router_qabl_info(tables: &Tables, res: &Arc) -> QueryableInfo { - let info = if tables.hat.full_net(WhatAmI::Peer) { - res.context.as_ref().and_then(|ctx| { - ctx.hat.peer_qabls.iter().fold(None, |accu, (zid, info)| { - if *zid != tables.zid { - Some(match accu { - Some(accu) => merge_qabl_infos(accu, info), - None => *info, - }) - } else { - accu - } - }) + let info = if hat!(tables).full_net(WhatAmI::Peer) { + res.context.as_ref().and_then(|_| { + res_hat!(res) + .peer_qabls + .iter() + .fold(None, |accu, (zid, info)| { + if *zid != tables.zid { + Some(match accu { + Some(accu) => merge_qabl_infos(accu, info), + None => *info, + }) + } else { + accu + } + }) }) } else { None @@ -89,8 +95,7 @@ fn local_router_qabl_info(tables: &Tables, res: &Arc) -> QueryableInfo fn local_peer_qabl_info(tables: &Tables, res: &Arc) -> QueryableInfo { let info = if tables.whatami == WhatAmI::Router && res.context.is_some() { - res.context() - .hat + res_hat!(res) .router_qabls .iter() .fold(None, |accu, (zid, info)| { @@ -126,8 +131,7 @@ fn local_peer_qabl_info(tables: &Tables, res: &Arc) -> QueryableInfo { fn local_qabl_info(tables: &Tables, res: &Arc, face: &Arc) -> QueryableInfo { let mut info = if tables.whatami == WhatAmI::Router && res.context.is_some() { - res.context() - .hat + res_hat!(res) .router_qabls .iter() .fold(None, |accu, (zid, info)| { @@ -143,10 +147,8 @@ fn local_qabl_info(tables: &Tables, res: &Arc, face: &Arc) } else { None }; - if res.context.is_some() && tables.hat.full_net(WhatAmI::Peer) { - info = res - .context() - .hat + if res.context.is_some() && hat!(tables).full_net(WhatAmI::Peer) { + info = res_hat!(res) .peer_qabls .iter() .fold(info, |accu, (zid, info)| { @@ -165,7 +167,7 @@ fn local_qabl_info(tables: &Tables, res: &Arc, face: &Arc) .fold(info, |accu, ctx| { if ctx.face.id != face.id && ctx.face.whatami != WhatAmI::Peer || face.whatami != WhatAmI::Peer - || tables.hat.failover_brokering(ctx.face.zid, face.zid) + || hat!(tables).failover_brokering(ctx.face.zid, face.zid) { if let Some(info) = ctx.qabl.as_ref() { Some(match accu { @@ -230,11 +232,11 @@ fn propagate_simple_queryable( res: &Arc, src_face: Option<&mut Arc>, ) { - let full_peers_net = tables.hat.full_net(WhatAmI::Peer); + let full_peers_net = hat!(tables).full_net(WhatAmI::Peer); let faces = tables.faces.values().cloned(); for mut dst_face in faces { let info = local_qabl_info(tables, res, &dst_face); - let current_info = dst_face.hat.local_qabls.get(res); + let current_info = face_hat!(dst_face).local_qabls.get(res); if (src_face.is_none() || src_face.as_ref().unwrap().id != dst_face.id) && (current_info.is_none() || *current_info.unwrap() != info) && match tables.whatami { @@ -246,7 +248,7 @@ fn propagate_simple_queryable( && (src_face.is_none() || src_face.as_ref().unwrap().whatami != WhatAmI::Peer || dst_face.whatami != WhatAmI::Peer - || tables.hat.failover_brokering( + || hat!(tables).failover_brokering( src_face.as_ref().unwrap().zid, dst_face.zid, )) @@ -268,8 +270,7 @@ fn propagate_simple_queryable( } } { - get_mut_unchecked(&mut dst_face) - .hat + face_hat_mut!(&mut dst_face) .local_qabls .insert(res.clone(), info); let key_expr = Resource::decl_key(res, &mut dst_face); @@ -295,7 +296,7 @@ fn propagate_sourced_queryable( source: &ZenohId, net_type: WhatAmI, ) { - let net = tables.hat.get_net(net_type).unwrap(); + let net = 
hat!(tables).get_net(net_type).unwrap(); match net.get_idx(source) { Some(tree_sid) => { if net.trees.len() > tree_sid.index() { @@ -332,7 +333,7 @@ fn register_router_queryable( qabl_info: &QueryableInfo, router: ZenohId, ) { - let current_info = res.context().hat.router_qabls.get(&router); + let current_info = res_hat!(res).router_qabls.get(&router); if current_info.is_none() || current_info.unwrap() != qabl_info { // Register router queryable { @@ -341,12 +342,8 @@ fn register_router_queryable( res.expr(), router, ); - get_mut_unchecked(res) - .context_mut() - .hat - .router_qabls - .insert(router, *qabl_info); - tables.hat.router_qabls.insert(res.clone()); + res_hat_mut!(res).router_qabls.insert(router, *qabl_info); + hat_mut!(tables).router_qabls.insert(res.clone()); } // Propagate queryable to routers @@ -360,7 +357,7 @@ fn register_router_queryable( ); } - if tables.hat.full_net(WhatAmI::Peer) { + if hat!(tables).full_net(WhatAmI::Peer) { // Propagate queryable to peers if face.is_none() || face.as_ref().unwrap().whatami != WhatAmI::Peer { let local_info = local_peer_qabl_info(tables, res); @@ -433,17 +430,13 @@ fn register_peer_queryable( qabl_info: &QueryableInfo, peer: ZenohId, ) { - let current_info = res.context().hat.peer_qabls.get(&peer); + let current_info = res_hat!(res).peer_qabls.get(&peer); if current_info.is_none() || current_info.unwrap() != qabl_info { // Register peer queryable { log::debug!("Register peer queryable {} (peer: {})", res.expr(), peer,); - get_mut_unchecked(res) - .context_mut() - .hat - .peer_qabls - .insert(peer, *qabl_info); - tables.hat.peer_qabls.insert(res.clone()); + res_hat_mut!(res).peer_qabls.insert(peer, *qabl_info); + hat_mut!(tables).peer_qabls.insert(res.clone()); } // Propagate queryable to peers @@ -545,7 +538,7 @@ fn register_client_queryable( })) .qabl = Some(*qabl_info); } - get_mut_unchecked(face).hat.remote_qabls.insert(res.clone()); + face_hat_mut!(face).remote_qabls.insert(res.clone()); } pub fn declare_client_queryable( @@ -597,7 +590,7 @@ pub fn declare_client_queryable( ); } WhatAmI::Peer => { - if wtables.hat.full_net(WhatAmI::Peer) { + if hat!(wtables).full_net(WhatAmI::Peer) { let local_details = local_peer_qabl_info(&wtables, &res); let zid = wtables.zid; register_peer_queryable( @@ -634,12 +627,39 @@ pub fn declare_client_queryable( } } +pub fn declare_queryable( + tables: &TablesLock, + face: &mut Arc, + expr: &WireExpr, + qabl_info: &QueryableInfo, + node_id: RoutingContext, +) { + let rtables = zread!(tables.tables); + match (rtables.whatami, face.whatami) { + (WhatAmI::Router, WhatAmI::Router) => { + if let Some(router) = get_router(&rtables, face, node_id) { + declare_router_queryable(tables, rtables, face, expr, qabl_info, router) + } + } + (WhatAmI::Router, WhatAmI::Peer) + | (WhatAmI::Peer, WhatAmI::Router) + | (WhatAmI::Peer, WhatAmI::Peer) => { + if hat!(rtables).full_net(WhatAmI::Peer) { + if let Some(peer) = get_peer(&rtables, face, node_id) { + declare_peer_queryable(tables, rtables, face, expr, qabl_info, peer) + } + } else { + declare_client_queryable(tables, rtables, face, expr, qabl_info) + } + } + _ => declare_client_queryable(tables, rtables, face, expr, qabl_info), + } +} + #[inline] fn remote_router_qabls(tables: &Tables, res: &Arc) -> bool { res.context.is_some() - && res - .context() - .hat + && res_hat!(res) .router_qabls .keys() .any(|router| router != &tables.zid) @@ -648,9 +668,7 @@ fn remote_router_qabls(tables: &Tables, res: &Arc) -> bool { #[inline] fn remote_peer_qabls(tables: &Tables, res: 
&Arc) -> bool { res.context.is_some() - && res - .context() - .hat + && res_hat!(res) .peer_qabls .keys() .any(|peer| peer != &tables.zid) @@ -709,7 +727,7 @@ fn send_forget_sourced_queryable_to_net_childs( fn propagate_forget_simple_queryable(tables: &mut Tables, res: &mut Arc) { for face in tables.faces.values_mut() { - if face.hat.local_qabls.contains_key(res) { + if face_hat!(face).local_qabls.contains_key(res) { let wire_expr = Resource::get_best_key(res, "", face.id); face.primitives.send_declare(Declare { ext_qos: ext::QoSType::declare_default(), @@ -721,15 +739,15 @@ fn propagate_forget_simple_queryable(tables: &mut Tables, res: &mut Arc) { - if !tables.hat.full_net(WhatAmI::Peer) - && res.context().hat.router_qabls.len() == 1 - && res.context().hat.router_qabls.contains_key(&tables.zid) + if !hat!(tables).full_net(WhatAmI::Peer) + && res_hat!(res).router_qabls.len() == 1 + && res_hat!(res).router_qabls.contains_key(&tables.zid) { for mut face in tables .faces @@ -738,13 +756,13 @@ fn propagate_forget_simple_queryable_to_peers(tables: &mut Tables, res: &mut Arc .collect::>>() { if face.whatami == WhatAmI::Peer - && face.hat.local_qabls.contains_key(res) + && face_hat!(face).local_qabls.contains_key(res) && !res.session_ctxs.values().any(|s| { face.zid != s.face.zid && s.qabl.is_some() && (s.face.whatami == WhatAmI::Client || (s.face.whatami == WhatAmI::Peer - && tables.hat.failover_brokering(s.face.zid, face.zid))) + && hat!(tables).failover_brokering(s.face.zid, face.zid))) }) { let wire_expr = Resource::get_best_key(res, "", face.id); @@ -758,7 +776,7 @@ fn propagate_forget_simple_queryable_to_peers(tables: &mut Tables, res: &mut Arc }), }); - get_mut_unchecked(&mut face).hat.local_qabls.remove(res); + face_hat_mut!(&mut face).local_qabls.remove(res); } } } @@ -771,7 +789,7 @@ fn propagate_forget_sourced_queryable( source: &ZenohId, net_type: WhatAmI, ) { - let net = tables.hat.get_net(net_type).unwrap(); + let net = hat!(tables).get_net(net_type).unwrap(); match net.get_idx(source) { Some(tree_sid) => { if net.trees.len() > tree_sid.index() { @@ -806,19 +824,14 @@ fn unregister_router_queryable(tables: &mut Tables, res: &mut Arc, rou res.expr(), router, ); - get_mut_unchecked(res) - .context_mut() - .hat - .router_qabls - .remove(router); - - if res.context().hat.router_qabls.is_empty() { - tables - .hat + res_hat_mut!(res).router_qabls.remove(router); + + if res_hat!(res).router_qabls.is_empty() { + hat_mut!(tables) .router_qabls .retain(|qabl| !Arc::ptr_eq(qabl, res)); - if tables.hat.full_net(WhatAmI::Peer) { + if hat!(tables).full_net(WhatAmI::Peer) { undeclare_peer_queryable(tables, None, res, &tables.zid.clone()); } propagate_forget_simple_queryable(tables, res); @@ -833,7 +846,7 @@ fn undeclare_router_queryable( res: &mut Arc, router: &ZenohId, ) { - if res.context().hat.router_qabls.contains_key(router) { + if res_hat!(res).router_qabls.contains_key(router) { unregister_router_queryable(tables, res, router); propagate_forget_sourced_queryable(tables, res, face, router, WhatAmI::Router); } @@ -876,14 +889,12 @@ pub fn forget_router_queryable( fn unregister_peer_queryable(tables: &mut Tables, res: &mut Arc, peer: &ZenohId) { log::debug!("Unregister peer queryable {} (peer: {})", res.expr(), peer,); - get_mut_unchecked(res) - .context_mut() - .hat - .peer_qabls - .remove(peer); + res_hat_mut!(res).peer_qabls.remove(peer); - if res.context().hat.peer_qabls.is_empty() { - tables.hat.peer_qabls.retain(|qabl| !Arc::ptr_eq(qabl, res)); + if res_hat!(res).peer_qabls.is_empty() { 
+ hat_mut!(tables) + .peer_qabls + .retain(|qabl| !Arc::ptr_eq(qabl, res)); if tables.whatami == WhatAmI::Peer { propagate_forget_simple_queryable(tables, res); @@ -897,7 +908,7 @@ fn undeclare_peer_queryable( res: &mut Arc, peer: &ZenohId, ) { - if res.context().hat.peer_qabls.contains_key(peer) { + if res_hat!(res).peer_qabls.contains_key(peer) { unregister_peer_queryable(tables, res, peer); propagate_forget_sourced_queryable(tables, res, face, peer, WhatAmI::Peer); } @@ -958,7 +969,7 @@ pub(crate) fn undeclare_client_queryable( if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) { get_mut_unchecked(ctx).qabl = None; if ctx.qabl.is_none() { - get_mut_unchecked(face).hat.remote_qabls.remove(res); + face_hat_mut!(face).remote_qabls.remove(res); } } @@ -977,7 +988,7 @@ pub(crate) fn undeclare_client_queryable( } } WhatAmI::Peer => { - if tables.hat.full_net(WhatAmI::Peer) { + if hat!(tables).full_net(WhatAmI::Peer) { if client_qabls.is_empty() { undeclare_peer_queryable(tables, None, res, &tables.zid.clone()); } else { @@ -1001,7 +1012,7 @@ pub(crate) fn undeclare_client_queryable( if client_qabls.len() == 1 && !router_qabls && !peer_qabls { let face = &mut client_qabls[0]; - if face.hat.local_qabls.contains_key(res) { + if face_hat!(face).local_qabls.contains_key(res) { let wire_expr = Resource::get_best_key(res, "", face.id); face.primitives.send_declare(Declare { ext_qos: ext::QoSType::declare_default(), @@ -1013,7 +1024,7 @@ pub(crate) fn undeclare_client_queryable( }), }); - get_mut_unchecked(face).hat.local_qabls.remove(res); + face_hat_mut!(face).local_qabls.remove(res); } } } @@ -1052,17 +1063,42 @@ pub fn forget_client_queryable( } } +pub fn forget_queryable( + tables: &TablesLock, + face: &mut Arc, + expr: &WireExpr, + node_id: RoutingContext, +) { + let rtables = zread!(tables.tables); + match (rtables.whatami, face.whatami) { + (WhatAmI::Router, WhatAmI::Router) => { + if let Some(router) = get_router(&rtables, face, node_id) { + forget_router_queryable(tables, rtables, face, expr, &router) + } + } + (WhatAmI::Router, WhatAmI::Peer) + | (WhatAmI::Peer, WhatAmI::Router) + | (WhatAmI::Peer, WhatAmI::Peer) => { + if hat!(rtables).full_net(WhatAmI::Peer) { + if let Some(peer) = get_peer(&rtables, face, node_id) { + forget_peer_queryable(tables, rtables, face, expr, &peer) + } + } else { + forget_client_queryable(tables, rtables, face, expr) + } + } + _ => forget_client_queryable(tables, rtables, face, expr), + } +} + pub(crate) fn queries_new_face(tables: &mut Tables, face: &mut Arc) { match tables.whatami { WhatAmI::Router => { if face.whatami == WhatAmI::Client { - for qabl in tables.hat.router_qabls.iter() { + for qabl in hat!(tables).router_qabls.iter() { if qabl.context.is_some() { let info = local_qabl_info(tables, qabl, face); - get_mut_unchecked(face) - .hat - .local_qabls - .insert(qabl.clone(), info); + face_hat_mut!(face).local_qabls.insert(qabl.clone(), info); let key_expr = Resource::decl_key(qabl, face); face.primitives.send_declare(Declare { ext_qos: ext::QoSType::declare_default(), @@ -1076,27 +1112,20 @@ pub(crate) fn queries_new_face(tables: &mut Tables, face: &mut Arc) { }); } } - } else if face.whatami == WhatAmI::Peer && !tables.hat.full_net(WhatAmI::Peer) { - for qabl in tables.hat.router_qabls.iter() { + } else if face.whatami == WhatAmI::Peer && !hat!(tables).full_net(WhatAmI::Peer) { + for qabl in hat!(tables).router_qabls.iter() { if qabl.context.is_some() - && (qabl - .context() - .hat - .router_qabls - .keys() - .any(|r| *r != 
tables.zid) + && (res_hat!(qabl).router_qabls.keys().any(|r| *r != tables.zid) || qabl.session_ctxs.values().any(|s| { s.qabl.is_some() && (s.face.whatami == WhatAmI::Client || (s.face.whatami == WhatAmI::Peer - && tables.hat.failover_brokering(s.face.zid, face.zid))) + && hat!(tables) + .failover_brokering(s.face.zid, face.zid))) })) { let info = local_qabl_info(tables, qabl, face); - get_mut_unchecked(face) - .hat - .local_qabls - .insert(qabl.clone(), info); + face_hat_mut!(face).local_qabls.insert(qabl.clone(), info); let key_expr = Resource::decl_key(qabl, face); face.primitives.send_declare(Declare { ext_qos: ext::QoSType::declare_default(), @@ -1113,15 +1142,12 @@ pub(crate) fn queries_new_face(tables: &mut Tables, face: &mut Arc) { } } WhatAmI::Peer => { - if tables.hat.full_net(WhatAmI::Peer) { + if hat!(tables).full_net(WhatAmI::Peer) { if face.whatami == WhatAmI::Client { - for qabl in &tables.hat.peer_qabls { + for qabl in &hat!(tables).peer_qabls { if qabl.context.is_some() { let info = local_qabl_info(tables, qabl, face); - get_mut_unchecked(face) - .hat - .local_qabls - .insert(qabl.clone(), info); + face_hat_mut!(face).local_qabls.insert(qabl.clone(), info); let key_expr = Resource::decl_key(qabl, face); face.primitives.send_declare(Declare { ext_qos: ext::QoSType::declare_default(), @@ -1143,7 +1169,7 @@ pub(crate) fn queries_new_face(tables: &mut Tables, face: &mut Arc) { .cloned() .collect::>>() { - for qabl in face.hat.remote_qabls.iter() { + for qabl in face_hat!(face).remote_qabls.iter() { propagate_simple_queryable(tables, qabl, Some(&mut face.clone())); } } @@ -1156,7 +1182,7 @@ pub(crate) fn queries_new_face(tables: &mut Tables, face: &mut Arc) { .cloned() .collect::>>() { - for qabl in face.hat.remote_qabls.iter() { + for qabl in face_hat!(face).remote_qabls.iter() { propagate_simple_queryable(tables, qabl, Some(&mut face.clone())); } } @@ -1168,8 +1194,8 @@ pub(crate) fn queries_remove_node(tables: &mut Tables, node: &ZenohId, net_type: match net_type { WhatAmI::Router => { let mut qabls = vec![]; - for res in tables.hat.router_qabls.iter() { - for qabl in res.context().hat.router_qabls.keys() { + for res in hat!(tables).router_qabls.iter() { + for qabl in res_hat!(res).router_qabls.keys() { if qabl == node { qabls.push(res.clone()); } @@ -1189,8 +1215,8 @@ pub(crate) fn queries_remove_node(tables: &mut Tables, node: &ZenohId, net_type: } WhatAmI::Peer => { let mut qabls = vec![]; - for res in tables.hat.router_qabls.iter() { - for qabl in res.context().hat.router_qabls.keys() { + for res in hat!(tables).router_qabls.iter() { + for qabl in res_hat!(res).router_qabls.keys() { if qabl == node { qabls.push(res.clone()); } @@ -1225,11 +1251,11 @@ pub(crate) fn queries_remove_node(tables: &mut Tables, node: &ZenohId, net_type: pub(crate) fn queries_linkstate_change(tables: &mut Tables, zid: &ZenohId, links: &[ZenohId]) { if let Some(src_face) = tables.get_face(zid) { - if tables.hat.router_peers_failover_brokering + if hat!(tables).router_peers_failover_brokering && tables.whatami == WhatAmI::Router && src_face.whatami == WhatAmI::Peer { - for res in &src_face.hat.remote_qabls { + for res in &face_hat!(src_face).remote_qabls { let client_qabls = res .session_ctxs .values() @@ -1241,11 +1267,10 @@ pub(crate) fn queries_linkstate_change(tables: &mut Tables, zid: &ZenohId, links { let dst_face = &mut get_mut_unchecked(ctx).face; if dst_face.whatami == WhatAmI::Peer && src_face.zid != dst_face.zid { - if dst_face.hat.local_qabls.contains_key(res) { + if 
face_hat!(dst_face).local_qabls.contains_key(res) { let forget = !HatTables::failover_brokering_to(links, dst_face.zid) && { - let ctx_links = tables - .hat + let ctx_links = hat!(tables) .peers_net .as_ref() .map(|net| net.get_links(dst_face.zid)) @@ -1271,13 +1296,12 @@ pub(crate) fn queries_linkstate_change(tables: &mut Tables, zid: &ZenohId, links }), }); - get_mut_unchecked(dst_face).hat.local_qabls.remove(res); + face_hat_mut!(dst_face).local_qabls.remove(res); } } else if HatTables::failover_brokering_to(links, ctx.face.zid) { let dst_face = &mut get_mut_unchecked(ctx).face; let info = local_qabl_info(tables, res, dst_face); - get_mut_unchecked(dst_face) - .hat + face_hat_mut!(dst_face) .local_qabls .insert(res.clone(), info); let key_expr = Resource::decl_key(res, dst_face); @@ -1308,20 +1332,20 @@ pub(crate) fn queries_tree_change( // propagate qabls to new childs for (tree_sid, tree_childs) in new_childs.iter().enumerate() { if !tree_childs.is_empty() { - let net = tables.hat.get_net(net_type).unwrap(); + let net = hat!(tables).get_net(net_type).unwrap(); let tree_idx = NodeIndex::new(tree_sid); if net.graph.contains_node(tree_idx) { let tree_id = net.graph[tree_idx].zid; let qabls_res = match net_type { - WhatAmI::Router => &tables.hat.router_qabls, - _ => &tables.hat.peer_qabls, + WhatAmI::Router => &hat!(tables).router_qabls, + _ => &hat!(tables).peer_qabls, }; for res in qabls_res { let qabls = match net_type { - WhatAmI::Router => &res.context().hat.router_qabls, - _ => &res.context().hat.peer_qabls, + WhatAmI::Router => &res_hat!(res).router_qabls, + _ => &res_hat!(res).peer_qabls, }; if let Some(qabl_info) = qabls.get(&tree_id) { send_sourced_queryable_to_net_childs( @@ -1421,10 +1445,8 @@ pub(crate) fn compute_query_route( .unwrap_or_else(|| Cow::from(Resource::get_matches(tables, &key_expr))); let master = tables.whatami != WhatAmI::Router - || !tables.hat.full_net(WhatAmI::Peer) - || *tables - .hat - .elect_router(&tables.zid, &key_expr, tables.hat.shared_nodes.iter()) + || !hat!(tables).full_net(WhatAmI::Peer) + || *hat!(tables).elect_router(&tables.zid, &key_expr, hat!(tables).shared_nodes.iter()) == tables.zid; for mres in matches.iter() { @@ -1432,7 +1454,7 @@ pub(crate) fn compute_query_route( let complete = DEFAULT_INCLUDER.includes(mres.expr().as_bytes(), key_expr.as_bytes()); if tables.whatami == WhatAmI::Router { if master || source_type == WhatAmI::Router { - let net = tables.hat.routers_net.as_ref().unwrap(); + let net = hat!(tables).routers_net.as_ref().unwrap(); let router_source = match source_type { WhatAmI::Router => source, _ => net.idx.index() as RoutingContext, @@ -1443,13 +1465,13 @@ pub(crate) fn compute_query_route( tables, net, router_source, - &mres.context().hat.router_qabls, + &res_hat!(mres).router_qabls, complete, ); } - if (master || source_type != WhatAmI::Router) && tables.hat.full_net(WhatAmI::Peer) { - let net = tables.hat.peers_net.as_ref().unwrap(); + if (master || source_type != WhatAmI::Router) && hat!(tables).full_net(WhatAmI::Peer) { + let net = hat!(tables).peers_net.as_ref().unwrap(); let peer_source = match source_type { WhatAmI::Peer => source, _ => net.idx.index() as RoutingContext, @@ -1460,14 +1482,14 @@ pub(crate) fn compute_query_route( tables, net, peer_source, - &mres.context().hat.peer_qabls, + &res_hat!(mres).peer_qabls, complete, ); } } - if tables.whatami == WhatAmI::Peer && tables.hat.full_net(WhatAmI::Peer) { - let net = tables.hat.peers_net.as_ref().unwrap(); + if tables.whatami == WhatAmI::Peer && 
hat!(tables).full_net(WhatAmI::Peer) { + let net = hat!(tables).peers_net.as_ref().unwrap(); let peer_source = match source_type { WhatAmI::Router | WhatAmI::Peer => source, _ => net.idx.index() as RoutingContext, @@ -1478,7 +1500,7 @@ pub(crate) fn compute_query_route( tables, net, peer_source, - &mres.context().hat.peer_qabls, + &res_hat!(mres).peer_qabls, complete, ); } @@ -1542,8 +1564,8 @@ pub(crate) fn compute_local_replies( for mres in matches.iter() { let mres = mres.upgrade().unwrap(); if (mres.context.is_some() - && (!mres.context().hat.router_subs.is_empty() - || !mres.context().hat.peer_subs.is_empty())) + && (!res_hat!(mres).router_subs.is_empty() + || !res_hat!(mres).peer_subs.is_empty())) || mres.session_ctxs.values().any(|ctx| ctx.subs.is_some()) { result.push((Resource::get_best_key(&mres, "", face.id), ZBuf::default())); @@ -1553,3 +1575,174 @@ pub(crate) fn compute_local_replies( } result } + +pub(crate) fn compute_query_routes_(tables: &Tables, res: &Arc) -> QueryRoutes { + let mut routes = QueryRoutes { + routers_query_routes: vec![], + peers_query_routes: vec![], + peer_query_route: None, + client_query_route: None, + }; + let mut expr = RoutingExpr::new(res, ""); + if tables.whatami == WhatAmI::Router { + let indexes = hat!(tables) + .routers_net + .as_ref() + .unwrap() + .graph + .node_indices() + .collect::>(); + let max_idx = indexes.iter().max().unwrap(); + routes + .routers_query_routes + .resize_with(max_idx.index() + 1, || Arc::new(QueryTargetQablSet::new())); + + for idx in &indexes { + routes.routers_query_routes[idx.index()] = compute_query_route( + tables, + &mut expr, + idx.index() as RoutingContext, + WhatAmI::Router, + ); + } + + routes.peer_query_route = Some(compute_query_route( + tables, + &mut expr, + RoutingContext::default(), + WhatAmI::Peer, + )); + } + if (tables.whatami == WhatAmI::Router || tables.whatami == WhatAmI::Peer) + && hat!(tables).full_net(WhatAmI::Peer) + { + let indexes = hat!(tables) + .peers_net + .as_ref() + .unwrap() + .graph + .node_indices() + .collect::>(); + let max_idx = indexes.iter().max().unwrap(); + routes + .peers_query_routes + .resize_with(max_idx.index() + 1, || Arc::new(QueryTargetQablSet::new())); + + for idx in &indexes { + routes.peers_query_routes[idx.index()] = compute_query_route( + tables, + &mut expr, + idx.index() as RoutingContext, + WhatAmI::Peer, + ); + } + } + if tables.whatami == WhatAmI::Peer && !hat!(tables).full_net(WhatAmI::Peer) { + routes.client_query_route = Some(compute_query_route( + tables, + &mut expr, + RoutingContext::default(), + WhatAmI::Client, + )); + routes.peer_query_route = Some(compute_query_route( + tables, + &mut expr, + RoutingContext::default(), + WhatAmI::Peer, + )); + } + if tables.whatami == WhatAmI::Client { + routes.client_query_route = Some(compute_query_route( + tables, + &mut expr, + RoutingContext::default(), + WhatAmI::Client, + )); + } + routes +} + +pub(crate) fn compute_query_routes(tables: &mut Tables, res: &mut Arc) { + if res.context.is_some() { + let mut res_mut = res.clone(); + let res_mut = get_mut_unchecked(&mut res_mut); + let mut expr = RoutingExpr::new(res, ""); + if tables.whatami == WhatAmI::Router { + let indexes = hat!(tables) + .routers_net + .as_ref() + .unwrap() + .graph + .node_indices() + .collect::>(); + let max_idx = indexes.iter().max().unwrap(); + let routers_query_routes = &mut res_mut.context_mut().routers_query_routes; + routers_query_routes.clear(); + routers_query_routes + .resize_with(max_idx.index() + 1, || 
Arc::new(QueryTargetQablSet::new())); + + for idx in &indexes { + routers_query_routes[idx.index()] = compute_query_route( + tables, + &mut expr, + idx.index() as RoutingContext, + WhatAmI::Router, + ); + } + + res_mut.context_mut().peer_query_route = Some(compute_query_route( + tables, + &mut expr, + RoutingContext::default(), + WhatAmI::Peer, + )); + } + if (tables.whatami == WhatAmI::Router || tables.whatami == WhatAmI::Peer) + && hat!(tables).full_net(WhatAmI::Peer) + { + let indexes = hat!(tables) + .peers_net + .as_ref() + .unwrap() + .graph + .node_indices() + .collect::>(); + let max_idx = indexes.iter().max().unwrap(); + let peers_query_routes = &mut res_mut.context_mut().peers_query_routes; + peers_query_routes.clear(); + peers_query_routes + .resize_with(max_idx.index() + 1, || Arc::new(QueryTargetQablSet::new())); + + for idx in &indexes { + peers_query_routes[idx.index()] = compute_query_route( + tables, + &mut expr, + idx.index() as RoutingContext, + WhatAmI::Peer, + ); + } + } + if tables.whatami == WhatAmI::Peer && !hat!(tables).full_net(WhatAmI::Peer) { + res_mut.context_mut().client_query_route = Some(compute_query_route( + tables, + &mut expr, + RoutingContext::default(), + WhatAmI::Client, + )); + res_mut.context_mut().peer_query_route = Some(compute_query_route( + tables, + &mut expr, + RoutingContext::default(), + WhatAmI::Peer, + )); + } + if tables.whatami == WhatAmI::Client { + res_mut.context_mut().client_query_route = Some(compute_query_route( + tables, + &mut expr, + RoutingContext::default(), + WhatAmI::Client, + )); + } + } +} diff --git a/zenoh/src/net/routing/router.rs b/zenoh/src/net/routing/router.rs index 06745461e5..5788d053c0 100644 --- a/zenoh/src/net/routing/router.rs +++ b/zenoh/src/net/routing/router.rs @@ -17,12 +17,13 @@ pub use super::dispatcher::queries::*; pub use super::dispatcher::resource::*; use super::dispatcher::tables::Tables; use super::dispatcher::tables::TablesLock; -use super::hat::network::{shared_nodes, Network}; +use super::hat::closing; +use super::hat::init; +use super::hat::new_transport_unicast; pub use super::hat::pubsub::*; pub use super::hat::queries::*; use super::runtime::Runtime; -use crate::net::codec::Zenoh080Routing; -use crate::net::protocol::linkstate::LinkStateList; +use crate::net::routing::hat::handle_oam; use std::any::Any; use std::str::FromStr; use std::sync::Arc; @@ -30,19 +31,17 @@ use std::sync::{Mutex, RwLock}; use std::time::Duration; use uhlc::HLC; use zenoh_link::Link; -use zenoh_protocol::common::ZExtBody; use zenoh_protocol::core::{WhatAmI, WhatAmIMatcher, ZenohId}; -use zenoh_protocol::network::oam::id::OAM_LINKSTATE; use zenoh_protocol::network::{NetworkBody, NetworkMessage}; use zenoh_transport::{ - DeMux, DummyPrimitives, McastMux, Mux, Primitives, TransportMulticast, TransportPeer, + DeMux, DummyPrimitives, McastMux, Primitives, TransportMulticast, TransportPeer, TransportPeerEventHandler, TransportUnicast, }; // use zenoh_collections::Timer; use zenoh_result::ZResult; pub struct Router { - whatami: WhatAmI, + // whatami: WhatAmI, pub tables: Arc, } @@ -56,7 +55,7 @@ impl Router { queries_default_timeout: Duration, ) -> Self { Router { - whatami, + // whatami, tables: Arc::new(TablesLock { tables: RwLock::new(Tables::new( zid, @@ -83,37 +82,16 @@ impl Router { gossip_multihop: bool, autoconnect: WhatAmIMatcher, ) { - let mut tables = zwrite!(self.tables.tables); - if router_full_linkstate | gossip { - tables.hat.routers_net = Some(Network::new( - "[Routers network]".to_string(), - tables.zid, - 
runtime.clone(), - router_full_linkstate, - router_peers_failover_brokering, - gossip, - gossip_multihop, - autoconnect, - )); - } - if peer_full_linkstate | gossip { - tables.hat.peers_net = Some(Network::new( - "[Peers network]".to_string(), - tables.zid, - runtime, - peer_full_linkstate, - router_peers_failover_brokering, - gossip, - gossip_multihop, - autoconnect, - )); - } - if router_full_linkstate && peer_full_linkstate { - tables.hat.shared_nodes = shared_nodes( - tables.hat.routers_net.as_ref().unwrap(), - tables.hat.peers_net.as_ref().unwrap(), - ); - } + init( + &self.tables, + runtime, + router_full_linkstate, + peer_full_linkstate, + router_peers_failover_brokering, + gossip, + gossip_multihop, + autoconnect, + ) } pub fn new_primitives(&self, primitives: Arc) -> Arc { @@ -138,75 +116,14 @@ impl Router { &self, transport: TransportUnicast, ) -> ZResult> { - let ctrl_lock = zlock!(self.tables.ctrl_lock); - let mut tables = zwrite!(self.tables.tables); - let whatami = transport.get_whatami()?; - - let link_id = match (self.whatami, whatami) { - (WhatAmI::Router, WhatAmI::Router) => tables - .hat - .routers_net - .as_mut() - .unwrap() - .add_link(transport.clone()), - (WhatAmI::Router, WhatAmI::Peer) - | (WhatAmI::Peer, WhatAmI::Router) - | (WhatAmI::Peer, WhatAmI::Peer) => { - if let Some(net) = tables.hat.peers_net.as_mut() { - net.add_link(transport.clone()) - } else { - 0 - } - } - _ => 0, - }; - - if tables.hat.full_net(WhatAmI::Router) && tables.hat.full_net(WhatAmI::Peer) { - tables.hat.shared_nodes = shared_nodes( - tables.hat.routers_net.as_ref().unwrap(), - tables.hat.peers_net.as_ref().unwrap(), - ); - } - - let handler = Arc::new(LinkStateInterceptor::new( + Ok(Arc::new(LinkStateInterceptor::new( transport.clone(), self.tables.clone(), Face { tables: self.tables.clone(), - state: tables - .open_net_face( - transport.get_zid().unwrap(), - whatami, - #[cfg(feature = "stats")] - transport.get_stats().unwrap(), - Arc::new(Mux::new(transport)), - link_id, - ) - .upgrade() - .unwrap(), + state: new_transport_unicast(&self.tables, transport)?, }, - )); - - match (self.whatami, whatami) { - (WhatAmI::Router, WhatAmI::Router) => { - tables - .hat - .schedule_compute_trees(self.tables.clone(), WhatAmI::Router); - } - (WhatAmI::Router, WhatAmI::Peer) - | (WhatAmI::Peer, WhatAmI::Router) - | (WhatAmI::Peer, WhatAmI::Peer) => { - if tables.hat.full_net(WhatAmI::Peer) { - tables - .hat - .schedule_compute_trees(self.tables.clone(), WhatAmI::Peer); - } - } - _ => (), - } - drop(tables); - drop(ctrl_lock); - Ok(handler) + ))) } pub fn new_transport_multicast(&self, transport: TransportMulticast) -> ZResult<()> { @@ -282,109 +199,7 @@ impl TransportPeerEventHandler for LinkStateInterceptor { fn handle_message(&self, msg: NetworkMessage) -> ZResult<()> { log::trace!("Recv {:?}", msg); match msg.body { - NetworkBody::OAM(oam) => { - if oam.id == OAM_LINKSTATE { - if let ZExtBody::ZBuf(buf) = oam.body { - if let Ok(zid) = self.transport.get_zid() { - use zenoh_buffers::reader::HasReader; - use zenoh_codec::RCodec; - let codec = Zenoh080Routing::new(); - let mut reader = buf.reader(); - let list: LinkStateList = codec.read(&mut reader).unwrap(); - - let ctrl_lock = zlock!(self.tables.ctrl_lock); - let mut tables = zwrite!(self.tables.tables); - let whatami = self.transport.get_whatami()?; - match (tables.whatami, whatami) { - (WhatAmI::Router, WhatAmI::Router) => { - for (_, removed_node) in tables - .hat - .routers_net - .as_mut() - .unwrap() - .link_states(list.link_states, zid) - 
.removed_nodes - { - pubsub_remove_node( - &mut tables, - &removed_node.zid, - WhatAmI::Router, - ); - queries_remove_node( - &mut tables, - &removed_node.zid, - WhatAmI::Router, - ); - } - - if tables.hat.full_net(WhatAmI::Peer) { - tables.hat.shared_nodes = shared_nodes( - tables.hat.routers_net.as_ref().unwrap(), - tables.hat.peers_net.as_ref().unwrap(), - ); - } - - tables.hat.schedule_compute_trees( - self.tables.clone(), - WhatAmI::Router, - ); - } - (WhatAmI::Router, WhatAmI::Peer) - | (WhatAmI::Peer, WhatAmI::Router) - | (WhatAmI::Peer, WhatAmI::Peer) => { - if let Some(net) = tables.hat.peers_net.as_mut() { - let changes = net.link_states(list.link_states, zid); - if tables.hat.full_net(WhatAmI::Peer) { - for (_, removed_node) in changes.removed_nodes { - pubsub_remove_node( - &mut tables, - &removed_node.zid, - WhatAmI::Peer, - ); - queries_remove_node( - &mut tables, - &removed_node.zid, - WhatAmI::Peer, - ); - } - - if tables.whatami == WhatAmI::Router { - tables.hat.shared_nodes = shared_nodes( - tables.hat.routers_net.as_ref().unwrap(), - tables.hat.peers_net.as_ref().unwrap(), - ); - } - - tables.hat.schedule_compute_trees( - self.tables.clone(), - WhatAmI::Peer, - ); - } else { - for (_, updated_node) in changes.updated_nodes { - pubsub_linkstate_change( - &mut tables, - &updated_node.zid, - &updated_node.links, - ); - queries_linkstate_change( - &mut tables, - &updated_node.zid, - &updated_node.links, - ); - } - } - } - } - _ => (), - }; - drop(tables); - drop(ctrl_lock); - } - } - } - - Ok(()) - } + NetworkBody::OAM(oam) => handle_oam(&self.tables, oam, &self.transport), _ => self.demux.handle_message(msg), } } @@ -395,63 +210,7 @@ impl TransportPeerEventHandler for LinkStateInterceptor { fn closing(&self) { self.demux.closing(); - let tables_ref = self.tables.clone(); - match (self.transport.get_zid(), self.transport.get_whatami()) { - (Ok(zid), Ok(whatami)) => { - let ctrl_lock = zlock!(tables_ref.ctrl_lock); - let mut tables = zwrite!(tables_ref.tables); - match (tables.whatami, whatami) { - (WhatAmI::Router, WhatAmI::Router) => { - for (_, removed_node) in - tables.hat.routers_net.as_mut().unwrap().remove_link(&zid) - { - pubsub_remove_node(&mut tables, &removed_node.zid, WhatAmI::Router); - queries_remove_node(&mut tables, &removed_node.zid, WhatAmI::Router); - } - - if tables.hat.full_net(WhatAmI::Peer) { - tables.hat.shared_nodes = shared_nodes( - tables.hat.routers_net.as_ref().unwrap(), - tables.hat.peers_net.as_ref().unwrap(), - ); - } - - tables - .hat - .schedule_compute_trees(tables_ref.clone(), WhatAmI::Router); - } - (WhatAmI::Router, WhatAmI::Peer) - | (WhatAmI::Peer, WhatAmI::Router) - | (WhatAmI::Peer, WhatAmI::Peer) => { - if tables.hat.full_net(WhatAmI::Peer) { - for (_, removed_node) in - tables.hat.peers_net.as_mut().unwrap().remove_link(&zid) - { - pubsub_remove_node(&mut tables, &removed_node.zid, WhatAmI::Peer); - queries_remove_node(&mut tables, &removed_node.zid, WhatAmI::Peer); - } - - if tables.whatami == WhatAmI::Router { - tables.hat.shared_nodes = shared_nodes( - tables.hat.routers_net.as_ref().unwrap(), - tables.hat.peers_net.as_ref().unwrap(), - ); - } - - tables - .hat - .schedule_compute_trees(tables_ref.clone(), WhatAmI::Peer); - } else if let Some(net) = tables.hat.peers_net.as_mut() { - net.remove_link(&zid); - } - } - _ => (), - }; - drop(tables); - drop(ctrl_lock); - } - (_, _) => log::error!("Closed transport in session closing!"), - } + let _ = closing(&self.tables, &self.transport); } fn closed(&self) {} diff --git 
a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs index f2396eec86..5094962046 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -523,94 +523,94 @@ zenoh_build{{version="{}"}} 1 } } -fn routers_linkstate_data(context: &AdminContext, query: Query) { - let reply_key: OwnedKeyExpr = format!("@/router/{}/linkstate/routers", context.zid_str) - .try_into() - .unwrap(); - - let tables = zread!(context.runtime.router.tables.tables); - - if let Err(e) = query - .reply(Ok(Sample::new( - reply_key, - Value::from( - tables - .hat - .routers_net - .as_ref() - .map(|net| net.dot()) - .unwrap_or_else(|| "graph {}".to_string()) - .as_bytes() - .to_vec(), - ) - .encoding(KnownEncoding::TextPlain.into()), - ))) - .res() - { - log::error!("Error sending AdminSpace reply: {:?}", e); - } +fn routers_linkstate_data(_context: &AdminContext, _query: Query) { + // let reply_key: OwnedKeyExpr = format!("@/router/{}/linkstate/routers", context.zid_str) + // .try_into() + // .unwrap(); + + // let tables = zread!(context.runtime.router.tables.tables); + + // if let Err(e) = query + // .reply(Ok(Sample::new( + // reply_key, + // Value::from( + // tables + // .hat + // .routers_net + // .as_ref() + // .map(|net| net.dot()) + // .unwrap_or_else(|| "graph {}".to_string()) + // .as_bytes() + // .to_vec(), + // ) + // .encoding(KnownEncoding::TextPlain.into()), + // ))) + // .res() + // { + // log::error!("Error sending AdminSpace reply: {:?}", e); + // } } -fn peers_linkstate_data(context: &AdminContext, query: Query) { - let reply_key: OwnedKeyExpr = format!("@/router/{}/linkstate/peers", context.zid_str) - .try_into() - .unwrap(); - - let tables = zread!(context.runtime.router.tables.tables); - - if let Err(e) = query - .reply(Ok(Sample::new( - reply_key, - Value::from( - tables - .hat - .peers_net - .as_ref() - .map(|net| net.dot()) - .unwrap_or_else(|| "graph {}".to_string()) - .as_bytes() - .to_vec(), - ) - .encoding(KnownEncoding::TextPlain.into()), - ))) - .res() - { - log::error!("Error sending AdminSpace reply: {:?}", e); - } +fn peers_linkstate_data(_context: &AdminContext, _query: Query) { + // let reply_key: OwnedKeyExpr = format!("@/router/{}/linkstate/peers", context.zid_str) + // .try_into() + // .unwrap(); + + // let tables = zread!(context.runtime.router.tables.tables); + + // if let Err(e) = query + // .reply(Ok(Sample::new( + // reply_key, + // Value::from( + // tables + // .hat + // .peers_net + // .as_ref() + // .map(|net| net.dot()) + // .unwrap_or_else(|| "graph {}".to_string()) + // .as_bytes() + // .to_vec(), + // ) + // .encoding(KnownEncoding::TextPlain.into()), + // ))) + // .res() + // { + // log::error!("Error sending AdminSpace reply: {:?}", e); + // } } -fn subscribers_data(context: &AdminContext, query: Query) { - let tables = zread!(context.runtime.router.tables.tables); - for sub in tables.hat.router_subs.iter() { - let key = KeyExpr::try_from(format!( - "@/router/{}/subscriber/{}", - context.zid_str, - sub.expr() - )) - .unwrap(); - if query.key_expr().intersects(&key) { - if let Err(e) = query.reply(Ok(Sample::new(key, Value::empty()))).res() { - log::error!("Error sending AdminSpace reply: {:?}", e); - } - } - } +fn subscribers_data(_context: &AdminContext, _query: Query) { + // let tables = zread!(context.runtime.router.tables.tables); + // for sub in tables.hat.router_subs.iter() { + // let key = KeyExpr::try_from(format!( + // "@/router/{}/subscriber/{}", + // context.zid_str, + // sub.expr() + // )) + // 
.unwrap(); + // if query.key_expr().intersects(&key) { + // if let Err(e) = query.reply(Ok(Sample::new(key, Value::empty()))).res() { + // log::error!("Error sending AdminSpace reply: {:?}", e); + // } + // } + // } } -fn queryables_data(context: &AdminContext, query: Query) { - let tables = zread!(context.runtime.router.tables.tables); - for qabl in tables.hat.router_qabls.iter() { - let key = KeyExpr::try_from(format!( - "@/router/{}/queryable/{}", - context.zid_str, - qabl.expr() - )) - .unwrap(); - if query.key_expr().intersects(&key) { - if let Err(e) = query.reply(Ok(Sample::new(key, Value::empty()))).res() { - log::error!("Error sending AdminSpace reply: {:?}", e); - } - } - } +fn queryables_data(_context: &AdminContext, _query: Query) { + // let tables = zread!(context.runtime.router.tables.tables); + // for qabl in tables.hat.router_qabls.iter() { + // let key = KeyExpr::try_from(format!( + // "@/router/{}/queryable/{}", + // context.zid_str, + // qabl.expr() + // )) + // .unwrap(); + // if query.key_expr().intersects(&key) { + // if let Err(e) = query.reply(Ok(Sample::new(key, Value::empty()))).res() { + // log::error!("Error sending AdminSpace reply: {:?}", e); + // } + // } + // } } fn plugins_status(context: &AdminContext, query: Query) { From 239d8cb140a75e8f60f181f139dbe5a72298a001 Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Tue, 7 Nov 2023 16:40:08 +0100 Subject: [PATCH 005/122] Renaming --- zenoh/src/net/routing/hat/mod.rs | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/zenoh/src/net/routing/hat/mod.rs b/zenoh/src/net/routing/hat/mod.rs index a54837f5ab..9029298d34 100644 --- a/zenoh/src/net/routing/hat/mod.rs +++ b/zenoh/src/net/routing/hat/mod.rs @@ -183,7 +183,7 @@ impl HatTables { .unwrap_or(false) } - fn schedule_compute_trees_(&mut self, tables_ref: Arc, net_type: WhatAmI) { + fn schedule_compute_trees(&mut self, tables_ref: Arc, net_type: WhatAmI) { log::trace!("Schedule computations"); if (net_type == WhatAmI::Router && self.routers_trees_task.is_none()) || (net_type == WhatAmI::Peer && self.peers_trees_task.is_none()) @@ -446,13 +446,13 @@ pub(crate) fn new_transport_unicast( match (tables.whatami, whatami) { (WhatAmI::Router, WhatAmI::Router) => { - hat_mut!(tables).schedule_compute_trees_(tables_ref.clone(), WhatAmI::Router); + hat_mut!(tables).schedule_compute_trees(tables_ref.clone(), WhatAmI::Router); } (WhatAmI::Router, WhatAmI::Peer) | (WhatAmI::Peer, WhatAmI::Router) | (WhatAmI::Peer, WhatAmI::Peer) => { if hat_mut!(tables).full_net(WhatAmI::Peer) { - hat_mut!(tables).schedule_compute_trees_(tables_ref.clone(), WhatAmI::Peer); + hat_mut!(tables).schedule_compute_trees(tables_ref.clone(), WhatAmI::Peer); } } _ => (), @@ -500,7 +500,7 @@ pub(crate) fn handle_oam( } hat_mut!(tables) - .schedule_compute_trees_(tables_ref.clone(), WhatAmI::Router); + .schedule_compute_trees(tables_ref.clone(), WhatAmI::Router); } (WhatAmI::Router, WhatAmI::Peer) | (WhatAmI::Peer, WhatAmI::Router) @@ -529,7 +529,7 @@ pub(crate) fn handle_oam( } hat_mut!(tables) - .schedule_compute_trees_(tables_ref.clone(), WhatAmI::Peer); + .schedule_compute_trees(tables_ref.clone(), WhatAmI::Peer); } else { for (_, updated_node) in changes.updated_nodes { pubsub_linkstate_change( @@ -581,7 +581,7 @@ pub(crate) fn closing(tables_ref: &Arc, transport: &TransportUnicast ); } - hat_mut!(tables).schedule_compute_trees_(tables_ref.clone(), WhatAmI::Router); + hat_mut!(tables).schedule_compute_trees(tables_ref.clone(), WhatAmI::Router); } (WhatAmI::Router, 
WhatAmI::Peer) | (WhatAmI::Peer, WhatAmI::Router) @@ -604,7 +604,7 @@ pub(crate) fn closing(tables_ref: &Arc, transport: &TransportUnicast ); } - hat_mut!(tables).schedule_compute_trees_(tables_ref.clone(), WhatAmI::Peer); + hat_mut!(tables).schedule_compute_trees(tables_ref.clone(), WhatAmI::Peer); } else if let Some(net) = hat_mut!(tables).peers_net.as_mut() { net.remove_link(&zid); } From b0f999b1a2068d481e33af804a83294cf6f1b078 Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Tue, 7 Nov 2023 16:50:39 +0100 Subject: [PATCH 006/122] Visibility --- zenoh/src/net/routing/dispatcher/face.rs | 5 +- zenoh/src/net/routing/dispatcher/pubsub.rs | 4 +- zenoh/src/net/routing/dispatcher/queries.rs | 5 +- zenoh/src/net/routing/dispatcher/tables.rs | 4 +- zenoh/src/net/routing/hat/mod.rs | 48 +++++------ zenoh/src/net/routing/hat/network.rs | 90 ++++++++++----------- zenoh/src/net/routing/hat/pubsub.rs | 4 +- zenoh/src/net/routing/hat/queries.rs | 16 ++-- zenoh/src/net/routing/router.rs | 2 - 9 files changed, 88 insertions(+), 90 deletions(-) diff --git a/zenoh/src/net/routing/dispatcher/face.rs b/zenoh/src/net/routing/dispatcher/face.rs index 11f4b2d17a..4cfa75f2be 100644 --- a/zenoh/src/net/routing/dispatcher/face.rs +++ b/zenoh/src/net/routing/dispatcher/face.rs @@ -1,5 +1,3 @@ -use crate::net::routing::hat::HatFace; - // // Copyright (c) 2023 ZettaScale Technology // @@ -16,6 +14,9 @@ use crate::net::routing::hat::HatFace; use super::super::router::*; use super::tables::TablesLock; use super::{resource::*, tables}; +use crate::net::routing::hat::pubsub::*; +use crate::net::routing::hat::queries::*; +use crate::net::routing::hat::HatFace; use std::any::Any; use std::collections::HashMap; use std::fmt; diff --git a/zenoh/src/net/routing/dispatcher/pubsub.rs b/zenoh/src/net/routing/dispatcher/pubsub.rs index 97529b8f69..8dd3076b72 100644 --- a/zenoh/src/net/routing/dispatcher/pubsub.rs +++ b/zenoh/src/net/routing/dispatcher/pubsub.rs @@ -14,9 +14,9 @@ use super::super::hat::pubsub::{compute_data_route, compute_data_routes, compute_data_routes_}; use super::face::FaceState; use super::resource::{DataRoutes, Direction, PullCaches, Resource}; -use super::tables::{compute_matching_pulls, RoutingContext, RoutingExpr, Tables}; +use super::tables::{RoutingContext, RoutingExpr, Tables}; use crate::net::routing::hat::map_routing_context; -use crate::net::routing::hat::pubsub::{egress_filter, ingress_filter}; +use crate::net::routing::hat::pubsub::{compute_matching_pulls, egress_filter, ingress_filter}; use std::sync::Arc; use std::sync::RwLock; use zenoh_core::zread; diff --git a/zenoh/src/net/routing/dispatcher/queries.rs b/zenoh/src/net/routing/dispatcher/queries.rs index 856fec3e3c..6ca3ed31c9 100644 --- a/zenoh/src/net/routing/dispatcher/queries.rs +++ b/zenoh/src/net/routing/dispatcher/queries.rs @@ -1,5 +1,3 @@ -use crate::net::routing::hat::pubsub::ingress_filter; - // // Copyright (c) 2023 ZettaScale Technology // @@ -19,9 +17,10 @@ use super::super::hat::queries::compute_query_routes; use super::super::hat::queries::compute_query_routes_; use super::face::FaceState; use super::resource::{QueryRoute, QueryRoutes, QueryTargetQablSet, Resource}; -use super::tables::egress_filter; use super::tables::RoutingContext; use super::tables::{RoutingExpr, Tables, TablesLock}; +use crate::net::routing::hat::pubsub::egress_filter; +use crate::net::routing::hat::pubsub::ingress_filter; use async_trait::async_trait; use std::collections::HashMap; use std::sync::{Arc, Weak}; diff --git 
a/zenoh/src/net/routing/dispatcher/tables.rs b/zenoh/src/net/routing/dispatcher/tables.rs index ed84f46c0f..8adccd522a 100644 --- a/zenoh/src/net/routing/dispatcher/tables.rs +++ b/zenoh/src/net/routing/dispatcher/tables.rs @@ -13,8 +13,8 @@ use crate::net::routing::hat::HatTables; // Contributors: // ZettaScale Zenoh Team, // -pub use super::super::hat::pubsub::*; -pub use super::super::hat::queries::*; +use super::super::hat::pubsub::*; +use super::super::hat::queries::*; use super::face::FaceState; pub use super::pubsub::*; pub use super::queries::*; diff --git a/zenoh/src/net/routing/hat/mod.rs b/zenoh/src/net/routing/hat/mod.rs index 9029298d34..cf0fb26047 100644 --- a/zenoh/src/net/routing/hat/mod.rs +++ b/zenoh/src/net/routing/hat/mod.rs @@ -19,12 +19,12 @@ //! [Click here for Zenoh's documentation](../zenoh/index.html) use self::{ network::Network, - pubsub::undeclare_client_subscription, + pubsub::{compute_data_routes_, undeclare_client_subscription}, queries::{compute_query_routes_, undeclare_client_queryable}, }; use super::dispatcher::{ face::FaceState, - tables::{compute_data_routes_, Resource, RoutingContext, Tables, TablesLock}, + tables::{Resource, RoutingContext, Tables, TablesLock}, }; use crate::{ hat, hat_mut, @@ -63,16 +63,16 @@ zconfigurable! { } pub struct HatTables { - pub(crate) router_subs: HashSet>, - pub(crate) peer_subs: HashSet>, - pub(crate) router_qabls: HashSet>, - pub(crate) peer_qabls: HashSet>, - pub(crate) routers_net: Option, - pub(crate) peers_net: Option, - pub(crate) shared_nodes: Vec, - pub(crate) routers_trees_task: Option>, - pub(crate) peers_trees_task: Option>, - pub(crate) router_peers_failover_brokering: bool, + router_subs: HashSet>, + peer_subs: HashSet>, + router_qabls: HashSet>, + peer_qabls: HashSet>, + routers_net: Option, + peers_net: Option, + shared_nodes: Vec, + routers_trees_task: Option>, + peers_trees_task: Option>, + router_peers_failover_brokering: bool, } impl HatTables { @@ -92,7 +92,7 @@ impl HatTables { } #[inline] - pub(crate) fn get_net(&self, net_type: WhatAmI) -> Option<&Network> { + fn get_net(&self, net_type: WhatAmI) -> Option<&Network> { match net_type { WhatAmI::Router => self.routers_net.as_ref(), WhatAmI::Peer => self.peers_net.as_ref(), @@ -101,7 +101,7 @@ impl HatTables { } #[inline] - pub(crate) fn full_net(&self, net_type: WhatAmI) -> bool { + fn full_net(&self, net_type: WhatAmI) -> bool { match net_type { WhatAmI::Router => self .routers_net @@ -118,7 +118,7 @@ impl HatTables { } #[inline] - pub(crate) fn get_router_links(&self, peer: ZenohId) -> impl Iterator + '_ { + fn get_router_links(&self, peer: ZenohId) -> impl Iterator + '_ { self.peers_net .as_ref() .unwrap() @@ -134,7 +134,7 @@ impl HatTables { } #[inline] - pub(crate) fn elect_router<'a>( + fn elect_router<'a>( &'a self, self_zid: &'a ZenohId, key_expr: &str, @@ -168,13 +168,13 @@ impl HatTables { } #[inline] - pub(crate) fn failover_brokering_to(source_links: &[ZenohId], dest: ZenohId) -> bool { + fn failover_brokering_to(source_links: &[ZenohId], dest: ZenohId) -> bool { // if source_links is empty then gossip is probably disabled in source peer !source_links.is_empty() && !source_links.contains(&dest) } #[inline] - pub(crate) fn failover_brokering(&self, peer1: ZenohId, peer2: ZenohId) -> bool { + fn failover_brokering(&self, peer1: ZenohId, peer2: ZenohId) -> bool { self.router_peers_failover_brokering && self .peers_net @@ -229,7 +229,7 @@ pub(crate) struct HatContext { } impl HatContext { - pub fn new() -> Self { + pub(crate) fn new() -> 
Self { Self { router_subs: HashSet::new(), peer_subs: HashSet::new(), @@ -240,14 +240,14 @@ impl HatContext { } pub(crate) struct HatFace { - pub(crate) local_subs: HashSet>, - pub(crate) remote_subs: HashSet>, - pub(crate) local_qabls: HashMap, QueryableInfo>, - pub(crate) remote_qabls: HashSet>, + local_subs: HashSet>, + remote_subs: HashSet>, + local_qabls: HashMap, QueryableInfo>, + remote_qabls: HashSet>, } impl HatFace { - pub fn new() -> Self { + pub(crate) fn new() -> Self { Self { local_subs: HashSet::new(), remote_subs: HashSet::new(), diff --git a/zenoh/src/net/routing/hat/network.rs b/zenoh/src/net/routing/hat/network.rs index 43f94e4f9d..61b3f6c78a 100644 --- a/zenoh/src/net/routing/hat/network.rs +++ b/zenoh/src/net/routing/hat/network.rs @@ -38,12 +38,12 @@ struct Details { } #[derive(Clone)] -pub(crate) struct Node { - pub(crate) zid: ZenohId, - pub(crate) whatami: Option, - pub(crate) locators: Option>, - pub(crate) sn: u64, - pub(crate) links: Vec, +pub(super) struct Node { + pub(super) zid: ZenohId, + pub(super) whatami: Option, + pub(super) locators: Option>, + pub(super) sn: u64, + pub(super) links: Vec, } impl std::fmt::Debug for Node { @@ -52,8 +52,8 @@ impl std::fmt::Debug for Node { } } -pub(crate) struct Link { - pub(crate) transport: TransportUnicast, +pub(super) struct Link { + pub(super) transport: TransportUnicast, zid: ZenohId, mappings: VecMap, local_mappings: VecMap, @@ -71,57 +71,57 @@ impl Link { } #[inline] - pub(crate) fn set_zid_mapping(&mut self, psid: u64, zid: ZenohId) { + pub(super) fn set_zid_mapping(&mut self, psid: u64, zid: ZenohId) { self.mappings.insert(psid.try_into().unwrap(), zid); } #[inline] - pub(crate) fn get_zid(&self, psid: &u64) -> Option<&ZenohId> { + pub(super) fn get_zid(&self, psid: &u64) -> Option<&ZenohId> { self.mappings.get((*psid).try_into().unwrap()) } #[inline] - pub(crate) fn set_local_psid_mapping(&mut self, psid: u64, local_psid: u64) { + pub(super) fn set_local_psid_mapping(&mut self, psid: u64, local_psid: u64) { self.local_mappings .insert(psid.try_into().unwrap(), local_psid); } #[inline] - pub(crate) fn get_local_psid(&self, psid: &u64) -> Option<&u64> { + pub(super) fn get_local_psid(&self, psid: &u64) -> Option<&u64> { self.local_mappings.get((*psid).try_into().unwrap()) } } -pub(crate) struct Changes { - pub(crate) updated_nodes: Vec<(NodeIndex, Node)>, - pub(crate) removed_nodes: Vec<(NodeIndex, Node)>, +pub(super) struct Changes { + pub(super) updated_nodes: Vec<(NodeIndex, Node)>, + pub(super) removed_nodes: Vec<(NodeIndex, Node)>, } #[derive(Clone)] -pub(crate) struct Tree { - pub(crate) parent: Option, - pub(crate) childs: Vec, - pub(crate) directions: Vec>, +pub(super) struct Tree { + pub(super) parent: Option, + pub(super) childs: Vec, + pub(super) directions: Vec>, } -pub(crate) struct Network { - pub(crate) name: String, - pub(crate) full_linkstate: bool, - pub(crate) router_peers_failover_brokering: bool, - pub(crate) gossip: bool, - pub(crate) gossip_multihop: bool, - pub(crate) autoconnect: WhatAmIMatcher, - pub(crate) idx: NodeIndex, - pub(crate) links: VecMap, - pub(crate) trees: Vec, - pub(crate) distances: Vec, - pub(crate) graph: petgraph::stable_graph::StableUnGraph, - pub(crate) runtime: Runtime, +pub(super) struct Network { + pub(super) name: String, + pub(super) full_linkstate: bool, + pub(super) router_peers_failover_brokering: bool, + pub(super) gossip: bool, + pub(super) gossip_multihop: bool, + pub(super) autoconnect: WhatAmIMatcher, + pub(super) idx: NodeIndex, + pub(super) links: 
VecMap, + pub(super) trees: Vec, + pub(super) distances: Vec, + pub(super) graph: petgraph::stable_graph::StableUnGraph, + pub(super) runtime: Runtime, } impl Network { #[allow(clippy::too_many_arguments)] - pub(crate) fn new( + pub(super) fn new( name: String, zid: ZenohId, runtime: Runtime, @@ -161,7 +161,7 @@ impl Network { } //noinspection ALL - // pub(crate) fn dot(&self) -> String { + // pub(super) fn dot(&self) -> String { // std::format!( // "{:?}", // petgraph::dot::Dot::with_config(&self.graph, &[petgraph::dot::Config::EdgeNoLabel]) @@ -169,29 +169,29 @@ impl Network { // } #[inline] - pub(crate) fn get_node(&self, zid: &ZenohId) -> Option<&Node> { + pub(super) fn get_node(&self, zid: &ZenohId) -> Option<&Node> { self.graph.node_weights().find(|weight| weight.zid == *zid) } #[inline] - pub(crate) fn get_idx(&self, zid: &ZenohId) -> Option { + pub(super) fn get_idx(&self, zid: &ZenohId) -> Option { self.graph .node_indices() .find(|idx| self.graph[*idx].zid == *zid) } #[inline] - pub(crate) fn get_link(&self, id: usize) -> Option<&Link> { + pub(super) fn get_link(&self, id: usize) -> Option<&Link> { self.links.get(id) } #[inline] - pub(crate) fn get_link_from_zid(&self, zid: &ZenohId) -> Option<&Link> { + pub(super) fn get_link_from_zid(&self, zid: &ZenohId) -> Option<&Link> { self.links.values().find(|link| link.zid == *zid) } #[inline] - pub(crate) fn get_local_context( + pub(super) fn get_local_context( &self, context: RoutingContext, link_id: usize, @@ -345,7 +345,7 @@ impl Network { self.graph.update_edge(idx1, idx2, weight); } - pub(crate) fn link_states(&mut self, link_states: Vec, src: ZenohId) -> Changes { + pub(super) fn link_states(&mut self, link_states: Vec, src: ZenohId) -> Changes { log::trace!("{} Received from {} raw: {:?}", self.name, src, link_states); let graph = &self.graph; @@ -698,7 +698,7 @@ impl Network { } } - pub(crate) fn add_link(&mut self, transport: TransportUnicast) -> usize { + pub(super) fn add_link(&mut self, transport: TransportUnicast) -> usize { let free_index = { let mut i = 0; while self.links.contains_key(i) { @@ -810,7 +810,7 @@ impl Network { free_index } - pub(crate) fn remove_link(&mut self, zid: &ZenohId) -> Vec<(NodeIndex, Node)> { + pub(super) fn remove_link(&mut self, zid: &ZenohId) -> Vec<(NodeIndex, Node)> { log::trace!("{} remove_link {}", self.name, zid); self.links.retain(|_, link| link.zid != *zid); self.graph[self.idx].links.retain(|link| *link != *zid); @@ -889,7 +889,7 @@ impl Network { removed } - pub(crate) fn compute_trees(&mut self) -> Vec> { + pub(super) fn compute_trees(&mut self) -> Vec> { let indexes = self.graph.node_indices().collect::>(); let max_idx = indexes.iter().max().unwrap(); @@ -990,7 +990,7 @@ impl Network { } #[inline] - pub(crate) fn get_links(&self, node: ZenohId) -> &[ZenohId] { + pub(super) fn get_links(&self, node: ZenohId) -> &[ZenohId] { self.get_node(&node) .map(|node| &node.links[..]) .unwrap_or_default() @@ -998,7 +998,7 @@ impl Network { } #[inline] -pub(crate) fn shared_nodes(net1: &Network, net2: &Network) -> Vec { +pub(super) fn shared_nodes(net1: &Network, net2: &Network) -> Vec { net1.graph .node_references() .filter_map(|(_, node1)| { diff --git a/zenoh/src/net/routing/hat/pubsub.rs b/zenoh/src/net/routing/hat/pubsub.rs index c6eda3adda..76e82c7702 100644 --- a/zenoh/src/net/routing/hat/pubsub.rs +++ b/zenoh/src/net/routing/hat/pubsub.rs @@ -511,7 +511,7 @@ fn declare_client_subscription( } } -pub fn declare_subscription( +pub(crate) fn declare_subscription( tables: &TablesLock, face: 
&mut Arc, expr: &WireExpr, @@ -934,7 +934,7 @@ fn forget_client_subscription( } } -pub fn forget_subscription( +pub(crate) fn forget_subscription( tables: &TablesLock, face: &mut Arc, expr: &WireExpr, diff --git a/zenoh/src/net/routing/hat/queries.rs b/zenoh/src/net/routing/hat/queries.rs index dd2d3519b4..43c2eb737e 100644 --- a/zenoh/src/net/routing/hat/queries.rs +++ b/zenoh/src/net/routing/hat/queries.rs @@ -369,7 +369,7 @@ fn register_router_queryable( propagate_simple_queryable(tables, res, face); } -pub fn declare_router_queryable( +fn declare_router_queryable( tables: &TablesLock, rtables: RwLockReadGuard, face: &mut Arc, @@ -456,7 +456,7 @@ fn register_peer_queryable( } } -pub fn declare_peer_queryable( +fn declare_peer_queryable( tables: &TablesLock, rtables: RwLockReadGuard, face: &mut Arc, @@ -541,7 +541,7 @@ fn register_client_queryable( face_hat_mut!(face).remote_qabls.insert(res.clone()); } -pub fn declare_client_queryable( +fn declare_client_queryable( tables: &TablesLock, rtables: RwLockReadGuard, face: &mut Arc, @@ -627,7 +627,7 @@ pub fn declare_client_queryable( } } -pub fn declare_queryable( +pub(crate) fn declare_queryable( tables: &TablesLock, face: &mut Arc, expr: &WireExpr, @@ -852,7 +852,7 @@ fn undeclare_router_queryable( } } -pub fn forget_router_queryable( +fn forget_router_queryable( tables: &TablesLock, rtables: RwLockReadGuard, face: &mut Arc, @@ -914,7 +914,7 @@ fn undeclare_peer_queryable( } } -pub fn forget_peer_queryable( +fn forget_peer_queryable( tables: &TablesLock, rtables: RwLockReadGuard, face: &mut Arc, @@ -1029,7 +1029,7 @@ pub(crate) fn undeclare_client_queryable( } } -pub fn forget_client_queryable( +fn forget_client_queryable( tables: &TablesLock, rtables: RwLockReadGuard, face: &mut Arc, @@ -1063,7 +1063,7 @@ pub fn forget_client_queryable( } } -pub fn forget_queryable( +pub(crate) fn forget_queryable( tables: &TablesLock, face: &mut Arc, expr: &WireExpr, diff --git a/zenoh/src/net/routing/router.rs b/zenoh/src/net/routing/router.rs index 5788d053c0..833720c58c 100644 --- a/zenoh/src/net/routing/router.rs +++ b/zenoh/src/net/routing/router.rs @@ -20,8 +20,6 @@ use super::dispatcher::tables::TablesLock; use super::hat::closing; use super::hat::init; use super::hat::new_transport_unicast; -pub use super::hat::pubsub::*; -pub use super::hat::queries::*; use super::runtime::Runtime; use crate::net::routing::hat::handle_oam; use std::any::Any; From 2a5466bad77de0f964f1dabb9fc6ef33ef76e8d8 Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Mon, 13 Nov 2023 15:10:03 +0100 Subject: [PATCH 007/122] Move ingress/egress filters out of pubsub --- zenoh/src/net/routing/dispatcher/pubsub.rs | 4 +- zenoh/src/net/routing/dispatcher/queries.rs | 3 +- zenoh/src/net/routing/hat/mod.rs | 47 ++++++++++++++++++++- zenoh/src/net/routing/hat/pubsub.rs | 45 -------------------- 4 files changed, 49 insertions(+), 50 deletions(-) diff --git a/zenoh/src/net/routing/dispatcher/pubsub.rs b/zenoh/src/net/routing/dispatcher/pubsub.rs index 8dd3076b72..cd43a1ccab 100644 --- a/zenoh/src/net/routing/dispatcher/pubsub.rs +++ b/zenoh/src/net/routing/dispatcher/pubsub.rs @@ -15,8 +15,8 @@ use super::super::hat::pubsub::{compute_data_route, compute_data_routes, compute use super::face::FaceState; use super::resource::{DataRoutes, Direction, PullCaches, Resource}; use super::tables::{RoutingContext, RoutingExpr, Tables}; -use crate::net::routing::hat::map_routing_context; -use crate::net::routing::hat::pubsub::{compute_matching_pulls, egress_filter, ingress_filter}; +use 
crate::net::routing::hat::pubsub::compute_matching_pulls; +use crate::net::routing::hat::{egress_filter, ingress_filter, map_routing_context}; use std::sync::Arc; use std::sync::RwLock; use zenoh_core::zread; diff --git a/zenoh/src/net/routing/dispatcher/queries.rs b/zenoh/src/net/routing/dispatcher/queries.rs index 6ca3ed31c9..2a2a97e91a 100644 --- a/zenoh/src/net/routing/dispatcher/queries.rs +++ b/zenoh/src/net/routing/dispatcher/queries.rs @@ -19,8 +19,7 @@ use super::face::FaceState; use super::resource::{QueryRoute, QueryRoutes, QueryTargetQablSet, Resource}; use super::tables::RoutingContext; use super::tables::{RoutingExpr, Tables, TablesLock}; -use crate::net::routing::hat::pubsub::egress_filter; -use crate::net::routing::hat::pubsub::ingress_filter; +use crate::net::routing::hat::{egress_filter, ingress_filter}; use async_trait::async_trait; use std::collections::HashMap; use std::sync::{Arc, Weak}; diff --git a/zenoh/src/net/routing/hat/mod.rs b/zenoh/src/net/routing/hat/mod.rs index cf0fb26047..9eeca30c05 100644 --- a/zenoh/src/net/routing/hat/mod.rs +++ b/zenoh/src/net/routing/hat/mod.rs @@ -24,7 +24,7 @@ use self::{ }; use super::dispatcher::{ face::FaceState, - tables::{Resource, RoutingContext, Tables, TablesLock}, + tables::{Resource, RoutingContext, RoutingExpr, Tables, TablesLock}, }; use crate::{ hat, hat_mut, @@ -758,3 +758,48 @@ fn get_peer(tables: &Tables, face: &Arc, nodeid: RoutingContext) -> O } } } + +#[inline] +pub(crate) fn ingress_filter(tables: &Tables, face: &FaceState, expr: &mut RoutingExpr) -> bool { + tables.whatami != WhatAmI::Router + || face.whatami != WhatAmI::Peer + || hat!(tables).peers_net.is_none() + || tables.zid + == *hat!(tables).elect_router( + &tables.zid, + expr.full_expr(), + hat!(tables).get_router_links(face.zid), + ) +} + +#[inline] +pub(crate) fn egress_filter( + tables: &Tables, + src_face: &FaceState, + out_face: &Arc, + expr: &mut RoutingExpr, +) -> bool { + if src_face.id != out_face.id + && match (src_face.mcast_group.as_ref(), out_face.mcast_group.as_ref()) { + (Some(l), Some(r)) => l != r, + _ => true, + } + { + let dst_master = tables.whatami != WhatAmI::Router + || out_face.whatami != WhatAmI::Peer + || hat!(tables).peers_net.is_none() + || tables.zid + == *hat!(tables).elect_router( + &tables.zid, + expr.full_expr(), + hat!(tables).get_router_links(out_face.zid), + ); + + return dst_master + && (src_face.whatami != WhatAmI::Peer + || out_face.whatami != WhatAmI::Peer + || hat!(tables).full_net(WhatAmI::Peer) + || hat!(tables).failover_brokering(src_face.zid, out_face.zid)); + } + false +} diff --git a/zenoh/src/net/routing/hat/pubsub.rs b/zenoh/src/net/routing/hat/pubsub.rs index 76e82c7702..ea67068b9f 100644 --- a/zenoh/src/net/routing/hat/pubsub.rs +++ b/zenoh/src/net/routing/hat/pubsub.rs @@ -1608,48 +1608,3 @@ pub(crate) fn compute_data_routes(tables: &mut Tables, res: &mut Arc) res_mut.context_mut().matching_pulls = compute_matching_pulls(tables, &mut expr); } } - -#[inline] -pub(crate) fn ingress_filter(tables: &Tables, face: &FaceState, expr: &mut RoutingExpr) -> bool { - tables.whatami != WhatAmI::Router - || face.whatami != WhatAmI::Peer - || hat!(tables).peers_net.is_none() - || tables.zid - == *hat!(tables).elect_router( - &tables.zid, - expr.full_expr(), - hat!(tables).get_router_links(face.zid), - ) -} - -#[inline] -pub(crate) fn egress_filter( - tables: &Tables, - src_face: &FaceState, - out_face: &Arc, - expr: &mut RoutingExpr, -) -> bool { - if src_face.id != out_face.id - && match 
(src_face.mcast_group.as_ref(), out_face.mcast_group.as_ref()) { - (Some(l), Some(r)) => l != r, - _ => true, - } - { - let dst_master = tables.whatami != WhatAmI::Router - || out_face.whatami != WhatAmI::Peer - || hat!(tables).peers_net.is_none() - || tables.zid - == *hat!(tables).elect_router( - &tables.zid, - expr.full_expr(), - hat!(tables).get_router_links(out_face.zid), - ); - - return dst_master - && (src_face.whatami != WhatAmI::Peer - || out_face.whatami != WhatAmI::Peer - || hat!(tables).full_net(WhatAmI::Peer) - || hat!(tables).failover_brokering(src_face.zid, out_face.zid)); - } - false -} From 0f2fa242cd7f93ad5544cb5269c803a3b2778401 Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Wed, 15 Nov 2023 11:30:25 +0100 Subject: [PATCH 008/122] Make hat abstract --- zenoh/src/net/routing/dispatcher/face.rs | 14 +- zenoh/src/net/routing/dispatcher/pubsub.rs | 37 +- zenoh/src/net/routing/dispatcher/queries.rs | 39 +- zenoh/src/net/routing/dispatcher/resource.rs | 23 +- zenoh/src/net/routing/dispatcher/tables.rs | 85 +- zenoh/src/net/routing/hat/mod.rs | 933 ++++++++++++------- zenoh/src/net/routing/hat/pubsub.rs | 584 ++++++------ zenoh/src/net/routing/hat/queries.rs | 606 ++++++------ zenoh/src/net/routing/router.rs | 40 +- 9 files changed, 1284 insertions(+), 1077 deletions(-) diff --git a/zenoh/src/net/routing/dispatcher/face.rs b/zenoh/src/net/routing/dispatcher/face.rs index 4cfa75f2be..c327f2df13 100644 --- a/zenoh/src/net/routing/dispatcher/face.rs +++ b/zenoh/src/net/routing/dispatcher/face.rs @@ -14,9 +14,6 @@ use super::super::router::*; use super::tables::TablesLock; use super::{resource::*, tables}; -use crate::net::routing::hat::pubsub::*; -use crate::net::routing::hat::queries::*; -use crate::net::routing::hat::HatFace; use std::any::Any; use std::collections::HashMap; use std::fmt; @@ -55,6 +52,7 @@ impl FaceState { primitives: Arc, link_id: usize, mcast_group: Option, + hat: Box, ) -> Arc { Arc::new(FaceState { id, @@ -69,7 +67,7 @@ impl FaceState { next_qid: 0, pending_queries: HashMap::new(), mcast_group, - hat: Box::new(HatFace::new()), + hat, }) } @@ -118,7 +116,7 @@ impl Primitives for Face { unregister_expr(&self.tables, &mut self.state.clone(), m.id); } zenoh_protocol::network::DeclareBody::DeclareSubscriber(m) => { - declare_subscription( + ctrl_lock.declare_subscription( &self.tables, &mut self.state.clone(), &m.wire_expr, @@ -127,7 +125,7 @@ impl Primitives for Face { ); } zenoh_protocol::network::DeclareBody::UndeclareSubscriber(m) => { - forget_subscription( + ctrl_lock.forget_subscription( &self.tables, &mut self.state.clone(), &m.ext_wire_expr.wire_expr, @@ -135,7 +133,7 @@ impl Primitives for Face { ); } zenoh_protocol::network::DeclareBody::DeclareQueryable(m) => { - declare_queryable( + ctrl_lock.declare_queryable( &self.tables, &mut self.state.clone(), &m.wire_expr, @@ -144,7 +142,7 @@ impl Primitives for Face { ); } zenoh_protocol::network::DeclareBody::UndeclareQueryable(m) => { - forget_queryable( + ctrl_lock.forget_queryable( &self.tables, &mut self.state.clone(), &m.ext_wire_expr.wire_expr, diff --git a/zenoh/src/net/routing/dispatcher/pubsub.rs b/zenoh/src/net/routing/dispatcher/pubsub.rs index cd43a1ccab..3b9e7f20d1 100644 --- a/zenoh/src/net/routing/dispatcher/pubsub.rs +++ b/zenoh/src/net/routing/dispatcher/pubsub.rs @@ -11,12 +11,9 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::super::hat::pubsub::{compute_data_route, compute_data_routes, compute_data_routes_}; use super::face::FaceState; use super::resource::{DataRoutes, 
Direction, PullCaches, Resource}; use super::tables::{RoutingContext, RoutingExpr, Tables}; -use crate::net::routing::hat::pubsub::compute_matching_pulls; -use crate::net::routing::hat::{egress_filter, ingress_filter, map_routing_context}; use std::sync::Arc; use std::sync::RwLock; use zenoh_core::zread; @@ -28,7 +25,7 @@ use zenoh_protocol::{ use zenoh_sync::get_mut_unchecked; pub(crate) fn compute_data_routes_from(tables: &mut Tables, res: &mut Arc) { - compute_data_routes(tables, res); + tables.hat_code.clone().compute_data_routes(tables, res); let res = get_mut_unchecked(res); for child in res.childs.values_mut() { compute_data_routes_from(tables, child); @@ -41,11 +38,14 @@ pub(crate) fn compute_matches_data_routes_<'a>( ) -> Vec<(Arc, DataRoutes)> { let mut routes = vec![]; if res.context.is_some() { - routes.push((res.clone(), compute_data_routes_(tables, res))); + routes.push(( + res.clone(), + tables.hat_code.compute_data_routes_(tables, res), + )); for match_ in &res.context().matches { let match_ = match_.upgrade().unwrap(); if !Arc::ptr_eq(&match_, res) { - let match_routes = compute_data_routes_(tables, &match_); + let match_routes = tables.hat_code.compute_data_routes_(tables, &match_); routes.push((match_, match_routes)); } } @@ -202,7 +202,7 @@ fn get_matching_pulls( res.as_ref() .and_then(|res| res.context.as_ref()) .map(|ctx| ctx.matching_pulls.clone()) - .unwrap_or_else(|| compute_matching_pulls(tables, expr)) + .unwrap_or_else(|| tables.hat_code.compute_matching_pulls(tables, expr)) } macro_rules! cache_data { @@ -272,12 +272,20 @@ pub fn full_reentrant_route_data( inc_stats!(face, rx, admin, payload) } - if ingress_filter(&tables, face, &mut expr) { + if tables.hat_code.ingress_filter(&tables, face, &mut expr) { let res = Resource::get_resource(&prefix, expr.suffix); // let route = get_data_route(&tables, face, &res, &mut expr, routing_context); - let local_context = map_routing_context(&tables, face, routing_context); - let route = compute_data_route(&tables, &mut expr, local_context, face.whatami); + let local_context = + tables + .hat_code + .map_routing_context(&tables, face, routing_context); + let route = tables.hat_code.compute_data_route( + &tables, + &mut expr, + local_context, + face.whatami, + ); let matching_pulls = get_matching_pulls(&tables, &res, &mut expr); @@ -286,7 +294,10 @@ pub fn full_reentrant_route_data( if route.len() == 1 && matching_pulls.len() == 0 { let (outface, key_expr, context) = route.values().next().unwrap(); - if egress_filter(&tables, face, outface, &mut expr) { + if tables + .hat_code + .egress_filter(&tables, face, outface, &mut expr) + { drop(tables); #[cfg(feature = "stats")] if !admin { @@ -314,7 +325,9 @@ pub fn full_reentrant_route_data( let route = route .values() .filter(|(outface, _key_expr, _context)| { - egress_filter(&tables, face, outface, &mut expr) + tables + .hat_code + .egress_filter(&tables, face, outface, &mut expr) }) .cloned() .collect::>(); diff --git a/zenoh/src/net/routing/dispatcher/queries.rs b/zenoh/src/net/routing/dispatcher/queries.rs index 2a2a97e91a..c12feb901e 100644 --- a/zenoh/src/net/routing/dispatcher/queries.rs +++ b/zenoh/src/net/routing/dispatcher/queries.rs @@ -11,15 +11,10 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::super::hat::queries::compute_local_replies; -use super::super::hat::queries::compute_query_route; -use super::super::hat::queries::compute_query_routes; -use super::super::hat::queries::compute_query_routes_; use super::face::FaceState; use 
super::resource::{QueryRoute, QueryRoutes, QueryTargetQablSet, Resource}; use super::tables::RoutingContext; use super::tables::{RoutingExpr, Tables, TablesLock}; -use crate::net::routing::hat::{egress_filter, ingress_filter}; use async_trait::async_trait; use std::collections::HashMap; use std::sync::{Arc, Weak}; @@ -41,7 +36,7 @@ pub(crate) struct Query { } pub(crate) fn compute_query_routes_from(tables: &mut Tables, res: &mut Arc) { - compute_query_routes(tables, res); + tables.hat_code.clone().compute_query_routes(tables, res); let res = get_mut_unchecked(res); for child in res.childs.values_mut() { compute_query_routes_from(tables, child); @@ -54,11 +49,14 @@ pub(crate) fn compute_matches_query_routes_( ) -> Vec<(Arc, QueryRoutes)> { let mut routes = vec![]; if res.context.is_some() { - routes.push((res.clone(), compute_query_routes_(tables, res))); + routes.push(( + res.clone(), + tables.hat_code.compute_query_routes_(tables, res), + )); for match_ in &res.context().matches { let match_ = match_.upgrade().unwrap(); if !Arc::ptr_eq(&match_, res) { - let match_routes = compute_query_routes_(tables, &match_); + let match_routes = tables.hat_code.compute_query_routes_(tables, &match_); routes.push((match_, match_routes)); } } @@ -88,7 +86,10 @@ fn compute_final_route( TargetType::All => { let mut route = HashMap::new(); for qabl in qabls.iter() { - if egress_filter(tables, src_face, &qabl.direction.0, expr) { + if tables + .hat_code + .egress_filter(tables, src_face, &qabl.direction.0, expr) + { #[cfg(feature = "complete_n")] { route.entry(qabl.direction.0.id).or_insert_with(|| { @@ -112,7 +113,11 @@ fn compute_final_route( TargetType::AllComplete => { let mut route = HashMap::new(); for qabl in qabls.iter() { - if qabl.complete > 0 && egress_filter(tables, src_face, &qabl.direction.0, expr) { + if qabl.complete > 0 + && tables + .hat_code + .egress_filter(tables, src_face, &qabl.direction.0, expr) + { #[cfg(feature = "complete_n")] { route.entry(qabl.direction.0.id).or_insert_with(|| { @@ -442,9 +447,14 @@ pub fn route_query( inc_req_stats!(face, rx, admin, body) } - if ingress_filter(&rtables, face, &mut expr) { + if rtables.hat_code.ingress_filter(&rtables, face, &mut expr) { // let res = Resource::get_resource(&prefix, expr.suffix); - let route = compute_query_route(&rtables, &mut expr, routing_context, face.whatami); + let route = rtables.hat_code.compute_query_route( + &rtables, + &mut expr, + routing_context, + face.whatami, + ); let query = Arc::new(Query { src_face: face.clone(), @@ -453,7 +463,10 @@ pub fn route_query( let queries_lock = zwrite!(tables_ref.queries_lock); let route = compute_final_route(&rtables, &route, face, &mut expr, &target, query); - let local_replies = compute_local_replies(&rtables, &prefix, expr.suffix, face); + let local_replies = + rtables + .hat_code + .compute_local_replies(&rtables, &prefix, expr.suffix, face); let zid = rtables.zid; drop(queries_lock); diff --git a/zenoh/src/net/routing/dispatcher/resource.rs b/zenoh/src/net/routing/dispatcher/resource.rs index 41ffaf7a52..b514043cab 100644 --- a/zenoh/src/net/routing/dispatcher/resource.rs +++ b/zenoh/src/net/routing/dispatcher/resource.rs @@ -13,7 +13,6 @@ // use super::face::FaceState; use super::tables::{Tables, TablesLock}; -use crate::net::routing::hat::HatContext; use std::any::Any; use std::collections::HashMap; use std::convert::TryInto; @@ -92,11 +91,11 @@ pub(crate) struct ResourceContext { } impl ResourceContext { - fn new() -> ResourceContext { + fn new(hat: Box) -> ResourceContext { 
ResourceContext { matches: Vec::new(), matching_pulls: Arc::new(Vec::new()), - hat: Box::new(HatContext::new()), + hat, valid_data_routes: false, routers_data_routes: Vec::new(), peers_data_routes: Vec::new(), @@ -375,12 +374,12 @@ impl Resource { } pub fn make_resource( - _tables: &mut Tables, + tables: &mut Tables, from: &mut Arc, suffix: &str, ) -> Arc { if suffix.is_empty() { - Resource::upgrade_resource(from); + Resource::upgrade_resource(from, tables.hat_code.new_resource()); from.clone() } else if let Some(stripped_suffix) = suffix.strip_prefix('/') { let (chunk, rest) = match stripped_suffix.find('/') { @@ -389,13 +388,13 @@ impl Resource { }; match get_mut_unchecked(from).childs.get_mut(chunk) { - Some(res) => Resource::make_resource(_tables, res, rest), + Some(res) => Resource::make_resource(tables, res, rest), None => { let mut new = Arc::new(Resource::new(from, chunk, None)); if log::log_enabled!(log::Level::Debug) && rest.is_empty() { log::debug!("Register resource {}", new.expr()); } - let res = Resource::make_resource(_tables, &mut new, rest); + let res = Resource::make_resource(tables, &mut new, rest); get_mut_unchecked(from) .childs .insert(String::from(chunk), new); @@ -405,7 +404,7 @@ impl Resource { } else { match from.parent.clone() { Some(mut parent) => { - Resource::make_resource(_tables, &mut parent, &[&from.suffix, suffix].concat()) + Resource::make_resource(tables, &mut parent, &[&from.suffix, suffix].concat()) } None => { let (chunk, rest) = match suffix[1..].find('/') { @@ -414,13 +413,13 @@ impl Resource { }; match get_mut_unchecked(from).childs.get_mut(chunk) { - Some(res) => Resource::make_resource(_tables, res, rest), + Some(res) => Resource::make_resource(tables, res, rest), None => { let mut new = Arc::new(Resource::new(from, chunk, None)); if log::log_enabled!(log::Level::Debug) && rest.is_empty() { log::debug!("Register resource {}", new.expr()); } - let res = Resource::make_resource(_tables, &mut new, rest); + let res = Resource::make_resource(tables, &mut new, rest); get_mut_unchecked(from) .childs .insert(String::from(chunk), new); @@ -676,9 +675,9 @@ impl Resource { } } - pub fn upgrade_resource(res: &mut Arc) { + pub fn upgrade_resource(res: &mut Arc, hat: Box) { if res.context.is_none() { - get_mut_unchecked(res).context = Some(ResourceContext::new()); + get_mut_unchecked(res).context = Some(ResourceContext::new(hat)); } } } diff --git a/zenoh/src/net/routing/dispatcher/tables.rs b/zenoh/src/net/routing/dispatcher/tables.rs index 8adccd522a..54867b493e 100644 --- a/zenoh/src/net/routing/dispatcher/tables.rs +++ b/zenoh/src/net/routing/dispatcher/tables.rs @@ -1,5 +1,4 @@ -use crate::net::routing::hat::HatTables; - +use crate::net::routing::hat::HatBaseTrait; // // Copyright (c) 2023 ZettaScale Technology // @@ -13,12 +12,12 @@ use crate::net::routing::hat::HatTables; // Contributors: // ZettaScale Zenoh Team, // -use super::super::hat::pubsub::*; -use super::super::hat::queries::*; use super::face::FaceState; pub use super::pubsub::*; pub use super::queries::*; pub use super::resource::*; +use crate::net::routing::hat::HatCode; +use crate::net::routing::hat::HatTrait; use std::any::Any; use std::collections::HashMap; use std::sync::{Arc, Weak}; @@ -29,7 +28,6 @@ use zenoh_protocol::core::{ExprId, WhatAmI, ZenohId}; use zenoh_protocol::network::Mapping; #[cfg(feature = "stats")] use zenoh_transport::stats::TransportStats; -use zenoh_transport::Primitives; // use zenoh_collections::Timer; use zenoh_sync::get_mut_unchecked; @@ -73,6 +71,7 @@ pub 
struct Tables { pub(crate) mcast_faces: Vec>, pub(crate) pull_caches_lock: Mutex<()>, pub(crate) hat: Box, + pub(crate) hat_code: Arc, // TODO make this a Box } impl Tables { @@ -84,6 +83,7 @@ impl Tables { router_peers_failover_brokering: bool, _queries_default_timeout: Duration, ) -> Self { + let hat_code = Arc::new(HatCode {}); Tables { zid, whatami, @@ -97,7 +97,8 @@ impl Tables { mcast_groups: vec![], mcast_faces: vec![], pull_caches_lock: Mutex::new(()), - hat: Box::new(HatTables::new(router_peers_failover_brokering)), + hat: hat_code.new_tables(router_peers_failover_brokering), + hat_code, } } @@ -129,75 +130,9 @@ impl Tables { self.faces.values().find(|face| face.zid == *zid) } - pub(crate) fn open_net_face( - &mut self, - zid: ZenohId, - whatami: WhatAmI, - #[cfg(feature = "stats")] stats: Arc, - primitives: Arc, - link_id: usize, - ) -> Weak { - let fid = self.face_counter; - self.face_counter += 1; - let mut newface = self - .faces - .entry(fid) - .or_insert_with(|| { - FaceState::new( - fid, - zid, - whatami, - #[cfg(feature = "stats")] - Some(stats), - primitives.clone(), - link_id, - None, - ) - }) - .clone(); - log::debug!("New {}", newface); - - pubsub_new_face(self, &mut newface); - queries_new_face(self, &mut newface); - - Arc::downgrade(&newface) - } - - pub fn open_face( - &mut self, - zid: ZenohId, - whatami: WhatAmI, - primitives: Arc, - ) -> Weak { - let fid = self.face_counter; - self.face_counter += 1; - let mut newface = self - .faces - .entry(fid) - .or_insert_with(|| { - FaceState::new( - fid, - zid, - whatami, - #[cfg(feature = "stats")] - None, - primitives.clone(), - 0, - None, - ) - }) - .clone(); - log::debug!("New {}", newface); - - pubsub_new_face(self, &mut newface); - queries_new_face(self, &mut newface); - - Arc::downgrade(&newface) - } - fn compute_routes(&mut self, res: &mut Arc) { - compute_data_routes(self, res); - compute_query_routes(self, res); + self.hat_code.clone().compute_data_routes(self, res); + self.hat_code.clone().compute_query_routes(self, res); } pub(crate) fn compute_matches_routes(&mut self, res: &mut Arc) { @@ -228,6 +163,6 @@ pub fn close_face(tables: &TablesLock, face: &Weak) { pub struct TablesLock { pub tables: RwLock, - pub ctrl_lock: Mutex<()>, + pub(crate) ctrl_lock: Box>, pub queries_lock: RwLock<()>, } diff --git a/zenoh/src/net/routing/hat/mod.rs b/zenoh/src/net/routing/hat/mod.rs index 9eeca30c05..cef5816aa2 100644 --- a/zenoh/src/net/routing/hat/mod.rs +++ b/zenoh/src/net/routing/hat/mod.rs @@ -19,12 +19,15 @@ //! 
[Click here for Zenoh's documentation](../zenoh/index.html) use self::{ network::Network, - pubsub::{compute_data_routes_, undeclare_client_subscription}, - queries::{compute_query_routes_, undeclare_client_queryable}, + pubsub::{pubsub_new_face, undeclare_client_subscription}, + queries::{queries_new_face, undeclare_client_queryable}, }; use super::dispatcher::{ face::FaceState, - tables::{Resource, RoutingContext, RoutingExpr, Tables, TablesLock}, + tables::{ + DataRoutes, PullCaches, QueryRoutes, QueryTargetQablSet, Resource, Route, RoutingContext, + RoutingExpr, Tables, TablesLock, + }, }; use crate::{ hat, hat_mut, @@ -41,18 +44,25 @@ use crate::{ }; use async_std::task::JoinHandle; use std::{ + any::Any, collections::{hash_map::DefaultHasher, HashMap, HashSet}, hash::Hasher, sync::Arc, }; +use zenoh_buffers::ZBuf; use zenoh_config::{WhatAmI, WhatAmIMatcher, ZenohId}; use zenoh_protocol::{ common::ZExtBody, - network::{declare::queryable::ext::QueryableInfo, oam::id::OAM_LINKSTATE, Oam}, + core::WireExpr, + network::{ + declare::{queryable::ext::QueryableInfo, subscriber::ext::SubscriberInfo}, + oam::id::OAM_LINKSTATE, + Oam, + }, }; use zenoh_result::ZResult; use zenoh_sync::get_mut_unchecked; -use zenoh_transport::{Mux, TransportUnicast}; +use zenoh_transport::{Mux, Primitives, TransportUnicast}; pub mod network; pub mod pubsub; @@ -62,7 +72,7 @@ zconfigurable! { static ref TREES_COMPUTATION_DELAY: u64 = 100; } -pub struct HatTables { +struct HatTables { router_subs: HashSet>, peer_subs: HashSet>, router_qabls: HashSet>, @@ -76,7 +86,7 @@ pub struct HatTables { } impl HatTables { - pub fn new(router_peers_failover_brokering: bool) -> Self { + fn new(router_peers_failover_brokering: bool) -> Self { Self { router_subs: HashSet::new(), peer_subs: HashSet::new(), @@ -221,7 +231,412 @@ impl HatTables { } } -pub(crate) struct HatContext { +pub(crate) struct HatCode {} + +impl HatBaseTrait for HatCode { + fn init( + &self, + tables: &mut Tables, + runtime: Runtime, + router_full_linkstate: bool, + peer_full_linkstate: bool, + router_peers_failover_brokering: bool, + gossip: bool, + gossip_multihop: bool, + autoconnect: WhatAmIMatcher, + ) { + if router_full_linkstate | gossip { + hat_mut!(tables).routers_net = Some(Network::new( + "[Routers network]".to_string(), + tables.zid, + runtime.clone(), + router_full_linkstate, + router_peers_failover_brokering, + gossip, + gossip_multihop, + autoconnect, + )); + } + if peer_full_linkstate | gossip { + hat_mut!(tables).peers_net = Some(Network::new( + "[Peers network]".to_string(), + tables.zid, + runtime, + peer_full_linkstate, + router_peers_failover_brokering, + gossip, + gossip_multihop, + autoconnect, + )); + } + if router_full_linkstate && peer_full_linkstate { + hat_mut!(tables).shared_nodes = shared_nodes( + hat!(tables).routers_net.as_ref().unwrap(), + hat!(tables).peers_net.as_ref().unwrap(), + ); + } + } + + fn new_local_face( + &self, + tables: &mut Tables, + _tables_ref: &Arc, + primitives: Arc, + ) -> ZResult> { + let fid = tables.face_counter; + tables.face_counter += 1; + let mut newface = tables + .faces + .entry(fid) + .or_insert_with(|| { + FaceState::new( + fid, + tables.zid, + WhatAmI::Client, + #[cfg(feature = "stats")] + None, + primitives.clone(), + 0, + None, + Box::new(HatFace::new()), + ) + }) + .clone(); + log::debug!("New {}", newface); + + pubsub_new_face(tables, &mut newface); + queries_new_face(tables, &mut newface); + + Ok(newface) + } + + fn new_transport_unicast_face( + &self, + tables: &mut Tables, + tables_ref: 
&Arc, + transport: TransportUnicast, + ) -> ZResult> { + let whatami = transport.get_whatami()?; + + let link_id = match (tables.whatami, whatami) { + (WhatAmI::Router, WhatAmI::Router) => hat_mut!(tables) + .routers_net + .as_mut() + .unwrap() + .add_link(transport.clone()), + (WhatAmI::Router, WhatAmI::Peer) + | (WhatAmI::Peer, WhatAmI::Router) + | (WhatAmI::Peer, WhatAmI::Peer) => { + if let Some(net) = hat_mut!(tables).peers_net.as_mut() { + net.add_link(transport.clone()) + } else { + 0 + } + } + _ => 0, + }; + + if hat!(tables).full_net(WhatAmI::Router) && hat!(tables).full_net(WhatAmI::Peer) { + hat_mut!(tables).shared_nodes = shared_nodes( + hat!(tables).routers_net.as_ref().unwrap(), + hat!(tables).peers_net.as_ref().unwrap(), + ); + } + + let fid = tables.face_counter; + tables.face_counter += 1; + let zid = transport.get_zid()?; + #[cfg(feature = "stats")] + let stats = transport.get_stats()?; + let mut newface = tables + .faces + .entry(fid) + .or_insert_with(|| { + FaceState::new( + fid, + zid, + whatami, + #[cfg(feature = "stats")] + Some(stats), + Arc::new(Mux::new(transport)), + link_id, + None, + Box::new(HatFace::new()), + ) + }) + .clone(); + log::debug!("New {}", newface); + + pubsub_new_face(tables, &mut newface); + queries_new_face(tables, &mut newface); + + match (tables.whatami, whatami) { + (WhatAmI::Router, WhatAmI::Router) => { + hat_mut!(tables).schedule_compute_trees(tables_ref.clone(), WhatAmI::Router); + } + (WhatAmI::Router, WhatAmI::Peer) + | (WhatAmI::Peer, WhatAmI::Router) + | (WhatAmI::Peer, WhatAmI::Peer) => { + if hat_mut!(tables).full_net(WhatAmI::Peer) { + hat_mut!(tables).schedule_compute_trees(tables_ref.clone(), WhatAmI::Peer); + } + } + _ => (), + } + Ok(newface) + } + + fn handle_oam( + &self, + tables: &mut Tables, + tables_ref: &Arc, + oam: Oam, + transport: &TransportUnicast, + ) -> ZResult<()> { + if oam.id == OAM_LINKSTATE { + if let ZExtBody::ZBuf(buf) = oam.body { + if let Ok(zid) = transport.get_zid() { + use zenoh_buffers::reader::HasReader; + use zenoh_codec::RCodec; + let codec = Zenoh080Routing::new(); + let mut reader = buf.reader(); + let list: LinkStateList = codec.read(&mut reader).unwrap(); + + let whatami = transport.get_whatami()?; + match (tables.whatami, whatami) { + (WhatAmI::Router, WhatAmI::Router) => { + for (_, removed_node) in hat_mut!(tables) + .routers_net + .as_mut() + .unwrap() + .link_states(list.link_states, zid) + .removed_nodes + { + pubsub_remove_node(tables, &removed_node.zid, WhatAmI::Router); + queries_remove_node(tables, &removed_node.zid, WhatAmI::Router); + } + + if hat!(tables).full_net(WhatAmI::Peer) { + hat_mut!(tables).shared_nodes = shared_nodes( + hat!(tables).routers_net.as_ref().unwrap(), + hat!(tables).peers_net.as_ref().unwrap(), + ); + } + + hat_mut!(tables) + .schedule_compute_trees(tables_ref.clone(), WhatAmI::Router); + } + (WhatAmI::Router, WhatAmI::Peer) + | (WhatAmI::Peer, WhatAmI::Router) + | (WhatAmI::Peer, WhatAmI::Peer) => { + if let Some(net) = hat_mut!(tables).peers_net.as_mut() { + let changes = net.link_states(list.link_states, zid); + if hat!(tables).full_net(WhatAmI::Peer) { + for (_, removed_node) in changes.removed_nodes { + pubsub_remove_node( + tables, + &removed_node.zid, + WhatAmI::Peer, + ); + queries_remove_node( + tables, + &removed_node.zid, + WhatAmI::Peer, + ); + } + + if tables.whatami == WhatAmI::Router { + hat_mut!(tables).shared_nodes = shared_nodes( + hat!(tables).routers_net.as_ref().unwrap(), + hat!(tables).peers_net.as_ref().unwrap(), + ); + } + + 
hat_mut!(tables) + .schedule_compute_trees(tables_ref.clone(), WhatAmI::Peer); + } else { + for (_, updated_node) in changes.updated_nodes { + pubsub_linkstate_change( + tables, + &updated_node.zid, + &updated_node.links, + ); + queries_linkstate_change( + tables, + &updated_node.zid, + &updated_node.links, + ); + } + } + } + } + _ => (), + }; + } + } + } + + Ok(()) + } + + fn map_routing_context( + &self, + tables: &Tables, + face: &FaceState, + routing_context: RoutingContext, + ) -> RoutingContext { + match tables.whatami { + WhatAmI::Router => match face.whatami { + WhatAmI::Router => hat!(tables) + .routers_net + .as_ref() + .unwrap() + .get_local_context(routing_context, face.link_id), + WhatAmI::Peer => { + if hat!(tables).full_net(WhatAmI::Peer) { + hat!(tables) + .peers_net + .as_ref() + .unwrap() + .get_local_context(routing_context, face.link_id) + } else { + 0 + } + } + _ => 0, + }, + WhatAmI::Peer => { + if hat!(tables).full_net(WhatAmI::Peer) { + hat!(tables) + .peers_net + .as_ref() + .unwrap() + .get_local_context(routing_context, face.link_id) + } else { + 0 + } + } + _ => 0, + } + } + + fn closing( + &self, + tables: &mut Tables, + tables_ref: &Arc, + transport: &TransportUnicast, + ) -> ZResult<()> { + match (transport.get_zid(), transport.get_whatami()) { + (Ok(zid), Ok(whatami)) => { + match (tables.whatami, whatami) { + (WhatAmI::Router, WhatAmI::Router) => { + for (_, removed_node) in hat_mut!(tables) + .routers_net + .as_mut() + .unwrap() + .remove_link(&zid) + { + pubsub_remove_node(tables, &removed_node.zid, WhatAmI::Router); + queries_remove_node(tables, &removed_node.zid, WhatAmI::Router); + } + + if hat!(tables).full_net(WhatAmI::Peer) { + hat_mut!(tables).shared_nodes = shared_nodes( + hat!(tables).routers_net.as_ref().unwrap(), + hat!(tables).peers_net.as_ref().unwrap(), + ); + } + + hat_mut!(tables) + .schedule_compute_trees(tables_ref.clone(), WhatAmI::Router); + } + (WhatAmI::Router, WhatAmI::Peer) + | (WhatAmI::Peer, WhatAmI::Router) + | (WhatAmI::Peer, WhatAmI::Peer) => { + if hat!(tables).full_net(WhatAmI::Peer) { + for (_, removed_node) in hat_mut!(tables) + .peers_net + .as_mut() + .unwrap() + .remove_link(&zid) + { + pubsub_remove_node(tables, &removed_node.zid, WhatAmI::Peer); + queries_remove_node(tables, &removed_node.zid, WhatAmI::Peer); + } + + if tables.whatami == WhatAmI::Router { + hat_mut!(tables).shared_nodes = shared_nodes( + hat!(tables).routers_net.as_ref().unwrap(), + hat!(tables).peers_net.as_ref().unwrap(), + ); + } + + hat_mut!(tables) + .schedule_compute_trees(tables_ref.clone(), WhatAmI::Peer); + } else if let Some(net) = hat_mut!(tables).peers_net.as_mut() { + net.remove_link(&zid); + } + } + _ => (), + }; + } + (_, _) => log::error!("Closed transport in session closing!"), + } + Ok(()) + } + + fn as_any(&self) -> &dyn Any { + self + } + + #[inline] + fn ingress_filter(&self, tables: &Tables, face: &FaceState, expr: &mut RoutingExpr) -> bool { + tables.whatami != WhatAmI::Router + || face.whatami != WhatAmI::Peer + || hat!(tables).peers_net.is_none() + || tables.zid + == *hat!(tables).elect_router( + &tables.zid, + expr.full_expr(), + hat!(tables).get_router_links(face.zid), + ) + } + + #[inline] + fn egress_filter( + &self, + tables: &Tables, + src_face: &FaceState, + out_face: &Arc, + expr: &mut RoutingExpr, + ) -> bool { + if src_face.id != out_face.id + && match (src_face.mcast_group.as_ref(), out_face.mcast_group.as_ref()) { + (Some(l), Some(r)) => l != r, + _ => true, + } + { + let dst_master = tables.whatami != 
WhatAmI::Router + || out_face.whatami != WhatAmI::Peer + || hat!(tables).peers_net.is_none() + || tables.zid + == *hat!(tables).elect_router( + &tables.zid, + expr.full_expr(), + hat!(tables).get_router_links(out_face.zid), + ); + + return dst_master + && (src_face.whatami != WhatAmI::Peer + || out_face.whatami != WhatAmI::Peer + || hat!(tables).full_net(WhatAmI::Peer) + || hat!(tables).failover_brokering(src_face.zid, out_face.zid)); + } + false + } +} + +struct HatContext { router_subs: HashSet, peer_subs: HashSet, router_qabls: HashMap, @@ -229,7 +644,7 @@ pub(crate) struct HatContext { } impl HatContext { - pub(crate) fn new() -> Self { + fn new() -> Self { Self { router_subs: HashSet::new(), peer_subs: HashSet::new(), @@ -239,7 +654,7 @@ impl HatContext { } } -pub(crate) struct HatFace { +struct HatFace { local_subs: HashSet>, remote_subs: HashSet>, local_qabls: HashMap, QueryableInfo>, @@ -247,7 +662,7 @@ pub(crate) struct HatFace { } impl HatFace { - pub(crate) fn new() -> Self { + fn new() -> Self { Self { local_subs: HashSet::new(), remote_subs: HashSet::new(), @@ -257,7 +672,7 @@ impl HatFace { } } -pub(crate) fn close_face(tables: &TablesLock, face: &mut Arc) { +pub(super) fn close_face(tables: &TablesLock, face: &mut Arc) { let ctrl_lock = zlock!(tables.ctrl_lock); let mut wtables = zwrite!(tables.tables); let mut face_clone = face.clone(); @@ -330,10 +745,16 @@ pub(crate) fn close_face(tables: &TablesLock, face: &mut Arc) { let mut matches_query_routes = vec![]; let rtables = zread!(tables.tables); for _match in subs_matches.drain(..) { - matches_data_routes.push((_match.clone(), compute_data_routes_(&rtables, &_match))); + matches_data_routes.push(( + _match.clone(), + rtables.hat_code.compute_data_routes_(&rtables, &_match), + )); } for _match in qabls_matches.drain(..) 
{ - matches_query_routes.push((_match.clone(), compute_query_routes_(&rtables, &_match))); + matches_query_routes.push(( + _match.clone(), + rtables.hat_code.compute_query_routes_(&rtables, &_match), + )); } drop(rtables); @@ -355,310 +776,6 @@ pub(crate) fn close_face(tables: &TablesLock, face: &mut Arc) { drop(ctrl_lock); } -#[allow(clippy::too_many_arguments)] -pub(crate) fn init( - tables: &Arc, - runtime: Runtime, - router_full_linkstate: bool, - peer_full_linkstate: bool, - router_peers_failover_brokering: bool, - gossip: bool, - gossip_multihop: bool, - autoconnect: WhatAmIMatcher, -) { - let mut tables = zwrite!(tables.tables); - if router_full_linkstate | gossip { - hat_mut!(tables).routers_net = Some(Network::new( - "[Routers network]".to_string(), - tables.zid, - runtime.clone(), - router_full_linkstate, - router_peers_failover_brokering, - gossip, - gossip_multihop, - autoconnect, - )); - } - if peer_full_linkstate | gossip { - hat_mut!(tables).peers_net = Some(Network::new( - "[Peers network]".to_string(), - tables.zid, - runtime, - peer_full_linkstate, - router_peers_failover_brokering, - gossip, - gossip_multihop, - autoconnect, - )); - } - if router_full_linkstate && peer_full_linkstate { - hat_mut!(tables).shared_nodes = shared_nodes( - hat!(tables).routers_net.as_ref().unwrap(), - hat!(tables).peers_net.as_ref().unwrap(), - ); - } -} - -pub(crate) fn new_transport_unicast( - tables_ref: &Arc, - transport: TransportUnicast, -) -> ZResult> { - let ctrl_lock = zlock!(tables_ref.ctrl_lock); - let mut tables = zwrite!(tables_ref.tables); - let whatami = transport.get_whatami()?; - - let link_id = match (tables.whatami, whatami) { - (WhatAmI::Router, WhatAmI::Router) => hat_mut!(tables) - .routers_net - .as_mut() - .unwrap() - .add_link(transport.clone()), - (WhatAmI::Router, WhatAmI::Peer) - | (WhatAmI::Peer, WhatAmI::Router) - | (WhatAmI::Peer, WhatAmI::Peer) => { - if let Some(net) = hat_mut!(tables).peers_net.as_mut() { - net.add_link(transport.clone()) - } else { - 0 - } - } - _ => 0, - }; - - if hat!(tables).full_net(WhatAmI::Router) && hat!(tables).full_net(WhatAmI::Peer) { - hat_mut!(tables).shared_nodes = shared_nodes( - hat!(tables).routers_net.as_ref().unwrap(), - hat!(tables).peers_net.as_ref().unwrap(), - ); - } - - let face = tables - .open_net_face( - transport.get_zid()?, - transport.get_whatami()?, - #[cfg(feature = "stats")] - transport.get_stats()?, - Arc::new(Mux::new(transport)), - link_id, - ) - .upgrade() - .unwrap(); - - match (tables.whatami, whatami) { - (WhatAmI::Router, WhatAmI::Router) => { - hat_mut!(tables).schedule_compute_trees(tables_ref.clone(), WhatAmI::Router); - } - (WhatAmI::Router, WhatAmI::Peer) - | (WhatAmI::Peer, WhatAmI::Router) - | (WhatAmI::Peer, WhatAmI::Peer) => { - if hat_mut!(tables).full_net(WhatAmI::Peer) { - hat_mut!(tables).schedule_compute_trees(tables_ref.clone(), WhatAmI::Peer); - } - } - _ => (), - } - drop(tables); - drop(ctrl_lock); - Ok(face) -} - -pub(crate) fn handle_oam( - tables_ref: &Arc, - oam: Oam, - transport: &TransportUnicast, -) -> ZResult<()> { - if oam.id == OAM_LINKSTATE { - if let ZExtBody::ZBuf(buf) = oam.body { - if let Ok(zid) = transport.get_zid() { - use zenoh_buffers::reader::HasReader; - use zenoh_codec::RCodec; - let codec = Zenoh080Routing::new(); - let mut reader = buf.reader(); - let list: LinkStateList = codec.read(&mut reader).unwrap(); - - let ctrl_lock = zlock!(tables_ref.ctrl_lock); - let mut tables = zwrite!(tables_ref.tables); - let whatami = transport.get_whatami()?; - match 
(tables.whatami, whatami) { - (WhatAmI::Router, WhatAmI::Router) => { - for (_, removed_node) in hat_mut!(tables) - .routers_net - .as_mut() - .unwrap() - .link_states(list.link_states, zid) - .removed_nodes - { - pubsub_remove_node(&mut tables, &removed_node.zid, WhatAmI::Router); - queries_remove_node(&mut tables, &removed_node.zid, WhatAmI::Router); - } - - if hat!(tables).full_net(WhatAmI::Peer) { - hat_mut!(tables).shared_nodes = shared_nodes( - hat!(tables).routers_net.as_ref().unwrap(), - hat!(tables).peers_net.as_ref().unwrap(), - ); - } - - hat_mut!(tables) - .schedule_compute_trees(tables_ref.clone(), WhatAmI::Router); - } - (WhatAmI::Router, WhatAmI::Peer) - | (WhatAmI::Peer, WhatAmI::Router) - | (WhatAmI::Peer, WhatAmI::Peer) => { - if let Some(net) = hat_mut!(tables).peers_net.as_mut() { - let changes = net.link_states(list.link_states, zid); - if hat!(tables).full_net(WhatAmI::Peer) { - for (_, removed_node) in changes.removed_nodes { - pubsub_remove_node( - &mut tables, - &removed_node.zid, - WhatAmI::Peer, - ); - queries_remove_node( - &mut tables, - &removed_node.zid, - WhatAmI::Peer, - ); - } - - if tables.whatami == WhatAmI::Router { - hat_mut!(tables).shared_nodes = shared_nodes( - hat!(tables).routers_net.as_ref().unwrap(), - hat!(tables).peers_net.as_ref().unwrap(), - ); - } - - hat_mut!(tables) - .schedule_compute_trees(tables_ref.clone(), WhatAmI::Peer); - } else { - for (_, updated_node) in changes.updated_nodes { - pubsub_linkstate_change( - &mut tables, - &updated_node.zid, - &updated_node.links, - ); - queries_linkstate_change( - &mut tables, - &updated_node.zid, - &updated_node.links, - ); - } - } - } - } - _ => (), - }; - drop(tables); - drop(ctrl_lock); - } - } - } - - Ok(()) -} - -pub(crate) fn closing(tables_ref: &Arc, transport: &TransportUnicast) -> ZResult<()> { - match (transport.get_zid(), transport.get_whatami()) { - (Ok(zid), Ok(whatami)) => { - let ctrl_lock = zlock!(tables_ref.ctrl_lock); - let mut tables = zwrite!(tables_ref.tables); - match (tables.whatami, whatami) { - (WhatAmI::Router, WhatAmI::Router) => { - for (_, removed_node) in hat_mut!(tables) - .routers_net - .as_mut() - .unwrap() - .remove_link(&zid) - { - pubsub_remove_node(&mut tables, &removed_node.zid, WhatAmI::Router); - queries_remove_node(&mut tables, &removed_node.zid, WhatAmI::Router); - } - - if hat!(tables).full_net(WhatAmI::Peer) { - hat_mut!(tables).shared_nodes = shared_nodes( - hat!(tables).routers_net.as_ref().unwrap(), - hat!(tables).peers_net.as_ref().unwrap(), - ); - } - - hat_mut!(tables).schedule_compute_trees(tables_ref.clone(), WhatAmI::Router); - } - (WhatAmI::Router, WhatAmI::Peer) - | (WhatAmI::Peer, WhatAmI::Router) - | (WhatAmI::Peer, WhatAmI::Peer) => { - if hat!(tables).full_net(WhatAmI::Peer) { - for (_, removed_node) in hat_mut!(tables) - .peers_net - .as_mut() - .unwrap() - .remove_link(&zid) - { - pubsub_remove_node(&mut tables, &removed_node.zid, WhatAmI::Peer); - queries_remove_node(&mut tables, &removed_node.zid, WhatAmI::Peer); - } - - if tables.whatami == WhatAmI::Router { - hat_mut!(tables).shared_nodes = shared_nodes( - hat!(tables).routers_net.as_ref().unwrap(), - hat!(tables).peers_net.as_ref().unwrap(), - ); - } - - hat_mut!(tables).schedule_compute_trees(tables_ref.clone(), WhatAmI::Peer); - } else if let Some(net) = hat_mut!(tables).peers_net.as_mut() { - net.remove_link(&zid); - } - } - _ => (), - }; - drop(tables); - drop(ctrl_lock); - } - (_, _) => log::error!("Closed transport in session closing!"), - } - Ok(()) -} - -pub(crate) fn 
map_routing_context( - tables: &Tables, - face: &FaceState, - routing_context: RoutingContext, -) -> RoutingContext { - match tables.whatami { - WhatAmI::Router => match face.whatami { - WhatAmI::Router => hat!(tables) - .routers_net - .as_ref() - .unwrap() - .get_local_context(routing_context, face.link_id), - WhatAmI::Peer => { - if hat!(tables).full_net(WhatAmI::Peer) { - hat!(tables) - .peers_net - .as_ref() - .unwrap() - .get_local_context(routing_context, face.link_id) - } else { - 0 - } - } - _ => 0, - }, - WhatAmI::Peer => { - if hat!(tables).full_net(WhatAmI::Peer) { - hat!(tables) - .peers_net - .as_ref() - .unwrap() - .get_local_context(routing_context, face.link_id) - } else { - 0 - } - } - _ => 0, - } -} - #[macro_export] macro_rules! hat { ($t:expr) => { @@ -759,47 +876,147 @@ fn get_peer(tables: &Tables, face: &Arc, nodeid: RoutingContext) -> O } } -#[inline] -pub(crate) fn ingress_filter(tables: &Tables, face: &FaceState, expr: &mut RoutingExpr) -> bool { - tables.whatami != WhatAmI::Router - || face.whatami != WhatAmI::Peer - || hat!(tables).peers_net.is_none() - || tables.zid - == *hat!(tables).elect_router( - &tables.zid, - expr.full_expr(), - hat!(tables).get_router_links(face.zid), - ) -} +pub(crate) trait HatTrait: HatBaseTrait + HatPubSubTrait + HatQueriesTrait {} -#[inline] -pub(crate) fn egress_filter( - tables: &Tables, - src_face: &FaceState, - out_face: &Arc, - expr: &mut RoutingExpr, -) -> bool { - if src_face.id != out_face.id - && match (src_face.mcast_group.as_ref(), out_face.mcast_group.as_ref()) { - (Some(l), Some(r)) => l != r, - _ => true, - } - { - let dst_master = tables.whatami != WhatAmI::Router - || out_face.whatami != WhatAmI::Peer - || hat!(tables).peers_net.is_none() - || tables.zid - == *hat!(tables).elect_router( - &tables.zid, - expr.full_expr(), - hat!(tables).get_router_links(out_face.zid), - ); +impl HatTrait for HatCode {} - return dst_master - && (src_face.whatami != WhatAmI::Peer - || out_face.whatami != WhatAmI::Peer - || hat!(tables).full_net(WhatAmI::Peer) - || hat!(tables).failover_brokering(src_face.zid, out_face.zid)); +pub(crate) trait HatBaseTrait { + fn as_any(&self) -> &dyn Any; + + #[allow(clippy::too_many_arguments)] + fn init( + &self, + tables: &mut Tables, + runtime: Runtime, + router_full_linkstate: bool, + peer_full_linkstate: bool, + router_peers_failover_brokering: bool, + gossip: bool, + gossip_multihop: bool, + autoconnect: WhatAmIMatcher, + ); + + fn new_tables(&self, router_peers_failover_brokering: bool) -> Box { + Box::new(HatTables::new(router_peers_failover_brokering)) + } + + fn new_face(&self) -> Box { + Box::new(HatFace::new()) + } + + fn new_resource(&self) -> Box { + Box::new(HatContext::new()) } - false + + fn new_local_face( + &self, + tables: &mut Tables, + tables_ref: &Arc, + primitives: Arc, + ) -> ZResult>; + + fn new_transport_unicast_face( + &self, + tables: &mut Tables, + tables_ref: &Arc, + transport: TransportUnicast, + ) -> ZResult>; + + fn handle_oam( + &self, + tables: &mut Tables, + tables_ref: &Arc, + oam: Oam, + transport: &TransportUnicast, + ) -> ZResult<()>; + + fn map_routing_context( + &self, + tables: &Tables, + face: &FaceState, + routing_context: RoutingContext, + ) -> RoutingContext; + + fn ingress_filter(&self, tables: &Tables, face: &FaceState, expr: &mut RoutingExpr) -> bool; + + fn egress_filter( + &self, + tables: &Tables, + src_face: &FaceState, + out_face: &Arc, + expr: &mut RoutingExpr, + ) -> bool; + + fn closing( + &self, + tables: &mut Tables, + tables_ref: &Arc, + 
transport: &TransportUnicast, + ) -> ZResult<()>; +} + +pub(crate) trait HatPubSubTrait { + fn declare_subscription( + &self, + tables: &TablesLock, + face: &mut Arc, + expr: &WireExpr, + sub_info: &SubscriberInfo, + node_id: RoutingContext, + ); + fn forget_subscription( + &self, + tables: &TablesLock, + face: &mut Arc, + expr: &WireExpr, + node_id: RoutingContext, + ); + + fn compute_data_route( + &self, + tables: &Tables, + expr: &mut RoutingExpr, + source: RoutingContext, + source_type: WhatAmI, + ) -> Arc; + + fn compute_matching_pulls(&self, tables: &Tables, expr: &mut RoutingExpr) -> Arc; + + fn compute_data_routes_(&self, tables: &Tables, res: &Arc) -> DataRoutes; + + fn compute_data_routes(&self, tables: &mut Tables, res: &mut Arc); +} + +pub(crate) trait HatQueriesTrait { + fn declare_queryable( + &self, + tables: &TablesLock, + face: &mut Arc, + expr: &WireExpr, + qabl_info: &QueryableInfo, + node_id: RoutingContext, + ); + fn forget_queryable( + &self, + tables: &TablesLock, + face: &mut Arc, + expr: &WireExpr, + node_id: RoutingContext, + ); + fn compute_query_route( + &self, + tables: &Tables, + expr: &mut RoutingExpr, + source: RoutingContext, + source_type: WhatAmI, + ) -> Arc; + fn compute_query_routes(&self, tables: &mut Tables, res: &mut Arc); + fn compute_query_routes_(&self, tables: &Tables, res: &Arc) -> QueryRoutes; + fn compute_local_replies( + &self, + tables: &Tables, + prefix: &Arc, + suffix: &str, + face: &Arc, + ) -> Vec<(WireExpr<'static>, ZBuf)>; } diff --git a/zenoh/src/net/routing/hat/pubsub.rs b/zenoh/src/net/routing/hat/pubsub.rs index ea67068b9f..9f7d41d9d1 100644 --- a/zenoh/src/net/routing/hat/pubsub.rs +++ b/zenoh/src/net/routing/hat/pubsub.rs @@ -17,7 +17,7 @@ use super::super::dispatcher::resource::{Resource, RoutingContext, SessionContex use super::super::dispatcher::tables::{Tables, TablesLock}; use super::super::PREFIX_LIVELINESS; use super::network::Network; -use super::{get_peer, get_router, HatContext, HatFace, HatTables}; +use super::{get_peer, get_router, HatCode, HatContext, HatFace, HatPubSubTrait, HatTables}; use crate::net::routing::dispatcher::tables::{DataRoutes, PullCaches, Route, RoutingExpr}; use crate::{face_hat, face_hat_mut, hat, hat_mut, res_hat, res_hat_mut}; use petgraph::graph::NodeIndex; @@ -511,35 +511,6 @@ fn declare_client_subscription( } } -pub(crate) fn declare_subscription( - tables: &TablesLock, - face: &mut Arc, - expr: &WireExpr, - sub_info: &SubscriberInfo, - node_id: RoutingContext, -) { - let rtables = zread!(tables.tables); - match (rtables.whatami, face.whatami) { - (WhatAmI::Router, WhatAmI::Router) => { - if let Some(router) = get_router(&rtables, face, node_id) { - declare_router_subscription(tables, rtables, face, expr, sub_info, router) - } - } - (WhatAmI::Router, WhatAmI::Peer) - | (WhatAmI::Peer, WhatAmI::Router) - | (WhatAmI::Peer, WhatAmI::Peer) => { - if hat!(rtables).full_net(WhatAmI::Peer) { - if let Some(peer) = get_peer(&rtables, face, node_id) { - declare_peer_subscription(tables, rtables, face, expr, sub_info, peer) - } - } else { - declare_client_subscription(tables, rtables, face, expr, sub_info) - } - } - _ => declare_client_subscription(tables, rtables, face, expr, sub_info), - } -} - #[inline] fn remote_router_subs(tables: &Tables, res: &Arc) -> bool { res.context.is_some() @@ -842,7 +813,7 @@ fn forget_peer_subscription( } } -pub(crate) fn undeclare_client_subscription( +pub(super) fn undeclare_client_subscription( tables: &mut Tables, face: &mut Arc, res: &mut Arc, @@ -934,35 +905,7 @@ fn 
forget_client_subscription( } } -pub(crate) fn forget_subscription( - tables: &TablesLock, - face: &mut Arc, - expr: &WireExpr, - node_id: RoutingContext, -) { - let rtables = zread!(tables.tables); - match (rtables.whatami, face.whatami) { - (WhatAmI::Router, WhatAmI::Router) => { - if let Some(router) = get_router(&rtables, face, node_id) { - forget_router_subscription(tables, rtables, face, expr, &router) - } - } - (WhatAmI::Router, WhatAmI::Peer) - | (WhatAmI::Peer, WhatAmI::Router) - | (WhatAmI::Peer, WhatAmI::Peer) => { - if hat!(rtables).full_net(WhatAmI::Peer) { - if let Some(peer) = get_peer(&rtables, face, node_id) { - forget_peer_subscription(tables, rtables, face, expr, &peer) - } - } else { - forget_client_subscription(tables, rtables, face, expr) - } - } - _ => forget_client_subscription(tables, rtables, face, expr), - } -} - -pub(crate) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { +pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { let sub_info = SubscriberInfo { reliability: Reliability::Reliable, // @TODO mode: Mode::Push, @@ -1072,7 +1015,7 @@ pub(crate) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { } } -pub(crate) fn pubsub_remove_node(tables: &mut Tables, node: &ZenohId, net_type: WhatAmI) { +pub(super) fn pubsub_remove_node(tables: &mut Tables, node: &ZenohId, net_type: WhatAmI) { match net_type { WhatAmI::Router => { for mut res in hat!(tables) @@ -1125,7 +1068,7 @@ pub(crate) fn pubsub_remove_node(tables: &mut Tables, node: &ZenohId, net_type: } } -pub(crate) fn pubsub_tree_change( +pub(super) fn pubsub_tree_change( tables: &mut Tables, new_childs: &[Vec], net_type: WhatAmI, @@ -1174,7 +1117,7 @@ pub(crate) fn pubsub_tree_change( compute_data_routes_from(tables, &mut tables.root_res.clone()); } -pub(crate) fn pubsub_linkstate_change(tables: &mut Tables, zid: &ZenohId, links: &[ZenohId]) { +pub(super) fn pubsub_linkstate_change(tables: &mut Tables, zid: &ZenohId, links: &[ZenohId]) { if let Some(src_face) = tables.get_face(zid).cloned() { if hat!(tables).router_peers_failover_brokering && tables.whatami == WhatAmI::Router @@ -1285,65 +1228,145 @@ fn insert_faces_for_subs( } } -pub(crate) fn compute_data_route( - tables: &Tables, - expr: &mut RoutingExpr, - source: RoutingContext, - source_type: WhatAmI, -) -> Arc { - let mut route = HashMap::new(); - let key_expr = expr.full_expr(); - if key_expr.ends_with('/') { - return Arc::new(route); +impl HatPubSubTrait for HatCode { + fn declare_subscription( + &self, + tables: &TablesLock, + face: &mut Arc, + expr: &WireExpr, + sub_info: &SubscriberInfo, + node_id: RoutingContext, + ) { + let rtables = zread!(tables.tables); + match (rtables.whatami, face.whatami) { + (WhatAmI::Router, WhatAmI::Router) => { + if let Some(router) = get_router(&rtables, face, node_id) { + declare_router_subscription(tables, rtables, face, expr, sub_info, router) + } + } + (WhatAmI::Router, WhatAmI::Peer) + | (WhatAmI::Peer, WhatAmI::Router) + | (WhatAmI::Peer, WhatAmI::Peer) => { + if hat!(rtables).full_net(WhatAmI::Peer) { + if let Some(peer) = get_peer(&rtables, face, node_id) { + declare_peer_subscription(tables, rtables, face, expr, sub_info, peer) + } + } else { + declare_client_subscription(tables, rtables, face, expr, sub_info) + } + } + _ => declare_client_subscription(tables, rtables, face, expr, sub_info), + } } - log::trace!( - "compute_data_route({}, {:?}, {:?})", - key_expr, - source, - source_type - ); - let key_expr = match OwnedKeyExpr::try_from(key_expr) { - Ok(ke) => ke, - Err(e) => { 
- log::warn!("Invalid KE reached the system: {}", e); + + fn forget_subscription( + &self, + tables: &TablesLock, + face: &mut Arc, + expr: &WireExpr, + node_id: RoutingContext, + ) { + let rtables = zread!(tables.tables); + match (rtables.whatami, face.whatami) { + (WhatAmI::Router, WhatAmI::Router) => { + if let Some(router) = get_router(&rtables, face, node_id) { + forget_router_subscription(tables, rtables, face, expr, &router) + } + } + (WhatAmI::Router, WhatAmI::Peer) + | (WhatAmI::Peer, WhatAmI::Router) + | (WhatAmI::Peer, WhatAmI::Peer) => { + if hat!(rtables).full_net(WhatAmI::Peer) { + if let Some(peer) = get_peer(&rtables, face, node_id) { + forget_peer_subscription(tables, rtables, face, expr, &peer) + } + } else { + forget_client_subscription(tables, rtables, face, expr) + } + } + _ => forget_client_subscription(tables, rtables, face, expr), + } + } + + fn compute_data_route( + &self, + tables: &Tables, + expr: &mut RoutingExpr, + source: RoutingContext, + source_type: WhatAmI, + ) -> Arc { + let mut route = HashMap::new(); + let key_expr = expr.full_expr(); + if key_expr.ends_with('/') { return Arc::new(route); } - }; - let res = Resource::get_resource(expr.prefix, expr.suffix); - let matches = res - .as_ref() - .and_then(|res| res.context.as_ref()) - .map(|ctx| Cow::from(&ctx.matches)) - .unwrap_or_else(|| Cow::from(Resource::get_matches(tables, &key_expr))); - - let master = tables.whatami != WhatAmI::Router - || !hat!(tables).full_net(WhatAmI::Peer) - || *hat!(tables).elect_router(&tables.zid, &key_expr, hat!(tables).shared_nodes.iter()) - == tables.zid; - - for mres in matches.iter() { - let mres = mres.upgrade().unwrap(); - if tables.whatami == WhatAmI::Router { - if master || source_type == WhatAmI::Router { - let net = hat!(tables).routers_net.as_ref().unwrap(); - let router_source = match source_type { - WhatAmI::Router => source, - _ => net.idx.index() as RoutingContext, - }; - insert_faces_for_subs( - &mut route, - expr, - tables, - net, - router_source, - &res_hat!(mres).router_subs, - ); + log::trace!( + "compute_data_route({}, {:?}, {:?})", + key_expr, + source, + source_type + ); + let key_expr = match OwnedKeyExpr::try_from(key_expr) { + Ok(ke) => ke, + Err(e) => { + log::warn!("Invalid KE reached the system: {}", e); + return Arc::new(route); + } + }; + let res = Resource::get_resource(expr.prefix, expr.suffix); + let matches = res + .as_ref() + .and_then(|res| res.context.as_ref()) + .map(|ctx| Cow::from(&ctx.matches)) + .unwrap_or_else(|| Cow::from(Resource::get_matches(tables, &key_expr))); + + let master = tables.whatami != WhatAmI::Router + || !hat!(tables).full_net(WhatAmI::Peer) + || *hat!(tables).elect_router(&tables.zid, &key_expr, hat!(tables).shared_nodes.iter()) + == tables.zid; + + for mres in matches.iter() { + let mres = mres.upgrade().unwrap(); + if tables.whatami == WhatAmI::Router { + if master || source_type == WhatAmI::Router { + let net = hat!(tables).routers_net.as_ref().unwrap(); + let router_source = match source_type { + WhatAmI::Router => source, + _ => net.idx.index() as RoutingContext, + }; + insert_faces_for_subs( + &mut route, + expr, + tables, + net, + router_source, + &res_hat!(mres).router_subs, + ); + } + + if (master || source_type != WhatAmI::Router) + && hat!(tables).full_net(WhatAmI::Peer) + { + let net = hat!(tables).peers_net.as_ref().unwrap(); + let peer_source = match source_type { + WhatAmI::Peer => source, + _ => net.idx.index() as RoutingContext, + }; + insert_faces_for_subs( + &mut route, + expr, + tables, + net, + 
peer_source, + &res_hat!(mres).peer_subs, + ); + } } - if (master || source_type != WhatAmI::Router) && hat!(tables).full_net(WhatAmI::Peer) { + if tables.whatami == WhatAmI::Peer && hat!(tables).full_net(WhatAmI::Peer) { let net = hat!(tables).peers_net.as_ref().unwrap(); let peer_source = match source_type { - WhatAmI::Peer => source, + WhatAmI::Router | WhatAmI::Peer => source, _ => net.idx.index() as RoutingContext, }; insert_faces_for_subs( @@ -1355,180 +1378,80 @@ pub(crate) fn compute_data_route( &res_hat!(mres).peer_subs, ); } - } - - if tables.whatami == WhatAmI::Peer && hat!(tables).full_net(WhatAmI::Peer) { - let net = hat!(tables).peers_net.as_ref().unwrap(); - let peer_source = match source_type { - WhatAmI::Router | WhatAmI::Peer => source, - _ => net.idx.index() as RoutingContext, - }; - insert_faces_for_subs( - &mut route, - expr, - tables, - net, - peer_source, - &res_hat!(mres).peer_subs, - ); - } - if tables.whatami != WhatAmI::Router || master || source_type == WhatAmI::Router { - for (sid, context) in &mres.session_ctxs { - if let Some(subinfo) = &context.subs { - if match tables.whatami { - WhatAmI::Router => context.face.whatami != WhatAmI::Router, - _ => { - source_type == WhatAmI::Client - || context.face.whatami == WhatAmI::Client + if tables.whatami != WhatAmI::Router || master || source_type == WhatAmI::Router { + for (sid, context) in &mres.session_ctxs { + if let Some(subinfo) = &context.subs { + if match tables.whatami { + WhatAmI::Router => context.face.whatami != WhatAmI::Router, + _ => { + source_type == WhatAmI::Client + || context.face.whatami == WhatAmI::Client + } + } && subinfo.mode == Mode::Push + { + route.entry(*sid).or_insert_with(|| { + let key_expr = + Resource::get_best_key(expr.prefix, expr.suffix, *sid); + ( + context.face.clone(), + key_expr.to_owned(), + RoutingContext::default(), + ) + }); } - } && subinfo.mode == Mode::Push - { - route.entry(*sid).or_insert_with(|| { - let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, *sid); - ( - context.face.clone(), - key_expr.to_owned(), - RoutingContext::default(), - ) - }); } } } } - } - for mcast_group in &tables.mcast_groups { - route.insert( - mcast_group.id, - ( - mcast_group.clone(), - expr.full_expr().to_string().into(), - RoutingContext::default(), - ), - ); - } - Arc::new(route) -} - -pub(crate) fn compute_matching_pulls(tables: &Tables, expr: &mut RoutingExpr) -> Arc { - let mut pull_caches = vec![]; - let ke = if let Ok(ke) = OwnedKeyExpr::try_from(expr.full_expr()) { - ke - } else { - return Arc::new(pull_caches); - }; - let res = Resource::get_resource(expr.prefix, expr.suffix); - let matches = res - .as_ref() - .and_then(|res| res.context.as_ref()) - .map(|ctx| Cow::from(&ctx.matches)) - .unwrap_or_else(|| Cow::from(Resource::get_matches(tables, &ke))); - - for mres in matches.iter() { - let mres = mres.upgrade().unwrap(); - for context in mres.session_ctxs.values() { - if let Some(subinfo) = &context.subs { - if subinfo.mode == Mode::Pull { - pull_caches.push(context.clone()); - } - } - } - } - Arc::new(pull_caches) -} - -pub(crate) fn compute_data_routes_(tables: &Tables, res: &Arc) -> DataRoutes { - let mut routes = DataRoutes { - matching_pulls: None, - routers_data_routes: vec![], - peers_data_routes: vec![], - peer_data_route: None, - client_data_route: None, - }; - let mut expr = RoutingExpr::new(res, ""); - if tables.whatami == WhatAmI::Router { - let indexes = hat!(tables) - .routers_net - .as_ref() - .unwrap() - .graph - .node_indices() - .collect::>(); - let 
max_idx = indexes.iter().max().unwrap(); - routes - .routers_data_routes - .resize_with(max_idx.index() + 1, || Arc::new(HashMap::new())); - - for idx in &indexes { - routes.routers_data_routes[idx.index()] = compute_data_route( - tables, - &mut expr, - idx.index() as RoutingContext, - WhatAmI::Router, + for mcast_group in &tables.mcast_groups { + route.insert( + mcast_group.id, + ( + mcast_group.clone(), + expr.full_expr().to_string().into(), + RoutingContext::default(), + ), ); } - - routes.peer_data_route = Some(compute_data_route( - tables, - &mut expr, - RoutingContext::default(), - WhatAmI::Peer, - )); + Arc::new(route) } - if (tables.whatami == WhatAmI::Router || tables.whatami == WhatAmI::Peer) - && hat!(tables).full_net(WhatAmI::Peer) - { - let indexes = hat!(tables) - .peers_net + + fn compute_matching_pulls(&self, tables: &Tables, expr: &mut RoutingExpr) -> Arc { + let mut pull_caches = vec![]; + let ke = if let Ok(ke) = OwnedKeyExpr::try_from(expr.full_expr()) { + ke + } else { + return Arc::new(pull_caches); + }; + let res = Resource::get_resource(expr.prefix, expr.suffix); + let matches = res .as_ref() - .unwrap() - .graph - .node_indices() - .collect::>(); - let max_idx = indexes.iter().max().unwrap(); - routes - .peers_data_routes - .resize_with(max_idx.index() + 1, || Arc::new(HashMap::new())); + .and_then(|res| res.context.as_ref()) + .map(|ctx| Cow::from(&ctx.matches)) + .unwrap_or_else(|| Cow::from(Resource::get_matches(tables, &ke))); - for idx in &indexes { - routes.peers_data_routes[idx.index()] = compute_data_route( - tables, - &mut expr, - idx.index() as RoutingContext, - WhatAmI::Peer, - ); + for mres in matches.iter() { + let mres = mres.upgrade().unwrap(); + for context in mres.session_ctxs.values() { + if let Some(subinfo) = &context.subs { + if subinfo.mode == Mode::Pull { + pull_caches.push(context.clone()); + } + } + } } + Arc::new(pull_caches) } - if tables.whatami == WhatAmI::Peer && !hat!(tables).full_net(WhatAmI::Peer) { - routes.client_data_route = Some(compute_data_route( - tables, - &mut expr, - RoutingContext::default(), - WhatAmI::Client, - )); - routes.peer_data_route = Some(compute_data_route( - tables, - &mut expr, - RoutingContext::default(), - WhatAmI::Peer, - )); - } - if tables.whatami == WhatAmI::Client { - routes.client_data_route = Some(compute_data_route( - tables, - &mut expr, - RoutingContext::default(), - WhatAmI::Client, - )); - } - routes.matching_pulls = Some(compute_matching_pulls(tables, &mut expr)); - routes -} -pub(crate) fn compute_data_routes(tables: &mut Tables, res: &mut Arc) { - if res.context.is_some() { - let mut res_mut = res.clone(); - let res_mut = get_mut_unchecked(&mut res_mut); + fn compute_data_routes_(&self, tables: &Tables, res: &Arc) -> DataRoutes { + let mut routes = DataRoutes { + matching_pulls: None, + routers_data_routes: vec![], + peers_data_routes: vec![], + peer_data_route: None, + client_data_route: None, + }; let mut expr = RoutingExpr::new(res, ""); if tables.whatami == WhatAmI::Router { let indexes = hat!(tables) @@ -1539,12 +1462,12 @@ pub(crate) fn compute_data_routes(tables: &mut Tables, res: &mut Arc) .node_indices() .collect::>(); let max_idx = indexes.iter().max().unwrap(); - let routers_data_routes = &mut res_mut.context_mut().routers_data_routes; - routers_data_routes.clear(); - routers_data_routes.resize_with(max_idx.index() + 1, || Arc::new(HashMap::new())); + routes + .routers_data_routes + .resize_with(max_idx.index() + 1, || Arc::new(HashMap::new())); for idx in &indexes { - 
routers_data_routes[idx.index()] = compute_data_route( + routes.routers_data_routes[idx.index()] = self.compute_data_route( tables, &mut expr, idx.index() as RoutingContext, @@ -1552,7 +1475,7 @@ pub(crate) fn compute_data_routes(tables: &mut Tables, res: &mut Arc) ); } - res_mut.context_mut().peer_data_route = Some(compute_data_route( + routes.peer_data_route = Some(self.compute_data_route( tables, &mut expr, RoutingContext::default(), @@ -1570,12 +1493,12 @@ pub(crate) fn compute_data_routes(tables: &mut Tables, res: &mut Arc) .node_indices() .collect::>(); let max_idx = indexes.iter().max().unwrap(); - let peers_data_routes = &mut res_mut.context_mut().peers_data_routes; - peers_data_routes.clear(); - peers_data_routes.resize_with(max_idx.index() + 1, || Arc::new(HashMap::new())); + routes + .peers_data_routes + .resize_with(max_idx.index() + 1, || Arc::new(HashMap::new())); for idx in &indexes { - peers_data_routes[idx.index()] = compute_data_route( + routes.peers_data_routes[idx.index()] = self.compute_data_route( tables, &mut expr, idx.index() as RoutingContext, @@ -1584,13 +1507,13 @@ pub(crate) fn compute_data_routes(tables: &mut Tables, res: &mut Arc) } } if tables.whatami == WhatAmI::Peer && !hat!(tables).full_net(WhatAmI::Peer) { - res_mut.context_mut().client_data_route = Some(compute_data_route( + routes.client_data_route = Some(self.compute_data_route( tables, &mut expr, RoutingContext::default(), WhatAmI::Client, )); - res_mut.context_mut().peer_data_route = Some(compute_data_route( + routes.peer_data_route = Some(self.compute_data_route( tables, &mut expr, RoutingContext::default(), @@ -1598,13 +1521,98 @@ pub(crate) fn compute_data_routes(tables: &mut Tables, res: &mut Arc) )); } if tables.whatami == WhatAmI::Client { - res_mut.context_mut().client_data_route = Some(compute_data_route( + routes.client_data_route = Some(self.compute_data_route( tables, &mut expr, RoutingContext::default(), WhatAmI::Client, )); } - res_mut.context_mut().matching_pulls = compute_matching_pulls(tables, &mut expr); + routes.matching_pulls = Some(self.compute_matching_pulls(tables, &mut expr)); + routes + } + + fn compute_data_routes(&self, tables: &mut Tables, res: &mut Arc) { + if res.context.is_some() { + let mut res_mut = res.clone(); + let res_mut = get_mut_unchecked(&mut res_mut); + let mut expr = RoutingExpr::new(res, ""); + if tables.whatami == WhatAmI::Router { + let indexes = hat!(tables) + .routers_net + .as_ref() + .unwrap() + .graph + .node_indices() + .collect::>(); + let max_idx = indexes.iter().max().unwrap(); + let routers_data_routes = &mut res_mut.context_mut().routers_data_routes; + routers_data_routes.clear(); + routers_data_routes.resize_with(max_idx.index() + 1, || Arc::new(HashMap::new())); + + for idx in &indexes { + routers_data_routes[idx.index()] = self.compute_data_route( + tables, + &mut expr, + idx.index() as RoutingContext, + WhatAmI::Router, + ); + } + + res_mut.context_mut().peer_data_route = Some(self.compute_data_route( + tables, + &mut expr, + RoutingContext::default(), + WhatAmI::Peer, + )); + } + if (tables.whatami == WhatAmI::Router || tables.whatami == WhatAmI::Peer) + && hat!(tables).full_net(WhatAmI::Peer) + { + let indexes = hat!(tables) + .peers_net + .as_ref() + .unwrap() + .graph + .node_indices() + .collect::>(); + let max_idx = indexes.iter().max().unwrap(); + let peers_data_routes = &mut res_mut.context_mut().peers_data_routes; + peers_data_routes.clear(); + peers_data_routes.resize_with(max_idx.index() + 1, || Arc::new(HashMap::new())); + + for 
idx in &indexes { + peers_data_routes[idx.index()] = self.compute_data_route( + tables, + &mut expr, + idx.index() as RoutingContext, + WhatAmI::Peer, + ); + } + } + if tables.whatami == WhatAmI::Peer && !hat!(tables).full_net(WhatAmI::Peer) { + res_mut.context_mut().client_data_route = Some(self.compute_data_route( + tables, + &mut expr, + RoutingContext::default(), + WhatAmI::Client, + )); + res_mut.context_mut().peer_data_route = Some(self.compute_data_route( + tables, + &mut expr, + RoutingContext::default(), + WhatAmI::Peer, + )); + } + if tables.whatami == WhatAmI::Client { + res_mut.context_mut().client_data_route = Some(self.compute_data_route( + tables, + &mut expr, + RoutingContext::default(), + WhatAmI::Client, + )); + } + res_mut.context_mut().matching_pulls = self.compute_matching_pulls(tables, &mut expr); + } } } diff --git a/zenoh/src/net/routing/hat/queries.rs b/zenoh/src/net/routing/hat/queries.rs index 43c2eb737e..0982a232fc 100644 --- a/zenoh/src/net/routing/hat/queries.rs +++ b/zenoh/src/net/routing/hat/queries.rs @@ -16,7 +16,7 @@ use super::super::dispatcher::queries::*; use super::super::dispatcher::resource::{Resource, RoutingContext, SessionContext}; use super::super::dispatcher::tables::{Tables, TablesLock}; use super::network::Network; -use super::{get_peer, get_router, HatContext, HatFace, HatTables}; +use super::{get_peer, get_router, HatCode, HatContext, HatFace, HatQueriesTrait, HatTables}; use crate::net::routing::dispatcher::tables::{ QueryRoutes, QueryTargetQabl, QueryTargetQablSet, RoutingExpr, }; @@ -627,35 +627,6 @@ fn declare_client_queryable( } } -pub(crate) fn declare_queryable( - tables: &TablesLock, - face: &mut Arc, - expr: &WireExpr, - qabl_info: &QueryableInfo, - node_id: RoutingContext, -) { - let rtables = zread!(tables.tables); - match (rtables.whatami, face.whatami) { - (WhatAmI::Router, WhatAmI::Router) => { - if let Some(router) = get_router(&rtables, face, node_id) { - declare_router_queryable(tables, rtables, face, expr, qabl_info, router) - } - } - (WhatAmI::Router, WhatAmI::Peer) - | (WhatAmI::Peer, WhatAmI::Router) - | (WhatAmI::Peer, WhatAmI::Peer) => { - if hat!(rtables).full_net(WhatAmI::Peer) { - if let Some(peer) = get_peer(&rtables, face, node_id) { - declare_peer_queryable(tables, rtables, face, expr, qabl_info, peer) - } - } else { - declare_client_queryable(tables, rtables, face, expr, qabl_info) - } - } - _ => declare_client_queryable(tables, rtables, face, expr, qabl_info), - } -} - #[inline] fn remote_router_qabls(tables: &Tables, res: &Arc) -> bool { res.context.is_some() @@ -960,7 +931,7 @@ fn forget_peer_queryable( } } -pub(crate) fn undeclare_client_queryable( +pub(super) fn undeclare_client_queryable( tables: &mut Tables, face: &mut Arc, res: &mut Arc, @@ -1063,35 +1034,7 @@ fn forget_client_queryable( } } -pub(crate) fn forget_queryable( - tables: &TablesLock, - face: &mut Arc, - expr: &WireExpr, - node_id: RoutingContext, -) { - let rtables = zread!(tables.tables); - match (rtables.whatami, face.whatami) { - (WhatAmI::Router, WhatAmI::Router) => { - if let Some(router) = get_router(&rtables, face, node_id) { - forget_router_queryable(tables, rtables, face, expr, &router) - } - } - (WhatAmI::Router, WhatAmI::Peer) - | (WhatAmI::Peer, WhatAmI::Router) - | (WhatAmI::Peer, WhatAmI::Peer) => { - if hat!(rtables).full_net(WhatAmI::Peer) { - if let Some(peer) = get_peer(&rtables, face, node_id) { - forget_peer_queryable(tables, rtables, face, expr, &peer) - } - } else { - forget_client_queryable(tables, rtables, face, 
expr) - } - } - _ => forget_client_queryable(tables, rtables, face, expr), - } -} - -pub(crate) fn queries_new_face(tables: &mut Tables, face: &mut Arc) { +pub(super) fn queries_new_face(tables: &mut Tables, face: &mut Arc) { match tables.whatami { WhatAmI::Router => { if face.whatami == WhatAmI::Client { @@ -1190,7 +1133,7 @@ pub(crate) fn queries_new_face(tables: &mut Tables, face: &mut Arc) { } } -pub(crate) fn queries_remove_node(tables: &mut Tables, node: &ZenohId, net_type: WhatAmI) { +pub(super) fn queries_remove_node(tables: &mut Tables, node: &ZenohId, net_type: WhatAmI) { match net_type { WhatAmI::Router => { let mut qabls = vec![]; @@ -1249,7 +1192,7 @@ pub(crate) fn queries_remove_node(tables: &mut Tables, node: &ZenohId, net_type: } } -pub(crate) fn queries_linkstate_change(tables: &mut Tables, zid: &ZenohId, links: &[ZenohId]) { +pub(super) fn queries_linkstate_change(tables: &mut Tables, zid: &ZenohId, links: &[ZenohId]) { if let Some(src_face) = tables.get_face(zid) { if hat!(tables).router_peers_failover_brokering && tables.whatami == WhatAmI::Router @@ -1324,7 +1267,7 @@ pub(crate) fn queries_linkstate_change(tables: &mut Tables, zid: &ZenohId, links } } -pub(crate) fn queries_tree_change( +pub(super) fn queries_tree_change( tables: &mut Tables, new_childs: &[Vec], net_type: WhatAmI, @@ -1413,67 +1356,149 @@ fn insert_target_for_qabls( lazy_static::lazy_static! { static ref EMPTY_ROUTE: Arc = Arc::new(Vec::new()); } -pub(crate) fn compute_query_route( - tables: &Tables, - expr: &mut RoutingExpr, - source: RoutingContext, - source_type: WhatAmI, -) -> Arc { - let mut route = QueryTargetQablSet::new(); - let key_expr = expr.full_expr(); - if key_expr.ends_with('/') { - return EMPTY_ROUTE.clone(); + +impl HatQueriesTrait for HatCode { + fn declare_queryable( + &self, + tables: &TablesLock, + face: &mut Arc, + expr: &WireExpr, + qabl_info: &QueryableInfo, + node_id: RoutingContext, + ) { + let rtables = zread!(tables.tables); + match (rtables.whatami, face.whatami) { + (WhatAmI::Router, WhatAmI::Router) => { + if let Some(router) = get_router(&rtables, face, node_id) { + declare_router_queryable(tables, rtables, face, expr, qabl_info, router) + } + } + (WhatAmI::Router, WhatAmI::Peer) + | (WhatAmI::Peer, WhatAmI::Router) + | (WhatAmI::Peer, WhatAmI::Peer) => { + if hat!(rtables).full_net(WhatAmI::Peer) { + if let Some(peer) = get_peer(&rtables, face, node_id) { + declare_peer_queryable(tables, rtables, face, expr, qabl_info, peer) + } + } else { + declare_client_queryable(tables, rtables, face, expr, qabl_info) + } + } + _ => declare_client_queryable(tables, rtables, face, expr, qabl_info), + } } - log::trace!( - "compute_query_route({}, {:?}, {:?})", - key_expr, - source, - source_type - ); - let key_expr = match OwnedKeyExpr::try_from(key_expr) { - Ok(ke) => ke, - Err(e) => { - log::warn!("Invalid KE reached the system: {}", e); + + fn forget_queryable( + &self, + tables: &TablesLock, + face: &mut Arc, + expr: &WireExpr, + node_id: RoutingContext, + ) { + let rtables = zread!(tables.tables); + match (rtables.whatami, face.whatami) { + (WhatAmI::Router, WhatAmI::Router) => { + if let Some(router) = get_router(&rtables, face, node_id) { + forget_router_queryable(tables, rtables, face, expr, &router) + } + } + (WhatAmI::Router, WhatAmI::Peer) + | (WhatAmI::Peer, WhatAmI::Router) + | (WhatAmI::Peer, WhatAmI::Peer) => { + if hat!(rtables).full_net(WhatAmI::Peer) { + if let Some(peer) = get_peer(&rtables, face, node_id) { + forget_peer_queryable(tables, rtables, face, expr, 
&peer) + } + } else { + forget_client_queryable(tables, rtables, face, expr) + } + } + _ => forget_client_queryable(tables, rtables, face, expr), + } + } + + fn compute_query_route( + &self, + tables: &Tables, + expr: &mut RoutingExpr, + source: RoutingContext, + source_type: WhatAmI, + ) -> Arc { + let mut route = QueryTargetQablSet::new(); + let key_expr = expr.full_expr(); + if key_expr.ends_with('/') { return EMPTY_ROUTE.clone(); } - }; - let res = Resource::get_resource(expr.prefix, expr.suffix); - let matches = res - .as_ref() - .and_then(|res| res.context.as_ref()) - .map(|ctx| Cow::from(&ctx.matches)) - .unwrap_or_else(|| Cow::from(Resource::get_matches(tables, &key_expr))); - - let master = tables.whatami != WhatAmI::Router - || !hat!(tables).full_net(WhatAmI::Peer) - || *hat!(tables).elect_router(&tables.zid, &key_expr, hat!(tables).shared_nodes.iter()) - == tables.zid; - - for mres in matches.iter() { - let mres = mres.upgrade().unwrap(); - let complete = DEFAULT_INCLUDER.includes(mres.expr().as_bytes(), key_expr.as_bytes()); - if tables.whatami == WhatAmI::Router { - if master || source_type == WhatAmI::Router { - let net = hat!(tables).routers_net.as_ref().unwrap(); - let router_source = match source_type { - WhatAmI::Router => source, - _ => net.idx.index() as RoutingContext, - }; - insert_target_for_qabls( - &mut route, - expr, - tables, - net, - router_source, - &res_hat!(mres).router_qabls, - complete, - ); + log::trace!( + "compute_query_route({}, {:?}, {:?})", + key_expr, + source, + source_type + ); + let key_expr = match OwnedKeyExpr::try_from(key_expr) { + Ok(ke) => ke, + Err(e) => { + log::warn!("Invalid KE reached the system: {}", e); + return EMPTY_ROUTE.clone(); } + }; + let res = Resource::get_resource(expr.prefix, expr.suffix); + let matches = res + .as_ref() + .and_then(|res| res.context.as_ref()) + .map(|ctx| Cow::from(&ctx.matches)) + .unwrap_or_else(|| Cow::from(Resource::get_matches(tables, &key_expr))); + + let master = tables.whatami != WhatAmI::Router + || !hat!(tables).full_net(WhatAmI::Peer) + || *hat!(tables).elect_router(&tables.zid, &key_expr, hat!(tables).shared_nodes.iter()) + == tables.zid; + + for mres in matches.iter() { + let mres = mres.upgrade().unwrap(); + let complete = DEFAULT_INCLUDER.includes(mres.expr().as_bytes(), key_expr.as_bytes()); + if tables.whatami == WhatAmI::Router { + if master || source_type == WhatAmI::Router { + let net = hat!(tables).routers_net.as_ref().unwrap(); + let router_source = match source_type { + WhatAmI::Router => source, + _ => net.idx.index() as RoutingContext, + }; + insert_target_for_qabls( + &mut route, + expr, + tables, + net, + router_source, + &res_hat!(mres).router_qabls, + complete, + ); + } - if (master || source_type != WhatAmI::Router) && hat!(tables).full_net(WhatAmI::Peer) { + if (master || source_type != WhatAmI::Router) + && hat!(tables).full_net(WhatAmI::Peer) + { + let net = hat!(tables).peers_net.as_ref().unwrap(); + let peer_source = match source_type { + WhatAmI::Peer => source, + _ => net.idx.index() as RoutingContext, + }; + insert_target_for_qabls( + &mut route, + expr, + tables, + net, + peer_source, + &res_hat!(mres).peer_qabls, + complete, + ); + } + } + + if tables.whatami == WhatAmI::Peer && hat!(tables).full_net(WhatAmI::Peer) { let net = hat!(tables).peers_net.as_ref().unwrap(); let peer_source = match source_type { - WhatAmI::Peer => source, + WhatAmI::Router | WhatAmI::Peer => source, _ => net.idx.index() as RoutingContext, }; insert_target_for_qabls( @@ -1486,186 +1511,89 @@ 
pub(crate) fn compute_query_route( complete, ); } - } - - if tables.whatami == WhatAmI::Peer && hat!(tables).full_net(WhatAmI::Peer) { - let net = hat!(tables).peers_net.as_ref().unwrap(); - let peer_source = match source_type { - WhatAmI::Router | WhatAmI::Peer => source, - _ => net.idx.index() as RoutingContext, - }; - insert_target_for_qabls( - &mut route, - expr, - tables, - net, - peer_source, - &res_hat!(mres).peer_qabls, - complete, - ); - } - if tables.whatami != WhatAmI::Router || master || source_type == WhatAmI::Router { - for (sid, context) in &mres.session_ctxs { - if match tables.whatami { - WhatAmI::Router => context.face.whatami != WhatAmI::Router, - _ => source_type == WhatAmI::Client || context.face.whatami == WhatAmI::Client, - } { - let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, *sid); - if let Some(qabl_info) = context.qabl.as_ref() { - route.push(QueryTargetQabl { - direction: ( - context.face.clone(), - key_expr.to_owned(), - RoutingContext::default(), - ), - complete: if complete { - qabl_info.complete as u64 - } else { - 0 - }, - distance: 0.5, - }); + if tables.whatami != WhatAmI::Router || master || source_type == WhatAmI::Router { + for (sid, context) in &mres.session_ctxs { + if match tables.whatami { + WhatAmI::Router => context.face.whatami != WhatAmI::Router, + _ => { + source_type == WhatAmI::Client + || context.face.whatami == WhatAmI::Client + } + } { + let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, *sid); + if let Some(qabl_info) = context.qabl.as_ref() { + route.push(QueryTargetQabl { + direction: ( + context.face.clone(), + key_expr.to_owned(), + RoutingContext::default(), + ), + complete: if complete { + qabl_info.complete as u64 + } else { + 0 + }, + distance: 0.5, + }); + } } } } } + route.sort_by_key(|qabl| OrderedFloat(qabl.distance)); + Arc::new(route) } - route.sort_by_key(|qabl| OrderedFloat(qabl.distance)); - Arc::new(route) -} -#[inline] -pub(crate) fn compute_local_replies( - tables: &Tables, - prefix: &Arc, - suffix: &str, - face: &Arc, -) -> Vec<(WireExpr<'static>, ZBuf)> { - let mut result = vec![]; - // Only the first routing point in the query route - // should return the liveliness tokens - if face.whatami == WhatAmI::Client { - let key_expr = prefix.expr() + suffix; - let key_expr = match OwnedKeyExpr::try_from(key_expr) { - Ok(ke) => ke, - Err(e) => { - log::warn!("Invalid KE reached the system: {}", e); - return result; - } - }; - if key_expr.starts_with(PREFIX_LIVELINESS) { - let res = Resource::get_resource(prefix, suffix); - let matches = res - .as_ref() - .and_then(|res| res.context.as_ref()) - .map(|ctx| Cow::from(&ctx.matches)) - .unwrap_or_else(|| Cow::from(Resource::get_matches(tables, &key_expr))); - for mres in matches.iter() { - let mres = mres.upgrade().unwrap(); - if (mres.context.is_some() - && (!res_hat!(mres).router_subs.is_empty() - || !res_hat!(mres).peer_subs.is_empty())) - || mres.session_ctxs.values().any(|ctx| ctx.subs.is_some()) - { - result.push((Resource::get_best_key(&mres, "", face.id), ZBuf::default())); + #[inline] + fn compute_local_replies( + &self, + tables: &Tables, + prefix: &Arc, + suffix: &str, + face: &Arc, + ) -> Vec<(WireExpr<'static>, ZBuf)> { + let mut result = vec![]; + // Only the first routing point in the query route + // should return the liveliness tokens + if face.whatami == WhatAmI::Client { + let key_expr = prefix.expr() + suffix; + let key_expr = match OwnedKeyExpr::try_from(key_expr) { + Ok(ke) => ke, + Err(e) => { + log::warn!("Invalid KE reached 
the system: {}", e); + return result; + } + }; + if key_expr.starts_with(PREFIX_LIVELINESS) { + let res = Resource::get_resource(prefix, suffix); + let matches = res + .as_ref() + .and_then(|res| res.context.as_ref()) + .map(|ctx| Cow::from(&ctx.matches)) + .unwrap_or_else(|| Cow::from(Resource::get_matches(tables, &key_expr))); + for mres in matches.iter() { + let mres = mres.upgrade().unwrap(); + if (mres.context.is_some() + && (!res_hat!(mres).router_subs.is_empty() + || !res_hat!(mres).peer_subs.is_empty())) + || mres.session_ctxs.values().any(|ctx| ctx.subs.is_some()) + { + result.push((Resource::get_best_key(&mres, "", face.id), ZBuf::default())); + } } } } + result } - result -} - -pub(crate) fn compute_query_routes_(tables: &Tables, res: &Arc) -> QueryRoutes { - let mut routes = QueryRoutes { - routers_query_routes: vec![], - peers_query_routes: vec![], - peer_query_route: None, - client_query_route: None, - }; - let mut expr = RoutingExpr::new(res, ""); - if tables.whatami == WhatAmI::Router { - let indexes = hat!(tables) - .routers_net - .as_ref() - .unwrap() - .graph - .node_indices() - .collect::>(); - let max_idx = indexes.iter().max().unwrap(); - routes - .routers_query_routes - .resize_with(max_idx.index() + 1, || Arc::new(QueryTargetQablSet::new())); - - for idx in &indexes { - routes.routers_query_routes[idx.index()] = compute_query_route( - tables, - &mut expr, - idx.index() as RoutingContext, - WhatAmI::Router, - ); - } - - routes.peer_query_route = Some(compute_query_route( - tables, - &mut expr, - RoutingContext::default(), - WhatAmI::Peer, - )); - } - if (tables.whatami == WhatAmI::Router || tables.whatami == WhatAmI::Peer) - && hat!(tables).full_net(WhatAmI::Peer) - { - let indexes = hat!(tables) - .peers_net - .as_ref() - .unwrap() - .graph - .node_indices() - .collect::>(); - let max_idx = indexes.iter().max().unwrap(); - routes - .peers_query_routes - .resize_with(max_idx.index() + 1, || Arc::new(QueryTargetQablSet::new())); - for idx in &indexes { - routes.peers_query_routes[idx.index()] = compute_query_route( - tables, - &mut expr, - idx.index() as RoutingContext, - WhatAmI::Peer, - ); - } - } - if tables.whatami == WhatAmI::Peer && !hat!(tables).full_net(WhatAmI::Peer) { - routes.client_query_route = Some(compute_query_route( - tables, - &mut expr, - RoutingContext::default(), - WhatAmI::Client, - )); - routes.peer_query_route = Some(compute_query_route( - tables, - &mut expr, - RoutingContext::default(), - WhatAmI::Peer, - )); - } - if tables.whatami == WhatAmI::Client { - routes.client_query_route = Some(compute_query_route( - tables, - &mut expr, - RoutingContext::default(), - WhatAmI::Client, - )); - } - routes -} - -pub(crate) fn compute_query_routes(tables: &mut Tables, res: &mut Arc) { - if res.context.is_some() { - let mut res_mut = res.clone(); - let res_mut = get_mut_unchecked(&mut res_mut); + fn compute_query_routes_(&self, tables: &Tables, res: &Arc) -> QueryRoutes { + let mut routes = QueryRoutes { + routers_query_routes: vec![], + peers_query_routes: vec![], + peer_query_route: None, + client_query_route: None, + }; let mut expr = RoutingExpr::new(res, ""); if tables.whatami == WhatAmI::Router { let indexes = hat!(tables) @@ -1676,13 +1604,12 @@ pub(crate) fn compute_query_routes(tables: &mut Tables, res: &mut Arc) .node_indices() .collect::>(); let max_idx = indexes.iter().max().unwrap(); - let routers_query_routes = &mut res_mut.context_mut().routers_query_routes; - routers_query_routes.clear(); - routers_query_routes + routes + 
.routers_query_routes .resize_with(max_idx.index() + 1, || Arc::new(QueryTargetQablSet::new())); for idx in &indexes { - routers_query_routes[idx.index()] = compute_query_route( + routes.routers_query_routes[idx.index()] = self.compute_query_route( tables, &mut expr, idx.index() as RoutingContext, @@ -1690,7 +1617,7 @@ pub(crate) fn compute_query_routes(tables: &mut Tables, res: &mut Arc) ); } - res_mut.context_mut().peer_query_route = Some(compute_query_route( + routes.peer_query_route = Some(self.compute_query_route( tables, &mut expr, RoutingContext::default(), @@ -1708,13 +1635,12 @@ pub(crate) fn compute_query_routes(tables: &mut Tables, res: &mut Arc) .node_indices() .collect::>(); let max_idx = indexes.iter().max().unwrap(); - let peers_query_routes = &mut res_mut.context_mut().peers_query_routes; - peers_query_routes.clear(); - peers_query_routes + routes + .peers_query_routes .resize_with(max_idx.index() + 1, || Arc::new(QueryTargetQablSet::new())); for idx in &indexes { - peers_query_routes[idx.index()] = compute_query_route( + routes.peers_query_routes[idx.index()] = self.compute_query_route( tables, &mut expr, idx.index() as RoutingContext, @@ -1723,13 +1649,13 @@ pub(crate) fn compute_query_routes(tables: &mut Tables, res: &mut Arc) } } if tables.whatami == WhatAmI::Peer && !hat!(tables).full_net(WhatAmI::Peer) { - res_mut.context_mut().client_query_route = Some(compute_query_route( + routes.client_query_route = Some(self.compute_query_route( tables, &mut expr, RoutingContext::default(), WhatAmI::Client, )); - res_mut.context_mut().peer_query_route = Some(compute_query_route( + routes.peer_query_route = Some(self.compute_query_route( tables, &mut expr, RoutingContext::default(), @@ -1737,12 +1663,98 @@ pub(crate) fn compute_query_routes(tables: &mut Tables, res: &mut Arc) )); } if tables.whatami == WhatAmI::Client { - res_mut.context_mut().client_query_route = Some(compute_query_route( + routes.client_query_route = Some(self.compute_query_route( tables, &mut expr, RoutingContext::default(), WhatAmI::Client, )); } + routes + } + + fn compute_query_routes(&self, tables: &mut Tables, res: &mut Arc) { + if res.context.is_some() { + let mut res_mut = res.clone(); + let res_mut = get_mut_unchecked(&mut res_mut); + let mut expr = RoutingExpr::new(res, ""); + if tables.whatami == WhatAmI::Router { + let indexes = hat!(tables) + .routers_net + .as_ref() + .unwrap() + .graph + .node_indices() + .collect::>(); + let max_idx = indexes.iter().max().unwrap(); + let routers_query_routes = &mut res_mut.context_mut().routers_query_routes; + routers_query_routes.clear(); + routers_query_routes + .resize_with(max_idx.index() + 1, || Arc::new(QueryTargetQablSet::new())); + + for idx in &indexes { + routers_query_routes[idx.index()] = self.compute_query_route( + tables, + &mut expr, + idx.index() as RoutingContext, + WhatAmI::Router, + ); + } + + res_mut.context_mut().peer_query_route = Some(self.compute_query_route( + tables, + &mut expr, + RoutingContext::default(), + WhatAmI::Peer, + )); + } + if (tables.whatami == WhatAmI::Router || tables.whatami == WhatAmI::Peer) + && hat!(tables).full_net(WhatAmI::Peer) + { + let indexes = hat!(tables) + .peers_net + .as_ref() + .unwrap() + .graph + .node_indices() + .collect::>(); + let max_idx = indexes.iter().max().unwrap(); + let peers_query_routes = &mut res_mut.context_mut().peers_query_routes; + peers_query_routes.clear(); + peers_query_routes + .resize_with(max_idx.index() + 1, || Arc::new(QueryTargetQablSet::new())); + + for idx in &indexes { + 
peers_query_routes[idx.index()] = self.compute_query_route( + tables, + &mut expr, + idx.index() as RoutingContext, + WhatAmI::Peer, + ); + } + } + if tables.whatami == WhatAmI::Peer && !hat!(tables).full_net(WhatAmI::Peer) { + res_mut.context_mut().client_query_route = Some(self.compute_query_route( + tables, + &mut expr, + RoutingContext::default(), + WhatAmI::Client, + )); + res_mut.context_mut().peer_query_route = Some(self.compute_query_route( + tables, + &mut expr, + RoutingContext::default(), + WhatAmI::Peer, + )); + } + if tables.whatami == WhatAmI::Client { + res_mut.context_mut().client_query_route = Some(self.compute_query_route( + tables, + &mut expr, + RoutingContext::default(), + WhatAmI::Client, + )); + } + } } } diff --git a/zenoh/src/net/routing/router.rs b/zenoh/src/net/routing/router.rs index 833720c58c..96f457621a 100644 --- a/zenoh/src/net/routing/router.rs +++ b/zenoh/src/net/routing/router.rs @@ -17,11 +17,8 @@ pub use super::dispatcher::queries::*; pub use super::dispatcher::resource::*; use super::dispatcher::tables::Tables; use super::dispatcher::tables::TablesLock; -use super::hat::closing; -use super::hat::init; -use super::hat::new_transport_unicast; +use super::hat::HatCode; use super::runtime::Runtime; -use crate::net::routing::hat::handle_oam; use std::any::Any; use std::str::FromStr; use std::sync::Arc; @@ -63,7 +60,7 @@ impl Router { router_peers_failover_brokering, queries_default_timeout, )), - ctrl_lock: Mutex::new(()), + ctrl_lock: Box::new(Mutex::new(HatCode {})), queries_lock: RwLock::new(()), }), } @@ -80,8 +77,10 @@ impl Router { gossip_multihop: bool, autoconnect: WhatAmIMatcher, ) { - init( - &self.tables, + let ctrl_lock = zlock!(self.tables.ctrl_lock); + let mut tables = zwrite!(self.tables.tables); + ctrl_lock.init( + &mut tables, runtime, router_full_linkstate, peer_full_linkstate, @@ -98,10 +97,8 @@ impl Router { state: { let ctrl_lock = zlock!(self.tables.ctrl_lock); let mut tables = zwrite!(self.tables.tables); - let zid = tables.zid; - let face = tables - .open_face(zid, WhatAmI::Client, primitives) - .upgrade() + let face = ctrl_lock + .new_local_face(&mut tables, &self.tables, primitives) .unwrap(); drop(tables); drop(ctrl_lock); @@ -114,12 +111,18 @@ impl Router { &self, transport: TransportUnicast, ) -> ZResult> { + let ctrl_lock = zlock!(self.tables.ctrl_lock); + let mut tables = zwrite!(self.tables.tables); Ok(Arc::new(LinkStateInterceptor::new( transport.clone(), self.tables.clone(), Face { tables: self.tables.clone(), - state: new_transport_unicast(&self.tables, transport)?, + state: ctrl_lock.new_transport_unicast_face( + &mut tables, + &self.tables, + transport, + )?, }, ))) } @@ -128,6 +131,7 @@ impl Router { let mut tables = zwrite!(self.tables.tables); let fid = tables.face_counter; tables.face_counter += 1; + let hat_face = tables.hat_code.new_face(); tables.mcast_groups.push(FaceState::new( fid, ZenohId::from_str("1").unwrap(), @@ -137,6 +141,7 @@ impl Router { Arc::new(McastMux::new(transport.clone())), 0, Some(transport), + hat_face, )); // recompute routes @@ -162,6 +167,7 @@ impl Router { Arc::new(DummyPrimitives), 0, Some(transport), + tables.hat_code.new_face(), ); tables.mcast_faces.push(face_state.clone()); @@ -197,7 +203,11 @@ impl TransportPeerEventHandler for LinkStateInterceptor { fn handle_message(&self, msg: NetworkMessage) -> ZResult<()> { log::trace!("Recv {:?}", msg); match msg.body { - NetworkBody::OAM(oam) => handle_oam(&self.tables, oam, &self.transport), + NetworkBody::OAM(oam) => { + let ctrl_lock = 
zlock!(self.tables.ctrl_lock); + let mut tables = zwrite!(self.tables.tables); + ctrl_lock.handle_oam(&mut tables, &self.tables, oam, &self.transport) + } _ => self.demux.handle_message(msg), } } @@ -208,7 +218,9 @@ impl TransportPeerEventHandler for LinkStateInterceptor { fn closing(&self) { self.demux.closing(); - let _ = closing(&self.tables, &self.transport); + let ctrl_lock = zlock!(self.tables.ctrl_lock); + let mut tables = zwrite!(self.tables.tables); + let _ = ctrl_lock.closing(&mut tables, &self.tables, &self.transport); } fn closed(&self) {} From 002db77bfddcea63af55e4bbc0e20d60b3c7ed29 Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Wed, 15 Nov 2023 12:24:50 +0100 Subject: [PATCH 009/122] Abstract missing close_face fn --- zenoh/src/net/routing/dispatcher/tables.rs | 2 +- zenoh/src/net/routing/hat/mod.rs | 214 ++++++++++----------- 2 files changed, 108 insertions(+), 108 deletions(-) diff --git a/zenoh/src/net/routing/dispatcher/tables.rs b/zenoh/src/net/routing/dispatcher/tables.rs index 54867b493e..af07713220 100644 --- a/zenoh/src/net/routing/dispatcher/tables.rs +++ b/zenoh/src/net/routing/dispatcher/tables.rs @@ -155,7 +155,7 @@ pub fn close_face(tables: &TablesLock, face: &Weak) { Some(mut face) => { log::debug!("Close {}", face); finalize_pending_queries(tables, &mut face); - super::super::hat::close_face(tables, &mut face); + zlock!(tables.ctrl_lock).close_face(tables, &mut face); } None => log::error!("Face already closed!"), } diff --git a/zenoh/src/net/routing/hat/mod.rs b/zenoh/src/net/routing/hat/mod.rs index cef5816aa2..3c627f234c 100644 --- a/zenoh/src/net/routing/hat/mod.rs +++ b/zenoh/src/net/routing/hat/mod.rs @@ -64,9 +64,9 @@ use zenoh_result::ZResult; use zenoh_sync::get_mut_unchecked; use zenoh_transport::{Mux, Primitives, TransportUnicast}; -pub mod network; -pub mod pubsub; -pub mod queries; +mod network; +mod pubsub; +mod queries; zconfigurable! 
{ static ref TREES_COMPUTATION_DELAY: u64 = 100; @@ -386,6 +386,108 @@ impl HatBaseTrait for HatCode { Ok(newface) } + fn close_face(&self, tables: &TablesLock, face: &mut Arc) { + let mut wtables = zwrite!(tables.tables); + let mut face_clone = face.clone(); + let face = get_mut_unchecked(face); + for res in face.remote_mappings.values_mut() { + get_mut_unchecked(res).session_ctxs.remove(&face.id); + Resource::clean(res); + } + face.remote_mappings.clear(); + for res in face.local_mappings.values_mut() { + get_mut_unchecked(res).session_ctxs.remove(&face.id); + Resource::clean(res); + } + face.local_mappings.clear(); + + let mut subs_matches = vec![]; + for mut res in face + .hat + .downcast_mut::() + .unwrap() + .remote_subs + .drain() + { + get_mut_unchecked(&mut res).session_ctxs.remove(&face.id); + undeclare_client_subscription(&mut wtables, &mut face_clone, &mut res); + + if res.context.is_some() { + for match_ in &res.context().matches { + let mut match_ = match_.upgrade().unwrap(); + if !Arc::ptr_eq(&match_, &res) { + get_mut_unchecked(&mut match_) + .context_mut() + .valid_data_routes = false; + subs_matches.push(match_); + } + } + get_mut_unchecked(&mut res).context_mut().valid_data_routes = false; + subs_matches.push(res); + } + } + + let mut qabls_matches = vec![]; + for mut res in face + .hat + .downcast_mut::() + .unwrap() + .remote_qabls + .drain() + { + get_mut_unchecked(&mut res).session_ctxs.remove(&face.id); + undeclare_client_queryable(&mut wtables, &mut face_clone, &mut res); + + if res.context.is_some() { + for match_ in &res.context().matches { + let mut match_ = match_.upgrade().unwrap(); + if !Arc::ptr_eq(&match_, &res) { + get_mut_unchecked(&mut match_) + .context_mut() + .valid_query_routes = false; + qabls_matches.push(match_); + } + } + get_mut_unchecked(&mut res).context_mut().valid_query_routes = false; + qabls_matches.push(res); + } + } + drop(wtables); + + let mut matches_data_routes = vec![]; + let mut matches_query_routes = vec![]; + let rtables = zread!(tables.tables); + for _match in subs_matches.drain(..) { + matches_data_routes.push(( + _match.clone(), + rtables.hat_code.compute_data_routes_(&rtables, &_match), + )); + } + for _match in qabls_matches.drain(..) 
{ + matches_query_routes.push(( + _match.clone(), + rtables.hat_code.compute_query_routes_(&rtables, &_match), + )); + } + drop(rtables); + + let mut wtables = zwrite!(tables.tables); + for (mut res, data_routes) in matches_data_routes { + get_mut_unchecked(&mut res) + .context_mut() + .update_data_routes(data_routes); + Resource::clean(&mut res); + } + for (mut res, query_routes) in matches_query_routes { + get_mut_unchecked(&mut res) + .context_mut() + .update_query_routes(query_routes); + Resource::clean(&mut res); + } + wtables.faces.remove(&face.id); + drop(wtables); + } + fn handle_oam( &self, tables: &mut Tables, @@ -672,110 +774,6 @@ impl HatFace { } } -pub(super) fn close_face(tables: &TablesLock, face: &mut Arc) { - let ctrl_lock = zlock!(tables.ctrl_lock); - let mut wtables = zwrite!(tables.tables); - let mut face_clone = face.clone(); - let face = get_mut_unchecked(face); - for res in face.remote_mappings.values_mut() { - get_mut_unchecked(res).session_ctxs.remove(&face.id); - Resource::clean(res); - } - face.remote_mappings.clear(); - for res in face.local_mappings.values_mut() { - get_mut_unchecked(res).session_ctxs.remove(&face.id); - Resource::clean(res); - } - face.local_mappings.clear(); - - let mut subs_matches = vec![]; - for mut res in face - .hat - .downcast_mut::() - .unwrap() - .remote_subs - .drain() - { - get_mut_unchecked(&mut res).session_ctxs.remove(&face.id); - undeclare_client_subscription(&mut wtables, &mut face_clone, &mut res); - - if res.context.is_some() { - for match_ in &res.context().matches { - let mut match_ = match_.upgrade().unwrap(); - if !Arc::ptr_eq(&match_, &res) { - get_mut_unchecked(&mut match_) - .context_mut() - .valid_data_routes = false; - subs_matches.push(match_); - } - } - get_mut_unchecked(&mut res).context_mut().valid_data_routes = false; - subs_matches.push(res); - } - } - - let mut qabls_matches = vec![]; - for mut res in face - .hat - .downcast_mut::() - .unwrap() - .remote_qabls - .drain() - { - get_mut_unchecked(&mut res).session_ctxs.remove(&face.id); - undeclare_client_queryable(&mut wtables, &mut face_clone, &mut res); - - if res.context.is_some() { - for match_ in &res.context().matches { - let mut match_ = match_.upgrade().unwrap(); - if !Arc::ptr_eq(&match_, &res) { - get_mut_unchecked(&mut match_) - .context_mut() - .valid_query_routes = false; - qabls_matches.push(match_); - } - } - get_mut_unchecked(&mut res).context_mut().valid_query_routes = false; - qabls_matches.push(res); - } - } - drop(wtables); - - let mut matches_data_routes = vec![]; - let mut matches_query_routes = vec![]; - let rtables = zread!(tables.tables); - for _match in subs_matches.drain(..) { - matches_data_routes.push(( - _match.clone(), - rtables.hat_code.compute_data_routes_(&rtables, &_match), - )); - } - for _match in qabls_matches.drain(..) { - matches_query_routes.push(( - _match.clone(), - rtables.hat_code.compute_query_routes_(&rtables, &_match), - )); - } - drop(rtables); - - let mut wtables = zwrite!(tables.tables); - for (mut res, data_routes) in matches_data_routes { - get_mut_unchecked(&mut res) - .context_mut() - .update_data_routes(data_routes); - Resource::clean(&mut res); - } - for (mut res, query_routes) in matches_query_routes { - get_mut_unchecked(&mut res) - .context_mut() - .update_query_routes(query_routes); - Resource::clean(&mut res); - } - wtables.faces.remove(&face.id); - drop(wtables); - drop(ctrl_lock); -} - #[macro_export] macro_rules! 
hat { ($t:expr) => { @@ -953,6 +951,8 @@ pub(crate) trait HatBaseTrait { tables_ref: &Arc, transport: &TransportUnicast, ) -> ZResult<()>; + + fn close_face(&self, tables: &TablesLock, face: &mut Arc); } pub(crate) trait HatPubSubTrait { From a33fcf138627f245f7273892b7679a7d8bd2b2bf Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Wed, 15 Nov 2023 12:39:58 +0100 Subject: [PATCH 010/122] Duplicate hat --- zenoh/src/net/routing/dispatcher/tables.rs | 9 +- zenoh/src/net/routing/hat/client/mod.rs | 885 +++++++++ .../net/routing/hat/{ => client}/network.rs | 0 .../net/routing/hat/{ => client}/pubsub.rs | 15 +- .../net/routing/hat/{ => client}/queries.rs | 13 +- zenoh/src/net/routing/hat/mod.rs | 859 +------- zenoh/src/net/routing/hat/peer/mod.rs | 885 +++++++++ zenoh/src/net/routing/hat/peer/network.rs | 1011 ++++++++++ zenoh/src/net/routing/hat/peer/pubsub.rs | 1619 +++++++++++++++ zenoh/src/net/routing/hat/peer/queries.rs | 1761 +++++++++++++++++ zenoh/src/net/routing/hat/router/mod.rs | 885 +++++++++ zenoh/src/net/routing/hat/router/network.rs | 1011 ++++++++++ zenoh/src/net/routing/hat/router/pubsub.rs | 1619 +++++++++++++++ zenoh/src/net/routing/hat/router/queries.rs | 1761 +++++++++++++++++ zenoh/src/net/routing/router.rs | 4 +- 15 files changed, 11475 insertions(+), 862 deletions(-) create mode 100644 zenoh/src/net/routing/hat/client/mod.rs rename zenoh/src/net/routing/hat/{ => client}/network.rs (100%) rename zenoh/src/net/routing/hat/{ => client}/pubsub.rs (99%) rename zenoh/src/net/routing/hat/{ => client}/queries.rs (99%) create mode 100644 zenoh/src/net/routing/hat/peer/mod.rs create mode 100644 zenoh/src/net/routing/hat/peer/network.rs create mode 100644 zenoh/src/net/routing/hat/peer/pubsub.rs create mode 100644 zenoh/src/net/routing/hat/peer/queries.rs create mode 100644 zenoh/src/net/routing/hat/router/mod.rs create mode 100644 zenoh/src/net/routing/hat/router/network.rs create mode 100644 zenoh/src/net/routing/hat/router/pubsub.rs create mode 100644 zenoh/src/net/routing/hat/router/queries.rs diff --git a/zenoh/src/net/routing/dispatcher/tables.rs b/zenoh/src/net/routing/dispatcher/tables.rs index af07713220..6559296fbb 100644 --- a/zenoh/src/net/routing/dispatcher/tables.rs +++ b/zenoh/src/net/routing/dispatcher/tables.rs @@ -1,4 +1,3 @@ -use crate::net::routing::hat::HatBaseTrait; // // Copyright (c) 2023 ZettaScale Technology // @@ -16,7 +15,7 @@ use super::face::FaceState; pub use super::pubsub::*; pub use super::queries::*; pub use super::resource::*; -use crate::net::routing::hat::HatCode; +use crate::net::routing::hat; use crate::net::routing::hat::HatTrait; use std::any::Any; use std::collections::HashMap; @@ -83,7 +82,7 @@ impl Tables { router_peers_failover_brokering: bool, _queries_default_timeout: Duration, ) -> Self { - let hat_code = Arc::new(HatCode {}); + let hat_code = hat::new_hat(whatami); Tables { zid, whatami, @@ -98,7 +97,7 @@ impl Tables { mcast_faces: vec![], pull_caches_lock: Mutex::new(()), hat: hat_code.new_tables(router_peers_failover_brokering), - hat_code, + hat_code: hat_code.into(), } } @@ -163,6 +162,6 @@ pub fn close_face(tables: &TablesLock, face: &Weak) { pub struct TablesLock { pub tables: RwLock, - pub(crate) ctrl_lock: Box>, + pub(crate) ctrl_lock: Mutex>, pub queries_lock: RwLock<()>, } diff --git a/zenoh/src/net/routing/hat/client/mod.rs b/zenoh/src/net/routing/hat/client/mod.rs new file mode 100644 index 0000000000..d84128a037 --- /dev/null +++ b/zenoh/src/net/routing/hat/client/mod.rs @@ -0,0 +1,885 @@ +// +// Copyright (c) 2023 
ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +//! ⚠️ WARNING ⚠️ +//! +//! This module is intended for Zenoh's internal use. +//! +//! [Click here for Zenoh's documentation](../zenoh/index.html) +use self::{ + network::{Network, shared_nodes}, + pubsub::{pubsub_linkstate_change, pubsub_new_face, pubsub_remove_node, undeclare_client_subscription}, + queries::{queries_linkstate_change, queries_new_face, queries_remove_node, undeclare_client_queryable}, +}; +use super::{super::dispatcher::{ + face::FaceState, + tables::{ + Resource, RoutingContext, + RoutingExpr, Tables, TablesLock, + }, +}, HatBaseTrait, HatTrait}; +use crate::{ + net::{ + codec::Zenoh080Routing, + protocol::linkstate::LinkStateList, + }, + runtime::Runtime, +}; +use async_std::task::JoinHandle; +use std::{ + any::Any, + collections::{hash_map::DefaultHasher, HashMap, HashSet}, + hash::Hasher, + sync::Arc, +}; +use zenoh_config::{WhatAmI, WhatAmIMatcher, ZenohId}; +use zenoh_protocol::{ + common::ZExtBody, + network::{ + declare::queryable::ext::QueryableInfo, + oam::id::OAM_LINKSTATE, + Oam, + }, +}; +use zenoh_result::ZResult; +use zenoh_sync::get_mut_unchecked; +use zenoh_transport::{Mux, Primitives, TransportUnicast}; + +mod network; +mod pubsub; +mod queries; + +zconfigurable! { + static ref TREES_COMPUTATION_DELAY: u64 = 100; +} + + + +macro_rules! hat { + ($t:expr) => { + $t.hat.downcast_ref::().unwrap() + }; +} +use hat; + +macro_rules! hat_mut { + ($t:expr) => { + $t.hat.downcast_mut::().unwrap() + }; +} +use hat_mut; + +macro_rules! res_hat { + ($r:expr) => { + $r.context().hat.downcast_ref::().unwrap() + }; +} +use res_hat; + +macro_rules! res_hat_mut { + ($r:expr) => { + get_mut_unchecked($r) + .context_mut() + .hat + .downcast_mut::() + .unwrap() + }; +} +use res_hat_mut; + +macro_rules! face_hat { + ($f:expr) => { + $f.hat.downcast_ref::().unwrap() + }; +} +use face_hat; + +macro_rules! 
face_hat_mut { + ($f:expr) => { + get_mut_unchecked($f).hat.downcast_mut::().unwrap() + }; +} +use face_hat_mut; + + + +struct HatTables { + router_subs: HashSet>, + peer_subs: HashSet>, + router_qabls: HashSet>, + peer_qabls: HashSet>, + routers_net: Option, + peers_net: Option, + shared_nodes: Vec, + routers_trees_task: Option>, + peers_trees_task: Option>, + router_peers_failover_brokering: bool, +} + +impl HatTables { + fn new(router_peers_failover_brokering: bool) -> Self { + Self { + router_subs: HashSet::new(), + peer_subs: HashSet::new(), + router_qabls: HashSet::new(), + peer_qabls: HashSet::new(), + routers_net: None, + peers_net: None, + shared_nodes: vec![], + routers_trees_task: None, + peers_trees_task: None, + router_peers_failover_brokering, + } + } + + #[inline] + fn get_net(&self, net_type: WhatAmI) -> Option<&Network> { + match net_type { + WhatAmI::Router => self.routers_net.as_ref(), + WhatAmI::Peer => self.peers_net.as_ref(), + _ => None, + } + } + + #[inline] + fn full_net(&self, net_type: WhatAmI) -> bool { + match net_type { + WhatAmI::Router => self + .routers_net + .as_ref() + .map(|net| net.full_linkstate) + .unwrap_or(false), + WhatAmI::Peer => self + .peers_net + .as_ref() + .map(|net| net.full_linkstate) + .unwrap_or(false), + _ => false, + } + } + + #[inline] + fn get_router_links(&self, peer: ZenohId) -> impl Iterator + '_ { + self.peers_net + .as_ref() + .unwrap() + .get_links(peer) + .iter() + .filter(move |nid| { + if let Some(node) = self.routers_net.as_ref().unwrap().get_node(nid) { + node.whatami.unwrap_or(WhatAmI::Router) == WhatAmI::Router + } else { + false + } + }) + } + + #[inline] + fn elect_router<'a>( + &'a self, + self_zid: &'a ZenohId, + key_expr: &str, + mut routers: impl Iterator, + ) -> &'a ZenohId { + match routers.next() { + None => self_zid, + Some(router) => { + let hash = |r: &ZenohId| { + let mut hasher = DefaultHasher::new(); + for b in key_expr.as_bytes() { + hasher.write_u8(*b); + } + for b in &r.to_le_bytes()[..r.size()] { + hasher.write_u8(*b); + } + hasher.finish() + }; + let mut res = router; + let mut h = None; + for router2 in routers { + let h2 = hash(router2); + if h2 > *h.get_or_insert_with(|| hash(res)) { + res = router2; + h = Some(h2); + } + } + res + } + } + } + + #[inline] + fn failover_brokering_to(source_links: &[ZenohId], dest: ZenohId) -> bool { + // if source_links is empty then gossip is probably disabled in source peer + !source_links.is_empty() && !source_links.contains(&dest) + } + + #[inline] + fn failover_brokering(&self, peer1: ZenohId, peer2: ZenohId) -> bool { + self.router_peers_failover_brokering + && self + .peers_net + .as_ref() + .map(|net| HatTables::failover_brokering_to(net.get_links(peer1), peer2)) + .unwrap_or(false) + } + + fn schedule_compute_trees(&mut self, tables_ref: Arc, net_type: WhatAmI) { + log::trace!("Schedule computations"); + if (net_type == WhatAmI::Router && self.routers_trees_task.is_none()) + || (net_type == WhatAmI::Peer && self.peers_trees_task.is_none()) + { + let task = Some(async_std::task::spawn(async move { + async_std::task::sleep(std::time::Duration::from_millis(*TREES_COMPUTATION_DELAY)) + .await; + let mut tables = zwrite!(tables_ref.tables); + + log::trace!("Compute trees"); + let new_childs = match net_type { + WhatAmI::Router => hat_mut!(tables) + .routers_net + .as_mut() + .unwrap() + .compute_trees(), + _ => hat_mut!(tables).peers_net.as_mut().unwrap().compute_trees(), + }; + + log::trace!("Compute routes"); + pubsub::pubsub_tree_change(&mut tables, 
&new_childs, net_type); + queries::queries_tree_change(&mut tables, &new_childs, net_type); + + log::trace!("Computations completed"); + match net_type { + WhatAmI::Router => hat_mut!(tables).routers_trees_task = None, + _ => hat_mut!(tables).peers_trees_task = None, + }; + })); + match net_type { + WhatAmI::Router => self.routers_trees_task = task, + _ => self.peers_trees_task = task, + }; + } + } +} + +pub(crate) struct HatCode {} + +impl HatBaseTrait for HatCode { + fn init( + &self, + tables: &mut Tables, + runtime: Runtime, + router_full_linkstate: bool, + peer_full_linkstate: bool, + router_peers_failover_brokering: bool, + gossip: bool, + gossip_multihop: bool, + autoconnect: WhatAmIMatcher, + ) { + if router_full_linkstate | gossip { + hat_mut!(tables).routers_net = Some(Network::new( + "[Routers network]".to_string(), + tables.zid, + runtime.clone(), + router_full_linkstate, + router_peers_failover_brokering, + gossip, + gossip_multihop, + autoconnect, + )); + } + if peer_full_linkstate | gossip { + hat_mut!(tables).peers_net = Some(Network::new( + "[Peers network]".to_string(), + tables.zid, + runtime, + peer_full_linkstate, + router_peers_failover_brokering, + gossip, + gossip_multihop, + autoconnect, + )); + } + if router_full_linkstate && peer_full_linkstate { + hat_mut!(tables).shared_nodes = shared_nodes( + hat!(tables).routers_net.as_ref().unwrap(), + hat!(tables).peers_net.as_ref().unwrap(), + ); + } + } + + fn new_tables(&self, router_peers_failover_brokering: bool) -> Box { + Box::new(HatTables::new(router_peers_failover_brokering)) + } + + fn new_face(&self) -> Box { + Box::new(HatFace::new()) + } + + fn new_resource(&self) -> Box { + Box::new(HatContext::new()) + } + + fn new_local_face( + &self, + tables: &mut Tables, + _tables_ref: &Arc, + primitives: Arc, + ) -> ZResult> { + let fid = tables.face_counter; + tables.face_counter += 1; + let mut newface = tables + .faces + .entry(fid) + .or_insert_with(|| { + FaceState::new( + fid, + tables.zid, + WhatAmI::Client, + #[cfg(feature = "stats")] + None, + primitives.clone(), + 0, + None, + Box::new(HatFace::new()), + ) + }) + .clone(); + log::debug!("New {}", newface); + + pubsub_new_face(tables, &mut newface); + queries_new_face(tables, &mut newface); + + Ok(newface) + } + + fn new_transport_unicast_face( + &self, + tables: &mut Tables, + tables_ref: &Arc, + transport: TransportUnicast, + ) -> ZResult> { + let whatami = transport.get_whatami()?; + + let link_id = match (tables.whatami, whatami) { + (WhatAmI::Router, WhatAmI::Router) => hat_mut!(tables) + .routers_net + .as_mut() + .unwrap() + .add_link(transport.clone()), + (WhatAmI::Router, WhatAmI::Peer) + | (WhatAmI::Peer, WhatAmI::Router) + | (WhatAmI::Peer, WhatAmI::Peer) => { + if let Some(net) = hat_mut!(tables).peers_net.as_mut() { + net.add_link(transport.clone()) + } else { + 0 + } + } + _ => 0, + }; + + if hat!(tables).full_net(WhatAmI::Router) && hat!(tables).full_net(WhatAmI::Peer) { + hat_mut!(tables).shared_nodes = shared_nodes( + hat!(tables).routers_net.as_ref().unwrap(), + hat!(tables).peers_net.as_ref().unwrap(), + ); + } + + let fid = tables.face_counter; + tables.face_counter += 1; + let zid = transport.get_zid()?; + #[cfg(feature = "stats")] + let stats = transport.get_stats()?; + let mut newface = tables + .faces + .entry(fid) + .or_insert_with(|| { + FaceState::new( + fid, + zid, + whatami, + #[cfg(feature = "stats")] + Some(stats), + Arc::new(Mux::new(transport)), + link_id, + None, + Box::new(HatFace::new()), + ) + }) + .clone(); + 
log::debug!("New {}", newface); + + pubsub_new_face(tables, &mut newface); + queries_new_face(tables, &mut newface); + + match (tables.whatami, whatami) { + (WhatAmI::Router, WhatAmI::Router) => { + hat_mut!(tables).schedule_compute_trees(tables_ref.clone(), WhatAmI::Router); + } + (WhatAmI::Router, WhatAmI::Peer) + | (WhatAmI::Peer, WhatAmI::Router) + | (WhatAmI::Peer, WhatAmI::Peer) => { + if hat_mut!(tables).full_net(WhatAmI::Peer) { + hat_mut!(tables).schedule_compute_trees(tables_ref.clone(), WhatAmI::Peer); + } + } + _ => (), + } + Ok(newface) + } + + fn close_face(&self, tables: &TablesLock, face: &mut Arc) { + let mut wtables = zwrite!(tables.tables); + let mut face_clone = face.clone(); + let face = get_mut_unchecked(face); + for res in face.remote_mappings.values_mut() { + get_mut_unchecked(res).session_ctxs.remove(&face.id); + Resource::clean(res); + } + face.remote_mappings.clear(); + for res in face.local_mappings.values_mut() { + get_mut_unchecked(res).session_ctxs.remove(&face.id); + Resource::clean(res); + } + face.local_mappings.clear(); + + let mut subs_matches = vec![]; + for mut res in face + .hat + .downcast_mut::() + .unwrap() + .remote_subs + .drain() + { + get_mut_unchecked(&mut res).session_ctxs.remove(&face.id); + undeclare_client_subscription(&mut wtables, &mut face_clone, &mut res); + + if res.context.is_some() { + for match_ in &res.context().matches { + let mut match_ = match_.upgrade().unwrap(); + if !Arc::ptr_eq(&match_, &res) { + get_mut_unchecked(&mut match_) + .context_mut() + .valid_data_routes = false; + subs_matches.push(match_); + } + } + get_mut_unchecked(&mut res).context_mut().valid_data_routes = false; + subs_matches.push(res); + } + } + + let mut qabls_matches = vec![]; + for mut res in face + .hat + .downcast_mut::() + .unwrap() + .remote_qabls + .drain() + { + get_mut_unchecked(&mut res).session_ctxs.remove(&face.id); + undeclare_client_queryable(&mut wtables, &mut face_clone, &mut res); + + if res.context.is_some() { + for match_ in &res.context().matches { + let mut match_ = match_.upgrade().unwrap(); + if !Arc::ptr_eq(&match_, &res) { + get_mut_unchecked(&mut match_) + .context_mut() + .valid_query_routes = false; + qabls_matches.push(match_); + } + } + get_mut_unchecked(&mut res).context_mut().valid_query_routes = false; + qabls_matches.push(res); + } + } + drop(wtables); + + let mut matches_data_routes = vec![]; + let mut matches_query_routes = vec![]; + let rtables = zread!(tables.tables); + for _match in subs_matches.drain(..) { + matches_data_routes.push(( + _match.clone(), + rtables.hat_code.compute_data_routes_(&rtables, &_match), + )); + } + for _match in qabls_matches.drain(..) 
{ + matches_query_routes.push(( + _match.clone(), + rtables.hat_code.compute_query_routes_(&rtables, &_match), + )); + } + drop(rtables); + + let mut wtables = zwrite!(tables.tables); + for (mut res, data_routes) in matches_data_routes { + get_mut_unchecked(&mut res) + .context_mut() + .update_data_routes(data_routes); + Resource::clean(&mut res); + } + for (mut res, query_routes) in matches_query_routes { + get_mut_unchecked(&mut res) + .context_mut() + .update_query_routes(query_routes); + Resource::clean(&mut res); + } + wtables.faces.remove(&face.id); + drop(wtables); + } + + fn handle_oam( + &self, + tables: &mut Tables, + tables_ref: &Arc, + oam: Oam, + transport: &TransportUnicast, + ) -> ZResult<()> { + if oam.id == OAM_LINKSTATE { + if let ZExtBody::ZBuf(buf) = oam.body { + if let Ok(zid) = transport.get_zid() { + use zenoh_buffers::reader::HasReader; + use zenoh_codec::RCodec; + let codec = Zenoh080Routing::new(); + let mut reader = buf.reader(); + let list: LinkStateList = codec.read(&mut reader).unwrap(); + + let whatami = transport.get_whatami()?; + match (tables.whatami, whatami) { + (WhatAmI::Router, WhatAmI::Router) => { + for (_, removed_node) in hat_mut!(tables) + .routers_net + .as_mut() + .unwrap() + .link_states(list.link_states, zid) + .removed_nodes + { + pubsub_remove_node(tables, &removed_node.zid, WhatAmI::Router); + queries_remove_node(tables, &removed_node.zid, WhatAmI::Router); + } + + if hat!(tables).full_net(WhatAmI::Peer) { + hat_mut!(tables).shared_nodes = shared_nodes( + hat!(tables).routers_net.as_ref().unwrap(), + hat!(tables).peers_net.as_ref().unwrap(), + ); + } + + hat_mut!(tables) + .schedule_compute_trees(tables_ref.clone(), WhatAmI::Router); + } + (WhatAmI::Router, WhatAmI::Peer) + | (WhatAmI::Peer, WhatAmI::Router) + | (WhatAmI::Peer, WhatAmI::Peer) => { + if let Some(net) = hat_mut!(tables).peers_net.as_mut() { + let changes = net.link_states(list.link_states, zid); + if hat!(tables).full_net(WhatAmI::Peer) { + for (_, removed_node) in changes.removed_nodes { + pubsub_remove_node( + tables, + &removed_node.zid, + WhatAmI::Peer, + ); + queries_remove_node( + tables, + &removed_node.zid, + WhatAmI::Peer, + ); + } + + if tables.whatami == WhatAmI::Router { + hat_mut!(tables).shared_nodes = shared_nodes( + hat!(tables).routers_net.as_ref().unwrap(), + hat!(tables).peers_net.as_ref().unwrap(), + ); + } + + hat_mut!(tables) + .schedule_compute_trees(tables_ref.clone(), WhatAmI::Peer); + } else { + for (_, updated_node) in changes.updated_nodes { + pubsub_linkstate_change( + tables, + &updated_node.zid, + &updated_node.links, + ); + queries_linkstate_change( + tables, + &updated_node.zid, + &updated_node.links, + ); + } + } + } + } + _ => (), + }; + } + } + } + + Ok(()) + } + + fn map_routing_context( + &self, + tables: &Tables, + face: &FaceState, + routing_context: RoutingContext, + ) -> RoutingContext { + match tables.whatami { + WhatAmI::Router => match face.whatami { + WhatAmI::Router => hat!(tables) + .routers_net + .as_ref() + .unwrap() + .get_local_context(routing_context, face.link_id), + WhatAmI::Peer => { + if hat!(tables).full_net(WhatAmI::Peer) { + hat!(tables) + .peers_net + .as_ref() + .unwrap() + .get_local_context(routing_context, face.link_id) + } else { + 0 + } + } + _ => 0, + }, + WhatAmI::Peer => { + if hat!(tables).full_net(WhatAmI::Peer) { + hat!(tables) + .peers_net + .as_ref() + .unwrap() + .get_local_context(routing_context, face.link_id) + } else { + 0 + } + } + _ => 0, + } + } + + fn closing( + &self, + tables: &mut Tables, + 
tables_ref: &Arc, + transport: &TransportUnicast, + ) -> ZResult<()> { + match (transport.get_zid(), transport.get_whatami()) { + (Ok(zid), Ok(whatami)) => { + match (tables.whatami, whatami) { + (WhatAmI::Router, WhatAmI::Router) => { + for (_, removed_node) in hat_mut!(tables) + .routers_net + .as_mut() + .unwrap() + .remove_link(&zid) + { + pubsub_remove_node(tables, &removed_node.zid, WhatAmI::Router); + queries_remove_node(tables, &removed_node.zid, WhatAmI::Router); + } + + if hat!(tables).full_net(WhatAmI::Peer) { + hat_mut!(tables).shared_nodes = shared_nodes( + hat!(tables).routers_net.as_ref().unwrap(), + hat!(tables).peers_net.as_ref().unwrap(), + ); + } + + hat_mut!(tables) + .schedule_compute_trees(tables_ref.clone(), WhatAmI::Router); + } + (WhatAmI::Router, WhatAmI::Peer) + | (WhatAmI::Peer, WhatAmI::Router) + | (WhatAmI::Peer, WhatAmI::Peer) => { + if hat!(tables).full_net(WhatAmI::Peer) { + for (_, removed_node) in hat_mut!(tables) + .peers_net + .as_mut() + .unwrap() + .remove_link(&zid) + { + pubsub_remove_node(tables, &removed_node.zid, WhatAmI::Peer); + queries_remove_node(tables, &removed_node.zid, WhatAmI::Peer); + } + + if tables.whatami == WhatAmI::Router { + hat_mut!(tables).shared_nodes = shared_nodes( + hat!(tables).routers_net.as_ref().unwrap(), + hat!(tables).peers_net.as_ref().unwrap(), + ); + } + + hat_mut!(tables) + .schedule_compute_trees(tables_ref.clone(), WhatAmI::Peer); + } else if let Some(net) = hat_mut!(tables).peers_net.as_mut() { + net.remove_link(&zid); + } + } + _ => (), + }; + } + (_, _) => log::error!("Closed transport in session closing!"), + } + Ok(()) + } + + fn as_any(&self) -> &dyn Any { + self + } + + #[inline] + fn ingress_filter(&self, tables: &Tables, face: &FaceState, expr: &mut RoutingExpr) -> bool { + tables.whatami != WhatAmI::Router + || face.whatami != WhatAmI::Peer + || hat!(tables).peers_net.is_none() + || tables.zid + == *hat!(tables).elect_router( + &tables.zid, + expr.full_expr(), + hat!(tables).get_router_links(face.zid), + ) + } + + #[inline] + fn egress_filter( + &self, + tables: &Tables, + src_face: &FaceState, + out_face: &Arc, + expr: &mut RoutingExpr, + ) -> bool { + if src_face.id != out_face.id + && match (src_face.mcast_group.as_ref(), out_face.mcast_group.as_ref()) { + (Some(l), Some(r)) => l != r, + _ => true, + } + { + let dst_master = tables.whatami != WhatAmI::Router + || out_face.whatami != WhatAmI::Peer + || hat!(tables).peers_net.is_none() + || tables.zid + == *hat!(tables).elect_router( + &tables.zid, + expr.full_expr(), + hat!(tables).get_router_links(out_face.zid), + ); + + return dst_master + && (src_face.whatami != WhatAmI::Peer + || out_face.whatami != WhatAmI::Peer + || hat!(tables).full_net(WhatAmI::Peer) + || hat!(tables).failover_brokering(src_face.zid, out_face.zid)); + } + false + } +} + +struct HatContext { + router_subs: HashSet, + peer_subs: HashSet, + router_qabls: HashMap, + peer_qabls: HashMap, +} + +impl HatContext { + fn new() -> Self { + Self { + router_subs: HashSet::new(), + peer_subs: HashSet::new(), + router_qabls: HashMap::new(), + peer_qabls: HashMap::new(), + } + } +} + +struct HatFace { + local_subs: HashSet>, + remote_subs: HashSet>, + local_qabls: HashMap, QueryableInfo>, + remote_qabls: HashSet>, +} + +impl HatFace { + fn new() -> Self { + Self { + local_subs: HashSet::new(), + remote_subs: HashSet::new(), + local_qabls: HashMap::new(), + remote_qabls: HashSet::new(), + } + } +} + +fn get_router(tables: &Tables, face: &Arc, nodeid: RoutingContext) -> Option { + match 
hat!(tables) + .routers_net + .as_ref() + .unwrap() + .get_link(face.link_id) + { + Some(link) => match link.get_zid(&(nodeid as u64)) { + Some(router) => Some(*router), + None => { + log::error!( + "Received router declaration with unknown routing context id {}", + nodeid + ); + None + } + }, + None => { + log::error!( + "Could not find corresponding link in routers network for {}", + face + ); + None + } + } +} + +fn get_peer(tables: &Tables, face: &Arc, nodeid: RoutingContext) -> Option { + match hat!(tables) + .peers_net + .as_ref() + .unwrap() + .get_link(face.link_id) + { + Some(link) => match link.get_zid(&(nodeid as u64)) { + Some(router) => Some(*router), + None => { + log::error!( + "Received peer declaration with unknown routing context id {}", + nodeid + ); + None + } + }, + None => { + log::error!( + "Could not find corresponding link in peers network for {}", + face + ); + None + } + } +} + +impl HatTrait for HatCode {} \ No newline at end of file diff --git a/zenoh/src/net/routing/hat/network.rs b/zenoh/src/net/routing/hat/client/network.rs similarity index 100% rename from zenoh/src/net/routing/hat/network.rs rename to zenoh/src/net/routing/hat/client/network.rs diff --git a/zenoh/src/net/routing/hat/pubsub.rs b/zenoh/src/net/routing/hat/client/pubsub.rs similarity index 99% rename from zenoh/src/net/routing/hat/pubsub.rs rename to zenoh/src/net/routing/hat/client/pubsub.rs index 9f7d41d9d1..0405107f86 100644 --- a/zenoh/src/net/routing/hat/pubsub.rs +++ b/zenoh/src/net/routing/hat/client/pubsub.rs @@ -11,15 +11,16 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::super::dispatcher::face::FaceState; -use super::super::dispatcher::pubsub::*; -use super::super::dispatcher::resource::{Resource, RoutingContext, SessionContext}; -use super::super::dispatcher::tables::{Tables, TablesLock}; -use super::super::PREFIX_LIVELINESS; +use crate::net::routing::dispatcher::face::FaceState; +use crate::net::routing::dispatcher::pubsub::*; +use crate::net::routing::dispatcher::resource::{Resource, RoutingContext, SessionContext}; +use crate::net::routing::dispatcher::tables::{Tables, TablesLock}; +use crate::net::routing::PREFIX_LIVELINESS; +use crate::net::routing::hat::HatPubSubTrait; use super::network::Network; -use super::{get_peer, get_router, HatCode, HatContext, HatFace, HatPubSubTrait, HatTables}; +use super::{get_peer, get_router, HatCode, HatContext, HatFace, HatTables}; use crate::net::routing::dispatcher::tables::{DataRoutes, PullCaches, Route, RoutingExpr}; -use crate::{face_hat, face_hat_mut, hat, hat_mut, res_hat, res_hat_mut}; +use super::{face_hat, face_hat_mut, hat, hat_mut, res_hat, res_hat_mut}; use petgraph::graph::NodeIndex; use std::borrow::Cow; use std::collections::{HashMap, HashSet}; diff --git a/zenoh/src/net/routing/hat/queries.rs b/zenoh/src/net/routing/hat/client/queries.rs similarity index 99% rename from zenoh/src/net/routing/hat/queries.rs rename to zenoh/src/net/routing/hat/client/queries.rs index 0982a232fc..7ee731e05a 100644 --- a/zenoh/src/net/routing/hat/queries.rs +++ b/zenoh/src/net/routing/hat/client/queries.rs @@ -11,17 +11,18 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::super::dispatcher::face::FaceState; -use super::super::dispatcher::queries::*; -use super::super::dispatcher::resource::{Resource, RoutingContext, SessionContext}; -use super::super::dispatcher::tables::{Tables, TablesLock}; +use crate::net::routing::dispatcher::face::FaceState; +use crate::net::routing::dispatcher::queries::*; +use 
crate::net::routing::dispatcher::resource::{Resource, RoutingContext, SessionContext}; +use crate::net::routing::dispatcher::tables::{Tables, TablesLock}; +use crate::net::routing::hat::HatQueriesTrait; use super::network::Network; -use super::{get_peer, get_router, HatCode, HatContext, HatFace, HatQueriesTrait, HatTables}; +use super::{get_peer, get_router, HatCode, HatContext, HatFace, HatTables}; use crate::net::routing::dispatcher::tables::{ QueryRoutes, QueryTargetQabl, QueryTargetQablSet, RoutingExpr, }; use crate::net::routing::PREFIX_LIVELINESS; -use crate::{face_hat, face_hat_mut, hat, hat_mut, res_hat, res_hat_mut}; +use super::{face_hat, face_hat_mut, hat, hat_mut, res_hat, res_hat_mut}; use ordered_float::OrderedFloat; use petgraph::graph::NodeIndex; use std::borrow::Cow; diff --git a/zenoh/src/net/routing/hat/mod.rs b/zenoh/src/net/routing/hat/mod.rs index 3c627f234c..7f2a1a7e9c 100644 --- a/zenoh/src/net/routing/hat/mod.rs +++ b/zenoh/src/net/routing/hat/mod.rs @@ -17,11 +17,6 @@ //! This module is intended for Zenoh's internal use. //! //! [Click here for Zenoh's documentation](../zenoh/index.html) -use self::{ - network::Network, - pubsub::{pubsub_new_face, undeclare_client_subscription}, - queries::{queries_new_face, undeclare_client_queryable}, -}; use super::dispatcher::{ face::FaceState, tables::{ @@ -29,855 +24,33 @@ use super::dispatcher::{ RoutingExpr, Tables, TablesLock, }, }; -use crate::{ - hat, hat_mut, - net::{ - codec::Zenoh080Routing, - protocol::linkstate::LinkStateList, - routing::hat::{ - network::shared_nodes, - pubsub::{pubsub_linkstate_change, pubsub_remove_node}, - queries::{queries_linkstate_change, queries_remove_node}, - }, - }, - runtime::Runtime, -}; -use async_std::task::JoinHandle; +use crate::runtime::Runtime; use std::{ any::Any, - collections::{hash_map::DefaultHasher, HashMap, HashSet}, - hash::Hasher, sync::Arc, }; use zenoh_buffers::ZBuf; -use zenoh_config::{WhatAmI, WhatAmIMatcher, ZenohId}; +use zenoh_config::{WhatAmI, WhatAmIMatcher}; use zenoh_protocol::{ - common::ZExtBody, core::WireExpr, network::{ declare::{queryable::ext::QueryableInfo, subscriber::ext::SubscriberInfo}, - oam::id::OAM_LINKSTATE, Oam, }, }; use zenoh_result::ZResult; -use zenoh_sync::get_mut_unchecked; -use zenoh_transport::{Mux, Primitives, TransportUnicast}; +use zenoh_transport::{Primitives, TransportUnicast}; -mod network; -mod pubsub; -mod queries; +mod client; +mod peer; +mod router; zconfigurable! 
{ static ref TREES_COMPUTATION_DELAY: u64 = 100; } -struct HatTables { - router_subs: HashSet>, - peer_subs: HashSet>, - router_qabls: HashSet>, - peer_qabls: HashSet>, - routers_net: Option, - peers_net: Option, - shared_nodes: Vec, - routers_trees_task: Option>, - peers_trees_task: Option>, - router_peers_failover_brokering: bool, -} - -impl HatTables { - fn new(router_peers_failover_brokering: bool) -> Self { - Self { - router_subs: HashSet::new(), - peer_subs: HashSet::new(), - router_qabls: HashSet::new(), - peer_qabls: HashSet::new(), - routers_net: None, - peers_net: None, - shared_nodes: vec![], - routers_trees_task: None, - peers_trees_task: None, - router_peers_failover_brokering, - } - } - - #[inline] - fn get_net(&self, net_type: WhatAmI) -> Option<&Network> { - match net_type { - WhatAmI::Router => self.routers_net.as_ref(), - WhatAmI::Peer => self.peers_net.as_ref(), - _ => None, - } - } - - #[inline] - fn full_net(&self, net_type: WhatAmI) -> bool { - match net_type { - WhatAmI::Router => self - .routers_net - .as_ref() - .map(|net| net.full_linkstate) - .unwrap_or(false), - WhatAmI::Peer => self - .peers_net - .as_ref() - .map(|net| net.full_linkstate) - .unwrap_or(false), - _ => false, - } - } - - #[inline] - fn get_router_links(&self, peer: ZenohId) -> impl Iterator + '_ { - self.peers_net - .as_ref() - .unwrap() - .get_links(peer) - .iter() - .filter(move |nid| { - if let Some(node) = self.routers_net.as_ref().unwrap().get_node(nid) { - node.whatami.unwrap_or(WhatAmI::Router) == WhatAmI::Router - } else { - false - } - }) - } - - #[inline] - fn elect_router<'a>( - &'a self, - self_zid: &'a ZenohId, - key_expr: &str, - mut routers: impl Iterator, - ) -> &'a ZenohId { - match routers.next() { - None => self_zid, - Some(router) => { - let hash = |r: &ZenohId| { - let mut hasher = DefaultHasher::new(); - for b in key_expr.as_bytes() { - hasher.write_u8(*b); - } - for b in &r.to_le_bytes()[..r.size()] { - hasher.write_u8(*b); - } - hasher.finish() - }; - let mut res = router; - let mut h = None; - for router2 in routers { - let h2 = hash(router2); - if h2 > *h.get_or_insert_with(|| hash(res)) { - res = router2; - h = Some(h2); - } - } - res - } - } - } - - #[inline] - fn failover_brokering_to(source_links: &[ZenohId], dest: ZenohId) -> bool { - // if source_links is empty then gossip is probably disabled in source peer - !source_links.is_empty() && !source_links.contains(&dest) - } - - #[inline] - fn failover_brokering(&self, peer1: ZenohId, peer2: ZenohId) -> bool { - self.router_peers_failover_brokering - && self - .peers_net - .as_ref() - .map(|net| HatTables::failover_brokering_to(net.get_links(peer1), peer2)) - .unwrap_or(false) - } - - fn schedule_compute_trees(&mut self, tables_ref: Arc, net_type: WhatAmI) { - log::trace!("Schedule computations"); - if (net_type == WhatAmI::Router && self.routers_trees_task.is_none()) - || (net_type == WhatAmI::Peer && self.peers_trees_task.is_none()) - { - let task = Some(async_std::task::spawn(async move { - async_std::task::sleep(std::time::Duration::from_millis(*TREES_COMPUTATION_DELAY)) - .await; - let mut tables = zwrite!(tables_ref.tables); - - log::trace!("Compute trees"); - let new_childs = match net_type { - WhatAmI::Router => hat_mut!(tables) - .routers_net - .as_mut() - .unwrap() - .compute_trees(), - _ => hat_mut!(tables).peers_net.as_mut().unwrap().compute_trees(), - }; - - log::trace!("Compute routes"); - pubsub::pubsub_tree_change(&mut tables, &new_childs, net_type); - queries::queries_tree_change(&mut tables, 
&new_childs, net_type); - - log::trace!("Computations completed"); - match net_type { - WhatAmI::Router => hat_mut!(tables).routers_trees_task = None, - _ => hat_mut!(tables).peers_trees_task = None, - }; - })); - match net_type { - WhatAmI::Router => self.routers_trees_task = task, - _ => self.peers_trees_task = task, - }; - } - } -} - -pub(crate) struct HatCode {} - -impl HatBaseTrait for HatCode { - fn init( - &self, - tables: &mut Tables, - runtime: Runtime, - router_full_linkstate: bool, - peer_full_linkstate: bool, - router_peers_failover_brokering: bool, - gossip: bool, - gossip_multihop: bool, - autoconnect: WhatAmIMatcher, - ) { - if router_full_linkstate | gossip { - hat_mut!(tables).routers_net = Some(Network::new( - "[Routers network]".to_string(), - tables.zid, - runtime.clone(), - router_full_linkstate, - router_peers_failover_brokering, - gossip, - gossip_multihop, - autoconnect, - )); - } - if peer_full_linkstate | gossip { - hat_mut!(tables).peers_net = Some(Network::new( - "[Peers network]".to_string(), - tables.zid, - runtime, - peer_full_linkstate, - router_peers_failover_brokering, - gossip, - gossip_multihop, - autoconnect, - )); - } - if router_full_linkstate && peer_full_linkstate { - hat_mut!(tables).shared_nodes = shared_nodes( - hat!(tables).routers_net.as_ref().unwrap(), - hat!(tables).peers_net.as_ref().unwrap(), - ); - } - } - - fn new_local_face( - &self, - tables: &mut Tables, - _tables_ref: &Arc, - primitives: Arc, - ) -> ZResult> { - let fid = tables.face_counter; - tables.face_counter += 1; - let mut newface = tables - .faces - .entry(fid) - .or_insert_with(|| { - FaceState::new( - fid, - tables.zid, - WhatAmI::Client, - #[cfg(feature = "stats")] - None, - primitives.clone(), - 0, - None, - Box::new(HatFace::new()), - ) - }) - .clone(); - log::debug!("New {}", newface); - - pubsub_new_face(tables, &mut newface); - queries_new_face(tables, &mut newface); - - Ok(newface) - } - - fn new_transport_unicast_face( - &self, - tables: &mut Tables, - tables_ref: &Arc, - transport: TransportUnicast, - ) -> ZResult> { - let whatami = transport.get_whatami()?; - - let link_id = match (tables.whatami, whatami) { - (WhatAmI::Router, WhatAmI::Router) => hat_mut!(tables) - .routers_net - .as_mut() - .unwrap() - .add_link(transport.clone()), - (WhatAmI::Router, WhatAmI::Peer) - | (WhatAmI::Peer, WhatAmI::Router) - | (WhatAmI::Peer, WhatAmI::Peer) => { - if let Some(net) = hat_mut!(tables).peers_net.as_mut() { - net.add_link(transport.clone()) - } else { - 0 - } - } - _ => 0, - }; - - if hat!(tables).full_net(WhatAmI::Router) && hat!(tables).full_net(WhatAmI::Peer) { - hat_mut!(tables).shared_nodes = shared_nodes( - hat!(tables).routers_net.as_ref().unwrap(), - hat!(tables).peers_net.as_ref().unwrap(), - ); - } - - let fid = tables.face_counter; - tables.face_counter += 1; - let zid = transport.get_zid()?; - #[cfg(feature = "stats")] - let stats = transport.get_stats()?; - let mut newface = tables - .faces - .entry(fid) - .or_insert_with(|| { - FaceState::new( - fid, - zid, - whatami, - #[cfg(feature = "stats")] - Some(stats), - Arc::new(Mux::new(transport)), - link_id, - None, - Box::new(HatFace::new()), - ) - }) - .clone(); - log::debug!("New {}", newface); - - pubsub_new_face(tables, &mut newface); - queries_new_face(tables, &mut newface); - - match (tables.whatami, whatami) { - (WhatAmI::Router, WhatAmI::Router) => { - hat_mut!(tables).schedule_compute_trees(tables_ref.clone(), WhatAmI::Router); - } - (WhatAmI::Router, WhatAmI::Peer) - | (WhatAmI::Peer, WhatAmI::Router) 
- | (WhatAmI::Peer, WhatAmI::Peer) => { - if hat_mut!(tables).full_net(WhatAmI::Peer) { - hat_mut!(tables).schedule_compute_trees(tables_ref.clone(), WhatAmI::Peer); - } - } - _ => (), - } - Ok(newface) - } - - fn close_face(&self, tables: &TablesLock, face: &mut Arc) { - let mut wtables = zwrite!(tables.tables); - let mut face_clone = face.clone(); - let face = get_mut_unchecked(face); - for res in face.remote_mappings.values_mut() { - get_mut_unchecked(res).session_ctxs.remove(&face.id); - Resource::clean(res); - } - face.remote_mappings.clear(); - for res in face.local_mappings.values_mut() { - get_mut_unchecked(res).session_ctxs.remove(&face.id); - Resource::clean(res); - } - face.local_mappings.clear(); - - let mut subs_matches = vec![]; - for mut res in face - .hat - .downcast_mut::() - .unwrap() - .remote_subs - .drain() - { - get_mut_unchecked(&mut res).session_ctxs.remove(&face.id); - undeclare_client_subscription(&mut wtables, &mut face_clone, &mut res); - - if res.context.is_some() { - for match_ in &res.context().matches { - let mut match_ = match_.upgrade().unwrap(); - if !Arc::ptr_eq(&match_, &res) { - get_mut_unchecked(&mut match_) - .context_mut() - .valid_data_routes = false; - subs_matches.push(match_); - } - } - get_mut_unchecked(&mut res).context_mut().valid_data_routes = false; - subs_matches.push(res); - } - } - - let mut qabls_matches = vec![]; - for mut res in face - .hat - .downcast_mut::() - .unwrap() - .remote_qabls - .drain() - { - get_mut_unchecked(&mut res).session_ctxs.remove(&face.id); - undeclare_client_queryable(&mut wtables, &mut face_clone, &mut res); - - if res.context.is_some() { - for match_ in &res.context().matches { - let mut match_ = match_.upgrade().unwrap(); - if !Arc::ptr_eq(&match_, &res) { - get_mut_unchecked(&mut match_) - .context_mut() - .valid_query_routes = false; - qabls_matches.push(match_); - } - } - get_mut_unchecked(&mut res).context_mut().valid_query_routes = false; - qabls_matches.push(res); - } - } - drop(wtables); - - let mut matches_data_routes = vec![]; - let mut matches_query_routes = vec![]; - let rtables = zread!(tables.tables); - for _match in subs_matches.drain(..) { - matches_data_routes.push(( - _match.clone(), - rtables.hat_code.compute_data_routes_(&rtables, &_match), - )); - } - for _match in qabls_matches.drain(..) 
{ - matches_query_routes.push(( - _match.clone(), - rtables.hat_code.compute_query_routes_(&rtables, &_match), - )); - } - drop(rtables); - - let mut wtables = zwrite!(tables.tables); - for (mut res, data_routes) in matches_data_routes { - get_mut_unchecked(&mut res) - .context_mut() - .update_data_routes(data_routes); - Resource::clean(&mut res); - } - for (mut res, query_routes) in matches_query_routes { - get_mut_unchecked(&mut res) - .context_mut() - .update_query_routes(query_routes); - Resource::clean(&mut res); - } - wtables.faces.remove(&face.id); - drop(wtables); - } - - fn handle_oam( - &self, - tables: &mut Tables, - tables_ref: &Arc, - oam: Oam, - transport: &TransportUnicast, - ) -> ZResult<()> { - if oam.id == OAM_LINKSTATE { - if let ZExtBody::ZBuf(buf) = oam.body { - if let Ok(zid) = transport.get_zid() { - use zenoh_buffers::reader::HasReader; - use zenoh_codec::RCodec; - let codec = Zenoh080Routing::new(); - let mut reader = buf.reader(); - let list: LinkStateList = codec.read(&mut reader).unwrap(); - - let whatami = transport.get_whatami()?; - match (tables.whatami, whatami) { - (WhatAmI::Router, WhatAmI::Router) => { - for (_, removed_node) in hat_mut!(tables) - .routers_net - .as_mut() - .unwrap() - .link_states(list.link_states, zid) - .removed_nodes - { - pubsub_remove_node(tables, &removed_node.zid, WhatAmI::Router); - queries_remove_node(tables, &removed_node.zid, WhatAmI::Router); - } - - if hat!(tables).full_net(WhatAmI::Peer) { - hat_mut!(tables).shared_nodes = shared_nodes( - hat!(tables).routers_net.as_ref().unwrap(), - hat!(tables).peers_net.as_ref().unwrap(), - ); - } - - hat_mut!(tables) - .schedule_compute_trees(tables_ref.clone(), WhatAmI::Router); - } - (WhatAmI::Router, WhatAmI::Peer) - | (WhatAmI::Peer, WhatAmI::Router) - | (WhatAmI::Peer, WhatAmI::Peer) => { - if let Some(net) = hat_mut!(tables).peers_net.as_mut() { - let changes = net.link_states(list.link_states, zid); - if hat!(tables).full_net(WhatAmI::Peer) { - for (_, removed_node) in changes.removed_nodes { - pubsub_remove_node( - tables, - &removed_node.zid, - WhatAmI::Peer, - ); - queries_remove_node( - tables, - &removed_node.zid, - WhatAmI::Peer, - ); - } - - if tables.whatami == WhatAmI::Router { - hat_mut!(tables).shared_nodes = shared_nodes( - hat!(tables).routers_net.as_ref().unwrap(), - hat!(tables).peers_net.as_ref().unwrap(), - ); - } - - hat_mut!(tables) - .schedule_compute_trees(tables_ref.clone(), WhatAmI::Peer); - } else { - for (_, updated_node) in changes.updated_nodes { - pubsub_linkstate_change( - tables, - &updated_node.zid, - &updated_node.links, - ); - queries_linkstate_change( - tables, - &updated_node.zid, - &updated_node.links, - ); - } - } - } - } - _ => (), - }; - } - } - } - - Ok(()) - } - - fn map_routing_context( - &self, - tables: &Tables, - face: &FaceState, - routing_context: RoutingContext, - ) -> RoutingContext { - match tables.whatami { - WhatAmI::Router => match face.whatami { - WhatAmI::Router => hat!(tables) - .routers_net - .as_ref() - .unwrap() - .get_local_context(routing_context, face.link_id), - WhatAmI::Peer => { - if hat!(tables).full_net(WhatAmI::Peer) { - hat!(tables) - .peers_net - .as_ref() - .unwrap() - .get_local_context(routing_context, face.link_id) - } else { - 0 - } - } - _ => 0, - }, - WhatAmI::Peer => { - if hat!(tables).full_net(WhatAmI::Peer) { - hat!(tables) - .peers_net - .as_ref() - .unwrap() - .get_local_context(routing_context, face.link_id) - } else { - 0 - } - } - _ => 0, - } - } - - fn closing( - &self, - tables: &mut Tables, - 
tables_ref: &Arc, - transport: &TransportUnicast, - ) -> ZResult<()> { - match (transport.get_zid(), transport.get_whatami()) { - (Ok(zid), Ok(whatami)) => { - match (tables.whatami, whatami) { - (WhatAmI::Router, WhatAmI::Router) => { - for (_, removed_node) in hat_mut!(tables) - .routers_net - .as_mut() - .unwrap() - .remove_link(&zid) - { - pubsub_remove_node(tables, &removed_node.zid, WhatAmI::Router); - queries_remove_node(tables, &removed_node.zid, WhatAmI::Router); - } - - if hat!(tables).full_net(WhatAmI::Peer) { - hat_mut!(tables).shared_nodes = shared_nodes( - hat!(tables).routers_net.as_ref().unwrap(), - hat!(tables).peers_net.as_ref().unwrap(), - ); - } - - hat_mut!(tables) - .schedule_compute_trees(tables_ref.clone(), WhatAmI::Router); - } - (WhatAmI::Router, WhatAmI::Peer) - | (WhatAmI::Peer, WhatAmI::Router) - | (WhatAmI::Peer, WhatAmI::Peer) => { - if hat!(tables).full_net(WhatAmI::Peer) { - for (_, removed_node) in hat_mut!(tables) - .peers_net - .as_mut() - .unwrap() - .remove_link(&zid) - { - pubsub_remove_node(tables, &removed_node.zid, WhatAmI::Peer); - queries_remove_node(tables, &removed_node.zid, WhatAmI::Peer); - } - - if tables.whatami == WhatAmI::Router { - hat_mut!(tables).shared_nodes = shared_nodes( - hat!(tables).routers_net.as_ref().unwrap(), - hat!(tables).peers_net.as_ref().unwrap(), - ); - } - - hat_mut!(tables) - .schedule_compute_trees(tables_ref.clone(), WhatAmI::Peer); - } else if let Some(net) = hat_mut!(tables).peers_net.as_mut() { - net.remove_link(&zid); - } - } - _ => (), - }; - } - (_, _) => log::error!("Closed transport in session closing!"), - } - Ok(()) - } - - fn as_any(&self) -> &dyn Any { - self - } - - #[inline] - fn ingress_filter(&self, tables: &Tables, face: &FaceState, expr: &mut RoutingExpr) -> bool { - tables.whatami != WhatAmI::Router - || face.whatami != WhatAmI::Peer - || hat!(tables).peers_net.is_none() - || tables.zid - == *hat!(tables).elect_router( - &tables.zid, - expr.full_expr(), - hat!(tables).get_router_links(face.zid), - ) - } - - #[inline] - fn egress_filter( - &self, - tables: &Tables, - src_face: &FaceState, - out_face: &Arc, - expr: &mut RoutingExpr, - ) -> bool { - if src_face.id != out_face.id - && match (src_face.mcast_group.as_ref(), out_face.mcast_group.as_ref()) { - (Some(l), Some(r)) => l != r, - _ => true, - } - { - let dst_master = tables.whatami != WhatAmI::Router - || out_face.whatami != WhatAmI::Peer - || hat!(tables).peers_net.is_none() - || tables.zid - == *hat!(tables).elect_router( - &tables.zid, - expr.full_expr(), - hat!(tables).get_router_links(out_face.zid), - ); - - return dst_master - && (src_face.whatami != WhatAmI::Peer - || out_face.whatami != WhatAmI::Peer - || hat!(tables).full_net(WhatAmI::Peer) - || hat!(tables).failover_brokering(src_face.zid, out_face.zid)); - } - false - } -} - -struct HatContext { - router_subs: HashSet, - peer_subs: HashSet, - router_qabls: HashMap, - peer_qabls: HashMap, -} - -impl HatContext { - fn new() -> Self { - Self { - router_subs: HashSet::new(), - peer_subs: HashSet::new(), - router_qabls: HashMap::new(), - peer_qabls: HashMap::new(), - } - } -} - -struct HatFace { - local_subs: HashSet>, - remote_subs: HashSet>, - local_qabls: HashMap, QueryableInfo>, - remote_qabls: HashSet>, -} - -impl HatFace { - fn new() -> Self { - Self { - local_subs: HashSet::new(), - remote_subs: HashSet::new(), - local_qabls: HashMap::new(), - remote_qabls: HashSet::new(), - } - } -} - -#[macro_export] -macro_rules! 
hat { - ($t:expr) => { - $t.hat.downcast_ref::().unwrap() - }; -} - -#[macro_export] -macro_rules! hat_mut { - ($t:expr) => { - $t.hat.downcast_mut::().unwrap() - }; -} - -#[macro_export] -macro_rules! res_hat { - ($r:expr) => { - $r.context().hat.downcast_ref::().unwrap() - }; -} - -#[macro_export] -macro_rules! res_hat_mut { - ($r:expr) => { - get_mut_unchecked($r) - .context_mut() - .hat - .downcast_mut::() - .unwrap() - }; -} - -#[macro_export] -macro_rules! face_hat { - ($f:expr) => { - $f.hat.downcast_ref::().unwrap() - }; -} - -#[macro_export] -macro_rules! face_hat_mut { - ($f:expr) => { - get_mut_unchecked($f).hat.downcast_mut::().unwrap() - }; -} - -fn get_router(tables: &Tables, face: &Arc, nodeid: RoutingContext) -> Option { - match hat!(tables) - .routers_net - .as_ref() - .unwrap() - .get_link(face.link_id) - { - Some(link) => match link.get_zid(&(nodeid as u64)) { - Some(router) => Some(*router), - None => { - log::error!( - "Received router declaration with unknown routing context id {}", - nodeid - ); - None - } - }, - None => { - log::error!( - "Could not find corresponding link in routers network for {}", - face - ); - None - } - } -} - -fn get_peer(tables: &Tables, face: &Arc, nodeid: RoutingContext) -> Option { - match hat!(tables) - .peers_net - .as_ref() - .unwrap() - .get_link(face.link_id) - { - Some(link) => match link.get_zid(&(nodeid as u64)) { - Some(router) => Some(*router), - None => { - log::error!( - "Received peer declaration with unknown routing context id {}", - nodeid - ); - None - } - }, - None => { - log::error!( - "Could not find corresponding link in peers network for {}", - face - ); - None - } - } -} - pub(crate) trait HatTrait: HatBaseTrait + HatPubSubTrait + HatQueriesTrait {} -impl HatTrait for HatCode {} - pub(crate) trait HatBaseTrait { fn as_any(&self) -> &dyn Any; @@ -894,17 +67,11 @@ pub(crate) trait HatBaseTrait { autoconnect: WhatAmIMatcher, ); - fn new_tables(&self, router_peers_failover_brokering: bool) -> Box { - Box::new(HatTables::new(router_peers_failover_brokering)) - } + fn new_tables(&self, router_peers_failover_brokering: bool) -> Box; - fn new_face(&self) -> Box { - Box::new(HatFace::new()) - } + fn new_face(&self) -> Box; - fn new_resource(&self) -> Box { - Box::new(HatContext::new()) - } + fn new_resource(&self) -> Box; fn new_local_face( &self, @@ -1020,3 +187,11 @@ pub(crate) trait HatQueriesTrait { face: &Arc, ) -> Vec<(WireExpr<'static>, ZBuf)>; } + +pub(crate) fn new_hat(whatami: WhatAmI) -> Box { + match whatami { + WhatAmI::Client => Box::new(client::HatCode {}), + WhatAmI::Peer => Box::new(peer::HatCode {}), + WhatAmI::Router => Box::new(router::HatCode {}), + } +} \ No newline at end of file diff --git a/zenoh/src/net/routing/hat/peer/mod.rs b/zenoh/src/net/routing/hat/peer/mod.rs new file mode 100644 index 0000000000..d84128a037 --- /dev/null +++ b/zenoh/src/net/routing/hat/peer/mod.rs @@ -0,0 +1,885 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +//! ⚠️ WARNING ⚠️ +//! +//! This module is intended for Zenoh's internal use. +//! +//! 
[Click here for Zenoh's documentation](../zenoh/index.html) +use self::{ + network::{Network, shared_nodes}, + pubsub::{pubsub_linkstate_change, pubsub_new_face, pubsub_remove_node, undeclare_client_subscription}, + queries::{queries_linkstate_change, queries_new_face, queries_remove_node, undeclare_client_queryable}, +}; +use super::{super::dispatcher::{ + face::FaceState, + tables::{ + Resource, RoutingContext, + RoutingExpr, Tables, TablesLock, + }, +}, HatBaseTrait, HatTrait}; +use crate::{ + net::{ + codec::Zenoh080Routing, + protocol::linkstate::LinkStateList, + }, + runtime::Runtime, +}; +use async_std::task::JoinHandle; +use std::{ + any::Any, + collections::{hash_map::DefaultHasher, HashMap, HashSet}, + hash::Hasher, + sync::Arc, +}; +use zenoh_config::{WhatAmI, WhatAmIMatcher, ZenohId}; +use zenoh_protocol::{ + common::ZExtBody, + network::{ + declare::queryable::ext::QueryableInfo, + oam::id::OAM_LINKSTATE, + Oam, + }, +}; +use zenoh_result::ZResult; +use zenoh_sync::get_mut_unchecked; +use zenoh_transport::{Mux, Primitives, TransportUnicast}; + +mod network; +mod pubsub; +mod queries; + +zconfigurable! { + static ref TREES_COMPUTATION_DELAY: u64 = 100; +} + + + +macro_rules! hat { + ($t:expr) => { + $t.hat.downcast_ref::().unwrap() + }; +} +use hat; + +macro_rules! hat_mut { + ($t:expr) => { + $t.hat.downcast_mut::().unwrap() + }; +} +use hat_mut; + +macro_rules! res_hat { + ($r:expr) => { + $r.context().hat.downcast_ref::().unwrap() + }; +} +use res_hat; + +macro_rules! res_hat_mut { + ($r:expr) => { + get_mut_unchecked($r) + .context_mut() + .hat + .downcast_mut::() + .unwrap() + }; +} +use res_hat_mut; + +macro_rules! face_hat { + ($f:expr) => { + $f.hat.downcast_ref::().unwrap() + }; +} +use face_hat; + +macro_rules! face_hat_mut { + ($f:expr) => { + get_mut_unchecked($f).hat.downcast_mut::().unwrap() + }; +} +use face_hat_mut; + + + +struct HatTables { + router_subs: HashSet>, + peer_subs: HashSet>, + router_qabls: HashSet>, + peer_qabls: HashSet>, + routers_net: Option, + peers_net: Option, + shared_nodes: Vec, + routers_trees_task: Option>, + peers_trees_task: Option>, + router_peers_failover_brokering: bool, +} + +impl HatTables { + fn new(router_peers_failover_brokering: bool) -> Self { + Self { + router_subs: HashSet::new(), + peer_subs: HashSet::new(), + router_qabls: HashSet::new(), + peer_qabls: HashSet::new(), + routers_net: None, + peers_net: None, + shared_nodes: vec![], + routers_trees_task: None, + peers_trees_task: None, + router_peers_failover_brokering, + } + } + + #[inline] + fn get_net(&self, net_type: WhatAmI) -> Option<&Network> { + match net_type { + WhatAmI::Router => self.routers_net.as_ref(), + WhatAmI::Peer => self.peers_net.as_ref(), + _ => None, + } + } + + #[inline] + fn full_net(&self, net_type: WhatAmI) -> bool { + match net_type { + WhatAmI::Router => self + .routers_net + .as_ref() + .map(|net| net.full_linkstate) + .unwrap_or(false), + WhatAmI::Peer => self + .peers_net + .as_ref() + .map(|net| net.full_linkstate) + .unwrap_or(false), + _ => false, + } + } + + #[inline] + fn get_router_links(&self, peer: ZenohId) -> impl Iterator + '_ { + self.peers_net + .as_ref() + .unwrap() + .get_links(peer) + .iter() + .filter(move |nid| { + if let Some(node) = self.routers_net.as_ref().unwrap().get_node(nid) { + node.whatami.unwrap_or(WhatAmI::Router) == WhatAmI::Router + } else { + false + } + }) + } + + #[inline] + fn elect_router<'a>( + &'a self, + self_zid: &'a ZenohId, + key_expr: &str, + mut routers: impl Iterator, + ) -> &'a ZenohId { + match 
routers.next() { + None => self_zid, + Some(router) => { + let hash = |r: &ZenohId| { + let mut hasher = DefaultHasher::new(); + for b in key_expr.as_bytes() { + hasher.write_u8(*b); + } + for b in &r.to_le_bytes()[..r.size()] { + hasher.write_u8(*b); + } + hasher.finish() + }; + let mut res = router; + let mut h = None; + for router2 in routers { + let h2 = hash(router2); + if h2 > *h.get_or_insert_with(|| hash(res)) { + res = router2; + h = Some(h2); + } + } + res + } + } + } + + #[inline] + fn failover_brokering_to(source_links: &[ZenohId], dest: ZenohId) -> bool { + // if source_links is empty then gossip is probably disabled in source peer + !source_links.is_empty() && !source_links.contains(&dest) + } + + #[inline] + fn failover_brokering(&self, peer1: ZenohId, peer2: ZenohId) -> bool { + self.router_peers_failover_brokering + && self + .peers_net + .as_ref() + .map(|net| HatTables::failover_brokering_to(net.get_links(peer1), peer2)) + .unwrap_or(false) + } + + fn schedule_compute_trees(&mut self, tables_ref: Arc, net_type: WhatAmI) { + log::trace!("Schedule computations"); + if (net_type == WhatAmI::Router && self.routers_trees_task.is_none()) + || (net_type == WhatAmI::Peer && self.peers_trees_task.is_none()) + { + let task = Some(async_std::task::spawn(async move { + async_std::task::sleep(std::time::Duration::from_millis(*TREES_COMPUTATION_DELAY)) + .await; + let mut tables = zwrite!(tables_ref.tables); + + log::trace!("Compute trees"); + let new_childs = match net_type { + WhatAmI::Router => hat_mut!(tables) + .routers_net + .as_mut() + .unwrap() + .compute_trees(), + _ => hat_mut!(tables).peers_net.as_mut().unwrap().compute_trees(), + }; + + log::trace!("Compute routes"); + pubsub::pubsub_tree_change(&mut tables, &new_childs, net_type); + queries::queries_tree_change(&mut tables, &new_childs, net_type); + + log::trace!("Computations completed"); + match net_type { + WhatAmI::Router => hat_mut!(tables).routers_trees_task = None, + _ => hat_mut!(tables).peers_trees_task = None, + }; + })); + match net_type { + WhatAmI::Router => self.routers_trees_task = task, + _ => self.peers_trees_task = task, + }; + } + } +} + +pub(crate) struct HatCode {} + +impl HatBaseTrait for HatCode { + fn init( + &self, + tables: &mut Tables, + runtime: Runtime, + router_full_linkstate: bool, + peer_full_linkstate: bool, + router_peers_failover_brokering: bool, + gossip: bool, + gossip_multihop: bool, + autoconnect: WhatAmIMatcher, + ) { + if router_full_linkstate | gossip { + hat_mut!(tables).routers_net = Some(Network::new( + "[Routers network]".to_string(), + tables.zid, + runtime.clone(), + router_full_linkstate, + router_peers_failover_brokering, + gossip, + gossip_multihop, + autoconnect, + )); + } + if peer_full_linkstate | gossip { + hat_mut!(tables).peers_net = Some(Network::new( + "[Peers network]".to_string(), + tables.zid, + runtime, + peer_full_linkstate, + router_peers_failover_brokering, + gossip, + gossip_multihop, + autoconnect, + )); + } + if router_full_linkstate && peer_full_linkstate { + hat_mut!(tables).shared_nodes = shared_nodes( + hat!(tables).routers_net.as_ref().unwrap(), + hat!(tables).peers_net.as_ref().unwrap(), + ); + } + } + + fn new_tables(&self, router_peers_failover_brokering: bool) -> Box { + Box::new(HatTables::new(router_peers_failover_brokering)) + } + + fn new_face(&self) -> Box { + Box::new(HatFace::new()) + } + + fn new_resource(&self) -> Box { + Box::new(HatContext::new()) + } + + fn new_local_face( + &self, + tables: &mut Tables, + _tables_ref: &Arc, + 
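The elect_router logic above is a rendezvous (highest-random-weight) hash: each peer hashes the key expression together with every candidate router's id and keeps the candidate with the largest hash, so all peers independently elect the same router for a given key without any coordination. A minimal standalone sketch of that idea, using plain string ids instead of ZenohId (the names elect, candidates and routers are illustrative assumptions, not Zenoh APIs):

    use std::collections::hash_map::DefaultHasher;
    use std::hash::Hasher;

    // Every node that evaluates this with the same key and the same candidate
    // set picks the same winner, which is what makes the election coordination-free.
    fn elect<'a>(key_expr: &str, candidates: &'a [String]) -> Option<&'a String> {
        candidates.iter().max_by_key(|id| {
            let mut h = DefaultHasher::new();
            h.write(key_expr.as_bytes());
            h.write(id.as_bytes());
            h.finish()
        })
    }

    fn main() {
        let routers = ["r1".to_string(), "r2".to_string(), "r3".to_string()];
        // Different keys may elect different routers, but every peer computing
        // this locally reaches the same answer for a given key.
        println!("{:?}", elect("demo/example/a", &routers));
        println!("{:?}", elect("demo/example/b", &routers));
    }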
primitives: Arc, + ) -> ZResult> { + let fid = tables.face_counter; + tables.face_counter += 1; + let mut newface = tables + .faces + .entry(fid) + .or_insert_with(|| { + FaceState::new( + fid, + tables.zid, + WhatAmI::Client, + #[cfg(feature = "stats")] + None, + primitives.clone(), + 0, + None, + Box::new(HatFace::new()), + ) + }) + .clone(); + log::debug!("New {}", newface); + + pubsub_new_face(tables, &mut newface); + queries_new_face(tables, &mut newface); + + Ok(newface) + } + + fn new_transport_unicast_face( + &self, + tables: &mut Tables, + tables_ref: &Arc, + transport: TransportUnicast, + ) -> ZResult> { + let whatami = transport.get_whatami()?; + + let link_id = match (tables.whatami, whatami) { + (WhatAmI::Router, WhatAmI::Router) => hat_mut!(tables) + .routers_net + .as_mut() + .unwrap() + .add_link(transport.clone()), + (WhatAmI::Router, WhatAmI::Peer) + | (WhatAmI::Peer, WhatAmI::Router) + | (WhatAmI::Peer, WhatAmI::Peer) => { + if let Some(net) = hat_mut!(tables).peers_net.as_mut() { + net.add_link(transport.clone()) + } else { + 0 + } + } + _ => 0, + }; + + if hat!(tables).full_net(WhatAmI::Router) && hat!(tables).full_net(WhatAmI::Peer) { + hat_mut!(tables).shared_nodes = shared_nodes( + hat!(tables).routers_net.as_ref().unwrap(), + hat!(tables).peers_net.as_ref().unwrap(), + ); + } + + let fid = tables.face_counter; + tables.face_counter += 1; + let zid = transport.get_zid()?; + #[cfg(feature = "stats")] + let stats = transport.get_stats()?; + let mut newface = tables + .faces + .entry(fid) + .or_insert_with(|| { + FaceState::new( + fid, + zid, + whatami, + #[cfg(feature = "stats")] + Some(stats), + Arc::new(Mux::new(transport)), + link_id, + None, + Box::new(HatFace::new()), + ) + }) + .clone(); + log::debug!("New {}", newface); + + pubsub_new_face(tables, &mut newface); + queries_new_face(tables, &mut newface); + + match (tables.whatami, whatami) { + (WhatAmI::Router, WhatAmI::Router) => { + hat_mut!(tables).schedule_compute_trees(tables_ref.clone(), WhatAmI::Router); + } + (WhatAmI::Router, WhatAmI::Peer) + | (WhatAmI::Peer, WhatAmI::Router) + | (WhatAmI::Peer, WhatAmI::Peer) => { + if hat_mut!(tables).full_net(WhatAmI::Peer) { + hat_mut!(tables).schedule_compute_trees(tables_ref.clone(), WhatAmI::Peer); + } + } + _ => (), + } + Ok(newface) + } + + fn close_face(&self, tables: &TablesLock, face: &mut Arc) { + let mut wtables = zwrite!(tables.tables); + let mut face_clone = face.clone(); + let face = get_mut_unchecked(face); + for res in face.remote_mappings.values_mut() { + get_mut_unchecked(res).session_ctxs.remove(&face.id); + Resource::clean(res); + } + face.remote_mappings.clear(); + for res in face.local_mappings.values_mut() { + get_mut_unchecked(res).session_ctxs.remove(&face.id); + Resource::clean(res); + } + face.local_mappings.clear(); + + let mut subs_matches = vec![]; + for mut res in face + .hat + .downcast_mut::() + .unwrap() + .remote_subs + .drain() + { + get_mut_unchecked(&mut res).session_ctxs.remove(&face.id); + undeclare_client_subscription(&mut wtables, &mut face_clone, &mut res); + + if res.context.is_some() { + for match_ in &res.context().matches { + let mut match_ = match_.upgrade().unwrap(); + if !Arc::ptr_eq(&match_, &res) { + get_mut_unchecked(&mut match_) + .context_mut() + .valid_data_routes = false; + subs_matches.push(match_); + } + } + get_mut_unchecked(&mut res).context_mut().valid_data_routes = false; + subs_matches.push(res); + } + } + + let mut qabls_matches = vec![]; + for mut res in face + .hat + .downcast_mut::() + .unwrap() + 
.remote_qabls + .drain() + { + get_mut_unchecked(&mut res).session_ctxs.remove(&face.id); + undeclare_client_queryable(&mut wtables, &mut face_clone, &mut res); + + if res.context.is_some() { + for match_ in &res.context().matches { + let mut match_ = match_.upgrade().unwrap(); + if !Arc::ptr_eq(&match_, &res) { + get_mut_unchecked(&mut match_) + .context_mut() + .valid_query_routes = false; + qabls_matches.push(match_); + } + } + get_mut_unchecked(&mut res).context_mut().valid_query_routes = false; + qabls_matches.push(res); + } + } + drop(wtables); + + let mut matches_data_routes = vec![]; + let mut matches_query_routes = vec![]; + let rtables = zread!(tables.tables); + for _match in subs_matches.drain(..) { + matches_data_routes.push(( + _match.clone(), + rtables.hat_code.compute_data_routes_(&rtables, &_match), + )); + } + for _match in qabls_matches.drain(..) { + matches_query_routes.push(( + _match.clone(), + rtables.hat_code.compute_query_routes_(&rtables, &_match), + )); + } + drop(rtables); + + let mut wtables = zwrite!(tables.tables); + for (mut res, data_routes) in matches_data_routes { + get_mut_unchecked(&mut res) + .context_mut() + .update_data_routes(data_routes); + Resource::clean(&mut res); + } + for (mut res, query_routes) in matches_query_routes { + get_mut_unchecked(&mut res) + .context_mut() + .update_query_routes(query_routes); + Resource::clean(&mut res); + } + wtables.faces.remove(&face.id); + drop(wtables); + } + + fn handle_oam( + &self, + tables: &mut Tables, + tables_ref: &Arc, + oam: Oam, + transport: &TransportUnicast, + ) -> ZResult<()> { + if oam.id == OAM_LINKSTATE { + if let ZExtBody::ZBuf(buf) = oam.body { + if let Ok(zid) = transport.get_zid() { + use zenoh_buffers::reader::HasReader; + use zenoh_codec::RCodec; + let codec = Zenoh080Routing::new(); + let mut reader = buf.reader(); + let list: LinkStateList = codec.read(&mut reader).unwrap(); + + let whatami = transport.get_whatami()?; + match (tables.whatami, whatami) { + (WhatAmI::Router, WhatAmI::Router) => { + for (_, removed_node) in hat_mut!(tables) + .routers_net + .as_mut() + .unwrap() + .link_states(list.link_states, zid) + .removed_nodes + { + pubsub_remove_node(tables, &removed_node.zid, WhatAmI::Router); + queries_remove_node(tables, &removed_node.zid, WhatAmI::Router); + } + + if hat!(tables).full_net(WhatAmI::Peer) { + hat_mut!(tables).shared_nodes = shared_nodes( + hat!(tables).routers_net.as_ref().unwrap(), + hat!(tables).peers_net.as_ref().unwrap(), + ); + } + + hat_mut!(tables) + .schedule_compute_trees(tables_ref.clone(), WhatAmI::Router); + } + (WhatAmI::Router, WhatAmI::Peer) + | (WhatAmI::Peer, WhatAmI::Router) + | (WhatAmI::Peer, WhatAmI::Peer) => { + if let Some(net) = hat_mut!(tables).peers_net.as_mut() { + let changes = net.link_states(list.link_states, zid); + if hat!(tables).full_net(WhatAmI::Peer) { + for (_, removed_node) in changes.removed_nodes { + pubsub_remove_node( + tables, + &removed_node.zid, + WhatAmI::Peer, + ); + queries_remove_node( + tables, + &removed_node.zid, + WhatAmI::Peer, + ); + } + + if tables.whatami == WhatAmI::Router { + hat_mut!(tables).shared_nodes = shared_nodes( + hat!(tables).routers_net.as_ref().unwrap(), + hat!(tables).peers_net.as_ref().unwrap(), + ); + } + + hat_mut!(tables) + .schedule_compute_trees(tables_ref.clone(), WhatAmI::Peer); + } else { + for (_, updated_node) in changes.updated_nodes { + pubsub_linkstate_change( + tables, + &updated_node.zid, + &updated_node.links, + ); + queries_linkstate_change( + tables, + &updated_node.zid, + 
&updated_node.links, + ); + } + } + } + } + _ => (), + }; + } + } + } + + Ok(()) + } + + fn map_routing_context( + &self, + tables: &Tables, + face: &FaceState, + routing_context: RoutingContext, + ) -> RoutingContext { + match tables.whatami { + WhatAmI::Router => match face.whatami { + WhatAmI::Router => hat!(tables) + .routers_net + .as_ref() + .unwrap() + .get_local_context(routing_context, face.link_id), + WhatAmI::Peer => { + if hat!(tables).full_net(WhatAmI::Peer) { + hat!(tables) + .peers_net + .as_ref() + .unwrap() + .get_local_context(routing_context, face.link_id) + } else { + 0 + } + } + _ => 0, + }, + WhatAmI::Peer => { + if hat!(tables).full_net(WhatAmI::Peer) { + hat!(tables) + .peers_net + .as_ref() + .unwrap() + .get_local_context(routing_context, face.link_id) + } else { + 0 + } + } + _ => 0, + } + } + + fn closing( + &self, + tables: &mut Tables, + tables_ref: &Arc, + transport: &TransportUnicast, + ) -> ZResult<()> { + match (transport.get_zid(), transport.get_whatami()) { + (Ok(zid), Ok(whatami)) => { + match (tables.whatami, whatami) { + (WhatAmI::Router, WhatAmI::Router) => { + for (_, removed_node) in hat_mut!(tables) + .routers_net + .as_mut() + .unwrap() + .remove_link(&zid) + { + pubsub_remove_node(tables, &removed_node.zid, WhatAmI::Router); + queries_remove_node(tables, &removed_node.zid, WhatAmI::Router); + } + + if hat!(tables).full_net(WhatAmI::Peer) { + hat_mut!(tables).shared_nodes = shared_nodes( + hat!(tables).routers_net.as_ref().unwrap(), + hat!(tables).peers_net.as_ref().unwrap(), + ); + } + + hat_mut!(tables) + .schedule_compute_trees(tables_ref.clone(), WhatAmI::Router); + } + (WhatAmI::Router, WhatAmI::Peer) + | (WhatAmI::Peer, WhatAmI::Router) + | (WhatAmI::Peer, WhatAmI::Peer) => { + if hat!(tables).full_net(WhatAmI::Peer) { + for (_, removed_node) in hat_mut!(tables) + .peers_net + .as_mut() + .unwrap() + .remove_link(&zid) + { + pubsub_remove_node(tables, &removed_node.zid, WhatAmI::Peer); + queries_remove_node(tables, &removed_node.zid, WhatAmI::Peer); + } + + if tables.whatami == WhatAmI::Router { + hat_mut!(tables).shared_nodes = shared_nodes( + hat!(tables).routers_net.as_ref().unwrap(), + hat!(tables).peers_net.as_ref().unwrap(), + ); + } + + hat_mut!(tables) + .schedule_compute_trees(tables_ref.clone(), WhatAmI::Peer); + } else if let Some(net) = hat_mut!(tables).peers_net.as_mut() { + net.remove_link(&zid); + } + } + _ => (), + }; + } + (_, _) => log::error!("Closed transport in session closing!"), + } + Ok(()) + } + + fn as_any(&self) -> &dyn Any { + self + } + + #[inline] + fn ingress_filter(&self, tables: &Tables, face: &FaceState, expr: &mut RoutingExpr) -> bool { + tables.whatami != WhatAmI::Router + || face.whatami != WhatAmI::Peer + || hat!(tables).peers_net.is_none() + || tables.zid + == *hat!(tables).elect_router( + &tables.zid, + expr.full_expr(), + hat!(tables).get_router_links(face.zid), + ) + } + + #[inline] + fn egress_filter( + &self, + tables: &Tables, + src_face: &FaceState, + out_face: &Arc, + expr: &mut RoutingExpr, + ) -> bool { + if src_face.id != out_face.id + && match (src_face.mcast_group.as_ref(), out_face.mcast_group.as_ref()) { + (Some(l), Some(r)) => l != r, + _ => true, + } + { + let dst_master = tables.whatami != WhatAmI::Router + || out_face.whatami != WhatAmI::Peer + || hat!(tables).peers_net.is_none() + || tables.zid + == *hat!(tables).elect_router( + &tables.zid, + expr.full_expr(), + hat!(tables).get_router_links(out_face.zid), + ); + + return dst_master + && (src_face.whatami != WhatAmI::Peer + || 
out_face.whatami != WhatAmI::Peer + || hat!(tables).full_net(WhatAmI::Peer) + || hat!(tables).failover_brokering(src_face.zid, out_face.zid)); + } + false + } +} + +struct HatContext { + router_subs: HashSet, + peer_subs: HashSet, + router_qabls: HashMap, + peer_qabls: HashMap, +} + +impl HatContext { + fn new() -> Self { + Self { + router_subs: HashSet::new(), + peer_subs: HashSet::new(), + router_qabls: HashMap::new(), + peer_qabls: HashMap::new(), + } + } +} + +struct HatFace { + local_subs: HashSet>, + remote_subs: HashSet>, + local_qabls: HashMap, QueryableInfo>, + remote_qabls: HashSet>, +} + +impl HatFace { + fn new() -> Self { + Self { + local_subs: HashSet::new(), + remote_subs: HashSet::new(), + local_qabls: HashMap::new(), + remote_qabls: HashSet::new(), + } + } +} + +fn get_router(tables: &Tables, face: &Arc, nodeid: RoutingContext) -> Option { + match hat!(tables) + .routers_net + .as_ref() + .unwrap() + .get_link(face.link_id) + { + Some(link) => match link.get_zid(&(nodeid as u64)) { + Some(router) => Some(*router), + None => { + log::error!( + "Received router declaration with unknown routing context id {}", + nodeid + ); + None + } + }, + None => { + log::error!( + "Could not find corresponding link in routers network for {}", + face + ); + None + } + } +} + +fn get_peer(tables: &Tables, face: &Arc, nodeid: RoutingContext) -> Option { + match hat!(tables) + .peers_net + .as_ref() + .unwrap() + .get_link(face.link_id) + { + Some(link) => match link.get_zid(&(nodeid as u64)) { + Some(router) => Some(*router), + None => { + log::error!( + "Received peer declaration with unknown routing context id {}", + nodeid + ); + None + } + }, + None => { + log::error!( + "Could not find corresponding link in peers network for {}", + face + ); + None + } + } +} + +impl HatTrait for HatCode {} \ No newline at end of file diff --git a/zenoh/src/net/routing/hat/peer/network.rs b/zenoh/src/net/routing/hat/peer/network.rs new file mode 100644 index 0000000000..61b3f6c78a --- /dev/null +++ b/zenoh/src/net/routing/hat/peer/network.rs @@ -0,0 +1,1011 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
+// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +use crate::net::codec::Zenoh080Routing; +use crate::net::protocol::linkstate::{LinkState, LinkStateList}; +use crate::net::routing::dispatcher::tables::RoutingContext; +use crate::net::runtime::Runtime; +use async_std::task; +use petgraph::graph::NodeIndex; +use petgraph::visit::{IntoNodeReferences, VisitMap, Visitable}; +use std::convert::TryInto; +use vec_map::VecMap; +use zenoh_buffers::writer::{DidntWrite, HasWriter}; +use zenoh_buffers::ZBuf; +use zenoh_codec::WCodec; +use zenoh_link::Locator; +use zenoh_protocol::common::ZExtBody; +use zenoh_protocol::core::{WhatAmI, WhatAmIMatcher, ZenohId}; +use zenoh_protocol::network::oam::id::OAM_LINKSTATE; +use zenoh_protocol::network::{oam, NetworkBody, NetworkMessage, Oam}; +use zenoh_transport::TransportUnicast; + +#[derive(Clone)] +struct Details { + zid: bool, + locators: bool, + links: bool, +} + +#[derive(Clone)] +pub(super) struct Node { + pub(super) zid: ZenohId, + pub(super) whatami: Option, + pub(super) locators: Option>, + pub(super) sn: u64, + pub(super) links: Vec, +} + +impl std::fmt::Debug for Node { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + write!(f, "{}", self.zid) + } +} + +pub(super) struct Link { + pub(super) transport: TransportUnicast, + zid: ZenohId, + mappings: VecMap, + local_mappings: VecMap, +} + +impl Link { + fn new(transport: TransportUnicast) -> Self { + let zid = transport.get_zid().unwrap(); + Link { + transport, + zid, + mappings: VecMap::new(), + local_mappings: VecMap::new(), + } + } + + #[inline] + pub(super) fn set_zid_mapping(&mut self, psid: u64, zid: ZenohId) { + self.mappings.insert(psid.try_into().unwrap(), zid); + } + + #[inline] + pub(super) fn get_zid(&self, psid: &u64) -> Option<&ZenohId> { + self.mappings.get((*psid).try_into().unwrap()) + } + + #[inline] + pub(super) fn set_local_psid_mapping(&mut self, psid: u64, local_psid: u64) { + self.local_mappings + .insert(psid.try_into().unwrap(), local_psid); + } + + #[inline] + pub(super) fn get_local_psid(&self, psid: &u64) -> Option<&u64> { + self.local_mappings.get((*psid).try_into().unwrap()) + } +} + +pub(super) struct Changes { + pub(super) updated_nodes: Vec<(NodeIndex, Node)>, + pub(super) removed_nodes: Vec<(NodeIndex, Node)>, +} + +#[derive(Clone)] +pub(super) struct Tree { + pub(super) parent: Option, + pub(super) childs: Vec, + pub(super) directions: Vec>, +} + +pub(super) struct Network { + pub(super) name: String, + pub(super) full_linkstate: bool, + pub(super) router_peers_failover_brokering: bool, + pub(super) gossip: bool, + pub(super) gossip_multihop: bool, + pub(super) autoconnect: WhatAmIMatcher, + pub(super) idx: NodeIndex, + pub(super) links: VecMap, + pub(super) trees: Vec, + pub(super) distances: Vec, + pub(super) graph: petgraph::stable_graph::StableUnGraph, + pub(super) runtime: Runtime, +} + +impl Network { + #[allow(clippy::too_many_arguments)] + pub(super) fn new( + name: String, + zid: ZenohId, + runtime: Runtime, + full_linkstate: bool, + router_peers_failover_brokering: bool, + gossip: bool, + gossip_multihop: bool, + autoconnect: WhatAmIMatcher, + ) -> Self { + let mut graph = petgraph::stable_graph::StableGraph::default(); + log::debug!("{} Add node (self) {}", name, zid); + let idx = graph.add_node(Node { + zid, + whatami: Some(runtime.whatami), + locators: None, + sn: 1, + links: vec![], + }); + Network { + name, + full_linkstate, + router_peers_failover_brokering, + gossip, + 
gossip_multihop, + autoconnect, + idx, + links: VecMap::new(), + trees: vec![Tree { + parent: None, + childs: vec![], + directions: vec![None], + }], + distances: vec![0.0], + graph, + runtime, + } + } + + //noinspection ALL + // pub(super) fn dot(&self) -> String { + // std::format!( + // "{:?}", + // petgraph::dot::Dot::with_config(&self.graph, &[petgraph::dot::Config::EdgeNoLabel]) + // ) + // } + + #[inline] + pub(super) fn get_node(&self, zid: &ZenohId) -> Option<&Node> { + self.graph.node_weights().find(|weight| weight.zid == *zid) + } + + #[inline] + pub(super) fn get_idx(&self, zid: &ZenohId) -> Option { + self.graph + .node_indices() + .find(|idx| self.graph[*idx].zid == *zid) + } + + #[inline] + pub(super) fn get_link(&self, id: usize) -> Option<&Link> { + self.links.get(id) + } + + #[inline] + pub(super) fn get_link_from_zid(&self, zid: &ZenohId) -> Option<&Link> { + self.links.values().find(|link| link.zid == *zid) + } + + #[inline] + pub(super) fn get_local_context( + &self, + context: RoutingContext, + link_id: usize, + ) -> RoutingContext { + match self.get_link(link_id) { + Some(link) => match link.get_local_psid(&(context as u64)) { + Some(psid) => (*psid).try_into().unwrap_or(0), + None => { + log::error!( + "Cannot find local psid for context {} on link {}", + context, + link_id + ); + 0 + } + }, + None => { + log::error!("Cannot find link {}", link_id); + 0 + } + } + } + + fn add_node(&mut self, node: Node) -> NodeIndex { + let zid = node.zid; + let idx = self.graph.add_node(node); + for link in self.links.values_mut() { + if let Some((psid, _)) = link.mappings.iter().find(|(_, p)| **p == zid) { + link.local_mappings.insert(psid, idx.index() as u64); + } + } + idx + } + + fn make_link_state(&self, idx: NodeIndex, details: Details) -> LinkState { + let links = if details.links { + self.graph[idx] + .links + .iter() + .filter_map(|zid| { + if let Some(idx2) = self.get_idx(zid) { + Some(idx2.index().try_into().unwrap()) + } else { + log::error!( + "{} Internal error building link state: cannot get index of {}", + self.name, + zid + ); + None + } + }) + .collect() + } else { + vec![] + }; + LinkState { + psid: idx.index().try_into().unwrap(), + sn: self.graph[idx].sn, + zid: if details.zid { + Some(self.graph[idx].zid) + } else { + None + }, + whatami: self.graph[idx].whatami, + locators: if details.locators { + if idx == self.idx { + Some(self.runtime.get_locators()) + } else { + self.graph[idx].locators.clone() + } + } else { + None + }, + links, + } + } + + fn make_msg(&self, idxs: Vec<(NodeIndex, Details)>) -> Result { + let mut link_states = vec![]; + for (idx, details) in idxs { + link_states.push(self.make_link_state(idx, details)); + } + let codec = Zenoh080Routing::new(); + let mut buf = ZBuf::empty(); + codec.write(&mut buf.writer(), &LinkStateList { link_states })?; + Ok(NetworkBody::OAM(Oam { + id: OAM_LINKSTATE, + body: ZExtBody::ZBuf(buf), + ext_qos: oam::ext::QoSType::oam_default(), + ext_tstamp: None, + }) + .into()) + } + + fn send_on_link(&self, idxs: Vec<(NodeIndex, Details)>, transport: &TransportUnicast) { + if let Ok(msg) = self.make_msg(idxs) { + log::trace!("{} Send to {:?} {:?}", self.name, transport.get_zid(), msg); + if let Err(e) = transport.schedule(msg) { + log::debug!("{} Error sending LinkStateList: {}", self.name, e); + } + } else { + log::error!("Failed to encode Linkstate message"); + } + } + + fn send_on_links
<P>
(&self, idxs: Vec<(NodeIndex, Details)>, mut parameters: P) + where + P: FnMut(&Link) -> bool, + { + if let Ok(msg) = self.make_msg(idxs) { + for link in self.links.values() { + if parameters(link) { + log::trace!("{} Send to {} {:?}", self.name, link.zid, msg); + if let Err(e) = link.transport.schedule(msg.clone()) { + log::debug!("{} Error sending LinkStateList: {}", self.name, e); + } + } + } + } else { + log::error!("Failed to encode Linkstate message"); + } + } + + // Indicates if locators should be included when propagating Linkstate message + // from the given node. + // Returns true if gossip is enabled and if multihop gossip is enabled or + // the node is one of self neighbours. + fn propagate_locators(&self, idx: NodeIndex) -> bool { + self.gossip + && (self.gossip_multihop + || idx == self.idx + || self.links.values().any(|link| { + self.graph + .node_weight(idx) + .map(|node| link.zid == node.zid) + .unwrap_or(true) + })) + } + + fn update_edge(&mut self, idx1: NodeIndex, idx2: NodeIndex) { + use std::hash::Hasher; + let mut hasher = std::collections::hash_map::DefaultHasher::default(); + if self.graph[idx1].zid > self.graph[idx2].zid { + hasher.write(&self.graph[idx2].zid.to_le_bytes()); + hasher.write(&self.graph[idx1].zid.to_le_bytes()); + } else { + hasher.write(&self.graph[idx1].zid.to_le_bytes()); + hasher.write(&self.graph[idx2].zid.to_le_bytes()); + } + let weight = 100.0 + ((hasher.finish() as u32) as f64) / u32::MAX as f64; + self.graph.update_edge(idx1, idx2, weight); + } + + pub(super) fn link_states(&mut self, link_states: Vec, src: ZenohId) -> Changes { + log::trace!("{} Received from {} raw: {:?}", self.name, src, link_states); + + let graph = &self.graph; + let links = &mut self.links; + + let src_link = match links.values_mut().find(|link| link.zid == src) { + Some(link) => link, + None => { + log::error!( + "{} Received LinkStateList from unknown link {}", + self.name, + src + ); + return Changes { + updated_nodes: vec![], + removed_nodes: vec![], + }; + } + }; + + // register psid<->zid mappings & apply mapping to nodes + #[allow(clippy::needless_collect)] // need to release borrow on self + let link_states = link_states + .into_iter() + .filter_map(|link_state| { + if let Some(zid) = link_state.zid { + src_link.set_zid_mapping(link_state.psid, zid); + if let Some(idx) = graph.node_indices().find(|idx| graph[*idx].zid == zid) { + src_link.set_local_psid_mapping(link_state.psid, idx.index() as u64); + } + Some(( + zid, + link_state.whatami.unwrap_or(WhatAmI::Router), + link_state.locators, + link_state.sn, + link_state.links, + )) + } else { + match src_link.get_zid(&link_state.psid) { + Some(zid) => Some(( + *zid, + link_state.whatami.unwrap_or(WhatAmI::Router), + link_state.locators, + link_state.sn, + link_state.links, + )), + None => { + log::error!( + "Received LinkState from {} with unknown node mapping {}", + src, + link_state.psid + ); + None + } + } + } + }) + .collect::>(); + + // apply psid<->zid mapping to links + let src_link = self.get_link_from_zid(&src).unwrap(); + let link_states = link_states + .into_iter() + .map(|(zid, wai, locs, sn, links)| { + let links: Vec = links + .iter() + .filter_map(|l| { + if let Some(zid) = src_link.get_zid(l) { + Some(*zid) + } else { + log::error!( + "{} Received LinkState from {} with unknown link mapping {}", + self.name, + src, + l + ); + None + } + }) + .collect(); + (zid, wai, locs, sn, links) + }) + .collect::>(); + + // log::trace!( + // "{} Received from {} mapped: {:?}", + // self.name, + // src, + // 
link_states + // ); + for link_state in &link_states { + log::trace!( + "{} Received from {} mapped: {:?}", + self.name, + src, + link_state + ); + } + + if !self.full_linkstate { + let mut changes = Changes { + updated_nodes: vec![], + removed_nodes: vec![], + }; + for (zid, whatami, locators, sn, links) in link_states.into_iter() { + let idx = match self.get_idx(&zid) { + None => { + let idx = self.add_node(Node { + zid, + whatami: Some(whatami), + locators: locators.clone(), + sn, + links, + }); + changes.updated_nodes.push((idx, self.graph[idx].clone())); + locators.is_some().then_some(idx) + } + Some(idx) => { + let node = &mut self.graph[idx]; + let oldsn = node.sn; + (oldsn < sn) + .then(|| { + node.sn = sn; + node.links = links.clone(); + changes.updated_nodes.push((idx, node.clone())); + (node.locators != locators && locators.is_some()).then(|| { + node.locators = locators.clone(); + idx + }) + }) + .flatten() + } + }; + + if self.gossip { + if let Some(idx) = idx { + if self.gossip_multihop || self.links.values().any(|link| link.zid == zid) { + self.send_on_links( + vec![( + idx, + Details { + zid: true, + locators: true, + links: false, + }, + )], + |link| link.zid != zid, + ); + } + + if !self.autoconnect.is_empty() { + // Connect discovered peers + if task::block_on(self.runtime.manager().get_transport_unicast(&zid)) + .is_none() + && self.autoconnect.matches(whatami) + { + if let Some(locators) = locators { + let runtime = self.runtime.clone(); + self.runtime.spawn(async move { + // random backoff + async_std::task::sleep(std::time::Duration::from_millis( + rand::random::() % 100, + )) + .await; + runtime.connect_peer(&zid, &locators).await; + }); + } + } + } + } + } + } + return changes; + } + + // Add nodes to graph & filter out up to date states + let mut link_states = link_states + .into_iter() + .filter_map( + |(zid, whatami, locators, sn, links)| match self.get_idx(&zid) { + Some(idx) => { + let node = &mut self.graph[idx]; + let oldsn = node.sn; + if oldsn < sn { + node.sn = sn; + node.links = links.clone(); + if locators.is_some() { + node.locators = locators; + } + if oldsn == 0 { + Some((links, idx, true)) + } else { + Some((links, idx, false)) + } + } else { + None + } + } + None => { + let node = Node { + zid, + whatami: Some(whatami), + locators, + sn, + links: links.clone(), + }; + log::debug!("{} Add node (state) {}", self.name, zid); + let idx = self.add_node(node); + Some((links, idx, true)) + } + }, + ) + .collect::, NodeIndex, bool)>>(); + + // Add/remove edges from graph + let mut reintroduced_nodes = vec![]; + for (links, idx1, _) in &link_states { + for link in links { + if let Some(idx2) = self.get_idx(link) { + if self.graph[idx2].links.contains(&self.graph[*idx1].zid) { + log::trace!( + "{} Update edge (state) {} {}", + self.name, + self.graph[*idx1].zid, + self.graph[idx2].zid + ); + self.update_edge(*idx1, idx2); + } + } else { + let node = Node { + zid: *link, + whatami: None, + locators: None, + sn: 0, + links: vec![], + }; + log::debug!("{} Add node (reintroduced) {}", self.name, link.clone()); + let idx = self.add_node(node); + reintroduced_nodes.push((vec![], idx, true)); + } + } + let mut edges = vec![]; + let mut neighbors = self.graph.neighbors_undirected(*idx1).detach(); + while let Some(edge) = neighbors.next(&self.graph) { + edges.push(edge); + } + for (eidx, idx2) in edges { + if !links.contains(&self.graph[idx2].zid) { + log::trace!( + "{} Remove edge (state) {} {}", + self.name, + self.graph[*idx1].zid, + self.graph[idx2].zid + ); + 
self.graph.remove_edge(eidx); + } + } + } + link_states.extend(reintroduced_nodes); + + let removed = self.remove_detached_nodes(); + let link_states = link_states + .into_iter() + .filter(|ls| !removed.iter().any(|(idx, _)| idx == &ls.1)) + .collect::, NodeIndex, bool)>>(); + + if !self.autoconnect.is_empty() { + // Connect discovered peers + for (_, idx, _) in &link_states { + let node = &self.graph[*idx]; + if let Some(whatami) = node.whatami { + if task::block_on(self.runtime.manager().get_transport_unicast(&node.zid)) + .is_none() + && self.autoconnect.matches(whatami) + { + if let Some(locators) = &node.locators { + let runtime = self.runtime.clone(); + let zid = node.zid; + let locators = locators.clone(); + self.runtime.spawn(async move { + // random backoff + async_std::task::sleep(std::time::Duration::from_millis( + rand::random::() % 100, + )) + .await; + runtime.connect_peer(&zid, &locators).await; + }); + } + } + } + } + } + + // Propagate link states + // Note: we need to send all states at once for each face + // to avoid premature node deletion on the other side + #[allow(clippy::type_complexity)] + if !link_states.is_empty() { + let (new_idxs, updated_idxs): ( + Vec<(Vec, NodeIndex, bool)>, + Vec<(Vec, NodeIndex, bool)>, + ) = link_states.into_iter().partition(|(_, _, new)| *new); + let new_idxs = new_idxs + .into_iter() + .map(|(_, idx1, _new_node)| { + ( + idx1, + Details { + zid: true, + locators: self.propagate_locators(idx1), + links: true, + }, + ) + }) + .collect::>(); + for link in self.links.values() { + if link.zid != src { + let updated_idxs: Vec<(NodeIndex, Details)> = updated_idxs + .clone() + .into_iter() + .filter_map(|(_, idx1, _)| { + if link.zid != self.graph[idx1].zid { + Some(( + idx1, + Details { + zid: false, + locators: self.propagate_locators(idx1), + links: true, + }, + )) + } else { + None + } + }) + .collect(); + if !new_idxs.is_empty() || !updated_idxs.is_empty() { + self.send_on_link( + [&new_idxs[..], &updated_idxs[..]].concat(), + &link.transport, + ); + } + } else if !new_idxs.is_empty() { + self.send_on_link(new_idxs.clone(), &link.transport); + } + } + } + Changes { + updated_nodes: vec![], + removed_nodes: removed, + } + } + + pub(super) fn add_link(&mut self, transport: TransportUnicast) -> usize { + let free_index = { + let mut i = 0; + while self.links.contains_key(i) { + i += 1; + } + i + }; + self.links.insert(free_index, Link::new(transport.clone())); + + let zid = transport.get_zid().unwrap(); + let whatami = transport.get_whatami().unwrap(); + + if self.full_linkstate || self.router_peers_failover_brokering { + let (idx, new) = match self.get_idx(&zid) { + Some(idx) => (idx, false), + None => { + log::debug!("{} Add node (link) {}", self.name, zid); + ( + self.add_node(Node { + zid, + whatami: Some(whatami), + locators: None, + sn: 0, + links: vec![], + }), + true, + ) + } + }; + if self.full_linkstate && self.graph[idx].links.contains(&self.graph[self.idx].zid) { + log::trace!("Update edge (link) {} {}", self.graph[self.idx].zid, zid); + self.update_edge(self.idx, idx); + } + self.graph[self.idx].links.push(zid); + self.graph[self.idx].sn += 1; + + // Send updated self linkstate on all existing links except new one + self.links + .values() + .filter(|link| { + link.zid != zid + && (self.full_linkstate + || link.transport.get_whatami().unwrap_or(WhatAmI::Peer) + == WhatAmI::Router) + }) + .for_each(|link| { + self.send_on_link( + if new || (!self.full_linkstate && !self.gossip_multihop) { + vec![ + ( + idx, + Details { + zid: 
true, + locators: false, + links: false, + }, + ), + ( + self.idx, + Details { + zid: false, + locators: self.propagate_locators(idx), + links: true, + }, + ), + ] + } else { + vec![( + self.idx, + Details { + zid: false, + locators: self.propagate_locators(idx), + links: true, + }, + )] + }, + &link.transport, + ) + }); + } + + // Send all nodes linkstate on new link + let idxs = self + .graph + .node_indices() + .filter_map(|idx| { + (self.full_linkstate + || self.gossip_multihop + || self.links.values().any(|link| link.zid == zid) + || (self.router_peers_failover_brokering + && idx == self.idx + && whatami == WhatAmI::Router)) + .then(|| { + ( + idx, + Details { + zid: true, + locators: self.propagate_locators(idx), + links: self.full_linkstate + || (self.router_peers_failover_brokering + && idx == self.idx + && whatami == WhatAmI::Router), + }, + ) + }) + }) + .collect(); + self.send_on_link(idxs, &transport); + free_index + } + + pub(super) fn remove_link(&mut self, zid: &ZenohId) -> Vec<(NodeIndex, Node)> { + log::trace!("{} remove_link {}", self.name, zid); + self.links.retain(|_, link| link.zid != *zid); + self.graph[self.idx].links.retain(|link| *link != *zid); + + if self.full_linkstate { + if let Some((edge, _)) = self + .get_idx(zid) + .and_then(|idx| self.graph.find_edge_undirected(self.idx, idx)) + { + self.graph.remove_edge(edge); + } + let removed = self.remove_detached_nodes(); + + self.graph[self.idx].sn += 1; + + self.send_on_links( + vec![( + self.idx, + Details { + zid: false, + locators: self.gossip, + links: true, + }, + )], + |_| true, + ); + + removed + } else { + if let Some(idx) = self.get_idx(zid) { + self.graph.remove_node(idx); + } + if self.router_peers_failover_brokering { + self.send_on_links( + vec![( + self.idx, + Details { + zid: false, + locators: self.gossip, + links: true, + }, + )], + |link| { + link.zid != *zid + && link.transport.get_whatami().unwrap_or(WhatAmI::Peer) + == WhatAmI::Router + }, + ); + } + vec![] + } + } + + fn remove_detached_nodes(&mut self) -> Vec<(NodeIndex, Node)> { + let mut dfs_stack = vec![self.idx]; + let mut visit_map = self.graph.visit_map(); + while let Some(node) = dfs_stack.pop() { + if visit_map.visit(node) { + for succzid in &self.graph[node].links { + if let Some(succ) = self.get_idx(succzid) { + if !visit_map.is_visited(&succ) { + dfs_stack.push(succ); + } + } + } + } + } + + let mut removed = vec![]; + for idx in self.graph.node_indices().collect::>() { + if !visit_map.is_visited(&idx) { + log::debug!("Remove node {}", &self.graph[idx].zid); + removed.push((idx, self.graph.remove_node(idx).unwrap())); + } + } + removed + } + + pub(super) fn compute_trees(&mut self) -> Vec> { + let indexes = self.graph.node_indices().collect::>(); + let max_idx = indexes.iter().max().unwrap(); + + let old_childs: Vec> = self.trees.iter().map(|t| t.childs.clone()).collect(); + + self.trees.clear(); + self.trees.resize_with(max_idx.index() + 1, || Tree { + parent: None, + childs: vec![], + directions: vec![], + }); + + for tree_root_idx in &indexes { + let paths = petgraph::algo::bellman_ford(&self.graph, *tree_root_idx).unwrap(); + + if tree_root_idx.index() == 0 { + self.distances = paths.distances; + } + + if log::log_enabled!(log::Level::Debug) { + let ps: Vec> = paths + .predecessors + .iter() + .enumerate() + .map(|(is, o)| { + o.map(|ip| { + format!( + "{} <- {}", + self.graph[ip].zid, + self.graph[NodeIndex::new(is)].zid + ) + }) + }) + .collect(); + log::debug!("Tree {} {:?}", self.graph[*tree_root_idx].zid, ps); + } + + 
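The directions table filled in just below stores, for every reachable destination in the tree rooted at tree_root_idx, the first hop to take from the local node: the code walks the Bellman-Ford predecessor chain back from the destination until the predecessor is the local node, falling back to the tree parent when no such hop is found. A minimal sketch of that predecessor walk over plain usize indices (first_hop and preds are illustrative names, not petgraph or Zenoh APIs):

    // Walk back from `dest` through the predecessor array produced by a
    // shortest-path run until the predecessor is `me`; the node reached just
    // before that point is the neighbour to forward to.
    fn first_hop(preds: &[Option<usize>], me: usize, dest: usize) -> Option<usize> {
        let mut current = dest;
        while let Some(parent) = preds[current] {
            if parent == me {
                return Some(current);
            }
            current = parent;
        }
        None // unreachable from `me`, or `dest == me`
    }

    fn main() {
        // Chain 0 - 1 - 2 - 3, predecessors for the tree rooted at node 0.
        let preds = [None, Some(0), Some(1), Some(2)];
        assert_eq!(first_hop(&preds, 0, 3), Some(1)); // forward via node 1
        assert_eq!(first_hop(&preds, 0, 1), Some(1));
        assert_eq!(first_hop(&preds, 2, 3), Some(3));
    }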
self.trees[tree_root_idx.index()].parent = paths.predecessors[self.idx.index()]; + + for idx in &indexes { + if let Some(parent_idx) = paths.predecessors[idx.index()] { + if parent_idx == self.idx { + self.trees[tree_root_idx.index()].childs.push(*idx); + } + } + } + + self.trees[tree_root_idx.index()] + .directions + .resize_with(max_idx.index() + 1, || None); + let mut dfs = petgraph::algo::DfsSpace::new(&self.graph); + for destination in &indexes { + if self.idx != *destination + && petgraph::algo::has_path_connecting( + &self.graph, + self.idx, + *destination, + Some(&mut dfs), + ) + { + let mut direction = None; + let mut current = *destination; + while let Some(parent) = paths.predecessors[current.index()] { + if parent == self.idx { + direction = Some(current); + break; + } else { + current = parent; + } + } + + self.trees[tree_root_idx.index()].directions[destination.index()] = + match direction { + Some(direction) => Some(direction), + None => self.trees[tree_root_idx.index()].parent, + }; + } + } + } + + let mut new_childs = Vec::with_capacity(self.trees.len()); + new_childs.resize(self.trees.len(), vec![]); + + for i in 0..new_childs.len() { + new_childs[i] = if i < old_childs.len() { + self.trees[i] + .childs + .iter() + .filter(|idx| !old_childs[i].contains(idx)) + .cloned() + .collect() + } else { + self.trees[i].childs.clone() + }; + } + + new_childs + } + + #[inline] + pub(super) fn get_links(&self, node: ZenohId) -> &[ZenohId] { + self.get_node(&node) + .map(|node| &node.links[..]) + .unwrap_or_default() + } +} + +#[inline] +pub(super) fn shared_nodes(net1: &Network, net2: &Network) -> Vec { + net1.graph + .node_references() + .filter_map(|(_, node1)| { + net2.graph + .node_references() + .any(|(_, node2)| node1.zid == node2.zid) + .then_some(node1.zid) + }) + .collect() +} diff --git a/zenoh/src/net/routing/hat/peer/pubsub.rs b/zenoh/src/net/routing/hat/peer/pubsub.rs new file mode 100644 index 0000000000..0405107f86 --- /dev/null +++ b/zenoh/src/net/routing/hat/peer/pubsub.rs @@ -0,0 +1,1619 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
+// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +use crate::net::routing::dispatcher::face::FaceState; +use crate::net::routing::dispatcher::pubsub::*; +use crate::net::routing::dispatcher::resource::{Resource, RoutingContext, SessionContext}; +use crate::net::routing::dispatcher::tables::{Tables, TablesLock}; +use crate::net::routing::PREFIX_LIVELINESS; +use crate::net::routing::hat::HatPubSubTrait; +use super::network::Network; +use super::{get_peer, get_router, HatCode, HatContext, HatFace, HatTables}; +use crate::net::routing::dispatcher::tables::{DataRoutes, PullCaches, Route, RoutingExpr}; +use super::{face_hat, face_hat_mut, hat, hat_mut, res_hat, res_hat_mut}; +use petgraph::graph::NodeIndex; +use std::borrow::Cow; +use std::collections::{HashMap, HashSet}; +use std::sync::{Arc, RwLockReadGuard}; +use zenoh_core::zread; +use zenoh_protocol::core::key_expr::OwnedKeyExpr; +use zenoh_protocol::{ + core::{key_expr::keyexpr, Reliability, WhatAmI, WireExpr, ZenohId}, + network::declare::{ + common::ext::WireExprType, ext, subscriber::ext::SubscriberInfo, Declare, DeclareBody, + DeclareSubscriber, Mode, UndeclareSubscriber, + }, +}; +use zenoh_sync::get_mut_unchecked; + +#[inline] +fn send_sourced_subscription_to_net_childs( + tables: &Tables, + net: &Network, + childs: &[NodeIndex], + res: &Arc, + src_face: Option<&Arc>, + sub_info: &SubscriberInfo, + routing_context: RoutingContext, +) { + for child in childs { + if net.graph.contains_node(*child) { + match tables.get_face(&net.graph[*child].zid).cloned() { + Some(mut someface) => { + if src_face.is_none() || someface.id != src_face.unwrap().id { + let key_expr = Resource::decl_key(res, &mut someface); + + log::debug!("Send subscription {} on {}", res.expr(), someface); + + someface.primitives.send_declare(Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType { + node_id: routing_context, + }, + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id: 0, // TODO + wire_expr: key_expr, + ext_info: *sub_info, + }), + }); + } + } + None => log::trace!("Unable to find face for zid {}", net.graph[*child].zid), + } + } + } +} + +#[inline] +fn propagate_simple_subscription_to( + tables: &mut Tables, + dst_face: &mut Arc, + res: &Arc, + sub_info: &SubscriberInfo, + src_face: &mut Arc, + full_peer_net: bool, +) { + if (src_face.id != dst_face.id || res.expr().starts_with(PREFIX_LIVELINESS)) + && !face_hat!(dst_face).local_subs.contains(res) + && match tables.whatami { + WhatAmI::Router => { + if full_peer_net { + dst_face.whatami == WhatAmI::Client + } else { + dst_face.whatami != WhatAmI::Router + && (src_face.whatami != WhatAmI::Peer + || dst_face.whatami != WhatAmI::Peer + || hat!(tables).failover_brokering(src_face.zid, dst_face.zid)) + } + } + WhatAmI::Peer => { + if full_peer_net { + dst_face.whatami == WhatAmI::Client + } else { + src_face.whatami == WhatAmI::Client || dst_face.whatami == WhatAmI::Client + } + } + _ => src_face.whatami == WhatAmI::Client || dst_face.whatami == WhatAmI::Client, + } + { + face_hat_mut!(dst_face).local_subs.insert(res.clone()); + let key_expr = Resource::decl_key(res, dst_face); + dst_face.primitives.send_declare(Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id: 0, // TODO + wire_expr: key_expr, + ext_info: *sub_info, + }), + }); + } +} + +fn 
propagate_simple_subscription( + tables: &mut Tables, + res: &Arc, + sub_info: &SubscriberInfo, + src_face: &mut Arc, +) { + let full_peer_net = hat!(tables).full_net(WhatAmI::Peer); + for mut dst_face in tables + .faces + .values() + .cloned() + .collect::>>() + { + propagate_simple_subscription_to( + tables, + &mut dst_face, + res, + sub_info, + src_face, + full_peer_net, + ); + } +} + +fn propagate_sourced_subscription( + tables: &Tables, + res: &Arc, + sub_info: &SubscriberInfo, + src_face: Option<&Arc>, + source: &ZenohId, + net_type: WhatAmI, +) { + let net = hat!(tables).get_net(net_type).unwrap(); + match net.get_idx(source) { + Some(tree_sid) => { + if net.trees.len() > tree_sid.index() { + send_sourced_subscription_to_net_childs( + tables, + net, + &net.trees[tree_sid.index()].childs, + res, + src_face, + sub_info, + tree_sid.index() as RoutingContext, + ); + } else { + log::trace!( + "Propagating sub {}: tree for node {} sid:{} not yet ready", + res.expr(), + tree_sid.index(), + source + ); + } + } + None => log::error!( + "Error propagating sub {}: cannot get index of {}!", + res.expr(), + source + ), + } +} + +fn register_router_subscription( + tables: &mut Tables, + face: &mut Arc, + res: &mut Arc, + sub_info: &SubscriberInfo, + router: ZenohId, +) { + if !res_hat!(res).router_subs.contains(&router) { + // Register router subscription + { + log::debug!( + "Register router subscription {} (router: {})", + res.expr(), + router + ); + res_hat_mut!(res).router_subs.insert(router); + hat_mut!(tables).router_subs.insert(res.clone()); + } + + // Propagate subscription to routers + propagate_sourced_subscription(tables, res, sub_info, Some(face), &router, WhatAmI::Router); + } + // Propagate subscription to peers + if hat!(tables).full_net(WhatAmI::Peer) && face.whatami != WhatAmI::Peer { + register_peer_subscription(tables, face, res, sub_info, tables.zid) + } + + // Propagate subscription to clients + propagate_simple_subscription(tables, res, sub_info, face); +} + +fn declare_router_subscription( + tables: &TablesLock, + rtables: RwLockReadGuard, + face: &mut Arc, + expr: &WireExpr, + sub_info: &SubscriberInfo, + router: ZenohId, +) { + match rtables + .get_mapping(face, &expr.scope, expr.mapping) + .cloned() + { + Some(mut prefix) => { + let res = Resource::get_resource(&prefix, &expr.suffix); + let (mut res, mut wtables) = + if res.as_ref().map(|r| r.context.is_some()).unwrap_or(false) { + drop(rtables); + let wtables = zwrite!(tables.tables); + (res.unwrap(), wtables) + } else { + let mut fullexpr = prefix.expr(); + fullexpr.push_str(expr.suffix.as_ref()); + let mut matches = keyexpr::new(fullexpr.as_str()) + .map(|ke| Resource::get_matches(&rtables, ke)) + .unwrap_or_default(); + drop(rtables); + let mut wtables = zwrite!(tables.tables); + let mut res = + Resource::make_resource(&mut wtables, &mut prefix, expr.suffix.as_ref()); + matches.push(Arc::downgrade(&res)); + Resource::match_resource(&wtables, &mut res, matches); + (res, wtables) + }; + register_router_subscription(&mut wtables, face, &mut res, sub_info, router); + disable_matches_data_routes(&mut wtables, &mut res); + drop(wtables); + + let rtables = zread!(tables.tables); + let matches_data_routes = compute_matches_data_routes_(&rtables, &res); + drop(rtables); + + let wtables = zwrite!(tables.tables); + for (mut res, data_routes) in matches_data_routes { + get_mut_unchecked(&mut res) + .context_mut() + .update_data_routes(data_routes); + } + drop(wtables); + } + None => log::error!( + "Declare router subscription 
for unknown scope {}!", + expr.scope + ), + } +} + +fn register_peer_subscription( + tables: &mut Tables, + face: &mut Arc, + res: &mut Arc, + sub_info: &SubscriberInfo, + peer: ZenohId, +) { + if !res_hat!(res).peer_subs.contains(&peer) { + // Register peer subscription + { + log::debug!("Register peer subscription {} (peer: {})", res.expr(), peer); + res_hat_mut!(res).peer_subs.insert(peer); + hat_mut!(tables).peer_subs.insert(res.clone()); + } + + // Propagate subscription to peers + propagate_sourced_subscription(tables, res, sub_info, Some(face), &peer, WhatAmI::Peer); + } + + if tables.whatami == WhatAmI::Peer { + // Propagate subscription to clients + propagate_simple_subscription(tables, res, sub_info, face); + } +} + +fn declare_peer_subscription( + tables: &TablesLock, + rtables: RwLockReadGuard, + face: &mut Arc, + expr: &WireExpr, + sub_info: &SubscriberInfo, + peer: ZenohId, +) { + match rtables + .get_mapping(face, &expr.scope, expr.mapping) + .cloned() + { + Some(mut prefix) => { + let res = Resource::get_resource(&prefix, &expr.suffix); + let (mut res, mut wtables) = + if res.as_ref().map(|r| r.context.is_some()).unwrap_or(false) { + drop(rtables); + let wtables = zwrite!(tables.tables); + (res.unwrap(), wtables) + } else { + let mut fullexpr = prefix.expr(); + fullexpr.push_str(expr.suffix.as_ref()); + let mut matches = keyexpr::new(fullexpr.as_str()) + .map(|ke| Resource::get_matches(&rtables, ke)) + .unwrap_or_default(); + drop(rtables); + let mut wtables = zwrite!(tables.tables); + let mut res = + Resource::make_resource(&mut wtables, &mut prefix, expr.suffix.as_ref()); + matches.push(Arc::downgrade(&res)); + Resource::match_resource(&wtables, &mut res, matches); + (res, wtables) + }; + register_peer_subscription(&mut wtables, face, &mut res, sub_info, peer); + if wtables.whatami == WhatAmI::Router { + let mut propa_sub_info = *sub_info; + propa_sub_info.mode = Mode::Push; + let zid = wtables.zid; + register_router_subscription(&mut wtables, face, &mut res, &propa_sub_info, zid); + } + disable_matches_data_routes(&mut wtables, &mut res); + drop(wtables); + + let rtables = zread!(tables.tables); + let matches_data_routes = compute_matches_data_routes_(&rtables, &res); + drop(rtables); + + let wtables = zwrite!(tables.tables); + for (mut res, data_routes) in matches_data_routes { + get_mut_unchecked(&mut res) + .context_mut() + .update_data_routes(data_routes); + } + drop(wtables); + } + None => log::error!( + "Declare router subscription for unknown scope {}!", + expr.scope + ), + } +} + +fn register_client_subscription( + _tables: &mut Tables, + face: &mut Arc, + res: &mut Arc, + sub_info: &SubscriberInfo, +) { + // Register subscription + { + let res = get_mut_unchecked(res); + log::debug!("Register subscription {} for {}", res.expr(), face); + match res.session_ctxs.get_mut(&face.id) { + Some(ctx) => match &ctx.subs { + Some(info) => { + if Mode::Pull == info.mode { + get_mut_unchecked(ctx).subs = Some(*sub_info); + } + } + None => { + get_mut_unchecked(ctx).subs = Some(*sub_info); + } + }, + None => { + res.session_ctxs.insert( + face.id, + Arc::new(SessionContext { + face: face.clone(), + local_expr_id: None, + remote_expr_id: None, + subs: Some(*sub_info), + qabl: None, + last_values: HashMap::new(), + }), + ); + } + } + } + face_hat_mut!(face).remote_subs.insert(res.clone()); +} + +fn declare_client_subscription( + tables: &TablesLock, + rtables: RwLockReadGuard, + face: &mut Arc, + expr: &WireExpr, + sub_info: &SubscriberInfo, +) { + log::debug!("Register client 
subscription"); + match rtables + .get_mapping(face, &expr.scope, expr.mapping) + .cloned() + { + Some(mut prefix) => { + let res = Resource::get_resource(&prefix, &expr.suffix); + let (mut res, mut wtables) = + if res.as_ref().map(|r| r.context.is_some()).unwrap_or(false) { + drop(rtables); + let wtables = zwrite!(tables.tables); + (res.unwrap(), wtables) + } else { + let mut fullexpr = prefix.expr(); + fullexpr.push_str(expr.suffix.as_ref()); + let mut matches = keyexpr::new(fullexpr.as_str()) + .map(|ke| Resource::get_matches(&rtables, ke)) + .unwrap_or_default(); + drop(rtables); + let mut wtables = zwrite!(tables.tables); + let mut res = + Resource::make_resource(&mut wtables, &mut prefix, expr.suffix.as_ref()); + matches.push(Arc::downgrade(&res)); + Resource::match_resource(&wtables, &mut res, matches); + (res, wtables) + }; + + register_client_subscription(&mut wtables, face, &mut res, sub_info); + let mut propa_sub_info = *sub_info; + propa_sub_info.mode = Mode::Push; + match wtables.whatami { + WhatAmI::Router => { + let zid = wtables.zid; + register_router_subscription( + &mut wtables, + face, + &mut res, + &propa_sub_info, + zid, + ); + } + WhatAmI::Peer => { + if hat!(wtables).full_net(WhatAmI::Peer) { + let zid = wtables.zid; + register_peer_subscription( + &mut wtables, + face, + &mut res, + &propa_sub_info, + zid, + ); + } else { + propagate_simple_subscription(&mut wtables, &res, &propa_sub_info, face); + // This introduced a buffer overflow on windows + // TODO: Let's deactivate this on windows until Fixed + #[cfg(not(windows))] + for mcast_group in &wtables.mcast_groups { + mcast_group.primitives.send_declare(Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id: 0, // TODO + wire_expr: res.expr().into(), + ext_info: *sub_info, + }), + }) + } + } + } + _ => { + propagate_simple_subscription(&mut wtables, &res, &propa_sub_info, face); + // This introduced a buffer overflow on windows + // TODO: Let's deactivate this on windows until Fixed + #[cfg(not(windows))] + for mcast_group in &wtables.mcast_groups { + mcast_group.primitives.send_declare(Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id: 0, // TODO + wire_expr: res.expr().into(), + ext_info: *sub_info, + }), + }) + } + } + } + disable_matches_data_routes(&mut wtables, &mut res); + drop(wtables); + + let rtables = zread!(tables.tables); + let matches_data_routes = compute_matches_data_routes_(&rtables, &res); + drop(rtables); + + let wtables = zwrite!(tables.tables); + for (mut res, data_routes) in matches_data_routes { + get_mut_unchecked(&mut res) + .context_mut() + .update_data_routes(data_routes); + } + drop(wtables); + } + None => log::error!("Declare subscription for unknown scope {}!", expr.scope), + } +} + +#[inline] +fn remote_router_subs(tables: &Tables, res: &Arc) -> bool { + res.context.is_some() + && res_hat!(res) + .router_subs + .iter() + .any(|peer| peer != &tables.zid) +} + +#[inline] +fn remote_peer_subs(tables: &Tables, res: &Arc) -> bool { + res.context.is_some() + && res_hat!(res) + .peer_subs + .iter() + .any(|peer| peer != &tables.zid) +} + +#[inline] +fn client_subs(res: &Arc) -> Vec> { + res.session_ctxs + .values() + .filter_map(|ctx| { + if ctx.subs.is_some() { + Some(ctx.face.clone()) + } else { + None + } + }) + .collect() +} + +#[inline] 
+fn send_forget_sourced_subscription_to_net_childs( + tables: &Tables, + net: &Network, + childs: &[NodeIndex], + res: &Arc, + src_face: Option<&Arc>, + routing_context: Option, +) { + for child in childs { + if net.graph.contains_node(*child) { + match tables.get_face(&net.graph[*child].zid).cloned() { + Some(mut someface) => { + if src_face.is_none() || someface.id != src_face.unwrap().id { + let wire_expr = Resource::decl_key(res, &mut someface); + + log::debug!("Send forget subscription {} on {}", res.expr(), someface); + + someface.primitives.send_declare(Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType { + node_id: routing_context.unwrap_or(0), + }, + body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { + id: 0, // TODO + ext_wire_expr: WireExprType { wire_expr }, + }), + }); + } + } + None => log::trace!("Unable to find face for zid {}", net.graph[*child].zid), + } + } + } +} + +fn propagate_forget_simple_subscription(tables: &mut Tables, res: &Arc) { + for face in tables.faces.values_mut() { + if face_hat!(face).local_subs.contains(res) { + let wire_expr = Resource::get_best_key(res, "", face.id); + face.primitives.send_declare(Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { + id: 0, // TODO + ext_wire_expr: WireExprType { wire_expr }, + }), + }); + face_hat_mut!(face).local_subs.remove(res); + } + } +} + +fn propagate_forget_simple_subscription_to_peers(tables: &mut Tables, res: &Arc) { + if !hat!(tables).full_net(WhatAmI::Peer) + && res_hat!(res).router_subs.len() == 1 + && res_hat!(res).router_subs.contains(&tables.zid) + { + for mut face in tables + .faces + .values() + .cloned() + .collect::>>() + { + if face.whatami == WhatAmI::Peer + && face_hat!(face).local_subs.contains(res) + && !res.session_ctxs.values().any(|s| { + face.zid != s.face.zid + && s.subs.is_some() + && (s.face.whatami == WhatAmI::Client + || (s.face.whatami == WhatAmI::Peer + && hat!(tables).failover_brokering(s.face.zid, face.zid))) + }) + { + let wire_expr = Resource::get_best_key(res, "", face.id); + face.primitives.send_declare(Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { + id: 0, // TODO + ext_wire_expr: WireExprType { wire_expr }, + }), + }); + + face_hat_mut!(&mut face).local_subs.remove(res); + } + } + } +} + +fn propagate_forget_sourced_subscription( + tables: &Tables, + res: &Arc, + src_face: Option<&Arc>, + source: &ZenohId, + net_type: WhatAmI, +) { + let net = hat!(tables).get_net(net_type).unwrap(); + match net.get_idx(source) { + Some(tree_sid) => { + if net.trees.len() > tree_sid.index() { + send_forget_sourced_subscription_to_net_childs( + tables, + net, + &net.trees[tree_sid.index()].childs, + res, + src_face, + Some(tree_sid.index() as RoutingContext), + ); + } else { + log::trace!( + "Propagating forget sub {}: tree for node {} sid:{} not yet ready", + res.expr(), + tree_sid.index(), + source + ); + } + } + None => log::error!( + "Error propagating forget sub {}: cannot get index of {}!", + res.expr(), + source + ), + } +} + +fn unregister_router_subscription(tables: &mut Tables, res: &mut Arc, router: &ZenohId) { + log::debug!( + "Unregister router subscription {} (router: {})", + res.expr(), + router + ); + res_hat_mut!(res).router_subs.retain(|sub| sub != router); 
+ + if res_hat!(res).router_subs.is_empty() { + hat_mut!(tables) + .router_subs + .retain(|sub| !Arc::ptr_eq(sub, res)); + + if hat_mut!(tables).full_net(WhatAmI::Peer) { + undeclare_peer_subscription(tables, None, res, &tables.zid.clone()); + } + propagate_forget_simple_subscription(tables, res); + } + + propagate_forget_simple_subscription_to_peers(tables, res); +} + +fn undeclare_router_subscription( + tables: &mut Tables, + face: Option<&Arc>, + res: &mut Arc, + router: &ZenohId, +) { + if res_hat!(res).router_subs.contains(router) { + unregister_router_subscription(tables, res, router); + propagate_forget_sourced_subscription(tables, res, face, router, WhatAmI::Router); + } +} + +fn forget_router_subscription( + tables: &TablesLock, + rtables: RwLockReadGuard, + face: &mut Arc, + expr: &WireExpr, + router: &ZenohId, +) { + match rtables.get_mapping(face, &expr.scope, expr.mapping) { + Some(prefix) => match Resource::get_resource(prefix, expr.suffix.as_ref()) { + Some(mut res) => { + drop(rtables); + let mut wtables = zwrite!(tables.tables); + undeclare_router_subscription(&mut wtables, Some(face), &mut res, router); + disable_matches_data_routes(&mut wtables, &mut res); + drop(wtables); + + let rtables = zread!(tables.tables); + let matches_data_routes = compute_matches_data_routes_(&rtables, &res); + drop(rtables); + let wtables = zwrite!(tables.tables); + for (mut res, data_routes) in matches_data_routes { + get_mut_unchecked(&mut res) + .context_mut() + .update_data_routes(data_routes); + } + Resource::clean(&mut res); + drop(wtables); + } + None => log::error!("Undeclare unknown router subscription!"), + }, + None => log::error!("Undeclare router subscription with unknown scope!"), + } +} + +fn unregister_peer_subscription(tables: &mut Tables, res: &mut Arc, peer: &ZenohId) { + log::debug!( + "Unregister peer subscription {} (peer: {})", + res.expr(), + peer + ); + res_hat_mut!(res).peer_subs.retain(|sub| sub != peer); + + if res_hat!(res).peer_subs.is_empty() { + hat_mut!(tables) + .peer_subs + .retain(|sub| !Arc::ptr_eq(sub, res)); + + if tables.whatami == WhatAmI::Peer { + propagate_forget_simple_subscription(tables, res); + } + } +} + +fn undeclare_peer_subscription( + tables: &mut Tables, + face: Option<&Arc>, + res: &mut Arc, + peer: &ZenohId, +) { + if res_hat!(res).peer_subs.contains(peer) { + unregister_peer_subscription(tables, res, peer); + propagate_forget_sourced_subscription(tables, res, face, peer, WhatAmI::Peer); + } +} + +fn forget_peer_subscription( + tables: &TablesLock, + rtables: RwLockReadGuard, + face: &mut Arc, + expr: &WireExpr, + peer: &ZenohId, +) { + match rtables.get_mapping(face, &expr.scope, expr.mapping) { + Some(prefix) => match Resource::get_resource(prefix, expr.suffix.as_ref()) { + Some(mut res) => { + drop(rtables); + let mut wtables = zwrite!(tables.tables); + undeclare_peer_subscription(&mut wtables, Some(face), &mut res, peer); + if wtables.whatami == WhatAmI::Router { + let client_subs = res.session_ctxs.values().any(|ctx| ctx.subs.is_some()); + let peer_subs = remote_peer_subs(&wtables, &res); + let zid = wtables.zid; + if !client_subs && !peer_subs { + undeclare_router_subscription(&mut wtables, None, &mut res, &zid); + } + } + disable_matches_data_routes(&mut wtables, &mut res); + drop(wtables); + + let rtables = zread!(tables.tables); + let matches_data_routes = compute_matches_data_routes_(&rtables, &res); + drop(rtables); + let wtables = zwrite!(tables.tables); + for (mut res, data_routes) in matches_data_routes { + 
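// Editor's note (not part of this patch): each resource matching the
// undeclared key expression gets the data routes that were just recomputed
// under the read lock installed back into its context, now that the write
// lock is held again.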
get_mut_unchecked(&mut res) + .context_mut() + .update_data_routes(data_routes); + } + Resource::clean(&mut res); + drop(wtables); + } + None => log::error!("Undeclare unknown peer subscription!"), + }, + None => log::error!("Undeclare peer subscription with unknown scope!"), + } +} + +pub(super) fn undeclare_client_subscription( + tables: &mut Tables, + face: &mut Arc, + res: &mut Arc, +) { + log::debug!("Unregister client subscription {} for {}", res.expr(), face); + if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) { + get_mut_unchecked(ctx).subs = None; + } + face_hat_mut!(face).remote_subs.remove(res); + + let mut client_subs = client_subs(res); + let router_subs = remote_router_subs(tables, res); + let peer_subs = remote_peer_subs(tables, res); + match tables.whatami { + WhatAmI::Router => { + if client_subs.is_empty() && !peer_subs { + undeclare_router_subscription(tables, None, res, &tables.zid.clone()); + } else { + propagate_forget_simple_subscription_to_peers(tables, res); + } + } + WhatAmI::Peer => { + if client_subs.is_empty() { + if hat!(tables).full_net(WhatAmI::Peer) { + undeclare_peer_subscription(tables, None, res, &tables.zid.clone()); + } else { + propagate_forget_simple_subscription(tables, res); + } + } + } + _ => { + if client_subs.is_empty() { + propagate_forget_simple_subscription(tables, res); + } + } + } + if client_subs.len() == 1 && !router_subs && !peer_subs { + let face = &mut client_subs[0]; + if face_hat!(face).local_subs.contains(res) + && !(face.whatami == WhatAmI::Client && res.expr().starts_with(PREFIX_LIVELINESS)) + { + let wire_expr = Resource::get_best_key(res, "", face.id); + face.primitives.send_declare(Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { + id: 0, // TODO + ext_wire_expr: WireExprType { wire_expr }, + }), + }); + + face_hat_mut!(face).local_subs.remove(res); + } + } +} + +fn forget_client_subscription( + tables: &TablesLock, + rtables: RwLockReadGuard, + face: &mut Arc, + expr: &WireExpr, +) { + match rtables.get_mapping(face, &expr.scope, expr.mapping) { + Some(prefix) => match Resource::get_resource(prefix, expr.suffix.as_ref()) { + Some(mut res) => { + drop(rtables); + let mut wtables = zwrite!(tables.tables); + undeclare_client_subscription(&mut wtables, face, &mut res); + disable_matches_data_routes(&mut wtables, &mut res); + drop(wtables); + + let rtables = zread!(tables.tables); + let matches_data_routes = compute_matches_data_routes_(&rtables, &res); + drop(rtables); + + let wtables = zwrite!(tables.tables); + for (mut res, data_routes) in matches_data_routes { + get_mut_unchecked(&mut res) + .context_mut() + .update_data_routes(data_routes); + } + Resource::clean(&mut res); + drop(wtables); + } + None => log::error!("Undeclare unknown subscription!"), + }, + None => log::error!("Undeclare subscription with unknown scope!"), + } +} + +pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { + let sub_info = SubscriberInfo { + reliability: Reliability::Reliable, // @TODO + mode: Mode::Push, + }; + match tables.whatami { + WhatAmI::Router => { + if face.whatami == WhatAmI::Client { + for sub in &hat!(tables).router_subs { + face_hat_mut!(face).local_subs.insert(sub.clone()); + let key_expr = Resource::decl_key(sub, face); + face.primitives.send_declare(Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + 
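// Editor's note (not part of this patch): a newly connected client face is
// primed with one DeclareSubscriber per subscription already known at the
// router level, so the client learns about existing interest and starts
// forwarding matching publications right away.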
body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id: 0, // TODO + wire_expr: key_expr, + ext_info: sub_info, + }), + }); + } + } else if face.whatami == WhatAmI::Peer && !hat!(tables).full_net(WhatAmI::Peer) { + for sub in &hat!(tables).router_subs { + if sub.context.is_some() + && (res_hat!(sub).router_subs.iter().any(|r| *r != tables.zid) + || sub.session_ctxs.values().any(|s| { + s.subs.is_some() + && (s.face.whatami == WhatAmI::Client + || (s.face.whatami == WhatAmI::Peer + && hat!(tables) + .failover_brokering(s.face.zid, face.zid))) + })) + { + face_hat_mut!(face).local_subs.insert(sub.clone()); + let key_expr = Resource::decl_key(sub, face); + face.primitives.send_declare(Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id: 0, // TODO + wire_expr: key_expr, + ext_info: sub_info, + }), + }); + } + } + } + } + WhatAmI::Peer => { + if hat!(tables).full_net(WhatAmI::Peer) { + if face.whatami == WhatAmI::Client { + for sub in &hat!(tables).peer_subs { + face_hat_mut!(face).local_subs.insert(sub.clone()); + let key_expr = Resource::decl_key(sub, face); + face.primitives.send_declare(Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id: 0, // TODO + wire_expr: key_expr, + ext_info: sub_info, + }), + }); + } + } + } else { + for src_face in tables + .faces + .values() + .cloned() + .collect::>>() + { + for sub in &face_hat!(src_face).remote_subs { + propagate_simple_subscription_to( + tables, + face, + sub, + &sub_info, + &mut src_face.clone(), + false, + ); + } + } + } + } + WhatAmI::Client => { + for src_face in tables + .faces + .values() + .cloned() + .collect::>>() + { + for sub in &face_hat!(src_face).remote_subs { + propagate_simple_subscription_to( + tables, + face, + sub, + &sub_info, + &mut src_face.clone(), + false, + ); + } + } + } + } +} + +pub(super) fn pubsub_remove_node(tables: &mut Tables, node: &ZenohId, net_type: WhatAmI) { + match net_type { + WhatAmI::Router => { + for mut res in hat!(tables) + .router_subs + .iter() + .filter(|res| res_hat!(res).router_subs.contains(node)) + .cloned() + .collect::>>() + { + unregister_router_subscription(tables, &mut res, node); + + let matches_data_routes = compute_matches_data_routes_(tables, &res); + for (mut res, data_routes) in matches_data_routes { + get_mut_unchecked(&mut res) + .context_mut() + .update_data_routes(data_routes); + } + Resource::clean(&mut res) + } + } + WhatAmI::Peer => { + for mut res in hat!(tables) + .peer_subs + .iter() + .filter(|res| res_hat!(res).peer_subs.contains(node)) + .cloned() + .collect::>>() + { + unregister_peer_subscription(tables, &mut res, node); + + if tables.whatami == WhatAmI::Router { + let client_subs = res.session_ctxs.values().any(|ctx| ctx.subs.is_some()); + let peer_subs = remote_peer_subs(tables, &res); + if !client_subs && !peer_subs { + undeclare_router_subscription(tables, None, &mut res, &tables.zid.clone()); + } + } + + // compute_matches_data_routes(tables, &mut res); + let matches_data_routes = compute_matches_data_routes_(tables, &res); + for (mut res, data_routes) in matches_data_routes { + get_mut_unchecked(&mut res) + .context_mut() + .update_data_routes(data_routes); + } + Resource::clean(&mut res) + } + } + _ => (), + } +} + +pub(super) fn pubsub_tree_change( + tables: &mut Tables, + new_childs: &[Vec], + 
net_type: WhatAmI, +) { + // propagate subs to new childs + for (tree_sid, tree_childs) in new_childs.iter().enumerate() { + if !tree_childs.is_empty() { + let net = hat!(tables).get_net(net_type).unwrap(); + let tree_idx = NodeIndex::new(tree_sid); + if net.graph.contains_node(tree_idx) { + let tree_id = net.graph[tree_idx].zid; + + let subs_res = match net_type { + WhatAmI::Router => &hat!(tables).router_subs, + _ => &hat!(tables).peer_subs, + }; + + for res in subs_res { + let subs = match net_type { + WhatAmI::Router => &res_hat!(res).router_subs, + _ => &res_hat!(res).peer_subs, + }; + for sub in subs { + if *sub == tree_id { + let sub_info = SubscriberInfo { + reliability: Reliability::Reliable, // @TODO + mode: Mode::Push, + }; + send_sourced_subscription_to_net_childs( + tables, + net, + tree_childs, + res, + None, + &sub_info, + tree_sid as RoutingContext, + ); + } + } + } + } + } + } + + // recompute routes + compute_data_routes_from(tables, &mut tables.root_res.clone()); +} + +pub(super) fn pubsub_linkstate_change(tables: &mut Tables, zid: &ZenohId, links: &[ZenohId]) { + if let Some(src_face) = tables.get_face(zid).cloned() { + if hat!(tables).router_peers_failover_brokering + && tables.whatami == WhatAmI::Router + && src_face.whatami == WhatAmI::Peer + { + for res in &face_hat!(src_face).remote_subs { + let client_subs = res + .session_ctxs + .values() + .any(|ctx| ctx.face.whatami == WhatAmI::Client && ctx.subs.is_some()); + if !remote_router_subs(tables, res) && !client_subs { + for ctx in get_mut_unchecked(&mut res.clone()) + .session_ctxs + .values_mut() + { + let dst_face = &mut get_mut_unchecked(ctx).face; + if dst_face.whatami == WhatAmI::Peer && src_face.zid != dst_face.zid { + if face_hat!(dst_face).local_subs.contains(res) { + let forget = !HatTables::failover_brokering_to(links, dst_face.zid) + && { + let ctx_links = hat!(tables) + .peers_net + .as_ref() + .map(|net| net.get_links(dst_face.zid)) + .unwrap_or_else(|| &[]); + res.session_ctxs.values().any(|ctx2| { + ctx2.face.whatami == WhatAmI::Peer + && ctx2.subs.is_some() + && HatTables::failover_brokering_to( + ctx_links, + ctx2.face.zid, + ) + }) + }; + if forget { + let wire_expr = Resource::get_best_key(res, "", dst_face.id); + dst_face.primitives.send_declare(Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::UndeclareSubscriber( + UndeclareSubscriber { + id: 0, // TODO + ext_wire_expr: WireExprType { wire_expr }, + }, + ), + }); + + face_hat_mut!(dst_face).local_subs.remove(res); + } + } else if HatTables::failover_brokering_to(links, ctx.face.zid) { + let dst_face = &mut get_mut_unchecked(ctx).face; + face_hat_mut!(dst_face).local_subs.insert(res.clone()); + let key_expr = Resource::decl_key(res, dst_face); + let sub_info = SubscriberInfo { + reliability: Reliability::Reliable, // TODO + mode: Mode::Push, + }; + dst_face.primitives.send_declare(Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id: 0, // TODO + wire_expr: key_expr, + ext_info: sub_info, + }), + }); + } + } + } + } + } + } + } +} + +#[inline] +fn insert_faces_for_subs( + route: &mut Route, + expr: &RoutingExpr, + tables: &Tables, + net: &Network, + source: RoutingContext, + subs: &HashSet, +) { + if net.trees.len() > source as usize { + for sub in subs { + if let Some(sub_idx) = net.get_idx(sub) { + if net.trees[source as 
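// Editor's note (not part of this patch): every lookup into the spanning-tree
// data is bounds-checked because trees are recomputed asynchronously after
// linkstate changes; a source or node may briefly have no direction entry
// yet, in which case the route simply skips it (see the "not yet ready"
// trace below).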
usize].directions.len() > sub_idx.index() { + if let Some(direction) = net.trees[source as usize].directions[sub_idx.index()] + { + if net.graph.contains_node(direction) { + if let Some(face) = tables.get_face(&net.graph[direction].zid) { + route.entry(face.id).or_insert_with(|| { + let key_expr = + Resource::get_best_key(expr.prefix, expr.suffix, face.id); + (face.clone(), key_expr.to_owned(), source) + }); + } + } + } + } + } + } + } else { + log::trace!("Tree for node sid:{} not yet ready", source); + } +} + +impl HatPubSubTrait for HatCode { + fn declare_subscription( + &self, + tables: &TablesLock, + face: &mut Arc, + expr: &WireExpr, + sub_info: &SubscriberInfo, + node_id: RoutingContext, + ) { + let rtables = zread!(tables.tables); + match (rtables.whatami, face.whatami) { + (WhatAmI::Router, WhatAmI::Router) => { + if let Some(router) = get_router(&rtables, face, node_id) { + declare_router_subscription(tables, rtables, face, expr, sub_info, router) + } + } + (WhatAmI::Router, WhatAmI::Peer) + | (WhatAmI::Peer, WhatAmI::Router) + | (WhatAmI::Peer, WhatAmI::Peer) => { + if hat!(rtables).full_net(WhatAmI::Peer) { + if let Some(peer) = get_peer(&rtables, face, node_id) { + declare_peer_subscription(tables, rtables, face, expr, sub_info, peer) + } + } else { + declare_client_subscription(tables, rtables, face, expr, sub_info) + } + } + _ => declare_client_subscription(tables, rtables, face, expr, sub_info), + } + } + + fn forget_subscription( + &self, + tables: &TablesLock, + face: &mut Arc, + expr: &WireExpr, + node_id: RoutingContext, + ) { + let rtables = zread!(tables.tables); + match (rtables.whatami, face.whatami) { + (WhatAmI::Router, WhatAmI::Router) => { + if let Some(router) = get_router(&rtables, face, node_id) { + forget_router_subscription(tables, rtables, face, expr, &router) + } + } + (WhatAmI::Router, WhatAmI::Peer) + | (WhatAmI::Peer, WhatAmI::Router) + | (WhatAmI::Peer, WhatAmI::Peer) => { + if hat!(rtables).full_net(WhatAmI::Peer) { + if let Some(peer) = get_peer(&rtables, face, node_id) { + forget_peer_subscription(tables, rtables, face, expr, &peer) + } + } else { + forget_client_subscription(tables, rtables, face, expr) + } + } + _ => forget_client_subscription(tables, rtables, face, expr), + } + } + + fn compute_data_route( + &self, + tables: &Tables, + expr: &mut RoutingExpr, + source: RoutingContext, + source_type: WhatAmI, + ) -> Arc { + let mut route = HashMap::new(); + let key_expr = expr.full_expr(); + if key_expr.ends_with('/') { + return Arc::new(route); + } + log::trace!( + "compute_data_route({}, {:?}, {:?})", + key_expr, + source, + source_type + ); + let key_expr = match OwnedKeyExpr::try_from(key_expr) { + Ok(ke) => ke, + Err(e) => { + log::warn!("Invalid KE reached the system: {}", e); + return Arc::new(route); + } + }; + let res = Resource::get_resource(expr.prefix, expr.suffix); + let matches = res + .as_ref() + .and_then(|res| res.context.as_ref()) + .map(|ctx| Cow::from(&ctx.matches)) + .unwrap_or_else(|| Cow::from(Resource::get_matches(tables, &key_expr))); + + let master = tables.whatami != WhatAmI::Router + || !hat!(tables).full_net(WhatAmI::Peer) + || *hat!(tables).elect_router(&tables.zid, &key_expr, hat!(tables).shared_nodes.iter()) + == tables.zid; + + for mres in matches.iter() { + let mres = mres.upgrade().unwrap(); + if tables.whatami == WhatAmI::Router { + if master || source_type == WhatAmI::Router { + let net = hat!(tables).routers_net.as_ref().unwrap(); + let router_source = match source_type { + WhatAmI::Router => source, + _ => 
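// Editor's note (not part of this patch): when the triggering face is not a
// router, the route is computed from this node's own position in the routers
// net (`net.idx`), i.e. the data is treated as if it entered the routers
// tree here.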
net.idx.index() as RoutingContext, + }; + insert_faces_for_subs( + &mut route, + expr, + tables, + net, + router_source, + &res_hat!(mres).router_subs, + ); + } + + if (master || source_type != WhatAmI::Router) + && hat!(tables).full_net(WhatAmI::Peer) + { + let net = hat!(tables).peers_net.as_ref().unwrap(); + let peer_source = match source_type { + WhatAmI::Peer => source, + _ => net.idx.index() as RoutingContext, + }; + insert_faces_for_subs( + &mut route, + expr, + tables, + net, + peer_source, + &res_hat!(mres).peer_subs, + ); + } + } + + if tables.whatami == WhatAmI::Peer && hat!(tables).full_net(WhatAmI::Peer) { + let net = hat!(tables).peers_net.as_ref().unwrap(); + let peer_source = match source_type { + WhatAmI::Router | WhatAmI::Peer => source, + _ => net.idx.index() as RoutingContext, + }; + insert_faces_for_subs( + &mut route, + expr, + tables, + net, + peer_source, + &res_hat!(mres).peer_subs, + ); + } + + if tables.whatami != WhatAmI::Router || master || source_type == WhatAmI::Router { + for (sid, context) in &mres.session_ctxs { + if let Some(subinfo) = &context.subs { + if match tables.whatami { + WhatAmI::Router => context.face.whatami != WhatAmI::Router, + _ => { + source_type == WhatAmI::Client + || context.face.whatami == WhatAmI::Client + } + } && subinfo.mode == Mode::Push + { + route.entry(*sid).or_insert_with(|| { + let key_expr = + Resource::get_best_key(expr.prefix, expr.suffix, *sid); + ( + context.face.clone(), + key_expr.to_owned(), + RoutingContext::default(), + ) + }); + } + } + } + } + } + for mcast_group in &tables.mcast_groups { + route.insert( + mcast_group.id, + ( + mcast_group.clone(), + expr.full_expr().to_string().into(), + RoutingContext::default(), + ), + ); + } + Arc::new(route) + } + + fn compute_matching_pulls(&self, tables: &Tables, expr: &mut RoutingExpr) -> Arc { + let mut pull_caches = vec![]; + let ke = if let Ok(ke) = OwnedKeyExpr::try_from(expr.full_expr()) { + ke + } else { + return Arc::new(pull_caches); + }; + let res = Resource::get_resource(expr.prefix, expr.suffix); + let matches = res + .as_ref() + .and_then(|res| res.context.as_ref()) + .map(|ctx| Cow::from(&ctx.matches)) + .unwrap_or_else(|| Cow::from(Resource::get_matches(tables, &ke))); + + for mres in matches.iter() { + let mres = mres.upgrade().unwrap(); + for context in mres.session_ctxs.values() { + if let Some(subinfo) = &context.subs { + if subinfo.mode == Mode::Pull { + pull_caches.push(context.clone()); + } + } + } + } + Arc::new(pull_caches) + } + + fn compute_data_routes_(&self, tables: &Tables, res: &Arc) -> DataRoutes { + let mut routes = DataRoutes { + matching_pulls: None, + routers_data_routes: vec![], + peers_data_routes: vec![], + peer_data_route: None, + client_data_route: None, + }; + let mut expr = RoutingExpr::new(res, ""); + if tables.whatami == WhatAmI::Router { + let indexes = hat!(tables) + .routers_net + .as_ref() + .unwrap() + .graph + .node_indices() + .collect::>(); + let max_idx = indexes.iter().max().unwrap(); + routes + .routers_data_routes + .resize_with(max_idx.index() + 1, || Arc::new(HashMap::new())); + + for idx in &indexes { + routes.routers_data_routes[idx.index()] = self.compute_data_route( + tables, + &mut expr, + idx.index() as RoutingContext, + WhatAmI::Router, + ); + } + + routes.peer_data_route = Some(self.compute_data_route( + tables, + &mut expr, + RoutingContext::default(), + WhatAmI::Peer, + )); + } + if (tables.whatami == WhatAmI::Router || tables.whatami == WhatAmI::Peer) + && hat!(tables).full_net(WhatAmI::Peer) + { + let 
indexes = hat!(tables) + .peers_net + .as_ref() + .unwrap() + .graph + .node_indices() + .collect::>(); + let max_idx = indexes.iter().max().unwrap(); + routes + .peers_data_routes + .resize_with(max_idx.index() + 1, || Arc::new(HashMap::new())); + + for idx in &indexes { + routes.peers_data_routes[idx.index()] = self.compute_data_route( + tables, + &mut expr, + idx.index() as RoutingContext, + WhatAmI::Peer, + ); + } + } + if tables.whatami == WhatAmI::Peer && !hat!(tables).full_net(WhatAmI::Peer) { + routes.client_data_route = Some(self.compute_data_route( + tables, + &mut expr, + RoutingContext::default(), + WhatAmI::Client, + )); + routes.peer_data_route = Some(self.compute_data_route( + tables, + &mut expr, + RoutingContext::default(), + WhatAmI::Peer, + )); + } + if tables.whatami == WhatAmI::Client { + routes.client_data_route = Some(self.compute_data_route( + tables, + &mut expr, + RoutingContext::default(), + WhatAmI::Client, + )); + } + routes.matching_pulls = Some(self.compute_matching_pulls(tables, &mut expr)); + routes + } + + fn compute_data_routes(&self, tables: &mut Tables, res: &mut Arc) { + if res.context.is_some() { + let mut res_mut = res.clone(); + let res_mut = get_mut_unchecked(&mut res_mut); + let mut expr = RoutingExpr::new(res, ""); + if tables.whatami == WhatAmI::Router { + let indexes = hat!(tables) + .routers_net + .as_ref() + .unwrap() + .graph + .node_indices() + .collect::>(); + let max_idx = indexes.iter().max().unwrap(); + let routers_data_routes = &mut res_mut.context_mut().routers_data_routes; + routers_data_routes.clear(); + routers_data_routes.resize_with(max_idx.index() + 1, || Arc::new(HashMap::new())); + + for idx in &indexes { + routers_data_routes[idx.index()] = self.compute_data_route( + tables, + &mut expr, + idx.index() as RoutingContext, + WhatAmI::Router, + ); + } + + res_mut.context_mut().peer_data_route = Some(self.compute_data_route( + tables, + &mut expr, + RoutingContext::default(), + WhatAmI::Peer, + )); + } + if (tables.whatami == WhatAmI::Router || tables.whatami == WhatAmI::Peer) + && hat!(tables).full_net(WhatAmI::Peer) + { + let indexes = hat!(tables) + .peers_net + .as_ref() + .unwrap() + .graph + .node_indices() + .collect::>(); + let max_idx = indexes.iter().max().unwrap(); + let peers_data_routes = &mut res_mut.context_mut().peers_data_routes; + peers_data_routes.clear(); + peers_data_routes.resize_with(max_idx.index() + 1, || Arc::new(HashMap::new())); + + for idx in &indexes { + peers_data_routes[idx.index()] = self.compute_data_route( + tables, + &mut expr, + idx.index() as RoutingContext, + WhatAmI::Peer, + ); + } + } + if tables.whatami == WhatAmI::Peer && !hat!(tables).full_net(WhatAmI::Peer) { + res_mut.context_mut().client_data_route = Some(self.compute_data_route( + tables, + &mut expr, + RoutingContext::default(), + WhatAmI::Client, + )); + res_mut.context_mut().peer_data_route = Some(self.compute_data_route( + tables, + &mut expr, + RoutingContext::default(), + WhatAmI::Peer, + )); + } + if tables.whatami == WhatAmI::Client { + res_mut.context_mut().client_data_route = Some(self.compute_data_route( + tables, + &mut expr, + RoutingContext::default(), + WhatAmI::Client, + )); + } + res_mut.context_mut().matching_pulls = self.compute_matching_pulls(tables, &mut expr); + } + } +} diff --git a/zenoh/src/net/routing/hat/peer/queries.rs b/zenoh/src/net/routing/hat/peer/queries.rs new file mode 100644 index 0000000000..7ee731e05a --- /dev/null +++ b/zenoh/src/net/routing/hat/peer/queries.rs @@ -0,0 +1,1761 @@ +// +// 
Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +use crate::net::routing::dispatcher::face::FaceState; +use crate::net::routing::dispatcher::queries::*; +use crate::net::routing::dispatcher::resource::{Resource, RoutingContext, SessionContext}; +use crate::net::routing::dispatcher::tables::{Tables, TablesLock}; +use crate::net::routing::hat::HatQueriesTrait; +use super::network::Network; +use super::{get_peer, get_router, HatCode, HatContext, HatFace, HatTables}; +use crate::net::routing::dispatcher::tables::{ + QueryRoutes, QueryTargetQabl, QueryTargetQablSet, RoutingExpr, +}; +use crate::net::routing::PREFIX_LIVELINESS; +use super::{face_hat, face_hat_mut, hat, hat_mut, res_hat, res_hat_mut}; +use ordered_float::OrderedFloat; +use petgraph::graph::NodeIndex; +use std::borrow::Cow; +use std::collections::HashMap; +use std::sync::{Arc, RwLockReadGuard}; +use zenoh_buffers::ZBuf; +use zenoh_protocol::core::key_expr::include::{Includer, DEFAULT_INCLUDER}; +use zenoh_protocol::core::key_expr::OwnedKeyExpr; +use zenoh_protocol::{ + core::{key_expr::keyexpr, WhatAmI, WireExpr, ZenohId}, + network::declare::{ + common::ext::WireExprType, ext, queryable::ext::QueryableInfo, Declare, DeclareBody, + DeclareQueryable, UndeclareQueryable, + }, +}; +use zenoh_sync::get_mut_unchecked; + +#[cfg(feature = "complete_n")] +#[inline] +fn merge_qabl_infos(mut this: QueryableInfo, info: &QueryableInfo) -> QueryableInfo { + this.complete += info.complete; + this.distance = std::cmp::min(this.distance, info.distance); + this +} + +#[cfg(not(feature = "complete_n"))] +#[inline] +fn merge_qabl_infos(mut this: QueryableInfo, info: &QueryableInfo) -> QueryableInfo { + this.complete = u8::from(this.complete != 0 || info.complete != 0); + this.distance = std::cmp::min(this.distance, info.distance); + this +} + +fn local_router_qabl_info(tables: &Tables, res: &Arc) -> QueryableInfo { + let info = if hat!(tables).full_net(WhatAmI::Peer) { + res.context.as_ref().and_then(|_| { + res_hat!(res) + .peer_qabls + .iter() + .fold(None, |accu, (zid, info)| { + if *zid != tables.zid { + Some(match accu { + Some(accu) => merge_qabl_infos(accu, info), + None => *info, + }) + } else { + accu + } + }) + }) + } else { + None + }; + res.session_ctxs + .values() + .fold(info, |accu, ctx| { + if let Some(info) = ctx.qabl.as_ref() { + Some(match accu { + Some(accu) => merge_qabl_infos(accu, info), + None => *info, + }) + } else { + accu + } + }) + .unwrap_or(QueryableInfo { + complete: 0, + distance: 0, + }) +} + +fn local_peer_qabl_info(tables: &Tables, res: &Arc) -> QueryableInfo { + let info = if tables.whatami == WhatAmI::Router && res.context.is_some() { + res_hat!(res) + .router_qabls + .iter() + .fold(None, |accu, (zid, info)| { + if *zid != tables.zid { + Some(match accu { + Some(accu) => merge_qabl_infos(accu, info), + None => *info, + }) + } else { + accu + } + }) + } else { + None + }; + res.session_ctxs + .values() + .fold(info, |accu, ctx| { + if let Some(info) = ctx.qabl.as_ref() { + Some(match accu { + Some(accu) => merge_qabl_infos(accu, info), + None => *info, + }) + } else { + accu + } + }) + .unwrap_or(QueryableInfo { + complete: 
0, + distance: 0, + }) +} + +fn local_qabl_info(tables: &Tables, res: &Arc, face: &Arc) -> QueryableInfo { + let mut info = if tables.whatami == WhatAmI::Router && res.context.is_some() { + res_hat!(res) + .router_qabls + .iter() + .fold(None, |accu, (zid, info)| { + if *zid != tables.zid { + Some(match accu { + Some(accu) => merge_qabl_infos(accu, info), + None => *info, + }) + } else { + accu + } + }) + } else { + None + }; + if res.context.is_some() && hat!(tables).full_net(WhatAmI::Peer) { + info = res_hat!(res) + .peer_qabls + .iter() + .fold(info, |accu, (zid, info)| { + if *zid != tables.zid { + Some(match accu { + Some(accu) => merge_qabl_infos(accu, info), + None => *info, + }) + } else { + accu + } + }) + } + res.session_ctxs + .values() + .fold(info, |accu, ctx| { + if ctx.face.id != face.id && ctx.face.whatami != WhatAmI::Peer + || face.whatami != WhatAmI::Peer + || hat!(tables).failover_brokering(ctx.face.zid, face.zid) + { + if let Some(info) = ctx.qabl.as_ref() { + Some(match accu { + Some(accu) => merge_qabl_infos(accu, info), + None => *info, + }) + } else { + accu + } + } else { + accu + } + }) + .unwrap_or(QueryableInfo { + complete: 0, + distance: 0, + }) +} + +#[allow(clippy::too_many_arguments)] +#[inline] +fn send_sourced_queryable_to_net_childs( + tables: &Tables, + net: &Network, + childs: &[NodeIndex], + res: &Arc, + qabl_info: &QueryableInfo, + src_face: Option<&mut Arc>, + routing_context: RoutingContext, +) { + for child in childs { + if net.graph.contains_node(*child) { + match tables.get_face(&net.graph[*child].zid).cloned() { + Some(mut someface) => { + if src_face.is_none() || someface.id != src_face.as_ref().unwrap().id { + let key_expr = Resource::decl_key(res, &mut someface); + + log::debug!("Send queryable {} on {}", res.expr(), someface); + + someface.primitives.send_declare(Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType { + node_id: routing_context, + }, + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id: 0, // TODO + wire_expr: key_expr, + ext_info: *qabl_info, + }), + }); + } + } + None => log::trace!("Unable to find face for zid {}", net.graph[*child].zid), + } + } + } +} + +fn propagate_simple_queryable( + tables: &mut Tables, + res: &Arc, + src_face: Option<&mut Arc>, +) { + let full_peers_net = hat!(tables).full_net(WhatAmI::Peer); + let faces = tables.faces.values().cloned(); + for mut dst_face in faces { + let info = local_qabl_info(tables, res, &dst_face); + let current_info = face_hat!(dst_face).local_qabls.get(res); + if (src_face.is_none() || src_face.as_ref().unwrap().id != dst_face.id) + && (current_info.is_none() || *current_info.unwrap() != info) + && match tables.whatami { + WhatAmI::Router => { + if full_peers_net { + dst_face.whatami == WhatAmI::Client + } else { + dst_face.whatami != WhatAmI::Router + && (src_face.is_none() + || src_face.as_ref().unwrap().whatami != WhatAmI::Peer + || dst_face.whatami != WhatAmI::Peer + || hat!(tables).failover_brokering( + src_face.as_ref().unwrap().zid, + dst_face.zid, + )) + } + } + WhatAmI::Peer => { + if full_peers_net { + dst_face.whatami == WhatAmI::Client + } else { + src_face.is_none() + || src_face.as_ref().unwrap().whatami == WhatAmI::Client + || dst_face.whatami == WhatAmI::Client + } + } + _ => { + src_face.is_none() + || src_face.as_ref().unwrap().whatami == WhatAmI::Client + || dst_face.whatami == WhatAmI::Client + } + } + { + face_hat_mut!(&mut dst_face) + .local_qabls + .insert(res.clone(), info); + let key_expr = 
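// Editor's note (not part of this patch): `local_qabl_info` folds the remote
// declarations into one QueryableInfo per destination face (completeness is
// OR'ed, or summed with the `complete_n` feature, and distance is minimized);
// a queryable is only re-announced to a face when that aggregate actually
// changed, so churn in a single origin does not re-spam every face.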
Resource::decl_key(res, &mut dst_face); + dst_face.primitives.send_declare(Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id: 0, // TODO + wire_expr: key_expr, + ext_info: info, + }), + }); + } + } +} + +fn propagate_sourced_queryable( + tables: &Tables, + res: &Arc, + qabl_info: &QueryableInfo, + src_face: Option<&mut Arc>, + source: &ZenohId, + net_type: WhatAmI, +) { + let net = hat!(tables).get_net(net_type).unwrap(); + match net.get_idx(source) { + Some(tree_sid) => { + if net.trees.len() > tree_sid.index() { + send_sourced_queryable_to_net_childs( + tables, + net, + &net.trees[tree_sid.index()].childs, + res, + qabl_info, + src_face, + tree_sid.index() as RoutingContext, + ); + } else { + log::trace!( + "Propagating qabl {}: tree for node {} sid:{} not yet ready", + res.expr(), + tree_sid.index(), + source + ); + } + } + None => log::error!( + "Error propagating qabl {}: cannot get index of {}!", + res.expr(), + source + ), + } +} + +fn register_router_queryable( + tables: &mut Tables, + mut face: Option<&mut Arc>, + res: &mut Arc, + qabl_info: &QueryableInfo, + router: ZenohId, +) { + let current_info = res_hat!(res).router_qabls.get(&router); + if current_info.is_none() || current_info.unwrap() != qabl_info { + // Register router queryable + { + log::debug!( + "Register router queryable {} (router: {})", + res.expr(), + router, + ); + res_hat_mut!(res).router_qabls.insert(router, *qabl_info); + hat_mut!(tables).router_qabls.insert(res.clone()); + } + + // Propagate queryable to routers + propagate_sourced_queryable( + tables, + res, + qabl_info, + face.as_deref_mut(), + &router, + WhatAmI::Router, + ); + } + + if hat!(tables).full_net(WhatAmI::Peer) { + // Propagate queryable to peers + if face.is_none() || face.as_ref().unwrap().whatami != WhatAmI::Peer { + let local_info = local_peer_qabl_info(tables, res); + register_peer_queryable(tables, face.as_deref_mut(), res, &local_info, tables.zid) + } + } + + // Propagate queryable to clients + propagate_simple_queryable(tables, res, face); +} + +fn declare_router_queryable( + tables: &TablesLock, + rtables: RwLockReadGuard, + face: &mut Arc, + expr: &WireExpr, + qabl_info: &QueryableInfo, + router: ZenohId, +) { + match rtables + .get_mapping(face, &expr.scope, expr.mapping) + .cloned() + { + Some(mut prefix) => { + let res = Resource::get_resource(&prefix, &expr.suffix); + let (mut res, mut wtables) = + if res.as_ref().map(|r| r.context.is_some()).unwrap_or(false) { + drop(rtables); + let wtables = zwrite!(tables.tables); + (res.unwrap(), wtables) + } else { + let mut fullexpr = prefix.expr(); + fullexpr.push_str(expr.suffix.as_ref()); + log::debug!("Register router queryable {}", fullexpr); + let mut matches = keyexpr::new(fullexpr.as_str()) + .map(|ke| Resource::get_matches(&rtables, ke)) + .unwrap_or_default(); + drop(rtables); + let mut wtables = zwrite!(tables.tables); + let mut res = + Resource::make_resource(&mut wtables, &mut prefix, expr.suffix.as_ref()); + matches.push(Arc::downgrade(&res)); + Resource::match_resource(&wtables, &mut res, matches); + (res, wtables) + }; + register_router_queryable(&mut wtables, Some(face), &mut res, qabl_info, router); + disable_matches_query_routes(&mut wtables, &mut res); + drop(wtables); + + let rtables = zread!(tables.tables); + let matches_query_routes = compute_matches_query_routes_(&rtables, &res); + drop(rtables); + + let wtables = zwrite!(tables.tables); + 
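// Editor's note (not part of this patch): declare/forget handlers in this
// module follow a three-phase locking scheme: mutate the tables under the
// write lock, recompute derived routes under a read lock, then reacquire the
// write lock to install the result. A minimal sketch of the idea with
// std::sync::RwLock (all names hypothetical):
fn apply_then_recompute<T>(
    lock: &std::sync::RwLock<T>,
    mutate: impl FnOnce(&mut T),
    recompute: impl FnOnce(&T) -> T,
) {
    {
        let mut guard = lock.write().unwrap(); // phase 1: apply the declaration
        mutate(&mut guard);
    }
    let derived = {
        let guard = lock.read().unwrap(); // phase 2: recompute under the read lock
        recompute(&guard)
    };
    *lock.write().unwrap() = derived; // phase 3: install the derived state
}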
for (mut res, query_routes) in matches_query_routes { + get_mut_unchecked(&mut res) + .context_mut() + .update_query_routes(query_routes); + } + drop(wtables); + } + None => log::error!("Declare router queryable for unknown scope {}!", expr.scope), + } +} + +fn register_peer_queryable( + tables: &mut Tables, + mut face: Option<&mut Arc>, + res: &mut Arc, + qabl_info: &QueryableInfo, + peer: ZenohId, +) { + let current_info = res_hat!(res).peer_qabls.get(&peer); + if current_info.is_none() || current_info.unwrap() != qabl_info { + // Register peer queryable + { + log::debug!("Register peer queryable {} (peer: {})", res.expr(), peer,); + res_hat_mut!(res).peer_qabls.insert(peer, *qabl_info); + hat_mut!(tables).peer_qabls.insert(res.clone()); + } + + // Propagate queryable to peers + propagate_sourced_queryable( + tables, + res, + qabl_info, + face.as_deref_mut(), + &peer, + WhatAmI::Peer, + ); + } + + if tables.whatami == WhatAmI::Peer { + // Propagate queryable to clients + propagate_simple_queryable(tables, res, face); + } +} + +fn declare_peer_queryable( + tables: &TablesLock, + rtables: RwLockReadGuard, + face: &mut Arc, + expr: &WireExpr, + qabl_info: &QueryableInfo, + peer: ZenohId, +) { + match rtables + .get_mapping(face, &expr.scope, expr.mapping) + .cloned() + { + Some(mut prefix) => { + let res = Resource::get_resource(&prefix, &expr.suffix); + let (mut res, mut wtables) = + if res.as_ref().map(|r| r.context.is_some()).unwrap_or(false) { + drop(rtables); + let wtables = zwrite!(tables.tables); + (res.unwrap(), wtables) + } else { + let mut fullexpr = prefix.expr(); + fullexpr.push_str(expr.suffix.as_ref()); + log::debug!("Register peer queryable {}", fullexpr); + let mut matches = keyexpr::new(fullexpr.as_str()) + .map(|ke| Resource::get_matches(&rtables, ke)) + .unwrap_or_default(); + drop(rtables); + let mut wtables = zwrite!(tables.tables); + let mut res = + Resource::make_resource(&mut wtables, &mut prefix, expr.suffix.as_ref()); + matches.push(Arc::downgrade(&res)); + Resource::match_resource(&wtables, &mut res, matches); + (res, wtables) + }; + let mut face = Some(face); + register_peer_queryable(&mut wtables, face.as_deref_mut(), &mut res, qabl_info, peer); + if wtables.whatami == WhatAmI::Router { + let local_info = local_router_qabl_info(&wtables, &res); + let zid = wtables.zid; + register_router_queryable(&mut wtables, face, &mut res, &local_info, zid); + } + disable_matches_query_routes(&mut wtables, &mut res); + drop(wtables); + + let rtables = zread!(tables.tables); + let matches_query_routes = compute_matches_query_routes_(&rtables, &res); + drop(rtables); + + let wtables = zwrite!(tables.tables); + for (mut res, query_routes) in matches_query_routes { + get_mut_unchecked(&mut res) + .context_mut() + .update_query_routes(query_routes); + } + drop(wtables); + } + None => log::error!("Declare router queryable for unknown scope {}!", expr.scope), + } +} + +fn register_client_queryable( + _tables: &mut Tables, + face: &mut Arc, + res: &mut Arc, + qabl_info: &QueryableInfo, +) { + // Register queryable + { + let res = get_mut_unchecked(res); + log::debug!("Register queryable {} (face: {})", res.expr(), face,); + get_mut_unchecked(res.session_ctxs.entry(face.id).or_insert_with(|| { + Arc::new(SessionContext { + face: face.clone(), + local_expr_id: None, + remote_expr_id: None, + subs: None, + qabl: None, + last_values: HashMap::new(), + }) + })) + .qabl = Some(*qabl_info); + } + face_hat_mut!(face).remote_qabls.insert(res.clone()); +} + +fn declare_client_queryable( + 
tables: &TablesLock, + rtables: RwLockReadGuard, + face: &mut Arc, + expr: &WireExpr, + qabl_info: &QueryableInfo, +) { + match rtables + .get_mapping(face, &expr.scope, expr.mapping) + .cloned() + { + Some(mut prefix) => { + let res = Resource::get_resource(&prefix, &expr.suffix); + let (mut res, mut wtables) = + if res.as_ref().map(|r| r.context.is_some()).unwrap_or(false) { + drop(rtables); + let wtables = zwrite!(tables.tables); + (res.unwrap(), wtables) + } else { + let mut fullexpr = prefix.expr(); + fullexpr.push_str(expr.suffix.as_ref()); + log::debug!("Register client queryable {}", fullexpr); + let mut matches = keyexpr::new(fullexpr.as_str()) + .map(|ke| Resource::get_matches(&rtables, ke)) + .unwrap_or_default(); + drop(rtables); + let mut wtables = zwrite!(tables.tables); + let mut res = + Resource::make_resource(&mut wtables, &mut prefix, expr.suffix.as_ref()); + matches.push(Arc::downgrade(&res)); + Resource::match_resource(&wtables, &mut res, matches); + (res, wtables) + }; + + register_client_queryable(&mut wtables, face, &mut res, qabl_info); + + match wtables.whatami { + WhatAmI::Router => { + let local_details = local_router_qabl_info(&wtables, &res); + let zid = wtables.zid; + register_router_queryable( + &mut wtables, + Some(face), + &mut res, + &local_details, + zid, + ); + } + WhatAmI::Peer => { + if hat!(wtables).full_net(WhatAmI::Peer) { + let local_details = local_peer_qabl_info(&wtables, &res); + let zid = wtables.zid; + register_peer_queryable( + &mut wtables, + Some(face), + &mut res, + &local_details, + zid, + ); + } else { + propagate_simple_queryable(&mut wtables, &res, Some(face)); + } + } + _ => { + propagate_simple_queryable(&mut wtables, &res, Some(face)); + } + } + disable_matches_query_routes(&mut wtables, &mut res); + drop(wtables); + + let rtables = zread!(tables.tables); + let matches_query_routes = compute_matches_query_routes_(&rtables, &res); + drop(rtables); + + let wtables = zwrite!(tables.tables); + for (mut res, query_routes) in matches_query_routes { + get_mut_unchecked(&mut res) + .context_mut() + .update_query_routes(query_routes); + } + drop(wtables); + } + None => log::error!("Declare queryable for unknown scope {}!", expr.scope), + } +} + +#[inline] +fn remote_router_qabls(tables: &Tables, res: &Arc) -> bool { + res.context.is_some() + && res_hat!(res) + .router_qabls + .keys() + .any(|router| router != &tables.zid) +} + +#[inline] +fn remote_peer_qabls(tables: &Tables, res: &Arc) -> bool { + res.context.is_some() + && res_hat!(res) + .peer_qabls + .keys() + .any(|peer| peer != &tables.zid) +} + +#[inline] +fn client_qabls(res: &Arc) -> Vec> { + res.session_ctxs + .values() + .filter_map(|ctx| { + if ctx.qabl.is_some() { + Some(ctx.face.clone()) + } else { + None + } + }) + .collect() +} + +#[inline] +fn send_forget_sourced_queryable_to_net_childs( + tables: &Tables, + net: &Network, + childs: &[NodeIndex], + res: &Arc, + src_face: Option<&Arc>, + routing_context: RoutingContext, +) { + for child in childs { + if net.graph.contains_node(*child) { + match tables.get_face(&net.graph[*child].zid).cloned() { + Some(mut someface) => { + if src_face.is_none() || someface.id != src_face.unwrap().id { + let wire_expr = Resource::decl_key(res, &mut someface); + + log::debug!("Send forget queryable {} on {}", res.expr(), someface); + + someface.primitives.send_declare(Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType { + node_id: routing_context, + }, + body: 
DeclareBody::UndeclareQueryable(UndeclareQueryable { + id: 0, // TODO + ext_wire_expr: WireExprType { wire_expr }, + }), + }); + } + } + None => log::trace!("Unable to find face for zid {}", net.graph[*child].zid), + } + } + } +} + +fn propagate_forget_simple_queryable(tables: &mut Tables, res: &mut Arc) { + for face in tables.faces.values_mut() { + if face_hat!(face).local_qabls.contains_key(res) { + let wire_expr = Resource::get_best_key(res, "", face.id); + face.primitives.send_declare(Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::UndeclareQueryable(UndeclareQueryable { + id: 0, // TODO + ext_wire_expr: WireExprType { wire_expr }, + }), + }); + + face_hat_mut!(face).local_qabls.remove(res); + } + } +} + +fn propagate_forget_simple_queryable_to_peers(tables: &mut Tables, res: &mut Arc) { + if !hat!(tables).full_net(WhatAmI::Peer) + && res_hat!(res).router_qabls.len() == 1 + && res_hat!(res).router_qabls.contains_key(&tables.zid) + { + for mut face in tables + .faces + .values() + .cloned() + .collect::>>() + { + if face.whatami == WhatAmI::Peer + && face_hat!(face).local_qabls.contains_key(res) + && !res.session_ctxs.values().any(|s| { + face.zid != s.face.zid + && s.qabl.is_some() + && (s.face.whatami == WhatAmI::Client + || (s.face.whatami == WhatAmI::Peer + && hat!(tables).failover_brokering(s.face.zid, face.zid))) + }) + { + let wire_expr = Resource::get_best_key(res, "", face.id); + face.primitives.send_declare(Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::UndeclareQueryable(UndeclareQueryable { + id: 0, // TODO + ext_wire_expr: WireExprType { wire_expr }, + }), + }); + + face_hat_mut!(&mut face).local_qabls.remove(res); + } + } + } +} + +fn propagate_forget_sourced_queryable( + tables: &mut Tables, + res: &mut Arc, + src_face: Option<&Arc>, + source: &ZenohId, + net_type: WhatAmI, +) { + let net = hat!(tables).get_net(net_type).unwrap(); + match net.get_idx(source) { + Some(tree_sid) => { + if net.trees.len() > tree_sid.index() { + send_forget_sourced_queryable_to_net_childs( + tables, + net, + &net.trees[tree_sid.index()].childs, + res, + src_face, + tree_sid.index() as RoutingContext, + ); + } else { + log::trace!( + "Propagating forget qabl {}: tree for node {} sid:{} not yet ready", + res.expr(), + tree_sid.index(), + source + ); + } + } + None => log::error!( + "Error propagating forget qabl {}: cannot get index of {}!", + res.expr(), + source + ), + } +} + +fn unregister_router_queryable(tables: &mut Tables, res: &mut Arc, router: &ZenohId) { + log::debug!( + "Unregister router queryable {} (router: {})", + res.expr(), + router, + ); + res_hat_mut!(res).router_qabls.remove(router); + + if res_hat!(res).router_qabls.is_empty() { + hat_mut!(tables) + .router_qabls + .retain(|qabl| !Arc::ptr_eq(qabl, res)); + + if hat!(tables).full_net(WhatAmI::Peer) { + undeclare_peer_queryable(tables, None, res, &tables.zid.clone()); + } + propagate_forget_simple_queryable(tables, res); + } + + propagate_forget_simple_queryable_to_peers(tables, res); +} + +fn undeclare_router_queryable( + tables: &mut Tables, + face: Option<&Arc>, + res: &mut Arc, + router: &ZenohId, +) { + if res_hat!(res).router_qabls.contains_key(router) { + unregister_router_queryable(tables, res, router); + propagate_forget_sourced_queryable(tables, res, face, router, WhatAmI::Router); + } +} + +fn forget_router_queryable( + tables: 
&TablesLock, + rtables: RwLockReadGuard, + face: &mut Arc, + expr: &WireExpr, + router: &ZenohId, +) { + match rtables.get_mapping(face, &expr.scope, expr.mapping) { + Some(prefix) => match Resource::get_resource(prefix, expr.suffix.as_ref()) { + Some(mut res) => { + drop(rtables); + let mut wtables = zwrite!(tables.tables); + undeclare_router_queryable(&mut wtables, Some(face), &mut res, router); + disable_matches_query_routes(&mut wtables, &mut res); + drop(wtables); + + let rtables = zread!(tables.tables); + let matches_query_routes = compute_matches_query_routes_(&rtables, &res); + drop(rtables); + + let wtables = zwrite!(tables.tables); + for (mut res, query_routes) in matches_query_routes { + get_mut_unchecked(&mut res) + .context_mut() + .update_query_routes(query_routes); + } + Resource::clean(&mut res); + drop(wtables); + } + None => log::error!("Undeclare unknown router queryable!"), + }, + None => log::error!("Undeclare router queryable with unknown scope!"), + } +} + +fn unregister_peer_queryable(tables: &mut Tables, res: &mut Arc, peer: &ZenohId) { + log::debug!("Unregister peer queryable {} (peer: {})", res.expr(), peer,); + res_hat_mut!(res).peer_qabls.remove(peer); + + if res_hat!(res).peer_qabls.is_empty() { + hat_mut!(tables) + .peer_qabls + .retain(|qabl| !Arc::ptr_eq(qabl, res)); + + if tables.whatami == WhatAmI::Peer { + propagate_forget_simple_queryable(tables, res); + } + } +} + +fn undeclare_peer_queryable( + tables: &mut Tables, + face: Option<&Arc>, + res: &mut Arc, + peer: &ZenohId, +) { + if res_hat!(res).peer_qabls.contains_key(peer) { + unregister_peer_queryable(tables, res, peer); + propagate_forget_sourced_queryable(tables, res, face, peer, WhatAmI::Peer); + } +} + +fn forget_peer_queryable( + tables: &TablesLock, + rtables: RwLockReadGuard, + face: &mut Arc, + expr: &WireExpr, + peer: &ZenohId, +) { + match rtables.get_mapping(face, &expr.scope, expr.mapping) { + Some(prefix) => match Resource::get_resource(prefix, expr.suffix.as_ref()) { + Some(mut res) => { + drop(rtables); + let mut wtables = zwrite!(tables.tables); + undeclare_peer_queryable(&mut wtables, Some(face), &mut res, peer); + + if wtables.whatami == WhatAmI::Router { + let client_qabls = res.session_ctxs.values().any(|ctx| ctx.qabl.is_some()); + let peer_qabls = remote_peer_qabls(&wtables, &res); + let zid = wtables.zid; + if !client_qabls && !peer_qabls { + undeclare_router_queryable(&mut wtables, None, &mut res, &zid); + } else { + let local_info = local_router_qabl_info(&wtables, &res); + register_router_queryable(&mut wtables, None, &mut res, &local_info, zid); + } + } + drop(wtables); + + let rtables = zread!(tables.tables); + let matches_query_routes = compute_matches_query_routes_(&rtables, &res); + drop(rtables); + + let wtables = zwrite!(tables.tables); + for (mut res, query_routes) in matches_query_routes { + get_mut_unchecked(&mut res) + .context_mut() + .update_query_routes(query_routes); + } + Resource::clean(&mut res); + drop(wtables); + } + None => log::error!("Undeclare unknown peer queryable!"), + }, + None => log::error!("Undeclare peer queryable with unknown scope!"), + } +} + +pub(super) fn undeclare_client_queryable( + tables: &mut Tables, + face: &mut Arc, + res: &mut Arc, +) { + log::debug!("Unregister client queryable {} for {}", res.expr(), face); + if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) { + get_mut_unchecked(ctx).qabl = None; + if ctx.qabl.is_none() { + face_hat_mut!(face).remote_qabls.remove(res); + } + } + + let mut client_qabls = 
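// Editor's note (not part of this patch): the remaining client faces offering
// this queryable are collected here; when exactly one is left and no remote
// router or peer still offers it, the local declaration previously sent to
// that face becomes useless and is withdrawn further below.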
client_qabls(res); + let router_qabls = remote_router_qabls(tables, res); + let peer_qabls = remote_peer_qabls(tables, res); + + match tables.whatami { + WhatAmI::Router => { + if client_qabls.is_empty() && !peer_qabls { + undeclare_router_queryable(tables, None, res, &tables.zid.clone()); + } else { + let local_info = local_router_qabl_info(tables, res); + register_router_queryable(tables, None, res, &local_info, tables.zid); + propagate_forget_simple_queryable_to_peers(tables, res); + } + } + WhatAmI::Peer => { + if hat!(tables).full_net(WhatAmI::Peer) { + if client_qabls.is_empty() { + undeclare_peer_queryable(tables, None, res, &tables.zid.clone()); + } else { + let local_info = local_peer_qabl_info(tables, res); + register_peer_queryable(tables, None, res, &local_info, tables.zid); + } + } else if client_qabls.is_empty() { + propagate_forget_simple_queryable(tables, res); + } else { + propagate_simple_queryable(tables, res, None); + } + } + _ => { + if client_qabls.is_empty() { + propagate_forget_simple_queryable(tables, res); + } else { + propagate_simple_queryable(tables, res, None); + } + } + } + + if client_qabls.len() == 1 && !router_qabls && !peer_qabls { + let face = &mut client_qabls[0]; + if face_hat!(face).local_qabls.contains_key(res) { + let wire_expr = Resource::get_best_key(res, "", face.id); + face.primitives.send_declare(Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::UndeclareQueryable(UndeclareQueryable { + id: 0, // TODO + ext_wire_expr: WireExprType { wire_expr }, + }), + }); + + face_hat_mut!(face).local_qabls.remove(res); + } + } +} + +fn forget_client_queryable( + tables: &TablesLock, + rtables: RwLockReadGuard, + face: &mut Arc, + expr: &WireExpr, +) { + match rtables.get_mapping(face, &expr.scope, expr.mapping) { + Some(prefix) => match Resource::get_resource(prefix, expr.suffix.as_ref()) { + Some(mut res) => { + drop(rtables); + let mut wtables = zwrite!(tables.tables); + undeclare_client_queryable(&mut wtables, face, &mut res); + disable_matches_query_routes(&mut wtables, &mut res); + drop(wtables); + + let rtables = zread!(tables.tables); + let matches_query_routes = compute_matches_query_routes_(&rtables, &res); + drop(rtables); + + let wtables = zwrite!(tables.tables); + for (mut res, query_routes) in matches_query_routes { + get_mut_unchecked(&mut res) + .context_mut() + .update_query_routes(query_routes); + } + Resource::clean(&mut res); + drop(wtables); + } + None => log::error!("Undeclare unknown queryable!"), + }, + None => log::error!("Undeclare queryable with unknown scope!"), + } +} + +pub(super) fn queries_new_face(tables: &mut Tables, face: &mut Arc) { + match tables.whatami { + WhatAmI::Router => { + if face.whatami == WhatAmI::Client { + for qabl in hat!(tables).router_qabls.iter() { + if qabl.context.is_some() { + let info = local_qabl_info(tables, qabl, face); + face_hat_mut!(face).local_qabls.insert(qabl.clone(), info); + let key_expr = Resource::decl_key(qabl, face); + face.primitives.send_declare(Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id: 0, // TODO + wire_expr: key_expr, + ext_info: info, + }), + }); + } + } + } else if face.whatami == WhatAmI::Peer && !hat!(tables).full_net(WhatAmI::Peer) { + for qabl in hat!(tables).router_qabls.iter() { + if qabl.context.is_some() + && (res_hat!(qabl).router_qabls.keys().any(|r| 
*r != tables.zid) + || qabl.session_ctxs.values().any(|s| { + s.qabl.is_some() + && (s.face.whatami == WhatAmI::Client + || (s.face.whatami == WhatAmI::Peer + && hat!(tables) + .failover_brokering(s.face.zid, face.zid))) + })) + { + let info = local_qabl_info(tables, qabl, face); + face_hat_mut!(face).local_qabls.insert(qabl.clone(), info); + let key_expr = Resource::decl_key(qabl, face); + face.primitives.send_declare(Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id: 0, // TODO + wire_expr: key_expr, + ext_info: info, + }), + }); + } + } + } + } + WhatAmI::Peer => { + if hat!(tables).full_net(WhatAmI::Peer) { + if face.whatami == WhatAmI::Client { + for qabl in &hat!(tables).peer_qabls { + if qabl.context.is_some() { + let info = local_qabl_info(tables, qabl, face); + face_hat_mut!(face).local_qabls.insert(qabl.clone(), info); + let key_expr = Resource::decl_key(qabl, face); + face.primitives.send_declare(Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id: 0, // TODO + wire_expr: key_expr, + ext_info: info, + }), + }); + } + } + } + } else { + for face in tables + .faces + .values() + .cloned() + .collect::>>() + { + for qabl in face_hat!(face).remote_qabls.iter() { + propagate_simple_queryable(tables, qabl, Some(&mut face.clone())); + } + } + } + } + WhatAmI::Client => { + for face in tables + .faces + .values() + .cloned() + .collect::>>() + { + for qabl in face_hat!(face).remote_qabls.iter() { + propagate_simple_queryable(tables, qabl, Some(&mut face.clone())); + } + } + } + } +} + +pub(super) fn queries_remove_node(tables: &mut Tables, node: &ZenohId, net_type: WhatAmI) { + match net_type { + WhatAmI::Router => { + let mut qabls = vec![]; + for res in hat!(tables).router_qabls.iter() { + for qabl in res_hat!(res).router_qabls.keys() { + if qabl == node { + qabls.push(res.clone()); + } + } + } + for mut res in qabls { + unregister_router_queryable(tables, &mut res, node); + + let matches_query_routes = compute_matches_query_routes_(tables, &res); + for (mut res, query_routes) in matches_query_routes { + get_mut_unchecked(&mut res) + .context_mut() + .update_query_routes(query_routes); + } + Resource::clean(&mut res); + } + } + WhatAmI::Peer => { + let mut qabls = vec![]; + for res in hat!(tables).router_qabls.iter() { + for qabl in res_hat!(res).router_qabls.keys() { + if qabl == node { + qabls.push(res.clone()); + } + } + } + for mut res in qabls { + unregister_peer_queryable(tables, &mut res, node); + + if tables.whatami == WhatAmI::Router { + let client_qabls = res.session_ctxs.values().any(|ctx| ctx.qabl.is_some()); + let peer_qabls = remote_peer_qabls(tables, &res); + if !client_qabls && !peer_qabls { + undeclare_router_queryable(tables, None, &mut res, &tables.zid.clone()); + } else { + let local_info = local_router_qabl_info(tables, &res); + register_router_queryable(tables, None, &mut res, &local_info, tables.zid); + } + } + + let matches_query_routes = compute_matches_query_routes_(tables, &res); + for (mut res, query_routes) in matches_query_routes { + get_mut_unchecked(&mut res) + .context_mut() + .update_query_routes(query_routes); + } + Resource::clean(&mut res) + } + } + _ => (), + } +} + +pub(super) fn queries_linkstate_change(tables: &mut Tables, zid: &ZenohId, links: &[ZenohId]) { + if let Some(src_face) = 
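// Editor's note (not part of this patch): on a peer's linkstate change, a
// router with peers-failover brokering enabled re-evaluates, for each
// queryable declared by that peer, which other peer faces still need the
// router to broker it: it undeclares the re-published queryable where
// brokering is no longer required and declares it where it now is.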
tables.get_face(zid) { + if hat!(tables).router_peers_failover_brokering + && tables.whatami == WhatAmI::Router + && src_face.whatami == WhatAmI::Peer + { + for res in &face_hat!(src_face).remote_qabls { + let client_qabls = res + .session_ctxs + .values() + .any(|ctx| ctx.face.whatami == WhatAmI::Client && ctx.qabl.is_some()); + if !remote_router_qabls(tables, res) && !client_qabls { + for ctx in get_mut_unchecked(&mut res.clone()) + .session_ctxs + .values_mut() + { + let dst_face = &mut get_mut_unchecked(ctx).face; + if dst_face.whatami == WhatAmI::Peer && src_face.zid != dst_face.zid { + if face_hat!(dst_face).local_qabls.contains_key(res) { + let forget = !HatTables::failover_brokering_to(links, dst_face.zid) + && { + let ctx_links = hat!(tables) + .peers_net + .as_ref() + .map(|net| net.get_links(dst_face.zid)) + .unwrap_or_else(|| &[]); + res.session_ctxs.values().any(|ctx2| { + ctx2.face.whatami == WhatAmI::Peer + && ctx2.qabl.is_some() + && HatTables::failover_brokering_to( + ctx_links, + ctx2.face.zid, + ) + }) + }; + if forget { + let wire_expr = Resource::get_best_key(res, "", dst_face.id); + dst_face.primitives.send_declare(Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::UndeclareQueryable(UndeclareQueryable { + id: 0, // TODO + ext_wire_expr: WireExprType { wire_expr }, + }), + }); + + face_hat_mut!(dst_face).local_qabls.remove(res); + } + } else if HatTables::failover_brokering_to(links, ctx.face.zid) { + let dst_face = &mut get_mut_unchecked(ctx).face; + let info = local_qabl_info(tables, res, dst_face); + face_hat_mut!(dst_face) + .local_qabls + .insert(res.clone(), info); + let key_expr = Resource::decl_key(res, dst_face); + dst_face.primitives.send_declare(Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id: 0, // TODO + wire_expr: key_expr, + ext_info: info, + }), + }); + } + } + } + } + } + } + } +} + +pub(super) fn queries_tree_change( + tables: &mut Tables, + new_childs: &[Vec], + net_type: WhatAmI, +) { + // propagate qabls to new childs + for (tree_sid, tree_childs) in new_childs.iter().enumerate() { + if !tree_childs.is_empty() { + let net = hat!(tables).get_net(net_type).unwrap(); + let tree_idx = NodeIndex::new(tree_sid); + if net.graph.contains_node(tree_idx) { + let tree_id = net.graph[tree_idx].zid; + + let qabls_res = match net_type { + WhatAmI::Router => &hat!(tables).router_qabls, + _ => &hat!(tables).peer_qabls, + }; + + for res in qabls_res { + let qabls = match net_type { + WhatAmI::Router => &res_hat!(res).router_qabls, + _ => &res_hat!(res).peer_qabls, + }; + if let Some(qabl_info) = qabls.get(&tree_id) { + send_sourced_queryable_to_net_childs( + tables, + net, + tree_childs, + res, + qabl_info, + None, + tree_sid as RoutingContext, + ); + } + } + } + } + } + + // recompute routes + compute_query_routes_from(tables, &mut tables.root_res.clone()); +} + +#[inline] +#[allow(clippy::too_many_arguments)] +fn insert_target_for_qabls( + route: &mut QueryTargetQablSet, + expr: &mut RoutingExpr, + tables: &Tables, + net: &Network, + source: RoutingContext, + qabls: &HashMap, + complete: bool, +) { + if net.trees.len() > source as usize { + for (qabl, qabl_info) in qabls { + if let Some(qabl_idx) = net.get_idx(qabl) { + if net.trees[source as usize].directions.len() > qabl_idx.index() { + if let Some(direction) = net.trees[source as 
usize].directions[qabl_idx.index()] + { + if net.graph.contains_node(direction) { + if let Some(face) = tables.get_face(&net.graph[direction].zid) { + if net.distances.len() > qabl_idx.index() { + let key_expr = + Resource::get_best_key(expr.prefix, expr.suffix, face.id); + route.push(QueryTargetQabl { + direction: (face.clone(), key_expr.to_owned(), source), + complete: if complete { + qabl_info.complete as u64 + } else { + 0 + }, + distance: net.distances[qabl_idx.index()], + }); + } + } + } + } + } + } + } + } else { + log::trace!("Tree for node sid:{} not yet ready", source); + } +} + +lazy_static::lazy_static! { + static ref EMPTY_ROUTE: Arc = Arc::new(Vec::new()); +} + +impl HatQueriesTrait for HatCode { + fn declare_queryable( + &self, + tables: &TablesLock, + face: &mut Arc, + expr: &WireExpr, + qabl_info: &QueryableInfo, + node_id: RoutingContext, + ) { + let rtables = zread!(tables.tables); + match (rtables.whatami, face.whatami) { + (WhatAmI::Router, WhatAmI::Router) => { + if let Some(router) = get_router(&rtables, face, node_id) { + declare_router_queryable(tables, rtables, face, expr, qabl_info, router) + } + } + (WhatAmI::Router, WhatAmI::Peer) + | (WhatAmI::Peer, WhatAmI::Router) + | (WhatAmI::Peer, WhatAmI::Peer) => { + if hat!(rtables).full_net(WhatAmI::Peer) { + if let Some(peer) = get_peer(&rtables, face, node_id) { + declare_peer_queryable(tables, rtables, face, expr, qabl_info, peer) + } + } else { + declare_client_queryable(tables, rtables, face, expr, qabl_info) + } + } + _ => declare_client_queryable(tables, rtables, face, expr, qabl_info), + } + } + + fn forget_queryable( + &self, + tables: &TablesLock, + face: &mut Arc, + expr: &WireExpr, + node_id: RoutingContext, + ) { + let rtables = zread!(tables.tables); + match (rtables.whatami, face.whatami) { + (WhatAmI::Router, WhatAmI::Router) => { + if let Some(router) = get_router(&rtables, face, node_id) { + forget_router_queryable(tables, rtables, face, expr, &router) + } + } + (WhatAmI::Router, WhatAmI::Peer) + | (WhatAmI::Peer, WhatAmI::Router) + | (WhatAmI::Peer, WhatAmI::Peer) => { + if hat!(rtables).full_net(WhatAmI::Peer) { + if let Some(peer) = get_peer(&rtables, face, node_id) { + forget_peer_queryable(tables, rtables, face, expr, &peer) + } + } else { + forget_client_queryable(tables, rtables, face, expr) + } + } + _ => forget_client_queryable(tables, rtables, face, expr), + } + } + + fn compute_query_route( + &self, + tables: &Tables, + expr: &mut RoutingExpr, + source: RoutingContext, + source_type: WhatAmI, + ) -> Arc { + let mut route = QueryTargetQablSet::new(); + let key_expr = expr.full_expr(); + if key_expr.ends_with('/') { + return EMPTY_ROUTE.clone(); + } + log::trace!( + "compute_query_route({}, {:?}, {:?})", + key_expr, + source, + source_type + ); + let key_expr = match OwnedKeyExpr::try_from(key_expr) { + Ok(ke) => ke, + Err(e) => { + log::warn!("Invalid KE reached the system: {}", e); + return EMPTY_ROUTE.clone(); + } + }; + let res = Resource::get_resource(expr.prefix, expr.suffix); + let matches = res + .as_ref() + .and_then(|res| res.context.as_ref()) + .map(|ctx| Cow::from(&ctx.matches)) + .unwrap_or_else(|| Cow::from(Resource::get_matches(tables, &key_expr))); + + let master = tables.whatami != WhatAmI::Router + || !hat!(tables).full_net(WhatAmI::Peer) + || *hat!(tables).elect_router(&tables.zid, &key_expr, hat!(tables).shared_nodes.iter()) + == tables.zid; + + for mres in matches.iter() { + let mres = mres.upgrade().unwrap(); + let complete = 
DEFAULT_INCLUDER.includes(mres.expr().as_bytes(), key_expr.as_bytes()); + if tables.whatami == WhatAmI::Router { + if master || source_type == WhatAmI::Router { + let net = hat!(tables).routers_net.as_ref().unwrap(); + let router_source = match source_type { + WhatAmI::Router => source, + _ => net.idx.index() as RoutingContext, + }; + insert_target_for_qabls( + &mut route, + expr, + tables, + net, + router_source, + &res_hat!(mres).router_qabls, + complete, + ); + } + + if (master || source_type != WhatAmI::Router) + && hat!(tables).full_net(WhatAmI::Peer) + { + let net = hat!(tables).peers_net.as_ref().unwrap(); + let peer_source = match source_type { + WhatAmI::Peer => source, + _ => net.idx.index() as RoutingContext, + }; + insert_target_for_qabls( + &mut route, + expr, + tables, + net, + peer_source, + &res_hat!(mres).peer_qabls, + complete, + ); + } + } + + if tables.whatami == WhatAmI::Peer && hat!(tables).full_net(WhatAmI::Peer) { + let net = hat!(tables).peers_net.as_ref().unwrap(); + let peer_source = match source_type { + WhatAmI::Router | WhatAmI::Peer => source, + _ => net.idx.index() as RoutingContext, + }; + insert_target_for_qabls( + &mut route, + expr, + tables, + net, + peer_source, + &res_hat!(mres).peer_qabls, + complete, + ); + } + + if tables.whatami != WhatAmI::Router || master || source_type == WhatAmI::Router { + for (sid, context) in &mres.session_ctxs { + if match tables.whatami { + WhatAmI::Router => context.face.whatami != WhatAmI::Router, + _ => { + source_type == WhatAmI::Client + || context.face.whatami == WhatAmI::Client + } + } { + let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, *sid); + if let Some(qabl_info) = context.qabl.as_ref() { + route.push(QueryTargetQabl { + direction: ( + context.face.clone(), + key_expr.to_owned(), + RoutingContext::default(), + ), + complete: if complete { + qabl_info.complete as u64 + } else { + 0 + }, + distance: 0.5, + }); + } + } + } + } + } + route.sort_by_key(|qabl| OrderedFloat(qabl.distance)); + Arc::new(route) + } + + #[inline] + fn compute_local_replies( + &self, + tables: &Tables, + prefix: &Arc, + suffix: &str, + face: &Arc, + ) -> Vec<(WireExpr<'static>, ZBuf)> { + let mut result = vec![]; + // Only the first routing point in the query route + // should return the liveliness tokens + if face.whatami == WhatAmI::Client { + let key_expr = prefix.expr() + suffix; + let key_expr = match OwnedKeyExpr::try_from(key_expr) { + Ok(ke) => ke, + Err(e) => { + log::warn!("Invalid KE reached the system: {}", e); + return result; + } + }; + if key_expr.starts_with(PREFIX_LIVELINESS) { + let res = Resource::get_resource(prefix, suffix); + let matches = res + .as_ref() + .and_then(|res| res.context.as_ref()) + .map(|ctx| Cow::from(&ctx.matches)) + .unwrap_or_else(|| Cow::from(Resource::get_matches(tables, &key_expr))); + for mres in matches.iter() { + let mres = mres.upgrade().unwrap(); + if (mres.context.is_some() + && (!res_hat!(mres).router_subs.is_empty() + || !res_hat!(mres).peer_subs.is_empty())) + || mres.session_ctxs.values().any(|ctx| ctx.subs.is_some()) + { + result.push((Resource::get_best_key(&mres, "", face.id), ZBuf::default())); + } + } + } + } + result + } + + fn compute_query_routes_(&self, tables: &Tables, res: &Arc) -> QueryRoutes { + let mut routes = QueryRoutes { + routers_query_routes: vec![], + peers_query_routes: vec![], + peer_query_route: None, + client_query_route: None, + }; + let mut expr = RoutingExpr::new(res, ""); + if tables.whatami == WhatAmI::Router { + let indexes = hat!(tables) 
+ .routers_net + .as_ref() + .unwrap() + .graph + .node_indices() + .collect::>(); + let max_idx = indexes.iter().max().unwrap(); + routes + .routers_query_routes + .resize_with(max_idx.index() + 1, || Arc::new(QueryTargetQablSet::new())); + + for idx in &indexes { + routes.routers_query_routes[idx.index()] = self.compute_query_route( + tables, + &mut expr, + idx.index() as RoutingContext, + WhatAmI::Router, + ); + } + + routes.peer_query_route = Some(self.compute_query_route( + tables, + &mut expr, + RoutingContext::default(), + WhatAmI::Peer, + )); + } + if (tables.whatami == WhatAmI::Router || tables.whatami == WhatAmI::Peer) + && hat!(tables).full_net(WhatAmI::Peer) + { + let indexes = hat!(tables) + .peers_net + .as_ref() + .unwrap() + .graph + .node_indices() + .collect::>(); + let max_idx = indexes.iter().max().unwrap(); + routes + .peers_query_routes + .resize_with(max_idx.index() + 1, || Arc::new(QueryTargetQablSet::new())); + + for idx in &indexes { + routes.peers_query_routes[idx.index()] = self.compute_query_route( + tables, + &mut expr, + idx.index() as RoutingContext, + WhatAmI::Peer, + ); + } + } + if tables.whatami == WhatAmI::Peer && !hat!(tables).full_net(WhatAmI::Peer) { + routes.client_query_route = Some(self.compute_query_route( + tables, + &mut expr, + RoutingContext::default(), + WhatAmI::Client, + )); + routes.peer_query_route = Some(self.compute_query_route( + tables, + &mut expr, + RoutingContext::default(), + WhatAmI::Peer, + )); + } + if tables.whatami == WhatAmI::Client { + routes.client_query_route = Some(self.compute_query_route( + tables, + &mut expr, + RoutingContext::default(), + WhatAmI::Client, + )); + } + routes + } + + fn compute_query_routes(&self, tables: &mut Tables, res: &mut Arc) { + if res.context.is_some() { + let mut res_mut = res.clone(); + let res_mut = get_mut_unchecked(&mut res_mut); + let mut expr = RoutingExpr::new(res, ""); + if tables.whatami == WhatAmI::Router { + let indexes = hat!(tables) + .routers_net + .as_ref() + .unwrap() + .graph + .node_indices() + .collect::>(); + let max_idx = indexes.iter().max().unwrap(); + let routers_query_routes = &mut res_mut.context_mut().routers_query_routes; + routers_query_routes.clear(); + routers_query_routes + .resize_with(max_idx.index() + 1, || Arc::new(QueryTargetQablSet::new())); + + for idx in &indexes { + routers_query_routes[idx.index()] = self.compute_query_route( + tables, + &mut expr, + idx.index() as RoutingContext, + WhatAmI::Router, + ); + } + + res_mut.context_mut().peer_query_route = Some(self.compute_query_route( + tables, + &mut expr, + RoutingContext::default(), + WhatAmI::Peer, + )); + } + if (tables.whatami == WhatAmI::Router || tables.whatami == WhatAmI::Peer) + && hat!(tables).full_net(WhatAmI::Peer) + { + let indexes = hat!(tables) + .peers_net + .as_ref() + .unwrap() + .graph + .node_indices() + .collect::>(); + let max_idx = indexes.iter().max().unwrap(); + let peers_query_routes = &mut res_mut.context_mut().peers_query_routes; + peers_query_routes.clear(); + peers_query_routes + .resize_with(max_idx.index() + 1, || Arc::new(QueryTargetQablSet::new())); + + for idx in &indexes { + peers_query_routes[idx.index()] = self.compute_query_route( + tables, + &mut expr, + idx.index() as RoutingContext, + WhatAmI::Peer, + ); + } + } + if tables.whatami == WhatAmI::Peer && !hat!(tables).full_net(WhatAmI::Peer) { + res_mut.context_mut().client_query_route = Some(self.compute_query_route( + tables, + &mut expr, + RoutingContext::default(), + WhatAmI::Client, + )); + 
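+            // A peer that does not run the full peer-to-peer linkstate protocol does not
+            // index routes per spanning tree: it keeps one precomputed route per source
+            // kind, so the client route computed above is complemented by a peer route below.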
res_mut.context_mut().peer_query_route = Some(self.compute_query_route( + tables, + &mut expr, + RoutingContext::default(), + WhatAmI::Peer, + )); + } + if tables.whatami == WhatAmI::Client { + res_mut.context_mut().client_query_route = Some(self.compute_query_route( + tables, + &mut expr, + RoutingContext::default(), + WhatAmI::Client, + )); + } + } + } +} diff --git a/zenoh/src/net/routing/hat/router/mod.rs b/zenoh/src/net/routing/hat/router/mod.rs new file mode 100644 index 0000000000..d84128a037 --- /dev/null +++ b/zenoh/src/net/routing/hat/router/mod.rs @@ -0,0 +1,885 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +//! ⚠️ WARNING ⚠️ +//! +//! This module is intended for Zenoh's internal use. +//! +//! [Click here for Zenoh's documentation](../zenoh/index.html) +use self::{ + network::{Network, shared_nodes}, + pubsub::{pubsub_linkstate_change, pubsub_new_face, pubsub_remove_node, undeclare_client_subscription}, + queries::{queries_linkstate_change, queries_new_face, queries_remove_node, undeclare_client_queryable}, +}; +use super::{super::dispatcher::{ + face::FaceState, + tables::{ + Resource, RoutingContext, + RoutingExpr, Tables, TablesLock, + }, +}, HatBaseTrait, HatTrait}; +use crate::{ + net::{ + codec::Zenoh080Routing, + protocol::linkstate::LinkStateList, + }, + runtime::Runtime, +}; +use async_std::task::JoinHandle; +use std::{ + any::Any, + collections::{hash_map::DefaultHasher, HashMap, HashSet}, + hash::Hasher, + sync::Arc, +}; +use zenoh_config::{WhatAmI, WhatAmIMatcher, ZenohId}; +use zenoh_protocol::{ + common::ZExtBody, + network::{ + declare::queryable::ext::QueryableInfo, + oam::id::OAM_LINKSTATE, + Oam, + }, +}; +use zenoh_result::ZResult; +use zenoh_sync::get_mut_unchecked; +use zenoh_transport::{Mux, Primitives, TransportUnicast}; + +mod network; +mod pubsub; +mod queries; + +zconfigurable! { + static ref TREES_COMPUTATION_DELAY: u64 = 100; +} + + + +macro_rules! hat { + ($t:expr) => { + $t.hat.downcast_ref::().unwrap() + }; +} +use hat; + +macro_rules! hat_mut { + ($t:expr) => { + $t.hat.downcast_mut::().unwrap() + }; +} +use hat_mut; + +macro_rules! res_hat { + ($r:expr) => { + $r.context().hat.downcast_ref::().unwrap() + }; +} +use res_hat; + +macro_rules! res_hat_mut { + ($r:expr) => { + get_mut_unchecked($r) + .context_mut() + .hat + .downcast_mut::() + .unwrap() + }; +} +use res_hat_mut; + +macro_rules! face_hat { + ($f:expr) => { + $f.hat.downcast_ref::().unwrap() + }; +} +use face_hat; + +macro_rules! 
face_hat_mut { + ($f:expr) => { + get_mut_unchecked($f).hat.downcast_mut::().unwrap() + }; +} +use face_hat_mut; + + + +struct HatTables { + router_subs: HashSet>, + peer_subs: HashSet>, + router_qabls: HashSet>, + peer_qabls: HashSet>, + routers_net: Option, + peers_net: Option, + shared_nodes: Vec, + routers_trees_task: Option>, + peers_trees_task: Option>, + router_peers_failover_brokering: bool, +} + +impl HatTables { + fn new(router_peers_failover_brokering: bool) -> Self { + Self { + router_subs: HashSet::new(), + peer_subs: HashSet::new(), + router_qabls: HashSet::new(), + peer_qabls: HashSet::new(), + routers_net: None, + peers_net: None, + shared_nodes: vec![], + routers_trees_task: None, + peers_trees_task: None, + router_peers_failover_brokering, + } + } + + #[inline] + fn get_net(&self, net_type: WhatAmI) -> Option<&Network> { + match net_type { + WhatAmI::Router => self.routers_net.as_ref(), + WhatAmI::Peer => self.peers_net.as_ref(), + _ => None, + } + } + + #[inline] + fn full_net(&self, net_type: WhatAmI) -> bool { + match net_type { + WhatAmI::Router => self + .routers_net + .as_ref() + .map(|net| net.full_linkstate) + .unwrap_or(false), + WhatAmI::Peer => self + .peers_net + .as_ref() + .map(|net| net.full_linkstate) + .unwrap_or(false), + _ => false, + } + } + + #[inline] + fn get_router_links(&self, peer: ZenohId) -> impl Iterator + '_ { + self.peers_net + .as_ref() + .unwrap() + .get_links(peer) + .iter() + .filter(move |nid| { + if let Some(node) = self.routers_net.as_ref().unwrap().get_node(nid) { + node.whatami.unwrap_or(WhatAmI::Router) == WhatAmI::Router + } else { + false + } + }) + } + + #[inline] + fn elect_router<'a>( + &'a self, + self_zid: &'a ZenohId, + key_expr: &str, + mut routers: impl Iterator, + ) -> &'a ZenohId { + match routers.next() { + None => self_zid, + Some(router) => { + let hash = |r: &ZenohId| { + let mut hasher = DefaultHasher::new(); + for b in key_expr.as_bytes() { + hasher.write_u8(*b); + } + for b in &r.to_le_bytes()[..r.size()] { + hasher.write_u8(*b); + } + hasher.finish() + }; + let mut res = router; + let mut h = None; + for router2 in routers { + let h2 = hash(router2); + if h2 > *h.get_or_insert_with(|| hash(res)) { + res = router2; + h = Some(h2); + } + } + res + } + } + } + + #[inline] + fn failover_brokering_to(source_links: &[ZenohId], dest: ZenohId) -> bool { + // if source_links is empty then gossip is probably disabled in source peer + !source_links.is_empty() && !source_links.contains(&dest) + } + + #[inline] + fn failover_brokering(&self, peer1: ZenohId, peer2: ZenohId) -> bool { + self.router_peers_failover_brokering + && self + .peers_net + .as_ref() + .map(|net| HatTables::failover_brokering_to(net.get_links(peer1), peer2)) + .unwrap_or(false) + } + + fn schedule_compute_trees(&mut self, tables_ref: Arc, net_type: WhatAmI) { + log::trace!("Schedule computations"); + if (net_type == WhatAmI::Router && self.routers_trees_task.is_none()) + || (net_type == WhatAmI::Peer && self.peers_trees_task.is_none()) + { + let task = Some(async_std::task::spawn(async move { + async_std::task::sleep(std::time::Duration::from_millis(*TREES_COMPUTATION_DELAY)) + .await; + let mut tables = zwrite!(tables_ref.tables); + + log::trace!("Compute trees"); + let new_childs = match net_type { + WhatAmI::Router => hat_mut!(tables) + .routers_net + .as_mut() + .unwrap() + .compute_trees(), + _ => hat_mut!(tables).peers_net.as_mut().unwrap().compute_trees(), + }; + + log::trace!("Compute routes"); + pubsub::pubsub_tree_change(&mut tables, 
&new_childs, net_type); + queries::queries_tree_change(&mut tables, &new_childs, net_type); + + log::trace!("Computations completed"); + match net_type { + WhatAmI::Router => hat_mut!(tables).routers_trees_task = None, + _ => hat_mut!(tables).peers_trees_task = None, + }; + })); + match net_type { + WhatAmI::Router => self.routers_trees_task = task, + _ => self.peers_trees_task = task, + }; + } + } +} + +pub(crate) struct HatCode {} + +impl HatBaseTrait for HatCode { + fn init( + &self, + tables: &mut Tables, + runtime: Runtime, + router_full_linkstate: bool, + peer_full_linkstate: bool, + router_peers_failover_brokering: bool, + gossip: bool, + gossip_multihop: bool, + autoconnect: WhatAmIMatcher, + ) { + if router_full_linkstate | gossip { + hat_mut!(tables).routers_net = Some(Network::new( + "[Routers network]".to_string(), + tables.zid, + runtime.clone(), + router_full_linkstate, + router_peers_failover_brokering, + gossip, + gossip_multihop, + autoconnect, + )); + } + if peer_full_linkstate | gossip { + hat_mut!(tables).peers_net = Some(Network::new( + "[Peers network]".to_string(), + tables.zid, + runtime, + peer_full_linkstate, + router_peers_failover_brokering, + gossip, + gossip_multihop, + autoconnect, + )); + } + if router_full_linkstate && peer_full_linkstate { + hat_mut!(tables).shared_nodes = shared_nodes( + hat!(tables).routers_net.as_ref().unwrap(), + hat!(tables).peers_net.as_ref().unwrap(), + ); + } + } + + fn new_tables(&self, router_peers_failover_brokering: bool) -> Box { + Box::new(HatTables::new(router_peers_failover_brokering)) + } + + fn new_face(&self) -> Box { + Box::new(HatFace::new()) + } + + fn new_resource(&self) -> Box { + Box::new(HatContext::new()) + } + + fn new_local_face( + &self, + tables: &mut Tables, + _tables_ref: &Arc, + primitives: Arc, + ) -> ZResult> { + let fid = tables.face_counter; + tables.face_counter += 1; + let mut newface = tables + .faces + .entry(fid) + .or_insert_with(|| { + FaceState::new( + fid, + tables.zid, + WhatAmI::Client, + #[cfg(feature = "stats")] + None, + primitives.clone(), + 0, + None, + Box::new(HatFace::new()), + ) + }) + .clone(); + log::debug!("New {}", newface); + + pubsub_new_face(tables, &mut newface); + queries_new_face(tables, &mut newface); + + Ok(newface) + } + + fn new_transport_unicast_face( + &self, + tables: &mut Tables, + tables_ref: &Arc, + transport: TransportUnicast, + ) -> ZResult> { + let whatami = transport.get_whatami()?; + + let link_id = match (tables.whatami, whatami) { + (WhatAmI::Router, WhatAmI::Router) => hat_mut!(tables) + .routers_net + .as_mut() + .unwrap() + .add_link(transport.clone()), + (WhatAmI::Router, WhatAmI::Peer) + | (WhatAmI::Peer, WhatAmI::Router) + | (WhatAmI::Peer, WhatAmI::Peer) => { + if let Some(net) = hat_mut!(tables).peers_net.as_mut() { + net.add_link(transport.clone()) + } else { + 0 + } + } + _ => 0, + }; + + if hat!(tables).full_net(WhatAmI::Router) && hat!(tables).full_net(WhatAmI::Peer) { + hat_mut!(tables).shared_nodes = shared_nodes( + hat!(tables).routers_net.as_ref().unwrap(), + hat!(tables).peers_net.as_ref().unwrap(), + ); + } + + let fid = tables.face_counter; + tables.face_counter += 1; + let zid = transport.get_zid()?; + #[cfg(feature = "stats")] + let stats = transport.get_stats()?; + let mut newface = tables + .faces + .entry(fid) + .or_insert_with(|| { + FaceState::new( + fid, + zid, + whatami, + #[cfg(feature = "stats")] + Some(stats), + Arc::new(Mux::new(transport)), + link_id, + None, + Box::new(HatFace::new()), + ) + }) + .clone(); + 
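+        // At this point the new face is registered in tables.faces; existing
+        // subscriptions and queryables are propagated to it below, and a tree
+        // recomputation is scheduled when the matching linkstate network is in use.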
log::debug!("New {}", newface); + + pubsub_new_face(tables, &mut newface); + queries_new_face(tables, &mut newface); + + match (tables.whatami, whatami) { + (WhatAmI::Router, WhatAmI::Router) => { + hat_mut!(tables).schedule_compute_trees(tables_ref.clone(), WhatAmI::Router); + } + (WhatAmI::Router, WhatAmI::Peer) + | (WhatAmI::Peer, WhatAmI::Router) + | (WhatAmI::Peer, WhatAmI::Peer) => { + if hat_mut!(tables).full_net(WhatAmI::Peer) { + hat_mut!(tables).schedule_compute_trees(tables_ref.clone(), WhatAmI::Peer); + } + } + _ => (), + } + Ok(newface) + } + + fn close_face(&self, tables: &TablesLock, face: &mut Arc) { + let mut wtables = zwrite!(tables.tables); + let mut face_clone = face.clone(); + let face = get_mut_unchecked(face); + for res in face.remote_mappings.values_mut() { + get_mut_unchecked(res).session_ctxs.remove(&face.id); + Resource::clean(res); + } + face.remote_mappings.clear(); + for res in face.local_mappings.values_mut() { + get_mut_unchecked(res).session_ctxs.remove(&face.id); + Resource::clean(res); + } + face.local_mappings.clear(); + + let mut subs_matches = vec![]; + for mut res in face + .hat + .downcast_mut::() + .unwrap() + .remote_subs + .drain() + { + get_mut_unchecked(&mut res).session_ctxs.remove(&face.id); + undeclare_client_subscription(&mut wtables, &mut face_clone, &mut res); + + if res.context.is_some() { + for match_ in &res.context().matches { + let mut match_ = match_.upgrade().unwrap(); + if !Arc::ptr_eq(&match_, &res) { + get_mut_unchecked(&mut match_) + .context_mut() + .valid_data_routes = false; + subs_matches.push(match_); + } + } + get_mut_unchecked(&mut res).context_mut().valid_data_routes = false; + subs_matches.push(res); + } + } + + let mut qabls_matches = vec![]; + for mut res in face + .hat + .downcast_mut::() + .unwrap() + .remote_qabls + .drain() + { + get_mut_unchecked(&mut res).session_ctxs.remove(&face.id); + undeclare_client_queryable(&mut wtables, &mut face_clone, &mut res); + + if res.context.is_some() { + for match_ in &res.context().matches { + let mut match_ = match_.upgrade().unwrap(); + if !Arc::ptr_eq(&match_, &res) { + get_mut_unchecked(&mut match_) + .context_mut() + .valid_query_routes = false; + qabls_matches.push(match_); + } + } + get_mut_unchecked(&mut res).context_mut().valid_query_routes = false; + qabls_matches.push(res); + } + } + drop(wtables); + + let mut matches_data_routes = vec![]; + let mut matches_query_routes = vec![]; + let rtables = zread!(tables.tables); + for _match in subs_matches.drain(..) { + matches_data_routes.push(( + _match.clone(), + rtables.hat_code.compute_data_routes_(&rtables, &_match), + )); + } + for _match in qabls_matches.drain(..) 
{ + matches_query_routes.push(( + _match.clone(), + rtables.hat_code.compute_query_routes_(&rtables, &_match), + )); + } + drop(rtables); + + let mut wtables = zwrite!(tables.tables); + for (mut res, data_routes) in matches_data_routes { + get_mut_unchecked(&mut res) + .context_mut() + .update_data_routes(data_routes); + Resource::clean(&mut res); + } + for (mut res, query_routes) in matches_query_routes { + get_mut_unchecked(&mut res) + .context_mut() + .update_query_routes(query_routes); + Resource::clean(&mut res); + } + wtables.faces.remove(&face.id); + drop(wtables); + } + + fn handle_oam( + &self, + tables: &mut Tables, + tables_ref: &Arc, + oam: Oam, + transport: &TransportUnicast, + ) -> ZResult<()> { + if oam.id == OAM_LINKSTATE { + if let ZExtBody::ZBuf(buf) = oam.body { + if let Ok(zid) = transport.get_zid() { + use zenoh_buffers::reader::HasReader; + use zenoh_codec::RCodec; + let codec = Zenoh080Routing::new(); + let mut reader = buf.reader(); + let list: LinkStateList = codec.read(&mut reader).unwrap(); + + let whatami = transport.get_whatami()?; + match (tables.whatami, whatami) { + (WhatAmI::Router, WhatAmI::Router) => { + for (_, removed_node) in hat_mut!(tables) + .routers_net + .as_mut() + .unwrap() + .link_states(list.link_states, zid) + .removed_nodes + { + pubsub_remove_node(tables, &removed_node.zid, WhatAmI::Router); + queries_remove_node(tables, &removed_node.zid, WhatAmI::Router); + } + + if hat!(tables).full_net(WhatAmI::Peer) { + hat_mut!(tables).shared_nodes = shared_nodes( + hat!(tables).routers_net.as_ref().unwrap(), + hat!(tables).peers_net.as_ref().unwrap(), + ); + } + + hat_mut!(tables) + .schedule_compute_trees(tables_ref.clone(), WhatAmI::Router); + } + (WhatAmI::Router, WhatAmI::Peer) + | (WhatAmI::Peer, WhatAmI::Router) + | (WhatAmI::Peer, WhatAmI::Peer) => { + if let Some(net) = hat_mut!(tables).peers_net.as_mut() { + let changes = net.link_states(list.link_states, zid); + if hat!(tables).full_net(WhatAmI::Peer) { + for (_, removed_node) in changes.removed_nodes { + pubsub_remove_node( + tables, + &removed_node.zid, + WhatAmI::Peer, + ); + queries_remove_node( + tables, + &removed_node.zid, + WhatAmI::Peer, + ); + } + + if tables.whatami == WhatAmI::Router { + hat_mut!(tables).shared_nodes = shared_nodes( + hat!(tables).routers_net.as_ref().unwrap(), + hat!(tables).peers_net.as_ref().unwrap(), + ); + } + + hat_mut!(tables) + .schedule_compute_trees(tables_ref.clone(), WhatAmI::Peer); + } else { + for (_, updated_node) in changes.updated_nodes { + pubsub_linkstate_change( + tables, + &updated_node.zid, + &updated_node.links, + ); + queries_linkstate_change( + tables, + &updated_node.zid, + &updated_node.links, + ); + } + } + } + } + _ => (), + }; + } + } + } + + Ok(()) + } + + fn map_routing_context( + &self, + tables: &Tables, + face: &FaceState, + routing_context: RoutingContext, + ) -> RoutingContext { + match tables.whatami { + WhatAmI::Router => match face.whatami { + WhatAmI::Router => hat!(tables) + .routers_net + .as_ref() + .unwrap() + .get_local_context(routing_context, face.link_id), + WhatAmI::Peer => { + if hat!(tables).full_net(WhatAmI::Peer) { + hat!(tables) + .peers_net + .as_ref() + .unwrap() + .get_local_context(routing_context, face.link_id) + } else { + 0 + } + } + _ => 0, + }, + WhatAmI::Peer => { + if hat!(tables).full_net(WhatAmI::Peer) { + hat!(tables) + .peers_net + .as_ref() + .unwrap() + .get_local_context(routing_context, face.link_id) + } else { + 0 + } + } + _ => 0, + } + } + + fn closing( + &self, + tables: &mut Tables, + 
tables_ref: &Arc, + transport: &TransportUnicast, + ) -> ZResult<()> { + match (transport.get_zid(), transport.get_whatami()) { + (Ok(zid), Ok(whatami)) => { + match (tables.whatami, whatami) { + (WhatAmI::Router, WhatAmI::Router) => { + for (_, removed_node) in hat_mut!(tables) + .routers_net + .as_mut() + .unwrap() + .remove_link(&zid) + { + pubsub_remove_node(tables, &removed_node.zid, WhatAmI::Router); + queries_remove_node(tables, &removed_node.zid, WhatAmI::Router); + } + + if hat!(tables).full_net(WhatAmI::Peer) { + hat_mut!(tables).shared_nodes = shared_nodes( + hat!(tables).routers_net.as_ref().unwrap(), + hat!(tables).peers_net.as_ref().unwrap(), + ); + } + + hat_mut!(tables) + .schedule_compute_trees(tables_ref.clone(), WhatAmI::Router); + } + (WhatAmI::Router, WhatAmI::Peer) + | (WhatAmI::Peer, WhatAmI::Router) + | (WhatAmI::Peer, WhatAmI::Peer) => { + if hat!(tables).full_net(WhatAmI::Peer) { + for (_, removed_node) in hat_mut!(tables) + .peers_net + .as_mut() + .unwrap() + .remove_link(&zid) + { + pubsub_remove_node(tables, &removed_node.zid, WhatAmI::Peer); + queries_remove_node(tables, &removed_node.zid, WhatAmI::Peer); + } + + if tables.whatami == WhatAmI::Router { + hat_mut!(tables).shared_nodes = shared_nodes( + hat!(tables).routers_net.as_ref().unwrap(), + hat!(tables).peers_net.as_ref().unwrap(), + ); + } + + hat_mut!(tables) + .schedule_compute_trees(tables_ref.clone(), WhatAmI::Peer); + } else if let Some(net) = hat_mut!(tables).peers_net.as_mut() { + net.remove_link(&zid); + } + } + _ => (), + }; + } + (_, _) => log::error!("Closed transport in session closing!"), + } + Ok(()) + } + + fn as_any(&self) -> &dyn Any { + self + } + + #[inline] + fn ingress_filter(&self, tables: &Tables, face: &FaceState, expr: &mut RoutingExpr) -> bool { + tables.whatami != WhatAmI::Router + || face.whatami != WhatAmI::Peer + || hat!(tables).peers_net.is_none() + || tables.zid + == *hat!(tables).elect_router( + &tables.zid, + expr.full_expr(), + hat!(tables).get_router_links(face.zid), + ) + } + + #[inline] + fn egress_filter( + &self, + tables: &Tables, + src_face: &FaceState, + out_face: &Arc, + expr: &mut RoutingExpr, + ) -> bool { + if src_face.id != out_face.id + && match (src_face.mcast_group.as_ref(), out_face.mcast_group.as_ref()) { + (Some(l), Some(r)) => l != r, + _ => true, + } + { + let dst_master = tables.whatami != WhatAmI::Router + || out_face.whatami != WhatAmI::Peer + || hat!(tables).peers_net.is_none() + || tables.zid + == *hat!(tables).elect_router( + &tables.zid, + expr.full_expr(), + hat!(tables).get_router_links(out_face.zid), + ); + + return dst_master + && (src_face.whatami != WhatAmI::Peer + || out_face.whatami != WhatAmI::Peer + || hat!(tables).full_net(WhatAmI::Peer) + || hat!(tables).failover_brokering(src_face.zid, out_face.zid)); + } + false + } +} + +struct HatContext { + router_subs: HashSet, + peer_subs: HashSet, + router_qabls: HashMap, + peer_qabls: HashMap, +} + +impl HatContext { + fn new() -> Self { + Self { + router_subs: HashSet::new(), + peer_subs: HashSet::new(), + router_qabls: HashMap::new(), + peer_qabls: HashMap::new(), + } + } +} + +struct HatFace { + local_subs: HashSet>, + remote_subs: HashSet>, + local_qabls: HashMap, QueryableInfo>, + remote_qabls: HashSet>, +} + +impl HatFace { + fn new() -> Self { + Self { + local_subs: HashSet::new(), + remote_subs: HashSet::new(), + local_qabls: HashMap::new(), + remote_qabls: HashSet::new(), + } + } +} + +fn get_router(tables: &Tables, face: &Arc, nodeid: RoutingContext) -> Option { + match 
hat!(tables) + .routers_net + .as_ref() + .unwrap() + .get_link(face.link_id) + { + Some(link) => match link.get_zid(&(nodeid as u64)) { + Some(router) => Some(*router), + None => { + log::error!( + "Received router declaration with unknown routing context id {}", + nodeid + ); + None + } + }, + None => { + log::error!( + "Could not find corresponding link in routers network for {}", + face + ); + None + } + } +} + +fn get_peer(tables: &Tables, face: &Arc, nodeid: RoutingContext) -> Option { + match hat!(tables) + .peers_net + .as_ref() + .unwrap() + .get_link(face.link_id) + { + Some(link) => match link.get_zid(&(nodeid as u64)) { + Some(router) => Some(*router), + None => { + log::error!( + "Received peer declaration with unknown routing context id {}", + nodeid + ); + None + } + }, + None => { + log::error!( + "Could not find corresponding link in peers network for {}", + face + ); + None + } + } +} + +impl HatTrait for HatCode {} \ No newline at end of file diff --git a/zenoh/src/net/routing/hat/router/network.rs b/zenoh/src/net/routing/hat/router/network.rs new file mode 100644 index 0000000000..61b3f6c78a --- /dev/null +++ b/zenoh/src/net/routing/hat/router/network.rs @@ -0,0 +1,1011 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +use crate::net::codec::Zenoh080Routing; +use crate::net::protocol::linkstate::{LinkState, LinkStateList}; +use crate::net::routing::dispatcher::tables::RoutingContext; +use crate::net::runtime::Runtime; +use async_std::task; +use petgraph::graph::NodeIndex; +use petgraph::visit::{IntoNodeReferences, VisitMap, Visitable}; +use std::convert::TryInto; +use vec_map::VecMap; +use zenoh_buffers::writer::{DidntWrite, HasWriter}; +use zenoh_buffers::ZBuf; +use zenoh_codec::WCodec; +use zenoh_link::Locator; +use zenoh_protocol::common::ZExtBody; +use zenoh_protocol::core::{WhatAmI, WhatAmIMatcher, ZenohId}; +use zenoh_protocol::network::oam::id::OAM_LINKSTATE; +use zenoh_protocol::network::{oam, NetworkBody, NetworkMessage, Oam}; +use zenoh_transport::TransportUnicast; + +#[derive(Clone)] +struct Details { + zid: bool, + locators: bool, + links: bool, +} + +#[derive(Clone)] +pub(super) struct Node { + pub(super) zid: ZenohId, + pub(super) whatami: Option, + pub(super) locators: Option>, + pub(super) sn: u64, + pub(super) links: Vec, +} + +impl std::fmt::Debug for Node { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + write!(f, "{}", self.zid) + } +} + +pub(super) struct Link { + pub(super) transport: TransportUnicast, + zid: ZenohId, + mappings: VecMap, + local_mappings: VecMap, +} + +impl Link { + fn new(transport: TransportUnicast) -> Self { + let zid = transport.get_zid().unwrap(); + Link { + transport, + zid, + mappings: VecMap::new(), + local_mappings: VecMap::new(), + } + } + + #[inline] + pub(super) fn set_zid_mapping(&mut self, psid: u64, zid: ZenohId) { + self.mappings.insert(psid.try_into().unwrap(), zid); + } + + #[inline] + pub(super) fn get_zid(&self, psid: &u64) -> Option<&ZenohId> { + self.mappings.get((*psid).try_into().unwrap()) + } + + #[inline] + pub(super) fn set_local_psid_mapping(&mut self, psid: u64, local_psid: u64) { + 
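+        // Record which psid this node uses locally for the psid advertised by the
+        // remote node; get_local_context() relies on this mapping to translate
+        // routing contexts received on this link.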
self.local_mappings + .insert(psid.try_into().unwrap(), local_psid); + } + + #[inline] + pub(super) fn get_local_psid(&self, psid: &u64) -> Option<&u64> { + self.local_mappings.get((*psid).try_into().unwrap()) + } +} + +pub(super) struct Changes { + pub(super) updated_nodes: Vec<(NodeIndex, Node)>, + pub(super) removed_nodes: Vec<(NodeIndex, Node)>, +} + +#[derive(Clone)] +pub(super) struct Tree { + pub(super) parent: Option, + pub(super) childs: Vec, + pub(super) directions: Vec>, +} + +pub(super) struct Network { + pub(super) name: String, + pub(super) full_linkstate: bool, + pub(super) router_peers_failover_brokering: bool, + pub(super) gossip: bool, + pub(super) gossip_multihop: bool, + pub(super) autoconnect: WhatAmIMatcher, + pub(super) idx: NodeIndex, + pub(super) links: VecMap, + pub(super) trees: Vec, + pub(super) distances: Vec, + pub(super) graph: petgraph::stable_graph::StableUnGraph, + pub(super) runtime: Runtime, +} + +impl Network { + #[allow(clippy::too_many_arguments)] + pub(super) fn new( + name: String, + zid: ZenohId, + runtime: Runtime, + full_linkstate: bool, + router_peers_failover_brokering: bool, + gossip: bool, + gossip_multihop: bool, + autoconnect: WhatAmIMatcher, + ) -> Self { + let mut graph = petgraph::stable_graph::StableGraph::default(); + log::debug!("{} Add node (self) {}", name, zid); + let idx = graph.add_node(Node { + zid, + whatami: Some(runtime.whatami), + locators: None, + sn: 1, + links: vec![], + }); + Network { + name, + full_linkstate, + router_peers_failover_brokering, + gossip, + gossip_multihop, + autoconnect, + idx, + links: VecMap::new(), + trees: vec![Tree { + parent: None, + childs: vec![], + directions: vec![None], + }], + distances: vec![0.0], + graph, + runtime, + } + } + + //noinspection ALL + // pub(super) fn dot(&self) -> String { + // std::format!( + // "{:?}", + // petgraph::dot::Dot::with_config(&self.graph, &[petgraph::dot::Config::EdgeNoLabel]) + // ) + // } + + #[inline] + pub(super) fn get_node(&self, zid: &ZenohId) -> Option<&Node> { + self.graph.node_weights().find(|weight| weight.zid == *zid) + } + + #[inline] + pub(super) fn get_idx(&self, zid: &ZenohId) -> Option { + self.graph + .node_indices() + .find(|idx| self.graph[*idx].zid == *zid) + } + + #[inline] + pub(super) fn get_link(&self, id: usize) -> Option<&Link> { + self.links.get(id) + } + + #[inline] + pub(super) fn get_link_from_zid(&self, zid: &ZenohId) -> Option<&Link> { + self.links.values().find(|link| link.zid == *zid) + } + + #[inline] + pub(super) fn get_local_context( + &self, + context: RoutingContext, + link_id: usize, + ) -> RoutingContext { + match self.get_link(link_id) { + Some(link) => match link.get_local_psid(&(context as u64)) { + Some(psid) => (*psid).try_into().unwrap_or(0), + None => { + log::error!( + "Cannot find local psid for context {} on link {}", + context, + link_id + ); + 0 + } + }, + None => { + log::error!("Cannot find link {}", link_id); + 0 + } + } + } + + fn add_node(&mut self, node: Node) -> NodeIndex { + let zid = node.zid; + let idx = self.graph.add_node(node); + for link in self.links.values_mut() { + if let Some((psid, _)) = link.mappings.iter().find(|(_, p)| **p == zid) { + link.local_mappings.insert(psid, idx.index() as u64); + } + } + idx + } + + fn make_link_state(&self, idx: NodeIndex, details: Details) -> LinkState { + let links = if details.links { + self.graph[idx] + .links + .iter() + .filter_map(|zid| { + if let Some(idx2) = self.get_idx(zid) { + Some(idx2.index().try_into().unwrap()) + } else { + log::error!( + "{} 
Internal error building link state: cannot get index of {}", + self.name, + zid + ); + None + } + }) + .collect() + } else { + vec![] + }; + LinkState { + psid: idx.index().try_into().unwrap(), + sn: self.graph[idx].sn, + zid: if details.zid { + Some(self.graph[idx].zid) + } else { + None + }, + whatami: self.graph[idx].whatami, + locators: if details.locators { + if idx == self.idx { + Some(self.runtime.get_locators()) + } else { + self.graph[idx].locators.clone() + } + } else { + None + }, + links, + } + } + + fn make_msg(&self, idxs: Vec<(NodeIndex, Details)>) -> Result { + let mut link_states = vec![]; + for (idx, details) in idxs { + link_states.push(self.make_link_state(idx, details)); + } + let codec = Zenoh080Routing::new(); + let mut buf = ZBuf::empty(); + codec.write(&mut buf.writer(), &LinkStateList { link_states })?; + Ok(NetworkBody::OAM(Oam { + id: OAM_LINKSTATE, + body: ZExtBody::ZBuf(buf), + ext_qos: oam::ext::QoSType::oam_default(), + ext_tstamp: None, + }) + .into()) + } + + fn send_on_link(&self, idxs: Vec<(NodeIndex, Details)>, transport: &TransportUnicast) { + if let Ok(msg) = self.make_msg(idxs) { + log::trace!("{} Send to {:?} {:?}", self.name, transport.get_zid(), msg); + if let Err(e) = transport.schedule(msg) { + log::debug!("{} Error sending LinkStateList: {}", self.name, e); + } + } else { + log::error!("Failed to encode Linkstate message"); + } + } + + fn send_on_links
<P>
(&self, idxs: Vec<(NodeIndex, Details)>, mut parameters: P) + where + P: FnMut(&Link) -> bool, + { + if let Ok(msg) = self.make_msg(idxs) { + for link in self.links.values() { + if parameters(link) { + log::trace!("{} Send to {} {:?}", self.name, link.zid, msg); + if let Err(e) = link.transport.schedule(msg.clone()) { + log::debug!("{} Error sending LinkStateList: {}", self.name, e); + } + } + } + } else { + log::error!("Failed to encode Linkstate message"); + } + } + + // Indicates if locators should be included when propagating Linkstate message + // from the given node. + // Returns true if gossip is enabled and if multihop gossip is enabled or + // the node is one of self neighbours. + fn propagate_locators(&self, idx: NodeIndex) -> bool { + self.gossip + && (self.gossip_multihop + || idx == self.idx + || self.links.values().any(|link| { + self.graph + .node_weight(idx) + .map(|node| link.zid == node.zid) + .unwrap_or(true) + })) + } + + fn update_edge(&mut self, idx1: NodeIndex, idx2: NodeIndex) { + use std::hash::Hasher; + let mut hasher = std::collections::hash_map::DefaultHasher::default(); + if self.graph[idx1].zid > self.graph[idx2].zid { + hasher.write(&self.graph[idx2].zid.to_le_bytes()); + hasher.write(&self.graph[idx1].zid.to_le_bytes()); + } else { + hasher.write(&self.graph[idx1].zid.to_le_bytes()); + hasher.write(&self.graph[idx2].zid.to_le_bytes()); + } + let weight = 100.0 + ((hasher.finish() as u32) as f64) / u32::MAX as f64; + self.graph.update_edge(idx1, idx2, weight); + } + + pub(super) fn link_states(&mut self, link_states: Vec, src: ZenohId) -> Changes { + log::trace!("{} Received from {} raw: {:?}", self.name, src, link_states); + + let graph = &self.graph; + let links = &mut self.links; + + let src_link = match links.values_mut().find(|link| link.zid == src) { + Some(link) => link, + None => { + log::error!( + "{} Received LinkStateList from unknown link {}", + self.name, + src + ); + return Changes { + updated_nodes: vec![], + removed_nodes: vec![], + }; + } + }; + + // register psid<->zid mappings & apply mapping to nodes + #[allow(clippy::needless_collect)] // need to release borrow on self + let link_states = link_states + .into_iter() + .filter_map(|link_state| { + if let Some(zid) = link_state.zid { + src_link.set_zid_mapping(link_state.psid, zid); + if let Some(idx) = graph.node_indices().find(|idx| graph[*idx].zid == zid) { + src_link.set_local_psid_mapping(link_state.psid, idx.index() as u64); + } + Some(( + zid, + link_state.whatami.unwrap_or(WhatAmI::Router), + link_state.locators, + link_state.sn, + link_state.links, + )) + } else { + match src_link.get_zid(&link_state.psid) { + Some(zid) => Some(( + *zid, + link_state.whatami.unwrap_or(WhatAmI::Router), + link_state.locators, + link_state.sn, + link_state.links, + )), + None => { + log::error!( + "Received LinkState from {} with unknown node mapping {}", + src, + link_state.psid + ); + None + } + } + } + }) + .collect::>(); + + // apply psid<->zid mapping to links + let src_link = self.get_link_from_zid(&src).unwrap(); + let link_states = link_states + .into_iter() + .map(|(zid, wai, locs, sn, links)| { + let links: Vec = links + .iter() + .filter_map(|l| { + if let Some(zid) = src_link.get_zid(l) { + Some(*zid) + } else { + log::error!( + "{} Received LinkState from {} with unknown link mapping {}", + self.name, + src, + l + ); + None + } + }) + .collect(); + (zid, wai, locs, sn, links) + }) + .collect::>(); + + // log::trace!( + // "{} Received from {} mapped: {:?}", + // self.name, + // src, + // 
link_states + // ); + for link_state in &link_states { + log::trace!( + "{} Received from {} mapped: {:?}", + self.name, + src, + link_state + ); + } + + if !self.full_linkstate { + let mut changes = Changes { + updated_nodes: vec![], + removed_nodes: vec![], + }; + for (zid, whatami, locators, sn, links) in link_states.into_iter() { + let idx = match self.get_idx(&zid) { + None => { + let idx = self.add_node(Node { + zid, + whatami: Some(whatami), + locators: locators.clone(), + sn, + links, + }); + changes.updated_nodes.push((idx, self.graph[idx].clone())); + locators.is_some().then_some(idx) + } + Some(idx) => { + let node = &mut self.graph[idx]; + let oldsn = node.sn; + (oldsn < sn) + .then(|| { + node.sn = sn; + node.links = links.clone(); + changes.updated_nodes.push((idx, node.clone())); + (node.locators != locators && locators.is_some()).then(|| { + node.locators = locators.clone(); + idx + }) + }) + .flatten() + } + }; + + if self.gossip { + if let Some(idx) = idx { + if self.gossip_multihop || self.links.values().any(|link| link.zid == zid) { + self.send_on_links( + vec![( + idx, + Details { + zid: true, + locators: true, + links: false, + }, + )], + |link| link.zid != zid, + ); + } + + if !self.autoconnect.is_empty() { + // Connect discovered peers + if task::block_on(self.runtime.manager().get_transport_unicast(&zid)) + .is_none() + && self.autoconnect.matches(whatami) + { + if let Some(locators) = locators { + let runtime = self.runtime.clone(); + self.runtime.spawn(async move { + // random backoff + async_std::task::sleep(std::time::Duration::from_millis( + rand::random::() % 100, + )) + .await; + runtime.connect_peer(&zid, &locators).await; + }); + } + } + } + } + } + } + return changes; + } + + // Add nodes to graph & filter out up to date states + let mut link_states = link_states + .into_iter() + .filter_map( + |(zid, whatami, locators, sn, links)| match self.get_idx(&zid) { + Some(idx) => { + let node = &mut self.graph[idx]; + let oldsn = node.sn; + if oldsn < sn { + node.sn = sn; + node.links = links.clone(); + if locators.is_some() { + node.locators = locators; + } + if oldsn == 0 { + Some((links, idx, true)) + } else { + Some((links, idx, false)) + } + } else { + None + } + } + None => { + let node = Node { + zid, + whatami: Some(whatami), + locators, + sn, + links: links.clone(), + }; + log::debug!("{} Add node (state) {}", self.name, zid); + let idx = self.add_node(node); + Some((links, idx, true)) + } + }, + ) + .collect::, NodeIndex, bool)>>(); + + // Add/remove edges from graph + let mut reintroduced_nodes = vec![]; + for (links, idx1, _) in &link_states { + for link in links { + if let Some(idx2) = self.get_idx(link) { + if self.graph[idx2].links.contains(&self.graph[*idx1].zid) { + log::trace!( + "{} Update edge (state) {} {}", + self.name, + self.graph[*idx1].zid, + self.graph[idx2].zid + ); + self.update_edge(*idx1, idx2); + } + } else { + let node = Node { + zid: *link, + whatami: None, + locators: None, + sn: 0, + links: vec![], + }; + log::debug!("{} Add node (reintroduced) {}", self.name, link.clone()); + let idx = self.add_node(node); + reintroduced_nodes.push((vec![], idx, true)); + } + } + let mut edges = vec![]; + let mut neighbors = self.graph.neighbors_undirected(*idx1).detach(); + while let Some(edge) = neighbors.next(&self.graph) { + edges.push(edge); + } + for (eidx, idx2) in edges { + if !links.contains(&self.graph[idx2].zid) { + log::trace!( + "{} Remove edge (state) {} {}", + self.name, + self.graph[*idx1].zid, + self.graph[idx2].zid + ); + 
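+                        // The updated link list no longer mentions this neighbour,
+                        // so the corresponding edge is removed from the graph.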
self.graph.remove_edge(eidx); + } + } + } + link_states.extend(reintroduced_nodes); + + let removed = self.remove_detached_nodes(); + let link_states = link_states + .into_iter() + .filter(|ls| !removed.iter().any(|(idx, _)| idx == &ls.1)) + .collect::, NodeIndex, bool)>>(); + + if !self.autoconnect.is_empty() { + // Connect discovered peers + for (_, idx, _) in &link_states { + let node = &self.graph[*idx]; + if let Some(whatami) = node.whatami { + if task::block_on(self.runtime.manager().get_transport_unicast(&node.zid)) + .is_none() + && self.autoconnect.matches(whatami) + { + if let Some(locators) = &node.locators { + let runtime = self.runtime.clone(); + let zid = node.zid; + let locators = locators.clone(); + self.runtime.spawn(async move { + // random backoff + async_std::task::sleep(std::time::Duration::from_millis( + rand::random::() % 100, + )) + .await; + runtime.connect_peer(&zid, &locators).await; + }); + } + } + } + } + } + + // Propagate link states + // Note: we need to send all states at once for each face + // to avoid premature node deletion on the other side + #[allow(clippy::type_complexity)] + if !link_states.is_empty() { + let (new_idxs, updated_idxs): ( + Vec<(Vec, NodeIndex, bool)>, + Vec<(Vec, NodeIndex, bool)>, + ) = link_states.into_iter().partition(|(_, _, new)| *new); + let new_idxs = new_idxs + .into_iter() + .map(|(_, idx1, _new_node)| { + ( + idx1, + Details { + zid: true, + locators: self.propagate_locators(idx1), + links: true, + }, + ) + }) + .collect::>(); + for link in self.links.values() { + if link.zid != src { + let updated_idxs: Vec<(NodeIndex, Details)> = updated_idxs + .clone() + .into_iter() + .filter_map(|(_, idx1, _)| { + if link.zid != self.graph[idx1].zid { + Some(( + idx1, + Details { + zid: false, + locators: self.propagate_locators(idx1), + links: true, + }, + )) + } else { + None + } + }) + .collect(); + if !new_idxs.is_empty() || !updated_idxs.is_empty() { + self.send_on_link( + [&new_idxs[..], &updated_idxs[..]].concat(), + &link.transport, + ); + } + } else if !new_idxs.is_empty() { + self.send_on_link(new_idxs.clone(), &link.transport); + } + } + } + Changes { + updated_nodes: vec![], + removed_nodes: removed, + } + } + + pub(super) fn add_link(&mut self, transport: TransportUnicast) -> usize { + let free_index = { + let mut i = 0; + while self.links.contains_key(i) { + i += 1; + } + i + }; + self.links.insert(free_index, Link::new(transport.clone())); + + let zid = transport.get_zid().unwrap(); + let whatami = transport.get_whatami().unwrap(); + + if self.full_linkstate || self.router_peers_failover_brokering { + let (idx, new) = match self.get_idx(&zid) { + Some(idx) => (idx, false), + None => { + log::debug!("{} Add node (link) {}", self.name, zid); + ( + self.add_node(Node { + zid, + whatami: Some(whatami), + locators: None, + sn: 0, + links: vec![], + }), + true, + ) + } + }; + if self.full_linkstate && self.graph[idx].links.contains(&self.graph[self.idx].zid) { + log::trace!("Update edge (link) {} {}", self.graph[self.idx].zid, zid); + self.update_edge(self.idx, idx); + } + self.graph[self.idx].links.push(zid); + self.graph[self.idx].sn += 1; + + // Send updated self linkstate on all existing links except new one + self.links + .values() + .filter(|link| { + link.zid != zid + && (self.full_linkstate + || link.transport.get_whatami().unwrap_or(WhatAmI::Peer) + == WhatAmI::Router) + }) + .for_each(|link| { + self.send_on_link( + if new || (!self.full_linkstate && !self.gossip_multihop) { + vec![ + ( + idx, + Details { + zid: 
true, + locators: false, + links: false, + }, + ), + ( + self.idx, + Details { + zid: false, + locators: self.propagate_locators(idx), + links: true, + }, + ), + ] + } else { + vec![( + self.idx, + Details { + zid: false, + locators: self.propagate_locators(idx), + links: true, + }, + )] + }, + &link.transport, + ) + }); + } + + // Send all nodes linkstate on new link + let idxs = self + .graph + .node_indices() + .filter_map(|idx| { + (self.full_linkstate + || self.gossip_multihop + || self.links.values().any(|link| link.zid == zid) + || (self.router_peers_failover_brokering + && idx == self.idx + && whatami == WhatAmI::Router)) + .then(|| { + ( + idx, + Details { + zid: true, + locators: self.propagate_locators(idx), + links: self.full_linkstate + || (self.router_peers_failover_brokering + && idx == self.idx + && whatami == WhatAmI::Router), + }, + ) + }) + }) + .collect(); + self.send_on_link(idxs, &transport); + free_index + } + + pub(super) fn remove_link(&mut self, zid: &ZenohId) -> Vec<(NodeIndex, Node)> { + log::trace!("{} remove_link {}", self.name, zid); + self.links.retain(|_, link| link.zid != *zid); + self.graph[self.idx].links.retain(|link| *link != *zid); + + if self.full_linkstate { + if let Some((edge, _)) = self + .get_idx(zid) + .and_then(|idx| self.graph.find_edge_undirected(self.idx, idx)) + { + self.graph.remove_edge(edge); + } + let removed = self.remove_detached_nodes(); + + self.graph[self.idx].sn += 1; + + self.send_on_links( + vec![( + self.idx, + Details { + zid: false, + locators: self.gossip, + links: true, + }, + )], + |_| true, + ); + + removed + } else { + if let Some(idx) = self.get_idx(zid) { + self.graph.remove_node(idx); + } + if self.router_peers_failover_brokering { + self.send_on_links( + vec![( + self.idx, + Details { + zid: false, + locators: self.gossip, + links: true, + }, + )], + |link| { + link.zid != *zid + && link.transport.get_whatami().unwrap_or(WhatAmI::Peer) + == WhatAmI::Router + }, + ); + } + vec![] + } + } + + fn remove_detached_nodes(&mut self) -> Vec<(NodeIndex, Node)> { + let mut dfs_stack = vec![self.idx]; + let mut visit_map = self.graph.visit_map(); + while let Some(node) = dfs_stack.pop() { + if visit_map.visit(node) { + for succzid in &self.graph[node].links { + if let Some(succ) = self.get_idx(succzid) { + if !visit_map.is_visited(&succ) { + dfs_stack.push(succ); + } + } + } + } + } + + let mut removed = vec![]; + for idx in self.graph.node_indices().collect::>() { + if !visit_map.is_visited(&idx) { + log::debug!("Remove node {}", &self.graph[idx].zid); + removed.push((idx, self.graph.remove_node(idx).unwrap())); + } + } + removed + } + + pub(super) fn compute_trees(&mut self) -> Vec> { + let indexes = self.graph.node_indices().collect::>(); + let max_idx = indexes.iter().max().unwrap(); + + let old_childs: Vec> = self.trees.iter().map(|t| t.childs.clone()).collect(); + + self.trees.clear(); + self.trees.resize_with(max_idx.index() + 1, || Tree { + parent: None, + childs: vec![], + directions: vec![], + }); + + for tree_root_idx in &indexes { + let paths = petgraph::algo::bellman_ford(&self.graph, *tree_root_idx).unwrap(); + + if tree_root_idx.index() == 0 { + self.distances = paths.distances; + } + + if log::log_enabled!(log::Level::Debug) { + let ps: Vec> = paths + .predecessors + .iter() + .enumerate() + .map(|(is, o)| { + o.map(|ip| { + format!( + "{} <- {}", + self.graph[ip].zid, + self.graph[NodeIndex::new(is)].zid + ) + }) + }) + .collect(); + log::debug!("Tree {} {:?}", self.graph[*tree_root_idx].zid, ps); + } + + 
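+            // Bellman-Ford predecessors define the tree: this node's parent in the
+            // tree rooted at tree_root_idx is its own predecessor, and its children
+            // are the indexes whose predecessor is this node.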
self.trees[tree_root_idx.index()].parent = paths.predecessors[self.idx.index()]; + + for idx in &indexes { + if let Some(parent_idx) = paths.predecessors[idx.index()] { + if parent_idx == self.idx { + self.trees[tree_root_idx.index()].childs.push(*idx); + } + } + } + + self.trees[tree_root_idx.index()] + .directions + .resize_with(max_idx.index() + 1, || None); + let mut dfs = petgraph::algo::DfsSpace::new(&self.graph); + for destination in &indexes { + if self.idx != *destination + && petgraph::algo::has_path_connecting( + &self.graph, + self.idx, + *destination, + Some(&mut dfs), + ) + { + let mut direction = None; + let mut current = *destination; + while let Some(parent) = paths.predecessors[current.index()] { + if parent == self.idx { + direction = Some(current); + break; + } else { + current = parent; + } + } + + self.trees[tree_root_idx.index()].directions[destination.index()] = + match direction { + Some(direction) => Some(direction), + None => self.trees[tree_root_idx.index()].parent, + }; + } + } + } + + let mut new_childs = Vec::with_capacity(self.trees.len()); + new_childs.resize(self.trees.len(), vec![]); + + for i in 0..new_childs.len() { + new_childs[i] = if i < old_childs.len() { + self.trees[i] + .childs + .iter() + .filter(|idx| !old_childs[i].contains(idx)) + .cloned() + .collect() + } else { + self.trees[i].childs.clone() + }; + } + + new_childs + } + + #[inline] + pub(super) fn get_links(&self, node: ZenohId) -> &[ZenohId] { + self.get_node(&node) + .map(|node| &node.links[..]) + .unwrap_or_default() + } +} + +#[inline] +pub(super) fn shared_nodes(net1: &Network, net2: &Network) -> Vec { + net1.graph + .node_references() + .filter_map(|(_, node1)| { + net2.graph + .node_references() + .any(|(_, node2)| node1.zid == node2.zid) + .then_some(node1.zid) + }) + .collect() +} diff --git a/zenoh/src/net/routing/hat/router/pubsub.rs b/zenoh/src/net/routing/hat/router/pubsub.rs new file mode 100644 index 0000000000..0405107f86 --- /dev/null +++ b/zenoh/src/net/routing/hat/router/pubsub.rs @@ -0,0 +1,1619 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
+// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +use crate::net::routing::dispatcher::face::FaceState; +use crate::net::routing::dispatcher::pubsub::*; +use crate::net::routing::dispatcher::resource::{Resource, RoutingContext, SessionContext}; +use crate::net::routing::dispatcher::tables::{Tables, TablesLock}; +use crate::net::routing::PREFIX_LIVELINESS; +use crate::net::routing::hat::HatPubSubTrait; +use super::network::Network; +use super::{get_peer, get_router, HatCode, HatContext, HatFace, HatTables}; +use crate::net::routing::dispatcher::tables::{DataRoutes, PullCaches, Route, RoutingExpr}; +use super::{face_hat, face_hat_mut, hat, hat_mut, res_hat, res_hat_mut}; +use petgraph::graph::NodeIndex; +use std::borrow::Cow; +use std::collections::{HashMap, HashSet}; +use std::sync::{Arc, RwLockReadGuard}; +use zenoh_core::zread; +use zenoh_protocol::core::key_expr::OwnedKeyExpr; +use zenoh_protocol::{ + core::{key_expr::keyexpr, Reliability, WhatAmI, WireExpr, ZenohId}, + network::declare::{ + common::ext::WireExprType, ext, subscriber::ext::SubscriberInfo, Declare, DeclareBody, + DeclareSubscriber, Mode, UndeclareSubscriber, + }, +}; +use zenoh_sync::get_mut_unchecked; + +#[inline] +fn send_sourced_subscription_to_net_childs( + tables: &Tables, + net: &Network, + childs: &[NodeIndex], + res: &Arc, + src_face: Option<&Arc>, + sub_info: &SubscriberInfo, + routing_context: RoutingContext, +) { + for child in childs { + if net.graph.contains_node(*child) { + match tables.get_face(&net.graph[*child].zid).cloned() { + Some(mut someface) => { + if src_face.is_none() || someface.id != src_face.unwrap().id { + let key_expr = Resource::decl_key(res, &mut someface); + + log::debug!("Send subscription {} on {}", res.expr(), someface); + + someface.primitives.send_declare(Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType { + node_id: routing_context, + }, + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id: 0, // TODO + wire_expr: key_expr, + ext_info: *sub_info, + }), + }); + } + } + None => log::trace!("Unable to find face for zid {}", net.graph[*child].zid), + } + } + } +} + +#[inline] +fn propagate_simple_subscription_to( + tables: &mut Tables, + dst_face: &mut Arc, + res: &Arc, + sub_info: &SubscriberInfo, + src_face: &mut Arc, + full_peer_net: bool, +) { + if (src_face.id != dst_face.id || res.expr().starts_with(PREFIX_LIVELINESS)) + && !face_hat!(dst_face).local_subs.contains(res) + && match tables.whatami { + WhatAmI::Router => { + if full_peer_net { + dst_face.whatami == WhatAmI::Client + } else { + dst_face.whatami != WhatAmI::Router + && (src_face.whatami != WhatAmI::Peer + || dst_face.whatami != WhatAmI::Peer + || hat!(tables).failover_brokering(src_face.zid, dst_face.zid)) + } + } + WhatAmI::Peer => { + if full_peer_net { + dst_face.whatami == WhatAmI::Client + } else { + src_face.whatami == WhatAmI::Client || dst_face.whatami == WhatAmI::Client + } + } + _ => src_face.whatami == WhatAmI::Client || dst_face.whatami == WhatAmI::Client, + } + { + face_hat_mut!(dst_face).local_subs.insert(res.clone()); + let key_expr = Resource::decl_key(res, dst_face); + dst_face.primitives.send_declare(Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id: 0, // TODO + wire_expr: key_expr, + ext_info: *sub_info, + }), + }); + } +} + +fn 
propagate_simple_subscription( + tables: &mut Tables, + res: &Arc, + sub_info: &SubscriberInfo, + src_face: &mut Arc, +) { + let full_peer_net = hat!(tables).full_net(WhatAmI::Peer); + for mut dst_face in tables + .faces + .values() + .cloned() + .collect::>>() + { + propagate_simple_subscription_to( + tables, + &mut dst_face, + res, + sub_info, + src_face, + full_peer_net, + ); + } +} + +fn propagate_sourced_subscription( + tables: &Tables, + res: &Arc, + sub_info: &SubscriberInfo, + src_face: Option<&Arc>, + source: &ZenohId, + net_type: WhatAmI, +) { + let net = hat!(tables).get_net(net_type).unwrap(); + match net.get_idx(source) { + Some(tree_sid) => { + if net.trees.len() > tree_sid.index() { + send_sourced_subscription_to_net_childs( + tables, + net, + &net.trees[tree_sid.index()].childs, + res, + src_face, + sub_info, + tree_sid.index() as RoutingContext, + ); + } else { + log::trace!( + "Propagating sub {}: tree for node {} sid:{} not yet ready", + res.expr(), + tree_sid.index(), + source + ); + } + } + None => log::error!( + "Error propagating sub {}: cannot get index of {}!", + res.expr(), + source + ), + } +} + +fn register_router_subscription( + tables: &mut Tables, + face: &mut Arc, + res: &mut Arc, + sub_info: &SubscriberInfo, + router: ZenohId, +) { + if !res_hat!(res).router_subs.contains(&router) { + // Register router subscription + { + log::debug!( + "Register router subscription {} (router: {})", + res.expr(), + router + ); + res_hat_mut!(res).router_subs.insert(router); + hat_mut!(tables).router_subs.insert(res.clone()); + } + + // Propagate subscription to routers + propagate_sourced_subscription(tables, res, sub_info, Some(face), &router, WhatAmI::Router); + } + // Propagate subscription to peers + if hat!(tables).full_net(WhatAmI::Peer) && face.whatami != WhatAmI::Peer { + register_peer_subscription(tables, face, res, sub_info, tables.zid) + } + + // Propagate subscription to clients + propagate_simple_subscription(tables, res, sub_info, face); +} + +fn declare_router_subscription( + tables: &TablesLock, + rtables: RwLockReadGuard, + face: &mut Arc, + expr: &WireExpr, + sub_info: &SubscriberInfo, + router: ZenohId, +) { + match rtables + .get_mapping(face, &expr.scope, expr.mapping) + .cloned() + { + Some(mut prefix) => { + let res = Resource::get_resource(&prefix, &expr.suffix); + let (mut res, mut wtables) = + if res.as_ref().map(|r| r.context.is_some()).unwrap_or(false) { + drop(rtables); + let wtables = zwrite!(tables.tables); + (res.unwrap(), wtables) + } else { + let mut fullexpr = prefix.expr(); + fullexpr.push_str(expr.suffix.as_ref()); + let mut matches = keyexpr::new(fullexpr.as_str()) + .map(|ke| Resource::get_matches(&rtables, ke)) + .unwrap_or_default(); + drop(rtables); + let mut wtables = zwrite!(tables.tables); + let mut res = + Resource::make_resource(&mut wtables, &mut prefix, expr.suffix.as_ref()); + matches.push(Arc::downgrade(&res)); + Resource::match_resource(&wtables, &mut res, matches); + (res, wtables) + }; + register_router_subscription(&mut wtables, face, &mut res, sub_info, router); + disable_matches_data_routes(&mut wtables, &mut res); + drop(wtables); + + let rtables = zread!(tables.tables); + let matches_data_routes = compute_matches_data_routes_(&rtables, &res); + drop(rtables); + + let wtables = zwrite!(tables.tables); + for (mut res, data_routes) in matches_data_routes { + get_mut_unchecked(&mut res) + .context_mut() + .update_data_routes(data_routes); + } + drop(wtables); + } + None => log::error!( + "Declare router subscription 
for unknown scope {}!", + expr.scope + ), + } +} + +fn register_peer_subscription( + tables: &mut Tables, + face: &mut Arc, + res: &mut Arc, + sub_info: &SubscriberInfo, + peer: ZenohId, +) { + if !res_hat!(res).peer_subs.contains(&peer) { + // Register peer subscription + { + log::debug!("Register peer subscription {} (peer: {})", res.expr(), peer); + res_hat_mut!(res).peer_subs.insert(peer); + hat_mut!(tables).peer_subs.insert(res.clone()); + } + + // Propagate subscription to peers + propagate_sourced_subscription(tables, res, sub_info, Some(face), &peer, WhatAmI::Peer); + } + + if tables.whatami == WhatAmI::Peer { + // Propagate subscription to clients + propagate_simple_subscription(tables, res, sub_info, face); + } +} + +fn declare_peer_subscription( + tables: &TablesLock, + rtables: RwLockReadGuard, + face: &mut Arc, + expr: &WireExpr, + sub_info: &SubscriberInfo, + peer: ZenohId, +) { + match rtables + .get_mapping(face, &expr.scope, expr.mapping) + .cloned() + { + Some(mut prefix) => { + let res = Resource::get_resource(&prefix, &expr.suffix); + let (mut res, mut wtables) = + if res.as_ref().map(|r| r.context.is_some()).unwrap_or(false) { + drop(rtables); + let wtables = zwrite!(tables.tables); + (res.unwrap(), wtables) + } else { + let mut fullexpr = prefix.expr(); + fullexpr.push_str(expr.suffix.as_ref()); + let mut matches = keyexpr::new(fullexpr.as_str()) + .map(|ke| Resource::get_matches(&rtables, ke)) + .unwrap_or_default(); + drop(rtables); + let mut wtables = zwrite!(tables.tables); + let mut res = + Resource::make_resource(&mut wtables, &mut prefix, expr.suffix.as_ref()); + matches.push(Arc::downgrade(&res)); + Resource::match_resource(&wtables, &mut res, matches); + (res, wtables) + }; + register_peer_subscription(&mut wtables, face, &mut res, sub_info, peer); + if wtables.whatami == WhatAmI::Router { + let mut propa_sub_info = *sub_info; + propa_sub_info.mode = Mode::Push; + let zid = wtables.zid; + register_router_subscription(&mut wtables, face, &mut res, &propa_sub_info, zid); + } + disable_matches_data_routes(&mut wtables, &mut res); + drop(wtables); + + let rtables = zread!(tables.tables); + let matches_data_routes = compute_matches_data_routes_(&rtables, &res); + drop(rtables); + + let wtables = zwrite!(tables.tables); + for (mut res, data_routes) in matches_data_routes { + get_mut_unchecked(&mut res) + .context_mut() + .update_data_routes(data_routes); + } + drop(wtables); + } + None => log::error!( + "Declare router subscription for unknown scope {}!", + expr.scope + ), + } +} + +fn register_client_subscription( + _tables: &mut Tables, + face: &mut Arc, + res: &mut Arc, + sub_info: &SubscriberInfo, +) { + // Register subscription + { + let res = get_mut_unchecked(res); + log::debug!("Register subscription {} for {}", res.expr(), face); + match res.session_ctxs.get_mut(&face.id) { + Some(ctx) => match &ctx.subs { + Some(info) => { + if Mode::Pull == info.mode { + get_mut_unchecked(ctx).subs = Some(*sub_info); + } + } + None => { + get_mut_unchecked(ctx).subs = Some(*sub_info); + } + }, + None => { + res.session_ctxs.insert( + face.id, + Arc::new(SessionContext { + face: face.clone(), + local_expr_id: None, + remote_expr_id: None, + subs: Some(*sub_info), + qabl: None, + last_values: HashMap::new(), + }), + ); + } + } + } + face_hat_mut!(face).remote_subs.insert(res.clone()); +} + +fn declare_client_subscription( + tables: &TablesLock, + rtables: RwLockReadGuard, + face: &mut Arc, + expr: &WireExpr, + sub_info: &SubscriberInfo, +) { + log::debug!("Register client 
subscription"); + match rtables + .get_mapping(face, &expr.scope, expr.mapping) + .cloned() + { + Some(mut prefix) => { + let res = Resource::get_resource(&prefix, &expr.suffix); + let (mut res, mut wtables) = + if res.as_ref().map(|r| r.context.is_some()).unwrap_or(false) { + drop(rtables); + let wtables = zwrite!(tables.tables); + (res.unwrap(), wtables) + } else { + let mut fullexpr = prefix.expr(); + fullexpr.push_str(expr.suffix.as_ref()); + let mut matches = keyexpr::new(fullexpr.as_str()) + .map(|ke| Resource::get_matches(&rtables, ke)) + .unwrap_or_default(); + drop(rtables); + let mut wtables = zwrite!(tables.tables); + let mut res = + Resource::make_resource(&mut wtables, &mut prefix, expr.suffix.as_ref()); + matches.push(Arc::downgrade(&res)); + Resource::match_resource(&wtables, &mut res, matches); + (res, wtables) + }; + + register_client_subscription(&mut wtables, face, &mut res, sub_info); + let mut propa_sub_info = *sub_info; + propa_sub_info.mode = Mode::Push; + match wtables.whatami { + WhatAmI::Router => { + let zid = wtables.zid; + register_router_subscription( + &mut wtables, + face, + &mut res, + &propa_sub_info, + zid, + ); + } + WhatAmI::Peer => { + if hat!(wtables).full_net(WhatAmI::Peer) { + let zid = wtables.zid; + register_peer_subscription( + &mut wtables, + face, + &mut res, + &propa_sub_info, + zid, + ); + } else { + propagate_simple_subscription(&mut wtables, &res, &propa_sub_info, face); + // This introduced a buffer overflow on windows + // TODO: Let's deactivate this on windows until Fixed + #[cfg(not(windows))] + for mcast_group in &wtables.mcast_groups { + mcast_group.primitives.send_declare(Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id: 0, // TODO + wire_expr: res.expr().into(), + ext_info: *sub_info, + }), + }) + } + } + } + _ => { + propagate_simple_subscription(&mut wtables, &res, &propa_sub_info, face); + // This introduced a buffer overflow on windows + // TODO: Let's deactivate this on windows until Fixed + #[cfg(not(windows))] + for mcast_group in &wtables.mcast_groups { + mcast_group.primitives.send_declare(Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id: 0, // TODO + wire_expr: res.expr().into(), + ext_info: *sub_info, + }), + }) + } + } + } + disable_matches_data_routes(&mut wtables, &mut res); + drop(wtables); + + let rtables = zread!(tables.tables); + let matches_data_routes = compute_matches_data_routes_(&rtables, &res); + drop(rtables); + + let wtables = zwrite!(tables.tables); + for (mut res, data_routes) in matches_data_routes { + get_mut_unchecked(&mut res) + .context_mut() + .update_data_routes(data_routes); + } + drop(wtables); + } + None => log::error!("Declare subscription for unknown scope {}!", expr.scope), + } +} + +#[inline] +fn remote_router_subs(tables: &Tables, res: &Arc) -> bool { + res.context.is_some() + && res_hat!(res) + .router_subs + .iter() + .any(|peer| peer != &tables.zid) +} + +#[inline] +fn remote_peer_subs(tables: &Tables, res: &Arc) -> bool { + res.context.is_some() + && res_hat!(res) + .peer_subs + .iter() + .any(|peer| peer != &tables.zid) +} + +#[inline] +fn client_subs(res: &Arc) -> Vec> { + res.session_ctxs + .values() + .filter_map(|ctx| { + if ctx.subs.is_some() { + Some(ctx.face.clone()) + } else { + None + } + }) + .collect() +} + +#[inline] 
+fn send_forget_sourced_subscription_to_net_childs( + tables: &Tables, + net: &Network, + childs: &[NodeIndex], + res: &Arc, + src_face: Option<&Arc>, + routing_context: Option, +) { + for child in childs { + if net.graph.contains_node(*child) { + match tables.get_face(&net.graph[*child].zid).cloned() { + Some(mut someface) => { + if src_face.is_none() || someface.id != src_face.unwrap().id { + let wire_expr = Resource::decl_key(res, &mut someface); + + log::debug!("Send forget subscription {} on {}", res.expr(), someface); + + someface.primitives.send_declare(Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType { + node_id: routing_context.unwrap_or(0), + }, + body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { + id: 0, // TODO + ext_wire_expr: WireExprType { wire_expr }, + }), + }); + } + } + None => log::trace!("Unable to find face for zid {}", net.graph[*child].zid), + } + } + } +} + +fn propagate_forget_simple_subscription(tables: &mut Tables, res: &Arc) { + for face in tables.faces.values_mut() { + if face_hat!(face).local_subs.contains(res) { + let wire_expr = Resource::get_best_key(res, "", face.id); + face.primitives.send_declare(Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { + id: 0, // TODO + ext_wire_expr: WireExprType { wire_expr }, + }), + }); + face_hat_mut!(face).local_subs.remove(res); + } + } +} + +fn propagate_forget_simple_subscription_to_peers(tables: &mut Tables, res: &Arc) { + if !hat!(tables).full_net(WhatAmI::Peer) + && res_hat!(res).router_subs.len() == 1 + && res_hat!(res).router_subs.contains(&tables.zid) + { + for mut face in tables + .faces + .values() + .cloned() + .collect::>>() + { + if face.whatami == WhatAmI::Peer + && face_hat!(face).local_subs.contains(res) + && !res.session_ctxs.values().any(|s| { + face.zid != s.face.zid + && s.subs.is_some() + && (s.face.whatami == WhatAmI::Client + || (s.face.whatami == WhatAmI::Peer + && hat!(tables).failover_brokering(s.face.zid, face.zid))) + }) + { + let wire_expr = Resource::get_best_key(res, "", face.id); + face.primitives.send_declare(Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { + id: 0, // TODO + ext_wire_expr: WireExprType { wire_expr }, + }), + }); + + face_hat_mut!(&mut face).local_subs.remove(res); + } + } + } +} + +fn propagate_forget_sourced_subscription( + tables: &Tables, + res: &Arc, + src_face: Option<&Arc>, + source: &ZenohId, + net_type: WhatAmI, +) { + let net = hat!(tables).get_net(net_type).unwrap(); + match net.get_idx(source) { + Some(tree_sid) => { + if net.trees.len() > tree_sid.index() { + send_forget_sourced_subscription_to_net_childs( + tables, + net, + &net.trees[tree_sid.index()].childs, + res, + src_face, + Some(tree_sid.index() as RoutingContext), + ); + } else { + log::trace!( + "Propagating forget sub {}: tree for node {} sid:{} not yet ready", + res.expr(), + tree_sid.index(), + source + ); + } + } + None => log::error!( + "Error propagating forget sub {}: cannot get index of {}!", + res.expr(), + source + ), + } +} + +fn unregister_router_subscription(tables: &mut Tables, res: &mut Arc, router: &ZenohId) { + log::debug!( + "Unregister router subscription {} (router: {})", + res.expr(), + router + ); + res_hat_mut!(res).router_subs.retain(|sub| sub != router); 
+ + if res_hat!(res).router_subs.is_empty() { + hat_mut!(tables) + .router_subs + .retain(|sub| !Arc::ptr_eq(sub, res)); + + if hat_mut!(tables).full_net(WhatAmI::Peer) { + undeclare_peer_subscription(tables, None, res, &tables.zid.clone()); + } + propagate_forget_simple_subscription(tables, res); + } + + propagate_forget_simple_subscription_to_peers(tables, res); +} + +fn undeclare_router_subscription( + tables: &mut Tables, + face: Option<&Arc>, + res: &mut Arc, + router: &ZenohId, +) { + if res_hat!(res).router_subs.contains(router) { + unregister_router_subscription(tables, res, router); + propagate_forget_sourced_subscription(tables, res, face, router, WhatAmI::Router); + } +} + +fn forget_router_subscription( + tables: &TablesLock, + rtables: RwLockReadGuard, + face: &mut Arc, + expr: &WireExpr, + router: &ZenohId, +) { + match rtables.get_mapping(face, &expr.scope, expr.mapping) { + Some(prefix) => match Resource::get_resource(prefix, expr.suffix.as_ref()) { + Some(mut res) => { + drop(rtables); + let mut wtables = zwrite!(tables.tables); + undeclare_router_subscription(&mut wtables, Some(face), &mut res, router); + disable_matches_data_routes(&mut wtables, &mut res); + drop(wtables); + + let rtables = zread!(tables.tables); + let matches_data_routes = compute_matches_data_routes_(&rtables, &res); + drop(rtables); + let wtables = zwrite!(tables.tables); + for (mut res, data_routes) in matches_data_routes { + get_mut_unchecked(&mut res) + .context_mut() + .update_data_routes(data_routes); + } + Resource::clean(&mut res); + drop(wtables); + } + None => log::error!("Undeclare unknown router subscription!"), + }, + None => log::error!("Undeclare router subscription with unknown scope!"), + } +} + +fn unregister_peer_subscription(tables: &mut Tables, res: &mut Arc, peer: &ZenohId) { + log::debug!( + "Unregister peer subscription {} (peer: {})", + res.expr(), + peer + ); + res_hat_mut!(res).peer_subs.retain(|sub| sub != peer); + + if res_hat!(res).peer_subs.is_empty() { + hat_mut!(tables) + .peer_subs + .retain(|sub| !Arc::ptr_eq(sub, res)); + + if tables.whatami == WhatAmI::Peer { + propagate_forget_simple_subscription(tables, res); + } + } +} + +fn undeclare_peer_subscription( + tables: &mut Tables, + face: Option<&Arc>, + res: &mut Arc, + peer: &ZenohId, +) { + if res_hat!(res).peer_subs.contains(peer) { + unregister_peer_subscription(tables, res, peer); + propagate_forget_sourced_subscription(tables, res, face, peer, WhatAmI::Peer); + } +} + +fn forget_peer_subscription( + tables: &TablesLock, + rtables: RwLockReadGuard, + face: &mut Arc, + expr: &WireExpr, + peer: &ZenohId, +) { + match rtables.get_mapping(face, &expr.scope, expr.mapping) { + Some(prefix) => match Resource::get_resource(prefix, expr.suffix.as_ref()) { + Some(mut res) => { + drop(rtables); + let mut wtables = zwrite!(tables.tables); + undeclare_peer_subscription(&mut wtables, Some(face), &mut res, peer); + if wtables.whatami == WhatAmI::Router { + let client_subs = res.session_ctxs.values().any(|ctx| ctx.subs.is_some()); + let peer_subs = remote_peer_subs(&wtables, &res); + let zid = wtables.zid; + if !client_subs && !peer_subs { + undeclare_router_subscription(&mut wtables, None, &mut res, &zid); + } + } + disable_matches_data_routes(&mut wtables, &mut res); + drop(wtables); + + let rtables = zread!(tables.tables); + let matches_data_routes = compute_matches_data_routes_(&rtables, &res); + drop(rtables); + let wtables = zwrite!(tables.tables); + for (mut res, data_routes) in matches_data_routes { + 
get_mut_unchecked(&mut res) + .context_mut() + .update_data_routes(data_routes); + } + Resource::clean(&mut res); + drop(wtables); + } + None => log::error!("Undeclare unknown peer subscription!"), + }, + None => log::error!("Undeclare peer subscription with unknown scope!"), + } +} + +pub(super) fn undeclare_client_subscription( + tables: &mut Tables, + face: &mut Arc, + res: &mut Arc, +) { + log::debug!("Unregister client subscription {} for {}", res.expr(), face); + if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) { + get_mut_unchecked(ctx).subs = None; + } + face_hat_mut!(face).remote_subs.remove(res); + + let mut client_subs = client_subs(res); + let router_subs = remote_router_subs(tables, res); + let peer_subs = remote_peer_subs(tables, res); + match tables.whatami { + WhatAmI::Router => { + if client_subs.is_empty() && !peer_subs { + undeclare_router_subscription(tables, None, res, &tables.zid.clone()); + } else { + propagate_forget_simple_subscription_to_peers(tables, res); + } + } + WhatAmI::Peer => { + if client_subs.is_empty() { + if hat!(tables).full_net(WhatAmI::Peer) { + undeclare_peer_subscription(tables, None, res, &tables.zid.clone()); + } else { + propagate_forget_simple_subscription(tables, res); + } + } + } + _ => { + if client_subs.is_empty() { + propagate_forget_simple_subscription(tables, res); + } + } + } + if client_subs.len() == 1 && !router_subs && !peer_subs { + let face = &mut client_subs[0]; + if face_hat!(face).local_subs.contains(res) + && !(face.whatami == WhatAmI::Client && res.expr().starts_with(PREFIX_LIVELINESS)) + { + let wire_expr = Resource::get_best_key(res, "", face.id); + face.primitives.send_declare(Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { + id: 0, // TODO + ext_wire_expr: WireExprType { wire_expr }, + }), + }); + + face_hat_mut!(face).local_subs.remove(res); + } + } +} + +fn forget_client_subscription( + tables: &TablesLock, + rtables: RwLockReadGuard, + face: &mut Arc, + expr: &WireExpr, +) { + match rtables.get_mapping(face, &expr.scope, expr.mapping) { + Some(prefix) => match Resource::get_resource(prefix, expr.suffix.as_ref()) { + Some(mut res) => { + drop(rtables); + let mut wtables = zwrite!(tables.tables); + undeclare_client_subscription(&mut wtables, face, &mut res); + disable_matches_data_routes(&mut wtables, &mut res); + drop(wtables); + + let rtables = zread!(tables.tables); + let matches_data_routes = compute_matches_data_routes_(&rtables, &res); + drop(rtables); + + let wtables = zwrite!(tables.tables); + for (mut res, data_routes) in matches_data_routes { + get_mut_unchecked(&mut res) + .context_mut() + .update_data_routes(data_routes); + } + Resource::clean(&mut res); + drop(wtables); + } + None => log::error!("Undeclare unknown subscription!"), + }, + None => log::error!("Undeclare subscription with unknown scope!"), + } +} + +pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { + let sub_info = SubscriberInfo { + reliability: Reliability::Reliable, // @TODO + mode: Mode::Push, + }; + match tables.whatami { + WhatAmI::Router => { + if face.whatami == WhatAmI::Client { + for sub in &hat!(tables).router_subs { + face_hat_mut!(face).local_subs.insert(sub.clone()); + let key_expr = Resource::decl_key(sub, face); + face.primitives.send_declare(Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + 
body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id: 0, // TODO + wire_expr: key_expr, + ext_info: sub_info, + }), + }); + } + } else if face.whatami == WhatAmI::Peer && !hat!(tables).full_net(WhatAmI::Peer) { + for sub in &hat!(tables).router_subs { + if sub.context.is_some() + && (res_hat!(sub).router_subs.iter().any(|r| *r != tables.zid) + || sub.session_ctxs.values().any(|s| { + s.subs.is_some() + && (s.face.whatami == WhatAmI::Client + || (s.face.whatami == WhatAmI::Peer + && hat!(tables) + .failover_brokering(s.face.zid, face.zid))) + })) + { + face_hat_mut!(face).local_subs.insert(sub.clone()); + let key_expr = Resource::decl_key(sub, face); + face.primitives.send_declare(Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id: 0, // TODO + wire_expr: key_expr, + ext_info: sub_info, + }), + }); + } + } + } + } + WhatAmI::Peer => { + if hat!(tables).full_net(WhatAmI::Peer) { + if face.whatami == WhatAmI::Client { + for sub in &hat!(tables).peer_subs { + face_hat_mut!(face).local_subs.insert(sub.clone()); + let key_expr = Resource::decl_key(sub, face); + face.primitives.send_declare(Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id: 0, // TODO + wire_expr: key_expr, + ext_info: sub_info, + }), + }); + } + } + } else { + for src_face in tables + .faces + .values() + .cloned() + .collect::>>() + { + for sub in &face_hat!(src_face).remote_subs { + propagate_simple_subscription_to( + tables, + face, + sub, + &sub_info, + &mut src_face.clone(), + false, + ); + } + } + } + } + WhatAmI::Client => { + for src_face in tables + .faces + .values() + .cloned() + .collect::>>() + { + for sub in &face_hat!(src_face).remote_subs { + propagate_simple_subscription_to( + tables, + face, + sub, + &sub_info, + &mut src_face.clone(), + false, + ); + } + } + } + } +} + +pub(super) fn pubsub_remove_node(tables: &mut Tables, node: &ZenohId, net_type: WhatAmI) { + match net_type { + WhatAmI::Router => { + for mut res in hat!(tables) + .router_subs + .iter() + .filter(|res| res_hat!(res).router_subs.contains(node)) + .cloned() + .collect::>>() + { + unregister_router_subscription(tables, &mut res, node); + + let matches_data_routes = compute_matches_data_routes_(tables, &res); + for (mut res, data_routes) in matches_data_routes { + get_mut_unchecked(&mut res) + .context_mut() + .update_data_routes(data_routes); + } + Resource::clean(&mut res) + } + } + WhatAmI::Peer => { + for mut res in hat!(tables) + .peer_subs + .iter() + .filter(|res| res_hat!(res).peer_subs.contains(node)) + .cloned() + .collect::>>() + { + unregister_peer_subscription(tables, &mut res, node); + + if tables.whatami == WhatAmI::Router { + let client_subs = res.session_ctxs.values().any(|ctx| ctx.subs.is_some()); + let peer_subs = remote_peer_subs(tables, &res); + if !client_subs && !peer_subs { + undeclare_router_subscription(tables, None, &mut res, &tables.zid.clone()); + } + } + + // compute_matches_data_routes(tables, &mut res); + let matches_data_routes = compute_matches_data_routes_(tables, &res); + for (mut res, data_routes) in matches_data_routes { + get_mut_unchecked(&mut res) + .context_mut() + .update_data_routes(data_routes); + } + Resource::clean(&mut res) + } + } + _ => (), + } +} + +pub(super) fn pubsub_tree_change( + tables: &mut Tables, + new_childs: &[Vec], + 
net_type: WhatAmI, +) { + // propagate subs to new childs + for (tree_sid, tree_childs) in new_childs.iter().enumerate() { + if !tree_childs.is_empty() { + let net = hat!(tables).get_net(net_type).unwrap(); + let tree_idx = NodeIndex::new(tree_sid); + if net.graph.contains_node(tree_idx) { + let tree_id = net.graph[tree_idx].zid; + + let subs_res = match net_type { + WhatAmI::Router => &hat!(tables).router_subs, + _ => &hat!(tables).peer_subs, + }; + + for res in subs_res { + let subs = match net_type { + WhatAmI::Router => &res_hat!(res).router_subs, + _ => &res_hat!(res).peer_subs, + }; + for sub in subs { + if *sub == tree_id { + let sub_info = SubscriberInfo { + reliability: Reliability::Reliable, // @TODO + mode: Mode::Push, + }; + send_sourced_subscription_to_net_childs( + tables, + net, + tree_childs, + res, + None, + &sub_info, + tree_sid as RoutingContext, + ); + } + } + } + } + } + } + + // recompute routes + compute_data_routes_from(tables, &mut tables.root_res.clone()); +} + +pub(super) fn pubsub_linkstate_change(tables: &mut Tables, zid: &ZenohId, links: &[ZenohId]) { + if let Some(src_face) = tables.get_face(zid).cloned() { + if hat!(tables).router_peers_failover_brokering + && tables.whatami == WhatAmI::Router + && src_face.whatami == WhatAmI::Peer + { + for res in &face_hat!(src_face).remote_subs { + let client_subs = res + .session_ctxs + .values() + .any(|ctx| ctx.face.whatami == WhatAmI::Client && ctx.subs.is_some()); + if !remote_router_subs(tables, res) && !client_subs { + for ctx in get_mut_unchecked(&mut res.clone()) + .session_ctxs + .values_mut() + { + let dst_face = &mut get_mut_unchecked(ctx).face; + if dst_face.whatami == WhatAmI::Peer && src_face.zid != dst_face.zid { + if face_hat!(dst_face).local_subs.contains(res) { + let forget = !HatTables::failover_brokering_to(links, dst_face.zid) + && { + let ctx_links = hat!(tables) + .peers_net + .as_ref() + .map(|net| net.get_links(dst_face.zid)) + .unwrap_or_else(|| &[]); + res.session_ctxs.values().any(|ctx2| { + ctx2.face.whatami == WhatAmI::Peer + && ctx2.subs.is_some() + && HatTables::failover_brokering_to( + ctx_links, + ctx2.face.zid, + ) + }) + }; + if forget { + let wire_expr = Resource::get_best_key(res, "", dst_face.id); + dst_face.primitives.send_declare(Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::UndeclareSubscriber( + UndeclareSubscriber { + id: 0, // TODO + ext_wire_expr: WireExprType { wire_expr }, + }, + ), + }); + + face_hat_mut!(dst_face).local_subs.remove(res); + } + } else if HatTables::failover_brokering_to(links, ctx.face.zid) { + let dst_face = &mut get_mut_unchecked(ctx).face; + face_hat_mut!(dst_face).local_subs.insert(res.clone()); + let key_expr = Resource::decl_key(res, dst_face); + let sub_info = SubscriberInfo { + reliability: Reliability::Reliable, // TODO + mode: Mode::Push, + }; + dst_face.primitives.send_declare(Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id: 0, // TODO + wire_expr: key_expr, + ext_info: sub_info, + }), + }); + } + } + } + } + } + } + } +} + +#[inline] +fn insert_faces_for_subs( + route: &mut Route, + expr: &RoutingExpr, + tables: &Tables, + net: &Network, + source: RoutingContext, + subs: &HashSet, +) { + if net.trees.len() > source as usize { + for sub in subs { + if let Some(sub_idx) = net.get_idx(sub) { + if net.trees[source as 
usize].directions.len() > sub_idx.index() { + if let Some(direction) = net.trees[source as usize].directions[sub_idx.index()] + { + if net.graph.contains_node(direction) { + if let Some(face) = tables.get_face(&net.graph[direction].zid) { + route.entry(face.id).or_insert_with(|| { + let key_expr = + Resource::get_best_key(expr.prefix, expr.suffix, face.id); + (face.clone(), key_expr.to_owned(), source) + }); + } + } + } + } + } + } + } else { + log::trace!("Tree for node sid:{} not yet ready", source); + } +} + +impl HatPubSubTrait for HatCode { + fn declare_subscription( + &self, + tables: &TablesLock, + face: &mut Arc, + expr: &WireExpr, + sub_info: &SubscriberInfo, + node_id: RoutingContext, + ) { + let rtables = zread!(tables.tables); + match (rtables.whatami, face.whatami) { + (WhatAmI::Router, WhatAmI::Router) => { + if let Some(router) = get_router(&rtables, face, node_id) { + declare_router_subscription(tables, rtables, face, expr, sub_info, router) + } + } + (WhatAmI::Router, WhatAmI::Peer) + | (WhatAmI::Peer, WhatAmI::Router) + | (WhatAmI::Peer, WhatAmI::Peer) => { + if hat!(rtables).full_net(WhatAmI::Peer) { + if let Some(peer) = get_peer(&rtables, face, node_id) { + declare_peer_subscription(tables, rtables, face, expr, sub_info, peer) + } + } else { + declare_client_subscription(tables, rtables, face, expr, sub_info) + } + } + _ => declare_client_subscription(tables, rtables, face, expr, sub_info), + } + } + + fn forget_subscription( + &self, + tables: &TablesLock, + face: &mut Arc, + expr: &WireExpr, + node_id: RoutingContext, + ) { + let rtables = zread!(tables.tables); + match (rtables.whatami, face.whatami) { + (WhatAmI::Router, WhatAmI::Router) => { + if let Some(router) = get_router(&rtables, face, node_id) { + forget_router_subscription(tables, rtables, face, expr, &router) + } + } + (WhatAmI::Router, WhatAmI::Peer) + | (WhatAmI::Peer, WhatAmI::Router) + | (WhatAmI::Peer, WhatAmI::Peer) => { + if hat!(rtables).full_net(WhatAmI::Peer) { + if let Some(peer) = get_peer(&rtables, face, node_id) { + forget_peer_subscription(tables, rtables, face, expr, &peer) + } + } else { + forget_client_subscription(tables, rtables, face, expr) + } + } + _ => forget_client_subscription(tables, rtables, face, expr), + } + } + + fn compute_data_route( + &self, + tables: &Tables, + expr: &mut RoutingExpr, + source: RoutingContext, + source_type: WhatAmI, + ) -> Arc { + let mut route = HashMap::new(); + let key_expr = expr.full_expr(); + if key_expr.ends_with('/') { + return Arc::new(route); + } + log::trace!( + "compute_data_route({}, {:?}, {:?})", + key_expr, + source, + source_type + ); + let key_expr = match OwnedKeyExpr::try_from(key_expr) { + Ok(ke) => ke, + Err(e) => { + log::warn!("Invalid KE reached the system: {}", e); + return Arc::new(route); + } + }; + let res = Resource::get_resource(expr.prefix, expr.suffix); + let matches = res + .as_ref() + .and_then(|res| res.context.as_ref()) + .map(|ctx| Cow::from(&ctx.matches)) + .unwrap_or_else(|| Cow::from(Resource::get_matches(tables, &key_expr))); + + let master = tables.whatami != WhatAmI::Router + || !hat!(tables).full_net(WhatAmI::Peer) + || *hat!(tables).elect_router(&tables.zid, &key_expr, hat!(tables).shared_nodes.iter()) + == tables.zid; + + for mres in matches.iter() { + let mres = mres.upgrade().unwrap(); + if tables.whatami == WhatAmI::Router { + if master || source_type == WhatAmI::Router { + let net = hat!(tables).routers_net.as_ref().unwrap(); + let router_source = match source_type { + WhatAmI::Router => source, + _ => 
net.idx.index() as RoutingContext, + }; + insert_faces_for_subs( + &mut route, + expr, + tables, + net, + router_source, + &res_hat!(mres).router_subs, + ); + } + + if (master || source_type != WhatAmI::Router) + && hat!(tables).full_net(WhatAmI::Peer) + { + let net = hat!(tables).peers_net.as_ref().unwrap(); + let peer_source = match source_type { + WhatAmI::Peer => source, + _ => net.idx.index() as RoutingContext, + }; + insert_faces_for_subs( + &mut route, + expr, + tables, + net, + peer_source, + &res_hat!(mres).peer_subs, + ); + } + } + + if tables.whatami == WhatAmI::Peer && hat!(tables).full_net(WhatAmI::Peer) { + let net = hat!(tables).peers_net.as_ref().unwrap(); + let peer_source = match source_type { + WhatAmI::Router | WhatAmI::Peer => source, + _ => net.idx.index() as RoutingContext, + }; + insert_faces_for_subs( + &mut route, + expr, + tables, + net, + peer_source, + &res_hat!(mres).peer_subs, + ); + } + + if tables.whatami != WhatAmI::Router || master || source_type == WhatAmI::Router { + for (sid, context) in &mres.session_ctxs { + if let Some(subinfo) = &context.subs { + if match tables.whatami { + WhatAmI::Router => context.face.whatami != WhatAmI::Router, + _ => { + source_type == WhatAmI::Client + || context.face.whatami == WhatAmI::Client + } + } && subinfo.mode == Mode::Push + { + route.entry(*sid).or_insert_with(|| { + let key_expr = + Resource::get_best_key(expr.prefix, expr.suffix, *sid); + ( + context.face.clone(), + key_expr.to_owned(), + RoutingContext::default(), + ) + }); + } + } + } + } + } + for mcast_group in &tables.mcast_groups { + route.insert( + mcast_group.id, + ( + mcast_group.clone(), + expr.full_expr().to_string().into(), + RoutingContext::default(), + ), + ); + } + Arc::new(route) + } + + fn compute_matching_pulls(&self, tables: &Tables, expr: &mut RoutingExpr) -> Arc { + let mut pull_caches = vec![]; + let ke = if let Ok(ke) = OwnedKeyExpr::try_from(expr.full_expr()) { + ke + } else { + return Arc::new(pull_caches); + }; + let res = Resource::get_resource(expr.prefix, expr.suffix); + let matches = res + .as_ref() + .and_then(|res| res.context.as_ref()) + .map(|ctx| Cow::from(&ctx.matches)) + .unwrap_or_else(|| Cow::from(Resource::get_matches(tables, &ke))); + + for mres in matches.iter() { + let mres = mres.upgrade().unwrap(); + for context in mres.session_ctxs.values() { + if let Some(subinfo) = &context.subs { + if subinfo.mode == Mode::Pull { + pull_caches.push(context.clone()); + } + } + } + } + Arc::new(pull_caches) + } + + fn compute_data_routes_(&self, tables: &Tables, res: &Arc) -> DataRoutes { + let mut routes = DataRoutes { + matching_pulls: None, + routers_data_routes: vec![], + peers_data_routes: vec![], + peer_data_route: None, + client_data_route: None, + }; + let mut expr = RoutingExpr::new(res, ""); + if tables.whatami == WhatAmI::Router { + let indexes = hat!(tables) + .routers_net + .as_ref() + .unwrap() + .graph + .node_indices() + .collect::>(); + let max_idx = indexes.iter().max().unwrap(); + routes + .routers_data_routes + .resize_with(max_idx.index() + 1, || Arc::new(HashMap::new())); + + for idx in &indexes { + routes.routers_data_routes[idx.index()] = self.compute_data_route( + tables, + &mut expr, + idx.index() as RoutingContext, + WhatAmI::Router, + ); + } + + routes.peer_data_route = Some(self.compute_data_route( + tables, + &mut expr, + RoutingContext::default(), + WhatAmI::Peer, + )); + } + if (tables.whatami == WhatAmI::Router || tables.whatami == WhatAmI::Peer) + && hat!(tables).full_net(WhatAmI::Peer) + { + let 
indexes = hat!(tables) + .peers_net + .as_ref() + .unwrap() + .graph + .node_indices() + .collect::>(); + let max_idx = indexes.iter().max().unwrap(); + routes + .peers_data_routes + .resize_with(max_idx.index() + 1, || Arc::new(HashMap::new())); + + for idx in &indexes { + routes.peers_data_routes[idx.index()] = self.compute_data_route( + tables, + &mut expr, + idx.index() as RoutingContext, + WhatAmI::Peer, + ); + } + } + if tables.whatami == WhatAmI::Peer && !hat!(tables).full_net(WhatAmI::Peer) { + routes.client_data_route = Some(self.compute_data_route( + tables, + &mut expr, + RoutingContext::default(), + WhatAmI::Client, + )); + routes.peer_data_route = Some(self.compute_data_route( + tables, + &mut expr, + RoutingContext::default(), + WhatAmI::Peer, + )); + } + if tables.whatami == WhatAmI::Client { + routes.client_data_route = Some(self.compute_data_route( + tables, + &mut expr, + RoutingContext::default(), + WhatAmI::Client, + )); + } + routes.matching_pulls = Some(self.compute_matching_pulls(tables, &mut expr)); + routes + } + + fn compute_data_routes(&self, tables: &mut Tables, res: &mut Arc) { + if res.context.is_some() { + let mut res_mut = res.clone(); + let res_mut = get_mut_unchecked(&mut res_mut); + let mut expr = RoutingExpr::new(res, ""); + if tables.whatami == WhatAmI::Router { + let indexes = hat!(tables) + .routers_net + .as_ref() + .unwrap() + .graph + .node_indices() + .collect::>(); + let max_idx = indexes.iter().max().unwrap(); + let routers_data_routes = &mut res_mut.context_mut().routers_data_routes; + routers_data_routes.clear(); + routers_data_routes.resize_with(max_idx.index() + 1, || Arc::new(HashMap::new())); + + for idx in &indexes { + routers_data_routes[idx.index()] = self.compute_data_route( + tables, + &mut expr, + idx.index() as RoutingContext, + WhatAmI::Router, + ); + } + + res_mut.context_mut().peer_data_route = Some(self.compute_data_route( + tables, + &mut expr, + RoutingContext::default(), + WhatAmI::Peer, + )); + } + if (tables.whatami == WhatAmI::Router || tables.whatami == WhatAmI::Peer) + && hat!(tables).full_net(WhatAmI::Peer) + { + let indexes = hat!(tables) + .peers_net + .as_ref() + .unwrap() + .graph + .node_indices() + .collect::>(); + let max_idx = indexes.iter().max().unwrap(); + let peers_data_routes = &mut res_mut.context_mut().peers_data_routes; + peers_data_routes.clear(); + peers_data_routes.resize_with(max_idx.index() + 1, || Arc::new(HashMap::new())); + + for idx in &indexes { + peers_data_routes[idx.index()] = self.compute_data_route( + tables, + &mut expr, + idx.index() as RoutingContext, + WhatAmI::Peer, + ); + } + } + if tables.whatami == WhatAmI::Peer && !hat!(tables).full_net(WhatAmI::Peer) { + res_mut.context_mut().client_data_route = Some(self.compute_data_route( + tables, + &mut expr, + RoutingContext::default(), + WhatAmI::Client, + )); + res_mut.context_mut().peer_data_route = Some(self.compute_data_route( + tables, + &mut expr, + RoutingContext::default(), + WhatAmI::Peer, + )); + } + if tables.whatami == WhatAmI::Client { + res_mut.context_mut().client_data_route = Some(self.compute_data_route( + tables, + &mut expr, + RoutingContext::default(), + WhatAmI::Client, + )); + } + res_mut.context_mut().matching_pulls = self.compute_matching_pulls(tables, &mut expr); + } + } +} diff --git a/zenoh/src/net/routing/hat/router/queries.rs b/zenoh/src/net/routing/hat/router/queries.rs new file mode 100644 index 0000000000..7ee731e05a --- /dev/null +++ b/zenoh/src/net/routing/hat/router/queries.rs @@ -0,0 +1,1761 @@ +// +// 
Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +use crate::net::routing::dispatcher::face::FaceState; +use crate::net::routing::dispatcher::queries::*; +use crate::net::routing::dispatcher::resource::{Resource, RoutingContext, SessionContext}; +use crate::net::routing::dispatcher::tables::{Tables, TablesLock}; +use crate::net::routing::hat::HatQueriesTrait; +use super::network::Network; +use super::{get_peer, get_router, HatCode, HatContext, HatFace, HatTables}; +use crate::net::routing::dispatcher::tables::{ + QueryRoutes, QueryTargetQabl, QueryTargetQablSet, RoutingExpr, +}; +use crate::net::routing::PREFIX_LIVELINESS; +use super::{face_hat, face_hat_mut, hat, hat_mut, res_hat, res_hat_mut}; +use ordered_float::OrderedFloat; +use petgraph::graph::NodeIndex; +use std::borrow::Cow; +use std::collections::HashMap; +use std::sync::{Arc, RwLockReadGuard}; +use zenoh_buffers::ZBuf; +use zenoh_protocol::core::key_expr::include::{Includer, DEFAULT_INCLUDER}; +use zenoh_protocol::core::key_expr::OwnedKeyExpr; +use zenoh_protocol::{ + core::{key_expr::keyexpr, WhatAmI, WireExpr, ZenohId}, + network::declare::{ + common::ext::WireExprType, ext, queryable::ext::QueryableInfo, Declare, DeclareBody, + DeclareQueryable, UndeclareQueryable, + }, +}; +use zenoh_sync::get_mut_unchecked; + +#[cfg(feature = "complete_n")] +#[inline] +fn merge_qabl_infos(mut this: QueryableInfo, info: &QueryableInfo) -> QueryableInfo { + this.complete += info.complete; + this.distance = std::cmp::min(this.distance, info.distance); + this +} + +#[cfg(not(feature = "complete_n"))] +#[inline] +fn merge_qabl_infos(mut this: QueryableInfo, info: &QueryableInfo) -> QueryableInfo { + this.complete = u8::from(this.complete != 0 || info.complete != 0); + this.distance = std::cmp::min(this.distance, info.distance); + this +} + +fn local_router_qabl_info(tables: &Tables, res: &Arc) -> QueryableInfo { + let info = if hat!(tables).full_net(WhatAmI::Peer) { + res.context.as_ref().and_then(|_| { + res_hat!(res) + .peer_qabls + .iter() + .fold(None, |accu, (zid, info)| { + if *zid != tables.zid { + Some(match accu { + Some(accu) => merge_qabl_infos(accu, info), + None => *info, + }) + } else { + accu + } + }) + }) + } else { + None + }; + res.session_ctxs + .values() + .fold(info, |accu, ctx| { + if let Some(info) = ctx.qabl.as_ref() { + Some(match accu { + Some(accu) => merge_qabl_infos(accu, info), + None => *info, + }) + } else { + accu + } + }) + .unwrap_or(QueryableInfo { + complete: 0, + distance: 0, + }) +} + +fn local_peer_qabl_info(tables: &Tables, res: &Arc) -> QueryableInfo { + let info = if tables.whatami == WhatAmI::Router && res.context.is_some() { + res_hat!(res) + .router_qabls + .iter() + .fold(None, |accu, (zid, info)| { + if *zid != tables.zid { + Some(match accu { + Some(accu) => merge_qabl_infos(accu, info), + None => *info, + }) + } else { + accu + } + }) + } else { + None + }; + res.session_ctxs + .values() + .fold(info, |accu, ctx| { + if let Some(info) = ctx.qabl.as_ref() { + Some(match accu { + Some(accu) => merge_qabl_infos(accu, info), + None => *info, + }) + } else { + accu + } + }) + .unwrap_or(QueryableInfo { + complete: 
0, + distance: 0, + }) +} + +fn local_qabl_info(tables: &Tables, res: &Arc, face: &Arc) -> QueryableInfo { + let mut info = if tables.whatami == WhatAmI::Router && res.context.is_some() { + res_hat!(res) + .router_qabls + .iter() + .fold(None, |accu, (zid, info)| { + if *zid != tables.zid { + Some(match accu { + Some(accu) => merge_qabl_infos(accu, info), + None => *info, + }) + } else { + accu + } + }) + } else { + None + }; + if res.context.is_some() && hat!(tables).full_net(WhatAmI::Peer) { + info = res_hat!(res) + .peer_qabls + .iter() + .fold(info, |accu, (zid, info)| { + if *zid != tables.zid { + Some(match accu { + Some(accu) => merge_qabl_infos(accu, info), + None => *info, + }) + } else { + accu + } + }) + } + res.session_ctxs + .values() + .fold(info, |accu, ctx| { + if ctx.face.id != face.id && ctx.face.whatami != WhatAmI::Peer + || face.whatami != WhatAmI::Peer + || hat!(tables).failover_brokering(ctx.face.zid, face.zid) + { + if let Some(info) = ctx.qabl.as_ref() { + Some(match accu { + Some(accu) => merge_qabl_infos(accu, info), + None => *info, + }) + } else { + accu + } + } else { + accu + } + }) + .unwrap_or(QueryableInfo { + complete: 0, + distance: 0, + }) +} + +#[allow(clippy::too_many_arguments)] +#[inline] +fn send_sourced_queryable_to_net_childs( + tables: &Tables, + net: &Network, + childs: &[NodeIndex], + res: &Arc, + qabl_info: &QueryableInfo, + src_face: Option<&mut Arc>, + routing_context: RoutingContext, +) { + for child in childs { + if net.graph.contains_node(*child) { + match tables.get_face(&net.graph[*child].zid).cloned() { + Some(mut someface) => { + if src_face.is_none() || someface.id != src_face.as_ref().unwrap().id { + let key_expr = Resource::decl_key(res, &mut someface); + + log::debug!("Send queryable {} on {}", res.expr(), someface); + + someface.primitives.send_declare(Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType { + node_id: routing_context, + }, + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id: 0, // TODO + wire_expr: key_expr, + ext_info: *qabl_info, + }), + }); + } + } + None => log::trace!("Unable to find face for zid {}", net.graph[*child].zid), + } + } + } +} + +fn propagate_simple_queryable( + tables: &mut Tables, + res: &Arc, + src_face: Option<&mut Arc>, +) { + let full_peers_net = hat!(tables).full_net(WhatAmI::Peer); + let faces = tables.faces.values().cloned(); + for mut dst_face in faces { + let info = local_qabl_info(tables, res, &dst_face); + let current_info = face_hat!(dst_face).local_qabls.get(res); + if (src_face.is_none() || src_face.as_ref().unwrap().id != dst_face.id) + && (current_info.is_none() || *current_info.unwrap() != info) + && match tables.whatami { + WhatAmI::Router => { + if full_peers_net { + dst_face.whatami == WhatAmI::Client + } else { + dst_face.whatami != WhatAmI::Router + && (src_face.is_none() + || src_face.as_ref().unwrap().whatami != WhatAmI::Peer + || dst_face.whatami != WhatAmI::Peer + || hat!(tables).failover_brokering( + src_face.as_ref().unwrap().zid, + dst_face.zid, + )) + } + } + WhatAmI::Peer => { + if full_peers_net { + dst_face.whatami == WhatAmI::Client + } else { + src_face.is_none() + || src_face.as_ref().unwrap().whatami == WhatAmI::Client + || dst_face.whatami == WhatAmI::Client + } + } + _ => { + src_face.is_none() + || src_face.as_ref().unwrap().whatami == WhatAmI::Client + || dst_face.whatami == WhatAmI::Client + } + } + { + face_hat_mut!(&mut dst_face) + .local_qabls + .insert(res.clone(), info); + let key_expr = 
Resource::decl_key(res, &mut dst_face); + dst_face.primitives.send_declare(Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id: 0, // TODO + wire_expr: key_expr, + ext_info: info, + }), + }); + } + } +} + +fn propagate_sourced_queryable( + tables: &Tables, + res: &Arc, + qabl_info: &QueryableInfo, + src_face: Option<&mut Arc>, + source: &ZenohId, + net_type: WhatAmI, +) { + let net = hat!(tables).get_net(net_type).unwrap(); + match net.get_idx(source) { + Some(tree_sid) => { + if net.trees.len() > tree_sid.index() { + send_sourced_queryable_to_net_childs( + tables, + net, + &net.trees[tree_sid.index()].childs, + res, + qabl_info, + src_face, + tree_sid.index() as RoutingContext, + ); + } else { + log::trace!( + "Propagating qabl {}: tree for node {} sid:{} not yet ready", + res.expr(), + tree_sid.index(), + source + ); + } + } + None => log::error!( + "Error propagating qabl {}: cannot get index of {}!", + res.expr(), + source + ), + } +} + +fn register_router_queryable( + tables: &mut Tables, + mut face: Option<&mut Arc>, + res: &mut Arc, + qabl_info: &QueryableInfo, + router: ZenohId, +) { + let current_info = res_hat!(res).router_qabls.get(&router); + if current_info.is_none() || current_info.unwrap() != qabl_info { + // Register router queryable + { + log::debug!( + "Register router queryable {} (router: {})", + res.expr(), + router, + ); + res_hat_mut!(res).router_qabls.insert(router, *qabl_info); + hat_mut!(tables).router_qabls.insert(res.clone()); + } + + // Propagate queryable to routers + propagate_sourced_queryable( + tables, + res, + qabl_info, + face.as_deref_mut(), + &router, + WhatAmI::Router, + ); + } + + if hat!(tables).full_net(WhatAmI::Peer) { + // Propagate queryable to peers + if face.is_none() || face.as_ref().unwrap().whatami != WhatAmI::Peer { + let local_info = local_peer_qabl_info(tables, res); + register_peer_queryable(tables, face.as_deref_mut(), res, &local_info, tables.zid) + } + } + + // Propagate queryable to clients + propagate_simple_queryable(tables, res, face); +} + +fn declare_router_queryable( + tables: &TablesLock, + rtables: RwLockReadGuard, + face: &mut Arc, + expr: &WireExpr, + qabl_info: &QueryableInfo, + router: ZenohId, +) { + match rtables + .get_mapping(face, &expr.scope, expr.mapping) + .cloned() + { + Some(mut prefix) => { + let res = Resource::get_resource(&prefix, &expr.suffix); + let (mut res, mut wtables) = + if res.as_ref().map(|r| r.context.is_some()).unwrap_or(false) { + drop(rtables); + let wtables = zwrite!(tables.tables); + (res.unwrap(), wtables) + } else { + let mut fullexpr = prefix.expr(); + fullexpr.push_str(expr.suffix.as_ref()); + log::debug!("Register router queryable {}", fullexpr); + let mut matches = keyexpr::new(fullexpr.as_str()) + .map(|ke| Resource::get_matches(&rtables, ke)) + .unwrap_or_default(); + drop(rtables); + let mut wtables = zwrite!(tables.tables); + let mut res = + Resource::make_resource(&mut wtables, &mut prefix, expr.suffix.as_ref()); + matches.push(Arc::downgrade(&res)); + Resource::match_resource(&wtables, &mut res, matches); + (res, wtables) + }; + register_router_queryable(&mut wtables, Some(face), &mut res, qabl_info, router); + disable_matches_query_routes(&mut wtables, &mut res); + drop(wtables); + + let rtables = zread!(tables.tables); + let matches_query_routes = compute_matches_query_routes_(&rtables, &res); + drop(rtables); + + let wtables = zwrite!(tables.tables); + 
for (mut res, query_routes) in matches_query_routes { + get_mut_unchecked(&mut res) + .context_mut() + .update_query_routes(query_routes); + } + drop(wtables); + } + None => log::error!("Declare router queryable for unknown scope {}!", expr.scope), + } +} + +fn register_peer_queryable( + tables: &mut Tables, + mut face: Option<&mut Arc>, + res: &mut Arc, + qabl_info: &QueryableInfo, + peer: ZenohId, +) { + let current_info = res_hat!(res).peer_qabls.get(&peer); + if current_info.is_none() || current_info.unwrap() != qabl_info { + // Register peer queryable + { + log::debug!("Register peer queryable {} (peer: {})", res.expr(), peer,); + res_hat_mut!(res).peer_qabls.insert(peer, *qabl_info); + hat_mut!(tables).peer_qabls.insert(res.clone()); + } + + // Propagate queryable to peers + propagate_sourced_queryable( + tables, + res, + qabl_info, + face.as_deref_mut(), + &peer, + WhatAmI::Peer, + ); + } + + if tables.whatami == WhatAmI::Peer { + // Propagate queryable to clients + propagate_simple_queryable(tables, res, face); + } +} + +fn declare_peer_queryable( + tables: &TablesLock, + rtables: RwLockReadGuard, + face: &mut Arc, + expr: &WireExpr, + qabl_info: &QueryableInfo, + peer: ZenohId, +) { + match rtables + .get_mapping(face, &expr.scope, expr.mapping) + .cloned() + { + Some(mut prefix) => { + let res = Resource::get_resource(&prefix, &expr.suffix); + let (mut res, mut wtables) = + if res.as_ref().map(|r| r.context.is_some()).unwrap_or(false) { + drop(rtables); + let wtables = zwrite!(tables.tables); + (res.unwrap(), wtables) + } else { + let mut fullexpr = prefix.expr(); + fullexpr.push_str(expr.suffix.as_ref()); + log::debug!("Register peer queryable {}", fullexpr); + let mut matches = keyexpr::new(fullexpr.as_str()) + .map(|ke| Resource::get_matches(&rtables, ke)) + .unwrap_or_default(); + drop(rtables); + let mut wtables = zwrite!(tables.tables); + let mut res = + Resource::make_resource(&mut wtables, &mut prefix, expr.suffix.as_ref()); + matches.push(Arc::downgrade(&res)); + Resource::match_resource(&wtables, &mut res, matches); + (res, wtables) + }; + let mut face = Some(face); + register_peer_queryable(&mut wtables, face.as_deref_mut(), &mut res, qabl_info, peer); + if wtables.whatami == WhatAmI::Router { + let local_info = local_router_qabl_info(&wtables, &res); + let zid = wtables.zid; + register_router_queryable(&mut wtables, face, &mut res, &local_info, zid); + } + disable_matches_query_routes(&mut wtables, &mut res); + drop(wtables); + + let rtables = zread!(tables.tables); + let matches_query_routes = compute_matches_query_routes_(&rtables, &res); + drop(rtables); + + let wtables = zwrite!(tables.tables); + for (mut res, query_routes) in matches_query_routes { + get_mut_unchecked(&mut res) + .context_mut() + .update_query_routes(query_routes); + } + drop(wtables); + } + None => log::error!("Declare router queryable for unknown scope {}!", expr.scope), + } +} + +fn register_client_queryable( + _tables: &mut Tables, + face: &mut Arc, + res: &mut Arc, + qabl_info: &QueryableInfo, +) { + // Register queryable + { + let res = get_mut_unchecked(res); + log::debug!("Register queryable {} (face: {})", res.expr(), face,); + get_mut_unchecked(res.session_ctxs.entry(face.id).or_insert_with(|| { + Arc::new(SessionContext { + face: face.clone(), + local_expr_id: None, + remote_expr_id: None, + subs: None, + qabl: None, + last_values: HashMap::new(), + }) + })) + .qabl = Some(*qabl_info); + } + face_hat_mut!(face).remote_qabls.insert(res.clone()); +} + +fn declare_client_queryable( + 
tables: &TablesLock, + rtables: RwLockReadGuard, + face: &mut Arc, + expr: &WireExpr, + qabl_info: &QueryableInfo, +) { + match rtables + .get_mapping(face, &expr.scope, expr.mapping) + .cloned() + { + Some(mut prefix) => { + let res = Resource::get_resource(&prefix, &expr.suffix); + let (mut res, mut wtables) = + if res.as_ref().map(|r| r.context.is_some()).unwrap_or(false) { + drop(rtables); + let wtables = zwrite!(tables.tables); + (res.unwrap(), wtables) + } else { + let mut fullexpr = prefix.expr(); + fullexpr.push_str(expr.suffix.as_ref()); + log::debug!("Register client queryable {}", fullexpr); + let mut matches = keyexpr::new(fullexpr.as_str()) + .map(|ke| Resource::get_matches(&rtables, ke)) + .unwrap_or_default(); + drop(rtables); + let mut wtables = zwrite!(tables.tables); + let mut res = + Resource::make_resource(&mut wtables, &mut prefix, expr.suffix.as_ref()); + matches.push(Arc::downgrade(&res)); + Resource::match_resource(&wtables, &mut res, matches); + (res, wtables) + }; + + register_client_queryable(&mut wtables, face, &mut res, qabl_info); + + match wtables.whatami { + WhatAmI::Router => { + let local_details = local_router_qabl_info(&wtables, &res); + let zid = wtables.zid; + register_router_queryable( + &mut wtables, + Some(face), + &mut res, + &local_details, + zid, + ); + } + WhatAmI::Peer => { + if hat!(wtables).full_net(WhatAmI::Peer) { + let local_details = local_peer_qabl_info(&wtables, &res); + let zid = wtables.zid; + register_peer_queryable( + &mut wtables, + Some(face), + &mut res, + &local_details, + zid, + ); + } else { + propagate_simple_queryable(&mut wtables, &res, Some(face)); + } + } + _ => { + propagate_simple_queryable(&mut wtables, &res, Some(face)); + } + } + disable_matches_query_routes(&mut wtables, &mut res); + drop(wtables); + + let rtables = zread!(tables.tables); + let matches_query_routes = compute_matches_query_routes_(&rtables, &res); + drop(rtables); + + let wtables = zwrite!(tables.tables); + for (mut res, query_routes) in matches_query_routes { + get_mut_unchecked(&mut res) + .context_mut() + .update_query_routes(query_routes); + } + drop(wtables); + } + None => log::error!("Declare queryable for unknown scope {}!", expr.scope), + } +} + +#[inline] +fn remote_router_qabls(tables: &Tables, res: &Arc) -> bool { + res.context.is_some() + && res_hat!(res) + .router_qabls + .keys() + .any(|router| router != &tables.zid) +} + +#[inline] +fn remote_peer_qabls(tables: &Tables, res: &Arc) -> bool { + res.context.is_some() + && res_hat!(res) + .peer_qabls + .keys() + .any(|peer| peer != &tables.zid) +} + +#[inline] +fn client_qabls(res: &Arc) -> Vec> { + res.session_ctxs + .values() + .filter_map(|ctx| { + if ctx.qabl.is_some() { + Some(ctx.face.clone()) + } else { + None + } + }) + .collect() +} + +#[inline] +fn send_forget_sourced_queryable_to_net_childs( + tables: &Tables, + net: &Network, + childs: &[NodeIndex], + res: &Arc, + src_face: Option<&Arc>, + routing_context: RoutingContext, +) { + for child in childs { + if net.graph.contains_node(*child) { + match tables.get_face(&net.graph[*child].zid).cloned() { + Some(mut someface) => { + if src_face.is_none() || someface.id != src_face.unwrap().id { + let wire_expr = Resource::decl_key(res, &mut someface); + + log::debug!("Send forget queryable {} on {}", res.expr(), someface); + + someface.primitives.send_declare(Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType { + node_id: routing_context, + }, + body: 
DeclareBody::UndeclareQueryable(UndeclareQueryable { + id: 0, // TODO + ext_wire_expr: WireExprType { wire_expr }, + }), + }); + } + } + None => log::trace!("Unable to find face for zid {}", net.graph[*child].zid), + } + } + } +} + +fn propagate_forget_simple_queryable(tables: &mut Tables, res: &mut Arc) { + for face in tables.faces.values_mut() { + if face_hat!(face).local_qabls.contains_key(res) { + let wire_expr = Resource::get_best_key(res, "", face.id); + face.primitives.send_declare(Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::UndeclareQueryable(UndeclareQueryable { + id: 0, // TODO + ext_wire_expr: WireExprType { wire_expr }, + }), + }); + + face_hat_mut!(face).local_qabls.remove(res); + } + } +} + +fn propagate_forget_simple_queryable_to_peers(tables: &mut Tables, res: &mut Arc) { + if !hat!(tables).full_net(WhatAmI::Peer) + && res_hat!(res).router_qabls.len() == 1 + && res_hat!(res).router_qabls.contains_key(&tables.zid) + { + for mut face in tables + .faces + .values() + .cloned() + .collect::>>() + { + if face.whatami == WhatAmI::Peer + && face_hat!(face).local_qabls.contains_key(res) + && !res.session_ctxs.values().any(|s| { + face.zid != s.face.zid + && s.qabl.is_some() + && (s.face.whatami == WhatAmI::Client + || (s.face.whatami == WhatAmI::Peer + && hat!(tables).failover_brokering(s.face.zid, face.zid))) + }) + { + let wire_expr = Resource::get_best_key(res, "", face.id); + face.primitives.send_declare(Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::UndeclareQueryable(UndeclareQueryable { + id: 0, // TODO + ext_wire_expr: WireExprType { wire_expr }, + }), + }); + + face_hat_mut!(&mut face).local_qabls.remove(res); + } + } + } +} + +fn propagate_forget_sourced_queryable( + tables: &mut Tables, + res: &mut Arc, + src_face: Option<&Arc>, + source: &ZenohId, + net_type: WhatAmI, +) { + let net = hat!(tables).get_net(net_type).unwrap(); + match net.get_idx(source) { + Some(tree_sid) => { + if net.trees.len() > tree_sid.index() { + send_forget_sourced_queryable_to_net_childs( + tables, + net, + &net.trees[tree_sid.index()].childs, + res, + src_face, + tree_sid.index() as RoutingContext, + ); + } else { + log::trace!( + "Propagating forget qabl {}: tree for node {} sid:{} not yet ready", + res.expr(), + tree_sid.index(), + source + ); + } + } + None => log::error!( + "Error propagating forget qabl {}: cannot get index of {}!", + res.expr(), + source + ), + } +} + +fn unregister_router_queryable(tables: &mut Tables, res: &mut Arc, router: &ZenohId) { + log::debug!( + "Unregister router queryable {} (router: {})", + res.expr(), + router, + ); + res_hat_mut!(res).router_qabls.remove(router); + + if res_hat!(res).router_qabls.is_empty() { + hat_mut!(tables) + .router_qabls + .retain(|qabl| !Arc::ptr_eq(qabl, res)); + + if hat!(tables).full_net(WhatAmI::Peer) { + undeclare_peer_queryable(tables, None, res, &tables.zid.clone()); + } + propagate_forget_simple_queryable(tables, res); + } + + propagate_forget_simple_queryable_to_peers(tables, res); +} + +fn undeclare_router_queryable( + tables: &mut Tables, + face: Option<&Arc>, + res: &mut Arc, + router: &ZenohId, +) { + if res_hat!(res).router_qabls.contains_key(router) { + unregister_router_queryable(tables, res, router); + propagate_forget_sourced_queryable(tables, res, face, router, WhatAmI::Router); + } +} + +fn forget_router_queryable( + tables: 
&TablesLock, + rtables: RwLockReadGuard, + face: &mut Arc, + expr: &WireExpr, + router: &ZenohId, +) { + match rtables.get_mapping(face, &expr.scope, expr.mapping) { + Some(prefix) => match Resource::get_resource(prefix, expr.suffix.as_ref()) { + Some(mut res) => { + drop(rtables); + let mut wtables = zwrite!(tables.tables); + undeclare_router_queryable(&mut wtables, Some(face), &mut res, router); + disable_matches_query_routes(&mut wtables, &mut res); + drop(wtables); + + let rtables = zread!(tables.tables); + let matches_query_routes = compute_matches_query_routes_(&rtables, &res); + drop(rtables); + + let wtables = zwrite!(tables.tables); + for (mut res, query_routes) in matches_query_routes { + get_mut_unchecked(&mut res) + .context_mut() + .update_query_routes(query_routes); + } + Resource::clean(&mut res); + drop(wtables); + } + None => log::error!("Undeclare unknown router queryable!"), + }, + None => log::error!("Undeclare router queryable with unknown scope!"), + } +} + +fn unregister_peer_queryable(tables: &mut Tables, res: &mut Arc, peer: &ZenohId) { + log::debug!("Unregister peer queryable {} (peer: {})", res.expr(), peer,); + res_hat_mut!(res).peer_qabls.remove(peer); + + if res_hat!(res).peer_qabls.is_empty() { + hat_mut!(tables) + .peer_qabls + .retain(|qabl| !Arc::ptr_eq(qabl, res)); + + if tables.whatami == WhatAmI::Peer { + propagate_forget_simple_queryable(tables, res); + } + } +} + +fn undeclare_peer_queryable( + tables: &mut Tables, + face: Option<&Arc>, + res: &mut Arc, + peer: &ZenohId, +) { + if res_hat!(res).peer_qabls.contains_key(peer) { + unregister_peer_queryable(tables, res, peer); + propagate_forget_sourced_queryable(tables, res, face, peer, WhatAmI::Peer); + } +} + +fn forget_peer_queryable( + tables: &TablesLock, + rtables: RwLockReadGuard, + face: &mut Arc, + expr: &WireExpr, + peer: &ZenohId, +) { + match rtables.get_mapping(face, &expr.scope, expr.mapping) { + Some(prefix) => match Resource::get_resource(prefix, expr.suffix.as_ref()) { + Some(mut res) => { + drop(rtables); + let mut wtables = zwrite!(tables.tables); + undeclare_peer_queryable(&mut wtables, Some(face), &mut res, peer); + + if wtables.whatami == WhatAmI::Router { + let client_qabls = res.session_ctxs.values().any(|ctx| ctx.qabl.is_some()); + let peer_qabls = remote_peer_qabls(&wtables, &res); + let zid = wtables.zid; + if !client_qabls && !peer_qabls { + undeclare_router_queryable(&mut wtables, None, &mut res, &zid); + } else { + let local_info = local_router_qabl_info(&wtables, &res); + register_router_queryable(&mut wtables, None, &mut res, &local_info, zid); + } + } + drop(wtables); + + let rtables = zread!(tables.tables); + let matches_query_routes = compute_matches_query_routes_(&rtables, &res); + drop(rtables); + + let wtables = zwrite!(tables.tables); + for (mut res, query_routes) in matches_query_routes { + get_mut_unchecked(&mut res) + .context_mut() + .update_query_routes(query_routes); + } + Resource::clean(&mut res); + drop(wtables); + } + None => log::error!("Undeclare unknown peer queryable!"), + }, + None => log::error!("Undeclare peer queryable with unknown scope!"), + } +} + +pub(super) fn undeclare_client_queryable( + tables: &mut Tables, + face: &mut Arc, + res: &mut Arc, +) { + log::debug!("Unregister client queryable {} for {}", res.expr(), face); + if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) { + get_mut_unchecked(ctx).qabl = None; + if ctx.qabl.is_none() { + face_hat_mut!(face).remote_qabls.remove(res); + } + } + + let mut client_qabls = 
client_qabls(res); + let router_qabls = remote_router_qabls(tables, res); + let peer_qabls = remote_peer_qabls(tables, res); + + match tables.whatami { + WhatAmI::Router => { + if client_qabls.is_empty() && !peer_qabls { + undeclare_router_queryable(tables, None, res, &tables.zid.clone()); + } else { + let local_info = local_router_qabl_info(tables, res); + register_router_queryable(tables, None, res, &local_info, tables.zid); + propagate_forget_simple_queryable_to_peers(tables, res); + } + } + WhatAmI::Peer => { + if hat!(tables).full_net(WhatAmI::Peer) { + if client_qabls.is_empty() { + undeclare_peer_queryable(tables, None, res, &tables.zid.clone()); + } else { + let local_info = local_peer_qabl_info(tables, res); + register_peer_queryable(tables, None, res, &local_info, tables.zid); + } + } else if client_qabls.is_empty() { + propagate_forget_simple_queryable(tables, res); + } else { + propagate_simple_queryable(tables, res, None); + } + } + _ => { + if client_qabls.is_empty() { + propagate_forget_simple_queryable(tables, res); + } else { + propagate_simple_queryable(tables, res, None); + } + } + } + + if client_qabls.len() == 1 && !router_qabls && !peer_qabls { + let face = &mut client_qabls[0]; + if face_hat!(face).local_qabls.contains_key(res) { + let wire_expr = Resource::get_best_key(res, "", face.id); + face.primitives.send_declare(Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::UndeclareQueryable(UndeclareQueryable { + id: 0, // TODO + ext_wire_expr: WireExprType { wire_expr }, + }), + }); + + face_hat_mut!(face).local_qabls.remove(res); + } + } +} + +fn forget_client_queryable( + tables: &TablesLock, + rtables: RwLockReadGuard, + face: &mut Arc, + expr: &WireExpr, +) { + match rtables.get_mapping(face, &expr.scope, expr.mapping) { + Some(prefix) => match Resource::get_resource(prefix, expr.suffix.as_ref()) { + Some(mut res) => { + drop(rtables); + let mut wtables = zwrite!(tables.tables); + undeclare_client_queryable(&mut wtables, face, &mut res); + disable_matches_query_routes(&mut wtables, &mut res); + drop(wtables); + + let rtables = zread!(tables.tables); + let matches_query_routes = compute_matches_query_routes_(&rtables, &res); + drop(rtables); + + let wtables = zwrite!(tables.tables); + for (mut res, query_routes) in matches_query_routes { + get_mut_unchecked(&mut res) + .context_mut() + .update_query_routes(query_routes); + } + Resource::clean(&mut res); + drop(wtables); + } + None => log::error!("Undeclare unknown queryable!"), + }, + None => log::error!("Undeclare queryable with unknown scope!"), + } +} + +pub(super) fn queries_new_face(tables: &mut Tables, face: &mut Arc) { + match tables.whatami { + WhatAmI::Router => { + if face.whatami == WhatAmI::Client { + for qabl in hat!(tables).router_qabls.iter() { + if qabl.context.is_some() { + let info = local_qabl_info(tables, qabl, face); + face_hat_mut!(face).local_qabls.insert(qabl.clone(), info); + let key_expr = Resource::decl_key(qabl, face); + face.primitives.send_declare(Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id: 0, // TODO + wire_expr: key_expr, + ext_info: info, + }), + }); + } + } + } else if face.whatami == WhatAmI::Peer && !hat!(tables).full_net(WhatAmI::Peer) { + for qabl in hat!(tables).router_qabls.iter() { + if qabl.context.is_some() + && (res_hat!(qabl).router_qabls.keys().any(|r| 
*r != tables.zid) + || qabl.session_ctxs.values().any(|s| { + s.qabl.is_some() + && (s.face.whatami == WhatAmI::Client + || (s.face.whatami == WhatAmI::Peer + && hat!(tables) + .failover_brokering(s.face.zid, face.zid))) + })) + { + let info = local_qabl_info(tables, qabl, face); + face_hat_mut!(face).local_qabls.insert(qabl.clone(), info); + let key_expr = Resource::decl_key(qabl, face); + face.primitives.send_declare(Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id: 0, // TODO + wire_expr: key_expr, + ext_info: info, + }), + }); + } + } + } + } + WhatAmI::Peer => { + if hat!(tables).full_net(WhatAmI::Peer) { + if face.whatami == WhatAmI::Client { + for qabl in &hat!(tables).peer_qabls { + if qabl.context.is_some() { + let info = local_qabl_info(tables, qabl, face); + face_hat_mut!(face).local_qabls.insert(qabl.clone(), info); + let key_expr = Resource::decl_key(qabl, face); + face.primitives.send_declare(Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id: 0, // TODO + wire_expr: key_expr, + ext_info: info, + }), + }); + } + } + } + } else { + for face in tables + .faces + .values() + .cloned() + .collect::>>() + { + for qabl in face_hat!(face).remote_qabls.iter() { + propagate_simple_queryable(tables, qabl, Some(&mut face.clone())); + } + } + } + } + WhatAmI::Client => { + for face in tables + .faces + .values() + .cloned() + .collect::>>() + { + for qabl in face_hat!(face).remote_qabls.iter() { + propagate_simple_queryable(tables, qabl, Some(&mut face.clone())); + } + } + } + } +} + +pub(super) fn queries_remove_node(tables: &mut Tables, node: &ZenohId, net_type: WhatAmI) { + match net_type { + WhatAmI::Router => { + let mut qabls = vec![]; + for res in hat!(tables).router_qabls.iter() { + for qabl in res_hat!(res).router_qabls.keys() { + if qabl == node { + qabls.push(res.clone()); + } + } + } + for mut res in qabls { + unregister_router_queryable(tables, &mut res, node); + + let matches_query_routes = compute_matches_query_routes_(tables, &res); + for (mut res, query_routes) in matches_query_routes { + get_mut_unchecked(&mut res) + .context_mut() + .update_query_routes(query_routes); + } + Resource::clean(&mut res); + } + } + WhatAmI::Peer => { + let mut qabls = vec![]; + for res in hat!(tables).router_qabls.iter() { + for qabl in res_hat!(res).router_qabls.keys() { + if qabl == node { + qabls.push(res.clone()); + } + } + } + for mut res in qabls { + unregister_peer_queryable(tables, &mut res, node); + + if tables.whatami == WhatAmI::Router { + let client_qabls = res.session_ctxs.values().any(|ctx| ctx.qabl.is_some()); + let peer_qabls = remote_peer_qabls(tables, &res); + if !client_qabls && !peer_qabls { + undeclare_router_queryable(tables, None, &mut res, &tables.zid.clone()); + } else { + let local_info = local_router_qabl_info(tables, &res); + register_router_queryable(tables, None, &mut res, &local_info, tables.zid); + } + } + + let matches_query_routes = compute_matches_query_routes_(tables, &res); + for (mut res, query_routes) in matches_query_routes { + get_mut_unchecked(&mut res) + .context_mut() + .update_query_routes(query_routes); + } + Resource::clean(&mut res) + } + } + _ => (), + } +} + +pub(super) fn queries_linkstate_change(tables: &mut Tables, zid: &ZenohId, links: &[ZenohId]) { + if let Some(src_face) = 
tables.get_face(zid) { + if hat!(tables).router_peers_failover_brokering + && tables.whatami == WhatAmI::Router + && src_face.whatami == WhatAmI::Peer + { + for res in &face_hat!(src_face).remote_qabls { + let client_qabls = res + .session_ctxs + .values() + .any(|ctx| ctx.face.whatami == WhatAmI::Client && ctx.qabl.is_some()); + if !remote_router_qabls(tables, res) && !client_qabls { + for ctx in get_mut_unchecked(&mut res.clone()) + .session_ctxs + .values_mut() + { + let dst_face = &mut get_mut_unchecked(ctx).face; + if dst_face.whatami == WhatAmI::Peer && src_face.zid != dst_face.zid { + if face_hat!(dst_face).local_qabls.contains_key(res) { + let forget = !HatTables::failover_brokering_to(links, dst_face.zid) + && { + let ctx_links = hat!(tables) + .peers_net + .as_ref() + .map(|net| net.get_links(dst_face.zid)) + .unwrap_or_else(|| &[]); + res.session_ctxs.values().any(|ctx2| { + ctx2.face.whatami == WhatAmI::Peer + && ctx2.qabl.is_some() + && HatTables::failover_brokering_to( + ctx_links, + ctx2.face.zid, + ) + }) + }; + if forget { + let wire_expr = Resource::get_best_key(res, "", dst_face.id); + dst_face.primitives.send_declare(Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::UndeclareQueryable(UndeclareQueryable { + id: 0, // TODO + ext_wire_expr: WireExprType { wire_expr }, + }), + }); + + face_hat_mut!(dst_face).local_qabls.remove(res); + } + } else if HatTables::failover_brokering_to(links, ctx.face.zid) { + let dst_face = &mut get_mut_unchecked(ctx).face; + let info = local_qabl_info(tables, res, dst_face); + face_hat_mut!(dst_face) + .local_qabls + .insert(res.clone(), info); + let key_expr = Resource::decl_key(res, dst_face); + dst_face.primitives.send_declare(Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id: 0, // TODO + wire_expr: key_expr, + ext_info: info, + }), + }); + } + } + } + } + } + } + } +} + +pub(super) fn queries_tree_change( + tables: &mut Tables, + new_childs: &[Vec], + net_type: WhatAmI, +) { + // propagate qabls to new childs + for (tree_sid, tree_childs) in new_childs.iter().enumerate() { + if !tree_childs.is_empty() { + let net = hat!(tables).get_net(net_type).unwrap(); + let tree_idx = NodeIndex::new(tree_sid); + if net.graph.contains_node(tree_idx) { + let tree_id = net.graph[tree_idx].zid; + + let qabls_res = match net_type { + WhatAmI::Router => &hat!(tables).router_qabls, + _ => &hat!(tables).peer_qabls, + }; + + for res in qabls_res { + let qabls = match net_type { + WhatAmI::Router => &res_hat!(res).router_qabls, + _ => &res_hat!(res).peer_qabls, + }; + if let Some(qabl_info) = qabls.get(&tree_id) { + send_sourced_queryable_to_net_childs( + tables, + net, + tree_childs, + res, + qabl_info, + None, + tree_sid as RoutingContext, + ); + } + } + } + } + } + + // recompute routes + compute_query_routes_from(tables, &mut tables.root_res.clone()); +} + +#[inline] +#[allow(clippy::too_many_arguments)] +fn insert_target_for_qabls( + route: &mut QueryTargetQablSet, + expr: &mut RoutingExpr, + tables: &Tables, + net: &Network, + source: RoutingContext, + qabls: &HashMap, + complete: bool, +) { + if net.trees.len() > source as usize { + for (qabl, qabl_info) in qabls { + if let Some(qabl_idx) = net.get_idx(qabl) { + if net.trees[source as usize].directions.len() > qabl_idx.index() { + if let Some(direction) = net.trees[source as 
usize].directions[qabl_idx.index()] + { + if net.graph.contains_node(direction) { + if let Some(face) = tables.get_face(&net.graph[direction].zid) { + if net.distances.len() > qabl_idx.index() { + let key_expr = + Resource::get_best_key(expr.prefix, expr.suffix, face.id); + route.push(QueryTargetQabl { + direction: (face.clone(), key_expr.to_owned(), source), + complete: if complete { + qabl_info.complete as u64 + } else { + 0 + }, + distance: net.distances[qabl_idx.index()], + }); + } + } + } + } + } + } + } + } else { + log::trace!("Tree for node sid:{} not yet ready", source); + } +} + +lazy_static::lazy_static! { + static ref EMPTY_ROUTE: Arc = Arc::new(Vec::new()); +} + +impl HatQueriesTrait for HatCode { + fn declare_queryable( + &self, + tables: &TablesLock, + face: &mut Arc, + expr: &WireExpr, + qabl_info: &QueryableInfo, + node_id: RoutingContext, + ) { + let rtables = zread!(tables.tables); + match (rtables.whatami, face.whatami) { + (WhatAmI::Router, WhatAmI::Router) => { + if let Some(router) = get_router(&rtables, face, node_id) { + declare_router_queryable(tables, rtables, face, expr, qabl_info, router) + } + } + (WhatAmI::Router, WhatAmI::Peer) + | (WhatAmI::Peer, WhatAmI::Router) + | (WhatAmI::Peer, WhatAmI::Peer) => { + if hat!(rtables).full_net(WhatAmI::Peer) { + if let Some(peer) = get_peer(&rtables, face, node_id) { + declare_peer_queryable(tables, rtables, face, expr, qabl_info, peer) + } + } else { + declare_client_queryable(tables, rtables, face, expr, qabl_info) + } + } + _ => declare_client_queryable(tables, rtables, face, expr, qabl_info), + } + } + + fn forget_queryable( + &self, + tables: &TablesLock, + face: &mut Arc, + expr: &WireExpr, + node_id: RoutingContext, + ) { + let rtables = zread!(tables.tables); + match (rtables.whatami, face.whatami) { + (WhatAmI::Router, WhatAmI::Router) => { + if let Some(router) = get_router(&rtables, face, node_id) { + forget_router_queryable(tables, rtables, face, expr, &router) + } + } + (WhatAmI::Router, WhatAmI::Peer) + | (WhatAmI::Peer, WhatAmI::Router) + | (WhatAmI::Peer, WhatAmI::Peer) => { + if hat!(rtables).full_net(WhatAmI::Peer) { + if let Some(peer) = get_peer(&rtables, face, node_id) { + forget_peer_queryable(tables, rtables, face, expr, &peer) + } + } else { + forget_client_queryable(tables, rtables, face, expr) + } + } + _ => forget_client_queryable(tables, rtables, face, expr), + } + } + + fn compute_query_route( + &self, + tables: &Tables, + expr: &mut RoutingExpr, + source: RoutingContext, + source_type: WhatAmI, + ) -> Arc { + let mut route = QueryTargetQablSet::new(); + let key_expr = expr.full_expr(); + if key_expr.ends_with('/') { + return EMPTY_ROUTE.clone(); + } + log::trace!( + "compute_query_route({}, {:?}, {:?})", + key_expr, + source, + source_type + ); + let key_expr = match OwnedKeyExpr::try_from(key_expr) { + Ok(ke) => ke, + Err(e) => { + log::warn!("Invalid KE reached the system: {}", e); + return EMPTY_ROUTE.clone(); + } + }; + let res = Resource::get_resource(expr.prefix, expr.suffix); + let matches = res + .as_ref() + .and_then(|res| res.context.as_ref()) + .map(|ctx| Cow::from(&ctx.matches)) + .unwrap_or_else(|| Cow::from(Resource::get_matches(tables, &key_expr))); + + let master = tables.whatami != WhatAmI::Router + || !hat!(tables).full_net(WhatAmI::Peer) + || *hat!(tables).elect_router(&tables.zid, &key_expr, hat!(tables).shared_nodes.iter()) + == tables.zid; + + for mres in matches.iter() { + let mres = mres.upgrade().unwrap(); + let complete = 
DEFAULT_INCLUDER.includes(mres.expr().as_bytes(), key_expr.as_bytes()); + if tables.whatami == WhatAmI::Router { + if master || source_type == WhatAmI::Router { + let net = hat!(tables).routers_net.as_ref().unwrap(); + let router_source = match source_type { + WhatAmI::Router => source, + _ => net.idx.index() as RoutingContext, + }; + insert_target_for_qabls( + &mut route, + expr, + tables, + net, + router_source, + &res_hat!(mres).router_qabls, + complete, + ); + } + + if (master || source_type != WhatAmI::Router) + && hat!(tables).full_net(WhatAmI::Peer) + { + let net = hat!(tables).peers_net.as_ref().unwrap(); + let peer_source = match source_type { + WhatAmI::Peer => source, + _ => net.idx.index() as RoutingContext, + }; + insert_target_for_qabls( + &mut route, + expr, + tables, + net, + peer_source, + &res_hat!(mres).peer_qabls, + complete, + ); + } + } + + if tables.whatami == WhatAmI::Peer && hat!(tables).full_net(WhatAmI::Peer) { + let net = hat!(tables).peers_net.as_ref().unwrap(); + let peer_source = match source_type { + WhatAmI::Router | WhatAmI::Peer => source, + _ => net.idx.index() as RoutingContext, + }; + insert_target_for_qabls( + &mut route, + expr, + tables, + net, + peer_source, + &res_hat!(mres).peer_qabls, + complete, + ); + } + + if tables.whatami != WhatAmI::Router || master || source_type == WhatAmI::Router { + for (sid, context) in &mres.session_ctxs { + if match tables.whatami { + WhatAmI::Router => context.face.whatami != WhatAmI::Router, + _ => { + source_type == WhatAmI::Client + || context.face.whatami == WhatAmI::Client + } + } { + let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, *sid); + if let Some(qabl_info) = context.qabl.as_ref() { + route.push(QueryTargetQabl { + direction: ( + context.face.clone(), + key_expr.to_owned(), + RoutingContext::default(), + ), + complete: if complete { + qabl_info.complete as u64 + } else { + 0 + }, + distance: 0.5, + }); + } + } + } + } + } + route.sort_by_key(|qabl| OrderedFloat(qabl.distance)); + Arc::new(route) + } + + #[inline] + fn compute_local_replies( + &self, + tables: &Tables, + prefix: &Arc, + suffix: &str, + face: &Arc, + ) -> Vec<(WireExpr<'static>, ZBuf)> { + let mut result = vec![]; + // Only the first routing point in the query route + // should return the liveliness tokens + if face.whatami == WhatAmI::Client { + let key_expr = prefix.expr() + suffix; + let key_expr = match OwnedKeyExpr::try_from(key_expr) { + Ok(ke) => ke, + Err(e) => { + log::warn!("Invalid KE reached the system: {}", e); + return result; + } + }; + if key_expr.starts_with(PREFIX_LIVELINESS) { + let res = Resource::get_resource(prefix, suffix); + let matches = res + .as_ref() + .and_then(|res| res.context.as_ref()) + .map(|ctx| Cow::from(&ctx.matches)) + .unwrap_or_else(|| Cow::from(Resource::get_matches(tables, &key_expr))); + for mres in matches.iter() { + let mres = mres.upgrade().unwrap(); + if (mres.context.is_some() + && (!res_hat!(mres).router_subs.is_empty() + || !res_hat!(mres).peer_subs.is_empty())) + || mres.session_ctxs.values().any(|ctx| ctx.subs.is_some()) + { + result.push((Resource::get_best_key(&mres, "", face.id), ZBuf::default())); + } + } + } + } + result + } + + fn compute_query_routes_(&self, tables: &Tables, res: &Arc) -> QueryRoutes { + let mut routes = QueryRoutes { + routers_query_routes: vec![], + peers_query_routes: vec![], + peer_query_route: None, + client_query_route: None, + }; + let mut expr = RoutingExpr::new(res, ""); + if tables.whatami == WhatAmI::Router { + let indexes = hat!(tables) 
+ .routers_net + .as_ref() + .unwrap() + .graph + .node_indices() + .collect::>(); + let max_idx = indexes.iter().max().unwrap(); + routes + .routers_query_routes + .resize_with(max_idx.index() + 1, || Arc::new(QueryTargetQablSet::new())); + + for idx in &indexes { + routes.routers_query_routes[idx.index()] = self.compute_query_route( + tables, + &mut expr, + idx.index() as RoutingContext, + WhatAmI::Router, + ); + } + + routes.peer_query_route = Some(self.compute_query_route( + tables, + &mut expr, + RoutingContext::default(), + WhatAmI::Peer, + )); + } + if (tables.whatami == WhatAmI::Router || tables.whatami == WhatAmI::Peer) + && hat!(tables).full_net(WhatAmI::Peer) + { + let indexes = hat!(tables) + .peers_net + .as_ref() + .unwrap() + .graph + .node_indices() + .collect::>(); + let max_idx = indexes.iter().max().unwrap(); + routes + .peers_query_routes + .resize_with(max_idx.index() + 1, || Arc::new(QueryTargetQablSet::new())); + + for idx in &indexes { + routes.peers_query_routes[idx.index()] = self.compute_query_route( + tables, + &mut expr, + idx.index() as RoutingContext, + WhatAmI::Peer, + ); + } + } + if tables.whatami == WhatAmI::Peer && !hat!(tables).full_net(WhatAmI::Peer) { + routes.client_query_route = Some(self.compute_query_route( + tables, + &mut expr, + RoutingContext::default(), + WhatAmI::Client, + )); + routes.peer_query_route = Some(self.compute_query_route( + tables, + &mut expr, + RoutingContext::default(), + WhatAmI::Peer, + )); + } + if tables.whatami == WhatAmI::Client { + routes.client_query_route = Some(self.compute_query_route( + tables, + &mut expr, + RoutingContext::default(), + WhatAmI::Client, + )); + } + routes + } + + fn compute_query_routes(&self, tables: &mut Tables, res: &mut Arc) { + if res.context.is_some() { + let mut res_mut = res.clone(); + let res_mut = get_mut_unchecked(&mut res_mut); + let mut expr = RoutingExpr::new(res, ""); + if tables.whatami == WhatAmI::Router { + let indexes = hat!(tables) + .routers_net + .as_ref() + .unwrap() + .graph + .node_indices() + .collect::>(); + let max_idx = indexes.iter().max().unwrap(); + let routers_query_routes = &mut res_mut.context_mut().routers_query_routes; + routers_query_routes.clear(); + routers_query_routes + .resize_with(max_idx.index() + 1, || Arc::new(QueryTargetQablSet::new())); + + for idx in &indexes { + routers_query_routes[idx.index()] = self.compute_query_route( + tables, + &mut expr, + idx.index() as RoutingContext, + WhatAmI::Router, + ); + } + + res_mut.context_mut().peer_query_route = Some(self.compute_query_route( + tables, + &mut expr, + RoutingContext::default(), + WhatAmI::Peer, + )); + } + if (tables.whatami == WhatAmI::Router || tables.whatami == WhatAmI::Peer) + && hat!(tables).full_net(WhatAmI::Peer) + { + let indexes = hat!(tables) + .peers_net + .as_ref() + .unwrap() + .graph + .node_indices() + .collect::>(); + let max_idx = indexes.iter().max().unwrap(); + let peers_query_routes = &mut res_mut.context_mut().peers_query_routes; + peers_query_routes.clear(); + peers_query_routes + .resize_with(max_idx.index() + 1, || Arc::new(QueryTargetQablSet::new())); + + for idx in &indexes { + peers_query_routes[idx.index()] = self.compute_query_route( + tables, + &mut expr, + idx.index() as RoutingContext, + WhatAmI::Peer, + ); + } + } + if tables.whatami == WhatAmI::Peer && !hat!(tables).full_net(WhatAmI::Peer) { + res_mut.context_mut().client_query_route = Some(self.compute_query_route( + tables, + &mut expr, + RoutingContext::default(), + WhatAmI::Client, + )); + 
res_mut.context_mut().peer_query_route = Some(self.compute_query_route( + tables, + &mut expr, + RoutingContext::default(), + WhatAmI::Peer, + )); + } + if tables.whatami == WhatAmI::Client { + res_mut.context_mut().client_query_route = Some(self.compute_query_route( + tables, + &mut expr, + RoutingContext::default(), + WhatAmI::Client, + )); + } + } + } +} diff --git a/zenoh/src/net/routing/router.rs b/zenoh/src/net/routing/router.rs index 96f457621a..c697bd872b 100644 --- a/zenoh/src/net/routing/router.rs +++ b/zenoh/src/net/routing/router.rs @@ -17,7 +17,7 @@ pub use super::dispatcher::queries::*; pub use super::dispatcher::resource::*; use super::dispatcher::tables::Tables; use super::dispatcher::tables::TablesLock; -use super::hat::HatCode; +use super::hat; use super::runtime::Runtime; use std::any::Any; use std::str::FromStr; @@ -60,7 +60,7 @@ impl Router { router_peers_failover_brokering, queries_default_timeout, )), - ctrl_lock: Box::new(Mutex::new(HatCode {})), + ctrl_lock: Mutex::new(hat::new_hat(whatami)), queries_lock: RwLock::new(()), }), } From 6b61780d1432b9dfb89b5f8bca311abca282fe6f Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Thu, 16 Nov 2023 16:35:43 +0100 Subject: [PATCH 011/122] Move Primitives --- io/zenoh-transport/src/lib.rs | 2 - zenoh/src/key_expr.rs | 3 +- zenoh/src/net/mod.rs | 4 +- .../src => zenoh/src/net}/primitives/demux.rs | 2 +- .../src => zenoh/src/net}/primitives/mod.rs | 6 --- .../src => zenoh/src/net}/primitives/mux.rs | 2 +- zenoh/src/net/routing/dispatcher/face.rs | 4 +- zenoh/src/net/routing/hat/client/mod.rs | 41 +++++++++---------- zenoh/src/net/routing/hat/client/pubsub.rs | 10 ++--- zenoh/src/net/routing/hat/client/queries.rs | 10 ++--- zenoh/src/net/routing/hat/mod.rs | 11 ++--- zenoh/src/net/routing/hat/peer/mod.rs | 41 +++++++++---------- zenoh/src/net/routing/hat/peer/pubsub.rs | 10 ++--- zenoh/src/net/routing/hat/peer/queries.rs | 10 ++--- zenoh/src/net/routing/hat/router/mod.rs | 41 +++++++++---------- zenoh/src/net/routing/hat/router/pubsub.rs | 10 ++--- zenoh/src/net/routing/hat/router/queries.rs | 10 ++--- zenoh/src/net/routing/router.rs | 8 +++- zenoh/src/net/runtime/adminspace.rs | 3 +- zenoh/src/net/runtime/mod.rs | 5 ++- zenoh/src/publication.rs | 3 +- zenoh/src/queryable.rs | 2 +- zenoh/src/session.rs | 2 +- 23 files changed, 113 insertions(+), 127 deletions(-) rename {io/zenoh-transport/src => zenoh/src/net}/primitives/demux.rs (97%) rename {io/zenoh-transport/src => zenoh/src/net}/primitives/mod.rs (94%) rename {io/zenoh-transport/src => zenoh/src/net}/primitives/mux.rs (98%) diff --git a/io/zenoh-transport/src/lib.rs b/io/zenoh-transport/src/lib.rs index 05240710f6..5aa6e352e9 100644 --- a/io/zenoh-transport/src/lib.rs +++ b/io/zenoh-transport/src/lib.rs @@ -20,7 +20,6 @@ mod common; mod manager; mod multicast; -mod primitives; pub mod unicast; #[cfg(feature = "stats")] @@ -31,7 +30,6 @@ mod shm; pub use manager::*; pub use multicast::*; -pub use primitives::*; use serde::Serialize; use std::any::Any; use std::sync::Arc; diff --git a/zenoh/src/key_expr.rs b/zenoh/src/key_expr.rs index ad41c30457..91ab0b8f7a 100644 --- a/zenoh/src/key_expr.rs +++ b/zenoh/src/key_expr.rs @@ -26,9 +26,8 @@ use zenoh_protocol::{ network::{declare, DeclareBody, Mapping, UndeclareKeyExpr}, }; use zenoh_result::ZResult; -use zenoh_transport::Primitives; -use crate::{prelude::Selector, Session, Undeclarable}; +use crate::{net::primitives::Primitives, prelude::Selector, Session, Undeclarable}; #[derive(Clone, Debug)] pub(crate) enum KeyExprInner<'a> 
{ diff --git a/zenoh/src/net/mod.rs b/zenoh/src/net/mod.rs index b0b4be3f14..346426a630 100644 --- a/zenoh/src/net/mod.rs +++ b/zenoh/src/net/mod.rs @@ -20,13 +20,13 @@ #[doc(hidden)] pub(crate) mod codec; #[doc(hidden)] +pub(crate) mod primitives; +#[doc(hidden)] pub(crate) mod protocol; #[doc(hidden)] pub(crate) mod routing; #[doc(hidden)] pub mod runtime; -#[doc(hidden)] -pub(crate) use zenoh_transport as transport; #[cfg(test)] pub(crate) mod tests; diff --git a/io/zenoh-transport/src/primitives/demux.rs b/zenoh/src/net/primitives/demux.rs similarity index 97% rename from io/zenoh-transport/src/primitives/demux.rs rename to zenoh/src/net/primitives/demux.rs index 260fffa11d..7e50af96e4 100644 --- a/io/zenoh-transport/src/primitives/demux.rs +++ b/zenoh/src/net/primitives/demux.rs @@ -12,11 +12,11 @@ // ZettaScale Zenoh Team, // use super::Primitives; -use crate::TransportPeerEventHandler; use std::any::Any; use zenoh_link::Link; use zenoh_protocol::network::{NetworkBody, NetworkMessage}; use zenoh_result::ZResult; +use zenoh_transport::TransportPeerEventHandler; pub struct DeMux { primitives: P, diff --git a/io/zenoh-transport/src/primitives/mod.rs b/zenoh/src/net/primitives/mod.rs similarity index 94% rename from io/zenoh-transport/src/primitives/mod.rs rename to zenoh/src/net/primitives/mod.rs index b79682790f..3718b73ebd 100644 --- a/io/zenoh-transport/src/primitives/mod.rs +++ b/zenoh/src/net/primitives/mod.rs @@ -35,12 +35,6 @@ pub trait Primitives: Send + Sync { #[derive(Default)] pub struct DummyPrimitives; -impl DummyPrimitives { - pub fn new() -> Self { - Self - } -} - impl Primitives for DummyPrimitives { fn send_declare(&self, _msg: Declare) {} diff --git a/io/zenoh-transport/src/primitives/mux.rs b/zenoh/src/net/primitives/mux.rs similarity index 98% rename from io/zenoh-transport/src/primitives/mux.rs rename to zenoh/src/net/primitives/mux.rs index 8783b8ff40..94956dd5e8 100644 --- a/io/zenoh-transport/src/primitives/mux.rs +++ b/zenoh/src/net/primitives/mux.rs @@ -11,11 +11,11 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::super::{TransportMulticast, TransportUnicast}; use super::Primitives; use zenoh_protocol::network::{ Declare, NetworkBody, NetworkMessage, Push, Request, Response, ResponseFinal, }; +use zenoh_transport::{TransportMulticast, TransportUnicast}; pub struct Mux { handler: TransportUnicast, diff --git a/zenoh/src/net/routing/dispatcher/face.rs b/zenoh/src/net/routing/dispatcher/face.rs index c327f2df13..4cfc01992d 100644 --- a/zenoh/src/net/routing/dispatcher/face.rs +++ b/zenoh/src/net/routing/dispatcher/face.rs @@ -1,3 +1,5 @@ +use crate::net::primitives::Primitives; + // // Copyright (c) 2023 ZettaScale Technology // @@ -25,7 +27,7 @@ use zenoh_protocol::{ }; #[cfg(feature = "stats")] use zenoh_transport::stats::TransportStats; -use zenoh_transport::{Primitives, TransportMulticast}; +use zenoh_transport::TransportMulticast; pub struct FaceState { pub(crate) id: usize, diff --git a/zenoh/src/net/routing/hat/client/mod.rs b/zenoh/src/net/routing/hat/client/mod.rs index d84128a037..4d187e0784 100644 --- a/zenoh/src/net/routing/hat/client/mod.rs +++ b/zenoh/src/net/routing/hat/client/mod.rs @@ -18,20 +18,25 @@ //! //! 
[Click here for Zenoh's documentation](../zenoh/index.html) use self::{ - network::{Network, shared_nodes}, - pubsub::{pubsub_linkstate_change, pubsub_new_face, pubsub_remove_node, undeclare_client_subscription}, - queries::{queries_linkstate_change, queries_new_face, queries_remove_node, undeclare_client_queryable}, + network::{shared_nodes, Network}, + pubsub::{ + pubsub_linkstate_change, pubsub_new_face, pubsub_remove_node, undeclare_client_subscription, + }, + queries::{ + queries_linkstate_change, queries_new_face, queries_remove_node, undeclare_client_queryable, + }, }; -use super::{super::dispatcher::{ - face::FaceState, - tables::{ - Resource, RoutingContext, - RoutingExpr, Tables, TablesLock, +use super::{ + super::dispatcher::{ + face::FaceState, + tables::{Resource, RoutingContext, RoutingExpr, Tables, TablesLock}, }, -}, HatBaseTrait, HatTrait}; + HatBaseTrait, HatTrait, +}; use crate::{ net::{ codec::Zenoh080Routing, + primitives::{Mux, Primitives}, protocol::linkstate::LinkStateList, }, runtime::Runtime, @@ -46,15 +51,11 @@ use std::{ use zenoh_config::{WhatAmI, WhatAmIMatcher, ZenohId}; use zenoh_protocol::{ common::ZExtBody, - network::{ - declare::queryable::ext::QueryableInfo, - oam::id::OAM_LINKSTATE, - Oam, - }, + network::{declare::queryable::ext::QueryableInfo, oam::id::OAM_LINKSTATE, Oam}, }; use zenoh_result::ZResult; use zenoh_sync::get_mut_unchecked; -use zenoh_transport::{Mux, Primitives, TransportUnicast}; +use zenoh_transport::TransportUnicast; mod network; mod pubsub; @@ -64,8 +65,6 @@ zconfigurable! { static ref TREES_COMPUTATION_DELAY: u64 = 100; } - - macro_rules! hat { ($t:expr) => { $t.hat.downcast_ref::().unwrap() @@ -112,8 +111,6 @@ macro_rules! face_hat_mut { } use face_hat_mut; - - struct HatTables { router_subs: HashSet>, peer_subs: HashSet>, @@ -322,11 +319,11 @@ impl HatBaseTrait for HatCode { fn new_tables(&self, router_peers_failover_brokering: bool) -> Box { Box::new(HatTables::new(router_peers_failover_brokering)) } - + fn new_face(&self) -> Box { Box::new(HatFace::new()) } - + fn new_resource(&self) -> Box { Box::new(HatContext::new()) } @@ -882,4 +879,4 @@ fn get_peer(tables: &Tables, face: &Arc, nodeid: RoutingContext) -> O } } -impl HatTrait for HatCode {} \ No newline at end of file +impl HatTrait for HatCode {} diff --git a/zenoh/src/net/routing/hat/client/pubsub.rs b/zenoh/src/net/routing/hat/client/pubsub.rs index 0405107f86..9fc46ef88c 100644 --- a/zenoh/src/net/routing/hat/client/pubsub.rs +++ b/zenoh/src/net/routing/hat/client/pubsub.rs @@ -11,16 +11,16 @@ // Contributors: // ZettaScale Zenoh Team, // +use super::network::Network; +use super::{face_hat, face_hat_mut, hat, hat_mut, res_hat, res_hat_mut}; +use super::{get_peer, get_router, HatCode, HatContext, HatFace, HatTables}; use crate::net::routing::dispatcher::face::FaceState; use crate::net::routing::dispatcher::pubsub::*; use crate::net::routing::dispatcher::resource::{Resource, RoutingContext, SessionContext}; +use crate::net::routing::dispatcher::tables::{DataRoutes, PullCaches, Route, RoutingExpr}; use crate::net::routing::dispatcher::tables::{Tables, TablesLock}; -use crate::net::routing::PREFIX_LIVELINESS; use crate::net::routing::hat::HatPubSubTrait; -use super::network::Network; -use super::{get_peer, get_router, HatCode, HatContext, HatFace, HatTables}; -use crate::net::routing::dispatcher::tables::{DataRoutes, PullCaches, Route, RoutingExpr}; -use super::{face_hat, face_hat_mut, hat, hat_mut, res_hat, res_hat_mut}; +use crate::net::routing::PREFIX_LIVELINESS; use 
petgraph::graph::NodeIndex; use std::borrow::Cow; use std::collections::{HashMap, HashSet}; diff --git a/zenoh/src/net/routing/hat/client/queries.rs b/zenoh/src/net/routing/hat/client/queries.rs index 7ee731e05a..9b729caade 100644 --- a/zenoh/src/net/routing/hat/client/queries.rs +++ b/zenoh/src/net/routing/hat/client/queries.rs @@ -11,18 +11,18 @@ // Contributors: // ZettaScale Zenoh Team, // +use super::network::Network; +use super::{face_hat, face_hat_mut, hat, hat_mut, res_hat, res_hat_mut}; +use super::{get_peer, get_router, HatCode, HatContext, HatFace, HatTables}; use crate::net::routing::dispatcher::face::FaceState; use crate::net::routing::dispatcher::queries::*; use crate::net::routing::dispatcher::resource::{Resource, RoutingContext, SessionContext}; -use crate::net::routing::dispatcher::tables::{Tables, TablesLock}; -use crate::net::routing::hat::HatQueriesTrait; -use super::network::Network; -use super::{get_peer, get_router, HatCode, HatContext, HatFace, HatTables}; use crate::net::routing::dispatcher::tables::{ QueryRoutes, QueryTargetQabl, QueryTargetQablSet, RoutingExpr, }; +use crate::net::routing::dispatcher::tables::{Tables, TablesLock}; +use crate::net::routing::hat::HatQueriesTrait; use crate::net::routing::PREFIX_LIVELINESS; -use super::{face_hat, face_hat_mut, hat, hat_mut, res_hat, res_hat_mut}; use ordered_float::OrderedFloat; use petgraph::graph::NodeIndex; use std::borrow::Cow; diff --git a/zenoh/src/net/routing/hat/mod.rs b/zenoh/src/net/routing/hat/mod.rs index 7f2a1a7e9c..a393a7ebf0 100644 --- a/zenoh/src/net/routing/hat/mod.rs +++ b/zenoh/src/net/routing/hat/mod.rs @@ -24,11 +24,8 @@ use super::dispatcher::{ RoutingExpr, Tables, TablesLock, }, }; -use crate::runtime::Runtime; -use std::{ - any::Any, - sync::Arc, -}; +use crate::{net::primitives::Primitives, runtime::Runtime}; +use std::{any::Any, sync::Arc}; use zenoh_buffers::ZBuf; use zenoh_config::{WhatAmI, WhatAmIMatcher}; use zenoh_protocol::{ @@ -39,7 +36,7 @@ use zenoh_protocol::{ }, }; use zenoh_result::ZResult; -use zenoh_transport::{Primitives, TransportUnicast}; +use zenoh_transport::TransportUnicast; mod client; mod peer; @@ -194,4 +191,4 @@ pub(crate) fn new_hat(whatami: WhatAmI) -> Box { WhatAmI::Peer => Box::new(peer::HatCode {}), WhatAmI::Router => Box::new(router::HatCode {}), } -} \ No newline at end of file +} diff --git a/zenoh/src/net/routing/hat/peer/mod.rs b/zenoh/src/net/routing/hat/peer/mod.rs index d84128a037..4d187e0784 100644 --- a/zenoh/src/net/routing/hat/peer/mod.rs +++ b/zenoh/src/net/routing/hat/peer/mod.rs @@ -18,20 +18,25 @@ //! //! 
[Click here for Zenoh's documentation](../zenoh/index.html) use self::{ - network::{Network, shared_nodes}, - pubsub::{pubsub_linkstate_change, pubsub_new_face, pubsub_remove_node, undeclare_client_subscription}, - queries::{queries_linkstate_change, queries_new_face, queries_remove_node, undeclare_client_queryable}, + network::{shared_nodes, Network}, + pubsub::{ + pubsub_linkstate_change, pubsub_new_face, pubsub_remove_node, undeclare_client_subscription, + }, + queries::{ + queries_linkstate_change, queries_new_face, queries_remove_node, undeclare_client_queryable, + }, }; -use super::{super::dispatcher::{ - face::FaceState, - tables::{ - Resource, RoutingContext, - RoutingExpr, Tables, TablesLock, +use super::{ + super::dispatcher::{ + face::FaceState, + tables::{Resource, RoutingContext, RoutingExpr, Tables, TablesLock}, }, -}, HatBaseTrait, HatTrait}; + HatBaseTrait, HatTrait, +}; use crate::{ net::{ codec::Zenoh080Routing, + primitives::{Mux, Primitives}, protocol::linkstate::LinkStateList, }, runtime::Runtime, @@ -46,15 +51,11 @@ use std::{ use zenoh_config::{WhatAmI, WhatAmIMatcher, ZenohId}; use zenoh_protocol::{ common::ZExtBody, - network::{ - declare::queryable::ext::QueryableInfo, - oam::id::OAM_LINKSTATE, - Oam, - }, + network::{declare::queryable::ext::QueryableInfo, oam::id::OAM_LINKSTATE, Oam}, }; use zenoh_result::ZResult; use zenoh_sync::get_mut_unchecked; -use zenoh_transport::{Mux, Primitives, TransportUnicast}; +use zenoh_transport::TransportUnicast; mod network; mod pubsub; @@ -64,8 +65,6 @@ zconfigurable! { static ref TREES_COMPUTATION_DELAY: u64 = 100; } - - macro_rules! hat { ($t:expr) => { $t.hat.downcast_ref::().unwrap() @@ -112,8 +111,6 @@ macro_rules! face_hat_mut { } use face_hat_mut; - - struct HatTables { router_subs: HashSet>, peer_subs: HashSet>, @@ -322,11 +319,11 @@ impl HatBaseTrait for HatCode { fn new_tables(&self, router_peers_failover_brokering: bool) -> Box { Box::new(HatTables::new(router_peers_failover_brokering)) } - + fn new_face(&self) -> Box { Box::new(HatFace::new()) } - + fn new_resource(&self) -> Box { Box::new(HatContext::new()) } @@ -882,4 +879,4 @@ fn get_peer(tables: &Tables, face: &Arc, nodeid: RoutingContext) -> O } } -impl HatTrait for HatCode {} \ No newline at end of file +impl HatTrait for HatCode {} diff --git a/zenoh/src/net/routing/hat/peer/pubsub.rs b/zenoh/src/net/routing/hat/peer/pubsub.rs index 0405107f86..9fc46ef88c 100644 --- a/zenoh/src/net/routing/hat/peer/pubsub.rs +++ b/zenoh/src/net/routing/hat/peer/pubsub.rs @@ -11,16 +11,16 @@ // Contributors: // ZettaScale Zenoh Team, // +use super::network::Network; +use super::{face_hat, face_hat_mut, hat, hat_mut, res_hat, res_hat_mut}; +use super::{get_peer, get_router, HatCode, HatContext, HatFace, HatTables}; use crate::net::routing::dispatcher::face::FaceState; use crate::net::routing::dispatcher::pubsub::*; use crate::net::routing::dispatcher::resource::{Resource, RoutingContext, SessionContext}; +use crate::net::routing::dispatcher::tables::{DataRoutes, PullCaches, Route, RoutingExpr}; use crate::net::routing::dispatcher::tables::{Tables, TablesLock}; -use crate::net::routing::PREFIX_LIVELINESS; use crate::net::routing::hat::HatPubSubTrait; -use super::network::Network; -use super::{get_peer, get_router, HatCode, HatContext, HatFace, HatTables}; -use crate::net::routing::dispatcher::tables::{DataRoutes, PullCaches, Route, RoutingExpr}; -use super::{face_hat, face_hat_mut, hat, hat_mut, res_hat, res_hat_mut}; +use crate::net::routing::PREFIX_LIVELINESS; use 
petgraph::graph::NodeIndex; use std::borrow::Cow; use std::collections::{HashMap, HashSet}; diff --git a/zenoh/src/net/routing/hat/peer/queries.rs b/zenoh/src/net/routing/hat/peer/queries.rs index 7ee731e05a..9b729caade 100644 --- a/zenoh/src/net/routing/hat/peer/queries.rs +++ b/zenoh/src/net/routing/hat/peer/queries.rs @@ -11,18 +11,18 @@ // Contributors: // ZettaScale Zenoh Team, // +use super::network::Network; +use super::{face_hat, face_hat_mut, hat, hat_mut, res_hat, res_hat_mut}; +use super::{get_peer, get_router, HatCode, HatContext, HatFace, HatTables}; use crate::net::routing::dispatcher::face::FaceState; use crate::net::routing::dispatcher::queries::*; use crate::net::routing::dispatcher::resource::{Resource, RoutingContext, SessionContext}; -use crate::net::routing::dispatcher::tables::{Tables, TablesLock}; -use crate::net::routing::hat::HatQueriesTrait; -use super::network::Network; -use super::{get_peer, get_router, HatCode, HatContext, HatFace, HatTables}; use crate::net::routing::dispatcher::tables::{ QueryRoutes, QueryTargetQabl, QueryTargetQablSet, RoutingExpr, }; +use crate::net::routing::dispatcher::tables::{Tables, TablesLock}; +use crate::net::routing::hat::HatQueriesTrait; use crate::net::routing::PREFIX_LIVELINESS; -use super::{face_hat, face_hat_mut, hat, hat_mut, res_hat, res_hat_mut}; use ordered_float::OrderedFloat; use petgraph::graph::NodeIndex; use std::borrow::Cow; diff --git a/zenoh/src/net/routing/hat/router/mod.rs b/zenoh/src/net/routing/hat/router/mod.rs index d84128a037..4d187e0784 100644 --- a/zenoh/src/net/routing/hat/router/mod.rs +++ b/zenoh/src/net/routing/hat/router/mod.rs @@ -18,20 +18,25 @@ //! //! [Click here for Zenoh's documentation](../zenoh/index.html) use self::{ - network::{Network, shared_nodes}, - pubsub::{pubsub_linkstate_change, pubsub_new_face, pubsub_remove_node, undeclare_client_subscription}, - queries::{queries_linkstate_change, queries_new_face, queries_remove_node, undeclare_client_queryable}, + network::{shared_nodes, Network}, + pubsub::{ + pubsub_linkstate_change, pubsub_new_face, pubsub_remove_node, undeclare_client_subscription, + }, + queries::{ + queries_linkstate_change, queries_new_face, queries_remove_node, undeclare_client_queryable, + }, }; -use super::{super::dispatcher::{ - face::FaceState, - tables::{ - Resource, RoutingContext, - RoutingExpr, Tables, TablesLock, +use super::{ + super::dispatcher::{ + face::FaceState, + tables::{Resource, RoutingContext, RoutingExpr, Tables, TablesLock}, }, -}, HatBaseTrait, HatTrait}; + HatBaseTrait, HatTrait, +}; use crate::{ net::{ codec::Zenoh080Routing, + primitives::{Mux, Primitives}, protocol::linkstate::LinkStateList, }, runtime::Runtime, @@ -46,15 +51,11 @@ use std::{ use zenoh_config::{WhatAmI, WhatAmIMatcher, ZenohId}; use zenoh_protocol::{ common::ZExtBody, - network::{ - declare::queryable::ext::QueryableInfo, - oam::id::OAM_LINKSTATE, - Oam, - }, + network::{declare::queryable::ext::QueryableInfo, oam::id::OAM_LINKSTATE, Oam}, }; use zenoh_result::ZResult; use zenoh_sync::get_mut_unchecked; -use zenoh_transport::{Mux, Primitives, TransportUnicast}; +use zenoh_transport::TransportUnicast; mod network; mod pubsub; @@ -64,8 +65,6 @@ zconfigurable! { static ref TREES_COMPUTATION_DELAY: u64 = 100; } - - macro_rules! hat { ($t:expr) => { $t.hat.downcast_ref::().unwrap() @@ -112,8 +111,6 @@ macro_rules! 
face_hat_mut { } use face_hat_mut; - - struct HatTables { router_subs: HashSet>, peer_subs: HashSet>, @@ -322,11 +319,11 @@ impl HatBaseTrait for HatCode { fn new_tables(&self, router_peers_failover_brokering: bool) -> Box { Box::new(HatTables::new(router_peers_failover_brokering)) } - + fn new_face(&self) -> Box { Box::new(HatFace::new()) } - + fn new_resource(&self) -> Box { Box::new(HatContext::new()) } @@ -882,4 +879,4 @@ fn get_peer(tables: &Tables, face: &Arc, nodeid: RoutingContext) -> O } } -impl HatTrait for HatCode {} \ No newline at end of file +impl HatTrait for HatCode {} diff --git a/zenoh/src/net/routing/hat/router/pubsub.rs b/zenoh/src/net/routing/hat/router/pubsub.rs index 0405107f86..9fc46ef88c 100644 --- a/zenoh/src/net/routing/hat/router/pubsub.rs +++ b/zenoh/src/net/routing/hat/router/pubsub.rs @@ -11,16 +11,16 @@ // Contributors: // ZettaScale Zenoh Team, // +use super::network::Network; +use super::{face_hat, face_hat_mut, hat, hat_mut, res_hat, res_hat_mut}; +use super::{get_peer, get_router, HatCode, HatContext, HatFace, HatTables}; use crate::net::routing::dispatcher::face::FaceState; use crate::net::routing::dispatcher::pubsub::*; use crate::net::routing::dispatcher::resource::{Resource, RoutingContext, SessionContext}; +use crate::net::routing::dispatcher::tables::{DataRoutes, PullCaches, Route, RoutingExpr}; use crate::net::routing::dispatcher::tables::{Tables, TablesLock}; -use crate::net::routing::PREFIX_LIVELINESS; use crate::net::routing::hat::HatPubSubTrait; -use super::network::Network; -use super::{get_peer, get_router, HatCode, HatContext, HatFace, HatTables}; -use crate::net::routing::dispatcher::tables::{DataRoutes, PullCaches, Route, RoutingExpr}; -use super::{face_hat, face_hat_mut, hat, hat_mut, res_hat, res_hat_mut}; +use crate::net::routing::PREFIX_LIVELINESS; use petgraph::graph::NodeIndex; use std::borrow::Cow; use std::collections::{HashMap, HashSet}; diff --git a/zenoh/src/net/routing/hat/router/queries.rs b/zenoh/src/net/routing/hat/router/queries.rs index 7ee731e05a..9b729caade 100644 --- a/zenoh/src/net/routing/hat/router/queries.rs +++ b/zenoh/src/net/routing/hat/router/queries.rs @@ -11,18 +11,18 @@ // Contributors: // ZettaScale Zenoh Team, // +use super::network::Network; +use super::{face_hat, face_hat_mut, hat, hat_mut, res_hat, res_hat_mut}; +use super::{get_peer, get_router, HatCode, HatContext, HatFace, HatTables}; use crate::net::routing::dispatcher::face::FaceState; use crate::net::routing::dispatcher::queries::*; use crate::net::routing::dispatcher::resource::{Resource, RoutingContext, SessionContext}; -use crate::net::routing::dispatcher::tables::{Tables, TablesLock}; -use crate::net::routing::hat::HatQueriesTrait; -use super::network::Network; -use super::{get_peer, get_router, HatCode, HatContext, HatFace, HatTables}; use crate::net::routing::dispatcher::tables::{ QueryRoutes, QueryTargetQabl, QueryTargetQablSet, RoutingExpr, }; +use crate::net::routing::dispatcher::tables::{Tables, TablesLock}; +use crate::net::routing::hat::HatQueriesTrait; use crate::net::routing::PREFIX_LIVELINESS; -use super::{face_hat, face_hat_mut, hat, hat_mut, res_hat, res_hat_mut}; use ordered_float::OrderedFloat; use petgraph::graph::NodeIndex; use std::borrow::Cow; diff --git a/zenoh/src/net/routing/router.rs b/zenoh/src/net/routing/router.rs index c697bd872b..f761ccc2c9 100644 --- a/zenoh/src/net/routing/router.rs +++ b/zenoh/src/net/routing/router.rs @@ -1,3 +1,8 @@ +use crate::net::primitives::DeMux; +use 
crate::net::primitives::DummyPrimitives; +use crate::net::primitives::McastMux; +use crate::net::primitives::Primitives; + // // Copyright (c) 2023 ZettaScale Technology // @@ -29,8 +34,7 @@ use zenoh_link::Link; use zenoh_protocol::core::{WhatAmI, WhatAmIMatcher, ZenohId}; use zenoh_protocol::network::{NetworkBody, NetworkMessage}; use zenoh_transport::{ - DeMux, DummyPrimitives, McastMux, Primitives, TransportMulticast, TransportPeer, - TransportPeerEventHandler, TransportUnicast, + TransportMulticast, TransportPeer, TransportPeerEventHandler, TransportUnicast, }; // use zenoh_collections::Timer; use zenoh_result::ZResult; diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs index 5094962046..789c618ed0 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -13,6 +13,7 @@ use super::routing::dispatcher::face::Face; use super::Runtime; use crate::key_expr::KeyExpr; +use crate::net::primitives::Primitives; use crate::plugins::sealed as plugins; use crate::prelude::sync::{Sample, SyncResolve}; use crate::queryable::Query; @@ -38,7 +39,7 @@ use zenoh_protocol::{ zenoh::{PushBody, RequestBody}, }; use zenoh_result::ZResult; -use zenoh_transport::{Primitives, TransportUnicast}; +use zenoh_transport::TransportUnicast; pub struct AdminContext { runtime: Runtime, diff --git a/zenoh/src/net/runtime/mod.rs b/zenoh/src/net/runtime/mod.rs index acacd4c64a..20db2540c4 100644 --- a/zenoh/src/net/runtime/mod.rs +++ b/zenoh/src/net/runtime/mod.rs @@ -20,6 +20,7 @@ mod adminspace; pub mod orchestrator; +use super::primitives::DeMux; use super::routing; use super::routing::dispatcher::face::Face; use super::routing::dispatcher::pubsub::full_reentrant_route_data; @@ -42,8 +43,8 @@ use zenoh_protocol::network::{NetworkBody, NetworkMessage}; use zenoh_result::{bail, ZResult}; use zenoh_sync::get_mut_unchecked; use zenoh_transport::{ - DeMux, TransportEventHandler, TransportManager, TransportMulticast, - TransportMulticastEventHandler, TransportPeer, TransportPeerEventHandler, TransportUnicast, + TransportEventHandler, TransportManager, TransportMulticast, TransportMulticastEventHandler, + TransportPeer, TransportPeerEventHandler, TransportUnicast, }; pub struct RuntimeState { diff --git a/zenoh/src/publication.rs b/zenoh/src/publication.rs index 071eb97da9..99c863a7de 100644 --- a/zenoh/src/publication.rs +++ b/zenoh/src/publication.rs @@ -13,8 +13,7 @@ // //! Publishing primitives. - -use crate::net::transport::Primitives; +use crate::net::primitives::Primitives; use crate::prelude::*; use crate::sample::DataInfo; use crate::Encoding; diff --git a/zenoh/src/queryable.rs b/zenoh/src/queryable.rs index ed0560d759..adb2b6b2c3 100644 --- a/zenoh/src/queryable.rs +++ b/zenoh/src/queryable.rs @@ -15,6 +15,7 @@ //! Queryable primitives. use crate::handlers::{locked, DefaultHandler}; +use crate::net::primitives::Primitives; use crate::prelude::*; #[zenoh_macros::unstable] use crate::query::ReplyKeyExpr; @@ -31,7 +32,6 @@ use zenoh_protocol::network::{response, Mapping, RequestId, Response, ResponseFi use zenoh_protocol::zenoh::reply::ext::ConsolidationType; use zenoh_protocol::zenoh::{self, ResponseBody}; use zenoh_result::ZResult; -use zenoh_transport::Primitives; pub(crate) struct QueryInner { /// The key expression of this Query. 
diff --git a/zenoh/src/session.rs b/zenoh/src/session.rs index 29a87c24d3..df90644a6b 100644 --- a/zenoh/src/session.rs +++ b/zenoh/src/session.rs @@ -20,9 +20,9 @@ use crate::info::*; use crate::key_expr::KeyExprInner; #[zenoh_macros::unstable] use crate::liveliness::{Liveliness, LivelinessTokenState}; +use crate::net::primitives::Primitives; use crate::net::routing::dispatcher::face::Face; use crate::net::runtime::Runtime; -use crate::net::transport::Primitives; use crate::prelude::Locality; use crate::prelude::{KeyExpr, Parameters}; use crate::publication::*; From 0a76ae38668d32f3bacf2c573341fe0ea73f6937 Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Wed, 22 Nov 2023 17:45:42 +0100 Subject: [PATCH 012/122] Move link_id into HatFace --- zenoh/src/net/routing/dispatcher/face.rs | 3 --- zenoh/src/net/routing/hat/client/mod.rs | 16 +++++++++------- zenoh/src/net/routing/hat/peer/mod.rs | 16 +++++++++------- zenoh/src/net/routing/hat/router/mod.rs | 16 +++++++++------- zenoh/src/net/routing/router.rs | 2 -- 5 files changed, 27 insertions(+), 26 deletions(-) diff --git a/zenoh/src/net/routing/dispatcher/face.rs b/zenoh/src/net/routing/dispatcher/face.rs index 4cfc01992d..d2d1f994eb 100644 --- a/zenoh/src/net/routing/dispatcher/face.rs +++ b/zenoh/src/net/routing/dispatcher/face.rs @@ -36,7 +36,6 @@ pub struct FaceState { #[cfg(feature = "stats")] pub(crate) stats: Option>, pub(crate) primitives: Arc, - pub(crate) link_id: usize, pub(crate) local_mappings: HashMap>, pub(crate) remote_mappings: HashMap>, pub(crate) next_qid: RequestId, @@ -52,7 +51,6 @@ impl FaceState { whatami: WhatAmI, #[cfg(feature = "stats")] stats: Option>, primitives: Arc, - link_id: usize, mcast_group: Option, hat: Box, ) -> Arc { @@ -63,7 +61,6 @@ impl FaceState { #[cfg(feature = "stats")] stats, primitives, - link_id, local_mappings: HashMap::new(), remote_mappings: HashMap::new(), next_qid: 0, diff --git a/zenoh/src/net/routing/hat/client/mod.rs b/zenoh/src/net/routing/hat/client/mod.rs index 4d187e0784..ffecc1e0e2 100644 --- a/zenoh/src/net/routing/hat/client/mod.rs +++ b/zenoh/src/net/routing/hat/client/mod.rs @@ -347,7 +347,6 @@ impl HatBaseTrait for HatCode { #[cfg(feature = "stats")] None, primitives.clone(), - 0, None, Box::new(HatFace::new()), ) @@ -410,7 +409,6 @@ impl HatBaseTrait for HatCode { #[cfg(feature = "stats")] Some(stats), Arc::new(Mux::new(transport)), - link_id, None, Box::new(HatFace::new()), ) @@ -418,6 +416,8 @@ impl HatBaseTrait for HatCode { .clone(); log::debug!("New {}", newface); + face_hat_mut!(&mut newface).link_id = link_id; + pubsub_new_face(tables, &mut newface); queries_new_face(tables, &mut newface); @@ -644,14 +644,14 @@ impl HatBaseTrait for HatCode { .routers_net .as_ref() .unwrap() - .get_local_context(routing_context, face.link_id), + .get_local_context(routing_context, face_hat!(face).link_id), WhatAmI::Peer => { if hat!(tables).full_net(WhatAmI::Peer) { hat!(tables) .peers_net .as_ref() .unwrap() - .get_local_context(routing_context, face.link_id) + .get_local_context(routing_context, face_hat!(face).link_id) } else { 0 } @@ -664,7 +664,7 @@ impl HatBaseTrait for HatCode { .peers_net .as_ref() .unwrap() - .get_local_context(routing_context, face.link_id) + .get_local_context(routing_context, face_hat!(face).link_id) } else { 0 } @@ -808,6 +808,7 @@ impl HatContext { } struct HatFace { + link_id: usize, local_subs: HashSet>, remote_subs: HashSet>, local_qabls: HashMap, QueryableInfo>, @@ -817,6 +818,7 @@ struct HatFace { impl HatFace { fn new() -> Self { Self { + link_id: 
0, local_subs: HashSet::new(), remote_subs: HashSet::new(), local_qabls: HashMap::new(), @@ -830,7 +832,7 @@ fn get_router(tables: &Tables, face: &Arc, nodeid: RoutingContext) -> .routers_net .as_ref() .unwrap() - .get_link(face.link_id) + .get_link(face_hat!(face).link_id) { Some(link) => match link.get_zid(&(nodeid as u64)) { Some(router) => Some(*router), @@ -857,7 +859,7 @@ fn get_peer(tables: &Tables, face: &Arc, nodeid: RoutingContext) -> O .peers_net .as_ref() .unwrap() - .get_link(face.link_id) + .get_link(face_hat!(face).link_id) { Some(link) => match link.get_zid(&(nodeid as u64)) { Some(router) => Some(*router), diff --git a/zenoh/src/net/routing/hat/peer/mod.rs b/zenoh/src/net/routing/hat/peer/mod.rs index 4d187e0784..ffecc1e0e2 100644 --- a/zenoh/src/net/routing/hat/peer/mod.rs +++ b/zenoh/src/net/routing/hat/peer/mod.rs @@ -347,7 +347,6 @@ impl HatBaseTrait for HatCode { #[cfg(feature = "stats")] None, primitives.clone(), - 0, None, Box::new(HatFace::new()), ) @@ -410,7 +409,6 @@ impl HatBaseTrait for HatCode { #[cfg(feature = "stats")] Some(stats), Arc::new(Mux::new(transport)), - link_id, None, Box::new(HatFace::new()), ) @@ -418,6 +416,8 @@ impl HatBaseTrait for HatCode { .clone(); log::debug!("New {}", newface); + face_hat_mut!(&mut newface).link_id = link_id; + pubsub_new_face(tables, &mut newface); queries_new_face(tables, &mut newface); @@ -644,14 +644,14 @@ impl HatBaseTrait for HatCode { .routers_net .as_ref() .unwrap() - .get_local_context(routing_context, face.link_id), + .get_local_context(routing_context, face_hat!(face).link_id), WhatAmI::Peer => { if hat!(tables).full_net(WhatAmI::Peer) { hat!(tables) .peers_net .as_ref() .unwrap() - .get_local_context(routing_context, face.link_id) + .get_local_context(routing_context, face_hat!(face).link_id) } else { 0 } @@ -664,7 +664,7 @@ impl HatBaseTrait for HatCode { .peers_net .as_ref() .unwrap() - .get_local_context(routing_context, face.link_id) + .get_local_context(routing_context, face_hat!(face).link_id) } else { 0 } @@ -808,6 +808,7 @@ impl HatContext { } struct HatFace { + link_id: usize, local_subs: HashSet>, remote_subs: HashSet>, local_qabls: HashMap, QueryableInfo>, @@ -817,6 +818,7 @@ struct HatFace { impl HatFace { fn new() -> Self { Self { + link_id: 0, local_subs: HashSet::new(), remote_subs: HashSet::new(), local_qabls: HashMap::new(), @@ -830,7 +832,7 @@ fn get_router(tables: &Tables, face: &Arc, nodeid: RoutingContext) -> .routers_net .as_ref() .unwrap() - .get_link(face.link_id) + .get_link(face_hat!(face).link_id) { Some(link) => match link.get_zid(&(nodeid as u64)) { Some(router) => Some(*router), @@ -857,7 +859,7 @@ fn get_peer(tables: &Tables, face: &Arc, nodeid: RoutingContext) -> O .peers_net .as_ref() .unwrap() - .get_link(face.link_id) + .get_link(face_hat!(face).link_id) { Some(link) => match link.get_zid(&(nodeid as u64)) { Some(router) => Some(*router), diff --git a/zenoh/src/net/routing/hat/router/mod.rs b/zenoh/src/net/routing/hat/router/mod.rs index 4d187e0784..ffecc1e0e2 100644 --- a/zenoh/src/net/routing/hat/router/mod.rs +++ b/zenoh/src/net/routing/hat/router/mod.rs @@ -347,7 +347,6 @@ impl HatBaseTrait for HatCode { #[cfg(feature = "stats")] None, primitives.clone(), - 0, None, Box::new(HatFace::new()), ) @@ -410,7 +409,6 @@ impl HatBaseTrait for HatCode { #[cfg(feature = "stats")] Some(stats), Arc::new(Mux::new(transport)), - link_id, None, Box::new(HatFace::new()), ) @@ -418,6 +416,8 @@ impl HatBaseTrait for HatCode { .clone(); log::debug!("New {}", newface); + face_hat_mut!(&mut 
newface).link_id = link_id; + pubsub_new_face(tables, &mut newface); queries_new_face(tables, &mut newface); @@ -644,14 +644,14 @@ impl HatBaseTrait for HatCode { .routers_net .as_ref() .unwrap() - .get_local_context(routing_context, face.link_id), + .get_local_context(routing_context, face_hat!(face).link_id), WhatAmI::Peer => { if hat!(tables).full_net(WhatAmI::Peer) { hat!(tables) .peers_net .as_ref() .unwrap() - .get_local_context(routing_context, face.link_id) + .get_local_context(routing_context, face_hat!(face).link_id) } else { 0 } @@ -664,7 +664,7 @@ impl HatBaseTrait for HatCode { .peers_net .as_ref() .unwrap() - .get_local_context(routing_context, face.link_id) + .get_local_context(routing_context, face_hat!(face).link_id) } else { 0 } @@ -808,6 +808,7 @@ impl HatContext { } struct HatFace { + link_id: usize, local_subs: HashSet>, remote_subs: HashSet>, local_qabls: HashMap, QueryableInfo>, @@ -817,6 +818,7 @@ struct HatFace { impl HatFace { fn new() -> Self { Self { + link_id: 0, local_subs: HashSet::new(), remote_subs: HashSet::new(), local_qabls: HashMap::new(), @@ -830,7 +832,7 @@ fn get_router(tables: &Tables, face: &Arc, nodeid: RoutingContext) -> .routers_net .as_ref() .unwrap() - .get_link(face.link_id) + .get_link(face_hat!(face).link_id) { Some(link) => match link.get_zid(&(nodeid as u64)) { Some(router) => Some(*router), @@ -857,7 +859,7 @@ fn get_peer(tables: &Tables, face: &Arc, nodeid: RoutingContext) -> O .peers_net .as_ref() .unwrap() - .get_link(face.link_id) + .get_link(face_hat!(face).link_id) { Some(link) => match link.get_zid(&(nodeid as u64)) { Some(router) => Some(*router), diff --git a/zenoh/src/net/routing/router.rs b/zenoh/src/net/routing/router.rs index f761ccc2c9..600c150375 100644 --- a/zenoh/src/net/routing/router.rs +++ b/zenoh/src/net/routing/router.rs @@ -143,7 +143,6 @@ impl Router { #[cfg(feature = "stats")] None, Arc::new(McastMux::new(transport.clone())), - 0, Some(transport), hat_face, )); @@ -169,7 +168,6 @@ impl Router { #[cfg(feature = "stats")] Some(transport.get_stats().unwrap()), Arc::new(DummyPrimitives), - 0, Some(transport), tables.hat_code.new_face(), ); From ef541d8e4cf7bc46efc303a92c6cbe5fc870bba8 Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Wed, 22 Nov 2023 18:07:49 +0100 Subject: [PATCH 013/122] Change face initialization --- zenoh/src/net/routing/hat/client/mod.rs | 77 +++++----------------- zenoh/src/net/routing/hat/mod.rs | 13 ++-- zenoh/src/net/routing/hat/peer/mod.rs | 77 +++++----------------- zenoh/src/net/routing/hat/router/mod.rs | 77 +++++----------------- zenoh/src/net/routing/router.rs | 87 +++++++++++++++++++------ 5 files changed, 119 insertions(+), 212 deletions(-) diff --git a/zenoh/src/net/routing/hat/client/mod.rs b/zenoh/src/net/routing/hat/client/mod.rs index ffecc1e0e2..4d20207cca 100644 --- a/zenoh/src/net/routing/hat/client/mod.rs +++ b/zenoh/src/net/routing/hat/client/mod.rs @@ -35,9 +35,7 @@ use super::{ }; use crate::{ net::{ - codec::Zenoh080Routing, - primitives::{Mux, Primitives}, - protocol::linkstate::LinkStateList, + codec::Zenoh080Routing, protocol::linkstate::LinkStateList, routing::dispatcher::face::Face, }, runtime::Runtime, }; @@ -332,43 +330,21 @@ impl HatBaseTrait for HatCode { &self, tables: &mut Tables, _tables_ref: &Arc, - primitives: Arc, - ) -> ZResult> { - let fid = tables.face_counter; - tables.face_counter += 1; - let mut newface = tables - .faces - .entry(fid) - .or_insert_with(|| { - FaceState::new( - fid, - tables.zid, - WhatAmI::Client, - #[cfg(feature = "stats")] - None, 
- primitives.clone(), - None, - Box::new(HatFace::new()), - ) - }) - .clone(); - log::debug!("New {}", newface); - - pubsub_new_face(tables, &mut newface); - queries_new_face(tables, &mut newface); - - Ok(newface) + face: &mut Face, + ) -> ZResult<()> { + pubsub_new_face(tables, &mut face.state); + queries_new_face(tables, &mut face.state); + Ok(()) } fn new_transport_unicast_face( &self, tables: &mut Tables, tables_ref: &Arc, - transport: TransportUnicast, - ) -> ZResult> { - let whatami = transport.get_whatami()?; - - let link_id = match (tables.whatami, whatami) { + face: &mut Face, + transport: &TransportUnicast, + ) -> ZResult<()> { + let link_id = match (tables.whatami, face.state.whatami) { (WhatAmI::Router, WhatAmI::Router) => hat_mut!(tables) .routers_net .as_mut() @@ -393,35 +369,12 @@ impl HatBaseTrait for HatCode { ); } - let fid = tables.face_counter; - tables.face_counter += 1; - let zid = transport.get_zid()?; - #[cfg(feature = "stats")] - let stats = transport.get_stats()?; - let mut newface = tables - .faces - .entry(fid) - .or_insert_with(|| { - FaceState::new( - fid, - zid, - whatami, - #[cfg(feature = "stats")] - Some(stats), - Arc::new(Mux::new(transport)), - None, - Box::new(HatFace::new()), - ) - }) - .clone(); - log::debug!("New {}", newface); - - face_hat_mut!(&mut newface).link_id = link_id; + face_hat_mut!(&mut face.state).link_id = link_id; - pubsub_new_face(tables, &mut newface); - queries_new_face(tables, &mut newface); + pubsub_new_face(tables, &mut face.state); + queries_new_face(tables, &mut face.state); - match (tables.whatami, whatami) { + match (tables.whatami, face.state.whatami) { (WhatAmI::Router, WhatAmI::Router) => { hat_mut!(tables).schedule_compute_trees(tables_ref.clone(), WhatAmI::Router); } @@ -434,7 +387,7 @@ impl HatBaseTrait for HatCode { } _ => (), } - Ok(newface) + Ok(()) } fn close_face(&self, tables: &TablesLock, face: &mut Arc) { diff --git a/zenoh/src/net/routing/hat/mod.rs b/zenoh/src/net/routing/hat/mod.rs index a393a7ebf0..3134721312 100644 --- a/zenoh/src/net/routing/hat/mod.rs +++ b/zenoh/src/net/routing/hat/mod.rs @@ -18,13 +18,13 @@ //! //! 
[Click here for Zenoh's documentation](../zenoh/index.html) use super::dispatcher::{ - face::FaceState, + face::{Face, FaceState}, tables::{ DataRoutes, PullCaches, QueryRoutes, QueryTargetQablSet, Resource, Route, RoutingContext, RoutingExpr, Tables, TablesLock, }, }; -use crate::{net::primitives::Primitives, runtime::Runtime}; +use crate::runtime::Runtime; use std::{any::Any, sync::Arc}; use zenoh_buffers::ZBuf; use zenoh_config::{WhatAmI, WhatAmIMatcher}; @@ -74,15 +74,16 @@ pub(crate) trait HatBaseTrait { &self, tables: &mut Tables, tables_ref: &Arc, - primitives: Arc, - ) -> ZResult>; + face: &mut Face, + ) -> ZResult<()>; fn new_transport_unicast_face( &self, tables: &mut Tables, tables_ref: &Arc, - transport: TransportUnicast, - ) -> ZResult>; + face: &mut Face, + transport: &TransportUnicast, + ) -> ZResult<()>; fn handle_oam( &self, diff --git a/zenoh/src/net/routing/hat/peer/mod.rs b/zenoh/src/net/routing/hat/peer/mod.rs index ffecc1e0e2..4d20207cca 100644 --- a/zenoh/src/net/routing/hat/peer/mod.rs +++ b/zenoh/src/net/routing/hat/peer/mod.rs @@ -35,9 +35,7 @@ use super::{ }; use crate::{ net::{ - codec::Zenoh080Routing, - primitives::{Mux, Primitives}, - protocol::linkstate::LinkStateList, + codec::Zenoh080Routing, protocol::linkstate::LinkStateList, routing::dispatcher::face::Face, }, runtime::Runtime, }; @@ -332,43 +330,21 @@ impl HatBaseTrait for HatCode { &self, tables: &mut Tables, _tables_ref: &Arc, - primitives: Arc, - ) -> ZResult> { - let fid = tables.face_counter; - tables.face_counter += 1; - let mut newface = tables - .faces - .entry(fid) - .or_insert_with(|| { - FaceState::new( - fid, - tables.zid, - WhatAmI::Client, - #[cfg(feature = "stats")] - None, - primitives.clone(), - None, - Box::new(HatFace::new()), - ) - }) - .clone(); - log::debug!("New {}", newface); - - pubsub_new_face(tables, &mut newface); - queries_new_face(tables, &mut newface); - - Ok(newface) + face: &mut Face, + ) -> ZResult<()> { + pubsub_new_face(tables, &mut face.state); + queries_new_face(tables, &mut face.state); + Ok(()) } fn new_transport_unicast_face( &self, tables: &mut Tables, tables_ref: &Arc, - transport: TransportUnicast, - ) -> ZResult> { - let whatami = transport.get_whatami()?; - - let link_id = match (tables.whatami, whatami) { + face: &mut Face, + transport: &TransportUnicast, + ) -> ZResult<()> { + let link_id = match (tables.whatami, face.state.whatami) { (WhatAmI::Router, WhatAmI::Router) => hat_mut!(tables) .routers_net .as_mut() @@ -393,35 +369,12 @@ impl HatBaseTrait for HatCode { ); } - let fid = tables.face_counter; - tables.face_counter += 1; - let zid = transport.get_zid()?; - #[cfg(feature = "stats")] - let stats = transport.get_stats()?; - let mut newface = tables - .faces - .entry(fid) - .or_insert_with(|| { - FaceState::new( - fid, - zid, - whatami, - #[cfg(feature = "stats")] - Some(stats), - Arc::new(Mux::new(transport)), - None, - Box::new(HatFace::new()), - ) - }) - .clone(); - log::debug!("New {}", newface); - - face_hat_mut!(&mut newface).link_id = link_id; + face_hat_mut!(&mut face.state).link_id = link_id; - pubsub_new_face(tables, &mut newface); - queries_new_face(tables, &mut newface); + pubsub_new_face(tables, &mut face.state); + queries_new_face(tables, &mut face.state); - match (tables.whatami, whatami) { + match (tables.whatami, face.state.whatami) { (WhatAmI::Router, WhatAmI::Router) => { hat_mut!(tables).schedule_compute_trees(tables_ref.clone(), WhatAmI::Router); } @@ -434,7 +387,7 @@ impl HatBaseTrait for HatCode { } _ => (), } - Ok(newface) + 
Ok(()) } fn close_face(&self, tables: &TablesLock, face: &mut Arc) { diff --git a/zenoh/src/net/routing/hat/router/mod.rs b/zenoh/src/net/routing/hat/router/mod.rs index ffecc1e0e2..4d20207cca 100644 --- a/zenoh/src/net/routing/hat/router/mod.rs +++ b/zenoh/src/net/routing/hat/router/mod.rs @@ -35,9 +35,7 @@ use super::{ }; use crate::{ net::{ - codec::Zenoh080Routing, - primitives::{Mux, Primitives}, - protocol::linkstate::LinkStateList, + codec::Zenoh080Routing, protocol::linkstate::LinkStateList, routing::dispatcher::face::Face, }, runtime::Runtime, }; @@ -332,43 +330,21 @@ impl HatBaseTrait for HatCode { &self, tables: &mut Tables, _tables_ref: &Arc, - primitives: Arc, - ) -> ZResult> { - let fid = tables.face_counter; - tables.face_counter += 1; - let mut newface = tables - .faces - .entry(fid) - .or_insert_with(|| { - FaceState::new( - fid, - tables.zid, - WhatAmI::Client, - #[cfg(feature = "stats")] - None, - primitives.clone(), - None, - Box::new(HatFace::new()), - ) - }) - .clone(); - log::debug!("New {}", newface); - - pubsub_new_face(tables, &mut newface); - queries_new_face(tables, &mut newface); - - Ok(newface) + face: &mut Face, + ) -> ZResult<()> { + pubsub_new_face(tables, &mut face.state); + queries_new_face(tables, &mut face.state); + Ok(()) } fn new_transport_unicast_face( &self, tables: &mut Tables, tables_ref: &Arc, - transport: TransportUnicast, - ) -> ZResult> { - let whatami = transport.get_whatami()?; - - let link_id = match (tables.whatami, whatami) { + face: &mut Face, + transport: &TransportUnicast, + ) -> ZResult<()> { + let link_id = match (tables.whatami, face.state.whatami) { (WhatAmI::Router, WhatAmI::Router) => hat_mut!(tables) .routers_net .as_mut() @@ -393,35 +369,12 @@ impl HatBaseTrait for HatCode { ); } - let fid = tables.face_counter; - tables.face_counter += 1; - let zid = transport.get_zid()?; - #[cfg(feature = "stats")] - let stats = transport.get_stats()?; - let mut newface = tables - .faces - .entry(fid) - .or_insert_with(|| { - FaceState::new( - fid, - zid, - whatami, - #[cfg(feature = "stats")] - Some(stats), - Arc::new(Mux::new(transport)), - None, - Box::new(HatFace::new()), - ) - }) - .clone(); - log::debug!("New {}", newface); - - face_hat_mut!(&mut newface).link_id = link_id; + face_hat_mut!(&mut face.state).link_id = link_id; - pubsub_new_face(tables, &mut newface); - queries_new_face(tables, &mut newface); + pubsub_new_face(tables, &mut face.state); + queries_new_face(tables, &mut face.state); - match (tables.whatami, whatami) { + match (tables.whatami, face.state.whatami) { (WhatAmI::Router, WhatAmI::Router) => { hat_mut!(tables).schedule_compute_trees(tables_ref.clone(), WhatAmI::Router); } @@ -434,7 +387,7 @@ impl HatBaseTrait for HatCode { } _ => (), } - Ok(newface) + Ok(()) } fn close_face(&self, tables: &TablesLock, face: &mut Arc) { diff --git a/zenoh/src/net/routing/router.rs b/zenoh/src/net/routing/router.rs index 600c150375..b1981c3143 100644 --- a/zenoh/src/net/routing/router.rs +++ b/zenoh/src/net/routing/router.rs @@ -1,6 +1,7 @@ use crate::net::primitives::DeMux; use crate::net::primitives::DummyPrimitives; use crate::net::primitives::McastMux; +use crate::net::primitives::Mux; use crate::net::primitives::Primitives; // @@ -96,19 +97,40 @@ impl Router { } pub fn new_primitives(&self, primitives: Arc) -> Arc { - Arc::new(Face { + let ctrl_lock = zlock!(self.tables.ctrl_lock); + let mut tables = zwrite!(self.tables.tables); + + let zid = tables.zid; + let fid = tables.face_counter; + tables.face_counter += 1; + let newface = 
tables + .faces + .entry(fid) + .or_insert_with(|| { + FaceState::new( + fid, + zid, + WhatAmI::Client, + #[cfg(feature = "stats")] + None, + primitives.clone(), + None, + ctrl_lock.new_face(), + ) + }) + .clone(); + log::debug!("New {}", newface); + + let mut face = Face { tables: self.tables.clone(), - state: { - let ctrl_lock = zlock!(self.tables.ctrl_lock); - let mut tables = zwrite!(self.tables.tables); - let face = ctrl_lock - .new_local_face(&mut tables, &self.tables, primitives) - .unwrap(); - drop(tables); - drop(ctrl_lock); - face - }, - }) + state: newface, + }; + ctrl_lock + .new_local_face(&mut tables, &self.tables, &mut face) + .unwrap(); + drop(tables); + drop(ctrl_lock); + Arc::new(face) } pub fn new_transport_unicast( @@ -117,17 +139,42 @@ impl Router { ) -> ZResult> { let ctrl_lock = zlock!(self.tables.ctrl_lock); let mut tables = zwrite!(self.tables.tables); + + let whatami = transport.get_whatami()?; + let fid = tables.face_counter; + tables.face_counter += 1; + let zid = transport.get_zid()?; + #[cfg(feature = "stats")] + let stats = transport.get_stats()?; + let newface = tables + .faces + .entry(fid) + .or_insert_with(|| { + FaceState::new( + fid, + zid, + whatami, + #[cfg(feature = "stats")] + Some(stats), + Arc::new(Mux::new(transport.clone())), + None, + ctrl_lock.new_face(), + ) + }) + .clone(); + log::debug!("New {}", newface); + + let mut face = Face { + tables: self.tables.clone(), + state: newface, + }; + + ctrl_lock.new_transport_unicast_face(&mut tables, &self.tables, &mut face, &transport)?; + Ok(Arc::new(LinkStateInterceptor::new( transport.clone(), self.tables.clone(), - Face { - tables: self.tables.clone(), - state: ctrl_lock.new_transport_unicast_face( - &mut tables, - &self.tables, - transport, - )?, - }, + face, ))) } From 1755ba9142c47ccd55486a4833800702f2a4df21 Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Wed, 22 Nov 2023 23:24:12 +0100 Subject: [PATCH 014/122] Interceptors --- zenoh/src/net/primitives/demux.rs | 14 +++- zenoh/src/net/primitives/mux.rs | 85 +++++++++++++++------- zenoh/src/net/routing/dispatcher/tables.rs | 4 + zenoh/src/net/routing/hat/client/mod.rs | 1 - zenoh/src/net/routing/interceptor/mod.rs | 74 +++++++++++++++++++ zenoh/src/net/routing/mod.rs | 1 + zenoh/src/net/routing/router.rs | 73 ++++++++++++++----- 7 files changed, 206 insertions(+), 46 deletions(-) create mode 100644 zenoh/src/net/routing/interceptor/mod.rs diff --git a/zenoh/src/net/primitives/demux.rs b/zenoh/src/net/primitives/demux.rs index 7e50af96e4..2dee8ce650 100644 --- a/zenoh/src/net/primitives/demux.rs +++ b/zenoh/src/net/primitives/demux.rs @@ -12,6 +12,7 @@ // ZettaScale Zenoh Team, // use super::Primitives; +use crate::net::routing::interceptor::IngressObj; use std::any::Any; use zenoh_link::Link; use zenoh_protocol::network::{NetworkBody, NetworkMessage}; @@ -20,16 +21,25 @@ use zenoh_transport::TransportPeerEventHandler; pub struct DeMux { primitives: P, + pub(crate) intercept: IngressObj, } impl DeMux

{
-    pub fn new(primitives: P) -> DeMux {
-        DeMux { primitives }
+    pub(crate) fn new(primitives: P, intercept: IngressObj) -> DeMux {
+        DeMux {
+            primitives,
+            intercept,
+        }
    }
}
impl TransportPeerEventHandler for DeMux
{ fn handle_message(&self, msg: NetworkMessage) -> ZResult<()> { + let msg = match self.intercept.intercept(msg) { + Some(msg) => msg, + None => return Ok(()), + }; + match msg.body { NetworkBody::Declare(m) => self.primitives.send_declare(m), NetworkBody::Push(m) => self.primitives.send_push(m), diff --git a/zenoh/src/net/primitives/mux.rs b/zenoh/src/net/primitives/mux.rs index 94956dd5e8..f3c6f98430 100644 --- a/zenoh/src/net/primitives/mux.rs +++ b/zenoh/src/net/primitives/mux.rs @@ -12,60 +12,77 @@ // ZettaScale Zenoh Team, // use super::Primitives; +use crate::net::routing::interceptor::EgressObj; use zenoh_protocol::network::{ Declare, NetworkBody, NetworkMessage, Push, Request, Response, ResponseFinal, }; use zenoh_transport::{TransportMulticast, TransportUnicast}; pub struct Mux { - handler: TransportUnicast, + pub handler: TransportUnicast, + pub(crate) intercept: EgressObj, } impl Mux { - pub fn new(handler: TransportUnicast) -> Mux { - Mux { handler } + pub(crate) fn new(handler: TransportUnicast, intercept: EgressObj) -> Mux { + Mux { handler, intercept } } } impl Primitives for Mux { fn send_declare(&self, msg: Declare) { - let _ = self.handler.schedule(NetworkMessage { + let msg = NetworkMessage { body: NetworkBody::Declare(msg), #[cfg(feature = "stats")] size: None, - }); + }; + if let Some(msg) = self.intercept.intercept(msg) { + let _ = self.handler.schedule(msg); + } } fn send_push(&self, msg: Push) { - let _ = self.handler.schedule(NetworkMessage { + let msg = NetworkMessage { body: NetworkBody::Push(msg), #[cfg(feature = "stats")] size: None, - }); + }; + if let Some(msg) = self.intercept.intercept(msg) { + let _ = self.handler.schedule(msg); + } } fn send_request(&self, msg: Request) { - let _ = self.handler.schedule(NetworkMessage { + let msg = NetworkMessage { body: NetworkBody::Request(msg), #[cfg(feature = "stats")] size: None, - }); + }; + if let Some(msg) = self.intercept.intercept(msg) { + let _ = self.handler.schedule(msg); + } } fn send_response(&self, msg: Response) { - let _ = self.handler.schedule(NetworkMessage { + let msg = NetworkMessage { body: NetworkBody::Response(msg), #[cfg(feature = "stats")] size: None, - }); + }; + if let Some(msg) = self.intercept.intercept(msg) { + let _ = self.handler.schedule(msg); + } } fn send_response_final(&self, msg: ResponseFinal) { - let _ = self.handler.schedule(NetworkMessage { + let msg = NetworkMessage { body: NetworkBody::ResponseFinal(msg), #[cfg(feature = "stats")] size: None, - }); + }; + if let Some(msg) = self.intercept.intercept(msg) { + let _ = self.handler.schedule(msg); + } } fn send_close(&self) { @@ -74,54 +91,70 @@ impl Primitives for Mux { } pub struct McastMux { - handler: TransportMulticast, + pub handler: TransportMulticast, + pub(crate) intercept: EgressObj, } impl McastMux { - pub fn new(handler: TransportMulticast) -> McastMux { - McastMux { handler } + pub(crate) fn new(handler: TransportMulticast, intercept: EgressObj) -> McastMux { + McastMux { handler, intercept } } } impl Primitives for McastMux { fn send_declare(&self, msg: Declare) { - let _ = self.handler.handle_message(NetworkMessage { + let msg = NetworkMessage { body: NetworkBody::Declare(msg), #[cfg(feature = "stats")] size: None, - }); + }; + if let Some(msg) = self.intercept.intercept(msg) { + let _ = self.handler.schedule(msg); + } } fn send_push(&self, msg: Push) { - let _ = self.handler.handle_message(NetworkMessage { + let msg = NetworkMessage { body: NetworkBody::Push(msg), #[cfg(feature = "stats")] size: None, - }); + }; + 
if let Some(msg) = self.intercept.intercept(msg) { + let _ = self.handler.schedule(msg); + } } fn send_request(&self, msg: Request) { - let _ = self.handler.handle_message(NetworkMessage { + let msg = NetworkMessage { body: NetworkBody::Request(msg), #[cfg(feature = "stats")] size: None, - }); + }; + if let Some(msg) = self.intercept.intercept(msg) { + let _ = self.handler.schedule(msg); + } } fn send_response(&self, msg: Response) { - let _ = self.handler.handle_message(NetworkMessage { + let msg = NetworkMessage { body: NetworkBody::Response(msg), #[cfg(feature = "stats")] size: None, - }); + }; + if let Some(msg) = self.intercept.intercept(msg) { + let _ = self.handler.schedule(msg); + } } fn send_response_final(&self, msg: ResponseFinal) { - let _ = self.handler.handle_message(NetworkMessage { + let msg = NetworkMessage { body: NetworkBody::ResponseFinal(msg), #[cfg(feature = "stats")] size: None, - }); + }; + if let Some(msg) = self.intercept.intercept(msg) { + let _ = self.handler.schedule(msg); + } } fn send_close(&self) { diff --git a/zenoh/src/net/routing/dispatcher/tables.rs b/zenoh/src/net/routing/dispatcher/tables.rs index 6559296fbb..a3ffa57aeb 100644 --- a/zenoh/src/net/routing/dispatcher/tables.rs +++ b/zenoh/src/net/routing/dispatcher/tables.rs @@ -17,6 +17,8 @@ pub use super::queries::*; pub use super::resource::*; use crate::net::routing::hat; use crate::net::routing::hat::HatTrait; +use crate::net::routing::interceptor::interceptors; +use crate::net::routing::interceptor::Interceptor; use std::any::Any; use std::collections::HashMap; use std::sync::{Arc, Weak}; @@ -68,6 +70,7 @@ pub struct Tables { pub(crate) faces: HashMap>, pub(crate) mcast_groups: Vec>, pub(crate) mcast_faces: Vec>, + pub(crate) interceptors: Vec>, pub(crate) pull_caches_lock: Mutex<()>, pub(crate) hat: Box, pub(crate) hat_code: Arc, // TODO make this a Box @@ -95,6 +98,7 @@ impl Tables { faces: HashMap::new(), mcast_groups: vec![], mcast_faces: vec![], + interceptors: interceptors(), pull_caches_lock: Mutex::new(()), hat: hat_code.new_tables(router_peers_failover_brokering), hat_code: hat_code.into(), diff --git a/zenoh/src/net/routing/hat/client/mod.rs b/zenoh/src/net/routing/hat/client/mod.rs index 4d20207cca..9e861741df 100644 --- a/zenoh/src/net/routing/hat/client/mod.rs +++ b/zenoh/src/net/routing/hat/client/mod.rs @@ -370,7 +370,6 @@ impl HatBaseTrait for HatCode { } face_hat_mut!(&mut face.state).link_id = link_id; - pubsub_new_face(tables, &mut face.state); queries_new_face(tables, &mut face.state); diff --git a/zenoh/src/net/routing/interceptor/mod.rs b/zenoh/src/net/routing/interceptor/mod.rs new file mode 100644 index 0000000000..a26790b9b9 --- /dev/null +++ b/zenoh/src/net/routing/interceptor/mod.rs @@ -0,0 +1,74 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +//! ⚠️ WARNING ⚠️ +//! +//! This module is intended for Zenoh's internal use. +//! +//! 
[Click here for Zenoh's documentation](../zenoh/index.html) +use zenoh_protocol::network::NetworkMessage; +use zenoh_transport::{TransportMulticast, TransportUnicast}; + +pub(crate) trait Intercept { + fn intercept(&self, msg: NetworkMessage) -> Option; +} + +pub(crate) struct InterceptsChain { + pub(crate) intercepts: Vec>, +} + +impl InterceptsChain { + #[allow(dead_code)] + pub(crate) fn empty() -> Self { + Self { intercepts: vec![] } + } +} + +impl From>> for InterceptsChain { + fn from(intercepts: Vec>) -> Self { + InterceptsChain { intercepts } + } +} + +impl Intercept for InterceptsChain { + fn intercept(&self, mut msg: NetworkMessage) -> Option { + for intercept in &self.intercepts { + match intercept.intercept(msg) { + Some(newmsg) => msg = newmsg, + None => { + log::trace!("Msg intercepted!"); + return None; + } + } + } + Some(msg) + } +} + +pub(crate) type IngressObj = Box; +pub(crate) type EgressObj = Box; + +pub(crate) trait Interceptor { + fn new_transport_unicast( + &self, + transport: &TransportUnicast, + ) -> (Option, Option); + fn new_transport_multicast(&self, transport: &TransportMulticast) -> Option; + fn new_peer_multicast(&self, transport: &TransportMulticast) -> Option; +} + +pub(crate) fn interceptors() -> Vec> { + // Add interceptors here + vec![] +} diff --git a/zenoh/src/net/routing/mod.rs b/zenoh/src/net/routing/mod.rs index c0da3bc0a0..597163c3e1 100644 --- a/zenoh/src/net/routing/mod.rs +++ b/zenoh/src/net/routing/mod.rs @@ -19,6 +19,7 @@ //! [Click here for Zenoh's documentation](../zenoh/index.html) pub mod dispatcher; pub mod hat; +pub mod interceptor; pub mod router; use super::runtime; diff --git a/zenoh/src/net/routing/router.rs b/zenoh/src/net/routing/router.rs index b1981c3143..bfee6a800c 100644 --- a/zenoh/src/net/routing/router.rs +++ b/zenoh/src/net/routing/router.rs @@ -1,9 +1,3 @@ -use crate::net::primitives::DeMux; -use crate::net::primitives::DummyPrimitives; -use crate::net::primitives::McastMux; -use crate::net::primitives::Mux; -use crate::net::primitives::Primitives; - // // Copyright (c) 2023 ZettaScale Technology // @@ -24,7 +18,15 @@ pub use super::dispatcher::resource::*; use super::dispatcher::tables::Tables; use super::dispatcher::tables::TablesLock; use super::hat; +use super::interceptor::EgressObj; +use super::interceptor::InterceptsChain; use super::runtime::Runtime; +use crate::net::primitives::DeMux; +use crate::net::primitives::DummyPrimitives; +use crate::net::primitives::McastMux; +use crate::net::primitives::Mux; +use crate::net::primitives::Primitives; +use crate::net::routing::interceptor::IngressObj; use std::any::Any; use std::str::FromStr; use std::sync::Arc; @@ -146,6 +148,19 @@ impl Router { let zid = transport.get_zid()?; #[cfg(feature = "stats")] let stats = transport.get_stats()?; + let (ingress, egress): (Vec<_>, Vec<_>) = tables + .interceptors + .iter() + .map(|itor| itor.new_transport_unicast(&transport)) + .unzip(); + let (ingress, egress) = ( + Box::new(InterceptsChain::from( + ingress.into_iter().flatten().collect::>(), + )), + Box::new(InterceptsChain::from( + egress.into_iter().flatten().collect::>(), + )), + ); let newface = tables .faces .entry(fid) @@ -156,7 +171,7 @@ impl Router { whatami, #[cfg(feature = "stats")] Some(stats), - Arc::new(Mux::new(transport.clone())), + Arc::new(Mux::new(transport.clone(), egress)), None, ctrl_lock.new_face(), ) @@ -175,23 +190,31 @@ impl Router { transport.clone(), self.tables.clone(), face, + ingress, ))) } pub fn new_transport_multicast(&self, transport: 
TransportMulticast) -> ZResult<()> { + let ctrl_lock = zlock!(self.tables.ctrl_lock); let mut tables = zwrite!(self.tables.tables); let fid = tables.face_counter; tables.face_counter += 1; - let hat_face = tables.hat_code.new_face(); + let intercept = Box::new(InterceptsChain::from( + tables + .interceptors + .iter() + .filter_map(|itor| itor.new_transport_multicast(&transport)) + .collect::>(), + )); tables.mcast_groups.push(FaceState::new( fid, ZenohId::from_str("1").unwrap(), WhatAmI::Peer, #[cfg(feature = "stats")] None, - Arc::new(McastMux::new(transport.clone())), + Arc::new(McastMux::new(transport.clone(), intercept)), Some(transport), - hat_face, + ctrl_lock.new_face(), )); // recompute routes @@ -205,9 +228,17 @@ impl Router { transport: TransportMulticast, peer: TransportPeer, ) -> ZResult>> { + let ctrl_lock = zlock!(self.tables.ctrl_lock); let mut tables = zwrite!(self.tables.tables); let fid = tables.face_counter; tables.face_counter += 1; + let intercept = Box::new(InterceptsChain::from( + tables + .interceptors + .iter() + .filter_map(|itor| itor.new_peer_multicast(&transport)) + .collect::>(), + )); let face_state = FaceState::new( fid, peer.zid, @@ -216,17 +247,20 @@ impl Router { Some(transport.get_stats().unwrap()), Arc::new(DummyPrimitives), Some(transport), - tables.hat_code.new_face(), + ctrl_lock.new_face(), ); tables.mcast_faces.push(face_state.clone()); // recompute routes let mut root_res = tables.root_res.clone(); compute_data_routes_from(&mut tables, &mut root_res); - Ok(Arc::new(DeMux::new(Face { - tables: self.tables.clone(), - state: face_state, - }))) + Ok(Arc::new(DeMux::new( + Face { + tables: self.tables.clone(), + state: face_state, + }, + intercept, + ))) } } @@ -238,12 +272,17 @@ pub struct LinkStateInterceptor { } impl LinkStateInterceptor { - fn new(transport: TransportUnicast, tables: Arc, face: Face) -> Self { + fn new( + transport: TransportUnicast, + tables: Arc, + face: Face, + ingress: IngressObj, + ) -> Self { LinkStateInterceptor { transport, tables, face: face.clone(), - demux: DeMux::new(face), + demux: DeMux::new(face, ingress), } } } From cb693dca715bad342d877b04eba3979ddc78ded5 Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Thu, 23 Nov 2023 14:51:11 +0100 Subject: [PATCH 015/122] Interceptor types renaming --- zenoh/src/net/primitives/demux.rs | 6 ++--- zenoh/src/net/primitives/mux.rs | 10 ++++---- zenoh/src/net/routing/dispatcher/tables.rs | 2 +- zenoh/src/net/routing/interceptor/mod.rs | 27 ++++++++++++---------- zenoh/src/net/routing/router.rs | 10 ++++---- 5 files changed, 29 insertions(+), 26 deletions(-) diff --git a/zenoh/src/net/primitives/demux.rs b/zenoh/src/net/primitives/demux.rs index 2dee8ce650..d326057098 100644 --- a/zenoh/src/net/primitives/demux.rs +++ b/zenoh/src/net/primitives/demux.rs @@ -12,7 +12,7 @@ // ZettaScale Zenoh Team, // use super::Primitives; -use crate::net::routing::interceptor::IngressObj; +use crate::net::routing::interceptor::IngressIntercept; use std::any::Any; use zenoh_link::Link; use zenoh_protocol::network::{NetworkBody, NetworkMessage}; @@ -21,11 +21,11 @@ use zenoh_transport::TransportPeerEventHandler; pub struct DeMux { primitives: P, - pub(crate) intercept: IngressObj, + pub(crate) intercept: IngressIntercept, } impl DeMux

{
-    pub(crate) fn new(primitives: P, intercept: IngressObj) -> DeMux {
-        DeMux {
+    pub(crate) fn new(primitives: P, intercept: IngressIntercept) -> DeMux
{ DeMux { primitives, intercept, diff --git a/zenoh/src/net/primitives/mux.rs b/zenoh/src/net/primitives/mux.rs index f3c6f98430..59f8ba99d6 100644 --- a/zenoh/src/net/primitives/mux.rs +++ b/zenoh/src/net/primitives/mux.rs @@ -12,7 +12,7 @@ // ZettaScale Zenoh Team, // use super::Primitives; -use crate::net::routing::interceptor::EgressObj; +use crate::net::routing::interceptor::EgressIntercept; use zenoh_protocol::network::{ Declare, NetworkBody, NetworkMessage, Push, Request, Response, ResponseFinal, }; @@ -20,11 +20,11 @@ use zenoh_transport::{TransportMulticast, TransportUnicast}; pub struct Mux { pub handler: TransportUnicast, - pub(crate) intercept: EgressObj, + pub(crate) intercept: EgressIntercept, } impl Mux { - pub(crate) fn new(handler: TransportUnicast, intercept: EgressObj) -> Mux { + pub(crate) fn new(handler: TransportUnicast, intercept: EgressIntercept) -> Mux { Mux { handler, intercept } } } @@ -92,11 +92,11 @@ impl Primitives for Mux { pub struct McastMux { pub handler: TransportMulticast, - pub(crate) intercept: EgressObj, + pub(crate) intercept: EgressIntercept, } impl McastMux { - pub(crate) fn new(handler: TransportMulticast, intercept: EgressObj) -> McastMux { + pub(crate) fn new(handler: TransportMulticast, intercept: EgressIntercept) -> McastMux { McastMux { handler, intercept } } } diff --git a/zenoh/src/net/routing/dispatcher/tables.rs b/zenoh/src/net/routing/dispatcher/tables.rs index a3ffa57aeb..90103b37b7 100644 --- a/zenoh/src/net/routing/dispatcher/tables.rs +++ b/zenoh/src/net/routing/dispatcher/tables.rs @@ -70,7 +70,7 @@ pub struct Tables { pub(crate) faces: HashMap>, pub(crate) mcast_groups: Vec>, pub(crate) mcast_faces: Vec>, - pub(crate) interceptors: Vec>, + pub(crate) interceptors: Vec, pub(crate) pull_caches_lock: Mutex<()>, pub(crate) hat: Box, pub(crate) hat_code: Arc, // TODO make this a Box diff --git a/zenoh/src/net/routing/interceptor/mod.rs b/zenoh/src/net/routing/interceptor/mod.rs index a26790b9b9..1d41750efa 100644 --- a/zenoh/src/net/routing/interceptor/mod.rs +++ b/zenoh/src/net/routing/interceptor/mod.rs @@ -20,12 +20,12 @@ use zenoh_protocol::network::NetworkMessage; use zenoh_transport::{TransportMulticast, TransportUnicast}; -pub(crate) trait Intercept { +pub(crate) trait InterceptTrait { fn intercept(&self, msg: NetworkMessage) -> Option; } pub(crate) struct InterceptsChain { - pub(crate) intercepts: Vec>, + pub(crate) intercepts: Vec, } impl InterceptsChain { @@ -35,13 +35,13 @@ impl InterceptsChain { } } -impl From>> for InterceptsChain { - fn from(intercepts: Vec>) -> Self { +impl From> for InterceptsChain { + fn from(intercepts: Vec) -> Self { InterceptsChain { intercepts } } } -impl Intercept for InterceptsChain { +impl InterceptTrait for InterceptsChain { fn intercept(&self, mut msg: NetworkMessage) -> Option { for intercept in &self.intercepts { match intercept.intercept(msg) { @@ -56,19 +56,22 @@ impl Intercept for InterceptsChain { } } -pub(crate) type IngressObj = Box; -pub(crate) type EgressObj = Box; +pub(crate) type Intercept = Box; +pub(crate) type IngressIntercept = Intercept; +pub(crate) type EgressIntercept = Intercept; -pub(crate) trait Interceptor { +pub(crate) trait InterceptorTrait { fn new_transport_unicast( &self, transport: &TransportUnicast, - ) -> (Option, Option); - fn new_transport_multicast(&self, transport: &TransportMulticast) -> Option; - fn new_peer_multicast(&self, transport: &TransportMulticast) -> Option; + ) -> (Option, Option); + fn new_transport_multicast(&self, transport: &TransportMulticast) 
-> Option; + fn new_peer_multicast(&self, transport: &TransportMulticast) -> Option; } -pub(crate) fn interceptors() -> Vec> { +pub(crate) type Interceptor = Box; + +pub(crate) fn interceptors() -> Vec { // Add interceptors here vec![] } diff --git a/zenoh/src/net/routing/router.rs b/zenoh/src/net/routing/router.rs index bfee6a800c..e32d213f79 100644 --- a/zenoh/src/net/routing/router.rs +++ b/zenoh/src/net/routing/router.rs @@ -18,7 +18,7 @@ pub use super::dispatcher::resource::*; use super::dispatcher::tables::Tables; use super::dispatcher::tables::TablesLock; use super::hat; -use super::interceptor::EgressObj; +use super::interceptor::EgressIntercept; use super::interceptor::InterceptsChain; use super::runtime::Runtime; use crate::net::primitives::DeMux; @@ -26,7 +26,7 @@ use crate::net::primitives::DummyPrimitives; use crate::net::primitives::McastMux; use crate::net::primitives::Mux; use crate::net::primitives::Primitives; -use crate::net::routing::interceptor::IngressObj; +use crate::net::routing::interceptor::IngressIntercept; use std::any::Any; use std::str::FromStr; use std::sync::Arc; @@ -204,7 +204,7 @@ impl Router { .interceptors .iter() .filter_map(|itor| itor.new_transport_multicast(&transport)) - .collect::>(), + .collect::>(), )); tables.mcast_groups.push(FaceState::new( fid, @@ -237,7 +237,7 @@ impl Router { .interceptors .iter() .filter_map(|itor| itor.new_peer_multicast(&transport)) - .collect::>(), + .collect::>(), )); let face_state = FaceState::new( fid, @@ -276,7 +276,7 @@ impl LinkStateInterceptor { transport: TransportUnicast, tables: Arc, face: Face, - ingress: IngressObj, + ingress: IngressIntercept, ) -> Self { LinkStateInterceptor { transport, From c520168dc69d87e693020b8cd13119f5fea5bff4 Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Thu, 23 Nov 2023 17:18:58 +0100 Subject: [PATCH 016/122] Rename RoutingContext --- zenoh/src/net/routing/dispatcher/pubsub.rs | 20 +++--- zenoh/src/net/routing/dispatcher/queries.rs | 20 +++--- zenoh/src/net/routing/dispatcher/resource.rs | 12 ++-- zenoh/src/net/routing/hat/client/mod.rs | 10 +-- zenoh/src/net/routing/hat/client/network.rs | 8 +-- zenoh/src/net/routing/hat/client/pubsub.rs | 70 ++++++++------------ zenoh/src/net/routing/hat/client/queries.rs | 64 ++++++++---------- zenoh/src/net/routing/hat/mod.rs | 18 ++--- zenoh/src/net/routing/hat/peer/mod.rs | 10 +-- zenoh/src/net/routing/hat/peer/network.rs | 8 +-- zenoh/src/net/routing/hat/peer/pubsub.rs | 70 ++++++++------------ zenoh/src/net/routing/hat/peer/queries.rs | 64 ++++++++---------- zenoh/src/net/routing/hat/router/mod.rs | 10 +-- zenoh/src/net/routing/hat/router/network.rs | 8 +-- zenoh/src/net/routing/hat/router/pubsub.rs | 70 ++++++++------------ zenoh/src/net/routing/hat/router/queries.rs | 64 ++++++++---------- 16 files changed, 227 insertions(+), 299 deletions(-) diff --git a/zenoh/src/net/routing/dispatcher/pubsub.rs b/zenoh/src/net/routing/dispatcher/pubsub.rs index 3b9e7f20d1..544866323c 100644 --- a/zenoh/src/net/routing/dispatcher/pubsub.rs +++ b/zenoh/src/net/routing/dispatcher/pubsub.rs @@ -13,7 +13,7 @@ // use super::face::FaceState; use super::resource::{DataRoutes, Direction, PullCaches, Resource}; -use super::tables::{RoutingContext, RoutingExpr, Tables}; +use super::tables::{NodeId, RoutingExpr, Tables}; use std::sync::Arc; use std::sync::RwLock; use zenoh_core::zread; @@ -109,7 +109,7 @@ macro_rules! 
treat_timestamp { // face: &FaceState, // res: &Option>, // expr: &mut RoutingExpr, -// routing_context: RoutingContext, +// routing_context: NodeId, // ) -> Arc { // let local_context = map_routing_context(tables, face, routing_context); // match tables.whatami { @@ -135,7 +135,7 @@ macro_rules! treat_timestamp { // compute_data_route( // tables, // expr, -// RoutingContext::default(), +// NodeId::default(), // face.whatami, // ) // }) @@ -143,9 +143,9 @@ macro_rules! treat_timestamp { // } // _ => res // .as_ref() -// .and_then(|res| res.routers_data_route(RoutingContext::default())) +// .and_then(|res| res.routers_data_route(NodeId::default())) // .unwrap_or_else(|| { -// compute_data_route(tables, expr, RoutingContext::default(), face.whatami) +// compute_data_route(tables, expr, NodeId::default(), face.whatami) // }), // }, // WhatAmI::Peer => { @@ -163,12 +163,12 @@ macro_rules! treat_timestamp { // } // _ => res // .as_ref() -// .and_then(|res| res.peers_data_route(RoutingContext::default())) +// .and_then(|res| res.peers_data_route(NodeId::default())) // .unwrap_or_else(|| { // compute_data_route( // tables, // expr, -// RoutingContext::default(), +// NodeId::default(), // face.whatami, // ) // }), @@ -180,7 +180,7 @@ macro_rules! treat_timestamp { // _ => res.peer_data_route(), // }) // .unwrap_or_else(|| { -// compute_data_route(tables, expr, RoutingContext::default(), face.whatami) +// compute_data_route(tables, expr, NodeId::default(), face.whatami) // }) // } // } @@ -188,7 +188,7 @@ macro_rules! treat_timestamp { // .as_ref() // .and_then(|res| res.client_data_route()) // .unwrap_or_else(|| { -// compute_data_route(tables, expr, RoutingContext::default(), face.whatami) +// compute_data_route(tables, expr, NodeId::default(), face.whatami) // }), // } // } @@ -251,7 +251,7 @@ pub fn full_reentrant_route_data( expr: &WireExpr, ext_qos: ext::QoSType, mut payload: PushBody, - routing_context: RoutingContext, + routing_context: NodeId, ) { let tables = zread!(tables_ref); match tables.get_mapping(face, &expr.scope, expr.mapping).cloned() { diff --git a/zenoh/src/net/routing/dispatcher/queries.rs b/zenoh/src/net/routing/dispatcher/queries.rs index c12feb901e..cb856c5dd1 100644 --- a/zenoh/src/net/routing/dispatcher/queries.rs +++ b/zenoh/src/net/routing/dispatcher/queries.rs @@ -13,7 +13,7 @@ // use super::face::FaceState; use super::resource::{QueryRoute, QueryRoutes, QueryTargetQablSet, Resource}; -use super::tables::RoutingContext; +use super::tables::NodeId; use super::tables::{RoutingExpr, Tables, TablesLock}; use async_trait::async_trait; use std::collections::HashMap; @@ -263,7 +263,7 @@ pub(crate) fn disable_matches_query_routes(_tables: &mut Tables, res: &mut Arc>, // expr: &mut RoutingExpr, -// routing_context: RoutingContext, +// routing_context: NodeId, // ) -> Arc { // match tables.whatami { // WhatAmI::Router => match face.whatami { @@ -292,7 +292,7 @@ pub(crate) fn disable_matches_query_routes(_tables: &mut Tables, res: &mut Arc res // .as_ref() -// .and_then(|res| res.routers_query_route(RoutingContext::default())) +// .and_then(|res| res.routers_query_route(NodeId::default())) // .unwrap_or_else(|| { -// compute_query_route(tables, expr, RoutingContext::default(), face.whatami) +// compute_query_route(tables, expr, NodeId::default(), face.whatami) // }), // }, // WhatAmI::Peer => { @@ -320,12 +320,12 @@ pub(crate) fn disable_matches_query_routes(_tables: &mut Tables, res: &mut Arc res // .as_ref() -// .and_then(|res| res.peers_query_route(RoutingContext::default())) 
+// .and_then(|res| res.peers_query_route(NodeId::default())) // .unwrap_or_else(|| { // compute_query_route( // tables, // expr, -// RoutingContext::default(), +// NodeId::default(), // face.whatami, // ) // }), @@ -337,7 +337,7 @@ pub(crate) fn disable_matches_query_routes(_tables: &mut Tables, res: &mut Arc res.peer_query_route(), // }) // .unwrap_or_else(|| { -// compute_query_route(tables, expr, RoutingContext::default(), face.whatami) +// compute_query_route(tables, expr, NodeId::default(), face.whatami) // }) // } // } @@ -345,7 +345,7 @@ pub(crate) fn disable_matches_query_routes(_tables: &mut Tables, res: &mut Arc, WireExpr<'static>, RoutingContext); +pub(crate) type Direction = (Arc, WireExpr<'static>, NodeId); pub(crate) type Route = HashMap; #[cfg(feature = "complete_n")] pub(crate) type QueryRoute = HashMap; @@ -205,7 +205,7 @@ impl Resource { } #[inline(always)] - pub fn routers_data_route(&self, context: RoutingContext) -> Option> { + pub fn routers_data_route(&self, context: NodeId) -> Option> { match &self.context { Some(ctx) => { if ctx.valid_data_routes { @@ -221,7 +221,7 @@ impl Resource { } #[inline(always)] - pub fn peers_data_route(&self, context: RoutingContext) -> Option> { + pub fn peers_data_route(&self, context: NodeId) -> Option> { match &self.context { Some(ctx) => { if ctx.valid_data_routes { @@ -266,7 +266,7 @@ impl Resource { // #[inline(always)] // pub(crate) fn routers_query_route( // &self, - // context: RoutingContext, + // context: NodeId, // ) -> Option> { // match &self.context { // Some(ctx) => { @@ -284,7 +284,7 @@ impl Resource { // #[inline(always)] // pub(crate) fn peers_query_route( // &self, - // context: RoutingContext, + // context: NodeId, // ) -> Option> { // match &self.context { // Some(ctx) => { diff --git a/zenoh/src/net/routing/hat/client/mod.rs b/zenoh/src/net/routing/hat/client/mod.rs index 9e861741df..4ae063003e 100644 --- a/zenoh/src/net/routing/hat/client/mod.rs +++ b/zenoh/src/net/routing/hat/client/mod.rs @@ -29,7 +29,7 @@ use self::{ use super::{ super::dispatcher::{ face::FaceState, - tables::{Resource, RoutingContext, RoutingExpr, Tables, TablesLock}, + tables::{NodeId, Resource, RoutingExpr, Tables, TablesLock}, }, HatBaseTrait, HatTrait, }; @@ -588,8 +588,8 @@ impl HatBaseTrait for HatCode { &self, tables: &Tables, face: &FaceState, - routing_context: RoutingContext, - ) -> RoutingContext { + routing_context: NodeId, + ) -> NodeId { match tables.whatami { WhatAmI::Router => match face.whatami { WhatAmI::Router => hat!(tables) @@ -779,7 +779,7 @@ impl HatFace { } } -fn get_router(tables: &Tables, face: &Arc, nodeid: RoutingContext) -> Option { +fn get_router(tables: &Tables, face: &Arc, nodeid: NodeId) -> Option { match hat!(tables) .routers_net .as_ref() @@ -806,7 +806,7 @@ fn get_router(tables: &Tables, face: &Arc, nodeid: RoutingContext) -> } } -fn get_peer(tables: &Tables, face: &Arc, nodeid: RoutingContext) -> Option { +fn get_peer(tables: &Tables, face: &Arc, nodeid: NodeId) -> Option { match hat!(tables) .peers_net .as_ref() diff --git a/zenoh/src/net/routing/hat/client/network.rs b/zenoh/src/net/routing/hat/client/network.rs index 61b3f6c78a..421850dc87 100644 --- a/zenoh/src/net/routing/hat/client/network.rs +++ b/zenoh/src/net/routing/hat/client/network.rs @@ -13,7 +13,7 @@ // use crate::net::codec::Zenoh080Routing; use crate::net::protocol::linkstate::{LinkState, LinkStateList}; -use crate::net::routing::dispatcher::tables::RoutingContext; +use crate::net::routing::dispatcher::tables::NodeId; use 
crate::net::runtime::Runtime; use async_std::task; use petgraph::graph::NodeIndex; @@ -191,11 +191,7 @@ impl Network { } #[inline] - pub(super) fn get_local_context( - &self, - context: RoutingContext, - link_id: usize, - ) -> RoutingContext { + pub(super) fn get_local_context(&self, context: NodeId, link_id: usize) -> NodeId { match self.get_link(link_id) { Some(link) => match link.get_local_psid(&(context as u64)) { Some(psid) => (*psid).try_into().unwrap_or(0), diff --git a/zenoh/src/net/routing/hat/client/pubsub.rs b/zenoh/src/net/routing/hat/client/pubsub.rs index 9fc46ef88c..f18d778819 100644 --- a/zenoh/src/net/routing/hat/client/pubsub.rs +++ b/zenoh/src/net/routing/hat/client/pubsub.rs @@ -16,7 +16,7 @@ use super::{face_hat, face_hat_mut, hat, hat_mut, res_hat, res_hat_mut}; use super::{get_peer, get_router, HatCode, HatContext, HatFace, HatTables}; use crate::net::routing::dispatcher::face::FaceState; use crate::net::routing::dispatcher::pubsub::*; -use crate::net::routing::dispatcher::resource::{Resource, RoutingContext, SessionContext}; +use crate::net::routing::dispatcher::resource::{NodeId, Resource, SessionContext}; use crate::net::routing::dispatcher::tables::{DataRoutes, PullCaches, Route, RoutingExpr}; use crate::net::routing::dispatcher::tables::{Tables, TablesLock}; use crate::net::routing::hat::HatPubSubTrait; @@ -44,7 +44,7 @@ fn send_sourced_subscription_to_net_childs( res: &Arc, src_face: Option<&Arc>, sub_info: &SubscriberInfo, - routing_context: RoutingContext, + routing_context: NodeId, ) { for child in childs { if net.graph.contains_node(*child) { @@ -165,7 +165,7 @@ fn propagate_sourced_subscription( res, src_face, sub_info, - tree_sid.index() as RoutingContext, + tree_sid.index() as NodeId, ); } else { log::trace!( @@ -551,7 +551,7 @@ fn send_forget_sourced_subscription_to_net_childs( childs: &[NodeIndex], res: &Arc, src_face: Option<&Arc>, - routing_context: Option, + routing_context: Option, ) { for child in childs { if net.graph.contains_node(*child) { @@ -654,7 +654,7 @@ fn propagate_forget_sourced_subscription( &net.trees[tree_sid.index()].childs, res, src_face, - Some(tree_sid.index() as RoutingContext), + Some(tree_sid.index() as NodeId), ); } else { log::trace!( @@ -1105,7 +1105,7 @@ pub(super) fn pubsub_tree_change( res, None, &sub_info, - tree_sid as RoutingContext, + tree_sid as NodeId, ); } } @@ -1202,7 +1202,7 @@ fn insert_faces_for_subs( expr: &RoutingExpr, tables: &Tables, net: &Network, - source: RoutingContext, + source: NodeId, subs: &HashSet, ) { if net.trees.len() > source as usize { @@ -1236,7 +1236,7 @@ impl HatPubSubTrait for HatCode { face: &mut Arc, expr: &WireExpr, sub_info: &SubscriberInfo, - node_id: RoutingContext, + node_id: NodeId, ) { let rtables = zread!(tables.tables); match (rtables.whatami, face.whatami) { @@ -1265,7 +1265,7 @@ impl HatPubSubTrait for HatCode { tables: &TablesLock, face: &mut Arc, expr: &WireExpr, - node_id: RoutingContext, + node_id: NodeId, ) { let rtables = zread!(tables.tables); match (rtables.whatami, face.whatami) { @@ -1293,7 +1293,7 @@ impl HatPubSubTrait for HatCode { &self, tables: &Tables, expr: &mut RoutingExpr, - source: RoutingContext, + source: NodeId, source_type: WhatAmI, ) -> Arc { let mut route = HashMap::new(); @@ -1333,7 +1333,7 @@ impl HatPubSubTrait for HatCode { let net = hat!(tables).routers_net.as_ref().unwrap(); let router_source = match source_type { WhatAmI::Router => source, - _ => net.idx.index() as RoutingContext, + _ => net.idx.index() as NodeId, }; insert_faces_for_subs( 
&mut route, @@ -1351,7 +1351,7 @@ impl HatPubSubTrait for HatCode { let net = hat!(tables).peers_net.as_ref().unwrap(); let peer_source = match source_type { WhatAmI::Peer => source, - _ => net.idx.index() as RoutingContext, + _ => net.idx.index() as NodeId, }; insert_faces_for_subs( &mut route, @@ -1368,7 +1368,7 @@ impl HatPubSubTrait for HatCode { let net = hat!(tables).peers_net.as_ref().unwrap(); let peer_source = match source_type { WhatAmI::Router | WhatAmI::Peer => source, - _ => net.idx.index() as RoutingContext, + _ => net.idx.index() as NodeId, }; insert_faces_for_subs( &mut route, @@ -1394,11 +1394,7 @@ impl HatPubSubTrait for HatCode { route.entry(*sid).or_insert_with(|| { let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, *sid); - ( - context.face.clone(), - key_expr.to_owned(), - RoutingContext::default(), - ) + (context.face.clone(), key_expr.to_owned(), NodeId::default()) }); } } @@ -1411,7 +1407,7 @@ impl HatPubSubTrait for HatCode { ( mcast_group.clone(), expr.full_expr().to_string().into(), - RoutingContext::default(), + NodeId::default(), ), ); } @@ -1471,17 +1467,13 @@ impl HatPubSubTrait for HatCode { routes.routers_data_routes[idx.index()] = self.compute_data_route( tables, &mut expr, - idx.index() as RoutingContext, + idx.index() as NodeId, WhatAmI::Router, ); } - routes.peer_data_route = Some(self.compute_data_route( - tables, - &mut expr, - RoutingContext::default(), - WhatAmI::Peer, - )); + routes.peer_data_route = + Some(self.compute_data_route(tables, &mut expr, NodeId::default(), WhatAmI::Peer)); } if (tables.whatami == WhatAmI::Router || tables.whatami == WhatAmI::Peer) && hat!(tables).full_net(WhatAmI::Peer) @@ -1502,7 +1494,7 @@ impl HatPubSubTrait for HatCode { routes.peers_data_routes[idx.index()] = self.compute_data_route( tables, &mut expr, - idx.index() as RoutingContext, + idx.index() as NodeId, WhatAmI::Peer, ); } @@ -1511,21 +1503,17 @@ impl HatPubSubTrait for HatCode { routes.client_data_route = Some(self.compute_data_route( tables, &mut expr, - RoutingContext::default(), + NodeId::default(), WhatAmI::Client, )); - routes.peer_data_route = Some(self.compute_data_route( - tables, - &mut expr, - RoutingContext::default(), - WhatAmI::Peer, - )); + routes.peer_data_route = + Some(self.compute_data_route(tables, &mut expr, NodeId::default(), WhatAmI::Peer)); } if tables.whatami == WhatAmI::Client { routes.client_data_route = Some(self.compute_data_route( tables, &mut expr, - RoutingContext::default(), + NodeId::default(), WhatAmI::Client, )); } @@ -1555,7 +1543,7 @@ impl HatPubSubTrait for HatCode { routers_data_routes[idx.index()] = self.compute_data_route( tables, &mut expr, - idx.index() as RoutingContext, + idx.index() as NodeId, WhatAmI::Router, ); } @@ -1563,7 +1551,7 @@ impl HatPubSubTrait for HatCode { res_mut.context_mut().peer_data_route = Some(self.compute_data_route( tables, &mut expr, - RoutingContext::default(), + NodeId::default(), WhatAmI::Peer, )); } @@ -1586,7 +1574,7 @@ impl HatPubSubTrait for HatCode { peers_data_routes[idx.index()] = self.compute_data_route( tables, &mut expr, - idx.index() as RoutingContext, + idx.index() as NodeId, WhatAmI::Peer, ); } @@ -1595,13 +1583,13 @@ impl HatPubSubTrait for HatCode { res_mut.context_mut().client_data_route = Some(self.compute_data_route( tables, &mut expr, - RoutingContext::default(), + NodeId::default(), WhatAmI::Client, )); res_mut.context_mut().peer_data_route = Some(self.compute_data_route( tables, &mut expr, - RoutingContext::default(), + NodeId::default(), 
WhatAmI::Peer, )); } @@ -1609,7 +1597,7 @@ impl HatPubSubTrait for HatCode { res_mut.context_mut().client_data_route = Some(self.compute_data_route( tables, &mut expr, - RoutingContext::default(), + NodeId::default(), WhatAmI::Client, )); } diff --git a/zenoh/src/net/routing/hat/client/queries.rs b/zenoh/src/net/routing/hat/client/queries.rs index 9b729caade..a6302abc4e 100644 --- a/zenoh/src/net/routing/hat/client/queries.rs +++ b/zenoh/src/net/routing/hat/client/queries.rs @@ -16,7 +16,7 @@ use super::{face_hat, face_hat_mut, hat, hat_mut, res_hat, res_hat_mut}; use super::{get_peer, get_router, HatCode, HatContext, HatFace, HatTables}; use crate::net::routing::dispatcher::face::FaceState; use crate::net::routing::dispatcher::queries::*; -use crate::net::routing::dispatcher::resource::{Resource, RoutingContext, SessionContext}; +use crate::net::routing::dispatcher::resource::{NodeId, Resource, SessionContext}; use crate::net::routing::dispatcher::tables::{ QueryRoutes, QueryTargetQabl, QueryTargetQablSet, RoutingExpr, }; @@ -197,7 +197,7 @@ fn send_sourced_queryable_to_net_childs( res: &Arc, qabl_info: &QueryableInfo, src_face: Option<&mut Arc>, - routing_context: RoutingContext, + routing_context: NodeId, ) { for child in childs { if net.graph.contains_node(*child) { @@ -308,7 +308,7 @@ fn propagate_sourced_queryable( res, qabl_info, src_face, - tree_sid.index() as RoutingContext, + tree_sid.index() as NodeId, ); } else { log::trace!( @@ -667,7 +667,7 @@ fn send_forget_sourced_queryable_to_net_childs( childs: &[NodeIndex], res: &Arc, src_face: Option<&Arc>, - routing_context: RoutingContext, + routing_context: NodeId, ) { for child in childs { if net.graph.contains_node(*child) { @@ -771,7 +771,7 @@ fn propagate_forget_sourced_queryable( &net.trees[tree_sid.index()].childs, res, src_face, - tree_sid.index() as RoutingContext, + tree_sid.index() as NodeId, ); } else { log::trace!( @@ -1299,7 +1299,7 @@ pub(super) fn queries_tree_change( res, qabl_info, None, - tree_sid as RoutingContext, + tree_sid as NodeId, ); } } @@ -1318,7 +1318,7 @@ fn insert_target_for_qabls( expr: &mut RoutingExpr, tables: &Tables, net: &Network, - source: RoutingContext, + source: NodeId, qabls: &HashMap, complete: bool, ) { @@ -1365,7 +1365,7 @@ impl HatQueriesTrait for HatCode { face: &mut Arc, expr: &WireExpr, qabl_info: &QueryableInfo, - node_id: RoutingContext, + node_id: NodeId, ) { let rtables = zread!(tables.tables); match (rtables.whatami, face.whatami) { @@ -1394,7 +1394,7 @@ impl HatQueriesTrait for HatCode { tables: &TablesLock, face: &mut Arc, expr: &WireExpr, - node_id: RoutingContext, + node_id: NodeId, ) { let rtables = zread!(tables.tables); match (rtables.whatami, face.whatami) { @@ -1422,7 +1422,7 @@ impl HatQueriesTrait for HatCode { &self, tables: &Tables, expr: &mut RoutingExpr, - source: RoutingContext, + source: NodeId, source_type: WhatAmI, ) -> Arc { let mut route = QueryTargetQablSet::new(); @@ -1463,7 +1463,7 @@ impl HatQueriesTrait for HatCode { let net = hat!(tables).routers_net.as_ref().unwrap(); let router_source = match source_type { WhatAmI::Router => source, - _ => net.idx.index() as RoutingContext, + _ => net.idx.index() as NodeId, }; insert_target_for_qabls( &mut route, @@ -1482,7 +1482,7 @@ impl HatQueriesTrait for HatCode { let net = hat!(tables).peers_net.as_ref().unwrap(); let peer_source = match source_type { WhatAmI::Peer => source, - _ => net.idx.index() as RoutingContext, + _ => net.idx.index() as NodeId, }; insert_target_for_qabls( &mut route, @@ -1500,7 +1500,7 @@ 
impl HatQueriesTrait for HatCode { let net = hat!(tables).peers_net.as_ref().unwrap(); let peer_source = match source_type { WhatAmI::Router | WhatAmI::Peer => source, - _ => net.idx.index() as RoutingContext, + _ => net.idx.index() as NodeId, }; insert_target_for_qabls( &mut route, @@ -1528,7 +1528,7 @@ impl HatQueriesTrait for HatCode { direction: ( context.face.clone(), key_expr.to_owned(), - RoutingContext::default(), + NodeId::default(), ), complete: if complete { qabl_info.complete as u64 @@ -1613,17 +1613,13 @@ impl HatQueriesTrait for HatCode { routes.routers_query_routes[idx.index()] = self.compute_query_route( tables, &mut expr, - idx.index() as RoutingContext, + idx.index() as NodeId, WhatAmI::Router, ); } - routes.peer_query_route = Some(self.compute_query_route( - tables, - &mut expr, - RoutingContext::default(), - WhatAmI::Peer, - )); + routes.peer_query_route = + Some(self.compute_query_route(tables, &mut expr, NodeId::default(), WhatAmI::Peer)); } if (tables.whatami == WhatAmI::Router || tables.whatami == WhatAmI::Peer) && hat!(tables).full_net(WhatAmI::Peer) @@ -1644,7 +1640,7 @@ impl HatQueriesTrait for HatCode { routes.peers_query_routes[idx.index()] = self.compute_query_route( tables, &mut expr, - idx.index() as RoutingContext, + idx.index() as NodeId, WhatAmI::Peer, ); } @@ -1653,21 +1649,17 @@ impl HatQueriesTrait for HatCode { routes.client_query_route = Some(self.compute_query_route( tables, &mut expr, - RoutingContext::default(), + NodeId::default(), WhatAmI::Client, )); - routes.peer_query_route = Some(self.compute_query_route( - tables, - &mut expr, - RoutingContext::default(), - WhatAmI::Peer, - )); + routes.peer_query_route = + Some(self.compute_query_route(tables, &mut expr, NodeId::default(), WhatAmI::Peer)); } if tables.whatami == WhatAmI::Client { routes.client_query_route = Some(self.compute_query_route( tables, &mut expr, - RoutingContext::default(), + NodeId::default(), WhatAmI::Client, )); } @@ -1697,7 +1689,7 @@ impl HatQueriesTrait for HatCode { routers_query_routes[idx.index()] = self.compute_query_route( tables, &mut expr, - idx.index() as RoutingContext, + idx.index() as NodeId, WhatAmI::Router, ); } @@ -1705,7 +1697,7 @@ impl HatQueriesTrait for HatCode { res_mut.context_mut().peer_query_route = Some(self.compute_query_route( tables, &mut expr, - RoutingContext::default(), + NodeId::default(), WhatAmI::Peer, )); } @@ -1729,7 +1721,7 @@ impl HatQueriesTrait for HatCode { peers_query_routes[idx.index()] = self.compute_query_route( tables, &mut expr, - idx.index() as RoutingContext, + idx.index() as NodeId, WhatAmI::Peer, ); } @@ -1738,13 +1730,13 @@ impl HatQueriesTrait for HatCode { res_mut.context_mut().client_query_route = Some(self.compute_query_route( tables, &mut expr, - RoutingContext::default(), + NodeId::default(), WhatAmI::Client, )); res_mut.context_mut().peer_query_route = Some(self.compute_query_route( tables, &mut expr, - RoutingContext::default(), + NodeId::default(), WhatAmI::Peer, )); } @@ -1752,7 +1744,7 @@ impl HatQueriesTrait for HatCode { res_mut.context_mut().client_query_route = Some(self.compute_query_route( tables, &mut expr, - RoutingContext::default(), + NodeId::default(), WhatAmI::Client, )); } diff --git a/zenoh/src/net/routing/hat/mod.rs b/zenoh/src/net/routing/hat/mod.rs index 3134721312..eccd59a0aa 100644 --- a/zenoh/src/net/routing/hat/mod.rs +++ b/zenoh/src/net/routing/hat/mod.rs @@ -20,7 +20,7 @@ use super::dispatcher::{ face::{Face, FaceState}, tables::{ - DataRoutes, PullCaches, QueryRoutes, QueryTargetQablSet, 
Resource, Route, RoutingContext, + DataRoutes, NodeId, PullCaches, QueryRoutes, QueryTargetQablSet, Resource, Route, RoutingExpr, Tables, TablesLock, }, }; @@ -97,8 +97,8 @@ pub(crate) trait HatBaseTrait { &self, tables: &Tables, face: &FaceState, - routing_context: RoutingContext, - ) -> RoutingContext; + routing_context: NodeId, + ) -> NodeId; fn ingress_filter(&self, tables: &Tables, face: &FaceState, expr: &mut RoutingExpr) -> bool; @@ -127,21 +127,21 @@ pub(crate) trait HatPubSubTrait { face: &mut Arc, expr: &WireExpr, sub_info: &SubscriberInfo, - node_id: RoutingContext, + node_id: NodeId, ); fn forget_subscription( &self, tables: &TablesLock, face: &mut Arc, expr: &WireExpr, - node_id: RoutingContext, + node_id: NodeId, ); fn compute_data_route( &self, tables: &Tables, expr: &mut RoutingExpr, - source: RoutingContext, + source: NodeId, source_type: WhatAmI, ) -> Arc; @@ -159,20 +159,20 @@ pub(crate) trait HatQueriesTrait { face: &mut Arc, expr: &WireExpr, qabl_info: &QueryableInfo, - node_id: RoutingContext, + node_id: NodeId, ); fn forget_queryable( &self, tables: &TablesLock, face: &mut Arc, expr: &WireExpr, - node_id: RoutingContext, + node_id: NodeId, ); fn compute_query_route( &self, tables: &Tables, expr: &mut RoutingExpr, - source: RoutingContext, + source: NodeId, source_type: WhatAmI, ) -> Arc; fn compute_query_routes(&self, tables: &mut Tables, res: &mut Arc); diff --git a/zenoh/src/net/routing/hat/peer/mod.rs b/zenoh/src/net/routing/hat/peer/mod.rs index 4d20207cca..bedeffe396 100644 --- a/zenoh/src/net/routing/hat/peer/mod.rs +++ b/zenoh/src/net/routing/hat/peer/mod.rs @@ -29,7 +29,7 @@ use self::{ use super::{ super::dispatcher::{ face::FaceState, - tables::{Resource, RoutingContext, RoutingExpr, Tables, TablesLock}, + tables::{NodeId, Resource, RoutingExpr, Tables, TablesLock}, }, HatBaseTrait, HatTrait, }; @@ -589,8 +589,8 @@ impl HatBaseTrait for HatCode { &self, tables: &Tables, face: &FaceState, - routing_context: RoutingContext, - ) -> RoutingContext { + routing_context: NodeId, + ) -> NodeId { match tables.whatami { WhatAmI::Router => match face.whatami { WhatAmI::Router => hat!(tables) @@ -780,7 +780,7 @@ impl HatFace { } } -fn get_router(tables: &Tables, face: &Arc, nodeid: RoutingContext) -> Option { +fn get_router(tables: &Tables, face: &Arc, nodeid: NodeId) -> Option { match hat!(tables) .routers_net .as_ref() @@ -807,7 +807,7 @@ fn get_router(tables: &Tables, face: &Arc, nodeid: RoutingContext) -> } } -fn get_peer(tables: &Tables, face: &Arc, nodeid: RoutingContext) -> Option { +fn get_peer(tables: &Tables, face: &Arc, nodeid: NodeId) -> Option { match hat!(tables) .peers_net .as_ref() diff --git a/zenoh/src/net/routing/hat/peer/network.rs b/zenoh/src/net/routing/hat/peer/network.rs index 61b3f6c78a..421850dc87 100644 --- a/zenoh/src/net/routing/hat/peer/network.rs +++ b/zenoh/src/net/routing/hat/peer/network.rs @@ -13,7 +13,7 @@ // use crate::net::codec::Zenoh080Routing; use crate::net::protocol::linkstate::{LinkState, LinkStateList}; -use crate::net::routing::dispatcher::tables::RoutingContext; +use crate::net::routing::dispatcher::tables::NodeId; use crate::net::runtime::Runtime; use async_std::task; use petgraph::graph::NodeIndex; @@ -191,11 +191,7 @@ impl Network { } #[inline] - pub(super) fn get_local_context( - &self, - context: RoutingContext, - link_id: usize, - ) -> RoutingContext { + pub(super) fn get_local_context(&self, context: NodeId, link_id: usize) -> NodeId { match self.get_link(link_id) { Some(link) => match link.get_local_psid(&(context 
as u64)) { Some(psid) => (*psid).try_into().unwrap_or(0), diff --git a/zenoh/src/net/routing/hat/peer/pubsub.rs b/zenoh/src/net/routing/hat/peer/pubsub.rs index 9fc46ef88c..f18d778819 100644 --- a/zenoh/src/net/routing/hat/peer/pubsub.rs +++ b/zenoh/src/net/routing/hat/peer/pubsub.rs @@ -16,7 +16,7 @@ use super::{face_hat, face_hat_mut, hat, hat_mut, res_hat, res_hat_mut}; use super::{get_peer, get_router, HatCode, HatContext, HatFace, HatTables}; use crate::net::routing::dispatcher::face::FaceState; use crate::net::routing::dispatcher::pubsub::*; -use crate::net::routing::dispatcher::resource::{Resource, RoutingContext, SessionContext}; +use crate::net::routing::dispatcher::resource::{NodeId, Resource, SessionContext}; use crate::net::routing::dispatcher::tables::{DataRoutes, PullCaches, Route, RoutingExpr}; use crate::net::routing::dispatcher::tables::{Tables, TablesLock}; use crate::net::routing::hat::HatPubSubTrait; @@ -44,7 +44,7 @@ fn send_sourced_subscription_to_net_childs( res: &Arc, src_face: Option<&Arc>, sub_info: &SubscriberInfo, - routing_context: RoutingContext, + routing_context: NodeId, ) { for child in childs { if net.graph.contains_node(*child) { @@ -165,7 +165,7 @@ fn propagate_sourced_subscription( res, src_face, sub_info, - tree_sid.index() as RoutingContext, + tree_sid.index() as NodeId, ); } else { log::trace!( @@ -551,7 +551,7 @@ fn send_forget_sourced_subscription_to_net_childs( childs: &[NodeIndex], res: &Arc, src_face: Option<&Arc>, - routing_context: Option, + routing_context: Option, ) { for child in childs { if net.graph.contains_node(*child) { @@ -654,7 +654,7 @@ fn propagate_forget_sourced_subscription( &net.trees[tree_sid.index()].childs, res, src_face, - Some(tree_sid.index() as RoutingContext), + Some(tree_sid.index() as NodeId), ); } else { log::trace!( @@ -1105,7 +1105,7 @@ pub(super) fn pubsub_tree_change( res, None, &sub_info, - tree_sid as RoutingContext, + tree_sid as NodeId, ); } } @@ -1202,7 +1202,7 @@ fn insert_faces_for_subs( expr: &RoutingExpr, tables: &Tables, net: &Network, - source: RoutingContext, + source: NodeId, subs: &HashSet, ) { if net.trees.len() > source as usize { @@ -1236,7 +1236,7 @@ impl HatPubSubTrait for HatCode { face: &mut Arc, expr: &WireExpr, sub_info: &SubscriberInfo, - node_id: RoutingContext, + node_id: NodeId, ) { let rtables = zread!(tables.tables); match (rtables.whatami, face.whatami) { @@ -1265,7 +1265,7 @@ impl HatPubSubTrait for HatCode { tables: &TablesLock, face: &mut Arc, expr: &WireExpr, - node_id: RoutingContext, + node_id: NodeId, ) { let rtables = zread!(tables.tables); match (rtables.whatami, face.whatami) { @@ -1293,7 +1293,7 @@ impl HatPubSubTrait for HatCode { &self, tables: &Tables, expr: &mut RoutingExpr, - source: RoutingContext, + source: NodeId, source_type: WhatAmI, ) -> Arc { let mut route = HashMap::new(); @@ -1333,7 +1333,7 @@ impl HatPubSubTrait for HatCode { let net = hat!(tables).routers_net.as_ref().unwrap(); let router_source = match source_type { WhatAmI::Router => source, - _ => net.idx.index() as RoutingContext, + _ => net.idx.index() as NodeId, }; insert_faces_for_subs( &mut route, @@ -1351,7 +1351,7 @@ impl HatPubSubTrait for HatCode { let net = hat!(tables).peers_net.as_ref().unwrap(); let peer_source = match source_type { WhatAmI::Peer => source, - _ => net.idx.index() as RoutingContext, + _ => net.idx.index() as NodeId, }; insert_faces_for_subs( &mut route, @@ -1368,7 +1368,7 @@ impl HatPubSubTrait for HatCode { let net = hat!(tables).peers_net.as_ref().unwrap(); let peer_source = 
match source_type { WhatAmI::Router | WhatAmI::Peer => source, - _ => net.idx.index() as RoutingContext, + _ => net.idx.index() as NodeId, }; insert_faces_for_subs( &mut route, @@ -1394,11 +1394,7 @@ impl HatPubSubTrait for HatCode { route.entry(*sid).or_insert_with(|| { let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, *sid); - ( - context.face.clone(), - key_expr.to_owned(), - RoutingContext::default(), - ) + (context.face.clone(), key_expr.to_owned(), NodeId::default()) }); } } @@ -1411,7 +1407,7 @@ impl HatPubSubTrait for HatCode { ( mcast_group.clone(), expr.full_expr().to_string().into(), - RoutingContext::default(), + NodeId::default(), ), ); } @@ -1471,17 +1467,13 @@ impl HatPubSubTrait for HatCode { routes.routers_data_routes[idx.index()] = self.compute_data_route( tables, &mut expr, - idx.index() as RoutingContext, + idx.index() as NodeId, WhatAmI::Router, ); } - routes.peer_data_route = Some(self.compute_data_route( - tables, - &mut expr, - RoutingContext::default(), - WhatAmI::Peer, - )); + routes.peer_data_route = + Some(self.compute_data_route(tables, &mut expr, NodeId::default(), WhatAmI::Peer)); } if (tables.whatami == WhatAmI::Router || tables.whatami == WhatAmI::Peer) && hat!(tables).full_net(WhatAmI::Peer) @@ -1502,7 +1494,7 @@ impl HatPubSubTrait for HatCode { routes.peers_data_routes[idx.index()] = self.compute_data_route( tables, &mut expr, - idx.index() as RoutingContext, + idx.index() as NodeId, WhatAmI::Peer, ); } @@ -1511,21 +1503,17 @@ impl HatPubSubTrait for HatCode { routes.client_data_route = Some(self.compute_data_route( tables, &mut expr, - RoutingContext::default(), + NodeId::default(), WhatAmI::Client, )); - routes.peer_data_route = Some(self.compute_data_route( - tables, - &mut expr, - RoutingContext::default(), - WhatAmI::Peer, - )); + routes.peer_data_route = + Some(self.compute_data_route(tables, &mut expr, NodeId::default(), WhatAmI::Peer)); } if tables.whatami == WhatAmI::Client { routes.client_data_route = Some(self.compute_data_route( tables, &mut expr, - RoutingContext::default(), + NodeId::default(), WhatAmI::Client, )); } @@ -1555,7 +1543,7 @@ impl HatPubSubTrait for HatCode { routers_data_routes[idx.index()] = self.compute_data_route( tables, &mut expr, - idx.index() as RoutingContext, + idx.index() as NodeId, WhatAmI::Router, ); } @@ -1563,7 +1551,7 @@ impl HatPubSubTrait for HatCode { res_mut.context_mut().peer_data_route = Some(self.compute_data_route( tables, &mut expr, - RoutingContext::default(), + NodeId::default(), WhatAmI::Peer, )); } @@ -1586,7 +1574,7 @@ impl HatPubSubTrait for HatCode { peers_data_routes[idx.index()] = self.compute_data_route( tables, &mut expr, - idx.index() as RoutingContext, + idx.index() as NodeId, WhatAmI::Peer, ); } @@ -1595,13 +1583,13 @@ impl HatPubSubTrait for HatCode { res_mut.context_mut().client_data_route = Some(self.compute_data_route( tables, &mut expr, - RoutingContext::default(), + NodeId::default(), WhatAmI::Client, )); res_mut.context_mut().peer_data_route = Some(self.compute_data_route( tables, &mut expr, - RoutingContext::default(), + NodeId::default(), WhatAmI::Peer, )); } @@ -1609,7 +1597,7 @@ impl HatPubSubTrait for HatCode { res_mut.context_mut().client_data_route = Some(self.compute_data_route( tables, &mut expr, - RoutingContext::default(), + NodeId::default(), WhatAmI::Client, )); } diff --git a/zenoh/src/net/routing/hat/peer/queries.rs b/zenoh/src/net/routing/hat/peer/queries.rs index 9b729caade..a6302abc4e 100644 --- a/zenoh/src/net/routing/hat/peer/queries.rs +++ 
b/zenoh/src/net/routing/hat/peer/queries.rs @@ -16,7 +16,7 @@ use super::{face_hat, face_hat_mut, hat, hat_mut, res_hat, res_hat_mut}; use super::{get_peer, get_router, HatCode, HatContext, HatFace, HatTables}; use crate::net::routing::dispatcher::face::FaceState; use crate::net::routing::dispatcher::queries::*; -use crate::net::routing::dispatcher::resource::{Resource, RoutingContext, SessionContext}; +use crate::net::routing::dispatcher::resource::{NodeId, Resource, SessionContext}; use crate::net::routing::dispatcher::tables::{ QueryRoutes, QueryTargetQabl, QueryTargetQablSet, RoutingExpr, }; @@ -197,7 +197,7 @@ fn send_sourced_queryable_to_net_childs( res: &Arc, qabl_info: &QueryableInfo, src_face: Option<&mut Arc>, - routing_context: RoutingContext, + routing_context: NodeId, ) { for child in childs { if net.graph.contains_node(*child) { @@ -308,7 +308,7 @@ fn propagate_sourced_queryable( res, qabl_info, src_face, - tree_sid.index() as RoutingContext, + tree_sid.index() as NodeId, ); } else { log::trace!( @@ -667,7 +667,7 @@ fn send_forget_sourced_queryable_to_net_childs( childs: &[NodeIndex], res: &Arc, src_face: Option<&Arc>, - routing_context: RoutingContext, + routing_context: NodeId, ) { for child in childs { if net.graph.contains_node(*child) { @@ -771,7 +771,7 @@ fn propagate_forget_sourced_queryable( &net.trees[tree_sid.index()].childs, res, src_face, - tree_sid.index() as RoutingContext, + tree_sid.index() as NodeId, ); } else { log::trace!( @@ -1299,7 +1299,7 @@ pub(super) fn queries_tree_change( res, qabl_info, None, - tree_sid as RoutingContext, + tree_sid as NodeId, ); } } @@ -1318,7 +1318,7 @@ fn insert_target_for_qabls( expr: &mut RoutingExpr, tables: &Tables, net: &Network, - source: RoutingContext, + source: NodeId, qabls: &HashMap, complete: bool, ) { @@ -1365,7 +1365,7 @@ impl HatQueriesTrait for HatCode { face: &mut Arc, expr: &WireExpr, qabl_info: &QueryableInfo, - node_id: RoutingContext, + node_id: NodeId, ) { let rtables = zread!(tables.tables); match (rtables.whatami, face.whatami) { @@ -1394,7 +1394,7 @@ impl HatQueriesTrait for HatCode { tables: &TablesLock, face: &mut Arc, expr: &WireExpr, - node_id: RoutingContext, + node_id: NodeId, ) { let rtables = zread!(tables.tables); match (rtables.whatami, face.whatami) { @@ -1422,7 +1422,7 @@ impl HatQueriesTrait for HatCode { &self, tables: &Tables, expr: &mut RoutingExpr, - source: RoutingContext, + source: NodeId, source_type: WhatAmI, ) -> Arc { let mut route = QueryTargetQablSet::new(); @@ -1463,7 +1463,7 @@ impl HatQueriesTrait for HatCode { let net = hat!(tables).routers_net.as_ref().unwrap(); let router_source = match source_type { WhatAmI::Router => source, - _ => net.idx.index() as RoutingContext, + _ => net.idx.index() as NodeId, }; insert_target_for_qabls( &mut route, @@ -1482,7 +1482,7 @@ impl HatQueriesTrait for HatCode { let net = hat!(tables).peers_net.as_ref().unwrap(); let peer_source = match source_type { WhatAmI::Peer => source, - _ => net.idx.index() as RoutingContext, + _ => net.idx.index() as NodeId, }; insert_target_for_qabls( &mut route, @@ -1500,7 +1500,7 @@ impl HatQueriesTrait for HatCode { let net = hat!(tables).peers_net.as_ref().unwrap(); let peer_source = match source_type { WhatAmI::Router | WhatAmI::Peer => source, - _ => net.idx.index() as RoutingContext, + _ => net.idx.index() as NodeId, }; insert_target_for_qabls( &mut route, @@ -1528,7 +1528,7 @@ impl HatQueriesTrait for HatCode { direction: ( context.face.clone(), key_expr.to_owned(), - RoutingContext::default(), + 
NodeId::default(), ), complete: if complete { qabl_info.complete as u64 @@ -1613,17 +1613,13 @@ impl HatQueriesTrait for HatCode { routes.routers_query_routes[idx.index()] = self.compute_query_route( tables, &mut expr, - idx.index() as RoutingContext, + idx.index() as NodeId, WhatAmI::Router, ); } - routes.peer_query_route = Some(self.compute_query_route( - tables, - &mut expr, - RoutingContext::default(), - WhatAmI::Peer, - )); + routes.peer_query_route = + Some(self.compute_query_route(tables, &mut expr, NodeId::default(), WhatAmI::Peer)); } if (tables.whatami == WhatAmI::Router || tables.whatami == WhatAmI::Peer) && hat!(tables).full_net(WhatAmI::Peer) @@ -1644,7 +1640,7 @@ impl HatQueriesTrait for HatCode { routes.peers_query_routes[idx.index()] = self.compute_query_route( tables, &mut expr, - idx.index() as RoutingContext, + idx.index() as NodeId, WhatAmI::Peer, ); } @@ -1653,21 +1649,17 @@ impl HatQueriesTrait for HatCode { routes.client_query_route = Some(self.compute_query_route( tables, &mut expr, - RoutingContext::default(), + NodeId::default(), WhatAmI::Client, )); - routes.peer_query_route = Some(self.compute_query_route( - tables, - &mut expr, - RoutingContext::default(), - WhatAmI::Peer, - )); + routes.peer_query_route = + Some(self.compute_query_route(tables, &mut expr, NodeId::default(), WhatAmI::Peer)); } if tables.whatami == WhatAmI::Client { routes.client_query_route = Some(self.compute_query_route( tables, &mut expr, - RoutingContext::default(), + NodeId::default(), WhatAmI::Client, )); } @@ -1697,7 +1689,7 @@ impl HatQueriesTrait for HatCode { routers_query_routes[idx.index()] = self.compute_query_route( tables, &mut expr, - idx.index() as RoutingContext, + idx.index() as NodeId, WhatAmI::Router, ); } @@ -1705,7 +1697,7 @@ impl HatQueriesTrait for HatCode { res_mut.context_mut().peer_query_route = Some(self.compute_query_route( tables, &mut expr, - RoutingContext::default(), + NodeId::default(), WhatAmI::Peer, )); } @@ -1729,7 +1721,7 @@ impl HatQueriesTrait for HatCode { peers_query_routes[idx.index()] = self.compute_query_route( tables, &mut expr, - idx.index() as RoutingContext, + idx.index() as NodeId, WhatAmI::Peer, ); } @@ -1738,13 +1730,13 @@ impl HatQueriesTrait for HatCode { res_mut.context_mut().client_query_route = Some(self.compute_query_route( tables, &mut expr, - RoutingContext::default(), + NodeId::default(), WhatAmI::Client, )); res_mut.context_mut().peer_query_route = Some(self.compute_query_route( tables, &mut expr, - RoutingContext::default(), + NodeId::default(), WhatAmI::Peer, )); } @@ -1752,7 +1744,7 @@ impl HatQueriesTrait for HatCode { res_mut.context_mut().client_query_route = Some(self.compute_query_route( tables, &mut expr, - RoutingContext::default(), + NodeId::default(), WhatAmI::Client, )); } diff --git a/zenoh/src/net/routing/hat/router/mod.rs b/zenoh/src/net/routing/hat/router/mod.rs index 4d20207cca..bedeffe396 100644 --- a/zenoh/src/net/routing/hat/router/mod.rs +++ b/zenoh/src/net/routing/hat/router/mod.rs @@ -29,7 +29,7 @@ use self::{ use super::{ super::dispatcher::{ face::FaceState, - tables::{Resource, RoutingContext, RoutingExpr, Tables, TablesLock}, + tables::{NodeId, Resource, RoutingExpr, Tables, TablesLock}, }, HatBaseTrait, HatTrait, }; @@ -589,8 +589,8 @@ impl HatBaseTrait for HatCode { &self, tables: &Tables, face: &FaceState, - routing_context: RoutingContext, - ) -> RoutingContext { + routing_context: NodeId, + ) -> NodeId { match tables.whatami { WhatAmI::Router => match face.whatami { WhatAmI::Router => hat!(tables) @@ 
-780,7 +780,7 @@ impl HatFace { } } -fn get_router(tables: &Tables, face: &Arc, nodeid: RoutingContext) -> Option { +fn get_router(tables: &Tables, face: &Arc, nodeid: NodeId) -> Option { match hat!(tables) .routers_net .as_ref() @@ -807,7 +807,7 @@ fn get_router(tables: &Tables, face: &Arc, nodeid: RoutingContext) -> } } -fn get_peer(tables: &Tables, face: &Arc, nodeid: RoutingContext) -> Option { +fn get_peer(tables: &Tables, face: &Arc, nodeid: NodeId) -> Option { match hat!(tables) .peers_net .as_ref() diff --git a/zenoh/src/net/routing/hat/router/network.rs b/zenoh/src/net/routing/hat/router/network.rs index 61b3f6c78a..421850dc87 100644 --- a/zenoh/src/net/routing/hat/router/network.rs +++ b/zenoh/src/net/routing/hat/router/network.rs @@ -13,7 +13,7 @@ // use crate::net::codec::Zenoh080Routing; use crate::net::protocol::linkstate::{LinkState, LinkStateList}; -use crate::net::routing::dispatcher::tables::RoutingContext; +use crate::net::routing::dispatcher::tables::NodeId; use crate::net::runtime::Runtime; use async_std::task; use petgraph::graph::NodeIndex; @@ -191,11 +191,7 @@ impl Network { } #[inline] - pub(super) fn get_local_context( - &self, - context: RoutingContext, - link_id: usize, - ) -> RoutingContext { + pub(super) fn get_local_context(&self, context: NodeId, link_id: usize) -> NodeId { match self.get_link(link_id) { Some(link) => match link.get_local_psid(&(context as u64)) { Some(psid) => (*psid).try_into().unwrap_or(0), diff --git a/zenoh/src/net/routing/hat/router/pubsub.rs b/zenoh/src/net/routing/hat/router/pubsub.rs index 9fc46ef88c..f18d778819 100644 --- a/zenoh/src/net/routing/hat/router/pubsub.rs +++ b/zenoh/src/net/routing/hat/router/pubsub.rs @@ -16,7 +16,7 @@ use super::{face_hat, face_hat_mut, hat, hat_mut, res_hat, res_hat_mut}; use super::{get_peer, get_router, HatCode, HatContext, HatFace, HatTables}; use crate::net::routing::dispatcher::face::FaceState; use crate::net::routing::dispatcher::pubsub::*; -use crate::net::routing::dispatcher::resource::{Resource, RoutingContext, SessionContext}; +use crate::net::routing::dispatcher::resource::{NodeId, Resource, SessionContext}; use crate::net::routing::dispatcher::tables::{DataRoutes, PullCaches, Route, RoutingExpr}; use crate::net::routing::dispatcher::tables::{Tables, TablesLock}; use crate::net::routing::hat::HatPubSubTrait; @@ -44,7 +44,7 @@ fn send_sourced_subscription_to_net_childs( res: &Arc, src_face: Option<&Arc>, sub_info: &SubscriberInfo, - routing_context: RoutingContext, + routing_context: NodeId, ) { for child in childs { if net.graph.contains_node(*child) { @@ -165,7 +165,7 @@ fn propagate_sourced_subscription( res, src_face, sub_info, - tree_sid.index() as RoutingContext, + tree_sid.index() as NodeId, ); } else { log::trace!( @@ -551,7 +551,7 @@ fn send_forget_sourced_subscription_to_net_childs( childs: &[NodeIndex], res: &Arc, src_face: Option<&Arc>, - routing_context: Option, + routing_context: Option, ) { for child in childs { if net.graph.contains_node(*child) { @@ -654,7 +654,7 @@ fn propagate_forget_sourced_subscription( &net.trees[tree_sid.index()].childs, res, src_face, - Some(tree_sid.index() as RoutingContext), + Some(tree_sid.index() as NodeId), ); } else { log::trace!( @@ -1105,7 +1105,7 @@ pub(super) fn pubsub_tree_change( res, None, &sub_info, - tree_sid as RoutingContext, + tree_sid as NodeId, ); } } @@ -1202,7 +1202,7 @@ fn insert_faces_for_subs( expr: &RoutingExpr, tables: &Tables, net: &Network, - source: RoutingContext, + source: NodeId, subs: &HashSet, ) { if 
net.trees.len() > source as usize { @@ -1236,7 +1236,7 @@ impl HatPubSubTrait for HatCode { face: &mut Arc, expr: &WireExpr, sub_info: &SubscriberInfo, - node_id: RoutingContext, + node_id: NodeId, ) { let rtables = zread!(tables.tables); match (rtables.whatami, face.whatami) { @@ -1265,7 +1265,7 @@ impl HatPubSubTrait for HatCode { tables: &TablesLock, face: &mut Arc, expr: &WireExpr, - node_id: RoutingContext, + node_id: NodeId, ) { let rtables = zread!(tables.tables); match (rtables.whatami, face.whatami) { @@ -1293,7 +1293,7 @@ impl HatPubSubTrait for HatCode { &self, tables: &Tables, expr: &mut RoutingExpr, - source: RoutingContext, + source: NodeId, source_type: WhatAmI, ) -> Arc { let mut route = HashMap::new(); @@ -1333,7 +1333,7 @@ impl HatPubSubTrait for HatCode { let net = hat!(tables).routers_net.as_ref().unwrap(); let router_source = match source_type { WhatAmI::Router => source, - _ => net.idx.index() as RoutingContext, + _ => net.idx.index() as NodeId, }; insert_faces_for_subs( &mut route, @@ -1351,7 +1351,7 @@ impl HatPubSubTrait for HatCode { let net = hat!(tables).peers_net.as_ref().unwrap(); let peer_source = match source_type { WhatAmI::Peer => source, - _ => net.idx.index() as RoutingContext, + _ => net.idx.index() as NodeId, }; insert_faces_for_subs( &mut route, @@ -1368,7 +1368,7 @@ impl HatPubSubTrait for HatCode { let net = hat!(tables).peers_net.as_ref().unwrap(); let peer_source = match source_type { WhatAmI::Router | WhatAmI::Peer => source, - _ => net.idx.index() as RoutingContext, + _ => net.idx.index() as NodeId, }; insert_faces_for_subs( &mut route, @@ -1394,11 +1394,7 @@ impl HatPubSubTrait for HatCode { route.entry(*sid).or_insert_with(|| { let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, *sid); - ( - context.face.clone(), - key_expr.to_owned(), - RoutingContext::default(), - ) + (context.face.clone(), key_expr.to_owned(), NodeId::default()) }); } } @@ -1411,7 +1407,7 @@ impl HatPubSubTrait for HatCode { ( mcast_group.clone(), expr.full_expr().to_string().into(), - RoutingContext::default(), + NodeId::default(), ), ); } @@ -1471,17 +1467,13 @@ impl HatPubSubTrait for HatCode { routes.routers_data_routes[idx.index()] = self.compute_data_route( tables, &mut expr, - idx.index() as RoutingContext, + idx.index() as NodeId, WhatAmI::Router, ); } - routes.peer_data_route = Some(self.compute_data_route( - tables, - &mut expr, - RoutingContext::default(), - WhatAmI::Peer, - )); + routes.peer_data_route = + Some(self.compute_data_route(tables, &mut expr, NodeId::default(), WhatAmI::Peer)); } if (tables.whatami == WhatAmI::Router || tables.whatami == WhatAmI::Peer) && hat!(tables).full_net(WhatAmI::Peer) @@ -1502,7 +1494,7 @@ impl HatPubSubTrait for HatCode { routes.peers_data_routes[idx.index()] = self.compute_data_route( tables, &mut expr, - idx.index() as RoutingContext, + idx.index() as NodeId, WhatAmI::Peer, ); } @@ -1511,21 +1503,17 @@ impl HatPubSubTrait for HatCode { routes.client_data_route = Some(self.compute_data_route( tables, &mut expr, - RoutingContext::default(), + NodeId::default(), WhatAmI::Client, )); - routes.peer_data_route = Some(self.compute_data_route( - tables, - &mut expr, - RoutingContext::default(), - WhatAmI::Peer, - )); + routes.peer_data_route = + Some(self.compute_data_route(tables, &mut expr, NodeId::default(), WhatAmI::Peer)); } if tables.whatami == WhatAmI::Client { routes.client_data_route = Some(self.compute_data_route( tables, &mut expr, - RoutingContext::default(), + NodeId::default(), WhatAmI::Client, )); } @@ 
-1555,7 +1543,7 @@ impl HatPubSubTrait for HatCode { routers_data_routes[idx.index()] = self.compute_data_route( tables, &mut expr, - idx.index() as RoutingContext, + idx.index() as NodeId, WhatAmI::Router, ); } @@ -1563,7 +1551,7 @@ impl HatPubSubTrait for HatCode { res_mut.context_mut().peer_data_route = Some(self.compute_data_route( tables, &mut expr, - RoutingContext::default(), + NodeId::default(), WhatAmI::Peer, )); } @@ -1586,7 +1574,7 @@ impl HatPubSubTrait for HatCode { peers_data_routes[idx.index()] = self.compute_data_route( tables, &mut expr, - idx.index() as RoutingContext, + idx.index() as NodeId, WhatAmI::Peer, ); } @@ -1595,13 +1583,13 @@ impl HatPubSubTrait for HatCode { res_mut.context_mut().client_data_route = Some(self.compute_data_route( tables, &mut expr, - RoutingContext::default(), + NodeId::default(), WhatAmI::Client, )); res_mut.context_mut().peer_data_route = Some(self.compute_data_route( tables, &mut expr, - RoutingContext::default(), + NodeId::default(), WhatAmI::Peer, )); } @@ -1609,7 +1597,7 @@ impl HatPubSubTrait for HatCode { res_mut.context_mut().client_data_route = Some(self.compute_data_route( tables, &mut expr, - RoutingContext::default(), + NodeId::default(), WhatAmI::Client, )); } diff --git a/zenoh/src/net/routing/hat/router/queries.rs b/zenoh/src/net/routing/hat/router/queries.rs index 9b729caade..a6302abc4e 100644 --- a/zenoh/src/net/routing/hat/router/queries.rs +++ b/zenoh/src/net/routing/hat/router/queries.rs @@ -16,7 +16,7 @@ use super::{face_hat, face_hat_mut, hat, hat_mut, res_hat, res_hat_mut}; use super::{get_peer, get_router, HatCode, HatContext, HatFace, HatTables}; use crate::net::routing::dispatcher::face::FaceState; use crate::net::routing::dispatcher::queries::*; -use crate::net::routing::dispatcher::resource::{Resource, RoutingContext, SessionContext}; +use crate::net::routing::dispatcher::resource::{NodeId, Resource, SessionContext}; use crate::net::routing::dispatcher::tables::{ QueryRoutes, QueryTargetQabl, QueryTargetQablSet, RoutingExpr, }; @@ -197,7 +197,7 @@ fn send_sourced_queryable_to_net_childs( res: &Arc, qabl_info: &QueryableInfo, src_face: Option<&mut Arc>, - routing_context: RoutingContext, + routing_context: NodeId, ) { for child in childs { if net.graph.contains_node(*child) { @@ -308,7 +308,7 @@ fn propagate_sourced_queryable( res, qabl_info, src_face, - tree_sid.index() as RoutingContext, + tree_sid.index() as NodeId, ); } else { log::trace!( @@ -667,7 +667,7 @@ fn send_forget_sourced_queryable_to_net_childs( childs: &[NodeIndex], res: &Arc, src_face: Option<&Arc>, - routing_context: RoutingContext, + routing_context: NodeId, ) { for child in childs { if net.graph.contains_node(*child) { @@ -771,7 +771,7 @@ fn propagate_forget_sourced_queryable( &net.trees[tree_sid.index()].childs, res, src_face, - tree_sid.index() as RoutingContext, + tree_sid.index() as NodeId, ); } else { log::trace!( @@ -1299,7 +1299,7 @@ pub(super) fn queries_tree_change( res, qabl_info, None, - tree_sid as RoutingContext, + tree_sid as NodeId, ); } } @@ -1318,7 +1318,7 @@ fn insert_target_for_qabls( expr: &mut RoutingExpr, tables: &Tables, net: &Network, - source: RoutingContext, + source: NodeId, qabls: &HashMap, complete: bool, ) { @@ -1365,7 +1365,7 @@ impl HatQueriesTrait for HatCode { face: &mut Arc, expr: &WireExpr, qabl_info: &QueryableInfo, - node_id: RoutingContext, + node_id: NodeId, ) { let rtables = zread!(tables.tables); match (rtables.whatami, face.whatami) { @@ -1394,7 +1394,7 @@ impl HatQueriesTrait for HatCode { tables: 
&TablesLock, face: &mut Arc, expr: &WireExpr, - node_id: RoutingContext, + node_id: NodeId, ) { let rtables = zread!(tables.tables); match (rtables.whatami, face.whatami) { @@ -1422,7 +1422,7 @@ impl HatQueriesTrait for HatCode { &self, tables: &Tables, expr: &mut RoutingExpr, - source: RoutingContext, + source: NodeId, source_type: WhatAmI, ) -> Arc { let mut route = QueryTargetQablSet::new(); @@ -1463,7 +1463,7 @@ impl HatQueriesTrait for HatCode { let net = hat!(tables).routers_net.as_ref().unwrap(); let router_source = match source_type { WhatAmI::Router => source, - _ => net.idx.index() as RoutingContext, + _ => net.idx.index() as NodeId, }; insert_target_for_qabls( &mut route, @@ -1482,7 +1482,7 @@ impl HatQueriesTrait for HatCode { let net = hat!(tables).peers_net.as_ref().unwrap(); let peer_source = match source_type { WhatAmI::Peer => source, - _ => net.idx.index() as RoutingContext, + _ => net.idx.index() as NodeId, }; insert_target_for_qabls( &mut route, @@ -1500,7 +1500,7 @@ impl HatQueriesTrait for HatCode { let net = hat!(tables).peers_net.as_ref().unwrap(); let peer_source = match source_type { WhatAmI::Router | WhatAmI::Peer => source, - _ => net.idx.index() as RoutingContext, + _ => net.idx.index() as NodeId, }; insert_target_for_qabls( &mut route, @@ -1528,7 +1528,7 @@ impl HatQueriesTrait for HatCode { direction: ( context.face.clone(), key_expr.to_owned(), - RoutingContext::default(), + NodeId::default(), ), complete: if complete { qabl_info.complete as u64 @@ -1613,17 +1613,13 @@ impl HatQueriesTrait for HatCode { routes.routers_query_routes[idx.index()] = self.compute_query_route( tables, &mut expr, - idx.index() as RoutingContext, + idx.index() as NodeId, WhatAmI::Router, ); } - routes.peer_query_route = Some(self.compute_query_route( - tables, - &mut expr, - RoutingContext::default(), - WhatAmI::Peer, - )); + routes.peer_query_route = + Some(self.compute_query_route(tables, &mut expr, NodeId::default(), WhatAmI::Peer)); } if (tables.whatami == WhatAmI::Router || tables.whatami == WhatAmI::Peer) && hat!(tables).full_net(WhatAmI::Peer) @@ -1644,7 +1640,7 @@ impl HatQueriesTrait for HatCode { routes.peers_query_routes[idx.index()] = self.compute_query_route( tables, &mut expr, - idx.index() as RoutingContext, + idx.index() as NodeId, WhatAmI::Peer, ); } @@ -1653,21 +1649,17 @@ impl HatQueriesTrait for HatCode { routes.client_query_route = Some(self.compute_query_route( tables, &mut expr, - RoutingContext::default(), + NodeId::default(), WhatAmI::Client, )); - routes.peer_query_route = Some(self.compute_query_route( - tables, - &mut expr, - RoutingContext::default(), - WhatAmI::Peer, - )); + routes.peer_query_route = + Some(self.compute_query_route(tables, &mut expr, NodeId::default(), WhatAmI::Peer)); } if tables.whatami == WhatAmI::Client { routes.client_query_route = Some(self.compute_query_route( tables, &mut expr, - RoutingContext::default(), + NodeId::default(), WhatAmI::Client, )); } @@ -1697,7 +1689,7 @@ impl HatQueriesTrait for HatCode { routers_query_routes[idx.index()] = self.compute_query_route( tables, &mut expr, - idx.index() as RoutingContext, + idx.index() as NodeId, WhatAmI::Router, ); } @@ -1705,7 +1697,7 @@ impl HatQueriesTrait for HatCode { res_mut.context_mut().peer_query_route = Some(self.compute_query_route( tables, &mut expr, - RoutingContext::default(), + NodeId::default(), WhatAmI::Peer, )); } @@ -1729,7 +1721,7 @@ impl HatQueriesTrait for HatCode { peers_query_routes[idx.index()] = self.compute_query_route( tables, &mut expr, - idx.index() as 
RoutingContext, + idx.index() as NodeId, WhatAmI::Peer, ); } @@ -1738,13 +1730,13 @@ impl HatQueriesTrait for HatCode { res_mut.context_mut().client_query_route = Some(self.compute_query_route( tables, &mut expr, - RoutingContext::default(), + NodeId::default(), WhatAmI::Client, )); res_mut.context_mut().peer_query_route = Some(self.compute_query_route( tables, &mut expr, - RoutingContext::default(), + NodeId::default(), WhatAmI::Peer, )); } @@ -1752,7 +1744,7 @@ impl HatQueriesTrait for HatCode { res_mut.context_mut().client_query_route = Some(self.compute_query_route( tables, &mut expr, - RoutingContext::default(), + NodeId::default(), WhatAmI::Client, )); } From a4a672e01d1c0f79cd83b0bf691318b0e199ee7b Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Fri, 1 Dec 2023 09:53:50 +0100 Subject: [PATCH 017/122] Add RoutingContext and LoggerInterceptor --- zenoh/src/net/primitives/demux.rs | 45 ++- zenoh/src/net/primitives/mod.rs | 30 ++ zenoh/src/net/primitives/mux.rs | 359 +++++++++++++++++-- zenoh/src/net/routing/dispatcher/face.rs | 7 +- zenoh/src/net/routing/dispatcher/pubsub.rs | 69 ++-- zenoh/src/net/routing/dispatcher/queries.rs | 137 ++++--- zenoh/src/net/routing/dispatcher/resource.rs | 23 +- zenoh/src/net/routing/hat/client/pubsub.rs | 303 +++++++++------- zenoh/src/net/routing/hat/client/queries.rs | 251 +++++++------ zenoh/src/net/routing/hat/peer/mod.rs | 1 - zenoh/src/net/routing/hat/peer/pubsub.rs | 303 +++++++++------- zenoh/src/net/routing/hat/peer/queries.rs | 251 +++++++------ zenoh/src/net/routing/hat/router/mod.rs | 1 - zenoh/src/net/routing/hat/router/pubsub.rs | 303 +++++++++------- zenoh/src/net/routing/hat/router/queries.rs | 251 +++++++------ zenoh/src/net/routing/interceptor/mod.rs | 97 ++++- zenoh/src/net/routing/mod.rs | 92 +++++ zenoh/src/net/routing/router.rs | 27 +- zenoh/src/net/runtime/adminspace.rs | 32 ++ zenoh/src/net/runtime/mod.rs | 21 +- zenoh/src/session.rs | 32 ++ 21 files changed, 1738 insertions(+), 897 deletions(-) diff --git a/zenoh/src/net/primitives/demux.rs b/zenoh/src/net/primitives/demux.rs index d326057098..f9694a16f7 100644 --- a/zenoh/src/net/primitives/demux.rs +++ b/zenoh/src/net/primitives/demux.rs @@ -12,43 +12,50 @@ // ZettaScale Zenoh Team, // use super::Primitives; -use crate::net::routing::interceptor::IngressIntercept; +use crate::net::routing::{dispatcher::face::Face, interceptor::IngressIntercept, RoutingContext}; use std::any::Any; use zenoh_link::Link; use zenoh_protocol::network::{NetworkBody, NetworkMessage}; use zenoh_result::ZResult; use zenoh_transport::TransportPeerEventHandler; -pub struct DeMux { - primitives: P, +pub struct DeMux { + face: Face, pub(crate) intercept: IngressIntercept, } -impl DeMux
<P> { - pub(crate) fn new(primitives: P, intercept: IngressIntercept) -> DeMux<P>
{ - DeMux { - primitives, - intercept, - } +impl DeMux { + pub(crate) fn new(face: Face, intercept: IngressIntercept) -> Self { + Self { face, intercept } } } -impl TransportPeerEventHandler for DeMux<P>
{ +impl TransportPeerEventHandler for DeMux { fn handle_message(&self, msg: NetworkMessage) -> ZResult<()> { - let msg = match self.intercept.intercept(msg) { - Some(msg) => msg, + let ctx = RoutingContext::with_face(msg, self.face.clone()); + let ctx = match self.intercept.intercept(ctx) { + Some(ctx) => ctx, None => return Ok(()), }; - match msg.body { - NetworkBody::Declare(m) => self.primitives.send_declare(m), - NetworkBody::Push(m) => self.primitives.send_push(m), - NetworkBody::Request(m) => self.primitives.send_request(m), - NetworkBody::Response(m) => self.primitives.send_response(m), - NetworkBody::ResponseFinal(m) => self.primitives.send_response_final(m), + match ctx.msg.body { + NetworkBody::Declare(m) => self.face.send_declare(m), + NetworkBody::Push(m) => self.face.send_push(m), + NetworkBody::Request(m) => self.face.send_request(m), + NetworkBody::Response(m) => self.face.send_response(m), + NetworkBody::ResponseFinal(m) => self.face.send_response_final(m), NetworkBody::OAM(_m) => (), } + // match ctx.msg.body { + // NetworkBody::Declare(m) => self.face.send_declare(RoutingContext::new(m, ctx.inface)), + // NetworkBody::Push(m) => self.face.send_push(RoutingContext::new(m, ctx.inface)), + // NetworkBody::Request(m) => self.face.send_request(RoutingContext::new(m, ctx.inface)), + // NetworkBody::Response(m) => self.face.send_response(RoutingContext::new(m, ctx.inface)), + // NetworkBody::ResponseFinal(m) => self.face.send_response_final(RoutingContext::new(m, ctx.inface)), + // NetworkBody::OAM(_m) => (), + // } + Ok(()) } @@ -57,7 +64,7 @@ impl TransportPeerEventHandler for DeMux
<P>
{ fn del_link(&self, _link: Link) {} fn closing(&self) { - self.primitives.send_close(); + self.face.send_close(); } fn closed(&self) {} diff --git a/zenoh/src/net/primitives/mod.rs b/zenoh/src/net/primitives/mod.rs index 3718b73ebd..7c0ea9f938 100644 --- a/zenoh/src/net/primitives/mod.rs +++ b/zenoh/src/net/primitives/mod.rs @@ -18,6 +18,8 @@ pub use demux::*; pub use mux::*; use zenoh_protocol::network::{Declare, Push, Request, Response, ResponseFinal}; +use super::routing::RoutingContext; + pub trait Primitives: Send + Sync { fn send_declare(&self, msg: Declare); @@ -32,6 +34,20 @@ pub trait Primitives: Send + Sync { fn send_close(&self); } +pub(crate) trait EPrimitives: Send + Sync { + fn send_declare(&self, ctx: RoutingContext); + + fn send_push(&self, ctx: RoutingContext); + + fn send_request(&self, ctx: RoutingContext); + + fn send_response(&self, ctx: RoutingContext); + + fn send_response_final(&self, ctx: RoutingContext); + + fn send_close(&self); +} + #[derive(Default)] pub struct DummyPrimitives; @@ -48,3 +64,17 @@ impl Primitives for DummyPrimitives { fn send_close(&self) {} } + +impl EPrimitives for DummyPrimitives { + fn send_declare(&self, _ctx: RoutingContext) {} + + fn send_push(&self, _ctx: RoutingContext) {} + + fn send_request(&self, _ctx: RoutingContext) {} + + fn send_response(&self, _ctx: RoutingContext) {} + + fn send_response_final(&self, _ctx: RoutingContext) {} + + fn send_close(&self) {} +} diff --git a/zenoh/src/net/primitives/mux.rs b/zenoh/src/net/primitives/mux.rs index 59f8ba99d6..67077004a1 100644 --- a/zenoh/src/net/primitives/mux.rs +++ b/zenoh/src/net/primitives/mux.rs @@ -1,3 +1,5 @@ +use std::sync::Arc; + // // Copyright (c) 2023 ZettaScale Technology // @@ -11,8 +13,12 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::Primitives; -use crate::net::routing::interceptor::EgressIntercept; +use super::{EPrimitives, Primitives}; +use crate::net::routing::{ + dispatcher::{face::Face, tables::TablesLock}, + interceptor::EgressIntercept, + RoutingContext, +}; use zenoh_protocol::network::{ Declare, NetworkBody, NetworkMessage, Push, Request, Response, ResponseFinal, }; @@ -20,12 +26,24 @@ use zenoh_transport::{TransportMulticast, TransportUnicast}; pub struct Mux { pub handler: TransportUnicast, + pub(crate) fid: usize, + pub(crate) tables: Arc, pub(crate) intercept: EgressIntercept, } impl Mux { - pub(crate) fn new(handler: TransportUnicast, intercept: EgressIntercept) -> Mux { - Mux { handler, intercept } + pub(crate) fn new( + handler: TransportUnicast, + fid: usize, + tables: Arc, + intercept: EgressIntercept, + ) -> Mux { + Mux { + handler, + fid, + tables, + intercept, + } } } @@ -36,8 +54,20 @@ impl Primitives for Mux { #[cfg(feature = "stats")] size: None, }; - if let Some(msg) = self.intercept.intercept(msg) { - let _ = self.handler.schedule(msg); + let tables = zread!(self.tables.tables); + let face = tables.faces.get(&self.fid).cloned(); + drop(tables); + if let Some(face) = face { + let ctx = RoutingContext::with_face( + msg, + Face { + tables: self.tables.clone(), + state: face.clone(), + }, + ); + if let Some(ctx) = self.intercept.intercept(ctx) { + let _ = self.handler.schedule(ctx.msg); + } } } @@ -47,8 +77,20 @@ impl Primitives for Mux { #[cfg(feature = "stats")] size: None, }; - if let Some(msg) = self.intercept.intercept(msg) { - let _ = self.handler.schedule(msg); + let tables = zread!(self.tables.tables); + let face = tables.faces.get(&self.fid).cloned(); + drop(tables); + if let Some(face) = face { + let ctx = 
RoutingContext::with_face( + msg, + Face { + tables: self.tables.clone(), + state: face.clone(), + }, + ); + if let Some(ctx) = self.intercept.intercept(ctx) { + let _ = self.handler.schedule(ctx.msg); + } } } @@ -58,8 +100,20 @@ impl Primitives for Mux { #[cfg(feature = "stats")] size: None, }; - if let Some(msg) = self.intercept.intercept(msg) { - let _ = self.handler.schedule(msg); + let tables = zread!(self.tables.tables); + let face = tables.faces.get(&self.fid).cloned(); + drop(tables); + if let Some(face) = face { + let ctx = RoutingContext::with_face( + msg, + Face { + tables: self.tables.clone(), + state: face.clone(), + }, + ); + if let Some(ctx) = self.intercept.intercept(ctx) { + let _ = self.handler.schedule(ctx.msg); + } } } @@ -69,8 +123,20 @@ impl Primitives for Mux { #[cfg(feature = "stats")] size: None, }; - if let Some(msg) = self.intercept.intercept(msg) { - let _ = self.handler.schedule(msg); + let tables = zread!(self.tables.tables); + let face = tables.faces.get(&self.fid).cloned(); + drop(tables); + if let Some(face) = face { + let ctx = RoutingContext::with_face( + msg, + Face { + tables: self.tables.clone(), + state: face.clone(), + }, + ); + if let Some(ctx) = self.intercept.intercept(ctx) { + let _ = self.handler.schedule(ctx.msg); + } } } @@ -80,8 +146,106 @@ impl Primitives for Mux { #[cfg(feature = "stats")] size: None, }; - if let Some(msg) = self.intercept.intercept(msg) { - let _ = self.handler.schedule(msg); + let tables = zread!(self.tables.tables); + let face = tables.faces.get(&self.fid).cloned(); + drop(tables); + if let Some(face) = face { + let ctx = RoutingContext::with_face( + msg, + Face { + tables: self.tables.clone(), + state: face.clone(), + }, + ); + if let Some(ctx) = self.intercept.intercept(ctx) { + let _ = self.handler.schedule(ctx.msg); + } + } + } + + fn send_close(&self) { + // self.handler.closing().await; + } +} + +impl EPrimitives for Mux { + fn send_declare(&self, ctx: RoutingContext) { + let ctx = RoutingContext { + msg: NetworkMessage { + body: NetworkBody::Declare(ctx.msg), + #[cfg(feature = "stats")] + size: None, + }, + inface: ctx.inface, + prefix: ctx.prefix, + full_expr: ctx.full_expr, + }; + if let Some(ctx) = self.intercept.intercept(ctx) { + let _ = self.handler.schedule(ctx.msg); + } + } + + fn send_push(&self, ctx: RoutingContext) { + let ctx = RoutingContext { + msg: NetworkMessage { + body: NetworkBody::Push(ctx.msg), + #[cfg(feature = "stats")] + size: None, + }, + inface: ctx.inface, + prefix: ctx.prefix, + full_expr: ctx.full_expr, + }; + if let Some(ctx) = self.intercept.intercept(ctx) { + let _ = self.handler.schedule(ctx.msg); + } + } + + fn send_request(&self, ctx: RoutingContext) { + let ctx = RoutingContext { + msg: NetworkMessage { + body: NetworkBody::Request(ctx.msg), + #[cfg(feature = "stats")] + size: None, + }, + inface: ctx.inface, + prefix: ctx.prefix, + full_expr: ctx.full_expr, + }; + if let Some(ctx) = self.intercept.intercept(ctx) { + let _ = self.handler.schedule(ctx.msg); + } + } + + fn send_response(&self, ctx: RoutingContext) { + let ctx = RoutingContext { + msg: NetworkMessage { + body: NetworkBody::Response(ctx.msg), + #[cfg(feature = "stats")] + size: None, + }, + inface: ctx.inface, + prefix: ctx.prefix, + full_expr: ctx.full_expr, + }; + if let Some(ctx) = self.intercept.intercept(ctx) { + let _ = self.handler.schedule(ctx.msg); + } + } + + fn send_response_final(&self, ctx: RoutingContext) { + let ctx = RoutingContext { + msg: NetworkMessage { + body: 
NetworkBody::ResponseFinal(ctx.msg), + #[cfg(feature = "stats")] + size: None, + }, + inface: ctx.inface, + prefix: ctx.prefix, + full_expr: ctx.full_expr, + }; + if let Some(ctx) = self.intercept.intercept(ctx) { + let _ = self.handler.schedule(ctx.msg); } } @@ -92,12 +256,24 @@ impl Primitives for Mux { pub struct McastMux { pub handler: TransportMulticast, + pub(crate) fid: usize, + pub(crate) tables: Arc, pub(crate) intercept: EgressIntercept, } impl McastMux { - pub(crate) fn new(handler: TransportMulticast, intercept: EgressIntercept) -> McastMux { - McastMux { handler, intercept } + pub(crate) fn new( + handler: TransportMulticast, + fid: usize, + tables: Arc, + intercept: EgressIntercept, + ) -> McastMux { + McastMux { + handler, + fid, + tables, + intercept, + } } } @@ -108,8 +284,17 @@ impl Primitives for McastMux { #[cfg(feature = "stats")] size: None, }; - if let Some(msg) = self.intercept.intercept(msg) { - let _ = self.handler.schedule(msg); + if let Some(face) = zread!(self.tables.tables).faces.get(&self.fid).cloned() { + let ctx = RoutingContext::with_face( + msg, + Face { + tables: self.tables.clone(), + state: face.clone(), + }, + ); + if let Some(ctx) = self.intercept.intercept(ctx) { + let _ = self.handler.schedule(ctx.msg); + } } } @@ -119,8 +304,17 @@ impl Primitives for McastMux { #[cfg(feature = "stats")] size: None, }; - if let Some(msg) = self.intercept.intercept(msg) { - let _ = self.handler.schedule(msg); + if let Some(face) = zread!(self.tables.tables).faces.get(&self.fid).cloned() { + let ctx = RoutingContext::with_face( + msg, + Face { + tables: self.tables.clone(), + state: face.clone(), + }, + ); + if let Some(ctx) = self.intercept.intercept(ctx) { + let _ = self.handler.schedule(ctx.msg); + } } } @@ -130,8 +324,17 @@ impl Primitives for McastMux { #[cfg(feature = "stats")] size: None, }; - if let Some(msg) = self.intercept.intercept(msg) { - let _ = self.handler.schedule(msg); + if let Some(face) = zread!(self.tables.tables).faces.get(&self.fid).cloned() { + let ctx = RoutingContext::with_face( + msg, + Face { + tables: self.tables.clone(), + state: face.clone(), + }, + ); + if let Some(ctx) = self.intercept.intercept(ctx) { + let _ = self.handler.schedule(ctx.msg); + } } } @@ -141,8 +344,17 @@ impl Primitives for McastMux { #[cfg(feature = "stats")] size: None, }; - if let Some(msg) = self.intercept.intercept(msg) { - let _ = self.handler.schedule(msg); + if let Some(face) = zread!(self.tables.tables).faces.get(&self.fid).cloned() { + let ctx = RoutingContext::with_face( + msg, + Face { + tables: self.tables.clone(), + state: face.clone(), + }, + ); + if let Some(ctx) = self.intercept.intercept(ctx) { + let _ = self.handler.schedule(ctx.msg); + } } } @@ -152,8 +364,103 @@ impl Primitives for McastMux { #[cfg(feature = "stats")] size: None, }; - if let Some(msg) = self.intercept.intercept(msg) { - let _ = self.handler.schedule(msg); + if let Some(face) = zread!(self.tables.tables).faces.get(&self.fid).cloned() { + let ctx = RoutingContext::with_face( + msg, + Face { + tables: self.tables.clone(), + state: face.clone(), + }, + ); + if let Some(ctx) = self.intercept.intercept(ctx) { + let _ = self.handler.schedule(ctx.msg); + } + } + } + + fn send_close(&self) { + // self.handler.closing().await; + } +} + +impl EPrimitives for McastMux { + fn send_declare(&self, ctx: RoutingContext) { + let ctx = RoutingContext { + msg: NetworkMessage { + body: NetworkBody::Declare(ctx.msg), + #[cfg(feature = "stats")] + size: None, + }, + inface: ctx.inface, + prefix: 
ctx.prefix, + full_expr: ctx.full_expr, + }; + if let Some(ctx) = self.intercept.intercept(ctx) { + let _ = self.handler.schedule(ctx.msg); + } + } + + fn send_push(&self, ctx: RoutingContext) { + let ctx = RoutingContext { + msg: NetworkMessage { + body: NetworkBody::Push(ctx.msg), + #[cfg(feature = "stats")] + size: None, + }, + inface: ctx.inface, + prefix: ctx.prefix, + full_expr: ctx.full_expr, + }; + if let Some(ctx) = self.intercept.intercept(ctx) { + let _ = self.handler.schedule(ctx.msg); + } + } + + fn send_request(&self, ctx: RoutingContext) { + let ctx = RoutingContext { + msg: NetworkMessage { + body: NetworkBody::Request(ctx.msg), + #[cfg(feature = "stats")] + size: None, + }, + inface: ctx.inface, + prefix: ctx.prefix, + full_expr: ctx.full_expr, + }; + if let Some(ctx) = self.intercept.intercept(ctx) { + let _ = self.handler.schedule(ctx.msg); + } + } + + fn send_response(&self, ctx: RoutingContext) { + let ctx = RoutingContext { + msg: NetworkMessage { + body: NetworkBody::Response(ctx.msg), + #[cfg(feature = "stats")] + size: None, + }, + inface: ctx.inface, + prefix: ctx.prefix, + full_expr: ctx.full_expr, + }; + if let Some(ctx) = self.intercept.intercept(ctx) { + let _ = self.handler.schedule(ctx.msg); + } + } + + fn send_response_final(&self, ctx: RoutingContext) { + let ctx = RoutingContext { + msg: NetworkMessage { + body: NetworkBody::ResponseFinal(ctx.msg), + #[cfg(feature = "stats")] + size: None, + }, + inface: ctx.inface, + prefix: ctx.prefix, + full_expr: ctx.full_expr, + }; + if let Some(ctx) = self.intercept.intercept(ctx) { + let _ = self.handler.schedule(ctx.msg); } } diff --git a/zenoh/src/net/routing/dispatcher/face.rs b/zenoh/src/net/routing/dispatcher/face.rs index d2d1f994eb..4a8bfe64bf 100644 --- a/zenoh/src/net/routing/dispatcher/face.rs +++ b/zenoh/src/net/routing/dispatcher/face.rs @@ -1,5 +1,3 @@ -use crate::net::primitives::Primitives; - // // Copyright (c) 2023 ZettaScale Technology // @@ -16,6 +14,7 @@ use crate::net::primitives::Primitives; use super::super::router::*; use super::tables::TablesLock; use super::{resource::*, tables}; +use crate::net::primitives::Primitives; use std::any::Any; use std::collections::HashMap; use std::fmt; @@ -35,7 +34,7 @@ pub struct FaceState { pub(crate) whatami: WhatAmI, #[cfg(feature = "stats")] pub(crate) stats: Option>, - pub(crate) primitives: Arc, + pub(crate) primitives: Arc, pub(crate) local_mappings: HashMap>, pub(crate) remote_mappings: HashMap>, pub(crate) next_qid: RequestId, @@ -50,7 +49,7 @@ impl FaceState { zid: ZenohId, whatami: WhatAmI, #[cfg(feature = "stats")] stats: Option>, - primitives: Arc, + primitives: Arc, mcast_group: Option, hat: Box, ) -> Arc { diff --git a/zenoh/src/net/routing/dispatcher/pubsub.rs b/zenoh/src/net/routing/dispatcher/pubsub.rs index 544866323c..33118be161 100644 --- a/zenoh/src/net/routing/dispatcher/pubsub.rs +++ b/zenoh/src/net/routing/dispatcher/pubsub.rs @@ -14,6 +14,7 @@ use super::face::FaceState; use super::resource::{DataRoutes, Direction, PullCaches, Resource}; use super::tables::{NodeId, RoutingExpr, Tables}; +use crate::net::routing::RoutingContext; use std::sync::Arc; use std::sync::RwLock; use zenoh_core::zread; @@ -306,13 +307,16 @@ pub fn full_reentrant_route_data( inc_stats!(face, tx, admin, payload) } - outface.primitives.send_push(Push { - wire_expr: key_expr.into(), - ext_qos, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType { node_id: *context }, - payload, - }) + outface.primitives.send_push(RoutingContext::with_expr( + Push { + wire_expr: 
key_expr.into(), + ext_qos, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType { node_id: *context }, + payload, + }, + expr.full_expr().to_string(), + )) } } else { if !matching_pulls.is_empty() { @@ -341,13 +345,16 @@ pub fn full_reentrant_route_data( inc_stats!(face, tx, admin, payload) } - outface.primitives.send_push(Push { - wire_expr: key_expr, - ext_qos, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType { node_id: context }, - payload: payload.clone(), - }) + outface.primitives.send_push(RoutingContext::with_expr( + Push { + wire_expr: key_expr, + ext_qos, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType { node_id: context }, + payload: payload.clone(), + }, + expr.full_expr().to_string(), + )) } } else { drop(tables); @@ -368,13 +375,16 @@ pub fn full_reentrant_route_data( inc_stats!(face, tx, admin, payload) } - outface.primitives.send_push(Push { - wire_expr: key_expr.into(), - ext_qos, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType { node_id: *context }, - payload: payload.clone(), - }) + outface.primitives.send_push(RoutingContext::with_expr( + Push { + wire_expr: key_expr.into(), + ext_qos, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType { node_id: *context }, + payload: payload.clone(), + }, + expr.full_expr().to_string(), + )) } } } @@ -413,13 +423,16 @@ pub fn pull_data(tables_ref: &RwLock, face: &Arc, expr: WireE drop(lock); drop(tables); for (key_expr, payload) in route { - face.primitives.send_push(Push { - wire_expr: key_expr, - ext_qos: ext::QoSType::push_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - payload, - }); + face.primitives.send_push(RoutingContext::with_expr( + Push { + wire_expr: key_expr, + ext_qos: ext::QoSType::push_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + payload, + }, + "".to_string(), + )); // TODO } } None => { diff --git a/zenoh/src/net/routing/dispatcher/queries.rs b/zenoh/src/net/routing/dispatcher/queries.rs index cb856c5dd1..b29cd36c22 100644 --- a/zenoh/src/net/routing/dispatcher/queries.rs +++ b/zenoh/src/net/routing/dispatcher/queries.rs @@ -1,3 +1,5 @@ +use crate::net::routing::RoutingContext; + // // Copyright (c) 2023 ZettaScale Technology // @@ -472,7 +474,7 @@ pub fn route_query( drop(queries_lock); drop(rtables); - for (expr, payload) in local_replies { + for (wexpr, payload) in local_replies { let payload = ResponseBody::Reply(Reply { timestamp: None, encoding: Encoding::default(), @@ -490,17 +492,22 @@ pub fn route_query( inc_res_stats!(face, tx, admin, payload) } - face.primitives.clone().send_response(Response { - rid: qid, - wire_expr: expr, - payload, - ext_qos: response::ext::QoSType::declare_default(), - ext_tstamp: None, - ext_respid: Some(response::ext::ResponderIdType { - zid, - eid: 0, // TODO - }), - }); + face.primitives + .clone() + .send_response(RoutingContext::with_expr( + Response { + rid: qid, + wire_expr: wexpr, + payload, + ext_qos: response::ext::QoSType::declare_default(), + ext_tstamp: None, + ext_respid: Some(response::ext::ResponderIdType { + zid, + eid: 0, // TODO + }), + }, + expr.full_expr().to_string(), + )); } if route.is_empty() { @@ -509,11 +516,16 @@ pub fn route_query( face, qid ); - face.primitives.clone().send_response_final(ResponseFinal { - rid: qid, - ext_qos: response::ext::QoSType::response_final_default(), - ext_tstamp: None, - }); + face.primitives + .clone() + .send_response_final(RoutingContext::with_expr( + ResponseFinal { + rid: qid, + ext_qos: response::ext::QoSType::response_final_default(), + ext_tstamp: None, + }, + 
expr.full_expr().to_string(), + )); } else { // let timer = tables.timer.clone(); // let timeout = tables.queries_default_timeout; @@ -569,28 +581,36 @@ pub fn route_query( } log::trace!("Propagate query {}:{} to {}", face, qid, outface); - outface.primitives.send_request(Request { - id: *qid, - wire_expr: key_expr.into(), - ext_qos: ext::QoSType::request_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType { node_id: *context }, - ext_target: target, - ext_budget: None, - ext_timeout: None, - payload: body.clone(), - }); + outface.primitives.send_request(RoutingContext::with_expr( + Request { + id: *qid, + wire_expr: key_expr.into(), + ext_qos: ext::QoSType::request_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType { node_id: *context }, + ext_target: target, + ext_budget: None, + ext_timeout: None, + payload: body.clone(), + }, + expr.full_expr().to_string(), + )); } } } } else { log::debug!("Send final reply {}:{} (not master)", face, qid); drop(rtables); - face.primitives.clone().send_response_final(ResponseFinal { - rid: qid, - ext_qos: response::ext::QoSType::response_final_default(), - ext_tstamp: None, - }); + face.primitives + .clone() + .send_response_final(RoutingContext::with_expr( + ResponseFinal { + rid: qid, + ext_qos: response::ext::QoSType::response_final_default(), + ext_tstamp: None, + }, + expr.full_expr().to_string(), + )); } } None => { @@ -599,11 +619,16 @@ pub fn route_query( expr.scope ); drop(rtables); - face.primitives.clone().send_response_final(ResponseFinal { - rid: qid, - ext_qos: response::ext::QoSType::response_final_default(), - ext_tstamp: None, - }); + face.primitives + .clone() + .send_response_final(RoutingContext::with_expr( + ResponseFinal { + rid: qid, + ext_qos: response::ext::QoSType::response_final_default(), + ext_tstamp: None, + }, + "".to_string(), + )); // TODO } } } @@ -638,14 +663,21 @@ pub(crate) fn route_send_response( inc_res_stats!(query.src_face, tx, admin, body) } - query.src_face.primitives.clone().send_response(Response { - rid: query.src_qid, - wire_expr: key_expr.to_owned(), - payload: body, - ext_qos: response::ext::QoSType::response_default(), - ext_tstamp: None, - ext_respid, - }); + query + .src_face + .primitives + .clone() + .send_response(RoutingContext::with_expr( + Response { + rid: query.src_qid, + wire_expr: key_expr.to_owned(), + payload: body, + ext_qos: response::ext::QoSType::response_default(), + ext_tstamp: None, + ext_respid, + }, + "".to_string(), + )); // TODO } None => log::warn!( "Route reply {}:{} from {}: Query nof found!", @@ -697,10 +729,13 @@ pub(crate) fn finalize_pending_query(query: Arc) { .src_face .primitives .clone() - .send_response_final(ResponseFinal { - rid: query.src_qid, - ext_qos: response::ext::QoSType::response_final_default(), - ext_tstamp: None, - }); + .send_response_final(RoutingContext::with_expr( + ResponseFinal { + rid: query.src_qid, + ext_qos: response::ext::QoSType::response_final_default(), + ext_tstamp: None, + }, + "".to_string(), + )); // TODO } } diff --git a/zenoh/src/net/routing/dispatcher/resource.rs b/zenoh/src/net/routing/dispatcher/resource.rs index 5988f48ed5..202a104922 100644 --- a/zenoh/src/net/routing/dispatcher/resource.rs +++ b/zenoh/src/net/routing/dispatcher/resource.rs @@ -1,3 +1,5 @@ +use crate::net::routing::RoutingContext; + // // Copyright (c) 2023 ZettaScale Technology // @@ -516,15 +518,18 @@ impl Resource { get_mut_unchecked(face) .local_mappings .insert(expr_id, nonwild_prefix.clone()); - face.primitives.send_declare(Declare { - 
ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareKeyExpr(DeclareKeyExpr { - id: expr_id, - wire_expr: nonwild_prefix.expr().into(), - }), - }); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareKeyExpr(DeclareKeyExpr { + id: expr_id, + wire_expr: nonwild_prefix.expr().into(), + }), + }, + nonwild_prefix.expr(), + )); WireExpr { scope: expr_id, suffix: wildsuffix.into(), diff --git a/zenoh/src/net/routing/hat/client/pubsub.rs b/zenoh/src/net/routing/hat/client/pubsub.rs index f18d778819..189e6cb6e8 100644 --- a/zenoh/src/net/routing/hat/client/pubsub.rs +++ b/zenoh/src/net/routing/hat/client/pubsub.rs @@ -20,7 +20,7 @@ use crate::net::routing::dispatcher::resource::{NodeId, Resource, SessionContext use crate::net::routing::dispatcher::tables::{DataRoutes, PullCaches, Route, RoutingExpr}; use crate::net::routing::dispatcher::tables::{Tables, TablesLock}; use crate::net::routing::hat::HatPubSubTrait; -use crate::net::routing::PREFIX_LIVELINESS; +use crate::net::routing::{RoutingContext, PREFIX_LIVELINESS}; use petgraph::graph::NodeIndex; use std::borrow::Cow; use std::collections::{HashMap, HashSet}; @@ -55,18 +55,21 @@ fn send_sourced_subscription_to_net_childs( log::debug!("Send subscription {} on {}", res.expr(), someface); - someface.primitives.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType { - node_id: routing_context, + someface.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType { + node_id: routing_context, + }, + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id: 0, // TODO + wire_expr: key_expr, + ext_info: *sub_info, + }), }, - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // TODO - wire_expr: key_expr, - ext_info: *sub_info, - }), - }); + res.expr(), + )); } } None => log::trace!("Unable to find face for zid {}", net.graph[*child].zid), @@ -109,16 +112,19 @@ fn propagate_simple_subscription_to( { face_hat_mut!(dst_face).local_subs.insert(res.clone()); let key_expr = Resource::decl_key(res, dst_face); - dst_face.primitives.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // TODO - wire_expr: key_expr, - ext_info: *sub_info, - }), - }); + dst_face.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id: 0, // TODO + wire_expr: key_expr, + ext_info: *sub_info, + }), + }, + res.expr(), + )); } } @@ -461,16 +467,21 @@ fn declare_client_subscription( // TODO: Let's deactivate this on windows until Fixed #[cfg(not(windows))] for mcast_group in &wtables.mcast_groups { - mcast_group.primitives.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // TODO - wire_expr: res.expr().into(), - ext_info: *sub_info, - }), - }) + mcast_group + .primitives + .send_declare(RoutingContext::with_expr( + 
Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id: 0, // TODO + wire_expr: res.expr().into(), + ext_info: *sub_info, + }), + }, + res.expr(), + )) } } } @@ -480,16 +491,21 @@ fn declare_client_subscription( // TODO: Let's deactivate this on windows until Fixed #[cfg(not(windows))] for mcast_group in &wtables.mcast_groups { - mcast_group.primitives.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // TODO - wire_expr: res.expr().into(), - ext_info: *sub_info, - }), - }) + mcast_group + .primitives + .send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id: 0, // TODO + wire_expr: res.expr().into(), + ext_info: *sub_info, + }), + }, + res.expr(), + )) } } } @@ -562,17 +578,20 @@ fn send_forget_sourced_subscription_to_net_childs( log::debug!("Send forget subscription {} on {}", res.expr(), someface); - someface.primitives.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType { - node_id: routing_context.unwrap_or(0), + someface.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType { + node_id: routing_context.unwrap_or(0), + }, + body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { + id: 0, // TODO + ext_wire_expr: WireExprType { wire_expr }, + }), }, - body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id: 0, // TODO - ext_wire_expr: WireExprType { wire_expr }, - }), - }); + res.expr(), + )); } } None => log::trace!("Unable to find face for zid {}", net.graph[*child].zid), @@ -585,15 +604,18 @@ fn propagate_forget_simple_subscription(tables: &mut Tables, res: &Arc for face in tables.faces.values_mut() { if face_hat!(face).local_subs.contains(res) { let wire_expr = Resource::get_best_key(res, "", face.id); - face.primitives.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id: 0, // TODO - ext_wire_expr: WireExprType { wire_expr }, - }), - }); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { + id: 0, // TODO + ext_wire_expr: WireExprType { wire_expr }, + }), + }, + res.expr(), + )); face_hat_mut!(face).local_subs.remove(res); } } @@ -621,15 +643,18 @@ fn propagate_forget_simple_subscription_to_peers(tables: &mut Tables, res: &Arc< }) { let wire_expr = Resource::get_best_key(res, "", face.id); - face.primitives.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id: 0, // TODO - ext_wire_expr: WireExprType { wire_expr }, - }), - }); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + 
body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { + id: 0, // TODO + ext_wire_expr: WireExprType { wire_expr }, + }), + }, + res.expr(), + )); face_hat_mut!(&mut face).local_subs.remove(res); } @@ -857,15 +882,18 @@ pub(super) fn undeclare_client_subscription( && !(face.whatami == WhatAmI::Client && res.expr().starts_with(PREFIX_LIVELINESS)) { let wire_expr = Resource::get_best_key(res, "", face.id); - face.primitives.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id: 0, // TODO - ext_wire_expr: WireExprType { wire_expr }, - }), - }); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { + id: 0, // TODO + ext_wire_expr: WireExprType { wire_expr }, + }), + }, + res.expr(), + )); face_hat_mut!(face).local_subs.remove(res); } @@ -917,16 +945,19 @@ pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { for sub in &hat!(tables).router_subs { face_hat_mut!(face).local_subs.insert(sub.clone()); let key_expr = Resource::decl_key(sub, face); - face.primitives.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // TODO - wire_expr: key_expr, - ext_info: sub_info, - }), - }); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id: 0, // TODO + wire_expr: key_expr, + ext_info: sub_info, + }), + }, + sub.expr(), + )); } } else if face.whatami == WhatAmI::Peer && !hat!(tables).full_net(WhatAmI::Peer) { for sub in &hat!(tables).router_subs { @@ -942,16 +973,19 @@ pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { { face_hat_mut!(face).local_subs.insert(sub.clone()); let key_expr = Resource::decl_key(sub, face); - face.primitives.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // TODO - wire_expr: key_expr, - ext_info: sub_info, - }), - }); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id: 0, // TODO + wire_expr: key_expr, + ext_info: sub_info, + }), + }, + sub.expr(), + )); } } } @@ -962,16 +996,19 @@ pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { for sub in &hat!(tables).peer_subs { face_hat_mut!(face).local_subs.insert(sub.clone()); let key_expr = Resource::decl_key(sub, face); - face.primitives.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // TODO - wire_expr: key_expr, - ext_info: sub_info, - }), - }); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: 
DeclareBody::DeclareSubscriber(DeclareSubscriber { + id: 0, // TODO + wire_expr: key_expr, + ext_info: sub_info, + }), + }, + sub.expr(), + )); } } } else { @@ -1155,17 +1192,20 @@ pub(super) fn pubsub_linkstate_change(tables: &mut Tables, zid: &ZenohId, links: }; if forget { let wire_expr = Resource::get_best_key(res, "", dst_face.id); - dst_face.primitives.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::UndeclareSubscriber( - UndeclareSubscriber { - id: 0, // TODO - ext_wire_expr: WireExprType { wire_expr }, - }, - ), - }); + dst_face.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::UndeclareSubscriber( + UndeclareSubscriber { + id: 0, // TODO + ext_wire_expr: WireExprType { wire_expr }, + }, + ), + }, + res.expr(), + )); face_hat_mut!(dst_face).local_subs.remove(res); } @@ -1177,16 +1217,19 @@ pub(super) fn pubsub_linkstate_change(tables: &mut Tables, zid: &ZenohId, links: reliability: Reliability::Reliable, // TODO mode: Mode::Push, }; - dst_face.primitives.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // TODO - wire_expr: key_expr, - ext_info: sub_info, - }), - }); + dst_face.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id: 0, // TODO + wire_expr: key_expr, + ext_info: sub_info, + }), + }, + res.expr(), + )); } } } diff --git a/zenoh/src/net/routing/hat/client/queries.rs b/zenoh/src/net/routing/hat/client/queries.rs index a6302abc4e..c1093a8a00 100644 --- a/zenoh/src/net/routing/hat/client/queries.rs +++ b/zenoh/src/net/routing/hat/client/queries.rs @@ -22,7 +22,7 @@ use crate::net::routing::dispatcher::tables::{ }; use crate::net::routing::dispatcher::tables::{Tables, TablesLock}; use crate::net::routing::hat::HatQueriesTrait; -use crate::net::routing::PREFIX_LIVELINESS; +use crate::net::routing::{RoutingContext, PREFIX_LIVELINESS}; use ordered_float::OrderedFloat; use petgraph::graph::NodeIndex; use std::borrow::Cow; @@ -208,18 +208,21 @@ fn send_sourced_queryable_to_net_childs( log::debug!("Send queryable {} on {}", res.expr(), someface); - someface.primitives.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType { - node_id: routing_context, + someface.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType { + node_id: routing_context, + }, + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id: 0, // TODO + wire_expr: key_expr, + ext_info: *qabl_info, + }), }, - body: DeclareBody::DeclareQueryable(DeclareQueryable { - id: 0, // TODO - wire_expr: key_expr, - ext_info: *qabl_info, - }), - }); + res.expr(), + )); } } None => log::trace!("Unable to find face for zid {}", net.graph[*child].zid), @@ -275,16 +278,19 @@ fn propagate_simple_queryable( .local_qabls .insert(res.clone(), info); let key_expr = Resource::decl_key(res, &mut dst_face); - dst_face.primitives.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, 
- ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareQueryable(DeclareQueryable { - id: 0, // TODO - wire_expr: key_expr, - ext_info: info, - }), - }); + dst_face.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id: 0, // TODO + wire_expr: key_expr, + ext_info: info, + }), + }, + res.expr(), + )); } } } @@ -678,17 +684,20 @@ fn send_forget_sourced_queryable_to_net_childs( log::debug!("Send forget queryable {} on {}", res.expr(), someface); - someface.primitives.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType { - node_id: routing_context, + someface.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType { + node_id: routing_context, + }, + body: DeclareBody::UndeclareQueryable(UndeclareQueryable { + id: 0, // TODO + ext_wire_expr: WireExprType { wire_expr }, + }), }, - body: DeclareBody::UndeclareQueryable(UndeclareQueryable { - id: 0, // TODO - ext_wire_expr: WireExprType { wire_expr }, - }), - }); + res.expr(), + )); } } None => log::trace!("Unable to find face for zid {}", net.graph[*child].zid), @@ -701,15 +710,18 @@ fn propagate_forget_simple_queryable(tables: &mut Tables, res: &mut Arc) { let info = local_qabl_info(tables, qabl, face); face_hat_mut!(face).local_qabls.insert(qabl.clone(), info); let key_expr = Resource::decl_key(qabl, face); - face.primitives.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareQueryable(DeclareQueryable { - id: 0, // TODO - wire_expr: key_expr, - ext_info: info, - }), - }); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id: 0, // TODO + wire_expr: key_expr, + ext_info: info, + }), + }, + qabl.expr(), + )); } } } else if face.whatami == WhatAmI::Peer && !hat!(tables).full_net(WhatAmI::Peer) { @@ -1071,16 +1092,19 @@ pub(super) fn queries_new_face(tables: &mut Tables, face: &mut Arc) { let info = local_qabl_info(tables, qabl, face); face_hat_mut!(face).local_qabls.insert(qabl.clone(), info); let key_expr = Resource::decl_key(qabl, face); - face.primitives.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareQueryable(DeclareQueryable { - id: 0, // TODO - wire_expr: key_expr, - ext_info: info, - }), - }); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id: 0, // TODO + wire_expr: key_expr, + ext_info: info, + }), + }, + qabl.expr(), + )); } } } @@ -1093,16 +1117,19 @@ pub(super) fn queries_new_face(tables: &mut Tables, face: &mut Arc) { let info = local_qabl_info(tables, qabl, face); face_hat_mut!(face).local_qabls.insert(qabl.clone(), info); let key_expr = Resource::decl_key(qabl, face); - face.primitives.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: 
ext::NodeIdType::default(), - body: DeclareBody::DeclareQueryable(DeclareQueryable { - id: 0, // TODO - wire_expr: key_expr, - ext_info: info, - }), - }); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id: 0, // TODO + wire_expr: key_expr, + ext_info: info, + }), + }, + qabl.expr(), + )); } } } @@ -1230,15 +1257,20 @@ pub(super) fn queries_linkstate_change(tables: &mut Tables, zid: &ZenohId, links }; if forget { let wire_expr = Resource::get_best_key(res, "", dst_face.id); - dst_face.primitives.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::UndeclareQueryable(UndeclareQueryable { - id: 0, // TODO - ext_wire_expr: WireExprType { wire_expr }, - }), - }); + dst_face.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::UndeclareQueryable( + UndeclareQueryable { + id: 0, // TODO + ext_wire_expr: WireExprType { wire_expr }, + }, + ), + }, + res.expr(), + )); face_hat_mut!(dst_face).local_qabls.remove(res); } @@ -1249,16 +1281,19 @@ pub(super) fn queries_linkstate_change(tables: &mut Tables, zid: &ZenohId, links .local_qabls .insert(res.clone(), info); let key_expr = Resource::decl_key(res, dst_face); - dst_face.primitives.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareQueryable(DeclareQueryable { - id: 0, // TODO - wire_expr: key_expr, - ext_info: info, - }), - }); + dst_face.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id: 0, // TODO + wire_expr: key_expr, + ext_info: info, + }), + }, + res.expr(), + )); } } } diff --git a/zenoh/src/net/routing/hat/peer/mod.rs b/zenoh/src/net/routing/hat/peer/mod.rs index bedeffe396..4ae063003e 100644 --- a/zenoh/src/net/routing/hat/peer/mod.rs +++ b/zenoh/src/net/routing/hat/peer/mod.rs @@ -370,7 +370,6 @@ impl HatBaseTrait for HatCode { } face_hat_mut!(&mut face.state).link_id = link_id; - pubsub_new_face(tables, &mut face.state); queries_new_face(tables, &mut face.state); diff --git a/zenoh/src/net/routing/hat/peer/pubsub.rs b/zenoh/src/net/routing/hat/peer/pubsub.rs index f18d778819..189e6cb6e8 100644 --- a/zenoh/src/net/routing/hat/peer/pubsub.rs +++ b/zenoh/src/net/routing/hat/peer/pubsub.rs @@ -20,7 +20,7 @@ use crate::net::routing::dispatcher::resource::{NodeId, Resource, SessionContext use crate::net::routing::dispatcher::tables::{DataRoutes, PullCaches, Route, RoutingExpr}; use crate::net::routing::dispatcher::tables::{Tables, TablesLock}; use crate::net::routing::hat::HatPubSubTrait; -use crate::net::routing::PREFIX_LIVELINESS; +use crate::net::routing::{RoutingContext, PREFIX_LIVELINESS}; use petgraph::graph::NodeIndex; use std::borrow::Cow; use std::collections::{HashMap, HashSet}; @@ -55,18 +55,21 @@ fn send_sourced_subscription_to_net_childs( log::debug!("Send subscription {} on {}", res.expr(), someface); - someface.primitives.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: 
ext::NodeIdType { - node_id: routing_context, + someface.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType { + node_id: routing_context, + }, + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id: 0, // TODO + wire_expr: key_expr, + ext_info: *sub_info, + }), }, - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // TODO - wire_expr: key_expr, - ext_info: *sub_info, - }), - }); + res.expr(), + )); } } None => log::trace!("Unable to find face for zid {}", net.graph[*child].zid), @@ -109,16 +112,19 @@ fn propagate_simple_subscription_to( { face_hat_mut!(dst_face).local_subs.insert(res.clone()); let key_expr = Resource::decl_key(res, dst_face); - dst_face.primitives.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // TODO - wire_expr: key_expr, - ext_info: *sub_info, - }), - }); + dst_face.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id: 0, // TODO + wire_expr: key_expr, + ext_info: *sub_info, + }), + }, + res.expr(), + )); } } @@ -461,16 +467,21 @@ fn declare_client_subscription( // TODO: Let's deactivate this on windows until Fixed #[cfg(not(windows))] for mcast_group in &wtables.mcast_groups { - mcast_group.primitives.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // TODO - wire_expr: res.expr().into(), - ext_info: *sub_info, - }), - }) + mcast_group + .primitives + .send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id: 0, // TODO + wire_expr: res.expr().into(), + ext_info: *sub_info, + }), + }, + res.expr(), + )) } } } @@ -480,16 +491,21 @@ fn declare_client_subscription( // TODO: Let's deactivate this on windows until Fixed #[cfg(not(windows))] for mcast_group in &wtables.mcast_groups { - mcast_group.primitives.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // TODO - wire_expr: res.expr().into(), - ext_info: *sub_info, - }), - }) + mcast_group + .primitives + .send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id: 0, // TODO + wire_expr: res.expr().into(), + ext_info: *sub_info, + }), + }, + res.expr(), + )) } } } @@ -562,17 +578,20 @@ fn send_forget_sourced_subscription_to_net_childs( log::debug!("Send forget subscription {} on {}", res.expr(), someface); - someface.primitives.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType { - node_id: routing_context.unwrap_or(0), + someface.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType { + 
node_id: routing_context.unwrap_or(0), + }, + body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { + id: 0, // TODO + ext_wire_expr: WireExprType { wire_expr }, + }), }, - body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id: 0, // TODO - ext_wire_expr: WireExprType { wire_expr }, - }), - }); + res.expr(), + )); } } None => log::trace!("Unable to find face for zid {}", net.graph[*child].zid), @@ -585,15 +604,18 @@ fn propagate_forget_simple_subscription(tables: &mut Tables, res: &Arc for face in tables.faces.values_mut() { if face_hat!(face).local_subs.contains(res) { let wire_expr = Resource::get_best_key(res, "", face.id); - face.primitives.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id: 0, // TODO - ext_wire_expr: WireExprType { wire_expr }, - }), - }); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { + id: 0, // TODO + ext_wire_expr: WireExprType { wire_expr }, + }), + }, + res.expr(), + )); face_hat_mut!(face).local_subs.remove(res); } } @@ -621,15 +643,18 @@ fn propagate_forget_simple_subscription_to_peers(tables: &mut Tables, res: &Arc< }) { let wire_expr = Resource::get_best_key(res, "", face.id); - face.primitives.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id: 0, // TODO - ext_wire_expr: WireExprType { wire_expr }, - }), - }); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { + id: 0, // TODO + ext_wire_expr: WireExprType { wire_expr }, + }), + }, + res.expr(), + )); face_hat_mut!(&mut face).local_subs.remove(res); } @@ -857,15 +882,18 @@ pub(super) fn undeclare_client_subscription( && !(face.whatami == WhatAmI::Client && res.expr().starts_with(PREFIX_LIVELINESS)) { let wire_expr = Resource::get_best_key(res, "", face.id); - face.primitives.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id: 0, // TODO - ext_wire_expr: WireExprType { wire_expr }, - }), - }); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { + id: 0, // TODO + ext_wire_expr: WireExprType { wire_expr }, + }), + }, + res.expr(), + )); face_hat_mut!(face).local_subs.remove(res); } @@ -917,16 +945,19 @@ pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { for sub in &hat!(tables).router_subs { face_hat_mut!(face).local_subs.insert(sub.clone()); let key_expr = Resource::decl_key(sub, face); - face.primitives.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // TODO - wire_expr: key_expr, - ext_info: sub_info, - }), - }); + 
face.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id: 0, // TODO + wire_expr: key_expr, + ext_info: sub_info, + }), + }, + sub.expr(), + )); } } else if face.whatami == WhatAmI::Peer && !hat!(tables).full_net(WhatAmI::Peer) { for sub in &hat!(tables).router_subs { @@ -942,16 +973,19 @@ pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { { face_hat_mut!(face).local_subs.insert(sub.clone()); let key_expr = Resource::decl_key(sub, face); - face.primitives.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // TODO - wire_expr: key_expr, - ext_info: sub_info, - }), - }); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id: 0, // TODO + wire_expr: key_expr, + ext_info: sub_info, + }), + }, + sub.expr(), + )); } } } @@ -962,16 +996,19 @@ pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { for sub in &hat!(tables).peer_subs { face_hat_mut!(face).local_subs.insert(sub.clone()); let key_expr = Resource::decl_key(sub, face); - face.primitives.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // TODO - wire_expr: key_expr, - ext_info: sub_info, - }), - }); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id: 0, // TODO + wire_expr: key_expr, + ext_info: sub_info, + }), + }, + sub.expr(), + )); } } } else { @@ -1155,17 +1192,20 @@ pub(super) fn pubsub_linkstate_change(tables: &mut Tables, zid: &ZenohId, links: }; if forget { let wire_expr = Resource::get_best_key(res, "", dst_face.id); - dst_face.primitives.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::UndeclareSubscriber( - UndeclareSubscriber { - id: 0, // TODO - ext_wire_expr: WireExprType { wire_expr }, - }, - ), - }); + dst_face.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::UndeclareSubscriber( + UndeclareSubscriber { + id: 0, // TODO + ext_wire_expr: WireExprType { wire_expr }, + }, + ), + }, + res.expr(), + )); face_hat_mut!(dst_face).local_subs.remove(res); } @@ -1177,16 +1217,19 @@ pub(super) fn pubsub_linkstate_change(tables: &mut Tables, zid: &ZenohId, links: reliability: Reliability::Reliable, // TODO mode: Mode::Push, }; - dst_face.primitives.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // TODO - wire_expr: key_expr, - ext_info: sub_info, - }), - }); + dst_face.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + 
ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id: 0, // TODO + wire_expr: key_expr, + ext_info: sub_info, + }), + }, + res.expr(), + )); } } } diff --git a/zenoh/src/net/routing/hat/peer/queries.rs b/zenoh/src/net/routing/hat/peer/queries.rs index a6302abc4e..c1093a8a00 100644 --- a/zenoh/src/net/routing/hat/peer/queries.rs +++ b/zenoh/src/net/routing/hat/peer/queries.rs @@ -22,7 +22,7 @@ use crate::net::routing::dispatcher::tables::{ }; use crate::net::routing::dispatcher::tables::{Tables, TablesLock}; use crate::net::routing::hat::HatQueriesTrait; -use crate::net::routing::PREFIX_LIVELINESS; +use crate::net::routing::{RoutingContext, PREFIX_LIVELINESS}; use ordered_float::OrderedFloat; use petgraph::graph::NodeIndex; use std::borrow::Cow; @@ -208,18 +208,21 @@ fn send_sourced_queryable_to_net_childs( log::debug!("Send queryable {} on {}", res.expr(), someface); - someface.primitives.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType { - node_id: routing_context, + someface.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType { + node_id: routing_context, + }, + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id: 0, // TODO + wire_expr: key_expr, + ext_info: *qabl_info, + }), }, - body: DeclareBody::DeclareQueryable(DeclareQueryable { - id: 0, // TODO - wire_expr: key_expr, - ext_info: *qabl_info, - }), - }); + res.expr(), + )); } } None => log::trace!("Unable to find face for zid {}", net.graph[*child].zid), @@ -275,16 +278,19 @@ fn propagate_simple_queryable( .local_qabls .insert(res.clone(), info); let key_expr = Resource::decl_key(res, &mut dst_face); - dst_face.primitives.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareQueryable(DeclareQueryable { - id: 0, // TODO - wire_expr: key_expr, - ext_info: info, - }), - }); + dst_face.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id: 0, // TODO + wire_expr: key_expr, + ext_info: info, + }), + }, + res.expr(), + )); } } } @@ -678,17 +684,20 @@ fn send_forget_sourced_queryable_to_net_childs( log::debug!("Send forget queryable {} on {}", res.expr(), someface); - someface.primitives.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType { - node_id: routing_context, + someface.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType { + node_id: routing_context, + }, + body: DeclareBody::UndeclareQueryable(UndeclareQueryable { + id: 0, // TODO + ext_wire_expr: WireExprType { wire_expr }, + }), }, - body: DeclareBody::UndeclareQueryable(UndeclareQueryable { - id: 0, // TODO - ext_wire_expr: WireExprType { wire_expr }, - }), - }); + res.expr(), + )); } } None => log::trace!("Unable to find face for zid {}", net.graph[*child].zid), @@ -701,15 +710,18 @@ fn propagate_forget_simple_queryable(tables: &mut Tables, res: &mut Arc) { let info = local_qabl_info(tables, qabl, face); face_hat_mut!(face).local_qabls.insert(qabl.clone(), info); let key_expr = 
Resource::decl_key(qabl, face); - face.primitives.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareQueryable(DeclareQueryable { - id: 0, // TODO - wire_expr: key_expr, - ext_info: info, - }), - }); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id: 0, // TODO + wire_expr: key_expr, + ext_info: info, + }), + }, + qabl.expr(), + )); } } } else if face.whatami == WhatAmI::Peer && !hat!(tables).full_net(WhatAmI::Peer) { @@ -1071,16 +1092,19 @@ pub(super) fn queries_new_face(tables: &mut Tables, face: &mut Arc) { let info = local_qabl_info(tables, qabl, face); face_hat_mut!(face).local_qabls.insert(qabl.clone(), info); let key_expr = Resource::decl_key(qabl, face); - face.primitives.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareQueryable(DeclareQueryable { - id: 0, // TODO - wire_expr: key_expr, - ext_info: info, - }), - }); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id: 0, // TODO + wire_expr: key_expr, + ext_info: info, + }), + }, + qabl.expr(), + )); } } } @@ -1093,16 +1117,19 @@ pub(super) fn queries_new_face(tables: &mut Tables, face: &mut Arc) { let info = local_qabl_info(tables, qabl, face); face_hat_mut!(face).local_qabls.insert(qabl.clone(), info); let key_expr = Resource::decl_key(qabl, face); - face.primitives.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareQueryable(DeclareQueryable { - id: 0, // TODO - wire_expr: key_expr, - ext_info: info, - }), - }); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id: 0, // TODO + wire_expr: key_expr, + ext_info: info, + }), + }, + qabl.expr(), + )); } } } @@ -1230,15 +1257,20 @@ pub(super) fn queries_linkstate_change(tables: &mut Tables, zid: &ZenohId, links }; if forget { let wire_expr = Resource::get_best_key(res, "", dst_face.id); - dst_face.primitives.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::UndeclareQueryable(UndeclareQueryable { - id: 0, // TODO - ext_wire_expr: WireExprType { wire_expr }, - }), - }); + dst_face.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::UndeclareQueryable( + UndeclareQueryable { + id: 0, // TODO + ext_wire_expr: WireExprType { wire_expr }, + }, + ), + }, + res.expr(), + )); face_hat_mut!(dst_face).local_qabls.remove(res); } @@ -1249,16 +1281,19 @@ pub(super) fn queries_linkstate_change(tables: &mut Tables, zid: &ZenohId, links .local_qabls .insert(res.clone(), info); let key_expr = Resource::decl_key(res, dst_face); - dst_face.primitives.send_declare(Declare { - ext_qos: 
ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareQueryable(DeclareQueryable { - id: 0, // TODO - wire_expr: key_expr, - ext_info: info, - }), - }); + dst_face.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id: 0, // TODO + wire_expr: key_expr, + ext_info: info, + }), + }, + res.expr(), + )); } } } diff --git a/zenoh/src/net/routing/hat/router/mod.rs b/zenoh/src/net/routing/hat/router/mod.rs index bedeffe396..4ae063003e 100644 --- a/zenoh/src/net/routing/hat/router/mod.rs +++ b/zenoh/src/net/routing/hat/router/mod.rs @@ -370,7 +370,6 @@ impl HatBaseTrait for HatCode { } face_hat_mut!(&mut face.state).link_id = link_id; - pubsub_new_face(tables, &mut face.state); queries_new_face(tables, &mut face.state); diff --git a/zenoh/src/net/routing/hat/router/pubsub.rs b/zenoh/src/net/routing/hat/router/pubsub.rs index f18d778819..189e6cb6e8 100644 --- a/zenoh/src/net/routing/hat/router/pubsub.rs +++ b/zenoh/src/net/routing/hat/router/pubsub.rs @@ -20,7 +20,7 @@ use crate::net::routing::dispatcher::resource::{NodeId, Resource, SessionContext use crate::net::routing::dispatcher::tables::{DataRoutes, PullCaches, Route, RoutingExpr}; use crate::net::routing::dispatcher::tables::{Tables, TablesLock}; use crate::net::routing::hat::HatPubSubTrait; -use crate::net::routing::PREFIX_LIVELINESS; +use crate::net::routing::{RoutingContext, PREFIX_LIVELINESS}; use petgraph::graph::NodeIndex; use std::borrow::Cow; use std::collections::{HashMap, HashSet}; @@ -55,18 +55,21 @@ fn send_sourced_subscription_to_net_childs( log::debug!("Send subscription {} on {}", res.expr(), someface); - someface.primitives.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType { - node_id: routing_context, + someface.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType { + node_id: routing_context, + }, + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id: 0, // TODO + wire_expr: key_expr, + ext_info: *sub_info, + }), }, - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // TODO - wire_expr: key_expr, - ext_info: *sub_info, - }), - }); + res.expr(), + )); } } None => log::trace!("Unable to find face for zid {}", net.graph[*child].zid), @@ -109,16 +112,19 @@ fn propagate_simple_subscription_to( { face_hat_mut!(dst_face).local_subs.insert(res.clone()); let key_expr = Resource::decl_key(res, dst_face); - dst_face.primitives.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // TODO - wire_expr: key_expr, - ext_info: *sub_info, - }), - }); + dst_face.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id: 0, // TODO + wire_expr: key_expr, + ext_info: *sub_info, + }), + }, + res.expr(), + )); } } @@ -461,16 +467,21 @@ fn declare_client_subscription( // TODO: Let's deactivate this on windows until Fixed #[cfg(not(windows))] for mcast_group in &wtables.mcast_groups { - 
mcast_group.primitives.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // TODO - wire_expr: res.expr().into(), - ext_info: *sub_info, - }), - }) + mcast_group + .primitives + .send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id: 0, // TODO + wire_expr: res.expr().into(), + ext_info: *sub_info, + }), + }, + res.expr(), + )) } } } @@ -480,16 +491,21 @@ fn declare_client_subscription( // TODO: Let's deactivate this on windows until Fixed #[cfg(not(windows))] for mcast_group in &wtables.mcast_groups { - mcast_group.primitives.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // TODO - wire_expr: res.expr().into(), - ext_info: *sub_info, - }), - }) + mcast_group + .primitives + .send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id: 0, // TODO + wire_expr: res.expr().into(), + ext_info: *sub_info, + }), + }, + res.expr(), + )) } } } @@ -562,17 +578,20 @@ fn send_forget_sourced_subscription_to_net_childs( log::debug!("Send forget subscription {} on {}", res.expr(), someface); - someface.primitives.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType { - node_id: routing_context.unwrap_or(0), + someface.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType { + node_id: routing_context.unwrap_or(0), + }, + body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { + id: 0, // TODO + ext_wire_expr: WireExprType { wire_expr }, + }), }, - body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id: 0, // TODO - ext_wire_expr: WireExprType { wire_expr }, - }), - }); + res.expr(), + )); } } None => log::trace!("Unable to find face for zid {}", net.graph[*child].zid), @@ -585,15 +604,18 @@ fn propagate_forget_simple_subscription(tables: &mut Tables, res: &Arc for face in tables.faces.values_mut() { if face_hat!(face).local_subs.contains(res) { let wire_expr = Resource::get_best_key(res, "", face.id); - face.primitives.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id: 0, // TODO - ext_wire_expr: WireExprType { wire_expr }, - }), - }); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { + id: 0, // TODO + ext_wire_expr: WireExprType { wire_expr }, + }), + }, + res.expr(), + )); face_hat_mut!(face).local_subs.remove(res); } } @@ -621,15 +643,18 @@ fn propagate_forget_simple_subscription_to_peers(tables: &mut Tables, res: &Arc< }) { let wire_expr = Resource::get_best_key(res, "", face.id); - face.primitives.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), - 
ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id: 0, // TODO - ext_wire_expr: WireExprType { wire_expr }, - }), - }); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { + id: 0, // TODO + ext_wire_expr: WireExprType { wire_expr }, + }), + }, + res.expr(), + )); face_hat_mut!(&mut face).local_subs.remove(res); } @@ -857,15 +882,18 @@ pub(super) fn undeclare_client_subscription( && !(face.whatami == WhatAmI::Client && res.expr().starts_with(PREFIX_LIVELINESS)) { let wire_expr = Resource::get_best_key(res, "", face.id); - face.primitives.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id: 0, // TODO - ext_wire_expr: WireExprType { wire_expr }, - }), - }); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { + id: 0, // TODO + ext_wire_expr: WireExprType { wire_expr }, + }), + }, + res.expr(), + )); face_hat_mut!(face).local_subs.remove(res); } @@ -917,16 +945,19 @@ pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { for sub in &hat!(tables).router_subs { face_hat_mut!(face).local_subs.insert(sub.clone()); let key_expr = Resource::decl_key(sub, face); - face.primitives.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // TODO - wire_expr: key_expr, - ext_info: sub_info, - }), - }); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id: 0, // TODO + wire_expr: key_expr, + ext_info: sub_info, + }), + }, + sub.expr(), + )); } } else if face.whatami == WhatAmI::Peer && !hat!(tables).full_net(WhatAmI::Peer) { for sub in &hat!(tables).router_subs { @@ -942,16 +973,19 @@ pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { { face_hat_mut!(face).local_subs.insert(sub.clone()); let key_expr = Resource::decl_key(sub, face); - face.primitives.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // TODO - wire_expr: key_expr, - ext_info: sub_info, - }), - }); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id: 0, // TODO + wire_expr: key_expr, + ext_info: sub_info, + }), + }, + sub.expr(), + )); } } } @@ -962,16 +996,19 @@ pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { for sub in &hat!(tables).peer_subs { face_hat_mut!(face).local_subs.insert(sub.clone()); let key_expr = Resource::decl_key(sub, face); - face.primitives.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - 
ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // TODO - wire_expr: key_expr, - ext_info: sub_info, - }), - }); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id: 0, // TODO + wire_expr: key_expr, + ext_info: sub_info, + }), + }, + sub.expr(), + )); } } } else { @@ -1155,17 +1192,20 @@ pub(super) fn pubsub_linkstate_change(tables: &mut Tables, zid: &ZenohId, links: }; if forget { let wire_expr = Resource::get_best_key(res, "", dst_face.id); - dst_face.primitives.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::UndeclareSubscriber( - UndeclareSubscriber { - id: 0, // TODO - ext_wire_expr: WireExprType { wire_expr }, - }, - ), - }); + dst_face.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::UndeclareSubscriber( + UndeclareSubscriber { + id: 0, // TODO + ext_wire_expr: WireExprType { wire_expr }, + }, + ), + }, + res.expr(), + )); face_hat_mut!(dst_face).local_subs.remove(res); } @@ -1177,16 +1217,19 @@ pub(super) fn pubsub_linkstate_change(tables: &mut Tables, zid: &ZenohId, links: reliability: Reliability::Reliable, // TODO mode: Mode::Push, }; - dst_face.primitives.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // TODO - wire_expr: key_expr, - ext_info: sub_info, - }), - }); + dst_face.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id: 0, // TODO + wire_expr: key_expr, + ext_info: sub_info, + }), + }, + res.expr(), + )); } } } diff --git a/zenoh/src/net/routing/hat/router/queries.rs b/zenoh/src/net/routing/hat/router/queries.rs index a6302abc4e..c1093a8a00 100644 --- a/zenoh/src/net/routing/hat/router/queries.rs +++ b/zenoh/src/net/routing/hat/router/queries.rs @@ -22,7 +22,7 @@ use crate::net::routing::dispatcher::tables::{ }; use crate::net::routing::dispatcher::tables::{Tables, TablesLock}; use crate::net::routing::hat::HatQueriesTrait; -use crate::net::routing::PREFIX_LIVELINESS; +use crate::net::routing::{RoutingContext, PREFIX_LIVELINESS}; use ordered_float::OrderedFloat; use petgraph::graph::NodeIndex; use std::borrow::Cow; @@ -208,18 +208,21 @@ fn send_sourced_queryable_to_net_childs( log::debug!("Send queryable {} on {}", res.expr(), someface); - someface.primitives.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType { - node_id: routing_context, + someface.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType { + node_id: routing_context, + }, + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id: 0, // TODO + wire_expr: key_expr, + ext_info: *qabl_info, + }), }, - body: DeclareBody::DeclareQueryable(DeclareQueryable { - id: 0, // TODO - wire_expr: key_expr, - ext_info: *qabl_info, - }), - }); + 
res.expr(), + )); } } None => log::trace!("Unable to find face for zid {}", net.graph[*child].zid), @@ -275,16 +278,19 @@ fn propagate_simple_queryable( .local_qabls .insert(res.clone(), info); let key_expr = Resource::decl_key(res, &mut dst_face); - dst_face.primitives.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareQueryable(DeclareQueryable { - id: 0, // TODO - wire_expr: key_expr, - ext_info: info, - }), - }); + dst_face.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id: 0, // TODO + wire_expr: key_expr, + ext_info: info, + }), + }, + res.expr(), + )); } } } @@ -678,17 +684,20 @@ fn send_forget_sourced_queryable_to_net_childs( log::debug!("Send forget queryable {} on {}", res.expr(), someface); - someface.primitives.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType { - node_id: routing_context, + someface.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType { + node_id: routing_context, + }, + body: DeclareBody::UndeclareQueryable(UndeclareQueryable { + id: 0, // TODO + ext_wire_expr: WireExprType { wire_expr }, + }), }, - body: DeclareBody::UndeclareQueryable(UndeclareQueryable { - id: 0, // TODO - ext_wire_expr: WireExprType { wire_expr }, - }), - }); + res.expr(), + )); } } None => log::trace!("Unable to find face for zid {}", net.graph[*child].zid), @@ -701,15 +710,18 @@ fn propagate_forget_simple_queryable(tables: &mut Tables, res: &mut Arc) { let info = local_qabl_info(tables, qabl, face); face_hat_mut!(face).local_qabls.insert(qabl.clone(), info); let key_expr = Resource::decl_key(qabl, face); - face.primitives.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareQueryable(DeclareQueryable { - id: 0, // TODO - wire_expr: key_expr, - ext_info: info, - }), - }); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id: 0, // TODO + wire_expr: key_expr, + ext_info: info, + }), + }, + qabl.expr(), + )); } } } else if face.whatami == WhatAmI::Peer && !hat!(tables).full_net(WhatAmI::Peer) { @@ -1071,16 +1092,19 @@ pub(super) fn queries_new_face(tables: &mut Tables, face: &mut Arc) { let info = local_qabl_info(tables, qabl, face); face_hat_mut!(face).local_qabls.insert(qabl.clone(), info); let key_expr = Resource::decl_key(qabl, face); - face.primitives.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareQueryable(DeclareQueryable { - id: 0, // TODO - wire_expr: key_expr, - ext_info: info, - }), - }); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id: 0, // TODO + wire_expr: key_expr, + ext_info: info, + }), + }, + qabl.expr(), + )); } } } @@ -1093,16 +1117,19 @@ 
pub(super) fn queries_new_face(tables: &mut Tables, face: &mut Arc) { let info = local_qabl_info(tables, qabl, face); face_hat_mut!(face).local_qabls.insert(qabl.clone(), info); let key_expr = Resource::decl_key(qabl, face); - face.primitives.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareQueryable(DeclareQueryable { - id: 0, // TODO - wire_expr: key_expr, - ext_info: info, - }), - }); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id: 0, // TODO + wire_expr: key_expr, + ext_info: info, + }), + }, + qabl.expr(), + )); } } } @@ -1230,15 +1257,20 @@ pub(super) fn queries_linkstate_change(tables: &mut Tables, zid: &ZenohId, links }; if forget { let wire_expr = Resource::get_best_key(res, "", dst_face.id); - dst_face.primitives.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::UndeclareQueryable(UndeclareQueryable { - id: 0, // TODO - ext_wire_expr: WireExprType { wire_expr }, - }), - }); + dst_face.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::UndeclareQueryable( + UndeclareQueryable { + id: 0, // TODO + ext_wire_expr: WireExprType { wire_expr }, + }, + ), + }, + res.expr(), + )); face_hat_mut!(dst_face).local_qabls.remove(res); } @@ -1249,16 +1281,19 @@ pub(super) fn queries_linkstate_change(tables: &mut Tables, zid: &ZenohId, links .local_qabls .insert(res.clone(), info); let key_expr = Resource::decl_key(res, dst_face); - dst_face.primitives.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareQueryable(DeclareQueryable { - id: 0, // TODO - wire_expr: key_expr, - ext_info: info, - }), - }); + dst_face.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id: 0, // TODO + wire_expr: key_expr, + ext_info: info, + }), + }, + res.expr(), + )); } } } diff --git a/zenoh/src/net/routing/interceptor/mod.rs b/zenoh/src/net/routing/interceptor/mod.rs index 1d41750efa..db20e6a612 100644 --- a/zenoh/src/net/routing/interceptor/mod.rs +++ b/zenoh/src/net/routing/interceptor/mod.rs @@ -17,11 +17,36 @@ //! This module is intended for Zenoh's internal use. //! //! 
[Click here for Zenoh's documentation](../zenoh/index.html) +use super::RoutingContext; use zenoh_protocol::network::NetworkMessage; use zenoh_transport::{TransportMulticast, TransportUnicast}; pub(crate) trait InterceptTrait { - fn intercept(&self, msg: NetworkMessage) -> Option; + fn intercept( + &self, + ctx: RoutingContext, + ) -> Option>; +} + +pub(crate) type Intercept = Box; +pub(crate) type IngressIntercept = Intercept; +pub(crate) type EgressIntercept = Intercept; + +pub(crate) trait InterceptorTrait { + fn new_transport_unicast( + &self, + transport: &TransportUnicast, + ) -> (Option, Option); + fn new_transport_multicast(&self, transport: &TransportMulticast) -> Option; + fn new_peer_multicast(&self, transport: &TransportMulticast) -> Option; +} + +pub(crate) type Interceptor = Box; + +pub(crate) fn interceptors() -> Vec { + // Add interceptors here + // vec![Box::new(LoggerInterceptor {})] + vec![] } pub(crate) struct InterceptsChain { @@ -42,36 +67,74 @@ impl From> for InterceptsChain { } impl InterceptTrait for InterceptsChain { - fn intercept(&self, mut msg: NetworkMessage) -> Option { + fn intercept( + &self, + mut ctx: RoutingContext, + ) -> Option> { for intercept in &self.intercepts { - match intercept.intercept(msg) { - Some(newmsg) => msg = newmsg, + match intercept.intercept(ctx) { + Some(newctx) => ctx = newctx, None => { log::trace!("Msg intercepted!"); return None; } } } - Some(msg) + Some(ctx) } } -pub(crate) type Intercept = Box; -pub(crate) type IngressIntercept = Intercept; -pub(crate) type EgressIntercept = Intercept; +pub(crate) struct IngressMsgLogger {} -pub(crate) trait InterceptorTrait { +impl InterceptTrait for IngressMsgLogger { + fn intercept( + &self, + ctx: RoutingContext, + ) -> Option> { + log::debug!( + "Recv {} {} Expr:{:?}", + ctx.inface() + .map(|f| f.to_string()) + .unwrap_or("None".to_string()), + ctx.msg, + ctx.full_expr(), + ); + Some(ctx) + } +} +pub(crate) struct EgressMsgLogger {} + +impl InterceptTrait for EgressMsgLogger { + fn intercept( + &self, + ctx: RoutingContext, + ) -> Option> { + log::debug!("Send {} Expr:{:?}", ctx.msg, ctx.full_expr()); + Some(ctx) + } +} + +pub(crate) struct LoggerInterceptor {} + +impl InterceptorTrait for LoggerInterceptor { fn new_transport_unicast( &self, transport: &TransportUnicast, - ) -> (Option, Option); - fn new_transport_multicast(&self, transport: &TransportMulticast) -> Option; - fn new_peer_multicast(&self, transport: &TransportMulticast) -> Option; -} + ) -> (Option, Option) { + log::debug!("New transport unicast {:?}", transport); + ( + Some(Box::new(IngressMsgLogger {})), + Some(Box::new(EgressMsgLogger {})), + ) + } -pub(crate) type Interceptor = Box; + fn new_transport_multicast(&self, transport: &TransportMulticast) -> Option { + log::debug!("New transport multicast {:?}", transport); + Some(Box::new(EgressMsgLogger {})) + } -pub(crate) fn interceptors() -> Vec { - // Add interceptors here - vec![] + fn new_peer_multicast(&self, transport: &TransportMulticast) -> Option { + log::debug!("New peer multicast {:?}", transport); + Some(Box::new(IngressMsgLogger {})) + } } diff --git a/zenoh/src/net/routing/mod.rs b/zenoh/src/net/routing/mod.rs index 597163c3e1..527ee229a1 100644 --- a/zenoh/src/net/routing/mod.rs +++ b/zenoh/src/net/routing/mod.rs @@ -22,6 +22,98 @@ pub mod hat; pub mod interceptor; pub mod router; +use std::{cell::OnceCell, sync::Arc}; + +use zenoh_protocol::{core::WireExpr, network::NetworkMessage}; + +use self::{dispatcher::face::Face, router::Resource}; + use 
super::runtime; pub(crate) static PREFIX_LIVELINESS: &str = "@/liveliness"; + +pub(crate) struct RoutingContext { + pub(crate) msg: Msg, + pub(crate) inface: OnceCell, + pub(crate) prefix: OnceCell>, + pub(crate) full_expr: OnceCell, +} + +impl RoutingContext { + pub(crate) fn with_face(msg: Msg, inface: Face) -> Self { + Self { + msg, + inface: OnceCell::from(inface), + prefix: OnceCell::new(), + full_expr: OnceCell::new(), + } + } + + pub(crate) fn with_expr(msg: Msg, expr: String) -> Self { + Self { + msg, + inface: OnceCell::new(), + prefix: OnceCell::new(), + full_expr: OnceCell::from(expr), + } + } + + pub(crate) fn inface(&self) -> Option<&Face> { + self.inface.get() + } +} + +impl RoutingContext { + #[inline] + pub(crate) fn wire_expr(&self) -> Option<&WireExpr> { + use zenoh_protocol::network::DeclareBody; + use zenoh_protocol::network::NetworkBody; + match &self.msg.body { + NetworkBody::Push(m) => Some(&m.wire_expr), + NetworkBody::Request(m) => Some(&m.wire_expr), + NetworkBody::Response(m) => Some(&m.wire_expr), + NetworkBody::ResponseFinal(_) => None, + NetworkBody::Declare(m) => match &m.body { + DeclareBody::DeclareKeyExpr(m) => Some(&m.wire_expr), + DeclareBody::UndeclareKeyExpr(_) => None, + DeclareBody::DeclareSubscriber(m) => Some(&m.wire_expr), + DeclareBody::UndeclareSubscriber(m) => Some(&m.ext_wire_expr.wire_expr), + DeclareBody::DeclareQueryable(m) => Some(&m.wire_expr), + DeclareBody::UndeclareQueryable(m) => Some(&m.ext_wire_expr.wire_expr), + DeclareBody::DeclareToken(m) => Some(&m.wire_expr), + DeclareBody::UndeclareToken(m) => Some(&m.ext_wire_expr.wire_expr), + DeclareBody::DeclareInterest(m) => Some(&m.wire_expr), + DeclareBody::FinalInterest(_) => None, + DeclareBody::UndeclareInterest(m) => Some(&m.ext_wire_expr.wire_expr), + }, + NetworkBody::OAM(_) => None, + } + } + + #[inline] + pub(crate) fn full_expr(&self) -> Option<&str> { + if self.full_expr.get().is_some() { + return Some(self.full_expr.get().as_ref().unwrap()); + } + if let Some(face) = self.inface.get() { + if let Some(wire_expr) = self.wire_expr() { + let wire_expr = wire_expr.to_owned(); + if self.prefix.get().is_none() { + if let Some(prefix) = zread!(face.tables.tables) + .get_mapping(&face.state, &wire_expr.scope, wire_expr.mapping) + .cloned() + { + let _ = self.prefix.set(prefix); + } + } + if let Some(prefix) = self.prefix.get().cloned() { + let _ = self + .full_expr + .set(prefix.expr() + wire_expr.suffix.as_ref()); + return Some(self.full_expr.get().as_ref().unwrap()); + } + } + } + None + } +} diff --git a/zenoh/src/net/routing/router.rs b/zenoh/src/net/routing/router.rs index e32d213f79..5b7c18db2d 100644 --- a/zenoh/src/net/routing/router.rs +++ b/zenoh/src/net/routing/router.rs @@ -23,9 +23,9 @@ use super::interceptor::InterceptsChain; use super::runtime::Runtime; use crate::net::primitives::DeMux; use crate::net::primitives::DummyPrimitives; +use crate::net::primitives::EPrimitives; use crate::net::primitives::McastMux; use crate::net::primitives::Mux; -use crate::net::primitives::Primitives; use crate::net::routing::interceptor::IngressIntercept; use std::any::Any; use std::str::FromStr; @@ -98,7 +98,10 @@ impl Router { ) } - pub fn new_primitives(&self, primitives: Arc) -> Arc { + pub(crate) fn new_primitives( + &self, + primitives: Arc, + ) -> Arc { let ctrl_lock = zlock!(self.tables.ctrl_lock); let mut tables = zwrite!(self.tables.tables); @@ -171,7 +174,12 @@ impl Router { whatami, #[cfg(feature = "stats")] Some(stats), - Arc::new(Mux::new(transport.clone(), egress)), + 
Arc::new(Mux::new( + transport.clone(), + fid, + self.tables.clone(), + egress, + )), None, ctrl_lock.new_face(), ) @@ -212,7 +220,12 @@ impl Router { WhatAmI::Peer, #[cfg(feature = "stats")] None, - Arc::new(McastMux::new(transport.clone(), intercept)), + Arc::new(McastMux::new( + transport.clone(), + fid, + self.tables.clone(), + intercept, + )), Some(transport), ctrl_lock.new_face(), )); @@ -227,7 +240,7 @@ impl Router { &self, transport: TransportMulticast, peer: TransportPeer, - ) -> ZResult>> { + ) -> ZResult> { let ctrl_lock = zlock!(self.tables.ctrl_lock); let mut tables = zwrite!(self.tables.tables); let fid = tables.face_counter; @@ -267,8 +280,7 @@ impl Router { pub struct LinkStateInterceptor { pub(crate) transport: TransportUnicast, pub(crate) tables: Arc, - pub(crate) face: Face, - pub(crate) demux: DeMux, + pub(crate) demux: DeMux, } impl LinkStateInterceptor { @@ -281,7 +293,6 @@ impl LinkStateInterceptor { LinkStateInterceptor { transport, tables, - face: face.clone(), demux: DeMux::new(face, ingress), } } diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs index 789c618ed0..db21c9633c 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -403,6 +403,38 @@ impl Primitives for AdminSpace { } } +impl crate::net::primitives::EPrimitives for AdminSpace { + #[inline] + fn send_declare(&self, ctx: crate::net::routing::RoutingContext) { + (self as &dyn Primitives).send_declare(ctx.msg) + } + + #[inline] + fn send_push(&self, ctx: crate::net::routing::RoutingContext) { + (self as &dyn Primitives).send_push(ctx.msg) + } + + #[inline] + fn send_request(&self, ctx: crate::net::routing::RoutingContext) { + (self as &dyn Primitives).send_request(ctx.msg) + } + + #[inline] + fn send_response(&self, ctx: crate::net::routing::RoutingContext) { + (self as &dyn Primitives).send_response(ctx.msg) + } + + #[inline] + fn send_response_final(&self, ctx: crate::net::routing::RoutingContext) { + (self as &dyn Primitives).send_response_final(ctx.msg) + } + + #[inline] + fn send_close(&self) { + (self as &dyn Primitives).send_close() + } +} + fn router_data(context: &AdminContext, query: Query) { let reply_key: OwnedKeyExpr = format!("@/router/{}", context.zid_str).try_into().unwrap(); diff --git a/zenoh/src/net/runtime/mod.rs b/zenoh/src/net/runtime/mod.rs index 20db2540c4..72e24bf32b 100644 --- a/zenoh/src/net/runtime/mod.rs +++ b/zenoh/src/net/runtime/mod.rs @@ -22,8 +22,6 @@ pub mod orchestrator; use super::primitives::DeMux; use super::routing; -use super::routing::dispatcher::face::Face; -use super::routing::dispatcher::pubsub::full_reentrant_route_data; use super::routing::router::{LinkStateInterceptor, Router}; use crate::config::{unwrap_or_default, Config, ModeDependent, Notifier}; use crate::GIT_VERSION; @@ -39,7 +37,7 @@ use stop_token::{StopSource, TimedOutError}; use uhlc::{HLCBuilder, HLC}; use zenoh_link::{EndPoint, Link}; use zenoh_protocol::core::{whatami::WhatAmIMatcher, Locator, WhatAmI, ZenohId}; -use zenoh_protocol::network::{NetworkBody, NetworkMessage}; +use zenoh_protocol::network::NetworkMessage; use zenoh_result::{bail, ZResult}; use zenoh_sync::get_mut_unchecked; use zenoh_transport::{ @@ -278,21 +276,6 @@ pub(super) struct RuntimeSession { impl TransportPeerEventHandler for RuntimeSession { fn handle_message(&self, msg: NetworkMessage) -> ZResult<()> { - // critical path shortcut - if let NetworkBody::Push(data) = msg.body { - let face = &self.main_handler.face.state; - - 
full_reentrant_route_data( - &self.main_handler.tables.tables, - face, - &data.wire_expr, - data.ext_qos, - data.payload, - data.ext_nodeid.node_id, - ); - return Ok(()); - } - self.main_handler.handle_message(msg) } @@ -370,7 +353,7 @@ impl TransportMulticastEventHandler for RuntimeMuticastGroup { } pub(super) struct RuntimeMuticastSession { - pub(super) main_handler: Arc>, + pub(super) main_handler: Arc, pub(super) slave_handlers: Vec>, } diff --git a/zenoh/src/session.rs b/zenoh/src/session.rs index df90644a6b..2ffbe0ae2a 100644 --- a/zenoh/src/session.rs +++ b/zenoh/src/session.rs @@ -2374,3 +2374,35 @@ pub trait SessionDeclarations { #[zenoh_macros::unstable] fn liveliness(&self) -> Liveliness<'static>; } + +impl crate::net::primitives::EPrimitives for Session { + #[inline] + fn send_declare(&self, ctx: crate::net::routing::RoutingContext) { + (self as &dyn Primitives).send_declare(ctx.msg) + } + + #[inline] + fn send_push(&self, ctx: crate::net::routing::RoutingContext) { + (self as &dyn Primitives).send_push(ctx.msg) + } + + #[inline] + fn send_request(&self, ctx: crate::net::routing::RoutingContext) { + (self as &dyn Primitives).send_request(ctx.msg) + } + + #[inline] + fn send_response(&self, ctx: crate::net::routing::RoutingContext) { + (self as &dyn Primitives).send_response(ctx.msg) + } + + #[inline] + fn send_response_final(&self, ctx: crate::net::routing::RoutingContext) { + (self as &dyn Primitives).send_response_final(ctx.msg) + } + + #[inline] + fn send_close(&self) { + (self as &dyn Primitives).send_close() + } +} From 59e8de713f8b187f37248c4c136e9966107e7d72 Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Tue, 5 Dec 2023 11:30:43 +0100 Subject: [PATCH 018/122] Interceptors can access the Config at construction --- zenoh/src/net/routing/dispatcher/tables.rs | 20 ++++++++++---------- zenoh/src/net/routing/interceptor/mod.rs | 3 ++- zenoh/src/net/routing/router.rs | 20 +++----------------- zenoh/src/net/runtime/mod.rs | 16 ++-------------- 4 files changed, 17 insertions(+), 42 deletions(-) diff --git a/zenoh/src/net/routing/dispatcher/tables.rs b/zenoh/src/net/routing/dispatcher/tables.rs index 90103b37b7..6bf9eec4f9 100644 --- a/zenoh/src/net/routing/dispatcher/tables.rs +++ b/zenoh/src/net/routing/dispatcher/tables.rs @@ -23,8 +23,9 @@ use std::any::Any; use std::collections::HashMap; use std::sync::{Arc, Weak}; use std::sync::{Mutex, RwLock}; -use std::time::Duration; use uhlc::HLC; +use zenoh_config::unwrap_or_default; +use zenoh_config::Config; use zenoh_protocol::core::{ExprId, WhatAmI, ZenohId}; use zenoh_protocol::network::Mapping; #[cfg(feature = "stats")] @@ -77,14 +78,13 @@ pub struct Tables { } impl Tables { - pub fn new( - zid: ZenohId, - whatami: WhatAmI, - hlc: Option>, - drop_future_timestamp: bool, - router_peers_failover_brokering: bool, - _queries_default_timeout: Duration, - ) -> Self { + pub fn new(zid: ZenohId, whatami: WhatAmI, hlc: Option>, config: &Config) -> Self { + let drop_future_timestamp = + unwrap_or_default!(config.timestamping().drop_future_timestamp()); + let router_peers_failover_brokering = + unwrap_or_default!(config.routing().router().peers_failover_brokering()); + // let queries_default_timeout = + // Duration::from_millis(unwrap_or_default!(config.queries_default_timeout())); let hat_code = hat::new_hat(whatami); Tables { zid, @@ -98,7 +98,7 @@ impl Tables { faces: HashMap::new(), mcast_groups: vec![], mcast_faces: vec![], - interceptors: interceptors(), + interceptors: interceptors(config), pull_caches_lock: Mutex::new(()), hat: 
hat_code.new_tables(router_peers_failover_brokering), hat_code: hat_code.into(), diff --git a/zenoh/src/net/routing/interceptor/mod.rs b/zenoh/src/net/routing/interceptor/mod.rs index db20e6a612..8ee2ff41bc 100644 --- a/zenoh/src/net/routing/interceptor/mod.rs +++ b/zenoh/src/net/routing/interceptor/mod.rs @@ -18,6 +18,7 @@ //! //! [Click here for Zenoh's documentation](../zenoh/index.html) use super::RoutingContext; +use zenoh_config::Config; use zenoh_protocol::network::NetworkMessage; use zenoh_transport::{TransportMulticast, TransportUnicast}; @@ -43,7 +44,7 @@ pub(crate) trait InterceptorTrait { pub(crate) type Interceptor = Box; -pub(crate) fn interceptors() -> Vec { +pub(crate) fn interceptors(_config: &Config) -> Vec { // Add interceptors here // vec![Box::new(LoggerInterceptor {})] vec![] diff --git a/zenoh/src/net/routing/router.rs b/zenoh/src/net/routing/router.rs index 5b7c18db2d..db7d1170bb 100644 --- a/zenoh/src/net/routing/router.rs +++ b/zenoh/src/net/routing/router.rs @@ -31,8 +31,8 @@ use std::any::Any; use std::str::FromStr; use std::sync::Arc; use std::sync::{Mutex, RwLock}; -use std::time::Duration; use uhlc::HLC; +use zenoh_config::Config; use zenoh_link::Link; use zenoh_protocol::core::{WhatAmI, WhatAmIMatcher, ZenohId}; use zenoh_protocol::network::{NetworkBody, NetworkMessage}; @@ -48,25 +48,11 @@ pub struct Router { } impl Router { - pub fn new( - zid: ZenohId, - whatami: WhatAmI, - hlc: Option>, - drop_future_timestamp: bool, - router_peers_failover_brokering: bool, - queries_default_timeout: Duration, - ) -> Self { + pub fn new(zid: ZenohId, whatami: WhatAmI, hlc: Option>, config: &Config) -> Self { Router { // whatami, tables: Arc::new(TablesLock { - tables: RwLock::new(Tables::new( - zid, - whatami, - hlc, - drop_future_timestamp, - router_peers_failover_brokering, - queries_default_timeout, - )), + tables: RwLock::new(Tables::new(zid, whatami, hlc, config)), ctrl_lock: Mutex::new(hat::new_hat(whatami)), queries_lock: RwLock::new(()), }), diff --git a/zenoh/src/net/runtime/mod.rs b/zenoh/src/net/runtime/mod.rs index 72e24bf32b..d7da114442 100644 --- a/zenoh/src/net/runtime/mod.rs +++ b/zenoh/src/net/runtime/mod.rs @@ -31,7 +31,6 @@ use futures::stream::StreamExt; use futures::Future; use std::any::Any; use std::sync::Arc; -use std::time::Duration; use stop_token::future::FutureExt; use stop_token::{StopSource, TimedOutError}; use uhlc::{HLCBuilder, HLC}; @@ -93,8 +92,6 @@ impl Runtime { let metadata = config.metadata().clone(); let hlc = (*unwrap_or_default!(config.timestamping().enabled().get(whatami))) .then(|| Arc::new(HLCBuilder::new().with_id(uhlc::ID::from(&zid)).build())); - let drop_future_timestamp = - unwrap_or_default!(config.timestamping().drop_future_timestamp()); let gossip = unwrap_or_default!(config.scouting().gossip().enabled()); let gossip_multihop = unwrap_or_default!(config.scouting().gossip().multihop()); @@ -109,17 +106,8 @@ impl Runtime { && unwrap_or_default!(config.routing().peer().mode()) == *"linkstate"; let router_peers_failover_brokering = unwrap_or_default!(config.routing().router().peers_failover_brokering()); - let queries_default_timeout = - Duration::from_millis(unwrap_or_default!(config.queries_default_timeout())); - - let router = Arc::new(Router::new( - zid, - whatami, - hlc.clone(), - drop_future_timestamp, - router_peers_failover_brokering, - queries_default_timeout, - )); + + let router = Arc::new(Router::new(zid, whatami, hlc.clone(), &config)); let handler = Arc::new(RuntimeTransportEventHandler { runtime: 
std::sync::RwLock::new(None), From 9c94df5e203e2d448fdde62ec8001927f799066f Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Tue, 5 Dec 2023 17:10:17 +0100 Subject: [PATCH 019/122] Split linkstate and p2p peer hats --- zenoh/src/net/routing/dispatcher/tables.rs | 2 +- .../hat/{peer => linkstate_peer}/mod.rs | 0 .../hat/{peer => linkstate_peer}/network.rs | 0 .../hat/{peer => linkstate_peer}/pubsub.rs | 0 .../hat/{peer => linkstate_peer}/queries.rs | 0 zenoh/src/net/routing/hat/mod.rs | 15 +- zenoh/src/net/routing/hat/p2p_peer/mod.rs | 836 ++++++++ zenoh/src/net/routing/hat/p2p_peer/network.rs | 1007 ++++++++++ zenoh/src/net/routing/hat/p2p_peer/pubsub.rs | 1650 +++++++++++++++ zenoh/src/net/routing/hat/p2p_peer/queries.rs | 1788 +++++++++++++++++ zenoh/src/net/routing/router.rs | 2 +- 11 files changed, 5294 insertions(+), 6 deletions(-) rename zenoh/src/net/routing/hat/{peer => linkstate_peer}/mod.rs (100%) rename zenoh/src/net/routing/hat/{peer => linkstate_peer}/network.rs (100%) rename zenoh/src/net/routing/hat/{peer => linkstate_peer}/pubsub.rs (100%) rename zenoh/src/net/routing/hat/{peer => linkstate_peer}/queries.rs (100%) create mode 100644 zenoh/src/net/routing/hat/p2p_peer/mod.rs create mode 100644 zenoh/src/net/routing/hat/p2p_peer/network.rs create mode 100644 zenoh/src/net/routing/hat/p2p_peer/pubsub.rs create mode 100644 zenoh/src/net/routing/hat/p2p_peer/queries.rs diff --git a/zenoh/src/net/routing/dispatcher/tables.rs b/zenoh/src/net/routing/dispatcher/tables.rs index 6bf9eec4f9..787689e1e6 100644 --- a/zenoh/src/net/routing/dispatcher/tables.rs +++ b/zenoh/src/net/routing/dispatcher/tables.rs @@ -85,7 +85,7 @@ impl Tables { unwrap_or_default!(config.routing().router().peers_failover_brokering()); // let queries_default_timeout = // Duration::from_millis(unwrap_or_default!(config.queries_default_timeout())); - let hat_code = hat::new_hat(whatami); + let hat_code = hat::new_hat(whatami, config); Tables { zid, whatami, diff --git a/zenoh/src/net/routing/hat/peer/mod.rs b/zenoh/src/net/routing/hat/linkstate_peer/mod.rs similarity index 100% rename from zenoh/src/net/routing/hat/peer/mod.rs rename to zenoh/src/net/routing/hat/linkstate_peer/mod.rs diff --git a/zenoh/src/net/routing/hat/peer/network.rs b/zenoh/src/net/routing/hat/linkstate_peer/network.rs similarity index 100% rename from zenoh/src/net/routing/hat/peer/network.rs rename to zenoh/src/net/routing/hat/linkstate_peer/network.rs diff --git a/zenoh/src/net/routing/hat/peer/pubsub.rs b/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs similarity index 100% rename from zenoh/src/net/routing/hat/peer/pubsub.rs rename to zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs diff --git a/zenoh/src/net/routing/hat/peer/queries.rs b/zenoh/src/net/routing/hat/linkstate_peer/queries.rs similarity index 100% rename from zenoh/src/net/routing/hat/peer/queries.rs rename to zenoh/src/net/routing/hat/linkstate_peer/queries.rs diff --git a/zenoh/src/net/routing/hat/mod.rs b/zenoh/src/net/routing/hat/mod.rs index eccd59a0aa..01da9d34bf 100644 --- a/zenoh/src/net/routing/hat/mod.rs +++ b/zenoh/src/net/routing/hat/mod.rs @@ -27,7 +27,7 @@ use super::dispatcher::{ use crate::runtime::Runtime; use std::{any::Any, sync::Arc}; use zenoh_buffers::ZBuf; -use zenoh_config::{WhatAmI, WhatAmIMatcher}; +use zenoh_config::{unwrap_or_default, Config, WhatAmI, WhatAmIMatcher}; use zenoh_protocol::{ core::WireExpr, network::{ @@ -39,7 +39,8 @@ use zenoh_result::ZResult; use zenoh_transport::TransportUnicast; mod client; -mod peer; +mod 
linkstate_peer; +mod p2p_peer; mod router; zconfigurable! { @@ -186,10 +187,16 @@ pub(crate) trait HatQueriesTrait { ) -> Vec<(WireExpr<'static>, ZBuf)>; } -pub(crate) fn new_hat(whatami: WhatAmI) -> Box { +pub(crate) fn new_hat(whatami: WhatAmI, config: &Config) -> Box { match whatami { WhatAmI::Client => Box::new(client::HatCode {}), - WhatAmI::Peer => Box::new(peer::HatCode {}), + WhatAmI::Peer => { + if unwrap_or_default!(config.routing().peer().mode()) == *"linkstate" { + Box::new(linkstate_peer::HatCode {}) + } else { + Box::new(p2p_peer::HatCode {}) + } + } WhatAmI::Router => Box::new(router::HatCode {}), } } diff --git a/zenoh/src/net/routing/hat/p2p_peer/mod.rs b/zenoh/src/net/routing/hat/p2p_peer/mod.rs new file mode 100644 index 0000000000..4ae063003e --- /dev/null +++ b/zenoh/src/net/routing/hat/p2p_peer/mod.rs @@ -0,0 +1,836 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +//! ⚠️ WARNING ⚠️ +//! +//! This module is intended for Zenoh's internal use. +//! +//! [Click here for Zenoh's documentation](../zenoh/index.html) +use self::{ + network::{shared_nodes, Network}, + pubsub::{ + pubsub_linkstate_change, pubsub_new_face, pubsub_remove_node, undeclare_client_subscription, + }, + queries::{ + queries_linkstate_change, queries_new_face, queries_remove_node, undeclare_client_queryable, + }, +}; +use super::{ + super::dispatcher::{ + face::FaceState, + tables::{NodeId, Resource, RoutingExpr, Tables, TablesLock}, + }, + HatBaseTrait, HatTrait, +}; +use crate::{ + net::{ + codec::Zenoh080Routing, protocol::linkstate::LinkStateList, routing::dispatcher::face::Face, + }, + runtime::Runtime, +}; +use async_std::task::JoinHandle; +use std::{ + any::Any, + collections::{hash_map::DefaultHasher, HashMap, HashSet}, + hash::Hasher, + sync::Arc, +}; +use zenoh_config::{WhatAmI, WhatAmIMatcher, ZenohId}; +use zenoh_protocol::{ + common::ZExtBody, + network::{declare::queryable::ext::QueryableInfo, oam::id::OAM_LINKSTATE, Oam}, +}; +use zenoh_result::ZResult; +use zenoh_sync::get_mut_unchecked; +use zenoh_transport::TransportUnicast; + +mod network; +mod pubsub; +mod queries; + +zconfigurable! { + static ref TREES_COMPUTATION_DELAY: u64 = 100; +} + +macro_rules! hat { + ($t:expr) => { + $t.hat.downcast_ref::().unwrap() + }; +} +use hat; + +macro_rules! hat_mut { + ($t:expr) => { + $t.hat.downcast_mut::().unwrap() + }; +} +use hat_mut; + +macro_rules! res_hat { + ($r:expr) => { + $r.context().hat.downcast_ref::().unwrap() + }; +} +use res_hat; + +macro_rules! res_hat_mut { + ($r:expr) => { + get_mut_unchecked($r) + .context_mut() + .hat + .downcast_mut::() + .unwrap() + }; +} +use res_hat_mut; + +macro_rules! face_hat { + ($f:expr) => { + $f.hat.downcast_ref::().unwrap() + }; +} +use face_hat; + +macro_rules! 
face_hat_mut { + ($f:expr) => { + get_mut_unchecked($f).hat.downcast_mut::().unwrap() + }; +} +use face_hat_mut; + +struct HatTables { + router_subs: HashSet>, + peer_subs: HashSet>, + router_qabls: HashSet>, + peer_qabls: HashSet>, + routers_net: Option, + peers_net: Option, + shared_nodes: Vec, + routers_trees_task: Option>, + peers_trees_task: Option>, + router_peers_failover_brokering: bool, +} + +impl HatTables { + fn new(router_peers_failover_brokering: bool) -> Self { + Self { + router_subs: HashSet::new(), + peer_subs: HashSet::new(), + router_qabls: HashSet::new(), + peer_qabls: HashSet::new(), + routers_net: None, + peers_net: None, + shared_nodes: vec![], + routers_trees_task: None, + peers_trees_task: None, + router_peers_failover_brokering, + } + } + + #[inline] + fn get_net(&self, net_type: WhatAmI) -> Option<&Network> { + match net_type { + WhatAmI::Router => self.routers_net.as_ref(), + WhatAmI::Peer => self.peers_net.as_ref(), + _ => None, + } + } + + #[inline] + fn full_net(&self, net_type: WhatAmI) -> bool { + match net_type { + WhatAmI::Router => self + .routers_net + .as_ref() + .map(|net| net.full_linkstate) + .unwrap_or(false), + WhatAmI::Peer => self + .peers_net + .as_ref() + .map(|net| net.full_linkstate) + .unwrap_or(false), + _ => false, + } + } + + #[inline] + fn get_router_links(&self, peer: ZenohId) -> impl Iterator + '_ { + self.peers_net + .as_ref() + .unwrap() + .get_links(peer) + .iter() + .filter(move |nid| { + if let Some(node) = self.routers_net.as_ref().unwrap().get_node(nid) { + node.whatami.unwrap_or(WhatAmI::Router) == WhatAmI::Router + } else { + false + } + }) + } + + #[inline] + fn elect_router<'a>( + &'a self, + self_zid: &'a ZenohId, + key_expr: &str, + mut routers: impl Iterator, + ) -> &'a ZenohId { + match routers.next() { + None => self_zid, + Some(router) => { + let hash = |r: &ZenohId| { + let mut hasher = DefaultHasher::new(); + for b in key_expr.as_bytes() { + hasher.write_u8(*b); + } + for b in &r.to_le_bytes()[..r.size()] { + hasher.write_u8(*b); + } + hasher.finish() + }; + let mut res = router; + let mut h = None; + for router2 in routers { + let h2 = hash(router2); + if h2 > *h.get_or_insert_with(|| hash(res)) { + res = router2; + h = Some(h2); + } + } + res + } + } + } + + #[inline] + fn failover_brokering_to(source_links: &[ZenohId], dest: ZenohId) -> bool { + // if source_links is empty then gossip is probably disabled in source peer + !source_links.is_empty() && !source_links.contains(&dest) + } + + #[inline] + fn failover_brokering(&self, peer1: ZenohId, peer2: ZenohId) -> bool { + self.router_peers_failover_brokering + && self + .peers_net + .as_ref() + .map(|net| HatTables::failover_brokering_to(net.get_links(peer1), peer2)) + .unwrap_or(false) + } + + fn schedule_compute_trees(&mut self, tables_ref: Arc, net_type: WhatAmI) { + log::trace!("Schedule computations"); + if (net_type == WhatAmI::Router && self.routers_trees_task.is_none()) + || (net_type == WhatAmI::Peer && self.peers_trees_task.is_none()) + { + let task = Some(async_std::task::spawn(async move { + async_std::task::sleep(std::time::Duration::from_millis(*TREES_COMPUTATION_DELAY)) + .await; + let mut tables = zwrite!(tables_ref.tables); + + log::trace!("Compute trees"); + let new_childs = match net_type { + WhatAmI::Router => hat_mut!(tables) + .routers_net + .as_mut() + .unwrap() + .compute_trees(), + _ => hat_mut!(tables).peers_net.as_mut().unwrap().compute_trees(), + }; + + log::trace!("Compute routes"); + pubsub::pubsub_tree_change(&mut tables, &new_childs, 
net_type); + queries::queries_tree_change(&mut tables, &new_childs, net_type); + + log::trace!("Computations completed"); + match net_type { + WhatAmI::Router => hat_mut!(tables).routers_trees_task = None, + _ => hat_mut!(tables).peers_trees_task = None, + }; + })); + match net_type { + WhatAmI::Router => self.routers_trees_task = task, + _ => self.peers_trees_task = task, + }; + } + } +} + +pub(crate) struct HatCode {} + +impl HatBaseTrait for HatCode { + fn init( + &self, + tables: &mut Tables, + runtime: Runtime, + router_full_linkstate: bool, + peer_full_linkstate: bool, + router_peers_failover_brokering: bool, + gossip: bool, + gossip_multihop: bool, + autoconnect: WhatAmIMatcher, + ) { + if router_full_linkstate | gossip { + hat_mut!(tables).routers_net = Some(Network::new( + "[Routers network]".to_string(), + tables.zid, + runtime.clone(), + router_full_linkstate, + router_peers_failover_brokering, + gossip, + gossip_multihop, + autoconnect, + )); + } + if peer_full_linkstate | gossip { + hat_mut!(tables).peers_net = Some(Network::new( + "[Peers network]".to_string(), + tables.zid, + runtime, + peer_full_linkstate, + router_peers_failover_brokering, + gossip, + gossip_multihop, + autoconnect, + )); + } + if router_full_linkstate && peer_full_linkstate { + hat_mut!(tables).shared_nodes = shared_nodes( + hat!(tables).routers_net.as_ref().unwrap(), + hat!(tables).peers_net.as_ref().unwrap(), + ); + } + } + + fn new_tables(&self, router_peers_failover_brokering: bool) -> Box { + Box::new(HatTables::new(router_peers_failover_brokering)) + } + + fn new_face(&self) -> Box { + Box::new(HatFace::new()) + } + + fn new_resource(&self) -> Box { + Box::new(HatContext::new()) + } + + fn new_local_face( + &self, + tables: &mut Tables, + _tables_ref: &Arc, + face: &mut Face, + ) -> ZResult<()> { + pubsub_new_face(tables, &mut face.state); + queries_new_face(tables, &mut face.state); + Ok(()) + } + + fn new_transport_unicast_face( + &self, + tables: &mut Tables, + tables_ref: &Arc, + face: &mut Face, + transport: &TransportUnicast, + ) -> ZResult<()> { + let link_id = match (tables.whatami, face.state.whatami) { + (WhatAmI::Router, WhatAmI::Router) => hat_mut!(tables) + .routers_net + .as_mut() + .unwrap() + .add_link(transport.clone()), + (WhatAmI::Router, WhatAmI::Peer) + | (WhatAmI::Peer, WhatAmI::Router) + | (WhatAmI::Peer, WhatAmI::Peer) => { + if let Some(net) = hat_mut!(tables).peers_net.as_mut() { + net.add_link(transport.clone()) + } else { + 0 + } + } + _ => 0, + }; + + if hat!(tables).full_net(WhatAmI::Router) && hat!(tables).full_net(WhatAmI::Peer) { + hat_mut!(tables).shared_nodes = shared_nodes( + hat!(tables).routers_net.as_ref().unwrap(), + hat!(tables).peers_net.as_ref().unwrap(), + ); + } + + face_hat_mut!(&mut face.state).link_id = link_id; + pubsub_new_face(tables, &mut face.state); + queries_new_face(tables, &mut face.state); + + match (tables.whatami, face.state.whatami) { + (WhatAmI::Router, WhatAmI::Router) => { + hat_mut!(tables).schedule_compute_trees(tables_ref.clone(), WhatAmI::Router); + } + (WhatAmI::Router, WhatAmI::Peer) + | (WhatAmI::Peer, WhatAmI::Router) + | (WhatAmI::Peer, WhatAmI::Peer) => { + if hat_mut!(tables).full_net(WhatAmI::Peer) { + hat_mut!(tables).schedule_compute_trees(tables_ref.clone(), WhatAmI::Peer); + } + } + _ => (), + } + Ok(()) + } + + fn close_face(&self, tables: &TablesLock, face: &mut Arc) { + let mut wtables = zwrite!(tables.tables); + let mut face_clone = face.clone(); + let face = get_mut_unchecked(face); + for res in 
face.remote_mappings.values_mut() { + get_mut_unchecked(res).session_ctxs.remove(&face.id); + Resource::clean(res); + } + face.remote_mappings.clear(); + for res in face.local_mappings.values_mut() { + get_mut_unchecked(res).session_ctxs.remove(&face.id); + Resource::clean(res); + } + face.local_mappings.clear(); + + let mut subs_matches = vec![]; + for mut res in face + .hat + .downcast_mut::() + .unwrap() + .remote_subs + .drain() + { + get_mut_unchecked(&mut res).session_ctxs.remove(&face.id); + undeclare_client_subscription(&mut wtables, &mut face_clone, &mut res); + + if res.context.is_some() { + for match_ in &res.context().matches { + let mut match_ = match_.upgrade().unwrap(); + if !Arc::ptr_eq(&match_, &res) { + get_mut_unchecked(&mut match_) + .context_mut() + .valid_data_routes = false; + subs_matches.push(match_); + } + } + get_mut_unchecked(&mut res).context_mut().valid_data_routes = false; + subs_matches.push(res); + } + } + + let mut qabls_matches = vec![]; + for mut res in face + .hat + .downcast_mut::() + .unwrap() + .remote_qabls + .drain() + { + get_mut_unchecked(&mut res).session_ctxs.remove(&face.id); + undeclare_client_queryable(&mut wtables, &mut face_clone, &mut res); + + if res.context.is_some() { + for match_ in &res.context().matches { + let mut match_ = match_.upgrade().unwrap(); + if !Arc::ptr_eq(&match_, &res) { + get_mut_unchecked(&mut match_) + .context_mut() + .valid_query_routes = false; + qabls_matches.push(match_); + } + } + get_mut_unchecked(&mut res).context_mut().valid_query_routes = false; + qabls_matches.push(res); + } + } + drop(wtables); + + let mut matches_data_routes = vec![]; + let mut matches_query_routes = vec![]; + let rtables = zread!(tables.tables); + for _match in subs_matches.drain(..) { + matches_data_routes.push(( + _match.clone(), + rtables.hat_code.compute_data_routes_(&rtables, &_match), + )); + } + for _match in qabls_matches.drain(..) 
{ + matches_query_routes.push(( + _match.clone(), + rtables.hat_code.compute_query_routes_(&rtables, &_match), + )); + } + drop(rtables); + + let mut wtables = zwrite!(tables.tables); + for (mut res, data_routes) in matches_data_routes { + get_mut_unchecked(&mut res) + .context_mut() + .update_data_routes(data_routes); + Resource::clean(&mut res); + } + for (mut res, query_routes) in matches_query_routes { + get_mut_unchecked(&mut res) + .context_mut() + .update_query_routes(query_routes); + Resource::clean(&mut res); + } + wtables.faces.remove(&face.id); + drop(wtables); + } + + fn handle_oam( + &self, + tables: &mut Tables, + tables_ref: &Arc, + oam: Oam, + transport: &TransportUnicast, + ) -> ZResult<()> { + if oam.id == OAM_LINKSTATE { + if let ZExtBody::ZBuf(buf) = oam.body { + if let Ok(zid) = transport.get_zid() { + use zenoh_buffers::reader::HasReader; + use zenoh_codec::RCodec; + let codec = Zenoh080Routing::new(); + let mut reader = buf.reader(); + let list: LinkStateList = codec.read(&mut reader).unwrap(); + + let whatami = transport.get_whatami()?; + match (tables.whatami, whatami) { + (WhatAmI::Router, WhatAmI::Router) => { + for (_, removed_node) in hat_mut!(tables) + .routers_net + .as_mut() + .unwrap() + .link_states(list.link_states, zid) + .removed_nodes + { + pubsub_remove_node(tables, &removed_node.zid, WhatAmI::Router); + queries_remove_node(tables, &removed_node.zid, WhatAmI::Router); + } + + if hat!(tables).full_net(WhatAmI::Peer) { + hat_mut!(tables).shared_nodes = shared_nodes( + hat!(tables).routers_net.as_ref().unwrap(), + hat!(tables).peers_net.as_ref().unwrap(), + ); + } + + hat_mut!(tables) + .schedule_compute_trees(tables_ref.clone(), WhatAmI::Router); + } + (WhatAmI::Router, WhatAmI::Peer) + | (WhatAmI::Peer, WhatAmI::Router) + | (WhatAmI::Peer, WhatAmI::Peer) => { + if let Some(net) = hat_mut!(tables).peers_net.as_mut() { + let changes = net.link_states(list.link_states, zid); + if hat!(tables).full_net(WhatAmI::Peer) { + for (_, removed_node) in changes.removed_nodes { + pubsub_remove_node( + tables, + &removed_node.zid, + WhatAmI::Peer, + ); + queries_remove_node( + tables, + &removed_node.zid, + WhatAmI::Peer, + ); + } + + if tables.whatami == WhatAmI::Router { + hat_mut!(tables).shared_nodes = shared_nodes( + hat!(tables).routers_net.as_ref().unwrap(), + hat!(tables).peers_net.as_ref().unwrap(), + ); + } + + hat_mut!(tables) + .schedule_compute_trees(tables_ref.clone(), WhatAmI::Peer); + } else { + for (_, updated_node) in changes.updated_nodes { + pubsub_linkstate_change( + tables, + &updated_node.zid, + &updated_node.links, + ); + queries_linkstate_change( + tables, + &updated_node.zid, + &updated_node.links, + ); + } + } + } + } + _ => (), + }; + } + } + } + + Ok(()) + } + + fn map_routing_context( + &self, + tables: &Tables, + face: &FaceState, + routing_context: NodeId, + ) -> NodeId { + match tables.whatami { + WhatAmI::Router => match face.whatami { + WhatAmI::Router => hat!(tables) + .routers_net + .as_ref() + .unwrap() + .get_local_context(routing_context, face_hat!(face).link_id), + WhatAmI::Peer => { + if hat!(tables).full_net(WhatAmI::Peer) { + hat!(tables) + .peers_net + .as_ref() + .unwrap() + .get_local_context(routing_context, face_hat!(face).link_id) + } else { + 0 + } + } + _ => 0, + }, + WhatAmI::Peer => { + if hat!(tables).full_net(WhatAmI::Peer) { + hat!(tables) + .peers_net + .as_ref() + .unwrap() + .get_local_context(routing_context, face_hat!(face).link_id) + } else { + 0 + } + } + _ => 0, + } + } + + fn closing( + &self, + 
tables: &mut Tables, + tables_ref: &Arc, + transport: &TransportUnicast, + ) -> ZResult<()> { + match (transport.get_zid(), transport.get_whatami()) { + (Ok(zid), Ok(whatami)) => { + match (tables.whatami, whatami) { + (WhatAmI::Router, WhatAmI::Router) => { + for (_, removed_node) in hat_mut!(tables) + .routers_net + .as_mut() + .unwrap() + .remove_link(&zid) + { + pubsub_remove_node(tables, &removed_node.zid, WhatAmI::Router); + queries_remove_node(tables, &removed_node.zid, WhatAmI::Router); + } + + if hat!(tables).full_net(WhatAmI::Peer) { + hat_mut!(tables).shared_nodes = shared_nodes( + hat!(tables).routers_net.as_ref().unwrap(), + hat!(tables).peers_net.as_ref().unwrap(), + ); + } + + hat_mut!(tables) + .schedule_compute_trees(tables_ref.clone(), WhatAmI::Router); + } + (WhatAmI::Router, WhatAmI::Peer) + | (WhatAmI::Peer, WhatAmI::Router) + | (WhatAmI::Peer, WhatAmI::Peer) => { + if hat!(tables).full_net(WhatAmI::Peer) { + for (_, removed_node) in hat_mut!(tables) + .peers_net + .as_mut() + .unwrap() + .remove_link(&zid) + { + pubsub_remove_node(tables, &removed_node.zid, WhatAmI::Peer); + queries_remove_node(tables, &removed_node.zid, WhatAmI::Peer); + } + + if tables.whatami == WhatAmI::Router { + hat_mut!(tables).shared_nodes = shared_nodes( + hat!(tables).routers_net.as_ref().unwrap(), + hat!(tables).peers_net.as_ref().unwrap(), + ); + } + + hat_mut!(tables) + .schedule_compute_trees(tables_ref.clone(), WhatAmI::Peer); + } else if let Some(net) = hat_mut!(tables).peers_net.as_mut() { + net.remove_link(&zid); + } + } + _ => (), + }; + } + (_, _) => log::error!("Closed transport in session closing!"), + } + Ok(()) + } + + fn as_any(&self) -> &dyn Any { + self + } + + #[inline] + fn ingress_filter(&self, tables: &Tables, face: &FaceState, expr: &mut RoutingExpr) -> bool { + tables.whatami != WhatAmI::Router + || face.whatami != WhatAmI::Peer + || hat!(tables).peers_net.is_none() + || tables.zid + == *hat!(tables).elect_router( + &tables.zid, + expr.full_expr(), + hat!(tables).get_router_links(face.zid), + ) + } + + #[inline] + fn egress_filter( + &self, + tables: &Tables, + src_face: &FaceState, + out_face: &Arc, + expr: &mut RoutingExpr, + ) -> bool { + if src_face.id != out_face.id + && match (src_face.mcast_group.as_ref(), out_face.mcast_group.as_ref()) { + (Some(l), Some(r)) => l != r, + _ => true, + } + { + let dst_master = tables.whatami != WhatAmI::Router + || out_face.whatami != WhatAmI::Peer + || hat!(tables).peers_net.is_none() + || tables.zid + == *hat!(tables).elect_router( + &tables.zid, + expr.full_expr(), + hat!(tables).get_router_links(out_face.zid), + ); + + return dst_master + && (src_face.whatami != WhatAmI::Peer + || out_face.whatami != WhatAmI::Peer + || hat!(tables).full_net(WhatAmI::Peer) + || hat!(tables).failover_brokering(src_face.zid, out_face.zid)); + } + false + } +} + +struct HatContext { + router_subs: HashSet, + peer_subs: HashSet, + router_qabls: HashMap, + peer_qabls: HashMap, +} + +impl HatContext { + fn new() -> Self { + Self { + router_subs: HashSet::new(), + peer_subs: HashSet::new(), + router_qabls: HashMap::new(), + peer_qabls: HashMap::new(), + } + } +} + +struct HatFace { + link_id: usize, + local_subs: HashSet>, + remote_subs: HashSet>, + local_qabls: HashMap, QueryableInfo>, + remote_qabls: HashSet>, +} + +impl HatFace { + fn new() -> Self { + Self { + link_id: 0, + local_subs: HashSet::new(), + remote_subs: HashSet::new(), + local_qabls: HashMap::new(), + remote_qabls: HashSet::new(), + } + } +} + +fn get_router(tables: &Tables, face: 
&Arc, nodeid: NodeId) -> Option { + match hat!(tables) + .routers_net + .as_ref() + .unwrap() + .get_link(face_hat!(face).link_id) + { + Some(link) => match link.get_zid(&(nodeid as u64)) { + Some(router) => Some(*router), + None => { + log::error!( + "Received router declaration with unknown routing context id {}", + nodeid + ); + None + } + }, + None => { + log::error!( + "Could not find corresponding link in routers network for {}", + face + ); + None + } + } +} + +fn get_peer(tables: &Tables, face: &Arc, nodeid: NodeId) -> Option { + match hat!(tables) + .peers_net + .as_ref() + .unwrap() + .get_link(face_hat!(face).link_id) + { + Some(link) => match link.get_zid(&(nodeid as u64)) { + Some(router) => Some(*router), + None => { + log::error!( + "Received peer declaration with unknown routing context id {}", + nodeid + ); + None + } + }, + None => { + log::error!( + "Could not find corresponding link in peers network for {}", + face + ); + None + } + } +} + +impl HatTrait for HatCode {} diff --git a/zenoh/src/net/routing/hat/p2p_peer/network.rs b/zenoh/src/net/routing/hat/p2p_peer/network.rs new file mode 100644 index 0000000000..421850dc87 --- /dev/null +++ b/zenoh/src/net/routing/hat/p2p_peer/network.rs @@ -0,0 +1,1007 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +use crate::net::codec::Zenoh080Routing; +use crate::net::protocol::linkstate::{LinkState, LinkStateList}; +use crate::net::routing::dispatcher::tables::NodeId; +use crate::net::runtime::Runtime; +use async_std::task; +use petgraph::graph::NodeIndex; +use petgraph::visit::{IntoNodeReferences, VisitMap, Visitable}; +use std::convert::TryInto; +use vec_map::VecMap; +use zenoh_buffers::writer::{DidntWrite, HasWriter}; +use zenoh_buffers::ZBuf; +use zenoh_codec::WCodec; +use zenoh_link::Locator; +use zenoh_protocol::common::ZExtBody; +use zenoh_protocol::core::{WhatAmI, WhatAmIMatcher, ZenohId}; +use zenoh_protocol::network::oam::id::OAM_LINKSTATE; +use zenoh_protocol::network::{oam, NetworkBody, NetworkMessage, Oam}; +use zenoh_transport::TransportUnicast; + +#[derive(Clone)] +struct Details { + zid: bool, + locators: bool, + links: bool, +} + +#[derive(Clone)] +pub(super) struct Node { + pub(super) zid: ZenohId, + pub(super) whatami: Option, + pub(super) locators: Option>, + pub(super) sn: u64, + pub(super) links: Vec, +} + +impl std::fmt::Debug for Node { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + write!(f, "{}", self.zid) + } +} + +pub(super) struct Link { + pub(super) transport: TransportUnicast, + zid: ZenohId, + mappings: VecMap, + local_mappings: VecMap, +} + +impl Link { + fn new(transport: TransportUnicast) -> Self { + let zid = transport.get_zid().unwrap(); + Link { + transport, + zid, + mappings: VecMap::new(), + local_mappings: VecMap::new(), + } + } + + #[inline] + pub(super) fn set_zid_mapping(&mut self, psid: u64, zid: ZenohId) { + self.mappings.insert(psid.try_into().unwrap(), zid); + } + + #[inline] + pub(super) fn get_zid(&self, psid: &u64) -> Option<&ZenohId> { + self.mappings.get((*psid).try_into().unwrap()) + } + + #[inline] + pub(super) fn set_local_psid_mapping(&mut self, psid: 
u64, local_psid: u64) { + self.local_mappings + .insert(psid.try_into().unwrap(), local_psid); + } + + #[inline] + pub(super) fn get_local_psid(&self, psid: &u64) -> Option<&u64> { + self.local_mappings.get((*psid).try_into().unwrap()) + } +} + +pub(super) struct Changes { + pub(super) updated_nodes: Vec<(NodeIndex, Node)>, + pub(super) removed_nodes: Vec<(NodeIndex, Node)>, +} + +#[derive(Clone)] +pub(super) struct Tree { + pub(super) parent: Option, + pub(super) childs: Vec, + pub(super) directions: Vec>, +} + +pub(super) struct Network { + pub(super) name: String, + pub(super) full_linkstate: bool, + pub(super) router_peers_failover_brokering: bool, + pub(super) gossip: bool, + pub(super) gossip_multihop: bool, + pub(super) autoconnect: WhatAmIMatcher, + pub(super) idx: NodeIndex, + pub(super) links: VecMap, + pub(super) trees: Vec, + pub(super) distances: Vec, + pub(super) graph: petgraph::stable_graph::StableUnGraph, + pub(super) runtime: Runtime, +} + +impl Network { + #[allow(clippy::too_many_arguments)] + pub(super) fn new( + name: String, + zid: ZenohId, + runtime: Runtime, + full_linkstate: bool, + router_peers_failover_brokering: bool, + gossip: bool, + gossip_multihop: bool, + autoconnect: WhatAmIMatcher, + ) -> Self { + let mut graph = petgraph::stable_graph::StableGraph::default(); + log::debug!("{} Add node (self) {}", name, zid); + let idx = graph.add_node(Node { + zid, + whatami: Some(runtime.whatami), + locators: None, + sn: 1, + links: vec![], + }); + Network { + name, + full_linkstate, + router_peers_failover_brokering, + gossip, + gossip_multihop, + autoconnect, + idx, + links: VecMap::new(), + trees: vec![Tree { + parent: None, + childs: vec![], + directions: vec![None], + }], + distances: vec![0.0], + graph, + runtime, + } + } + + //noinspection ALL + // pub(super) fn dot(&self) -> String { + // std::format!( + // "{:?}", + // petgraph::dot::Dot::with_config(&self.graph, &[petgraph::dot::Config::EdgeNoLabel]) + // ) + // } + + #[inline] + pub(super) fn get_node(&self, zid: &ZenohId) -> Option<&Node> { + self.graph.node_weights().find(|weight| weight.zid == *zid) + } + + #[inline] + pub(super) fn get_idx(&self, zid: &ZenohId) -> Option { + self.graph + .node_indices() + .find(|idx| self.graph[*idx].zid == *zid) + } + + #[inline] + pub(super) fn get_link(&self, id: usize) -> Option<&Link> { + self.links.get(id) + } + + #[inline] + pub(super) fn get_link_from_zid(&self, zid: &ZenohId) -> Option<&Link> { + self.links.values().find(|link| link.zid == *zid) + } + + #[inline] + pub(super) fn get_local_context(&self, context: NodeId, link_id: usize) -> NodeId { + match self.get_link(link_id) { + Some(link) => match link.get_local_psid(&(context as u64)) { + Some(psid) => (*psid).try_into().unwrap_or(0), + None => { + log::error!( + "Cannot find local psid for context {} on link {}", + context, + link_id + ); + 0 + } + }, + None => { + log::error!("Cannot find link {}", link_id); + 0 + } + } + } + + fn add_node(&mut self, node: Node) -> NodeIndex { + let zid = node.zid; + let idx = self.graph.add_node(node); + for link in self.links.values_mut() { + if let Some((psid, _)) = link.mappings.iter().find(|(_, p)| **p == zid) { + link.local_mappings.insert(psid, idx.index() as u64); + } + } + idx + } + + fn make_link_state(&self, idx: NodeIndex, details: Details) -> LinkState { + let links = if details.links { + self.graph[idx] + .links + .iter() + .filter_map(|zid| { + if let Some(idx2) = self.get_idx(zid) { + Some(idx2.index().try_into().unwrap()) + } else { + log::error!( + "{} 
Internal error building link state: cannot get index of {}", + self.name, + zid + ); + None + } + }) + .collect() + } else { + vec![] + }; + LinkState { + psid: idx.index().try_into().unwrap(), + sn: self.graph[idx].sn, + zid: if details.zid { + Some(self.graph[idx].zid) + } else { + None + }, + whatami: self.graph[idx].whatami, + locators: if details.locators { + if idx == self.idx { + Some(self.runtime.get_locators()) + } else { + self.graph[idx].locators.clone() + } + } else { + None + }, + links, + } + } + + fn make_msg(&self, idxs: Vec<(NodeIndex, Details)>) -> Result { + let mut link_states = vec![]; + for (idx, details) in idxs { + link_states.push(self.make_link_state(idx, details)); + } + let codec = Zenoh080Routing::new(); + let mut buf = ZBuf::empty(); + codec.write(&mut buf.writer(), &LinkStateList { link_states })?; + Ok(NetworkBody::OAM(Oam { + id: OAM_LINKSTATE, + body: ZExtBody::ZBuf(buf), + ext_qos: oam::ext::QoSType::oam_default(), + ext_tstamp: None, + }) + .into()) + } + + fn send_on_link(&self, idxs: Vec<(NodeIndex, Details)>, transport: &TransportUnicast) { + if let Ok(msg) = self.make_msg(idxs) { + log::trace!("{} Send to {:?} {:?}", self.name, transport.get_zid(), msg); + if let Err(e) = transport.schedule(msg) { + log::debug!("{} Error sending LinkStateList: {}", self.name, e); + } + } else { + log::error!("Failed to encode Linkstate message"); + } + } + + fn send_on_links
<P>
(&self, idxs: Vec<(NodeIndex, Details)>, mut parameters: P) + where + P: FnMut(&Link) -> bool, + { + if let Ok(msg) = self.make_msg(idxs) { + for link in self.links.values() { + if parameters(link) { + log::trace!("{} Send to {} {:?}", self.name, link.zid, msg); + if let Err(e) = link.transport.schedule(msg.clone()) { + log::debug!("{} Error sending LinkStateList: {}", self.name, e); + } + } + } + } else { + log::error!("Failed to encode Linkstate message"); + } + } + + // Indicates if locators should be included when propagating Linkstate message + // from the given node. + // Returns true if gossip is enabled and if multihop gossip is enabled or + // the node is one of self neighbours. + fn propagate_locators(&self, idx: NodeIndex) -> bool { + self.gossip + && (self.gossip_multihop + || idx == self.idx + || self.links.values().any(|link| { + self.graph + .node_weight(idx) + .map(|node| link.zid == node.zid) + .unwrap_or(true) + })) + } + + fn update_edge(&mut self, idx1: NodeIndex, idx2: NodeIndex) { + use std::hash::Hasher; + let mut hasher = std::collections::hash_map::DefaultHasher::default(); + if self.graph[idx1].zid > self.graph[idx2].zid { + hasher.write(&self.graph[idx2].zid.to_le_bytes()); + hasher.write(&self.graph[idx1].zid.to_le_bytes()); + } else { + hasher.write(&self.graph[idx1].zid.to_le_bytes()); + hasher.write(&self.graph[idx2].zid.to_le_bytes()); + } + let weight = 100.0 + ((hasher.finish() as u32) as f64) / u32::MAX as f64; + self.graph.update_edge(idx1, idx2, weight); + } + + pub(super) fn link_states(&mut self, link_states: Vec, src: ZenohId) -> Changes { + log::trace!("{} Received from {} raw: {:?}", self.name, src, link_states); + + let graph = &self.graph; + let links = &mut self.links; + + let src_link = match links.values_mut().find(|link| link.zid == src) { + Some(link) => link, + None => { + log::error!( + "{} Received LinkStateList from unknown link {}", + self.name, + src + ); + return Changes { + updated_nodes: vec![], + removed_nodes: vec![], + }; + } + }; + + // register psid<->zid mappings & apply mapping to nodes + #[allow(clippy::needless_collect)] // need to release borrow on self + let link_states = link_states + .into_iter() + .filter_map(|link_state| { + if let Some(zid) = link_state.zid { + src_link.set_zid_mapping(link_state.psid, zid); + if let Some(idx) = graph.node_indices().find(|idx| graph[*idx].zid == zid) { + src_link.set_local_psid_mapping(link_state.psid, idx.index() as u64); + } + Some(( + zid, + link_state.whatami.unwrap_or(WhatAmI::Router), + link_state.locators, + link_state.sn, + link_state.links, + )) + } else { + match src_link.get_zid(&link_state.psid) { + Some(zid) => Some(( + *zid, + link_state.whatami.unwrap_or(WhatAmI::Router), + link_state.locators, + link_state.sn, + link_state.links, + )), + None => { + log::error!( + "Received LinkState from {} with unknown node mapping {}", + src, + link_state.psid + ); + None + } + } + } + }) + .collect::>(); + + // apply psid<->zid mapping to links + let src_link = self.get_link_from_zid(&src).unwrap(); + let link_states = link_states + .into_iter() + .map(|(zid, wai, locs, sn, links)| { + let links: Vec = links + .iter() + .filter_map(|l| { + if let Some(zid) = src_link.get_zid(l) { + Some(*zid) + } else { + log::error!( + "{} Received LinkState from {} with unknown link mapping {}", + self.name, + src, + l + ); + None + } + }) + .collect(); + (zid, wai, locs, sn, links) + }) + .collect::>(); + + // log::trace!( + // "{} Received from {} mapped: {:?}", + // self.name, + // src, + // 
link_states + // ); + for link_state in &link_states { + log::trace!( + "{} Received from {} mapped: {:?}", + self.name, + src, + link_state + ); + } + + if !self.full_linkstate { + let mut changes = Changes { + updated_nodes: vec![], + removed_nodes: vec![], + }; + for (zid, whatami, locators, sn, links) in link_states.into_iter() { + let idx = match self.get_idx(&zid) { + None => { + let idx = self.add_node(Node { + zid, + whatami: Some(whatami), + locators: locators.clone(), + sn, + links, + }); + changes.updated_nodes.push((idx, self.graph[idx].clone())); + locators.is_some().then_some(idx) + } + Some(idx) => { + let node = &mut self.graph[idx]; + let oldsn = node.sn; + (oldsn < sn) + .then(|| { + node.sn = sn; + node.links = links.clone(); + changes.updated_nodes.push((idx, node.clone())); + (node.locators != locators && locators.is_some()).then(|| { + node.locators = locators.clone(); + idx + }) + }) + .flatten() + } + }; + + if self.gossip { + if let Some(idx) = idx { + if self.gossip_multihop || self.links.values().any(|link| link.zid == zid) { + self.send_on_links( + vec![( + idx, + Details { + zid: true, + locators: true, + links: false, + }, + )], + |link| link.zid != zid, + ); + } + + if !self.autoconnect.is_empty() { + // Connect discovered peers + if task::block_on(self.runtime.manager().get_transport_unicast(&zid)) + .is_none() + && self.autoconnect.matches(whatami) + { + if let Some(locators) = locators { + let runtime = self.runtime.clone(); + self.runtime.spawn(async move { + // random backoff + async_std::task::sleep(std::time::Duration::from_millis( + rand::random::() % 100, + )) + .await; + runtime.connect_peer(&zid, &locators).await; + }); + } + } + } + } + } + } + return changes; + } + + // Add nodes to graph & filter out up to date states + let mut link_states = link_states + .into_iter() + .filter_map( + |(zid, whatami, locators, sn, links)| match self.get_idx(&zid) { + Some(idx) => { + let node = &mut self.graph[idx]; + let oldsn = node.sn; + if oldsn < sn { + node.sn = sn; + node.links = links.clone(); + if locators.is_some() { + node.locators = locators; + } + if oldsn == 0 { + Some((links, idx, true)) + } else { + Some((links, idx, false)) + } + } else { + None + } + } + None => { + let node = Node { + zid, + whatami: Some(whatami), + locators, + sn, + links: links.clone(), + }; + log::debug!("{} Add node (state) {}", self.name, zid); + let idx = self.add_node(node); + Some((links, idx, true)) + } + }, + ) + .collect::, NodeIndex, bool)>>(); + + // Add/remove edges from graph + let mut reintroduced_nodes = vec![]; + for (links, idx1, _) in &link_states { + for link in links { + if let Some(idx2) = self.get_idx(link) { + if self.graph[idx2].links.contains(&self.graph[*idx1].zid) { + log::trace!( + "{} Update edge (state) {} {}", + self.name, + self.graph[*idx1].zid, + self.graph[idx2].zid + ); + self.update_edge(*idx1, idx2); + } + } else { + let node = Node { + zid: *link, + whatami: None, + locators: None, + sn: 0, + links: vec![], + }; + log::debug!("{} Add node (reintroduced) {}", self.name, link.clone()); + let idx = self.add_node(node); + reintroduced_nodes.push((vec![], idx, true)); + } + } + let mut edges = vec![]; + let mut neighbors = self.graph.neighbors_undirected(*idx1).detach(); + while let Some(edge) = neighbors.next(&self.graph) { + edges.push(edge); + } + for (eidx, idx2) in edges { + if !links.contains(&self.graph[idx2].zid) { + log::trace!( + "{} Remove edge (state) {} {}", + self.name, + self.graph[*idx1].zid, + self.graph[idx2].zid + ); + 
self.graph.remove_edge(eidx); + } + } + } + link_states.extend(reintroduced_nodes); + + let removed = self.remove_detached_nodes(); + let link_states = link_states + .into_iter() + .filter(|ls| !removed.iter().any(|(idx, _)| idx == &ls.1)) + .collect::, NodeIndex, bool)>>(); + + if !self.autoconnect.is_empty() { + // Connect discovered peers + for (_, idx, _) in &link_states { + let node = &self.graph[*idx]; + if let Some(whatami) = node.whatami { + if task::block_on(self.runtime.manager().get_transport_unicast(&node.zid)) + .is_none() + && self.autoconnect.matches(whatami) + { + if let Some(locators) = &node.locators { + let runtime = self.runtime.clone(); + let zid = node.zid; + let locators = locators.clone(); + self.runtime.spawn(async move { + // random backoff + async_std::task::sleep(std::time::Duration::from_millis( + rand::random::() % 100, + )) + .await; + runtime.connect_peer(&zid, &locators).await; + }); + } + } + } + } + } + + // Propagate link states + // Note: we need to send all states at once for each face + // to avoid premature node deletion on the other side + #[allow(clippy::type_complexity)] + if !link_states.is_empty() { + let (new_idxs, updated_idxs): ( + Vec<(Vec, NodeIndex, bool)>, + Vec<(Vec, NodeIndex, bool)>, + ) = link_states.into_iter().partition(|(_, _, new)| *new); + let new_idxs = new_idxs + .into_iter() + .map(|(_, idx1, _new_node)| { + ( + idx1, + Details { + zid: true, + locators: self.propagate_locators(idx1), + links: true, + }, + ) + }) + .collect::>(); + for link in self.links.values() { + if link.zid != src { + let updated_idxs: Vec<(NodeIndex, Details)> = updated_idxs + .clone() + .into_iter() + .filter_map(|(_, idx1, _)| { + if link.zid != self.graph[idx1].zid { + Some(( + idx1, + Details { + zid: false, + locators: self.propagate_locators(idx1), + links: true, + }, + )) + } else { + None + } + }) + .collect(); + if !new_idxs.is_empty() || !updated_idxs.is_empty() { + self.send_on_link( + [&new_idxs[..], &updated_idxs[..]].concat(), + &link.transport, + ); + } + } else if !new_idxs.is_empty() { + self.send_on_link(new_idxs.clone(), &link.transport); + } + } + } + Changes { + updated_nodes: vec![], + removed_nodes: removed, + } + } + + pub(super) fn add_link(&mut self, transport: TransportUnicast) -> usize { + let free_index = { + let mut i = 0; + while self.links.contains_key(i) { + i += 1; + } + i + }; + self.links.insert(free_index, Link::new(transport.clone())); + + let zid = transport.get_zid().unwrap(); + let whatami = transport.get_whatami().unwrap(); + + if self.full_linkstate || self.router_peers_failover_brokering { + let (idx, new) = match self.get_idx(&zid) { + Some(idx) => (idx, false), + None => { + log::debug!("{} Add node (link) {}", self.name, zid); + ( + self.add_node(Node { + zid, + whatami: Some(whatami), + locators: None, + sn: 0, + links: vec![], + }), + true, + ) + } + }; + if self.full_linkstate && self.graph[idx].links.contains(&self.graph[self.idx].zid) { + log::trace!("Update edge (link) {} {}", self.graph[self.idx].zid, zid); + self.update_edge(self.idx, idx); + } + self.graph[self.idx].links.push(zid); + self.graph[self.idx].sn += 1; + + // Send updated self linkstate on all existing links except new one + self.links + .values() + .filter(|link| { + link.zid != zid + && (self.full_linkstate + || link.transport.get_whatami().unwrap_or(WhatAmI::Peer) + == WhatAmI::Router) + }) + .for_each(|link| { + self.send_on_link( + if new || (!self.full_linkstate && !self.gossip_multihop) { + vec![ + ( + idx, + Details { + zid: 
true, + locators: false, + links: false, + }, + ), + ( + self.idx, + Details { + zid: false, + locators: self.propagate_locators(idx), + links: true, + }, + ), + ] + } else { + vec![( + self.idx, + Details { + zid: false, + locators: self.propagate_locators(idx), + links: true, + }, + )] + }, + &link.transport, + ) + }); + } + + // Send all nodes linkstate on new link + let idxs = self + .graph + .node_indices() + .filter_map(|idx| { + (self.full_linkstate + || self.gossip_multihop + || self.links.values().any(|link| link.zid == zid) + || (self.router_peers_failover_brokering + && idx == self.idx + && whatami == WhatAmI::Router)) + .then(|| { + ( + idx, + Details { + zid: true, + locators: self.propagate_locators(idx), + links: self.full_linkstate + || (self.router_peers_failover_brokering + && idx == self.idx + && whatami == WhatAmI::Router), + }, + ) + }) + }) + .collect(); + self.send_on_link(idxs, &transport); + free_index + } + + pub(super) fn remove_link(&mut self, zid: &ZenohId) -> Vec<(NodeIndex, Node)> { + log::trace!("{} remove_link {}", self.name, zid); + self.links.retain(|_, link| link.zid != *zid); + self.graph[self.idx].links.retain(|link| *link != *zid); + + if self.full_linkstate { + if let Some((edge, _)) = self + .get_idx(zid) + .and_then(|idx| self.graph.find_edge_undirected(self.idx, idx)) + { + self.graph.remove_edge(edge); + } + let removed = self.remove_detached_nodes(); + + self.graph[self.idx].sn += 1; + + self.send_on_links( + vec![( + self.idx, + Details { + zid: false, + locators: self.gossip, + links: true, + }, + )], + |_| true, + ); + + removed + } else { + if let Some(idx) = self.get_idx(zid) { + self.graph.remove_node(idx); + } + if self.router_peers_failover_brokering { + self.send_on_links( + vec![( + self.idx, + Details { + zid: false, + locators: self.gossip, + links: true, + }, + )], + |link| { + link.zid != *zid + && link.transport.get_whatami().unwrap_or(WhatAmI::Peer) + == WhatAmI::Router + }, + ); + } + vec![] + } + } + + fn remove_detached_nodes(&mut self) -> Vec<(NodeIndex, Node)> { + let mut dfs_stack = vec![self.idx]; + let mut visit_map = self.graph.visit_map(); + while let Some(node) = dfs_stack.pop() { + if visit_map.visit(node) { + for succzid in &self.graph[node].links { + if let Some(succ) = self.get_idx(succzid) { + if !visit_map.is_visited(&succ) { + dfs_stack.push(succ); + } + } + } + } + } + + let mut removed = vec![]; + for idx in self.graph.node_indices().collect::>() { + if !visit_map.is_visited(&idx) { + log::debug!("Remove node {}", &self.graph[idx].zid); + removed.push((idx, self.graph.remove_node(idx).unwrap())); + } + } + removed + } + + pub(super) fn compute_trees(&mut self) -> Vec> { + let indexes = self.graph.node_indices().collect::>(); + let max_idx = indexes.iter().max().unwrap(); + + let old_childs: Vec> = self.trees.iter().map(|t| t.childs.clone()).collect(); + + self.trees.clear(); + self.trees.resize_with(max_idx.index() + 1, || Tree { + parent: None, + childs: vec![], + directions: vec![], + }); + + for tree_root_idx in &indexes { + let paths = petgraph::algo::bellman_ford(&self.graph, *tree_root_idx).unwrap(); + + if tree_root_idx.index() == 0 { + self.distances = paths.distances; + } + + if log::log_enabled!(log::Level::Debug) { + let ps: Vec> = paths + .predecessors + .iter() + .enumerate() + .map(|(is, o)| { + o.map(|ip| { + format!( + "{} <- {}", + self.graph[ip].zid, + self.graph[NodeIndex::new(is)].zid + ) + }) + }) + .collect(); + log::debug!("Tree {} {:?}", self.graph[*tree_root_idx].zid, ps); + } + + 
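+            // Record this node's parent in the spanning tree rooted at tree_root_idx (taken from the Bellman-Ford predecessors).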
self.trees[tree_root_idx.index()].parent = paths.predecessors[self.idx.index()]; + + for idx in &indexes { + if let Some(parent_idx) = paths.predecessors[idx.index()] { + if parent_idx == self.idx { + self.trees[tree_root_idx.index()].childs.push(*idx); + } + } + } + + self.trees[tree_root_idx.index()] + .directions + .resize_with(max_idx.index() + 1, || None); + let mut dfs = petgraph::algo::DfsSpace::new(&self.graph); + for destination in &indexes { + if self.idx != *destination + && petgraph::algo::has_path_connecting( + &self.graph, + self.idx, + *destination, + Some(&mut dfs), + ) + { + let mut direction = None; + let mut current = *destination; + while let Some(parent) = paths.predecessors[current.index()] { + if parent == self.idx { + direction = Some(current); + break; + } else { + current = parent; + } + } + + self.trees[tree_root_idx.index()].directions[destination.index()] = + match direction { + Some(direction) => Some(direction), + None => self.trees[tree_root_idx.index()].parent, + }; + } + } + } + + let mut new_childs = Vec::with_capacity(self.trees.len()); + new_childs.resize(self.trees.len(), vec![]); + + for i in 0..new_childs.len() { + new_childs[i] = if i < old_childs.len() { + self.trees[i] + .childs + .iter() + .filter(|idx| !old_childs[i].contains(idx)) + .cloned() + .collect() + } else { + self.trees[i].childs.clone() + }; + } + + new_childs + } + + #[inline] + pub(super) fn get_links(&self, node: ZenohId) -> &[ZenohId] { + self.get_node(&node) + .map(|node| &node.links[..]) + .unwrap_or_default() + } +} + +#[inline] +pub(super) fn shared_nodes(net1: &Network, net2: &Network) -> Vec { + net1.graph + .node_references() + .filter_map(|(_, node1)| { + net2.graph + .node_references() + .any(|(_, node2)| node1.zid == node2.zid) + .then_some(node1.zid) + }) + .collect() +} diff --git a/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs b/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs new file mode 100644 index 0000000000..189e6cb6e8 --- /dev/null +++ b/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs @@ -0,0 +1,1650 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
+// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +use super::network::Network; +use super::{face_hat, face_hat_mut, hat, hat_mut, res_hat, res_hat_mut}; +use super::{get_peer, get_router, HatCode, HatContext, HatFace, HatTables}; +use crate::net::routing::dispatcher::face::FaceState; +use crate::net::routing::dispatcher::pubsub::*; +use crate::net::routing::dispatcher::resource::{NodeId, Resource, SessionContext}; +use crate::net::routing::dispatcher::tables::{DataRoutes, PullCaches, Route, RoutingExpr}; +use crate::net::routing::dispatcher::tables::{Tables, TablesLock}; +use crate::net::routing::hat::HatPubSubTrait; +use crate::net::routing::{RoutingContext, PREFIX_LIVELINESS}; +use petgraph::graph::NodeIndex; +use std::borrow::Cow; +use std::collections::{HashMap, HashSet}; +use std::sync::{Arc, RwLockReadGuard}; +use zenoh_core::zread; +use zenoh_protocol::core::key_expr::OwnedKeyExpr; +use zenoh_protocol::{ + core::{key_expr::keyexpr, Reliability, WhatAmI, WireExpr, ZenohId}, + network::declare::{ + common::ext::WireExprType, ext, subscriber::ext::SubscriberInfo, Declare, DeclareBody, + DeclareSubscriber, Mode, UndeclareSubscriber, + }, +}; +use zenoh_sync::get_mut_unchecked; + +#[inline] +fn send_sourced_subscription_to_net_childs( + tables: &Tables, + net: &Network, + childs: &[NodeIndex], + res: &Arc, + src_face: Option<&Arc>, + sub_info: &SubscriberInfo, + routing_context: NodeId, +) { + for child in childs { + if net.graph.contains_node(*child) { + match tables.get_face(&net.graph[*child].zid).cloned() { + Some(mut someface) => { + if src_face.is_none() || someface.id != src_face.unwrap().id { + let key_expr = Resource::decl_key(res, &mut someface); + + log::debug!("Send subscription {} on {}", res.expr(), someface); + + someface.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType { + node_id: routing_context, + }, + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id: 0, // TODO + wire_expr: key_expr, + ext_info: *sub_info, + }), + }, + res.expr(), + )); + } + } + None => log::trace!("Unable to find face for zid {}", net.graph[*child].zid), + } + } + } +} + +#[inline] +fn propagate_simple_subscription_to( + tables: &mut Tables, + dst_face: &mut Arc, + res: &Arc, + sub_info: &SubscriberInfo, + src_face: &mut Arc, + full_peer_net: bool, +) { + if (src_face.id != dst_face.id || res.expr().starts_with(PREFIX_LIVELINESS)) + && !face_hat!(dst_face).local_subs.contains(res) + && match tables.whatami { + WhatAmI::Router => { + if full_peer_net { + dst_face.whatami == WhatAmI::Client + } else { + dst_face.whatami != WhatAmI::Router + && (src_face.whatami != WhatAmI::Peer + || dst_face.whatami != WhatAmI::Peer + || hat!(tables).failover_brokering(src_face.zid, dst_face.zid)) + } + } + WhatAmI::Peer => { + if full_peer_net { + dst_face.whatami == WhatAmI::Client + } else { + src_face.whatami == WhatAmI::Client || dst_face.whatami == WhatAmI::Client + } + } + _ => src_face.whatami == WhatAmI::Client || dst_face.whatami == WhatAmI::Client, + } + { + face_hat_mut!(dst_face).local_subs.insert(res.clone()); + let key_expr = Resource::decl_key(res, dst_face); + dst_face.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id: 0, // TODO + wire_expr: 
key_expr, + ext_info: *sub_info, + }), + }, + res.expr(), + )); + } +} + +fn propagate_simple_subscription( + tables: &mut Tables, + res: &Arc, + sub_info: &SubscriberInfo, + src_face: &mut Arc, +) { + let full_peer_net = hat!(tables).full_net(WhatAmI::Peer); + for mut dst_face in tables + .faces + .values() + .cloned() + .collect::>>() + { + propagate_simple_subscription_to( + tables, + &mut dst_face, + res, + sub_info, + src_face, + full_peer_net, + ); + } +} + +fn propagate_sourced_subscription( + tables: &Tables, + res: &Arc, + sub_info: &SubscriberInfo, + src_face: Option<&Arc>, + source: &ZenohId, + net_type: WhatAmI, +) { + let net = hat!(tables).get_net(net_type).unwrap(); + match net.get_idx(source) { + Some(tree_sid) => { + if net.trees.len() > tree_sid.index() { + send_sourced_subscription_to_net_childs( + tables, + net, + &net.trees[tree_sid.index()].childs, + res, + src_face, + sub_info, + tree_sid.index() as NodeId, + ); + } else { + log::trace!( + "Propagating sub {}: tree for node {} sid:{} not yet ready", + res.expr(), + tree_sid.index(), + source + ); + } + } + None => log::error!( + "Error propagating sub {}: cannot get index of {}!", + res.expr(), + source + ), + } +} + +fn register_router_subscription( + tables: &mut Tables, + face: &mut Arc, + res: &mut Arc, + sub_info: &SubscriberInfo, + router: ZenohId, +) { + if !res_hat!(res).router_subs.contains(&router) { + // Register router subscription + { + log::debug!( + "Register router subscription {} (router: {})", + res.expr(), + router + ); + res_hat_mut!(res).router_subs.insert(router); + hat_mut!(tables).router_subs.insert(res.clone()); + } + + // Propagate subscription to routers + propagate_sourced_subscription(tables, res, sub_info, Some(face), &router, WhatAmI::Router); + } + // Propagate subscription to peers + if hat!(tables).full_net(WhatAmI::Peer) && face.whatami != WhatAmI::Peer { + register_peer_subscription(tables, face, res, sub_info, tables.zid) + } + + // Propagate subscription to clients + propagate_simple_subscription(tables, res, sub_info, face); +} + +fn declare_router_subscription( + tables: &TablesLock, + rtables: RwLockReadGuard, + face: &mut Arc, + expr: &WireExpr, + sub_info: &SubscriberInfo, + router: ZenohId, +) { + match rtables + .get_mapping(face, &expr.scope, expr.mapping) + .cloned() + { + Some(mut prefix) => { + let res = Resource::get_resource(&prefix, &expr.suffix); + let (mut res, mut wtables) = + if res.as_ref().map(|r| r.context.is_some()).unwrap_or(false) { + drop(rtables); + let wtables = zwrite!(tables.tables); + (res.unwrap(), wtables) + } else { + let mut fullexpr = prefix.expr(); + fullexpr.push_str(expr.suffix.as_ref()); + let mut matches = keyexpr::new(fullexpr.as_str()) + .map(|ke| Resource::get_matches(&rtables, ke)) + .unwrap_or_default(); + drop(rtables); + let mut wtables = zwrite!(tables.tables); + let mut res = + Resource::make_resource(&mut wtables, &mut prefix, expr.suffix.as_ref()); + matches.push(Arc::downgrade(&res)); + Resource::match_resource(&wtables, &mut res, matches); + (res, wtables) + }; + register_router_subscription(&mut wtables, face, &mut res, sub_info, router); + disable_matches_data_routes(&mut wtables, &mut res); + drop(wtables); + + let rtables = zread!(tables.tables); + let matches_data_routes = compute_matches_data_routes_(&rtables, &res); + drop(rtables); + + let wtables = zwrite!(tables.tables); + for (mut res, data_routes) in matches_data_routes { + get_mut_unchecked(&mut res) + .context_mut() + .update_data_routes(data_routes); + } + 
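+            // Matching data routes are updated; release the write lock.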
drop(wtables); + } + None => log::error!( + "Declare router subscription for unknown scope {}!", + expr.scope + ), + } +} + +fn register_peer_subscription( + tables: &mut Tables, + face: &mut Arc, + res: &mut Arc, + sub_info: &SubscriberInfo, + peer: ZenohId, +) { + if !res_hat!(res).peer_subs.contains(&peer) { + // Register peer subscription + { + log::debug!("Register peer subscription {} (peer: {})", res.expr(), peer); + res_hat_mut!(res).peer_subs.insert(peer); + hat_mut!(tables).peer_subs.insert(res.clone()); + } + + // Propagate subscription to peers + propagate_sourced_subscription(tables, res, sub_info, Some(face), &peer, WhatAmI::Peer); + } + + if tables.whatami == WhatAmI::Peer { + // Propagate subscription to clients + propagate_simple_subscription(tables, res, sub_info, face); + } +} + +fn declare_peer_subscription( + tables: &TablesLock, + rtables: RwLockReadGuard, + face: &mut Arc, + expr: &WireExpr, + sub_info: &SubscriberInfo, + peer: ZenohId, +) { + match rtables + .get_mapping(face, &expr.scope, expr.mapping) + .cloned() + { + Some(mut prefix) => { + let res = Resource::get_resource(&prefix, &expr.suffix); + let (mut res, mut wtables) = + if res.as_ref().map(|r| r.context.is_some()).unwrap_or(false) { + drop(rtables); + let wtables = zwrite!(tables.tables); + (res.unwrap(), wtables) + } else { + let mut fullexpr = prefix.expr(); + fullexpr.push_str(expr.suffix.as_ref()); + let mut matches = keyexpr::new(fullexpr.as_str()) + .map(|ke| Resource::get_matches(&rtables, ke)) + .unwrap_or_default(); + drop(rtables); + let mut wtables = zwrite!(tables.tables); + let mut res = + Resource::make_resource(&mut wtables, &mut prefix, expr.suffix.as_ref()); + matches.push(Arc::downgrade(&res)); + Resource::match_resource(&wtables, &mut res, matches); + (res, wtables) + }; + register_peer_subscription(&mut wtables, face, &mut res, sub_info, peer); + if wtables.whatami == WhatAmI::Router { + let mut propa_sub_info = *sub_info; + propa_sub_info.mode = Mode::Push; + let zid = wtables.zid; + register_router_subscription(&mut wtables, face, &mut res, &propa_sub_info, zid); + } + disable_matches_data_routes(&mut wtables, &mut res); + drop(wtables); + + let rtables = zread!(tables.tables); + let matches_data_routes = compute_matches_data_routes_(&rtables, &res); + drop(rtables); + + let wtables = zwrite!(tables.tables); + for (mut res, data_routes) in matches_data_routes { + get_mut_unchecked(&mut res) + .context_mut() + .update_data_routes(data_routes); + } + drop(wtables); + } + None => log::error!( + "Declare router subscription for unknown scope {}!", + expr.scope + ), + } +} + +fn register_client_subscription( + _tables: &mut Tables, + face: &mut Arc, + res: &mut Arc, + sub_info: &SubscriberInfo, +) { + // Register subscription + { + let res = get_mut_unchecked(res); + log::debug!("Register subscription {} for {}", res.expr(), face); + match res.session_ctxs.get_mut(&face.id) { + Some(ctx) => match &ctx.subs { + Some(info) => { + if Mode::Pull == info.mode { + get_mut_unchecked(ctx).subs = Some(*sub_info); + } + } + None => { + get_mut_unchecked(ctx).subs = Some(*sub_info); + } + }, + None => { + res.session_ctxs.insert( + face.id, + Arc::new(SessionContext { + face: face.clone(), + local_expr_id: None, + remote_expr_id: None, + subs: Some(*sub_info), + qabl: None, + last_values: HashMap::new(), + }), + ); + } + } + } + face_hat_mut!(face).remote_subs.insert(res.clone()); +} + +fn declare_client_subscription( + tables: &TablesLock, + rtables: RwLockReadGuard, + face: &mut Arc, + expr: 
&WireExpr, + sub_info: &SubscriberInfo, +) { + log::debug!("Register client subscription"); + match rtables + .get_mapping(face, &expr.scope, expr.mapping) + .cloned() + { + Some(mut prefix) => { + let res = Resource::get_resource(&prefix, &expr.suffix); + let (mut res, mut wtables) = + if res.as_ref().map(|r| r.context.is_some()).unwrap_or(false) { + drop(rtables); + let wtables = zwrite!(tables.tables); + (res.unwrap(), wtables) + } else { + let mut fullexpr = prefix.expr(); + fullexpr.push_str(expr.suffix.as_ref()); + let mut matches = keyexpr::new(fullexpr.as_str()) + .map(|ke| Resource::get_matches(&rtables, ke)) + .unwrap_or_default(); + drop(rtables); + let mut wtables = zwrite!(tables.tables); + let mut res = + Resource::make_resource(&mut wtables, &mut prefix, expr.suffix.as_ref()); + matches.push(Arc::downgrade(&res)); + Resource::match_resource(&wtables, &mut res, matches); + (res, wtables) + }; + + register_client_subscription(&mut wtables, face, &mut res, sub_info); + let mut propa_sub_info = *sub_info; + propa_sub_info.mode = Mode::Push; + match wtables.whatami { + WhatAmI::Router => { + let zid = wtables.zid; + register_router_subscription( + &mut wtables, + face, + &mut res, + &propa_sub_info, + zid, + ); + } + WhatAmI::Peer => { + if hat!(wtables).full_net(WhatAmI::Peer) { + let zid = wtables.zid; + register_peer_subscription( + &mut wtables, + face, + &mut res, + &propa_sub_info, + zid, + ); + } else { + propagate_simple_subscription(&mut wtables, &res, &propa_sub_info, face); + // This introduced a buffer overflow on windows + // TODO: Let's deactivate this on windows until Fixed + #[cfg(not(windows))] + for mcast_group in &wtables.mcast_groups { + mcast_group + .primitives + .send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id: 0, // TODO + wire_expr: res.expr().into(), + ext_info: *sub_info, + }), + }, + res.expr(), + )) + } + } + } + _ => { + propagate_simple_subscription(&mut wtables, &res, &propa_sub_info, face); + // This introduced a buffer overflow on windows + // TODO: Let's deactivate this on windows until Fixed + #[cfg(not(windows))] + for mcast_group in &wtables.mcast_groups { + mcast_group + .primitives + .send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id: 0, // TODO + wire_expr: res.expr().into(), + ext_info: *sub_info, + }), + }, + res.expr(), + )) + } + } + } + disable_matches_data_routes(&mut wtables, &mut res); + drop(wtables); + + let rtables = zread!(tables.tables); + let matches_data_routes = compute_matches_data_routes_(&rtables, &res); + drop(rtables); + + let wtables = zwrite!(tables.tables); + for (mut res, data_routes) in matches_data_routes { + get_mut_unchecked(&mut res) + .context_mut() + .update_data_routes(data_routes); + } + drop(wtables); + } + None => log::error!("Declare subscription for unknown scope {}!", expr.scope), + } +} + +#[inline] +fn remote_router_subs(tables: &Tables, res: &Arc) -> bool { + res.context.is_some() + && res_hat!(res) + .router_subs + .iter() + .any(|peer| peer != &tables.zid) +} + +#[inline] +fn remote_peer_subs(tables: &Tables, res: &Arc) -> bool { + res.context.is_some() + && res_hat!(res) + .peer_subs + .iter() + .any(|peer| peer != &tables.zid) +} + +#[inline] +fn 
client_subs(res: &Arc) -> Vec> { + res.session_ctxs + .values() + .filter_map(|ctx| { + if ctx.subs.is_some() { + Some(ctx.face.clone()) + } else { + None + } + }) + .collect() +} + +#[inline] +fn send_forget_sourced_subscription_to_net_childs( + tables: &Tables, + net: &Network, + childs: &[NodeIndex], + res: &Arc, + src_face: Option<&Arc>, + routing_context: Option, +) { + for child in childs { + if net.graph.contains_node(*child) { + match tables.get_face(&net.graph[*child].zid).cloned() { + Some(mut someface) => { + if src_face.is_none() || someface.id != src_face.unwrap().id { + let wire_expr = Resource::decl_key(res, &mut someface); + + log::debug!("Send forget subscription {} on {}", res.expr(), someface); + + someface.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType { + node_id: routing_context.unwrap_or(0), + }, + body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { + id: 0, // TODO + ext_wire_expr: WireExprType { wire_expr }, + }), + }, + res.expr(), + )); + } + } + None => log::trace!("Unable to find face for zid {}", net.graph[*child].zid), + } + } + } +} + +fn propagate_forget_simple_subscription(tables: &mut Tables, res: &Arc) { + for face in tables.faces.values_mut() { + if face_hat!(face).local_subs.contains(res) { + let wire_expr = Resource::get_best_key(res, "", face.id); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { + id: 0, // TODO + ext_wire_expr: WireExprType { wire_expr }, + }), + }, + res.expr(), + )); + face_hat_mut!(face).local_subs.remove(res); + } + } +} + +fn propagate_forget_simple_subscription_to_peers(tables: &mut Tables, res: &Arc) { + if !hat!(tables).full_net(WhatAmI::Peer) + && res_hat!(res).router_subs.len() == 1 + && res_hat!(res).router_subs.contains(&tables.zid) + { + for mut face in tables + .faces + .values() + .cloned() + .collect::>>() + { + if face.whatami == WhatAmI::Peer + && face_hat!(face).local_subs.contains(res) + && !res.session_ctxs.values().any(|s| { + face.zid != s.face.zid + && s.subs.is_some() + && (s.face.whatami == WhatAmI::Client + || (s.face.whatami == WhatAmI::Peer + && hat!(tables).failover_brokering(s.face.zid, face.zid))) + }) + { + let wire_expr = Resource::get_best_key(res, "", face.id); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { + id: 0, // TODO + ext_wire_expr: WireExprType { wire_expr }, + }), + }, + res.expr(), + )); + + face_hat_mut!(&mut face).local_subs.remove(res); + } + } + } +} + +fn propagate_forget_sourced_subscription( + tables: &Tables, + res: &Arc, + src_face: Option<&Arc>, + source: &ZenohId, + net_type: WhatAmI, +) { + let net = hat!(tables).get_net(net_type).unwrap(); + match net.get_idx(source) { + Some(tree_sid) => { + if net.trees.len() > tree_sid.index() { + send_forget_sourced_subscription_to_net_childs( + tables, + net, + &net.trees[tree_sid.index()].childs, + res, + src_face, + Some(tree_sid.index() as NodeId), + ); + } else { + log::trace!( + "Propagating forget sub {}: tree for node {} sid:{} not yet ready", + res.expr(), + tree_sid.index(), + source + ); + } + } + None => log::error!( + "Error 
propagating forget sub {}: cannot get index of {}!", + res.expr(), + source + ), + } +} + +fn unregister_router_subscription(tables: &mut Tables, res: &mut Arc, router: &ZenohId) { + log::debug!( + "Unregister router subscription {} (router: {})", + res.expr(), + router + ); + res_hat_mut!(res).router_subs.retain(|sub| sub != router); + + if res_hat!(res).router_subs.is_empty() { + hat_mut!(tables) + .router_subs + .retain(|sub| !Arc::ptr_eq(sub, res)); + + if hat_mut!(tables).full_net(WhatAmI::Peer) { + undeclare_peer_subscription(tables, None, res, &tables.zid.clone()); + } + propagate_forget_simple_subscription(tables, res); + } + + propagate_forget_simple_subscription_to_peers(tables, res); +} + +fn undeclare_router_subscription( + tables: &mut Tables, + face: Option<&Arc>, + res: &mut Arc, + router: &ZenohId, +) { + if res_hat!(res).router_subs.contains(router) { + unregister_router_subscription(tables, res, router); + propagate_forget_sourced_subscription(tables, res, face, router, WhatAmI::Router); + } +} + +fn forget_router_subscription( + tables: &TablesLock, + rtables: RwLockReadGuard, + face: &mut Arc, + expr: &WireExpr, + router: &ZenohId, +) { + match rtables.get_mapping(face, &expr.scope, expr.mapping) { + Some(prefix) => match Resource::get_resource(prefix, expr.suffix.as_ref()) { + Some(mut res) => { + drop(rtables); + let mut wtables = zwrite!(tables.tables); + undeclare_router_subscription(&mut wtables, Some(face), &mut res, router); + disable_matches_data_routes(&mut wtables, &mut res); + drop(wtables); + + let rtables = zread!(tables.tables); + let matches_data_routes = compute_matches_data_routes_(&rtables, &res); + drop(rtables); + let wtables = zwrite!(tables.tables); + for (mut res, data_routes) in matches_data_routes { + get_mut_unchecked(&mut res) + .context_mut() + .update_data_routes(data_routes); + } + Resource::clean(&mut res); + drop(wtables); + } + None => log::error!("Undeclare unknown router subscription!"), + }, + None => log::error!("Undeclare router subscription with unknown scope!"), + } +} + +fn unregister_peer_subscription(tables: &mut Tables, res: &mut Arc, peer: &ZenohId) { + log::debug!( + "Unregister peer subscription {} (peer: {})", + res.expr(), + peer + ); + res_hat_mut!(res).peer_subs.retain(|sub| sub != peer); + + if res_hat!(res).peer_subs.is_empty() { + hat_mut!(tables) + .peer_subs + .retain(|sub| !Arc::ptr_eq(sub, res)); + + if tables.whatami == WhatAmI::Peer { + propagate_forget_simple_subscription(tables, res); + } + } +} + +fn undeclare_peer_subscription( + tables: &mut Tables, + face: Option<&Arc>, + res: &mut Arc, + peer: &ZenohId, +) { + if res_hat!(res).peer_subs.contains(peer) { + unregister_peer_subscription(tables, res, peer); + propagate_forget_sourced_subscription(tables, res, face, peer, WhatAmI::Peer); + } +} + +fn forget_peer_subscription( + tables: &TablesLock, + rtables: RwLockReadGuard, + face: &mut Arc, + expr: &WireExpr, + peer: &ZenohId, +) { + match rtables.get_mapping(face, &expr.scope, expr.mapping) { + Some(prefix) => match Resource::get_resource(prefix, expr.suffix.as_ref()) { + Some(mut res) => { + drop(rtables); + let mut wtables = zwrite!(tables.tables); + undeclare_peer_subscription(&mut wtables, Some(face), &mut res, peer); + if wtables.whatami == WhatAmI::Router { + let client_subs = res.session_ctxs.values().any(|ctx| ctx.subs.is_some()); + let peer_subs = remote_peer_subs(&wtables, &res); + let zid = wtables.zid; + if !client_subs && !peer_subs { + undeclare_router_subscription(&mut wtables, None, &mut 
res, &zid); + } + } + disable_matches_data_routes(&mut wtables, &mut res); + drop(wtables); + + let rtables = zread!(tables.tables); + let matches_data_routes = compute_matches_data_routes_(&rtables, &res); + drop(rtables); + let wtables = zwrite!(tables.tables); + for (mut res, data_routes) in matches_data_routes { + get_mut_unchecked(&mut res) + .context_mut() + .update_data_routes(data_routes); + } + Resource::clean(&mut res); + drop(wtables); + } + None => log::error!("Undeclare unknown peer subscription!"), + }, + None => log::error!("Undeclare peer subscription with unknown scope!"), + } +} + +pub(super) fn undeclare_client_subscription( + tables: &mut Tables, + face: &mut Arc, + res: &mut Arc, +) { + log::debug!("Unregister client subscription {} for {}", res.expr(), face); + if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) { + get_mut_unchecked(ctx).subs = None; + } + face_hat_mut!(face).remote_subs.remove(res); + + let mut client_subs = client_subs(res); + let router_subs = remote_router_subs(tables, res); + let peer_subs = remote_peer_subs(tables, res); + match tables.whatami { + WhatAmI::Router => { + if client_subs.is_empty() && !peer_subs { + undeclare_router_subscription(tables, None, res, &tables.zid.clone()); + } else { + propagate_forget_simple_subscription_to_peers(tables, res); + } + } + WhatAmI::Peer => { + if client_subs.is_empty() { + if hat!(tables).full_net(WhatAmI::Peer) { + undeclare_peer_subscription(tables, None, res, &tables.zid.clone()); + } else { + propagate_forget_simple_subscription(tables, res); + } + } + } + _ => { + if client_subs.is_empty() { + propagate_forget_simple_subscription(tables, res); + } + } + } + if client_subs.len() == 1 && !router_subs && !peer_subs { + let face = &mut client_subs[0]; + if face_hat!(face).local_subs.contains(res) + && !(face.whatami == WhatAmI::Client && res.expr().starts_with(PREFIX_LIVELINESS)) + { + let wire_expr = Resource::get_best_key(res, "", face.id); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { + id: 0, // TODO + ext_wire_expr: WireExprType { wire_expr }, + }), + }, + res.expr(), + )); + + face_hat_mut!(face).local_subs.remove(res); + } + } +} + +fn forget_client_subscription( + tables: &TablesLock, + rtables: RwLockReadGuard, + face: &mut Arc, + expr: &WireExpr, +) { + match rtables.get_mapping(face, &expr.scope, expr.mapping) { + Some(prefix) => match Resource::get_resource(prefix, expr.suffix.as_ref()) { + Some(mut res) => { + drop(rtables); + let mut wtables = zwrite!(tables.tables); + undeclare_client_subscription(&mut wtables, face, &mut res); + disable_matches_data_routes(&mut wtables, &mut res); + drop(wtables); + + let rtables = zread!(tables.tables); + let matches_data_routes = compute_matches_data_routes_(&rtables, &res); + drop(rtables); + + let wtables = zwrite!(tables.tables); + for (mut res, data_routes) in matches_data_routes { + get_mut_unchecked(&mut res) + .context_mut() + .update_data_routes(data_routes); + } + Resource::clean(&mut res); + drop(wtables); + } + None => log::error!("Undeclare unknown subscription!"), + }, + None => log::error!("Undeclare subscription with unknown scope!"), + } +} + +pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { + let sub_info = SubscriberInfo { + reliability: Reliability::Reliable, // @TODO + mode: Mode::Push, + }; + match 
tables.whatami { + WhatAmI::Router => { + if face.whatami == WhatAmI::Client { + for sub in &hat!(tables).router_subs { + face_hat_mut!(face).local_subs.insert(sub.clone()); + let key_expr = Resource::decl_key(sub, face); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id: 0, // TODO + wire_expr: key_expr, + ext_info: sub_info, + }), + }, + sub.expr(), + )); + } + } else if face.whatami == WhatAmI::Peer && !hat!(tables).full_net(WhatAmI::Peer) { + for sub in &hat!(tables).router_subs { + if sub.context.is_some() + && (res_hat!(sub).router_subs.iter().any(|r| *r != tables.zid) + || sub.session_ctxs.values().any(|s| { + s.subs.is_some() + && (s.face.whatami == WhatAmI::Client + || (s.face.whatami == WhatAmI::Peer + && hat!(tables) + .failover_brokering(s.face.zid, face.zid))) + })) + { + face_hat_mut!(face).local_subs.insert(sub.clone()); + let key_expr = Resource::decl_key(sub, face); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id: 0, // TODO + wire_expr: key_expr, + ext_info: sub_info, + }), + }, + sub.expr(), + )); + } + } + } + } + WhatAmI::Peer => { + if hat!(tables).full_net(WhatAmI::Peer) { + if face.whatami == WhatAmI::Client { + for sub in &hat!(tables).peer_subs { + face_hat_mut!(face).local_subs.insert(sub.clone()); + let key_expr = Resource::decl_key(sub, face); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id: 0, // TODO + wire_expr: key_expr, + ext_info: sub_info, + }), + }, + sub.expr(), + )); + } + } + } else { + for src_face in tables + .faces + .values() + .cloned() + .collect::>>() + { + for sub in &face_hat!(src_face).remote_subs { + propagate_simple_subscription_to( + tables, + face, + sub, + &sub_info, + &mut src_face.clone(), + false, + ); + } + } + } + } + WhatAmI::Client => { + for src_face in tables + .faces + .values() + .cloned() + .collect::>>() + { + for sub in &face_hat!(src_face).remote_subs { + propagate_simple_subscription_to( + tables, + face, + sub, + &sub_info, + &mut src_face.clone(), + false, + ); + } + } + } + } +} + +pub(super) fn pubsub_remove_node(tables: &mut Tables, node: &ZenohId, net_type: WhatAmI) { + match net_type { + WhatAmI::Router => { + for mut res in hat!(tables) + .router_subs + .iter() + .filter(|res| res_hat!(res).router_subs.contains(node)) + .cloned() + .collect::>>() + { + unregister_router_subscription(tables, &mut res, node); + + let matches_data_routes = compute_matches_data_routes_(tables, &res); + for (mut res, data_routes) in matches_data_routes { + get_mut_unchecked(&mut res) + .context_mut() + .update_data_routes(data_routes); + } + Resource::clean(&mut res) + } + } + WhatAmI::Peer => { + for mut res in hat!(tables) + .peer_subs + .iter() + .filter(|res| res_hat!(res).peer_subs.contains(node)) + .cloned() + .collect::>>() + { + unregister_peer_subscription(tables, &mut res, node); + + if tables.whatami == WhatAmI::Router { + let client_subs = res.session_ctxs.values().any(|ctx| ctx.subs.is_some()); + let peer_subs = remote_peer_subs(tables, &res); + if 
!client_subs && !peer_subs { + undeclare_router_subscription(tables, None, &mut res, &tables.zid.clone()); + } + } + + // compute_matches_data_routes(tables, &mut res); + let matches_data_routes = compute_matches_data_routes_(tables, &res); + for (mut res, data_routes) in matches_data_routes { + get_mut_unchecked(&mut res) + .context_mut() + .update_data_routes(data_routes); + } + Resource::clean(&mut res) + } + } + _ => (), + } +} + +pub(super) fn pubsub_tree_change( + tables: &mut Tables, + new_childs: &[Vec], + net_type: WhatAmI, +) { + // propagate subs to new childs + for (tree_sid, tree_childs) in new_childs.iter().enumerate() { + if !tree_childs.is_empty() { + let net = hat!(tables).get_net(net_type).unwrap(); + let tree_idx = NodeIndex::new(tree_sid); + if net.graph.contains_node(tree_idx) { + let tree_id = net.graph[tree_idx].zid; + + let subs_res = match net_type { + WhatAmI::Router => &hat!(tables).router_subs, + _ => &hat!(tables).peer_subs, + }; + + for res in subs_res { + let subs = match net_type { + WhatAmI::Router => &res_hat!(res).router_subs, + _ => &res_hat!(res).peer_subs, + }; + for sub in subs { + if *sub == tree_id { + let sub_info = SubscriberInfo { + reliability: Reliability::Reliable, // @TODO + mode: Mode::Push, + }; + send_sourced_subscription_to_net_childs( + tables, + net, + tree_childs, + res, + None, + &sub_info, + tree_sid as NodeId, + ); + } + } + } + } + } + } + + // recompute routes + compute_data_routes_from(tables, &mut tables.root_res.clone()); +} + +pub(super) fn pubsub_linkstate_change(tables: &mut Tables, zid: &ZenohId, links: &[ZenohId]) { + if let Some(src_face) = tables.get_face(zid).cloned() { + if hat!(tables).router_peers_failover_brokering + && tables.whatami == WhatAmI::Router + && src_face.whatami == WhatAmI::Peer + { + for res in &face_hat!(src_face).remote_subs { + let client_subs = res + .session_ctxs + .values() + .any(|ctx| ctx.face.whatami == WhatAmI::Client && ctx.subs.is_some()); + if !remote_router_subs(tables, res) && !client_subs { + for ctx in get_mut_unchecked(&mut res.clone()) + .session_ctxs + .values_mut() + { + let dst_face = &mut get_mut_unchecked(ctx).face; + if dst_face.whatami == WhatAmI::Peer && src_face.zid != dst_face.zid { + if face_hat!(dst_face).local_subs.contains(res) { + let forget = !HatTables::failover_brokering_to(links, dst_face.zid) + && { + let ctx_links = hat!(tables) + .peers_net + .as_ref() + .map(|net| net.get_links(dst_face.zid)) + .unwrap_or_else(|| &[]); + res.session_ctxs.values().any(|ctx2| { + ctx2.face.whatami == WhatAmI::Peer + && ctx2.subs.is_some() + && HatTables::failover_brokering_to( + ctx_links, + ctx2.face.zid, + ) + }) + }; + if forget { + let wire_expr = Resource::get_best_key(res, "", dst_face.id); + dst_face.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::UndeclareSubscriber( + UndeclareSubscriber { + id: 0, // TODO + ext_wire_expr: WireExprType { wire_expr }, + }, + ), + }, + res.expr(), + )); + + face_hat_mut!(dst_face).local_subs.remove(res); + } + } else if HatTables::failover_brokering_to(links, ctx.face.zid) { + let dst_face = &mut get_mut_unchecked(ctx).face; + face_hat_mut!(dst_face).local_subs.insert(res.clone()); + let key_expr = Resource::decl_key(res, dst_face); + let sub_info = SubscriberInfo { + reliability: Reliability::Reliable, // TODO + mode: Mode::Push, + }; + dst_face.primitives.send_declare(RoutingContext::with_expr( + 
Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id: 0, // TODO + wire_expr: key_expr, + ext_info: sub_info, + }), + }, + res.expr(), + )); + } + } + } + } + } + } + } +} + +#[inline] +fn insert_faces_for_subs( + route: &mut Route, + expr: &RoutingExpr, + tables: &Tables, + net: &Network, + source: NodeId, + subs: &HashSet, +) { + if net.trees.len() > source as usize { + for sub in subs { + if let Some(sub_idx) = net.get_idx(sub) { + if net.trees[source as usize].directions.len() > sub_idx.index() { + if let Some(direction) = net.trees[source as usize].directions[sub_idx.index()] + { + if net.graph.contains_node(direction) { + if let Some(face) = tables.get_face(&net.graph[direction].zid) { + route.entry(face.id).or_insert_with(|| { + let key_expr = + Resource::get_best_key(expr.prefix, expr.suffix, face.id); + (face.clone(), key_expr.to_owned(), source) + }); + } + } + } + } + } + } + } else { + log::trace!("Tree for node sid:{} not yet ready", source); + } +} + +impl HatPubSubTrait for HatCode { + fn declare_subscription( + &self, + tables: &TablesLock, + face: &mut Arc, + expr: &WireExpr, + sub_info: &SubscriberInfo, + node_id: NodeId, + ) { + let rtables = zread!(tables.tables); + match (rtables.whatami, face.whatami) { + (WhatAmI::Router, WhatAmI::Router) => { + if let Some(router) = get_router(&rtables, face, node_id) { + declare_router_subscription(tables, rtables, face, expr, sub_info, router) + } + } + (WhatAmI::Router, WhatAmI::Peer) + | (WhatAmI::Peer, WhatAmI::Router) + | (WhatAmI::Peer, WhatAmI::Peer) => { + if hat!(rtables).full_net(WhatAmI::Peer) { + if let Some(peer) = get_peer(&rtables, face, node_id) { + declare_peer_subscription(tables, rtables, face, expr, sub_info, peer) + } + } else { + declare_client_subscription(tables, rtables, face, expr, sub_info) + } + } + _ => declare_client_subscription(tables, rtables, face, expr, sub_info), + } + } + + fn forget_subscription( + &self, + tables: &TablesLock, + face: &mut Arc, + expr: &WireExpr, + node_id: NodeId, + ) { + let rtables = zread!(tables.tables); + match (rtables.whatami, face.whatami) { + (WhatAmI::Router, WhatAmI::Router) => { + if let Some(router) = get_router(&rtables, face, node_id) { + forget_router_subscription(tables, rtables, face, expr, &router) + } + } + (WhatAmI::Router, WhatAmI::Peer) + | (WhatAmI::Peer, WhatAmI::Router) + | (WhatAmI::Peer, WhatAmI::Peer) => { + if hat!(rtables).full_net(WhatAmI::Peer) { + if let Some(peer) = get_peer(&rtables, face, node_id) { + forget_peer_subscription(tables, rtables, face, expr, &peer) + } + } else { + forget_client_subscription(tables, rtables, face, expr) + } + } + _ => forget_client_subscription(tables, rtables, face, expr), + } + } + + fn compute_data_route( + &self, + tables: &Tables, + expr: &mut RoutingExpr, + source: NodeId, + source_type: WhatAmI, + ) -> Arc { + let mut route = HashMap::new(); + let key_expr = expr.full_expr(); + if key_expr.ends_with('/') { + return Arc::new(route); + } + log::trace!( + "compute_data_route({}, {:?}, {:?})", + key_expr, + source, + source_type + ); + let key_expr = match OwnedKeyExpr::try_from(key_expr) { + Ok(ke) => ke, + Err(e) => { + log::warn!("Invalid KE reached the system: {}", e); + return Arc::new(route); + } + }; + let res = Resource::get_resource(expr.prefix, expr.suffix); + let matches = res + .as_ref() + .and_then(|res| res.context.as_ref()) + .map(|ctx| Cow::from(&ctx.matches)) + 
.unwrap_or_else(|| Cow::from(Resource::get_matches(tables, &key_expr))); + + let master = tables.whatami != WhatAmI::Router + || !hat!(tables).full_net(WhatAmI::Peer) + || *hat!(tables).elect_router(&tables.zid, &key_expr, hat!(tables).shared_nodes.iter()) + == tables.zid; + + for mres in matches.iter() { + let mres = mres.upgrade().unwrap(); + if tables.whatami == WhatAmI::Router { + if master || source_type == WhatAmI::Router { + let net = hat!(tables).routers_net.as_ref().unwrap(); + let router_source = match source_type { + WhatAmI::Router => source, + _ => net.idx.index() as NodeId, + }; + insert_faces_for_subs( + &mut route, + expr, + tables, + net, + router_source, + &res_hat!(mres).router_subs, + ); + } + + if (master || source_type != WhatAmI::Router) + && hat!(tables).full_net(WhatAmI::Peer) + { + let net = hat!(tables).peers_net.as_ref().unwrap(); + let peer_source = match source_type { + WhatAmI::Peer => source, + _ => net.idx.index() as NodeId, + }; + insert_faces_for_subs( + &mut route, + expr, + tables, + net, + peer_source, + &res_hat!(mres).peer_subs, + ); + } + } + + if tables.whatami == WhatAmI::Peer && hat!(tables).full_net(WhatAmI::Peer) { + let net = hat!(tables).peers_net.as_ref().unwrap(); + let peer_source = match source_type { + WhatAmI::Router | WhatAmI::Peer => source, + _ => net.idx.index() as NodeId, + }; + insert_faces_for_subs( + &mut route, + expr, + tables, + net, + peer_source, + &res_hat!(mres).peer_subs, + ); + } + + if tables.whatami != WhatAmI::Router || master || source_type == WhatAmI::Router { + for (sid, context) in &mres.session_ctxs { + if let Some(subinfo) = &context.subs { + if match tables.whatami { + WhatAmI::Router => context.face.whatami != WhatAmI::Router, + _ => { + source_type == WhatAmI::Client + || context.face.whatami == WhatAmI::Client + } + } && subinfo.mode == Mode::Push + { + route.entry(*sid).or_insert_with(|| { + let key_expr = + Resource::get_best_key(expr.prefix, expr.suffix, *sid); + (context.face.clone(), key_expr.to_owned(), NodeId::default()) + }); + } + } + } + } + } + for mcast_group in &tables.mcast_groups { + route.insert( + mcast_group.id, + ( + mcast_group.clone(), + expr.full_expr().to_string().into(), + NodeId::default(), + ), + ); + } + Arc::new(route) + } + + fn compute_matching_pulls(&self, tables: &Tables, expr: &mut RoutingExpr) -> Arc { + let mut pull_caches = vec![]; + let ke = if let Ok(ke) = OwnedKeyExpr::try_from(expr.full_expr()) { + ke + } else { + return Arc::new(pull_caches); + }; + let res = Resource::get_resource(expr.prefix, expr.suffix); + let matches = res + .as_ref() + .and_then(|res| res.context.as_ref()) + .map(|ctx| Cow::from(&ctx.matches)) + .unwrap_or_else(|| Cow::from(Resource::get_matches(tables, &ke))); + + for mres in matches.iter() { + let mres = mres.upgrade().unwrap(); + for context in mres.session_ctxs.values() { + if let Some(subinfo) = &context.subs { + if subinfo.mode == Mode::Pull { + pull_caches.push(context.clone()); + } + } + } + } + Arc::new(pull_caches) + } + + fn compute_data_routes_(&self, tables: &Tables, res: &Arc) -> DataRoutes { + let mut routes = DataRoutes { + matching_pulls: None, + routers_data_routes: vec![], + peers_data_routes: vec![], + peer_data_route: None, + client_data_route: None, + }; + let mut expr = RoutingExpr::new(res, ""); + if tables.whatami == WhatAmI::Router { + let indexes = hat!(tables) + .routers_net + .as_ref() + .unwrap() + .graph + .node_indices() + .collect::>(); + let max_idx = indexes.iter().max().unwrap(); + routes + 
.routers_data_routes + .resize_with(max_idx.index() + 1, || Arc::new(HashMap::new())); + + for idx in &indexes { + routes.routers_data_routes[idx.index()] = self.compute_data_route( + tables, + &mut expr, + idx.index() as NodeId, + WhatAmI::Router, + ); + } + + routes.peer_data_route = + Some(self.compute_data_route(tables, &mut expr, NodeId::default(), WhatAmI::Peer)); + } + if (tables.whatami == WhatAmI::Router || tables.whatami == WhatAmI::Peer) + && hat!(tables).full_net(WhatAmI::Peer) + { + let indexes = hat!(tables) + .peers_net + .as_ref() + .unwrap() + .graph + .node_indices() + .collect::>(); + let max_idx = indexes.iter().max().unwrap(); + routes + .peers_data_routes + .resize_with(max_idx.index() + 1, || Arc::new(HashMap::new())); + + for idx in &indexes { + routes.peers_data_routes[idx.index()] = self.compute_data_route( + tables, + &mut expr, + idx.index() as NodeId, + WhatAmI::Peer, + ); + } + } + if tables.whatami == WhatAmI::Peer && !hat!(tables).full_net(WhatAmI::Peer) { + routes.client_data_route = Some(self.compute_data_route( + tables, + &mut expr, + NodeId::default(), + WhatAmI::Client, + )); + routes.peer_data_route = + Some(self.compute_data_route(tables, &mut expr, NodeId::default(), WhatAmI::Peer)); + } + if tables.whatami == WhatAmI::Client { + routes.client_data_route = Some(self.compute_data_route( + tables, + &mut expr, + NodeId::default(), + WhatAmI::Client, + )); + } + routes.matching_pulls = Some(self.compute_matching_pulls(tables, &mut expr)); + routes + } + + fn compute_data_routes(&self, tables: &mut Tables, res: &mut Arc) { + if res.context.is_some() { + let mut res_mut = res.clone(); + let res_mut = get_mut_unchecked(&mut res_mut); + let mut expr = RoutingExpr::new(res, ""); + if tables.whatami == WhatAmI::Router { + let indexes = hat!(tables) + .routers_net + .as_ref() + .unwrap() + .graph + .node_indices() + .collect::>(); + let max_idx = indexes.iter().max().unwrap(); + let routers_data_routes = &mut res_mut.context_mut().routers_data_routes; + routers_data_routes.clear(); + routers_data_routes.resize_with(max_idx.index() + 1, || Arc::new(HashMap::new())); + + for idx in &indexes { + routers_data_routes[idx.index()] = self.compute_data_route( + tables, + &mut expr, + idx.index() as NodeId, + WhatAmI::Router, + ); + } + + res_mut.context_mut().peer_data_route = Some(self.compute_data_route( + tables, + &mut expr, + NodeId::default(), + WhatAmI::Peer, + )); + } + if (tables.whatami == WhatAmI::Router || tables.whatami == WhatAmI::Peer) + && hat!(tables).full_net(WhatAmI::Peer) + { + let indexes = hat!(tables) + .peers_net + .as_ref() + .unwrap() + .graph + .node_indices() + .collect::>(); + let max_idx = indexes.iter().max().unwrap(); + let peers_data_routes = &mut res_mut.context_mut().peers_data_routes; + peers_data_routes.clear(); + peers_data_routes.resize_with(max_idx.index() + 1, || Arc::new(HashMap::new())); + + for idx in &indexes { + peers_data_routes[idx.index()] = self.compute_data_route( + tables, + &mut expr, + idx.index() as NodeId, + WhatAmI::Peer, + ); + } + } + if tables.whatami == WhatAmI::Peer && !hat!(tables).full_net(WhatAmI::Peer) { + res_mut.context_mut().client_data_route = Some(self.compute_data_route( + tables, + &mut expr, + NodeId::default(), + WhatAmI::Client, + )); + res_mut.context_mut().peer_data_route = Some(self.compute_data_route( + tables, + &mut expr, + NodeId::default(), + WhatAmI::Peer, + )); + } + if tables.whatami == WhatAmI::Client { + res_mut.context_mut().client_data_route = Some(self.compute_data_route( + 
tables, + &mut expr, + NodeId::default(), + WhatAmI::Client, + )); + } + res_mut.context_mut().matching_pulls = self.compute_matching_pulls(tables, &mut expr); + } + } +} diff --git a/zenoh/src/net/routing/hat/p2p_peer/queries.rs b/zenoh/src/net/routing/hat/p2p_peer/queries.rs new file mode 100644 index 0000000000..c1093a8a00 --- /dev/null +++ b/zenoh/src/net/routing/hat/p2p_peer/queries.rs @@ -0,0 +1,1788 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +use super::network::Network; +use super::{face_hat, face_hat_mut, hat, hat_mut, res_hat, res_hat_mut}; +use super::{get_peer, get_router, HatCode, HatContext, HatFace, HatTables}; +use crate::net::routing::dispatcher::face::FaceState; +use crate::net::routing::dispatcher::queries::*; +use crate::net::routing::dispatcher::resource::{NodeId, Resource, SessionContext}; +use crate::net::routing::dispatcher::tables::{ + QueryRoutes, QueryTargetQabl, QueryTargetQablSet, RoutingExpr, +}; +use crate::net::routing::dispatcher::tables::{Tables, TablesLock}; +use crate::net::routing::hat::HatQueriesTrait; +use crate::net::routing::{RoutingContext, PREFIX_LIVELINESS}; +use ordered_float::OrderedFloat; +use petgraph::graph::NodeIndex; +use std::borrow::Cow; +use std::collections::HashMap; +use std::sync::{Arc, RwLockReadGuard}; +use zenoh_buffers::ZBuf; +use zenoh_protocol::core::key_expr::include::{Includer, DEFAULT_INCLUDER}; +use zenoh_protocol::core::key_expr::OwnedKeyExpr; +use zenoh_protocol::{ + core::{key_expr::keyexpr, WhatAmI, WireExpr, ZenohId}, + network::declare::{ + common::ext::WireExprType, ext, queryable::ext::QueryableInfo, Declare, DeclareBody, + DeclareQueryable, UndeclareQueryable, + }, +}; +use zenoh_sync::get_mut_unchecked; + +#[cfg(feature = "complete_n")] +#[inline] +fn merge_qabl_infos(mut this: QueryableInfo, info: &QueryableInfo) -> QueryableInfo { + this.complete += info.complete; + this.distance = std::cmp::min(this.distance, info.distance); + this +} + +#[cfg(not(feature = "complete_n"))] +#[inline] +fn merge_qabl_infos(mut this: QueryableInfo, info: &QueryableInfo) -> QueryableInfo { + this.complete = u8::from(this.complete != 0 || info.complete != 0); + this.distance = std::cmp::min(this.distance, info.distance); + this +} + +fn local_router_qabl_info(tables: &Tables, res: &Arc) -> QueryableInfo { + let info = if hat!(tables).full_net(WhatAmI::Peer) { + res.context.as_ref().and_then(|_| { + res_hat!(res) + .peer_qabls + .iter() + .fold(None, |accu, (zid, info)| { + if *zid != tables.zid { + Some(match accu { + Some(accu) => merge_qabl_infos(accu, info), + None => *info, + }) + } else { + accu + } + }) + }) + } else { + None + }; + res.session_ctxs + .values() + .fold(info, |accu, ctx| { + if let Some(info) = ctx.qabl.as_ref() { + Some(match accu { + Some(accu) => merge_qabl_infos(accu, info), + None => *info, + }) + } else { + accu + } + }) + .unwrap_or(QueryableInfo { + complete: 0, + distance: 0, + }) +} + +fn local_peer_qabl_info(tables: &Tables, res: &Arc) -> QueryableInfo { + let info = if tables.whatami == WhatAmI::Router && res.context.is_some() { + res_hat!(res) + .router_qabls + .iter() + .fold(None, |accu, 
(zid, info)| { + if *zid != tables.zid { + Some(match accu { + Some(accu) => merge_qabl_infos(accu, info), + None => *info, + }) + } else { + accu + } + }) + } else { + None + }; + res.session_ctxs + .values() + .fold(info, |accu, ctx| { + if let Some(info) = ctx.qabl.as_ref() { + Some(match accu { + Some(accu) => merge_qabl_infos(accu, info), + None => *info, + }) + } else { + accu + } + }) + .unwrap_or(QueryableInfo { + complete: 0, + distance: 0, + }) +} + +fn local_qabl_info(tables: &Tables, res: &Arc, face: &Arc) -> QueryableInfo { + let mut info = if tables.whatami == WhatAmI::Router && res.context.is_some() { + res_hat!(res) + .router_qabls + .iter() + .fold(None, |accu, (zid, info)| { + if *zid != tables.zid { + Some(match accu { + Some(accu) => merge_qabl_infos(accu, info), + None => *info, + }) + } else { + accu + } + }) + } else { + None + }; + if res.context.is_some() && hat!(tables).full_net(WhatAmI::Peer) { + info = res_hat!(res) + .peer_qabls + .iter() + .fold(info, |accu, (zid, info)| { + if *zid != tables.zid { + Some(match accu { + Some(accu) => merge_qabl_infos(accu, info), + None => *info, + }) + } else { + accu + } + }) + } + res.session_ctxs + .values() + .fold(info, |accu, ctx| { + if ctx.face.id != face.id && ctx.face.whatami != WhatAmI::Peer + || face.whatami != WhatAmI::Peer + || hat!(tables).failover_brokering(ctx.face.zid, face.zid) + { + if let Some(info) = ctx.qabl.as_ref() { + Some(match accu { + Some(accu) => merge_qabl_infos(accu, info), + None => *info, + }) + } else { + accu + } + } else { + accu + } + }) + .unwrap_or(QueryableInfo { + complete: 0, + distance: 0, + }) +} + +#[allow(clippy::too_many_arguments)] +#[inline] +fn send_sourced_queryable_to_net_childs( + tables: &Tables, + net: &Network, + childs: &[NodeIndex], + res: &Arc, + qabl_info: &QueryableInfo, + src_face: Option<&mut Arc>, + routing_context: NodeId, +) { + for child in childs { + if net.graph.contains_node(*child) { + match tables.get_face(&net.graph[*child].zid).cloned() { + Some(mut someface) => { + if src_face.is_none() || someface.id != src_face.as_ref().unwrap().id { + let key_expr = Resource::decl_key(res, &mut someface); + + log::debug!("Send queryable {} on {}", res.expr(), someface); + + someface.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType { + node_id: routing_context, + }, + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id: 0, // TODO + wire_expr: key_expr, + ext_info: *qabl_info, + }), + }, + res.expr(), + )); + } + } + None => log::trace!("Unable to find face for zid {}", net.graph[*child].zid), + } + } + } +} + +fn propagate_simple_queryable( + tables: &mut Tables, + res: &Arc, + src_face: Option<&mut Arc>, +) { + let full_peers_net = hat!(tables).full_net(WhatAmI::Peer); + let faces = tables.faces.values().cloned(); + for mut dst_face in faces { + let info = local_qabl_info(tables, res, &dst_face); + let current_info = face_hat!(dst_face).local_qabls.get(res); + if (src_face.is_none() || src_face.as_ref().unwrap().id != dst_face.id) + && (current_info.is_none() || *current_info.unwrap() != info) + && match tables.whatami { + WhatAmI::Router => { + if full_peers_net { + dst_face.whatami == WhatAmI::Client + } else { + dst_face.whatami != WhatAmI::Router + && (src_face.is_none() + || src_face.as_ref().unwrap().whatami != WhatAmI::Peer + || dst_face.whatami != WhatAmI::Peer + || hat!(tables).failover_brokering( + src_face.as_ref().unwrap().zid, + 
dst_face.zid, + )) + } + } + WhatAmI::Peer => { + if full_peers_net { + dst_face.whatami == WhatAmI::Client + } else { + src_face.is_none() + || src_face.as_ref().unwrap().whatami == WhatAmI::Client + || dst_face.whatami == WhatAmI::Client + } + } + _ => { + src_face.is_none() + || src_face.as_ref().unwrap().whatami == WhatAmI::Client + || dst_face.whatami == WhatAmI::Client + } + } + { + face_hat_mut!(&mut dst_face) + .local_qabls + .insert(res.clone(), info); + let key_expr = Resource::decl_key(res, &mut dst_face); + dst_face.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id: 0, // TODO + wire_expr: key_expr, + ext_info: info, + }), + }, + res.expr(), + )); + } + } +} + +fn propagate_sourced_queryable( + tables: &Tables, + res: &Arc, + qabl_info: &QueryableInfo, + src_face: Option<&mut Arc>, + source: &ZenohId, + net_type: WhatAmI, +) { + let net = hat!(tables).get_net(net_type).unwrap(); + match net.get_idx(source) { + Some(tree_sid) => { + if net.trees.len() > tree_sid.index() { + send_sourced_queryable_to_net_childs( + tables, + net, + &net.trees[tree_sid.index()].childs, + res, + qabl_info, + src_face, + tree_sid.index() as NodeId, + ); + } else { + log::trace!( + "Propagating qabl {}: tree for node {} sid:{} not yet ready", + res.expr(), + tree_sid.index(), + source + ); + } + } + None => log::error!( + "Error propagating qabl {}: cannot get index of {}!", + res.expr(), + source + ), + } +} + +fn register_router_queryable( + tables: &mut Tables, + mut face: Option<&mut Arc>, + res: &mut Arc, + qabl_info: &QueryableInfo, + router: ZenohId, +) { + let current_info = res_hat!(res).router_qabls.get(&router); + if current_info.is_none() || current_info.unwrap() != qabl_info { + // Register router queryable + { + log::debug!( + "Register router queryable {} (router: {})", + res.expr(), + router, + ); + res_hat_mut!(res).router_qabls.insert(router, *qabl_info); + hat_mut!(tables).router_qabls.insert(res.clone()); + } + + // Propagate queryable to routers + propagate_sourced_queryable( + tables, + res, + qabl_info, + face.as_deref_mut(), + &router, + WhatAmI::Router, + ); + } + + if hat!(tables).full_net(WhatAmI::Peer) { + // Propagate queryable to peers + if face.is_none() || face.as_ref().unwrap().whatami != WhatAmI::Peer { + let local_info = local_peer_qabl_info(tables, res); + register_peer_queryable(tables, face.as_deref_mut(), res, &local_info, tables.zid) + } + } + + // Propagate queryable to clients + propagate_simple_queryable(tables, res, face); +} + +fn declare_router_queryable( + tables: &TablesLock, + rtables: RwLockReadGuard, + face: &mut Arc, + expr: &WireExpr, + qabl_info: &QueryableInfo, + router: ZenohId, +) { + match rtables + .get_mapping(face, &expr.scope, expr.mapping) + .cloned() + { + Some(mut prefix) => { + let res = Resource::get_resource(&prefix, &expr.suffix); + let (mut res, mut wtables) = + if res.as_ref().map(|r| r.context.is_some()).unwrap_or(false) { + drop(rtables); + let wtables = zwrite!(tables.tables); + (res.unwrap(), wtables) + } else { + let mut fullexpr = prefix.expr(); + fullexpr.push_str(expr.suffix.as_ref()); + log::debug!("Register router queryable {}", fullexpr); + let mut matches = keyexpr::new(fullexpr.as_str()) + .map(|ke| Resource::get_matches(&rtables, ke)) + .unwrap_or_default(); + drop(rtables); + let mut wtables = zwrite!(tables.tables); + let mut res = + 
Resource::make_resource(&mut wtables, &mut prefix, expr.suffix.as_ref()); + matches.push(Arc::downgrade(&res)); + Resource::match_resource(&wtables, &mut res, matches); + (res, wtables) + }; + register_router_queryable(&mut wtables, Some(face), &mut res, qabl_info, router); + disable_matches_query_routes(&mut wtables, &mut res); + drop(wtables); + + let rtables = zread!(tables.tables); + let matches_query_routes = compute_matches_query_routes_(&rtables, &res); + drop(rtables); + + let wtables = zwrite!(tables.tables); + for (mut res, query_routes) in matches_query_routes { + get_mut_unchecked(&mut res) + .context_mut() + .update_query_routes(query_routes); + } + drop(wtables); + } + None => log::error!("Declare router queryable for unknown scope {}!", expr.scope), + } +} + +fn register_peer_queryable( + tables: &mut Tables, + mut face: Option<&mut Arc>, + res: &mut Arc, + qabl_info: &QueryableInfo, + peer: ZenohId, +) { + let current_info = res_hat!(res).peer_qabls.get(&peer); + if current_info.is_none() || current_info.unwrap() != qabl_info { + // Register peer queryable + { + log::debug!("Register peer queryable {} (peer: {})", res.expr(), peer,); + res_hat_mut!(res).peer_qabls.insert(peer, *qabl_info); + hat_mut!(tables).peer_qabls.insert(res.clone()); + } + + // Propagate queryable to peers + propagate_sourced_queryable( + tables, + res, + qabl_info, + face.as_deref_mut(), + &peer, + WhatAmI::Peer, + ); + } + + if tables.whatami == WhatAmI::Peer { + // Propagate queryable to clients + propagate_simple_queryable(tables, res, face); + } +} + +fn declare_peer_queryable( + tables: &TablesLock, + rtables: RwLockReadGuard, + face: &mut Arc, + expr: &WireExpr, + qabl_info: &QueryableInfo, + peer: ZenohId, +) { + match rtables + .get_mapping(face, &expr.scope, expr.mapping) + .cloned() + { + Some(mut prefix) => { + let res = Resource::get_resource(&prefix, &expr.suffix); + let (mut res, mut wtables) = + if res.as_ref().map(|r| r.context.is_some()).unwrap_or(false) { + drop(rtables); + let wtables = zwrite!(tables.tables); + (res.unwrap(), wtables) + } else { + let mut fullexpr = prefix.expr(); + fullexpr.push_str(expr.suffix.as_ref()); + log::debug!("Register peer queryable {}", fullexpr); + let mut matches = keyexpr::new(fullexpr.as_str()) + .map(|ke| Resource::get_matches(&rtables, ke)) + .unwrap_or_default(); + drop(rtables); + let mut wtables = zwrite!(tables.tables); + let mut res = + Resource::make_resource(&mut wtables, &mut prefix, expr.suffix.as_ref()); + matches.push(Arc::downgrade(&res)); + Resource::match_resource(&wtables, &mut res, matches); + (res, wtables) + }; + let mut face = Some(face); + register_peer_queryable(&mut wtables, face.as_deref_mut(), &mut res, qabl_info, peer); + if wtables.whatami == WhatAmI::Router { + let local_info = local_router_qabl_info(&wtables, &res); + let zid = wtables.zid; + register_router_queryable(&mut wtables, face, &mut res, &local_info, zid); + } + disable_matches_query_routes(&mut wtables, &mut res); + drop(wtables); + + let rtables = zread!(tables.tables); + let matches_query_routes = compute_matches_query_routes_(&rtables, &res); + drop(rtables); + + let wtables = zwrite!(tables.tables); + for (mut res, query_routes) in matches_query_routes { + get_mut_unchecked(&mut res) + .context_mut() + .update_query_routes(query_routes); + } + drop(wtables); + } + None => log::error!("Declare router queryable for unknown scope {}!", expr.scope), + } +} + +fn register_client_queryable( + _tables: &mut Tables, + face: &mut Arc, + res: &mut Arc, + 
qabl_info: &QueryableInfo, +) { + // Register queryable + { + let res = get_mut_unchecked(res); + log::debug!("Register queryable {} (face: {})", res.expr(), face,); + get_mut_unchecked(res.session_ctxs.entry(face.id).or_insert_with(|| { + Arc::new(SessionContext { + face: face.clone(), + local_expr_id: None, + remote_expr_id: None, + subs: None, + qabl: None, + last_values: HashMap::new(), + }) + })) + .qabl = Some(*qabl_info); + } + face_hat_mut!(face).remote_qabls.insert(res.clone()); +} + +fn declare_client_queryable( + tables: &TablesLock, + rtables: RwLockReadGuard, + face: &mut Arc, + expr: &WireExpr, + qabl_info: &QueryableInfo, +) { + match rtables + .get_mapping(face, &expr.scope, expr.mapping) + .cloned() + { + Some(mut prefix) => { + let res = Resource::get_resource(&prefix, &expr.suffix); + let (mut res, mut wtables) = + if res.as_ref().map(|r| r.context.is_some()).unwrap_or(false) { + drop(rtables); + let wtables = zwrite!(tables.tables); + (res.unwrap(), wtables) + } else { + let mut fullexpr = prefix.expr(); + fullexpr.push_str(expr.suffix.as_ref()); + log::debug!("Register client queryable {}", fullexpr); + let mut matches = keyexpr::new(fullexpr.as_str()) + .map(|ke| Resource::get_matches(&rtables, ke)) + .unwrap_or_default(); + drop(rtables); + let mut wtables = zwrite!(tables.tables); + let mut res = + Resource::make_resource(&mut wtables, &mut prefix, expr.suffix.as_ref()); + matches.push(Arc::downgrade(&res)); + Resource::match_resource(&wtables, &mut res, matches); + (res, wtables) + }; + + register_client_queryable(&mut wtables, face, &mut res, qabl_info); + + match wtables.whatami { + WhatAmI::Router => { + let local_details = local_router_qabl_info(&wtables, &res); + let zid = wtables.zid; + register_router_queryable( + &mut wtables, + Some(face), + &mut res, + &local_details, + zid, + ); + } + WhatAmI::Peer => { + if hat!(wtables).full_net(WhatAmI::Peer) { + let local_details = local_peer_qabl_info(&wtables, &res); + let zid = wtables.zid; + register_peer_queryable( + &mut wtables, + Some(face), + &mut res, + &local_details, + zid, + ); + } else { + propagate_simple_queryable(&mut wtables, &res, Some(face)); + } + } + _ => { + propagate_simple_queryable(&mut wtables, &res, Some(face)); + } + } + disable_matches_query_routes(&mut wtables, &mut res); + drop(wtables); + + let rtables = zread!(tables.tables); + let matches_query_routes = compute_matches_query_routes_(&rtables, &res); + drop(rtables); + + let wtables = zwrite!(tables.tables); + for (mut res, query_routes) in matches_query_routes { + get_mut_unchecked(&mut res) + .context_mut() + .update_query_routes(query_routes); + } + drop(wtables); + } + None => log::error!("Declare queryable for unknown scope {}!", expr.scope), + } +} + +#[inline] +fn remote_router_qabls(tables: &Tables, res: &Arc) -> bool { + res.context.is_some() + && res_hat!(res) + .router_qabls + .keys() + .any(|router| router != &tables.zid) +} + +#[inline] +fn remote_peer_qabls(tables: &Tables, res: &Arc) -> bool { + res.context.is_some() + && res_hat!(res) + .peer_qabls + .keys() + .any(|peer| peer != &tables.zid) +} + +#[inline] +fn client_qabls(res: &Arc) -> Vec> { + res.session_ctxs + .values() + .filter_map(|ctx| { + if ctx.qabl.is_some() { + Some(ctx.face.clone()) + } else { + None + } + }) + .collect() +} + +#[inline] +fn send_forget_sourced_queryable_to_net_childs( + tables: &Tables, + net: &Network, + childs: &[NodeIndex], + res: &Arc, + src_face: Option<&Arc>, + routing_context: NodeId, +) { + for child in childs { + if 
net.graph.contains_node(*child) { + match tables.get_face(&net.graph[*child].zid).cloned() { + Some(mut someface) => { + if src_face.is_none() || someface.id != src_face.unwrap().id { + let wire_expr = Resource::decl_key(res, &mut someface); + + log::debug!("Send forget queryable {} on {}", res.expr(), someface); + + someface.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType { + node_id: routing_context, + }, + body: DeclareBody::UndeclareQueryable(UndeclareQueryable { + id: 0, // TODO + ext_wire_expr: WireExprType { wire_expr }, + }), + }, + res.expr(), + )); + } + } + None => log::trace!("Unable to find face for zid {}", net.graph[*child].zid), + } + } + } +} + +fn propagate_forget_simple_queryable(tables: &mut Tables, res: &mut Arc) { + for face in tables.faces.values_mut() { + if face_hat!(face).local_qabls.contains_key(res) { + let wire_expr = Resource::get_best_key(res, "", face.id); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::UndeclareQueryable(UndeclareQueryable { + id: 0, // TODO + ext_wire_expr: WireExprType { wire_expr }, + }), + }, + res.expr(), + )); + + face_hat_mut!(face).local_qabls.remove(res); + } + } +} + +fn propagate_forget_simple_queryable_to_peers(tables: &mut Tables, res: &mut Arc) { + if !hat!(tables).full_net(WhatAmI::Peer) + && res_hat!(res).router_qabls.len() == 1 + && res_hat!(res).router_qabls.contains_key(&tables.zid) + { + for mut face in tables + .faces + .values() + .cloned() + .collect::>>() + { + if face.whatami == WhatAmI::Peer + && face_hat!(face).local_qabls.contains_key(res) + && !res.session_ctxs.values().any(|s| { + face.zid != s.face.zid + && s.qabl.is_some() + && (s.face.whatami == WhatAmI::Client + || (s.face.whatami == WhatAmI::Peer + && hat!(tables).failover_brokering(s.face.zid, face.zid))) + }) + { + let wire_expr = Resource::get_best_key(res, "", face.id); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::UndeclareQueryable(UndeclareQueryable { + id: 0, // TODO + ext_wire_expr: WireExprType { wire_expr }, + }), + }, + res.expr(), + )); + + face_hat_mut!(&mut face).local_qabls.remove(res); + } + } + } +} + +fn propagate_forget_sourced_queryable( + tables: &mut Tables, + res: &mut Arc, + src_face: Option<&Arc>, + source: &ZenohId, + net_type: WhatAmI, +) { + let net = hat!(tables).get_net(net_type).unwrap(); + match net.get_idx(source) { + Some(tree_sid) => { + if net.trees.len() > tree_sid.index() { + send_forget_sourced_queryable_to_net_childs( + tables, + net, + &net.trees[tree_sid.index()].childs, + res, + src_face, + tree_sid.index() as NodeId, + ); + } else { + log::trace!( + "Propagating forget qabl {}: tree for node {} sid:{} not yet ready", + res.expr(), + tree_sid.index(), + source + ); + } + } + None => log::error!( + "Error propagating forget qabl {}: cannot get index of {}!", + res.expr(), + source + ), + } +} + +fn unregister_router_queryable(tables: &mut Tables, res: &mut Arc, router: &ZenohId) { + log::debug!( + "Unregister router queryable {} (router: {})", + res.expr(), + router, + ); + res_hat_mut!(res).router_qabls.remove(router); + + if res_hat!(res).router_qabls.is_empty() { + hat_mut!(tables) + .router_qabls + 
.retain(|qabl| !Arc::ptr_eq(qabl, res)); + + if hat!(tables).full_net(WhatAmI::Peer) { + undeclare_peer_queryable(tables, None, res, &tables.zid.clone()); + } + propagate_forget_simple_queryable(tables, res); + } + + propagate_forget_simple_queryable_to_peers(tables, res); +} + +fn undeclare_router_queryable( + tables: &mut Tables, + face: Option<&Arc>, + res: &mut Arc, + router: &ZenohId, +) { + if res_hat!(res).router_qabls.contains_key(router) { + unregister_router_queryable(tables, res, router); + propagate_forget_sourced_queryable(tables, res, face, router, WhatAmI::Router); + } +} + +fn forget_router_queryable( + tables: &TablesLock, + rtables: RwLockReadGuard, + face: &mut Arc, + expr: &WireExpr, + router: &ZenohId, +) { + match rtables.get_mapping(face, &expr.scope, expr.mapping) { + Some(prefix) => match Resource::get_resource(prefix, expr.suffix.as_ref()) { + Some(mut res) => { + drop(rtables); + let mut wtables = zwrite!(tables.tables); + undeclare_router_queryable(&mut wtables, Some(face), &mut res, router); + disable_matches_query_routes(&mut wtables, &mut res); + drop(wtables); + + let rtables = zread!(tables.tables); + let matches_query_routes = compute_matches_query_routes_(&rtables, &res); + drop(rtables); + + let wtables = zwrite!(tables.tables); + for (mut res, query_routes) in matches_query_routes { + get_mut_unchecked(&mut res) + .context_mut() + .update_query_routes(query_routes); + } + Resource::clean(&mut res); + drop(wtables); + } + None => log::error!("Undeclare unknown router queryable!"), + }, + None => log::error!("Undeclare router queryable with unknown scope!"), + } +} + +fn unregister_peer_queryable(tables: &mut Tables, res: &mut Arc, peer: &ZenohId) { + log::debug!("Unregister peer queryable {} (peer: {})", res.expr(), peer,); + res_hat_mut!(res).peer_qabls.remove(peer); + + if res_hat!(res).peer_qabls.is_empty() { + hat_mut!(tables) + .peer_qabls + .retain(|qabl| !Arc::ptr_eq(qabl, res)); + + if tables.whatami == WhatAmI::Peer { + propagate_forget_simple_queryable(tables, res); + } + } +} + +fn undeclare_peer_queryable( + tables: &mut Tables, + face: Option<&Arc>, + res: &mut Arc, + peer: &ZenohId, +) { + if res_hat!(res).peer_qabls.contains_key(peer) { + unregister_peer_queryable(tables, res, peer); + propagate_forget_sourced_queryable(tables, res, face, peer, WhatAmI::Peer); + } +} + +fn forget_peer_queryable( + tables: &TablesLock, + rtables: RwLockReadGuard, + face: &mut Arc, + expr: &WireExpr, + peer: &ZenohId, +) { + match rtables.get_mapping(face, &expr.scope, expr.mapping) { + Some(prefix) => match Resource::get_resource(prefix, expr.suffix.as_ref()) { + Some(mut res) => { + drop(rtables); + let mut wtables = zwrite!(tables.tables); + undeclare_peer_queryable(&mut wtables, Some(face), &mut res, peer); + + if wtables.whatami == WhatAmI::Router { + let client_qabls = res.session_ctxs.values().any(|ctx| ctx.qabl.is_some()); + let peer_qabls = remote_peer_qabls(&wtables, &res); + let zid = wtables.zid; + if !client_qabls && !peer_qabls { + undeclare_router_queryable(&mut wtables, None, &mut res, &zid); + } else { + let local_info = local_router_qabl_info(&wtables, &res); + register_router_queryable(&mut wtables, None, &mut res, &local_info, zid); + } + } + drop(wtables); + + let rtables = zread!(tables.tables); + let matches_query_routes = compute_matches_query_routes_(&rtables, &res); + drop(rtables); + + let wtables = zwrite!(tables.tables); + for (mut res, query_routes) in matches_query_routes { + get_mut_unchecked(&mut res) + .context_mut() + 
.update_query_routes(query_routes); + } + Resource::clean(&mut res); + drop(wtables); + } + None => log::error!("Undeclare unknown peer queryable!"), + }, + None => log::error!("Undeclare peer queryable with unknown scope!"), + } +} + +pub(super) fn undeclare_client_queryable( + tables: &mut Tables, + face: &mut Arc, + res: &mut Arc, +) { + log::debug!("Unregister client queryable {} for {}", res.expr(), face); + if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) { + get_mut_unchecked(ctx).qabl = None; + if ctx.qabl.is_none() { + face_hat_mut!(face).remote_qabls.remove(res); + } + } + + let mut client_qabls = client_qabls(res); + let router_qabls = remote_router_qabls(tables, res); + let peer_qabls = remote_peer_qabls(tables, res); + + match tables.whatami { + WhatAmI::Router => { + if client_qabls.is_empty() && !peer_qabls { + undeclare_router_queryable(tables, None, res, &tables.zid.clone()); + } else { + let local_info = local_router_qabl_info(tables, res); + register_router_queryable(tables, None, res, &local_info, tables.zid); + propagate_forget_simple_queryable_to_peers(tables, res); + } + } + WhatAmI::Peer => { + if hat!(tables).full_net(WhatAmI::Peer) { + if client_qabls.is_empty() { + undeclare_peer_queryable(tables, None, res, &tables.zid.clone()); + } else { + let local_info = local_peer_qabl_info(tables, res); + register_peer_queryable(tables, None, res, &local_info, tables.zid); + } + } else if client_qabls.is_empty() { + propagate_forget_simple_queryable(tables, res); + } else { + propagate_simple_queryable(tables, res, None); + } + } + _ => { + if client_qabls.is_empty() { + propagate_forget_simple_queryable(tables, res); + } else { + propagate_simple_queryable(tables, res, None); + } + } + } + + if client_qabls.len() == 1 && !router_qabls && !peer_qabls { + let face = &mut client_qabls[0]; + if face_hat!(face).local_qabls.contains_key(res) { + let wire_expr = Resource::get_best_key(res, "", face.id); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::UndeclareQueryable(UndeclareQueryable { + id: 0, // TODO + ext_wire_expr: WireExprType { wire_expr }, + }), + }, + res.expr(), + )); + + face_hat_mut!(face).local_qabls.remove(res); + } + } +} + +fn forget_client_queryable( + tables: &TablesLock, + rtables: RwLockReadGuard, + face: &mut Arc, + expr: &WireExpr, +) { + match rtables.get_mapping(face, &expr.scope, expr.mapping) { + Some(prefix) => match Resource::get_resource(prefix, expr.suffix.as_ref()) { + Some(mut res) => { + drop(rtables); + let mut wtables = zwrite!(tables.tables); + undeclare_client_queryable(&mut wtables, face, &mut res); + disable_matches_query_routes(&mut wtables, &mut res); + drop(wtables); + + let rtables = zread!(tables.tables); + let matches_query_routes = compute_matches_query_routes_(&rtables, &res); + drop(rtables); + + let wtables = zwrite!(tables.tables); + for (mut res, query_routes) in matches_query_routes { + get_mut_unchecked(&mut res) + .context_mut() + .update_query_routes(query_routes); + } + Resource::clean(&mut res); + drop(wtables); + } + None => log::error!("Undeclare unknown queryable!"), + }, + None => log::error!("Undeclare queryable with unknown scope!"), + } +} + +pub(super) fn queries_new_face(tables: &mut Tables, face: &mut Arc) { + match tables.whatami { + WhatAmI::Router => { + if face.whatami == WhatAmI::Client { + for qabl in hat!(tables).router_qabls.iter() 
{ + if qabl.context.is_some() { + let info = local_qabl_info(tables, qabl, face); + face_hat_mut!(face).local_qabls.insert(qabl.clone(), info); + let key_expr = Resource::decl_key(qabl, face); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id: 0, // TODO + wire_expr: key_expr, + ext_info: info, + }), + }, + qabl.expr(), + )); + } + } + } else if face.whatami == WhatAmI::Peer && !hat!(tables).full_net(WhatAmI::Peer) { + for qabl in hat!(tables).router_qabls.iter() { + if qabl.context.is_some() + && (res_hat!(qabl).router_qabls.keys().any(|r| *r != tables.zid) + || qabl.session_ctxs.values().any(|s| { + s.qabl.is_some() + && (s.face.whatami == WhatAmI::Client + || (s.face.whatami == WhatAmI::Peer + && hat!(tables) + .failover_brokering(s.face.zid, face.zid))) + })) + { + let info = local_qabl_info(tables, qabl, face); + face_hat_mut!(face).local_qabls.insert(qabl.clone(), info); + let key_expr = Resource::decl_key(qabl, face); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id: 0, // TODO + wire_expr: key_expr, + ext_info: info, + }), + }, + qabl.expr(), + )); + } + } + } + } + WhatAmI::Peer => { + if hat!(tables).full_net(WhatAmI::Peer) { + if face.whatami == WhatAmI::Client { + for qabl in &hat!(tables).peer_qabls { + if qabl.context.is_some() { + let info = local_qabl_info(tables, qabl, face); + face_hat_mut!(face).local_qabls.insert(qabl.clone(), info); + let key_expr = Resource::decl_key(qabl, face); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id: 0, // TODO + wire_expr: key_expr, + ext_info: info, + }), + }, + qabl.expr(), + )); + } + } + } + } else { + for face in tables + .faces + .values() + .cloned() + .collect::>>() + { + for qabl in face_hat!(face).remote_qabls.iter() { + propagate_simple_queryable(tables, qabl, Some(&mut face.clone())); + } + } + } + } + WhatAmI::Client => { + for face in tables + .faces + .values() + .cloned() + .collect::>>() + { + for qabl in face_hat!(face).remote_qabls.iter() { + propagate_simple_queryable(tables, qabl, Some(&mut face.clone())); + } + } + } + } +} + +pub(super) fn queries_remove_node(tables: &mut Tables, node: &ZenohId, net_type: WhatAmI) { + match net_type { + WhatAmI::Router => { + let mut qabls = vec![]; + for res in hat!(tables).router_qabls.iter() { + for qabl in res_hat!(res).router_qabls.keys() { + if qabl == node { + qabls.push(res.clone()); + } + } + } + for mut res in qabls { + unregister_router_queryable(tables, &mut res, node); + + let matches_query_routes = compute_matches_query_routes_(tables, &res); + for (mut res, query_routes) in matches_query_routes { + get_mut_unchecked(&mut res) + .context_mut() + .update_query_routes(query_routes); + } + Resource::clean(&mut res); + } + } + WhatAmI::Peer => { + let mut qabls = vec![]; + for res in hat!(tables).router_qabls.iter() { + for qabl in res_hat!(res).router_qabls.keys() { + if qabl == node { + qabls.push(res.clone()); + } + } + } + for mut res in qabls { + unregister_peer_queryable(tables, &mut res, node); + + if 
tables.whatami == WhatAmI::Router { + let client_qabls = res.session_ctxs.values().any(|ctx| ctx.qabl.is_some()); + let peer_qabls = remote_peer_qabls(tables, &res); + if !client_qabls && !peer_qabls { + undeclare_router_queryable(tables, None, &mut res, &tables.zid.clone()); + } else { + let local_info = local_router_qabl_info(tables, &res); + register_router_queryable(tables, None, &mut res, &local_info, tables.zid); + } + } + + let matches_query_routes = compute_matches_query_routes_(tables, &res); + for (mut res, query_routes) in matches_query_routes { + get_mut_unchecked(&mut res) + .context_mut() + .update_query_routes(query_routes); + } + Resource::clean(&mut res) + } + } + _ => (), + } +} + +pub(super) fn queries_linkstate_change(tables: &mut Tables, zid: &ZenohId, links: &[ZenohId]) { + if let Some(src_face) = tables.get_face(zid) { + if hat!(tables).router_peers_failover_brokering + && tables.whatami == WhatAmI::Router + && src_face.whatami == WhatAmI::Peer + { + for res in &face_hat!(src_face).remote_qabls { + let client_qabls = res + .session_ctxs + .values() + .any(|ctx| ctx.face.whatami == WhatAmI::Client && ctx.qabl.is_some()); + if !remote_router_qabls(tables, res) && !client_qabls { + for ctx in get_mut_unchecked(&mut res.clone()) + .session_ctxs + .values_mut() + { + let dst_face = &mut get_mut_unchecked(ctx).face; + if dst_face.whatami == WhatAmI::Peer && src_face.zid != dst_face.zid { + if face_hat!(dst_face).local_qabls.contains_key(res) { + let forget = !HatTables::failover_brokering_to(links, dst_face.zid) + && { + let ctx_links = hat!(tables) + .peers_net + .as_ref() + .map(|net| net.get_links(dst_face.zid)) + .unwrap_or_else(|| &[]); + res.session_ctxs.values().any(|ctx2| { + ctx2.face.whatami == WhatAmI::Peer + && ctx2.qabl.is_some() + && HatTables::failover_brokering_to( + ctx_links, + ctx2.face.zid, + ) + }) + }; + if forget { + let wire_expr = Resource::get_best_key(res, "", dst_face.id); + dst_face.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::UndeclareQueryable( + UndeclareQueryable { + id: 0, // TODO + ext_wire_expr: WireExprType { wire_expr }, + }, + ), + }, + res.expr(), + )); + + face_hat_mut!(dst_face).local_qabls.remove(res); + } + } else if HatTables::failover_brokering_to(links, ctx.face.zid) { + let dst_face = &mut get_mut_unchecked(ctx).face; + let info = local_qabl_info(tables, res, dst_face); + face_hat_mut!(dst_face) + .local_qabls + .insert(res.clone(), info); + let key_expr = Resource::decl_key(res, dst_face); + dst_face.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id: 0, // TODO + wire_expr: key_expr, + ext_info: info, + }), + }, + res.expr(), + )); + } + } + } + } + } + } + } +} + +pub(super) fn queries_tree_change( + tables: &mut Tables, + new_childs: &[Vec], + net_type: WhatAmI, +) { + // propagate qabls to new childs + for (tree_sid, tree_childs) in new_childs.iter().enumerate() { + if !tree_childs.is_empty() { + let net = hat!(tables).get_net(net_type).unwrap(); + let tree_idx = NodeIndex::new(tree_sid); + if net.graph.contains_node(tree_idx) { + let tree_id = net.graph[tree_idx].zid; + + let qabls_res = match net_type { + WhatAmI::Router => &hat!(tables).router_qabls, + _ => &hat!(tables).peer_qabls, + }; + + for res 
in qabls_res { + let qabls = match net_type { + WhatAmI::Router => &res_hat!(res).router_qabls, + _ => &res_hat!(res).peer_qabls, + }; + if let Some(qabl_info) = qabls.get(&tree_id) { + send_sourced_queryable_to_net_childs( + tables, + net, + tree_childs, + res, + qabl_info, + None, + tree_sid as NodeId, + ); + } + } + } + } + } + + // recompute routes + compute_query_routes_from(tables, &mut tables.root_res.clone()); +} + +#[inline] +#[allow(clippy::too_many_arguments)] +fn insert_target_for_qabls( + route: &mut QueryTargetQablSet, + expr: &mut RoutingExpr, + tables: &Tables, + net: &Network, + source: NodeId, + qabls: &HashMap, + complete: bool, +) { + if net.trees.len() > source as usize { + for (qabl, qabl_info) in qabls { + if let Some(qabl_idx) = net.get_idx(qabl) { + if net.trees[source as usize].directions.len() > qabl_idx.index() { + if let Some(direction) = net.trees[source as usize].directions[qabl_idx.index()] + { + if net.graph.contains_node(direction) { + if let Some(face) = tables.get_face(&net.graph[direction].zid) { + if net.distances.len() > qabl_idx.index() { + let key_expr = + Resource::get_best_key(expr.prefix, expr.suffix, face.id); + route.push(QueryTargetQabl { + direction: (face.clone(), key_expr.to_owned(), source), + complete: if complete { + qabl_info.complete as u64 + } else { + 0 + }, + distance: net.distances[qabl_idx.index()], + }); + } + } + } + } + } + } + } + } else { + log::trace!("Tree for node sid:{} not yet ready", source); + } +} + +lazy_static::lazy_static! { + static ref EMPTY_ROUTE: Arc = Arc::new(Vec::new()); +} + +impl HatQueriesTrait for HatCode { + fn declare_queryable( + &self, + tables: &TablesLock, + face: &mut Arc, + expr: &WireExpr, + qabl_info: &QueryableInfo, + node_id: NodeId, + ) { + let rtables = zread!(tables.tables); + match (rtables.whatami, face.whatami) { + (WhatAmI::Router, WhatAmI::Router) => { + if let Some(router) = get_router(&rtables, face, node_id) { + declare_router_queryable(tables, rtables, face, expr, qabl_info, router) + } + } + (WhatAmI::Router, WhatAmI::Peer) + | (WhatAmI::Peer, WhatAmI::Router) + | (WhatAmI::Peer, WhatAmI::Peer) => { + if hat!(rtables).full_net(WhatAmI::Peer) { + if let Some(peer) = get_peer(&rtables, face, node_id) { + declare_peer_queryable(tables, rtables, face, expr, qabl_info, peer) + } + } else { + declare_client_queryable(tables, rtables, face, expr, qabl_info) + } + } + _ => declare_client_queryable(tables, rtables, face, expr, qabl_info), + } + } + + fn forget_queryable( + &self, + tables: &TablesLock, + face: &mut Arc, + expr: &WireExpr, + node_id: NodeId, + ) { + let rtables = zread!(tables.tables); + match (rtables.whatami, face.whatami) { + (WhatAmI::Router, WhatAmI::Router) => { + if let Some(router) = get_router(&rtables, face, node_id) { + forget_router_queryable(tables, rtables, face, expr, &router) + } + } + (WhatAmI::Router, WhatAmI::Peer) + | (WhatAmI::Peer, WhatAmI::Router) + | (WhatAmI::Peer, WhatAmI::Peer) => { + if hat!(rtables).full_net(WhatAmI::Peer) { + if let Some(peer) = get_peer(&rtables, face, node_id) { + forget_peer_queryable(tables, rtables, face, expr, &peer) + } + } else { + forget_client_queryable(tables, rtables, face, expr) + } + } + _ => forget_client_queryable(tables, rtables, face, expr), + } + } + + fn compute_query_route( + &self, + tables: &Tables, + expr: &mut RoutingExpr, + source: NodeId, + source_type: WhatAmI, + ) -> Arc { + let mut route = QueryTargetQablSet::new(); + let key_expr = expr.full_expr(); + if key_expr.ends_with('/') { + return 
EMPTY_ROUTE.clone(); + } + log::trace!( + "compute_query_route({}, {:?}, {:?})", + key_expr, + source, + source_type + ); + let key_expr = match OwnedKeyExpr::try_from(key_expr) { + Ok(ke) => ke, + Err(e) => { + log::warn!("Invalid KE reached the system: {}", e); + return EMPTY_ROUTE.clone(); + } + }; + let res = Resource::get_resource(expr.prefix, expr.suffix); + let matches = res + .as_ref() + .and_then(|res| res.context.as_ref()) + .map(|ctx| Cow::from(&ctx.matches)) + .unwrap_or_else(|| Cow::from(Resource::get_matches(tables, &key_expr))); + + let master = tables.whatami != WhatAmI::Router + || !hat!(tables).full_net(WhatAmI::Peer) + || *hat!(tables).elect_router(&tables.zid, &key_expr, hat!(tables).shared_nodes.iter()) + == tables.zid; + + for mres in matches.iter() { + let mres = mres.upgrade().unwrap(); + let complete = DEFAULT_INCLUDER.includes(mres.expr().as_bytes(), key_expr.as_bytes()); + if tables.whatami == WhatAmI::Router { + if master || source_type == WhatAmI::Router { + let net = hat!(tables).routers_net.as_ref().unwrap(); + let router_source = match source_type { + WhatAmI::Router => source, + _ => net.idx.index() as NodeId, + }; + insert_target_for_qabls( + &mut route, + expr, + tables, + net, + router_source, + &res_hat!(mres).router_qabls, + complete, + ); + } + + if (master || source_type != WhatAmI::Router) + && hat!(tables).full_net(WhatAmI::Peer) + { + let net = hat!(tables).peers_net.as_ref().unwrap(); + let peer_source = match source_type { + WhatAmI::Peer => source, + _ => net.idx.index() as NodeId, + }; + insert_target_for_qabls( + &mut route, + expr, + tables, + net, + peer_source, + &res_hat!(mres).peer_qabls, + complete, + ); + } + } + + if tables.whatami == WhatAmI::Peer && hat!(tables).full_net(WhatAmI::Peer) { + let net = hat!(tables).peers_net.as_ref().unwrap(); + let peer_source = match source_type { + WhatAmI::Router | WhatAmI::Peer => source, + _ => net.idx.index() as NodeId, + }; + insert_target_for_qabls( + &mut route, + expr, + tables, + net, + peer_source, + &res_hat!(mres).peer_qabls, + complete, + ); + } + + if tables.whatami != WhatAmI::Router || master || source_type == WhatAmI::Router { + for (sid, context) in &mres.session_ctxs { + if match tables.whatami { + WhatAmI::Router => context.face.whatami != WhatAmI::Router, + _ => { + source_type == WhatAmI::Client + || context.face.whatami == WhatAmI::Client + } + } { + let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, *sid); + if let Some(qabl_info) = context.qabl.as_ref() { + route.push(QueryTargetQabl { + direction: ( + context.face.clone(), + key_expr.to_owned(), + NodeId::default(), + ), + complete: if complete { + qabl_info.complete as u64 + } else { + 0 + }, + distance: 0.5, + }); + } + } + } + } + } + route.sort_by_key(|qabl| OrderedFloat(qabl.distance)); + Arc::new(route) + } + + #[inline] + fn compute_local_replies( + &self, + tables: &Tables, + prefix: &Arc, + suffix: &str, + face: &Arc, + ) -> Vec<(WireExpr<'static>, ZBuf)> { + let mut result = vec![]; + // Only the first routing point in the query route + // should return the liveliness tokens + if face.whatami == WhatAmI::Client { + let key_expr = prefix.expr() + suffix; + let key_expr = match OwnedKeyExpr::try_from(key_expr) { + Ok(ke) => ke, + Err(e) => { + log::warn!("Invalid KE reached the system: {}", e); + return result; + } + }; + if key_expr.starts_with(PREFIX_LIVELINESS) { + let res = Resource::get_resource(prefix, suffix); + let matches = res + .as_ref() + .and_then(|res| res.context.as_ref()) + .map(|ctx| 
Cow::from(&ctx.matches)) + .unwrap_or_else(|| Cow::from(Resource::get_matches(tables, &key_expr))); + for mres in matches.iter() { + let mres = mres.upgrade().unwrap(); + if (mres.context.is_some() + && (!res_hat!(mres).router_subs.is_empty() + || !res_hat!(mres).peer_subs.is_empty())) + || mres.session_ctxs.values().any(|ctx| ctx.subs.is_some()) + { + result.push((Resource::get_best_key(&mres, "", face.id), ZBuf::default())); + } + } + } + } + result + } + + fn compute_query_routes_(&self, tables: &Tables, res: &Arc) -> QueryRoutes { + let mut routes = QueryRoutes { + routers_query_routes: vec![], + peers_query_routes: vec![], + peer_query_route: None, + client_query_route: None, + }; + let mut expr = RoutingExpr::new(res, ""); + if tables.whatami == WhatAmI::Router { + let indexes = hat!(tables) + .routers_net + .as_ref() + .unwrap() + .graph + .node_indices() + .collect::>(); + let max_idx = indexes.iter().max().unwrap(); + routes + .routers_query_routes + .resize_with(max_idx.index() + 1, || Arc::new(QueryTargetQablSet::new())); + + for idx in &indexes { + routes.routers_query_routes[idx.index()] = self.compute_query_route( + tables, + &mut expr, + idx.index() as NodeId, + WhatAmI::Router, + ); + } + + routes.peer_query_route = + Some(self.compute_query_route(tables, &mut expr, NodeId::default(), WhatAmI::Peer)); + } + if (tables.whatami == WhatAmI::Router || tables.whatami == WhatAmI::Peer) + && hat!(tables).full_net(WhatAmI::Peer) + { + let indexes = hat!(tables) + .peers_net + .as_ref() + .unwrap() + .graph + .node_indices() + .collect::>(); + let max_idx = indexes.iter().max().unwrap(); + routes + .peers_query_routes + .resize_with(max_idx.index() + 1, || Arc::new(QueryTargetQablSet::new())); + + for idx in &indexes { + routes.peers_query_routes[idx.index()] = self.compute_query_route( + tables, + &mut expr, + idx.index() as NodeId, + WhatAmI::Peer, + ); + } + } + if tables.whatami == WhatAmI::Peer && !hat!(tables).full_net(WhatAmI::Peer) { + routes.client_query_route = Some(self.compute_query_route( + tables, + &mut expr, + NodeId::default(), + WhatAmI::Client, + )); + routes.peer_query_route = + Some(self.compute_query_route(tables, &mut expr, NodeId::default(), WhatAmI::Peer)); + } + if tables.whatami == WhatAmI::Client { + routes.client_query_route = Some(self.compute_query_route( + tables, + &mut expr, + NodeId::default(), + WhatAmI::Client, + )); + } + routes + } + + fn compute_query_routes(&self, tables: &mut Tables, res: &mut Arc) { + if res.context.is_some() { + let mut res_mut = res.clone(); + let res_mut = get_mut_unchecked(&mut res_mut); + let mut expr = RoutingExpr::new(res, ""); + if tables.whatami == WhatAmI::Router { + let indexes = hat!(tables) + .routers_net + .as_ref() + .unwrap() + .graph + .node_indices() + .collect::>(); + let max_idx = indexes.iter().max().unwrap(); + let routers_query_routes = &mut res_mut.context_mut().routers_query_routes; + routers_query_routes.clear(); + routers_query_routes + .resize_with(max_idx.index() + 1, || Arc::new(QueryTargetQablSet::new())); + + for idx in &indexes { + routers_query_routes[idx.index()] = self.compute_query_route( + tables, + &mut expr, + idx.index() as NodeId, + WhatAmI::Router, + ); + } + + res_mut.context_mut().peer_query_route = Some(self.compute_query_route( + tables, + &mut expr, + NodeId::default(), + WhatAmI::Peer, + )); + } + if (tables.whatami == WhatAmI::Router || tables.whatami == WhatAmI::Peer) + && hat!(tables).full_net(WhatAmI::Peer) + { + let indexes = hat!(tables) + .peers_net + .as_ref() + 
.unwrap() + .graph + .node_indices() + .collect::>(); + let max_idx = indexes.iter().max().unwrap(); + let peers_query_routes = &mut res_mut.context_mut().peers_query_routes; + peers_query_routes.clear(); + peers_query_routes + .resize_with(max_idx.index() + 1, || Arc::new(QueryTargetQablSet::new())); + + for idx in &indexes { + peers_query_routes[idx.index()] = self.compute_query_route( + tables, + &mut expr, + idx.index() as NodeId, + WhatAmI::Peer, + ); + } + } + if tables.whatami == WhatAmI::Peer && !hat!(tables).full_net(WhatAmI::Peer) { + res_mut.context_mut().client_query_route = Some(self.compute_query_route( + tables, + &mut expr, + NodeId::default(), + WhatAmI::Client, + )); + res_mut.context_mut().peer_query_route = Some(self.compute_query_route( + tables, + &mut expr, + NodeId::default(), + WhatAmI::Peer, + )); + } + if tables.whatami == WhatAmI::Client { + res_mut.context_mut().client_query_route = Some(self.compute_query_route( + tables, + &mut expr, + NodeId::default(), + WhatAmI::Client, + )); + } + } + } +} diff --git a/zenoh/src/net/routing/router.rs b/zenoh/src/net/routing/router.rs index db7d1170bb..0fb23b6b26 100644 --- a/zenoh/src/net/routing/router.rs +++ b/zenoh/src/net/routing/router.rs @@ -53,7 +53,7 @@ impl Router { // whatami, tables: Arc::new(TablesLock { tables: RwLock::new(Tables::new(zid, whatami, hlc, config)), - ctrl_lock: Mutex::new(hat::new_hat(whatami)), + ctrl_lock: Mutex::new(hat::new_hat(whatami, config)), queries_lock: RwLock::new(()), }), } From 0096e59d4962ddc25bf2fa8878826f001fd7246f Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Thu, 7 Dec 2023 14:55:56 +0100 Subject: [PATCH 020/122] Simplify HatTrait init function --- zenoh/src/net/routing/hat/client/mod.rs | 31 ++++++++++++------- .../src/net/routing/hat/linkstate_peer/mod.rs | 31 ++++++++++++------- zenoh/src/net/routing/hat/mod.rs | 14 ++------- zenoh/src/net/routing/hat/p2p_peer/mod.rs | 31 ++++++++++++------- zenoh/src/net/routing/hat/router/mod.rs | 31 ++++++++++++------- zenoh/src/net/routing/router.rs | 24 ++------------ zenoh/src/net/runtime/mod.rs | 26 ++-------------- 7 files changed, 83 insertions(+), 105 deletions(-) diff --git a/zenoh/src/net/routing/hat/client/mod.rs b/zenoh/src/net/routing/hat/client/mod.rs index 4ae063003e..3cf318831b 100644 --- a/zenoh/src/net/routing/hat/client/mod.rs +++ b/zenoh/src/net/routing/hat/client/mod.rs @@ -46,7 +46,7 @@ use std::{ hash::Hasher, sync::Arc, }; -use zenoh_config::{WhatAmI, WhatAmIMatcher, ZenohId}; +use zenoh_config::{unwrap_or_default, ModeDependent, WhatAmI, WhatAmIMatcher, ZenohId}; use zenoh_protocol::{ common::ZExtBody, network::{declare::queryable::ext::QueryableInfo, oam::id::OAM_LINKSTATE, Oam}, @@ -271,17 +271,24 @@ impl HatTables { pub(crate) struct HatCode {} impl HatBaseTrait for HatCode { - fn init( - &self, - tables: &mut Tables, - runtime: Runtime, - router_full_linkstate: bool, - peer_full_linkstate: bool, - router_peers_failover_brokering: bool, - gossip: bool, - gossip_multihop: bool, - autoconnect: WhatAmIMatcher, - ) { + fn init(&self, tables: &mut Tables, runtime: Runtime) { + let config = runtime.config.lock(); + let whatami = tables.whatami; + let gossip = unwrap_or_default!(config.scouting().gossip().enabled()); + let gossip_multihop = unwrap_or_default!(config.scouting().gossip().multihop()); + let autoconnect = if gossip { + *unwrap_or_default!(config.scouting().gossip().autoconnect().get(whatami)) + } else { + WhatAmIMatcher::empty() + }; + + let router_full_linkstate = whatami == WhatAmI::Router; + let 
peer_full_linkstate = whatami != WhatAmI::Client + && unwrap_or_default!(config.routing().peer().mode()) == *"linkstate"; + let router_peers_failover_brokering = + unwrap_or_default!(config.routing().router().peers_failover_brokering()); + drop(config); + if router_full_linkstate | gossip { hat_mut!(tables).routers_net = Some(Network::new( "[Routers network]".to_string(), diff --git a/zenoh/src/net/routing/hat/linkstate_peer/mod.rs b/zenoh/src/net/routing/hat/linkstate_peer/mod.rs index 4ae063003e..3cf318831b 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/mod.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/mod.rs @@ -46,7 +46,7 @@ use std::{ hash::Hasher, sync::Arc, }; -use zenoh_config::{WhatAmI, WhatAmIMatcher, ZenohId}; +use zenoh_config::{unwrap_or_default, ModeDependent, WhatAmI, WhatAmIMatcher, ZenohId}; use zenoh_protocol::{ common::ZExtBody, network::{declare::queryable::ext::QueryableInfo, oam::id::OAM_LINKSTATE, Oam}, @@ -271,17 +271,24 @@ impl HatTables { pub(crate) struct HatCode {} impl HatBaseTrait for HatCode { - fn init( - &self, - tables: &mut Tables, - runtime: Runtime, - router_full_linkstate: bool, - peer_full_linkstate: bool, - router_peers_failover_brokering: bool, - gossip: bool, - gossip_multihop: bool, - autoconnect: WhatAmIMatcher, - ) { + fn init(&self, tables: &mut Tables, runtime: Runtime) { + let config = runtime.config.lock(); + let whatami = tables.whatami; + let gossip = unwrap_or_default!(config.scouting().gossip().enabled()); + let gossip_multihop = unwrap_or_default!(config.scouting().gossip().multihop()); + let autoconnect = if gossip { + *unwrap_or_default!(config.scouting().gossip().autoconnect().get(whatami)) + } else { + WhatAmIMatcher::empty() + }; + + let router_full_linkstate = whatami == WhatAmI::Router; + let peer_full_linkstate = whatami != WhatAmI::Client + && unwrap_or_default!(config.routing().peer().mode()) == *"linkstate"; + let router_peers_failover_brokering = + unwrap_or_default!(config.routing().router().peers_failover_brokering()); + drop(config); + if router_full_linkstate | gossip { hat_mut!(tables).routers_net = Some(Network::new( "[Routers network]".to_string(), diff --git a/zenoh/src/net/routing/hat/mod.rs b/zenoh/src/net/routing/hat/mod.rs index 01da9d34bf..69b0ecf877 100644 --- a/zenoh/src/net/routing/hat/mod.rs +++ b/zenoh/src/net/routing/hat/mod.rs @@ -27,7 +27,7 @@ use super::dispatcher::{ use crate::runtime::Runtime; use std::{any::Any, sync::Arc}; use zenoh_buffers::ZBuf; -use zenoh_config::{unwrap_or_default, Config, WhatAmI, WhatAmIMatcher}; +use zenoh_config::{unwrap_or_default, Config, WhatAmI}; use zenoh_protocol::{ core::WireExpr, network::{ @@ -53,17 +53,7 @@ pub(crate) trait HatBaseTrait { fn as_any(&self) -> &dyn Any; #[allow(clippy::too_many_arguments)] - fn init( - &self, - tables: &mut Tables, - runtime: Runtime, - router_full_linkstate: bool, - peer_full_linkstate: bool, - router_peers_failover_brokering: bool, - gossip: bool, - gossip_multihop: bool, - autoconnect: WhatAmIMatcher, - ); + fn init(&self, tables: &mut Tables, runtime: Runtime); fn new_tables(&self, router_peers_failover_brokering: bool) -> Box; diff --git a/zenoh/src/net/routing/hat/p2p_peer/mod.rs b/zenoh/src/net/routing/hat/p2p_peer/mod.rs index 4ae063003e..3cf318831b 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/mod.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/mod.rs @@ -46,7 +46,7 @@ use std::{ hash::Hasher, sync::Arc, }; -use zenoh_config::{WhatAmI, WhatAmIMatcher, ZenohId}; +use zenoh_config::{unwrap_or_default, ModeDependent, 
WhatAmI, WhatAmIMatcher, ZenohId}; use zenoh_protocol::{ common::ZExtBody, network::{declare::queryable::ext::QueryableInfo, oam::id::OAM_LINKSTATE, Oam}, @@ -271,17 +271,24 @@ impl HatTables { pub(crate) struct HatCode {} impl HatBaseTrait for HatCode { - fn init( - &self, - tables: &mut Tables, - runtime: Runtime, - router_full_linkstate: bool, - peer_full_linkstate: bool, - router_peers_failover_brokering: bool, - gossip: bool, - gossip_multihop: bool, - autoconnect: WhatAmIMatcher, - ) { + fn init(&self, tables: &mut Tables, runtime: Runtime) { + let config = runtime.config.lock(); + let whatami = tables.whatami; + let gossip = unwrap_or_default!(config.scouting().gossip().enabled()); + let gossip_multihop = unwrap_or_default!(config.scouting().gossip().multihop()); + let autoconnect = if gossip { + *unwrap_or_default!(config.scouting().gossip().autoconnect().get(whatami)) + } else { + WhatAmIMatcher::empty() + }; + + let router_full_linkstate = whatami == WhatAmI::Router; + let peer_full_linkstate = whatami != WhatAmI::Client + && unwrap_or_default!(config.routing().peer().mode()) == *"linkstate"; + let router_peers_failover_brokering = + unwrap_or_default!(config.routing().router().peers_failover_brokering()); + drop(config); + if router_full_linkstate | gossip { hat_mut!(tables).routers_net = Some(Network::new( "[Routers network]".to_string(), diff --git a/zenoh/src/net/routing/hat/router/mod.rs b/zenoh/src/net/routing/hat/router/mod.rs index 4ae063003e..3cf318831b 100644 --- a/zenoh/src/net/routing/hat/router/mod.rs +++ b/zenoh/src/net/routing/hat/router/mod.rs @@ -46,7 +46,7 @@ use std::{ hash::Hasher, sync::Arc, }; -use zenoh_config::{WhatAmI, WhatAmIMatcher, ZenohId}; +use zenoh_config::{unwrap_or_default, ModeDependent, WhatAmI, WhatAmIMatcher, ZenohId}; use zenoh_protocol::{ common::ZExtBody, network::{declare::queryable::ext::QueryableInfo, oam::id::OAM_LINKSTATE, Oam}, @@ -271,17 +271,24 @@ impl HatTables { pub(crate) struct HatCode {} impl HatBaseTrait for HatCode { - fn init( - &self, - tables: &mut Tables, - runtime: Runtime, - router_full_linkstate: bool, - peer_full_linkstate: bool, - router_peers_failover_brokering: bool, - gossip: bool, - gossip_multihop: bool, - autoconnect: WhatAmIMatcher, - ) { + fn init(&self, tables: &mut Tables, runtime: Runtime) { + let config = runtime.config.lock(); + let whatami = tables.whatami; + let gossip = unwrap_or_default!(config.scouting().gossip().enabled()); + let gossip_multihop = unwrap_or_default!(config.scouting().gossip().multihop()); + let autoconnect = if gossip { + *unwrap_or_default!(config.scouting().gossip().autoconnect().get(whatami)) + } else { + WhatAmIMatcher::empty() + }; + + let router_full_linkstate = whatami == WhatAmI::Router; + let peer_full_linkstate = whatami != WhatAmI::Client + && unwrap_or_default!(config.routing().peer().mode()) == *"linkstate"; + let router_peers_failover_brokering = + unwrap_or_default!(config.routing().router().peers_failover_brokering()); + drop(config); + if router_full_linkstate | gossip { hat_mut!(tables).routers_net = Some(Network::new( "[Routers network]".to_string(), diff --git a/zenoh/src/net/routing/router.rs b/zenoh/src/net/routing/router.rs index 0fb23b6b26..c97ce8225e 100644 --- a/zenoh/src/net/routing/router.rs +++ b/zenoh/src/net/routing/router.rs @@ -34,7 +34,7 @@ use std::sync::{Mutex, RwLock}; use uhlc::HLC; use zenoh_config::Config; use zenoh_link::Link; -use zenoh_protocol::core::{WhatAmI, WhatAmIMatcher, ZenohId}; +use zenoh_protocol::core::{WhatAmI, ZenohId}; use 
zenoh_protocol::network::{NetworkBody, NetworkMessage}; use zenoh_transport::{ TransportMulticast, TransportPeer, TransportPeerEventHandler, TransportUnicast, @@ -60,28 +60,10 @@ impl Router { } #[allow(clippy::too_many_arguments)] - pub fn init_link_state( - &mut self, - runtime: Runtime, - router_full_linkstate: bool, - peer_full_linkstate: bool, - router_peers_failover_brokering: bool, - gossip: bool, - gossip_multihop: bool, - autoconnect: WhatAmIMatcher, - ) { + pub fn init_link_state(&mut self, runtime: Runtime) { let ctrl_lock = zlock!(self.tables.ctrl_lock); let mut tables = zwrite!(self.tables.tables); - ctrl_lock.init( - &mut tables, - runtime, - router_full_linkstate, - peer_full_linkstate, - router_peers_failover_brokering, - gossip, - gossip_multihop, - autoconnect, - ) + ctrl_lock.init(&mut tables, runtime) } pub(crate) fn new_primitives( diff --git a/zenoh/src/net/runtime/mod.rs b/zenoh/src/net/runtime/mod.rs index d7da114442..a3574914ea 100644 --- a/zenoh/src/net/runtime/mod.rs +++ b/zenoh/src/net/runtime/mod.rs @@ -35,7 +35,7 @@ use stop_token::future::FutureExt; use stop_token::{StopSource, TimedOutError}; use uhlc::{HLCBuilder, HLC}; use zenoh_link::{EndPoint, Link}; -use zenoh_protocol::core::{whatami::WhatAmIMatcher, Locator, WhatAmI, ZenohId}; +use zenoh_protocol::core::{Locator, WhatAmI, ZenohId}; use zenoh_protocol::network::NetworkMessage; use zenoh_result::{bail, ZResult}; use zenoh_sync::get_mut_unchecked; @@ -93,20 +93,6 @@ impl Runtime { let hlc = (*unwrap_or_default!(config.timestamping().enabled().get(whatami))) .then(|| Arc::new(HLCBuilder::new().with_id(uhlc::ID::from(&zid)).build())); - let gossip = unwrap_or_default!(config.scouting().gossip().enabled()); - let gossip_multihop = unwrap_or_default!(config.scouting().gossip().multihop()); - let autoconnect = if gossip { - *unwrap_or_default!(config.scouting().gossip().autoconnect().get(whatami)) - } else { - WhatAmIMatcher::empty() - }; - - let router_link_state = whatami == WhatAmI::Router; - let peer_link_state = whatami != WhatAmI::Client - && unwrap_or_default!(config.routing().peer().mode()) == *"linkstate"; - let router_peers_failover_brokering = - unwrap_or_default!(config.routing().router().peers_failover_brokering()); - let router = Arc::new(Router::new(zid, whatami, hlc.clone(), &config)); let handler = Arc::new(RuntimeTransportEventHandler { @@ -137,15 +123,7 @@ impl Runtime { }), }; *handler.runtime.write().unwrap() = Some(runtime.clone()); - get_mut_unchecked(&mut runtime.router.clone()).init_link_state( - runtime.clone(), - router_link_state, - peer_link_state, - router_peers_failover_brokering, - gossip, - gossip_multihop, - autoconnect, - ); + get_mut_unchecked(&mut runtime.router.clone()).init_link_state(runtime.clone()); let receiver = config.subscribe(); runtime.spawn({ From d4fac3d8da500840224d4f2f7894e5efc064c9af Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Mon, 11 Dec 2023 10:05:30 +0100 Subject: [PATCH 021/122] Hats cleanup --- zenoh/src/net/routing/hat/client/mod.rs | 615 +------ zenoh/src/net/routing/hat/client/network.rs | 1007 ------------ zenoh/src/net/routing/hat/client/pubsub.rs | 1307 +-------------- zenoh/src/net/routing/hat/client/queries.rs | 1420 +---------------- .../src/net/routing/hat/linkstate_peer/mod.rs | 468 +----- .../net/routing/hat/linkstate_peer/network.rs | 27 +- .../net/routing/hat/linkstate_peer/pubsub.rs | 958 ++--------- .../net/routing/hat/linkstate_peer/queries.rs | 1049 ++---------- zenoh/src/net/routing/hat/p2p_peer/gossip.rs | 564 +++++++ 
zenoh/src/net/routing/hat/p2p_peer/mod.rs | 580 +------ zenoh/src/net/routing/hat/p2p_peer/network.rs | 1007 ------------ zenoh/src/net/routing/hat/p2p_peer/pubsub.rs | 1307 +-------------- zenoh/src/net/routing/hat/p2p_peer/queries.rs | 1420 +---------------- zenoh/src/net/routing/hat/router/mod.rs | 6 +- zenoh/src/net/routing/hat/router/pubsub.rs | 533 ++----- zenoh/src/net/routing/hat/router/queries.rs | 543 ++----- 16 files changed, 1477 insertions(+), 11334 deletions(-) delete mode 100644 zenoh/src/net/routing/hat/client/network.rs create mode 100644 zenoh/src/net/routing/hat/p2p_peer/gossip.rs delete mode 100644 zenoh/src/net/routing/hat/p2p_peer/network.rs diff --git a/zenoh/src/net/routing/hat/client/mod.rs b/zenoh/src/net/routing/hat/client/mod.rs index 3cf318831b..615954da60 100644 --- a/zenoh/src/net/routing/hat/client/mod.rs +++ b/zenoh/src/net/routing/hat/client/mod.rs @@ -17,14 +17,11 @@ //! This module is intended for Zenoh's internal use. //! //! [Click here for Zenoh's documentation](../zenoh/index.html) +use crate::{net::routing::dispatcher::face::Face, runtime::Runtime}; + use self::{ - network::{shared_nodes, Network}, - pubsub::{ - pubsub_linkstate_change, pubsub_new_face, pubsub_remove_node, undeclare_client_subscription, - }, - queries::{ - queries_linkstate_change, queries_new_face, queries_remove_node, undeclare_client_queryable, - }, + pubsub::{pubsub_new_face, undeclare_client_subscription}, + queries::{queries_new_face, undeclare_client_queryable}, }; use super::{ super::dispatcher::{ @@ -33,29 +30,17 @@ use super::{ }, HatBaseTrait, HatTrait, }; -use crate::{ - net::{ - codec::Zenoh080Routing, protocol::linkstate::LinkStateList, routing::dispatcher::face::Face, - }, - runtime::Runtime, -}; -use async_std::task::JoinHandle; use std::{ any::Any, - collections::{hash_map::DefaultHasher, HashMap, HashSet}, - hash::Hasher, + collections::{HashMap, HashSet}, sync::Arc, }; -use zenoh_config::{unwrap_or_default, ModeDependent, WhatAmI, WhatAmIMatcher, ZenohId}; -use zenoh_protocol::{ - common::ZExtBody, - network::{declare::queryable::ext::QueryableInfo, oam::id::OAM_LINKSTATE, Oam}, -}; +use zenoh_protocol::network::declare::queryable::ext::QueryableInfo; +use zenoh_protocol::network::Oam; use zenoh_result::ZResult; use zenoh_sync::get_mut_unchecked; use zenoh_transport::TransportUnicast; -mod network; mod pubsub; mod queries; @@ -63,38 +48,6 @@ zconfigurable! { static ref TREES_COMPUTATION_DELAY: u64 = 100; } -macro_rules! hat { - ($t:expr) => { - $t.hat.downcast_ref::().unwrap() - }; -} -use hat; - -macro_rules! hat_mut { - ($t:expr) => { - $t.hat.downcast_mut::().unwrap() - }; -} -use hat_mut; - -macro_rules! res_hat { - ($r:expr) => { - $r.context().hat.downcast_ref::().unwrap() - }; -} -use res_hat; - -macro_rules! res_hat_mut { - ($r:expr) => { - get_mut_unchecked($r) - .context_mut() - .hat - .downcast_mut::() - .unwrap() - }; -} -use res_hat_mut; - macro_rules! face_hat { ($f:expr) => { $f.hat.downcast_ref::().unwrap() @@ -109,220 +62,21 @@ macro_rules! 
face_hat_mut { } use face_hat_mut; -struct HatTables { - router_subs: HashSet>, - peer_subs: HashSet>, - router_qabls: HashSet>, - peer_qabls: HashSet>, - routers_net: Option, - peers_net: Option, - shared_nodes: Vec, - routers_trees_task: Option>, - peers_trees_task: Option>, - router_peers_failover_brokering: bool, -} +struct HatTables {} impl HatTables { - fn new(router_peers_failover_brokering: bool) -> Self { - Self { - router_subs: HashSet::new(), - peer_subs: HashSet::new(), - router_qabls: HashSet::new(), - peer_qabls: HashSet::new(), - routers_net: None, - peers_net: None, - shared_nodes: vec![], - routers_trees_task: None, - peers_trees_task: None, - router_peers_failover_brokering, - } - } - - #[inline] - fn get_net(&self, net_type: WhatAmI) -> Option<&Network> { - match net_type { - WhatAmI::Router => self.routers_net.as_ref(), - WhatAmI::Peer => self.peers_net.as_ref(), - _ => None, - } - } - - #[inline] - fn full_net(&self, net_type: WhatAmI) -> bool { - match net_type { - WhatAmI::Router => self - .routers_net - .as_ref() - .map(|net| net.full_linkstate) - .unwrap_or(false), - WhatAmI::Peer => self - .peers_net - .as_ref() - .map(|net| net.full_linkstate) - .unwrap_or(false), - _ => false, - } - } - - #[inline] - fn get_router_links(&self, peer: ZenohId) -> impl Iterator + '_ { - self.peers_net - .as_ref() - .unwrap() - .get_links(peer) - .iter() - .filter(move |nid| { - if let Some(node) = self.routers_net.as_ref().unwrap().get_node(nid) { - node.whatami.unwrap_or(WhatAmI::Router) == WhatAmI::Router - } else { - false - } - }) - } - - #[inline] - fn elect_router<'a>( - &'a self, - self_zid: &'a ZenohId, - key_expr: &str, - mut routers: impl Iterator, - ) -> &'a ZenohId { - match routers.next() { - None => self_zid, - Some(router) => { - let hash = |r: &ZenohId| { - let mut hasher = DefaultHasher::new(); - for b in key_expr.as_bytes() { - hasher.write_u8(*b); - } - for b in &r.to_le_bytes()[..r.size()] { - hasher.write_u8(*b); - } - hasher.finish() - }; - let mut res = router; - let mut h = None; - for router2 in routers { - let h2 = hash(router2); - if h2 > *h.get_or_insert_with(|| hash(res)) { - res = router2; - h = Some(h2); - } - } - res - } - } - } - - #[inline] - fn failover_brokering_to(source_links: &[ZenohId], dest: ZenohId) -> bool { - // if source_links is empty then gossip is probably disabled in source peer - !source_links.is_empty() && !source_links.contains(&dest) - } - - #[inline] - fn failover_brokering(&self, peer1: ZenohId, peer2: ZenohId) -> bool { - self.router_peers_failover_brokering - && self - .peers_net - .as_ref() - .map(|net| HatTables::failover_brokering_to(net.get_links(peer1), peer2)) - .unwrap_or(false) - } - - fn schedule_compute_trees(&mut self, tables_ref: Arc, net_type: WhatAmI) { - log::trace!("Schedule computations"); - if (net_type == WhatAmI::Router && self.routers_trees_task.is_none()) - || (net_type == WhatAmI::Peer && self.peers_trees_task.is_none()) - { - let task = Some(async_std::task::spawn(async move { - async_std::task::sleep(std::time::Duration::from_millis(*TREES_COMPUTATION_DELAY)) - .await; - let mut tables = zwrite!(tables_ref.tables); - - log::trace!("Compute trees"); - let new_childs = match net_type { - WhatAmI::Router => hat_mut!(tables) - .routers_net - .as_mut() - .unwrap() - .compute_trees(), - _ => hat_mut!(tables).peers_net.as_mut().unwrap().compute_trees(), - }; - - log::trace!("Compute routes"); - pubsub::pubsub_tree_change(&mut tables, &new_childs, net_type); - queries::queries_tree_change(&mut tables, 
&new_childs, net_type); - - log::trace!("Computations completed"); - match net_type { - WhatAmI::Router => hat_mut!(tables).routers_trees_task = None, - _ => hat_mut!(tables).peers_trees_task = None, - }; - })); - match net_type { - WhatAmI::Router => self.routers_trees_task = task, - _ => self.peers_trees_task = task, - }; - } + fn new() -> Self { + Self {} } } pub(crate) struct HatCode {} impl HatBaseTrait for HatCode { - fn init(&self, tables: &mut Tables, runtime: Runtime) { - let config = runtime.config.lock(); - let whatami = tables.whatami; - let gossip = unwrap_or_default!(config.scouting().gossip().enabled()); - let gossip_multihop = unwrap_or_default!(config.scouting().gossip().multihop()); - let autoconnect = if gossip { - *unwrap_or_default!(config.scouting().gossip().autoconnect().get(whatami)) - } else { - WhatAmIMatcher::empty() - }; - - let router_full_linkstate = whatami == WhatAmI::Router; - let peer_full_linkstate = whatami != WhatAmI::Client - && unwrap_or_default!(config.routing().peer().mode()) == *"linkstate"; - let router_peers_failover_brokering = - unwrap_or_default!(config.routing().router().peers_failover_brokering()); - drop(config); - - if router_full_linkstate | gossip { - hat_mut!(tables).routers_net = Some(Network::new( - "[Routers network]".to_string(), - tables.zid, - runtime.clone(), - router_full_linkstate, - router_peers_failover_brokering, - gossip, - gossip_multihop, - autoconnect, - )); - } - if peer_full_linkstate | gossip { - hat_mut!(tables).peers_net = Some(Network::new( - "[Peers network]".to_string(), - tables.zid, - runtime, - peer_full_linkstate, - router_peers_failover_brokering, - gossip, - gossip_multihop, - autoconnect, - )); - } - if router_full_linkstate && peer_full_linkstate { - hat_mut!(tables).shared_nodes = shared_nodes( - hat!(tables).routers_net.as_ref().unwrap(), - hat!(tables).peers_net.as_ref().unwrap(), - ); - } - } + fn init(&self, _tables: &mut Tables, _runtime: Runtime) {} - fn new_tables(&self, router_peers_failover_brokering: bool) -> Box { - Box::new(HatTables::new(router_peers_failover_brokering)) + fn new_tables(&self, _router_peers_failover_brokering: bool) -> Box { + Box::new(HatTables::new()) } fn new_face(&self) -> Box { @@ -347,52 +101,12 @@ impl HatBaseTrait for HatCode { fn new_transport_unicast_face( &self, tables: &mut Tables, - tables_ref: &Arc, + _tables_ref: &Arc, face: &mut Face, - transport: &TransportUnicast, + _transport: &TransportUnicast, ) -> ZResult<()> { - let link_id = match (tables.whatami, face.state.whatami) { - (WhatAmI::Router, WhatAmI::Router) => hat_mut!(tables) - .routers_net - .as_mut() - .unwrap() - .add_link(transport.clone()), - (WhatAmI::Router, WhatAmI::Peer) - | (WhatAmI::Peer, WhatAmI::Router) - | (WhatAmI::Peer, WhatAmI::Peer) => { - if let Some(net) = hat_mut!(tables).peers_net.as_mut() { - net.add_link(transport.clone()) - } else { - 0 - } - } - _ => 0, - }; - - if hat!(tables).full_net(WhatAmI::Router) && hat!(tables).full_net(WhatAmI::Peer) { - hat_mut!(tables).shared_nodes = shared_nodes( - hat!(tables).routers_net.as_ref().unwrap(), - hat!(tables).peers_net.as_ref().unwrap(), - ); - } - - face_hat_mut!(&mut face.state).link_id = link_id; pubsub_new_face(tables, &mut face.state); queries_new_face(tables, &mut face.state); - - match (tables.whatami, face.state.whatami) { - (WhatAmI::Router, WhatAmI::Router) => { - hat_mut!(tables).schedule_compute_trees(tables_ref.clone(), WhatAmI::Router); - } - (WhatAmI::Router, WhatAmI::Peer) - | (WhatAmI::Peer, WhatAmI::Router) - | 
(WhatAmI::Peer, WhatAmI::Peer) => { - if hat_mut!(tables).full_net(WhatAmI::Peer) { - hat_mut!(tables).schedule_compute_trees(tables_ref.clone(), WhatAmI::Peer); - } - } - _ => (), - } Ok(()) } @@ -500,200 +214,29 @@ impl HatBaseTrait for HatCode { fn handle_oam( &self, - tables: &mut Tables, - tables_ref: &Arc, - oam: Oam, - transport: &TransportUnicast, + _tables: &mut Tables, + _tables_ref: &Arc, + _oam: Oam, + _transport: &TransportUnicast, ) -> ZResult<()> { - if oam.id == OAM_LINKSTATE { - if let ZExtBody::ZBuf(buf) = oam.body { - if let Ok(zid) = transport.get_zid() { - use zenoh_buffers::reader::HasReader; - use zenoh_codec::RCodec; - let codec = Zenoh080Routing::new(); - let mut reader = buf.reader(); - let list: LinkStateList = codec.read(&mut reader).unwrap(); - - let whatami = transport.get_whatami()?; - match (tables.whatami, whatami) { - (WhatAmI::Router, WhatAmI::Router) => { - for (_, removed_node) in hat_mut!(tables) - .routers_net - .as_mut() - .unwrap() - .link_states(list.link_states, zid) - .removed_nodes - { - pubsub_remove_node(tables, &removed_node.zid, WhatAmI::Router); - queries_remove_node(tables, &removed_node.zid, WhatAmI::Router); - } - - if hat!(tables).full_net(WhatAmI::Peer) { - hat_mut!(tables).shared_nodes = shared_nodes( - hat!(tables).routers_net.as_ref().unwrap(), - hat!(tables).peers_net.as_ref().unwrap(), - ); - } - - hat_mut!(tables) - .schedule_compute_trees(tables_ref.clone(), WhatAmI::Router); - } - (WhatAmI::Router, WhatAmI::Peer) - | (WhatAmI::Peer, WhatAmI::Router) - | (WhatAmI::Peer, WhatAmI::Peer) => { - if let Some(net) = hat_mut!(tables).peers_net.as_mut() { - let changes = net.link_states(list.link_states, zid); - if hat!(tables).full_net(WhatAmI::Peer) { - for (_, removed_node) in changes.removed_nodes { - pubsub_remove_node( - tables, - &removed_node.zid, - WhatAmI::Peer, - ); - queries_remove_node( - tables, - &removed_node.zid, - WhatAmI::Peer, - ); - } - - if tables.whatami == WhatAmI::Router { - hat_mut!(tables).shared_nodes = shared_nodes( - hat!(tables).routers_net.as_ref().unwrap(), - hat!(tables).peers_net.as_ref().unwrap(), - ); - } - - hat_mut!(tables) - .schedule_compute_trees(tables_ref.clone(), WhatAmI::Peer); - } else { - for (_, updated_node) in changes.updated_nodes { - pubsub_linkstate_change( - tables, - &updated_node.zid, - &updated_node.links, - ); - queries_linkstate_change( - tables, - &updated_node.zid, - &updated_node.links, - ); - } - } - } - } - _ => (), - }; - } - } - } - Ok(()) } fn map_routing_context( &self, - tables: &Tables, - face: &FaceState, - routing_context: NodeId, + _tables: &Tables, + _face: &FaceState, + _routing_context: NodeId, ) -> NodeId { - match tables.whatami { - WhatAmI::Router => match face.whatami { - WhatAmI::Router => hat!(tables) - .routers_net - .as_ref() - .unwrap() - .get_local_context(routing_context, face_hat!(face).link_id), - WhatAmI::Peer => { - if hat!(tables).full_net(WhatAmI::Peer) { - hat!(tables) - .peers_net - .as_ref() - .unwrap() - .get_local_context(routing_context, face_hat!(face).link_id) - } else { - 0 - } - } - _ => 0, - }, - WhatAmI::Peer => { - if hat!(tables).full_net(WhatAmI::Peer) { - hat!(tables) - .peers_net - .as_ref() - .unwrap() - .get_local_context(routing_context, face_hat!(face).link_id) - } else { - 0 - } - } - _ => 0, - } + 0 } fn closing( &self, - tables: &mut Tables, - tables_ref: &Arc, - transport: &TransportUnicast, + _tables: &mut Tables, + _tables_ref: &Arc, + _transport: &TransportUnicast, ) -> ZResult<()> { - match (transport.get_zid(), 
transport.get_whatami()) { - (Ok(zid), Ok(whatami)) => { - match (tables.whatami, whatami) { - (WhatAmI::Router, WhatAmI::Router) => { - for (_, removed_node) in hat_mut!(tables) - .routers_net - .as_mut() - .unwrap() - .remove_link(&zid) - { - pubsub_remove_node(tables, &removed_node.zid, WhatAmI::Router); - queries_remove_node(tables, &removed_node.zid, WhatAmI::Router); - } - - if hat!(tables).full_net(WhatAmI::Peer) { - hat_mut!(tables).shared_nodes = shared_nodes( - hat!(tables).routers_net.as_ref().unwrap(), - hat!(tables).peers_net.as_ref().unwrap(), - ); - } - - hat_mut!(tables) - .schedule_compute_trees(tables_ref.clone(), WhatAmI::Router); - } - (WhatAmI::Router, WhatAmI::Peer) - | (WhatAmI::Peer, WhatAmI::Router) - | (WhatAmI::Peer, WhatAmI::Peer) => { - if hat!(tables).full_net(WhatAmI::Peer) { - for (_, removed_node) in hat_mut!(tables) - .peers_net - .as_mut() - .unwrap() - .remove_link(&zid) - { - pubsub_remove_node(tables, &removed_node.zid, WhatAmI::Peer); - queries_remove_node(tables, &removed_node.zid, WhatAmI::Peer); - } - - if tables.whatami == WhatAmI::Router { - hat_mut!(tables).shared_nodes = shared_nodes( - hat!(tables).routers_net.as_ref().unwrap(), - hat!(tables).peers_net.as_ref().unwrap(), - ); - } - - hat_mut!(tables) - .schedule_compute_trees(tables_ref.clone(), WhatAmI::Peer); - } else if let Some(net) = hat_mut!(tables).peers_net.as_mut() { - net.remove_link(&zid); - } - } - _ => (), - }; - } - (_, _) => log::error!("Closed transport in session closing!"), - } Ok(()) } @@ -702,72 +245,35 @@ impl HatBaseTrait for HatCode { } #[inline] - fn ingress_filter(&self, tables: &Tables, face: &FaceState, expr: &mut RoutingExpr) -> bool { - tables.whatami != WhatAmI::Router - || face.whatami != WhatAmI::Peer - || hat!(tables).peers_net.is_none() - || tables.zid - == *hat!(tables).elect_router( - &tables.zid, - expr.full_expr(), - hat!(tables).get_router_links(face.zid), - ) + fn ingress_filter(&self, _tables: &Tables, _face: &FaceState, _expr: &mut RoutingExpr) -> bool { + true } #[inline] fn egress_filter( &self, - tables: &Tables, + _tables: &Tables, src_face: &FaceState, out_face: &Arc, - expr: &mut RoutingExpr, + _expr: &mut RoutingExpr, ) -> bool { - if src_face.id != out_face.id + src_face.id != out_face.id && match (src_face.mcast_group.as_ref(), out_face.mcast_group.as_ref()) { (Some(l), Some(r)) => l != r, _ => true, } - { - let dst_master = tables.whatami != WhatAmI::Router - || out_face.whatami != WhatAmI::Peer - || hat!(tables).peers_net.is_none() - || tables.zid - == *hat!(tables).elect_router( - &tables.zid, - expr.full_expr(), - hat!(tables).get_router_links(out_face.zid), - ); - - return dst_master - && (src_face.whatami != WhatAmI::Peer - || out_face.whatami != WhatAmI::Peer - || hat!(tables).full_net(WhatAmI::Peer) - || hat!(tables).failover_brokering(src_face.zid, out_face.zid)); - } - false } } -struct HatContext { - router_subs: HashSet, - peer_subs: HashSet, - router_qabls: HashMap, - peer_qabls: HashMap, -} +struct HatContext {} impl HatContext { fn new() -> Self { - Self { - router_subs: HashSet::new(), - peer_subs: HashSet::new(), - router_qabls: HashMap::new(), - peer_qabls: HashMap::new(), - } + Self {} } } struct HatFace { - link_id: usize, local_subs: HashSet>, remote_subs: HashSet>, local_qabls: HashMap, QueryableInfo>, @@ -777,7 +283,6 @@ struct HatFace { impl HatFace { fn new() -> Self { Self { - link_id: 0, local_subs: HashSet::new(), remote_subs: HashSet::new(), local_qabls: HashMap::new(), @@ -786,58 +291,4 @@ impl HatFace { } } -fn 
get_router(tables: &Tables, face: &Arc, nodeid: NodeId) -> Option { - match hat!(tables) - .routers_net - .as_ref() - .unwrap() - .get_link(face_hat!(face).link_id) - { - Some(link) => match link.get_zid(&(nodeid as u64)) { - Some(router) => Some(*router), - None => { - log::error!( - "Received router declaration with unknown routing context id {}", - nodeid - ); - None - } - }, - None => { - log::error!( - "Could not find corresponding link in routers network for {}", - face - ); - None - } - } -} - -fn get_peer(tables: &Tables, face: &Arc, nodeid: NodeId) -> Option { - match hat!(tables) - .peers_net - .as_ref() - .unwrap() - .get_link(face_hat!(face).link_id) - { - Some(link) => match link.get_zid(&(nodeid as u64)) { - Some(router) => Some(*router), - None => { - log::error!( - "Received peer declaration with unknown routing context id {}", - nodeid - ); - None - } - }, - None => { - log::error!( - "Could not find corresponding link in peers network for {}", - face - ); - None - } - } -} - impl HatTrait for HatCode {} diff --git a/zenoh/src/net/routing/hat/client/network.rs b/zenoh/src/net/routing/hat/client/network.rs deleted file mode 100644 index 421850dc87..0000000000 --- a/zenoh/src/net/routing/hat/client/network.rs +++ /dev/null @@ -1,1007 +0,0 @@ -// -// Copyright (c) 2023 ZettaScale Technology -// -// This program and the accompanying materials are made available under the -// terms of the Eclipse Public License 2.0 which is available at -// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 -// which is available at https://www.apache.org/licenses/LICENSE-2.0. -// -// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 -// -// Contributors: -// ZettaScale Zenoh Team, -// -use crate::net::codec::Zenoh080Routing; -use crate::net::protocol::linkstate::{LinkState, LinkStateList}; -use crate::net::routing::dispatcher::tables::NodeId; -use crate::net::runtime::Runtime; -use async_std::task; -use petgraph::graph::NodeIndex; -use petgraph::visit::{IntoNodeReferences, VisitMap, Visitable}; -use std::convert::TryInto; -use vec_map::VecMap; -use zenoh_buffers::writer::{DidntWrite, HasWriter}; -use zenoh_buffers::ZBuf; -use zenoh_codec::WCodec; -use zenoh_link::Locator; -use zenoh_protocol::common::ZExtBody; -use zenoh_protocol::core::{WhatAmI, WhatAmIMatcher, ZenohId}; -use zenoh_protocol::network::oam::id::OAM_LINKSTATE; -use zenoh_protocol::network::{oam, NetworkBody, NetworkMessage, Oam}; -use zenoh_transport::TransportUnicast; - -#[derive(Clone)] -struct Details { - zid: bool, - locators: bool, - links: bool, -} - -#[derive(Clone)] -pub(super) struct Node { - pub(super) zid: ZenohId, - pub(super) whatami: Option, - pub(super) locators: Option>, - pub(super) sn: u64, - pub(super) links: Vec, -} - -impl std::fmt::Debug for Node { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(f, "{}", self.zid) - } -} - -pub(super) struct Link { - pub(super) transport: TransportUnicast, - zid: ZenohId, - mappings: VecMap, - local_mappings: VecMap, -} - -impl Link { - fn new(transport: TransportUnicast) -> Self { - let zid = transport.get_zid().unwrap(); - Link { - transport, - zid, - mappings: VecMap::new(), - local_mappings: VecMap::new(), - } - } - - #[inline] - pub(super) fn set_zid_mapping(&mut self, psid: u64, zid: ZenohId) { - self.mappings.insert(psid.try_into().unwrap(), zid); - } - - #[inline] - pub(super) fn get_zid(&self, psid: &u64) -> Option<&ZenohId> { - self.mappings.get((*psid).try_into().unwrap()) - } - - #[inline] - pub(super) fn 
set_local_psid_mapping(&mut self, psid: u64, local_psid: u64) { - self.local_mappings - .insert(psid.try_into().unwrap(), local_psid); - } - - #[inline] - pub(super) fn get_local_psid(&self, psid: &u64) -> Option<&u64> { - self.local_mappings.get((*psid).try_into().unwrap()) - } -} - -pub(super) struct Changes { - pub(super) updated_nodes: Vec<(NodeIndex, Node)>, - pub(super) removed_nodes: Vec<(NodeIndex, Node)>, -} - -#[derive(Clone)] -pub(super) struct Tree { - pub(super) parent: Option, - pub(super) childs: Vec, - pub(super) directions: Vec>, -} - -pub(super) struct Network { - pub(super) name: String, - pub(super) full_linkstate: bool, - pub(super) router_peers_failover_brokering: bool, - pub(super) gossip: bool, - pub(super) gossip_multihop: bool, - pub(super) autoconnect: WhatAmIMatcher, - pub(super) idx: NodeIndex, - pub(super) links: VecMap, - pub(super) trees: Vec, - pub(super) distances: Vec, - pub(super) graph: petgraph::stable_graph::StableUnGraph, - pub(super) runtime: Runtime, -} - -impl Network { - #[allow(clippy::too_many_arguments)] - pub(super) fn new( - name: String, - zid: ZenohId, - runtime: Runtime, - full_linkstate: bool, - router_peers_failover_brokering: bool, - gossip: bool, - gossip_multihop: bool, - autoconnect: WhatAmIMatcher, - ) -> Self { - let mut graph = petgraph::stable_graph::StableGraph::default(); - log::debug!("{} Add node (self) {}", name, zid); - let idx = graph.add_node(Node { - zid, - whatami: Some(runtime.whatami), - locators: None, - sn: 1, - links: vec![], - }); - Network { - name, - full_linkstate, - router_peers_failover_brokering, - gossip, - gossip_multihop, - autoconnect, - idx, - links: VecMap::new(), - trees: vec![Tree { - parent: None, - childs: vec![], - directions: vec![None], - }], - distances: vec![0.0], - graph, - runtime, - } - } - - //noinspection ALL - // pub(super) fn dot(&self) -> String { - // std::format!( - // "{:?}", - // petgraph::dot::Dot::with_config(&self.graph, &[petgraph::dot::Config::EdgeNoLabel]) - // ) - // } - - #[inline] - pub(super) fn get_node(&self, zid: &ZenohId) -> Option<&Node> { - self.graph.node_weights().find(|weight| weight.zid == *zid) - } - - #[inline] - pub(super) fn get_idx(&self, zid: &ZenohId) -> Option { - self.graph - .node_indices() - .find(|idx| self.graph[*idx].zid == *zid) - } - - #[inline] - pub(super) fn get_link(&self, id: usize) -> Option<&Link> { - self.links.get(id) - } - - #[inline] - pub(super) fn get_link_from_zid(&self, zid: &ZenohId) -> Option<&Link> { - self.links.values().find(|link| link.zid == *zid) - } - - #[inline] - pub(super) fn get_local_context(&self, context: NodeId, link_id: usize) -> NodeId { - match self.get_link(link_id) { - Some(link) => match link.get_local_psid(&(context as u64)) { - Some(psid) => (*psid).try_into().unwrap_or(0), - None => { - log::error!( - "Cannot find local psid for context {} on link {}", - context, - link_id - ); - 0 - } - }, - None => { - log::error!("Cannot find link {}", link_id); - 0 - } - } - } - - fn add_node(&mut self, node: Node) -> NodeIndex { - let zid = node.zid; - let idx = self.graph.add_node(node); - for link in self.links.values_mut() { - if let Some((psid, _)) = link.mappings.iter().find(|(_, p)| **p == zid) { - link.local_mappings.insert(psid, idx.index() as u64); - } - } - idx - } - - fn make_link_state(&self, idx: NodeIndex, details: Details) -> LinkState { - let links = if details.links { - self.graph[idx] - .links - .iter() - .filter_map(|zid| { - if let Some(idx2) = self.get_idx(zid) { - 
Some(idx2.index().try_into().unwrap()) - } else { - log::error!( - "{} Internal error building link state: cannot get index of {}", - self.name, - zid - ); - None - } - }) - .collect() - } else { - vec![] - }; - LinkState { - psid: idx.index().try_into().unwrap(), - sn: self.graph[idx].sn, - zid: if details.zid { - Some(self.graph[idx].zid) - } else { - None - }, - whatami: self.graph[idx].whatami, - locators: if details.locators { - if idx == self.idx { - Some(self.runtime.get_locators()) - } else { - self.graph[idx].locators.clone() - } - } else { - None - }, - links, - } - } - - fn make_msg(&self, idxs: Vec<(NodeIndex, Details)>) -> Result { - let mut link_states = vec![]; - for (idx, details) in idxs { - link_states.push(self.make_link_state(idx, details)); - } - let codec = Zenoh080Routing::new(); - let mut buf = ZBuf::empty(); - codec.write(&mut buf.writer(), &LinkStateList { link_states })?; - Ok(NetworkBody::OAM(Oam { - id: OAM_LINKSTATE, - body: ZExtBody::ZBuf(buf), - ext_qos: oam::ext::QoSType::oam_default(), - ext_tstamp: None, - }) - .into()) - } - - fn send_on_link(&self, idxs: Vec<(NodeIndex, Details)>, transport: &TransportUnicast) { - if let Ok(msg) = self.make_msg(idxs) { - log::trace!("{} Send to {:?} {:?}", self.name, transport.get_zid(), msg); - if let Err(e) = transport.schedule(msg) { - log::debug!("{} Error sending LinkStateList: {}", self.name, e); - } - } else { - log::error!("Failed to encode Linkstate message"); - } - } - - fn send_on_links
<P>
(&self, idxs: Vec<(NodeIndex, Details)>, mut parameters: P) - where - P: FnMut(&Link) -> bool, - { - if let Ok(msg) = self.make_msg(idxs) { - for link in self.links.values() { - if parameters(link) { - log::trace!("{} Send to {} {:?}", self.name, link.zid, msg); - if let Err(e) = link.transport.schedule(msg.clone()) { - log::debug!("{} Error sending LinkStateList: {}", self.name, e); - } - } - } - } else { - log::error!("Failed to encode Linkstate message"); - } - } - - // Indicates if locators should be included when propagating Linkstate message - // from the given node. - // Returns true if gossip is enabled and if multihop gossip is enabled or - // the node is one of self neighbours. - fn propagate_locators(&self, idx: NodeIndex) -> bool { - self.gossip - && (self.gossip_multihop - || idx == self.idx - || self.links.values().any(|link| { - self.graph - .node_weight(idx) - .map(|node| link.zid == node.zid) - .unwrap_or(true) - })) - } - - fn update_edge(&mut self, idx1: NodeIndex, idx2: NodeIndex) { - use std::hash::Hasher; - let mut hasher = std::collections::hash_map::DefaultHasher::default(); - if self.graph[idx1].zid > self.graph[idx2].zid { - hasher.write(&self.graph[idx2].zid.to_le_bytes()); - hasher.write(&self.graph[idx1].zid.to_le_bytes()); - } else { - hasher.write(&self.graph[idx1].zid.to_le_bytes()); - hasher.write(&self.graph[idx2].zid.to_le_bytes()); - } - let weight = 100.0 + ((hasher.finish() as u32) as f64) / u32::MAX as f64; - self.graph.update_edge(idx1, idx2, weight); - } - - pub(super) fn link_states(&mut self, link_states: Vec, src: ZenohId) -> Changes { - log::trace!("{} Received from {} raw: {:?}", self.name, src, link_states); - - let graph = &self.graph; - let links = &mut self.links; - - let src_link = match links.values_mut().find(|link| link.zid == src) { - Some(link) => link, - None => { - log::error!( - "{} Received LinkStateList from unknown link {}", - self.name, - src - ); - return Changes { - updated_nodes: vec![], - removed_nodes: vec![], - }; - } - }; - - // register psid<->zid mappings & apply mapping to nodes - #[allow(clippy::needless_collect)] // need to release borrow on self - let link_states = link_states - .into_iter() - .filter_map(|link_state| { - if let Some(zid) = link_state.zid { - src_link.set_zid_mapping(link_state.psid, zid); - if let Some(idx) = graph.node_indices().find(|idx| graph[*idx].zid == zid) { - src_link.set_local_psid_mapping(link_state.psid, idx.index() as u64); - } - Some(( - zid, - link_state.whatami.unwrap_or(WhatAmI::Router), - link_state.locators, - link_state.sn, - link_state.links, - )) - } else { - match src_link.get_zid(&link_state.psid) { - Some(zid) => Some(( - *zid, - link_state.whatami.unwrap_or(WhatAmI::Router), - link_state.locators, - link_state.sn, - link_state.links, - )), - None => { - log::error!( - "Received LinkState from {} with unknown node mapping {}", - src, - link_state.psid - ); - None - } - } - } - }) - .collect::>(); - - // apply psid<->zid mapping to links - let src_link = self.get_link_from_zid(&src).unwrap(); - let link_states = link_states - .into_iter() - .map(|(zid, wai, locs, sn, links)| { - let links: Vec = links - .iter() - .filter_map(|l| { - if let Some(zid) = src_link.get_zid(l) { - Some(*zid) - } else { - log::error!( - "{} Received LinkState from {} with unknown link mapping {}", - self.name, - src, - l - ); - None - } - }) - .collect(); - (zid, wai, locs, sn, links) - }) - .collect::>(); - - // log::trace!( - // "{} Received from {} mapped: {:?}", - // self.name, - // src, - // 
link_states - // ); - for link_state in &link_states { - log::trace!( - "{} Received from {} mapped: {:?}", - self.name, - src, - link_state - ); - } - - if !self.full_linkstate { - let mut changes = Changes { - updated_nodes: vec![], - removed_nodes: vec![], - }; - for (zid, whatami, locators, sn, links) in link_states.into_iter() { - let idx = match self.get_idx(&zid) { - None => { - let idx = self.add_node(Node { - zid, - whatami: Some(whatami), - locators: locators.clone(), - sn, - links, - }); - changes.updated_nodes.push((idx, self.graph[idx].clone())); - locators.is_some().then_some(idx) - } - Some(idx) => { - let node = &mut self.graph[idx]; - let oldsn = node.sn; - (oldsn < sn) - .then(|| { - node.sn = sn; - node.links = links.clone(); - changes.updated_nodes.push((idx, node.clone())); - (node.locators != locators && locators.is_some()).then(|| { - node.locators = locators.clone(); - idx - }) - }) - .flatten() - } - }; - - if self.gossip { - if let Some(idx) = idx { - if self.gossip_multihop || self.links.values().any(|link| link.zid == zid) { - self.send_on_links( - vec![( - idx, - Details { - zid: true, - locators: true, - links: false, - }, - )], - |link| link.zid != zid, - ); - } - - if !self.autoconnect.is_empty() { - // Connect discovered peers - if task::block_on(self.runtime.manager().get_transport_unicast(&zid)) - .is_none() - && self.autoconnect.matches(whatami) - { - if let Some(locators) = locators { - let runtime = self.runtime.clone(); - self.runtime.spawn(async move { - // random backoff - async_std::task::sleep(std::time::Duration::from_millis( - rand::random::() % 100, - )) - .await; - runtime.connect_peer(&zid, &locators).await; - }); - } - } - } - } - } - } - return changes; - } - - // Add nodes to graph & filter out up to date states - let mut link_states = link_states - .into_iter() - .filter_map( - |(zid, whatami, locators, sn, links)| match self.get_idx(&zid) { - Some(idx) => { - let node = &mut self.graph[idx]; - let oldsn = node.sn; - if oldsn < sn { - node.sn = sn; - node.links = links.clone(); - if locators.is_some() { - node.locators = locators; - } - if oldsn == 0 { - Some((links, idx, true)) - } else { - Some((links, idx, false)) - } - } else { - None - } - } - None => { - let node = Node { - zid, - whatami: Some(whatami), - locators, - sn, - links: links.clone(), - }; - log::debug!("{} Add node (state) {}", self.name, zid); - let idx = self.add_node(node); - Some((links, idx, true)) - } - }, - ) - .collect::, NodeIndex, bool)>>(); - - // Add/remove edges from graph - let mut reintroduced_nodes = vec![]; - for (links, idx1, _) in &link_states { - for link in links { - if let Some(idx2) = self.get_idx(link) { - if self.graph[idx2].links.contains(&self.graph[*idx1].zid) { - log::trace!( - "{} Update edge (state) {} {}", - self.name, - self.graph[*idx1].zid, - self.graph[idx2].zid - ); - self.update_edge(*idx1, idx2); - } - } else { - let node = Node { - zid: *link, - whatami: None, - locators: None, - sn: 0, - links: vec![], - }; - log::debug!("{} Add node (reintroduced) {}", self.name, link.clone()); - let idx = self.add_node(node); - reintroduced_nodes.push((vec![], idx, true)); - } - } - let mut edges = vec![]; - let mut neighbors = self.graph.neighbors_undirected(*idx1).detach(); - while let Some(edge) = neighbors.next(&self.graph) { - edges.push(edge); - } - for (eidx, idx2) in edges { - if !links.contains(&self.graph[idx2].zid) { - log::trace!( - "{} Remove edge (state) {} {}", - self.name, - self.graph[*idx1].zid, - self.graph[idx2].zid - ); - 
self.graph.remove_edge(eidx); - } - } - } - link_states.extend(reintroduced_nodes); - - let removed = self.remove_detached_nodes(); - let link_states = link_states - .into_iter() - .filter(|ls| !removed.iter().any(|(idx, _)| idx == &ls.1)) - .collect::, NodeIndex, bool)>>(); - - if !self.autoconnect.is_empty() { - // Connect discovered peers - for (_, idx, _) in &link_states { - let node = &self.graph[*idx]; - if let Some(whatami) = node.whatami { - if task::block_on(self.runtime.manager().get_transport_unicast(&node.zid)) - .is_none() - && self.autoconnect.matches(whatami) - { - if let Some(locators) = &node.locators { - let runtime = self.runtime.clone(); - let zid = node.zid; - let locators = locators.clone(); - self.runtime.spawn(async move { - // random backoff - async_std::task::sleep(std::time::Duration::from_millis( - rand::random::() % 100, - )) - .await; - runtime.connect_peer(&zid, &locators).await; - }); - } - } - } - } - } - - // Propagate link states - // Note: we need to send all states at once for each face - // to avoid premature node deletion on the other side - #[allow(clippy::type_complexity)] - if !link_states.is_empty() { - let (new_idxs, updated_idxs): ( - Vec<(Vec, NodeIndex, bool)>, - Vec<(Vec, NodeIndex, bool)>, - ) = link_states.into_iter().partition(|(_, _, new)| *new); - let new_idxs = new_idxs - .into_iter() - .map(|(_, idx1, _new_node)| { - ( - idx1, - Details { - zid: true, - locators: self.propagate_locators(idx1), - links: true, - }, - ) - }) - .collect::>(); - for link in self.links.values() { - if link.zid != src { - let updated_idxs: Vec<(NodeIndex, Details)> = updated_idxs - .clone() - .into_iter() - .filter_map(|(_, idx1, _)| { - if link.zid != self.graph[idx1].zid { - Some(( - idx1, - Details { - zid: false, - locators: self.propagate_locators(idx1), - links: true, - }, - )) - } else { - None - } - }) - .collect(); - if !new_idxs.is_empty() || !updated_idxs.is_empty() { - self.send_on_link( - [&new_idxs[..], &updated_idxs[..]].concat(), - &link.transport, - ); - } - } else if !new_idxs.is_empty() { - self.send_on_link(new_idxs.clone(), &link.transport); - } - } - } - Changes { - updated_nodes: vec![], - removed_nodes: removed, - } - } - - pub(super) fn add_link(&mut self, transport: TransportUnicast) -> usize { - let free_index = { - let mut i = 0; - while self.links.contains_key(i) { - i += 1; - } - i - }; - self.links.insert(free_index, Link::new(transport.clone())); - - let zid = transport.get_zid().unwrap(); - let whatami = transport.get_whatami().unwrap(); - - if self.full_linkstate || self.router_peers_failover_brokering { - let (idx, new) = match self.get_idx(&zid) { - Some(idx) => (idx, false), - None => { - log::debug!("{} Add node (link) {}", self.name, zid); - ( - self.add_node(Node { - zid, - whatami: Some(whatami), - locators: None, - sn: 0, - links: vec![], - }), - true, - ) - } - }; - if self.full_linkstate && self.graph[idx].links.contains(&self.graph[self.idx].zid) { - log::trace!("Update edge (link) {} {}", self.graph[self.idx].zid, zid); - self.update_edge(self.idx, idx); - } - self.graph[self.idx].links.push(zid); - self.graph[self.idx].sn += 1; - - // Send updated self linkstate on all existing links except new one - self.links - .values() - .filter(|link| { - link.zid != zid - && (self.full_linkstate - || link.transport.get_whatami().unwrap_or(WhatAmI::Peer) - == WhatAmI::Router) - }) - .for_each(|link| { - self.send_on_link( - if new || (!self.full_linkstate && !self.gossip_multihop) { - vec![ - ( - idx, - Details { - zid: 
true, - locators: false, - links: false, - }, - ), - ( - self.idx, - Details { - zid: false, - locators: self.propagate_locators(idx), - links: true, - }, - ), - ] - } else { - vec![( - self.idx, - Details { - zid: false, - locators: self.propagate_locators(idx), - links: true, - }, - )] - }, - &link.transport, - ) - }); - } - - // Send all nodes linkstate on new link - let idxs = self - .graph - .node_indices() - .filter_map(|idx| { - (self.full_linkstate - || self.gossip_multihop - || self.links.values().any(|link| link.zid == zid) - || (self.router_peers_failover_brokering - && idx == self.idx - && whatami == WhatAmI::Router)) - .then(|| { - ( - idx, - Details { - zid: true, - locators: self.propagate_locators(idx), - links: self.full_linkstate - || (self.router_peers_failover_brokering - && idx == self.idx - && whatami == WhatAmI::Router), - }, - ) - }) - }) - .collect(); - self.send_on_link(idxs, &transport); - free_index - } - - pub(super) fn remove_link(&mut self, zid: &ZenohId) -> Vec<(NodeIndex, Node)> { - log::trace!("{} remove_link {}", self.name, zid); - self.links.retain(|_, link| link.zid != *zid); - self.graph[self.idx].links.retain(|link| *link != *zid); - - if self.full_linkstate { - if let Some((edge, _)) = self - .get_idx(zid) - .and_then(|idx| self.graph.find_edge_undirected(self.idx, idx)) - { - self.graph.remove_edge(edge); - } - let removed = self.remove_detached_nodes(); - - self.graph[self.idx].sn += 1; - - self.send_on_links( - vec![( - self.idx, - Details { - zid: false, - locators: self.gossip, - links: true, - }, - )], - |_| true, - ); - - removed - } else { - if let Some(idx) = self.get_idx(zid) { - self.graph.remove_node(idx); - } - if self.router_peers_failover_brokering { - self.send_on_links( - vec![( - self.idx, - Details { - zid: false, - locators: self.gossip, - links: true, - }, - )], - |link| { - link.zid != *zid - && link.transport.get_whatami().unwrap_or(WhatAmI::Peer) - == WhatAmI::Router - }, - ); - } - vec![] - } - } - - fn remove_detached_nodes(&mut self) -> Vec<(NodeIndex, Node)> { - let mut dfs_stack = vec![self.idx]; - let mut visit_map = self.graph.visit_map(); - while let Some(node) = dfs_stack.pop() { - if visit_map.visit(node) { - for succzid in &self.graph[node].links { - if let Some(succ) = self.get_idx(succzid) { - if !visit_map.is_visited(&succ) { - dfs_stack.push(succ); - } - } - } - } - } - - let mut removed = vec![]; - for idx in self.graph.node_indices().collect::>() { - if !visit_map.is_visited(&idx) { - log::debug!("Remove node {}", &self.graph[idx].zid); - removed.push((idx, self.graph.remove_node(idx).unwrap())); - } - } - removed - } - - pub(super) fn compute_trees(&mut self) -> Vec> { - let indexes = self.graph.node_indices().collect::>(); - let max_idx = indexes.iter().max().unwrap(); - - let old_childs: Vec> = self.trees.iter().map(|t| t.childs.clone()).collect(); - - self.trees.clear(); - self.trees.resize_with(max_idx.index() + 1, || Tree { - parent: None, - childs: vec![], - directions: vec![], - }); - - for tree_root_idx in &indexes { - let paths = petgraph::algo::bellman_ford(&self.graph, *tree_root_idx).unwrap(); - - if tree_root_idx.index() == 0 { - self.distances = paths.distances; - } - - if log::log_enabled!(log::Level::Debug) { - let ps: Vec> = paths - .predecessors - .iter() - .enumerate() - .map(|(is, o)| { - o.map(|ip| { - format!( - "{} <- {}", - self.graph[ip].zid, - self.graph[NodeIndex::new(is)].zid - ) - }) - }) - .collect(); - log::debug!("Tree {} {:?}", self.graph[*tree_root_idx].zid, ps); - } - - 
self.trees[tree_root_idx.index()].parent = paths.predecessors[self.idx.index()]; - - for idx in &indexes { - if let Some(parent_idx) = paths.predecessors[idx.index()] { - if parent_idx == self.idx { - self.trees[tree_root_idx.index()].childs.push(*idx); - } - } - } - - self.trees[tree_root_idx.index()] - .directions - .resize_with(max_idx.index() + 1, || None); - let mut dfs = petgraph::algo::DfsSpace::new(&self.graph); - for destination in &indexes { - if self.idx != *destination - && petgraph::algo::has_path_connecting( - &self.graph, - self.idx, - *destination, - Some(&mut dfs), - ) - { - let mut direction = None; - let mut current = *destination; - while let Some(parent) = paths.predecessors[current.index()] { - if parent == self.idx { - direction = Some(current); - break; - } else { - current = parent; - } - } - - self.trees[tree_root_idx.index()].directions[destination.index()] = - match direction { - Some(direction) => Some(direction), - None => self.trees[tree_root_idx.index()].parent, - }; - } - } - } - - let mut new_childs = Vec::with_capacity(self.trees.len()); - new_childs.resize(self.trees.len(), vec![]); - - for i in 0..new_childs.len() { - new_childs[i] = if i < old_childs.len() { - self.trees[i] - .childs - .iter() - .filter(|idx| !old_childs[i].contains(idx)) - .cloned() - .collect() - } else { - self.trees[i].childs.clone() - }; - } - - new_childs - } - - #[inline] - pub(super) fn get_links(&self, node: ZenohId) -> &[ZenohId] { - self.get_node(&node) - .map(|node| &node.links[..]) - .unwrap_or_default() - } -} - -#[inline] -pub(super) fn shared_nodes(net1: &Network, net2: &Network) -> Vec { - net1.graph - .node_references() - .filter_map(|(_, node1)| { - net2.graph - .node_references() - .any(|(_, node2)| node1.zid == node2.zid) - .then_some(node1.zid) - }) - .collect() -} diff --git a/zenoh/src/net/routing/hat/client/pubsub.rs b/zenoh/src/net/routing/hat/client/pubsub.rs index 189e6cb6e8..8c501c5897 100644 --- a/zenoh/src/net/routing/hat/client/pubsub.rs +++ b/zenoh/src/net/routing/hat/client/pubsub.rs @@ -11,9 +11,8 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::network::Network; -use super::{face_hat, face_hat_mut, hat, hat_mut, res_hat, res_hat_mut}; -use super::{get_peer, get_router, HatCode, HatContext, HatFace, HatTables}; +use super::{face_hat, face_hat_mut}; +use super::{HatCode, HatFace}; use crate::net::routing::dispatcher::face::FaceState; use crate::net::routing::dispatcher::pubsub::*; use crate::net::routing::dispatcher::resource::{NodeId, Resource, SessionContext}; @@ -21,14 +20,13 @@ use crate::net::routing::dispatcher::tables::{DataRoutes, PullCaches, Route, Rou use crate::net::routing::dispatcher::tables::{Tables, TablesLock}; use crate::net::routing::hat::HatPubSubTrait; use crate::net::routing::{RoutingContext, PREFIX_LIVELINESS}; -use petgraph::graph::NodeIndex; use std::borrow::Cow; -use std::collections::{HashMap, HashSet}; +use std::collections::HashMap; use std::sync::{Arc, RwLockReadGuard}; use zenoh_core::zread; use zenoh_protocol::core::key_expr::OwnedKeyExpr; use zenoh_protocol::{ - core::{key_expr::keyexpr, Reliability, WhatAmI, WireExpr, ZenohId}, + core::{key_expr::keyexpr, Reliability, WhatAmI, WireExpr}, network::declare::{ common::ext::WireExprType, ext, subscriber::ext::SubscriberInfo, Declare, DeclareBody, DeclareSubscriber, Mode, UndeclareSubscriber, @@ -36,79 +34,18 @@ use zenoh_protocol::{ }; use zenoh_sync::get_mut_unchecked; -#[inline] -fn send_sourced_subscription_to_net_childs( - tables: &Tables, - net: &Network, - 
childs: &[NodeIndex], - res: &Arc, - src_face: Option<&Arc>, - sub_info: &SubscriberInfo, - routing_context: NodeId, -) { - for child in childs { - if net.graph.contains_node(*child) { - match tables.get_face(&net.graph[*child].zid).cloned() { - Some(mut someface) => { - if src_face.is_none() || someface.id != src_face.unwrap().id { - let key_expr = Resource::decl_key(res, &mut someface); - - log::debug!("Send subscription {} on {}", res.expr(), someface); - - someface.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType { - node_id: routing_context, - }, - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // TODO - wire_expr: key_expr, - ext_info: *sub_info, - }), - }, - res.expr(), - )); - } - } - None => log::trace!("Unable to find face for zid {}", net.graph[*child].zid), - } - } - } -} - #[inline] fn propagate_simple_subscription_to( - tables: &mut Tables, + _tables: &mut Tables, dst_face: &mut Arc, res: &Arc, sub_info: &SubscriberInfo, src_face: &mut Arc, - full_peer_net: bool, ) { if (src_face.id != dst_face.id || res.expr().starts_with(PREFIX_LIVELINESS)) && !face_hat!(dst_face).local_subs.contains(res) - && match tables.whatami { - WhatAmI::Router => { - if full_peer_net { - dst_face.whatami == WhatAmI::Client - } else { - dst_face.whatami != WhatAmI::Router - && (src_face.whatami != WhatAmI::Peer - || dst_face.whatami != WhatAmI::Peer - || hat!(tables).failover_brokering(src_face.zid, dst_face.zid)) - } - } - WhatAmI::Peer => { - if full_peer_net { - dst_face.whatami == WhatAmI::Client - } else { - src_face.whatami == WhatAmI::Client || dst_face.whatami == WhatAmI::Client - } - } - _ => src_face.whatami == WhatAmI::Client || dst_face.whatami == WhatAmI::Client, - } + && src_face.whatami == WhatAmI::Client + || dst_face.whatami == WhatAmI::Client { face_hat_mut!(dst_face).local_subs.insert(res.clone()); let key_expr = Resource::decl_key(res, dst_face); @@ -134,233 +71,13 @@ fn propagate_simple_subscription( sub_info: &SubscriberInfo, src_face: &mut Arc, ) { - let full_peer_net = hat!(tables).full_net(WhatAmI::Peer); for mut dst_face in tables .faces .values() .cloned() .collect::>>() { - propagate_simple_subscription_to( - tables, - &mut dst_face, - res, - sub_info, - src_face, - full_peer_net, - ); - } -} - -fn propagate_sourced_subscription( - tables: &Tables, - res: &Arc, - sub_info: &SubscriberInfo, - src_face: Option<&Arc>, - source: &ZenohId, - net_type: WhatAmI, -) { - let net = hat!(tables).get_net(net_type).unwrap(); - match net.get_idx(source) { - Some(tree_sid) => { - if net.trees.len() > tree_sid.index() { - send_sourced_subscription_to_net_childs( - tables, - net, - &net.trees[tree_sid.index()].childs, - res, - src_face, - sub_info, - tree_sid.index() as NodeId, - ); - } else { - log::trace!( - "Propagating sub {}: tree for node {} sid:{} not yet ready", - res.expr(), - tree_sid.index(), - source - ); - } - } - None => log::error!( - "Error propagating sub {}: cannot get index of {}!", - res.expr(), - source - ), - } -} - -fn register_router_subscription( - tables: &mut Tables, - face: &mut Arc, - res: &mut Arc, - sub_info: &SubscriberInfo, - router: ZenohId, -) { - if !res_hat!(res).router_subs.contains(&router) { - // Register router subscription - { - log::debug!( - "Register router subscription {} (router: {})", - res.expr(), - router - ); - res_hat_mut!(res).router_subs.insert(router); - hat_mut!(tables).router_subs.insert(res.clone()); - } - - 
// Propagate subscription to routers - propagate_sourced_subscription(tables, res, sub_info, Some(face), &router, WhatAmI::Router); - } - // Propagate subscription to peers - if hat!(tables).full_net(WhatAmI::Peer) && face.whatami != WhatAmI::Peer { - register_peer_subscription(tables, face, res, sub_info, tables.zid) - } - - // Propagate subscription to clients - propagate_simple_subscription(tables, res, sub_info, face); -} - -fn declare_router_subscription( - tables: &TablesLock, - rtables: RwLockReadGuard, - face: &mut Arc, - expr: &WireExpr, - sub_info: &SubscriberInfo, - router: ZenohId, -) { - match rtables - .get_mapping(face, &expr.scope, expr.mapping) - .cloned() - { - Some(mut prefix) => { - let res = Resource::get_resource(&prefix, &expr.suffix); - let (mut res, mut wtables) = - if res.as_ref().map(|r| r.context.is_some()).unwrap_or(false) { - drop(rtables); - let wtables = zwrite!(tables.tables); - (res.unwrap(), wtables) - } else { - let mut fullexpr = prefix.expr(); - fullexpr.push_str(expr.suffix.as_ref()); - let mut matches = keyexpr::new(fullexpr.as_str()) - .map(|ke| Resource::get_matches(&rtables, ke)) - .unwrap_or_default(); - drop(rtables); - let mut wtables = zwrite!(tables.tables); - let mut res = - Resource::make_resource(&mut wtables, &mut prefix, expr.suffix.as_ref()); - matches.push(Arc::downgrade(&res)); - Resource::match_resource(&wtables, &mut res, matches); - (res, wtables) - }; - register_router_subscription(&mut wtables, face, &mut res, sub_info, router); - disable_matches_data_routes(&mut wtables, &mut res); - drop(wtables); - - let rtables = zread!(tables.tables); - let matches_data_routes = compute_matches_data_routes_(&rtables, &res); - drop(rtables); - - let wtables = zwrite!(tables.tables); - for (mut res, data_routes) in matches_data_routes { - get_mut_unchecked(&mut res) - .context_mut() - .update_data_routes(data_routes); - } - drop(wtables); - } - None => log::error!( - "Declare router subscription for unknown scope {}!", - expr.scope - ), - } -} - -fn register_peer_subscription( - tables: &mut Tables, - face: &mut Arc, - res: &mut Arc, - sub_info: &SubscriberInfo, - peer: ZenohId, -) { - if !res_hat!(res).peer_subs.contains(&peer) { - // Register peer subscription - { - log::debug!("Register peer subscription {} (peer: {})", res.expr(), peer); - res_hat_mut!(res).peer_subs.insert(peer); - hat_mut!(tables).peer_subs.insert(res.clone()); - } - - // Propagate subscription to peers - propagate_sourced_subscription(tables, res, sub_info, Some(face), &peer, WhatAmI::Peer); - } - - if tables.whatami == WhatAmI::Peer { - // Propagate subscription to clients - propagate_simple_subscription(tables, res, sub_info, face); - } -} - -fn declare_peer_subscription( - tables: &TablesLock, - rtables: RwLockReadGuard, - face: &mut Arc, - expr: &WireExpr, - sub_info: &SubscriberInfo, - peer: ZenohId, -) { - match rtables - .get_mapping(face, &expr.scope, expr.mapping) - .cloned() - { - Some(mut prefix) => { - let res = Resource::get_resource(&prefix, &expr.suffix); - let (mut res, mut wtables) = - if res.as_ref().map(|r| r.context.is_some()).unwrap_or(false) { - drop(rtables); - let wtables = zwrite!(tables.tables); - (res.unwrap(), wtables) - } else { - let mut fullexpr = prefix.expr(); - fullexpr.push_str(expr.suffix.as_ref()); - let mut matches = keyexpr::new(fullexpr.as_str()) - .map(|ke| Resource::get_matches(&rtables, ke)) - .unwrap_or_default(); - drop(rtables); - let mut wtables = zwrite!(tables.tables); - let mut res = - Resource::make_resource(&mut 
wtables, &mut prefix, expr.suffix.as_ref()); - matches.push(Arc::downgrade(&res)); - Resource::match_resource(&wtables, &mut res, matches); - (res, wtables) - }; - register_peer_subscription(&mut wtables, face, &mut res, sub_info, peer); - if wtables.whatami == WhatAmI::Router { - let mut propa_sub_info = *sub_info; - propa_sub_info.mode = Mode::Push; - let zid = wtables.zid; - register_router_subscription(&mut wtables, face, &mut res, &propa_sub_info, zid); - } - disable_matches_data_routes(&mut wtables, &mut res); - drop(wtables); - - let rtables = zread!(tables.tables); - let matches_data_routes = compute_matches_data_routes_(&rtables, &res); - drop(rtables); - - let wtables = zwrite!(tables.tables); - for (mut res, data_routes) in matches_data_routes { - get_mut_unchecked(&mut res) - .context_mut() - .update_data_routes(data_routes); - } - drop(wtables); - } - None => log::error!( - "Declare router subscription for unknown scope {}!", - expr.scope - ), + propagate_simple_subscription_to(tables, &mut dst_face, res, sub_info, src_face); } } @@ -440,75 +157,29 @@ fn declare_client_subscription( register_client_subscription(&mut wtables, face, &mut res, sub_info); let mut propa_sub_info = *sub_info; propa_sub_info.mode = Mode::Push; - match wtables.whatami { - WhatAmI::Router => { - let zid = wtables.zid; - register_router_subscription( - &mut wtables, - face, - &mut res, - &propa_sub_info, - zid, - ); - } - WhatAmI::Peer => { - if hat!(wtables).full_net(WhatAmI::Peer) { - let zid = wtables.zid; - register_peer_subscription( - &mut wtables, - face, - &mut res, - &propa_sub_info, - zid, - ); - } else { - propagate_simple_subscription(&mut wtables, &res, &propa_sub_info, face); - // This introduced a buffer overflow on windows - // TODO: Let's deactivate this on windows until Fixed - #[cfg(not(windows))] - for mcast_group in &wtables.mcast_groups { - mcast_group - .primitives - .send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // TODO - wire_expr: res.expr().into(), - ext_info: *sub_info, - }), - }, - res.expr(), - )) - } - } - } - _ => { - propagate_simple_subscription(&mut wtables, &res, &propa_sub_info, face); - // This introduced a buffer overflow on windows - // TODO: Let's deactivate this on windows until Fixed - #[cfg(not(windows))] - for mcast_group in &wtables.mcast_groups { - mcast_group - .primitives - .send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // TODO - wire_expr: res.expr().into(), - ext_info: *sub_info, - }), - }, - res.expr(), - )) - } - } + + propagate_simple_subscription(&mut wtables, &res, &propa_sub_info, face); + // This introduced a buffer overflow on windows + // TODO: Let's deactivate this on windows until Fixed + #[cfg(not(windows))] + for mcast_group in &wtables.mcast_groups { + mcast_group + .primitives + .send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id: 0, // TODO + wire_expr: res.expr().into(), + ext_info: *sub_info, + }), + }, + res.expr(), + )) } + disable_matches_data_routes(&mut wtables, &mut res); drop(wtables); @@ -528,24 +199,6 @@ 
fn declare_client_subscription( } } -#[inline] -fn remote_router_subs(tables: &Tables, res: &Arc) -> bool { - res.context.is_some() - && res_hat!(res) - .router_subs - .iter() - .any(|peer| peer != &tables.zid) -} - -#[inline] -fn remote_peer_subs(tables: &Tables, res: &Arc) -> bool { - res.context.is_some() - && res_hat!(res) - .peer_subs - .iter() - .any(|peer| peer != &tables.zid) -} - #[inline] fn client_subs(res: &Arc) -> Vec> { res.session_ctxs @@ -560,46 +213,6 @@ fn client_subs(res: &Arc) -> Vec> { .collect() } -#[inline] -fn send_forget_sourced_subscription_to_net_childs( - tables: &Tables, - net: &Network, - childs: &[NodeIndex], - res: &Arc, - src_face: Option<&Arc>, - routing_context: Option, -) { - for child in childs { - if net.graph.contains_node(*child) { - match tables.get_face(&net.graph[*child].zid).cloned() { - Some(mut someface) => { - if src_face.is_none() || someface.id != src_face.unwrap().id { - let wire_expr = Resource::decl_key(res, &mut someface); - - log::debug!("Send forget subscription {} on {}", res.expr(), someface); - - someface.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType { - node_id: routing_context.unwrap_or(0), - }, - body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id: 0, // TODO - ext_wire_expr: WireExprType { wire_expr }, - }), - }, - res.expr(), - )); - } - } - None => log::trace!("Unable to find face for zid {}", net.graph[*child].zid), - } - } - } -} - fn propagate_forget_simple_subscription(tables: &mut Tables, res: &Arc) { for face in tables.faces.values_mut() { if face_hat!(face).local_subs.contains(res) { @@ -621,224 +234,6 @@ fn propagate_forget_simple_subscription(tables: &mut Tables, res: &Arc } } -fn propagate_forget_simple_subscription_to_peers(tables: &mut Tables, res: &Arc) { - if !hat!(tables).full_net(WhatAmI::Peer) - && res_hat!(res).router_subs.len() == 1 - && res_hat!(res).router_subs.contains(&tables.zid) - { - for mut face in tables - .faces - .values() - .cloned() - .collect::>>() - { - if face.whatami == WhatAmI::Peer - && face_hat!(face).local_subs.contains(res) - && !res.session_ctxs.values().any(|s| { - face.zid != s.face.zid - && s.subs.is_some() - && (s.face.whatami == WhatAmI::Client - || (s.face.whatami == WhatAmI::Peer - && hat!(tables).failover_brokering(s.face.zid, face.zid))) - }) - { - let wire_expr = Resource::get_best_key(res, "", face.id); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id: 0, // TODO - ext_wire_expr: WireExprType { wire_expr }, - }), - }, - res.expr(), - )); - - face_hat_mut!(&mut face).local_subs.remove(res); - } - } - } -} - -fn propagate_forget_sourced_subscription( - tables: &Tables, - res: &Arc, - src_face: Option<&Arc>, - source: &ZenohId, - net_type: WhatAmI, -) { - let net = hat!(tables).get_net(net_type).unwrap(); - match net.get_idx(source) { - Some(tree_sid) => { - if net.trees.len() > tree_sid.index() { - send_forget_sourced_subscription_to_net_childs( - tables, - net, - &net.trees[tree_sid.index()].childs, - res, - src_face, - Some(tree_sid.index() as NodeId), - ); - } else { - log::trace!( - "Propagating forget sub {}: tree for node {} sid:{} not yet ready", - res.expr(), - tree_sid.index(), - source - ); - } - } - None => log::error!( - "Error propagating forget sub 
{}: cannot get index of {}!", - res.expr(), - source - ), - } -} - -fn unregister_router_subscription(tables: &mut Tables, res: &mut Arc, router: &ZenohId) { - log::debug!( - "Unregister router subscription {} (router: {})", - res.expr(), - router - ); - res_hat_mut!(res).router_subs.retain(|sub| sub != router); - - if res_hat!(res).router_subs.is_empty() { - hat_mut!(tables) - .router_subs - .retain(|sub| !Arc::ptr_eq(sub, res)); - - if hat_mut!(tables).full_net(WhatAmI::Peer) { - undeclare_peer_subscription(tables, None, res, &tables.zid.clone()); - } - propagate_forget_simple_subscription(tables, res); - } - - propagate_forget_simple_subscription_to_peers(tables, res); -} - -fn undeclare_router_subscription( - tables: &mut Tables, - face: Option<&Arc>, - res: &mut Arc, - router: &ZenohId, -) { - if res_hat!(res).router_subs.contains(router) { - unregister_router_subscription(tables, res, router); - propagate_forget_sourced_subscription(tables, res, face, router, WhatAmI::Router); - } -} - -fn forget_router_subscription( - tables: &TablesLock, - rtables: RwLockReadGuard, - face: &mut Arc, - expr: &WireExpr, - router: &ZenohId, -) { - match rtables.get_mapping(face, &expr.scope, expr.mapping) { - Some(prefix) => match Resource::get_resource(prefix, expr.suffix.as_ref()) { - Some(mut res) => { - drop(rtables); - let mut wtables = zwrite!(tables.tables); - undeclare_router_subscription(&mut wtables, Some(face), &mut res, router); - disable_matches_data_routes(&mut wtables, &mut res); - drop(wtables); - - let rtables = zread!(tables.tables); - let matches_data_routes = compute_matches_data_routes_(&rtables, &res); - drop(rtables); - let wtables = zwrite!(tables.tables); - for (mut res, data_routes) in matches_data_routes { - get_mut_unchecked(&mut res) - .context_mut() - .update_data_routes(data_routes); - } - Resource::clean(&mut res); - drop(wtables); - } - None => log::error!("Undeclare unknown router subscription!"), - }, - None => log::error!("Undeclare router subscription with unknown scope!"), - } -} - -fn unregister_peer_subscription(tables: &mut Tables, res: &mut Arc, peer: &ZenohId) { - log::debug!( - "Unregister peer subscription {} (peer: {})", - res.expr(), - peer - ); - res_hat_mut!(res).peer_subs.retain(|sub| sub != peer); - - if res_hat!(res).peer_subs.is_empty() { - hat_mut!(tables) - .peer_subs - .retain(|sub| !Arc::ptr_eq(sub, res)); - - if tables.whatami == WhatAmI::Peer { - propagate_forget_simple_subscription(tables, res); - } - } -} - -fn undeclare_peer_subscription( - tables: &mut Tables, - face: Option<&Arc>, - res: &mut Arc, - peer: &ZenohId, -) { - if res_hat!(res).peer_subs.contains(peer) { - unregister_peer_subscription(tables, res, peer); - propagate_forget_sourced_subscription(tables, res, face, peer, WhatAmI::Peer); - } -} - -fn forget_peer_subscription( - tables: &TablesLock, - rtables: RwLockReadGuard, - face: &mut Arc, - expr: &WireExpr, - peer: &ZenohId, -) { - match rtables.get_mapping(face, &expr.scope, expr.mapping) { - Some(prefix) => match Resource::get_resource(prefix, expr.suffix.as_ref()) { - Some(mut res) => { - drop(rtables); - let mut wtables = zwrite!(tables.tables); - undeclare_peer_subscription(&mut wtables, Some(face), &mut res, peer); - if wtables.whatami == WhatAmI::Router { - let client_subs = res.session_ctxs.values().any(|ctx| ctx.subs.is_some()); - let peer_subs = remote_peer_subs(&wtables, &res); - let zid = wtables.zid; - if !client_subs && !peer_subs { - undeclare_router_subscription(&mut wtables, None, &mut res, &zid); - } - } - 
disable_matches_data_routes(&mut wtables, &mut res); - drop(wtables); - - let rtables = zread!(tables.tables); - let matches_data_routes = compute_matches_data_routes_(&rtables, &res); - drop(rtables); - let wtables = zwrite!(tables.tables); - for (mut res, data_routes) in matches_data_routes { - get_mut_unchecked(&mut res) - .context_mut() - .update_data_routes(data_routes); - } - Resource::clean(&mut res); - drop(wtables); - } - None => log::error!("Undeclare unknown peer subscription!"), - }, - None => log::error!("Undeclare peer subscription with unknown scope!"), - } -} - pub(super) fn undeclare_client_subscription( tables: &mut Tables, face: &mut Arc, @@ -851,32 +246,10 @@ pub(super) fn undeclare_client_subscription( face_hat_mut!(face).remote_subs.remove(res); let mut client_subs = client_subs(res); - let router_subs = remote_router_subs(tables, res); - let peer_subs = remote_peer_subs(tables, res); - match tables.whatami { - WhatAmI::Router => { - if client_subs.is_empty() && !peer_subs { - undeclare_router_subscription(tables, None, res, &tables.zid.clone()); - } else { - propagate_forget_simple_subscription_to_peers(tables, res); - } - } - WhatAmI::Peer => { - if client_subs.is_empty() { - if hat!(tables).full_net(WhatAmI::Peer) { - undeclare_peer_subscription(tables, None, res, &tables.zid.clone()); - } else { - propagate_forget_simple_subscription(tables, res); - } - } - } - _ => { - if client_subs.is_empty() { - propagate_forget_simple_subscription(tables, res); - } - } + if client_subs.is_empty() { + propagate_forget_simple_subscription(tables, res); } - if client_subs.len() == 1 && !router_subs && !peer_subs { + if client_subs.len() == 1 { let face = &mut client_subs[0]; if face_hat!(face).local_subs.contains(res) && !(face.whatami == WhatAmI::Client && res.expr().starts_with(PREFIX_LIVELINESS)) @@ -939,336 +312,15 @@ pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { reliability: Reliability::Reliable, // @TODO mode: Mode::Push, }; - match tables.whatami { - WhatAmI::Router => { - if face.whatami == WhatAmI::Client { - for sub in &hat!(tables).router_subs { - face_hat_mut!(face).local_subs.insert(sub.clone()); - let key_expr = Resource::decl_key(sub, face); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // TODO - wire_expr: key_expr, - ext_info: sub_info, - }), - }, - sub.expr(), - )); - } - } else if face.whatami == WhatAmI::Peer && !hat!(tables).full_net(WhatAmI::Peer) { - for sub in &hat!(tables).router_subs { - if sub.context.is_some() - && (res_hat!(sub).router_subs.iter().any(|r| *r != tables.zid) - || sub.session_ctxs.values().any(|s| { - s.subs.is_some() - && (s.face.whatami == WhatAmI::Client - || (s.face.whatami == WhatAmI::Peer - && hat!(tables) - .failover_brokering(s.face.zid, face.zid))) - })) - { - face_hat_mut!(face).local_subs.insert(sub.clone()); - let key_expr = Resource::decl_key(sub, face); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // TODO - wire_expr: key_expr, - ext_info: sub_info, - }), - }, - sub.expr(), - )); - } - } - } - } - WhatAmI::Peer => { - if hat!(tables).full_net(WhatAmI::Peer) { - if face.whatami == WhatAmI::Client { - for sub in 
&hat!(tables).peer_subs { - face_hat_mut!(face).local_subs.insert(sub.clone()); - let key_expr = Resource::decl_key(sub, face); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // TODO - wire_expr: key_expr, - ext_info: sub_info, - }), - }, - sub.expr(), - )); - } - } - } else { - for src_face in tables - .faces - .values() - .cloned() - .collect::>>() - { - for sub in &face_hat!(src_face).remote_subs { - propagate_simple_subscription_to( - tables, - face, - sub, - &sub_info, - &mut src_face.clone(), - false, - ); - } - } - } - } - WhatAmI::Client => { - for src_face in tables - .faces - .values() - .cloned() - .collect::>>() - { - for sub in &face_hat!(src_face).remote_subs { - propagate_simple_subscription_to( - tables, - face, - sub, - &sub_info, - &mut src_face.clone(), - false, - ); - } - } - } - } -} - -pub(super) fn pubsub_remove_node(tables: &mut Tables, node: &ZenohId, net_type: WhatAmI) { - match net_type { - WhatAmI::Router => { - for mut res in hat!(tables) - .router_subs - .iter() - .filter(|res| res_hat!(res).router_subs.contains(node)) - .cloned() - .collect::>>() - { - unregister_router_subscription(tables, &mut res, node); - - let matches_data_routes = compute_matches_data_routes_(tables, &res); - for (mut res, data_routes) in matches_data_routes { - get_mut_unchecked(&mut res) - .context_mut() - .update_data_routes(data_routes); - } - Resource::clean(&mut res) - } - } - WhatAmI::Peer => { - for mut res in hat!(tables) - .peer_subs - .iter() - .filter(|res| res_hat!(res).peer_subs.contains(node)) - .cloned() - .collect::>>() - { - unregister_peer_subscription(tables, &mut res, node); - - if tables.whatami == WhatAmI::Router { - let client_subs = res.session_ctxs.values().any(|ctx| ctx.subs.is_some()); - let peer_subs = remote_peer_subs(tables, &res); - if !client_subs && !peer_subs { - undeclare_router_subscription(tables, None, &mut res, &tables.zid.clone()); - } - } - - // compute_matches_data_routes(tables, &mut res); - let matches_data_routes = compute_matches_data_routes_(tables, &res); - for (mut res, data_routes) in matches_data_routes { - get_mut_unchecked(&mut res) - .context_mut() - .update_data_routes(data_routes); - } - Resource::clean(&mut res) - } - } - _ => (), - } -} - -pub(super) fn pubsub_tree_change( - tables: &mut Tables, - new_childs: &[Vec], - net_type: WhatAmI, -) { - // propagate subs to new childs - for (tree_sid, tree_childs) in new_childs.iter().enumerate() { - if !tree_childs.is_empty() { - let net = hat!(tables).get_net(net_type).unwrap(); - let tree_idx = NodeIndex::new(tree_sid); - if net.graph.contains_node(tree_idx) { - let tree_id = net.graph[tree_idx].zid; - - let subs_res = match net_type { - WhatAmI::Router => &hat!(tables).router_subs, - _ => &hat!(tables).peer_subs, - }; - - for res in subs_res { - let subs = match net_type { - WhatAmI::Router => &res_hat!(res).router_subs, - _ => &res_hat!(res).peer_subs, - }; - for sub in subs { - if *sub == tree_id { - let sub_info = SubscriberInfo { - reliability: Reliability::Reliable, // @TODO - mode: Mode::Push, - }; - send_sourced_subscription_to_net_childs( - tables, - net, - tree_childs, - res, - None, - &sub_info, - tree_sid as NodeId, - ); - } - } - } - } - } - } - - // recompute routes - compute_data_routes_from(tables, &mut tables.root_res.clone()); -} - -pub(super) fn 
pubsub_linkstate_change(tables: &mut Tables, zid: &ZenohId, links: &[ZenohId]) { - if let Some(src_face) = tables.get_face(zid).cloned() { - if hat!(tables).router_peers_failover_brokering - && tables.whatami == WhatAmI::Router - && src_face.whatami == WhatAmI::Peer - { - for res in &face_hat!(src_face).remote_subs { - let client_subs = res - .session_ctxs - .values() - .any(|ctx| ctx.face.whatami == WhatAmI::Client && ctx.subs.is_some()); - if !remote_router_subs(tables, res) && !client_subs { - for ctx in get_mut_unchecked(&mut res.clone()) - .session_ctxs - .values_mut() - { - let dst_face = &mut get_mut_unchecked(ctx).face; - if dst_face.whatami == WhatAmI::Peer && src_face.zid != dst_face.zid { - if face_hat!(dst_face).local_subs.contains(res) { - let forget = !HatTables::failover_brokering_to(links, dst_face.zid) - && { - let ctx_links = hat!(tables) - .peers_net - .as_ref() - .map(|net| net.get_links(dst_face.zid)) - .unwrap_or_else(|| &[]); - res.session_ctxs.values().any(|ctx2| { - ctx2.face.whatami == WhatAmI::Peer - && ctx2.subs.is_some() - && HatTables::failover_brokering_to( - ctx_links, - ctx2.face.zid, - ) - }) - }; - if forget { - let wire_expr = Resource::get_best_key(res, "", dst_face.id); - dst_face.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::UndeclareSubscriber( - UndeclareSubscriber { - id: 0, // TODO - ext_wire_expr: WireExprType { wire_expr }, - }, - ), - }, - res.expr(), - )); - - face_hat_mut!(dst_face).local_subs.remove(res); - } - } else if HatTables::failover_brokering_to(links, ctx.face.zid) { - let dst_face = &mut get_mut_unchecked(ctx).face; - face_hat_mut!(dst_face).local_subs.insert(res.clone()); - let key_expr = Resource::decl_key(res, dst_face); - let sub_info = SubscriberInfo { - reliability: Reliability::Reliable, // TODO - mode: Mode::Push, - }; - dst_face.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // TODO - wire_expr: key_expr, - ext_info: sub_info, - }), - }, - res.expr(), - )); - } - } - } - } - } - } - } -} - -#[inline] -fn insert_faces_for_subs( - route: &mut Route, - expr: &RoutingExpr, - tables: &Tables, - net: &Network, - source: NodeId, - subs: &HashSet, -) { - if net.trees.len() > source as usize { - for sub in subs { - if let Some(sub_idx) = net.get_idx(sub) { - if net.trees[source as usize].directions.len() > sub_idx.index() { - if let Some(direction) = net.trees[source as usize].directions[sub_idx.index()] - { - if net.graph.contains_node(direction) { - if let Some(face) = tables.get_face(&net.graph[direction].zid) { - route.entry(face.id).or_insert_with(|| { - let key_expr = - Resource::get_best_key(expr.prefix, expr.suffix, face.id); - (face.clone(), key_expr.to_owned(), source) - }); - } - } - } - } - } + for src_face in tables + .faces + .values() + .cloned() + .collect::>>() + { + for sub in &face_hat!(src_face).remote_subs { + propagate_simple_subscription_to(tables, face, sub, &sub_info, &mut src_face.clone()); } - } else { - log::trace!("Tree for node sid:{} not yet ready", source); } } @@ -1279,28 +331,10 @@ impl HatPubSubTrait for HatCode { face: &mut Arc, expr: &WireExpr, sub_info: &SubscriberInfo, - node_id: NodeId, + _node_id: NodeId, ) { let rtables = zread!(tables.tables); - match 
(rtables.whatami, face.whatami) { - (WhatAmI::Router, WhatAmI::Router) => { - if let Some(router) = get_router(&rtables, face, node_id) { - declare_router_subscription(tables, rtables, face, expr, sub_info, router) - } - } - (WhatAmI::Router, WhatAmI::Peer) - | (WhatAmI::Peer, WhatAmI::Router) - | (WhatAmI::Peer, WhatAmI::Peer) => { - if hat!(rtables).full_net(WhatAmI::Peer) { - if let Some(peer) = get_peer(&rtables, face, node_id) { - declare_peer_subscription(tables, rtables, face, expr, sub_info, peer) - } - } else { - declare_client_subscription(tables, rtables, face, expr, sub_info) - } - } - _ => declare_client_subscription(tables, rtables, face, expr, sub_info), - } + declare_client_subscription(tables, rtables, face, expr, sub_info); } fn forget_subscription( @@ -1308,28 +342,10 @@ impl HatPubSubTrait for HatCode { tables: &TablesLock, face: &mut Arc, expr: &WireExpr, - node_id: NodeId, + _node_id: NodeId, ) { let rtables = zread!(tables.tables); - match (rtables.whatami, face.whatami) { - (WhatAmI::Router, WhatAmI::Router) => { - if let Some(router) = get_router(&rtables, face, node_id) { - forget_router_subscription(tables, rtables, face, expr, &router) - } - } - (WhatAmI::Router, WhatAmI::Peer) - | (WhatAmI::Peer, WhatAmI::Router) - | (WhatAmI::Peer, WhatAmI::Peer) => { - if hat!(rtables).full_net(WhatAmI::Peer) { - if let Some(peer) = get_peer(&rtables, face, node_id) { - forget_peer_subscription(tables, rtables, face, expr, &peer) - } - } else { - forget_client_subscription(tables, rtables, face, expr) - } - } - _ => forget_client_subscription(tables, rtables, face, expr), - } + forget_client_subscription(tables, rtables, face, expr); } fn compute_data_route( @@ -1364,82 +380,23 @@ impl HatPubSubTrait for HatCode { .map(|ctx| Cow::from(&ctx.matches)) .unwrap_or_else(|| Cow::from(Resource::get_matches(tables, &key_expr))); - let master = tables.whatami != WhatAmI::Router - || !hat!(tables).full_net(WhatAmI::Peer) - || *hat!(tables).elect_router(&tables.zid, &key_expr, hat!(tables).shared_nodes.iter()) - == tables.zid; - for mres in matches.iter() { let mres = mres.upgrade().unwrap(); - if tables.whatami == WhatAmI::Router { - if master || source_type == WhatAmI::Router { - let net = hat!(tables).routers_net.as_ref().unwrap(); - let router_source = match source_type { - WhatAmI::Router => source, - _ => net.idx.index() as NodeId, - }; - insert_faces_for_subs( - &mut route, - expr, - tables, - net, - router_source, - &res_hat!(mres).router_subs, - ); - } - if (master || source_type != WhatAmI::Router) - && hat!(tables).full_net(WhatAmI::Peer) - { - let net = hat!(tables).peers_net.as_ref().unwrap(); - let peer_source = match source_type { - WhatAmI::Peer => source, - _ => net.idx.index() as NodeId, - }; - insert_faces_for_subs( - &mut route, - expr, - tables, - net, - peer_source, - &res_hat!(mres).peer_subs, - ); - } - } - - if tables.whatami == WhatAmI::Peer && hat!(tables).full_net(WhatAmI::Peer) { - let net = hat!(tables).peers_net.as_ref().unwrap(); - let peer_source = match source_type { - WhatAmI::Router | WhatAmI::Peer => source, - _ => net.idx.index() as NodeId, - }; - insert_faces_for_subs( - &mut route, - expr, - tables, - net, - peer_source, - &res_hat!(mres).peer_subs, - ); - } - - if tables.whatami != WhatAmI::Router || master || source_type == WhatAmI::Router { - for (sid, context) in &mres.session_ctxs { - if let Some(subinfo) = &context.subs { - if match tables.whatami { - WhatAmI::Router => context.face.whatami != WhatAmI::Router, - _ => { - source_type == 
WhatAmI::Client - || context.face.whatami == WhatAmI::Client - } - } && subinfo.mode == Mode::Push - { - route.entry(*sid).or_insert_with(|| { - let key_expr = - Resource::get_best_key(expr.prefix, expr.suffix, *sid); - (context.face.clone(), key_expr.to_owned(), NodeId::default()) - }); + for (sid, context) in &mres.session_ctxs { + if let Some(subinfo) = &context.subs { + if match tables.whatami { + WhatAmI::Router => context.face.whatami != WhatAmI::Router, + _ => { + source_type == WhatAmI::Client + || context.face.whatami == WhatAmI::Client } + } && subinfo.mode == Mode::Push + { + route.entry(*sid).or_insert_with(|| { + let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, *sid); + (context.face.clone(), key_expr.to_owned(), NodeId::default()) + }); } } } @@ -1493,73 +450,8 @@ impl HatPubSubTrait for HatCode { client_data_route: None, }; let mut expr = RoutingExpr::new(res, ""); - if tables.whatami == WhatAmI::Router { - let indexes = hat!(tables) - .routers_net - .as_ref() - .unwrap() - .graph - .node_indices() - .collect::>(); - let max_idx = indexes.iter().max().unwrap(); - routes - .routers_data_routes - .resize_with(max_idx.index() + 1, || Arc::new(HashMap::new())); - - for idx in &indexes { - routes.routers_data_routes[idx.index()] = self.compute_data_route( - tables, - &mut expr, - idx.index() as NodeId, - WhatAmI::Router, - ); - } - - routes.peer_data_route = - Some(self.compute_data_route(tables, &mut expr, NodeId::default(), WhatAmI::Peer)); - } - if (tables.whatami == WhatAmI::Router || tables.whatami == WhatAmI::Peer) - && hat!(tables).full_net(WhatAmI::Peer) - { - let indexes = hat!(tables) - .peers_net - .as_ref() - .unwrap() - .graph - .node_indices() - .collect::>(); - let max_idx = indexes.iter().max().unwrap(); - routes - .peers_data_routes - .resize_with(max_idx.index() + 1, || Arc::new(HashMap::new())); - - for idx in &indexes { - routes.peers_data_routes[idx.index()] = self.compute_data_route( - tables, - &mut expr, - idx.index() as NodeId, - WhatAmI::Peer, - ); - } - } - if tables.whatami == WhatAmI::Peer && !hat!(tables).full_net(WhatAmI::Peer) { - routes.client_data_route = Some(self.compute_data_route( - tables, - &mut expr, - NodeId::default(), - WhatAmI::Client, - )); - routes.peer_data_route = - Some(self.compute_data_route(tables, &mut expr, NodeId::default(), WhatAmI::Peer)); - } - if tables.whatami == WhatAmI::Client { - routes.client_data_route = Some(self.compute_data_route( - tables, - &mut expr, - NodeId::default(), - WhatAmI::Client, - )); - } + routes.client_data_route = + Some(self.compute_data_route(tables, &mut expr, NodeId::default(), WhatAmI::Client)); routes.matching_pulls = Some(self.compute_matching_pulls(tables, &mut expr)); routes } @@ -1569,81 +461,14 @@ impl HatPubSubTrait for HatCode { let mut res_mut = res.clone(); let res_mut = get_mut_unchecked(&mut res_mut); let mut expr = RoutingExpr::new(res, ""); - if tables.whatami == WhatAmI::Router { - let indexes = hat!(tables) - .routers_net - .as_ref() - .unwrap() - .graph - .node_indices() - .collect::>(); - let max_idx = indexes.iter().max().unwrap(); - let routers_data_routes = &mut res_mut.context_mut().routers_data_routes; - routers_data_routes.clear(); - routers_data_routes.resize_with(max_idx.index() + 1, || Arc::new(HashMap::new())); - - for idx in &indexes { - routers_data_routes[idx.index()] = self.compute_data_route( - tables, - &mut expr, - idx.index() as NodeId, - WhatAmI::Router, - ); - } - res_mut.context_mut().peer_data_route = Some(self.compute_data_route( - 
tables, - &mut expr, - NodeId::default(), - WhatAmI::Peer, - )); - } - if (tables.whatami == WhatAmI::Router || tables.whatami == WhatAmI::Peer) - && hat!(tables).full_net(WhatAmI::Peer) - { - let indexes = hat!(tables) - .peers_net - .as_ref() - .unwrap() - .graph - .node_indices() - .collect::>(); - let max_idx = indexes.iter().max().unwrap(); - let peers_data_routes = &mut res_mut.context_mut().peers_data_routes; - peers_data_routes.clear(); - peers_data_routes.resize_with(max_idx.index() + 1, || Arc::new(HashMap::new())); + res_mut.context_mut().client_data_route = Some(self.compute_data_route( + tables, + &mut expr, + NodeId::default(), + WhatAmI::Client, + )); - for idx in &indexes { - peers_data_routes[idx.index()] = self.compute_data_route( - tables, - &mut expr, - idx.index() as NodeId, - WhatAmI::Peer, - ); - } - } - if tables.whatami == WhatAmI::Peer && !hat!(tables).full_net(WhatAmI::Peer) { - res_mut.context_mut().client_data_route = Some(self.compute_data_route( - tables, - &mut expr, - NodeId::default(), - WhatAmI::Client, - )); - res_mut.context_mut().peer_data_route = Some(self.compute_data_route( - tables, - &mut expr, - NodeId::default(), - WhatAmI::Peer, - )); - } - if tables.whatami == WhatAmI::Client { - res_mut.context_mut().client_data_route = Some(self.compute_data_route( - tables, - &mut expr, - NodeId::default(), - WhatAmI::Client, - )); - } res_mut.context_mut().matching_pulls = self.compute_matching_pulls(tables, &mut expr); } } diff --git a/zenoh/src/net/routing/hat/client/queries.rs b/zenoh/src/net/routing/hat/client/queries.rs index c1093a8a00..41ef4feb0d 100644 --- a/zenoh/src/net/routing/hat/client/queries.rs +++ b/zenoh/src/net/routing/hat/client/queries.rs @@ -11,9 +11,8 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::network::Network; -use super::{face_hat, face_hat_mut, hat, hat_mut, res_hat, res_hat_mut}; -use super::{get_peer, get_router, HatCode, HatContext, HatFace, HatTables}; +use super::{face_hat, face_hat_mut}; +use super::{HatCode, HatFace}; use crate::net::routing::dispatcher::face::FaceState; use crate::net::routing::dispatcher::queries::*; use crate::net::routing::dispatcher::resource::{NodeId, Resource, SessionContext}; @@ -24,7 +23,6 @@ use crate::net::routing::dispatcher::tables::{Tables, TablesLock}; use crate::net::routing::hat::HatQueriesTrait; use crate::net::routing::{RoutingContext, PREFIX_LIVELINESS}; use ordered_float::OrderedFloat; -use petgraph::graph::NodeIndex; use std::borrow::Cow; use std::collections::HashMap; use std::sync::{Arc, RwLockReadGuard}; @@ -32,7 +30,7 @@ use zenoh_buffers::ZBuf; use zenoh_protocol::core::key_expr::include::{Includer, DEFAULT_INCLUDER}; use zenoh_protocol::core::key_expr::OwnedKeyExpr; use zenoh_protocol::{ - core::{key_expr::keyexpr, WhatAmI, WireExpr, ZenohId}, + core::{key_expr::keyexpr, WhatAmI, WireExpr}, network::declare::{ common::ext::WireExprType, ext, queryable::ext::QueryableInfo, Declare, DeclareBody, DeclareQueryable, UndeclareQueryable, @@ -56,120 +54,11 @@ fn merge_qabl_infos(mut this: QueryableInfo, info: &QueryableInfo) -> QueryableI this } -fn local_router_qabl_info(tables: &Tables, res: &Arc) -> QueryableInfo { - let info = if hat!(tables).full_net(WhatAmI::Peer) { - res.context.as_ref().and_then(|_| { - res_hat!(res) - .peer_qabls - .iter() - .fold(None, |accu, (zid, info)| { - if *zid != tables.zid { - Some(match accu { - Some(accu) => merge_qabl_infos(accu, info), - None => *info, - }) - } else { - accu - } - }) - }) - } else { - None - }; - res.session_ctxs - 
.values() - .fold(info, |accu, ctx| { - if let Some(info) = ctx.qabl.as_ref() { - Some(match accu { - Some(accu) => merge_qabl_infos(accu, info), - None => *info, - }) - } else { - accu - } - }) - .unwrap_or(QueryableInfo { - complete: 0, - distance: 0, - }) -} - -fn local_peer_qabl_info(tables: &Tables, res: &Arc) -> QueryableInfo { - let info = if tables.whatami == WhatAmI::Router && res.context.is_some() { - res_hat!(res) - .router_qabls - .iter() - .fold(None, |accu, (zid, info)| { - if *zid != tables.zid { - Some(match accu { - Some(accu) => merge_qabl_infos(accu, info), - None => *info, - }) - } else { - accu - } - }) - } else { - None - }; - res.session_ctxs - .values() - .fold(info, |accu, ctx| { - if let Some(info) = ctx.qabl.as_ref() { - Some(match accu { - Some(accu) => merge_qabl_infos(accu, info), - None => *info, - }) - } else { - accu - } - }) - .unwrap_or(QueryableInfo { - complete: 0, - distance: 0, - }) -} - -fn local_qabl_info(tables: &Tables, res: &Arc, face: &Arc) -> QueryableInfo { - let mut info = if tables.whatami == WhatAmI::Router && res.context.is_some() { - res_hat!(res) - .router_qabls - .iter() - .fold(None, |accu, (zid, info)| { - if *zid != tables.zid { - Some(match accu { - Some(accu) => merge_qabl_infos(accu, info), - None => *info, - }) - } else { - accu - } - }) - } else { - None - }; - if res.context.is_some() && hat!(tables).full_net(WhatAmI::Peer) { - info = res_hat!(res) - .peer_qabls - .iter() - .fold(info, |accu, (zid, info)| { - if *zid != tables.zid { - Some(match accu { - Some(accu) => merge_qabl_infos(accu, info), - None => *info, - }) - } else { - accu - } - }) - } +fn local_qabl_info(_tables: &Tables, res: &Arc, face: &Arc) -> QueryableInfo { res.session_ctxs .values() - .fold(info, |accu, ctx| { - if ctx.face.id != face.id && ctx.face.whatami != WhatAmI::Peer - || face.whatami != WhatAmI::Peer - || hat!(tables).failover_brokering(ctx.face.zid, face.zid) - { + .fold(None, |accu, ctx| { + if ctx.face.id != face.id { if let Some(info) = ctx.qabl.as_ref() { Some(match accu { Some(accu) => merge_qabl_infos(accu, info), @@ -188,91 +77,20 @@ fn local_qabl_info(tables: &Tables, res: &Arc, face: &Arc) }) } -#[allow(clippy::too_many_arguments)] -#[inline] -fn send_sourced_queryable_to_net_childs( - tables: &Tables, - net: &Network, - childs: &[NodeIndex], - res: &Arc, - qabl_info: &QueryableInfo, - src_face: Option<&mut Arc>, - routing_context: NodeId, -) { - for child in childs { - if net.graph.contains_node(*child) { - match tables.get_face(&net.graph[*child].zid).cloned() { - Some(mut someface) => { - if src_face.is_none() || someface.id != src_face.as_ref().unwrap().id { - let key_expr = Resource::decl_key(res, &mut someface); - - log::debug!("Send queryable {} on {}", res.expr(), someface); - - someface.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType { - node_id: routing_context, - }, - body: DeclareBody::DeclareQueryable(DeclareQueryable { - id: 0, // TODO - wire_expr: key_expr, - ext_info: *qabl_info, - }), - }, - res.expr(), - )); - } - } - None => log::trace!("Unable to find face for zid {}", net.graph[*child].zid), - } - } - } -} - fn propagate_simple_queryable( tables: &mut Tables, res: &Arc, src_face: Option<&mut Arc>, ) { - let full_peers_net = hat!(tables).full_net(WhatAmI::Peer); let faces = tables.faces.values().cloned(); for mut dst_face in faces { let info = local_qabl_info(tables, res, &dst_face); let current_info = 
face_hat!(dst_face).local_qabls.get(res); if (src_face.is_none() || src_face.as_ref().unwrap().id != dst_face.id) && (current_info.is_none() || *current_info.unwrap() != info) - && match tables.whatami { - WhatAmI::Router => { - if full_peers_net { - dst_face.whatami == WhatAmI::Client - } else { - dst_face.whatami != WhatAmI::Router - && (src_face.is_none() - || src_face.as_ref().unwrap().whatami != WhatAmI::Peer - || dst_face.whatami != WhatAmI::Peer - || hat!(tables).failover_brokering( - src_face.as_ref().unwrap().zid, - dst_face.zid, - )) - } - } - WhatAmI::Peer => { - if full_peers_net { - dst_face.whatami == WhatAmI::Client - } else { - src_face.is_none() - || src_face.as_ref().unwrap().whatami == WhatAmI::Client - || dst_face.whatami == WhatAmI::Client - } - } - _ => { - src_face.is_none() - || src_face.as_ref().unwrap().whatami == WhatAmI::Client - || dst_face.whatami == WhatAmI::Client - } - } + && (src_face.is_none() + || src_face.as_ref().unwrap().whatami == WhatAmI::Client + || dst_face.whatami == WhatAmI::Client) { face_hat_mut!(&mut dst_face) .local_qabls @@ -295,234 +113,6 @@ fn propagate_simple_queryable( } } -fn propagate_sourced_queryable( - tables: &Tables, - res: &Arc, - qabl_info: &QueryableInfo, - src_face: Option<&mut Arc>, - source: &ZenohId, - net_type: WhatAmI, -) { - let net = hat!(tables).get_net(net_type).unwrap(); - match net.get_idx(source) { - Some(tree_sid) => { - if net.trees.len() > tree_sid.index() { - send_sourced_queryable_to_net_childs( - tables, - net, - &net.trees[tree_sid.index()].childs, - res, - qabl_info, - src_face, - tree_sid.index() as NodeId, - ); - } else { - log::trace!( - "Propagating qabl {}: tree for node {} sid:{} not yet ready", - res.expr(), - tree_sid.index(), - source - ); - } - } - None => log::error!( - "Error propagating qabl {}: cannot get index of {}!", - res.expr(), - source - ), - } -} - -fn register_router_queryable( - tables: &mut Tables, - mut face: Option<&mut Arc>, - res: &mut Arc, - qabl_info: &QueryableInfo, - router: ZenohId, -) { - let current_info = res_hat!(res).router_qabls.get(&router); - if current_info.is_none() || current_info.unwrap() != qabl_info { - // Register router queryable - { - log::debug!( - "Register router queryable {} (router: {})", - res.expr(), - router, - ); - res_hat_mut!(res).router_qabls.insert(router, *qabl_info); - hat_mut!(tables).router_qabls.insert(res.clone()); - } - - // Propagate queryable to routers - propagate_sourced_queryable( - tables, - res, - qabl_info, - face.as_deref_mut(), - &router, - WhatAmI::Router, - ); - } - - if hat!(tables).full_net(WhatAmI::Peer) { - // Propagate queryable to peers - if face.is_none() || face.as_ref().unwrap().whatami != WhatAmI::Peer { - let local_info = local_peer_qabl_info(tables, res); - register_peer_queryable(tables, face.as_deref_mut(), res, &local_info, tables.zid) - } - } - - // Propagate queryable to clients - propagate_simple_queryable(tables, res, face); -} - -fn declare_router_queryable( - tables: &TablesLock, - rtables: RwLockReadGuard, - face: &mut Arc, - expr: &WireExpr, - qabl_info: &QueryableInfo, - router: ZenohId, -) { - match rtables - .get_mapping(face, &expr.scope, expr.mapping) - .cloned() - { - Some(mut prefix) => { - let res = Resource::get_resource(&prefix, &expr.suffix); - let (mut res, mut wtables) = - if res.as_ref().map(|r| r.context.is_some()).unwrap_or(false) { - drop(rtables); - let wtables = zwrite!(tables.tables); - (res.unwrap(), wtables) - } else { - let mut fullexpr = prefix.expr(); - 
fullexpr.push_str(expr.suffix.as_ref()); - log::debug!("Register router queryable {}", fullexpr); - let mut matches = keyexpr::new(fullexpr.as_str()) - .map(|ke| Resource::get_matches(&rtables, ke)) - .unwrap_or_default(); - drop(rtables); - let mut wtables = zwrite!(tables.tables); - let mut res = - Resource::make_resource(&mut wtables, &mut prefix, expr.suffix.as_ref()); - matches.push(Arc::downgrade(&res)); - Resource::match_resource(&wtables, &mut res, matches); - (res, wtables) - }; - register_router_queryable(&mut wtables, Some(face), &mut res, qabl_info, router); - disable_matches_query_routes(&mut wtables, &mut res); - drop(wtables); - - let rtables = zread!(tables.tables); - let matches_query_routes = compute_matches_query_routes_(&rtables, &res); - drop(rtables); - - let wtables = zwrite!(tables.tables); - for (mut res, query_routes) in matches_query_routes { - get_mut_unchecked(&mut res) - .context_mut() - .update_query_routes(query_routes); - } - drop(wtables); - } - None => log::error!("Declare router queryable for unknown scope {}!", expr.scope), - } -} - -fn register_peer_queryable( - tables: &mut Tables, - mut face: Option<&mut Arc>, - res: &mut Arc, - qabl_info: &QueryableInfo, - peer: ZenohId, -) { - let current_info = res_hat!(res).peer_qabls.get(&peer); - if current_info.is_none() || current_info.unwrap() != qabl_info { - // Register peer queryable - { - log::debug!("Register peer queryable {} (peer: {})", res.expr(), peer,); - res_hat_mut!(res).peer_qabls.insert(peer, *qabl_info); - hat_mut!(tables).peer_qabls.insert(res.clone()); - } - - // Propagate queryable to peers - propagate_sourced_queryable( - tables, - res, - qabl_info, - face.as_deref_mut(), - &peer, - WhatAmI::Peer, - ); - } - - if tables.whatami == WhatAmI::Peer { - // Propagate queryable to clients - propagate_simple_queryable(tables, res, face); - } -} - -fn declare_peer_queryable( - tables: &TablesLock, - rtables: RwLockReadGuard, - face: &mut Arc, - expr: &WireExpr, - qabl_info: &QueryableInfo, - peer: ZenohId, -) { - match rtables - .get_mapping(face, &expr.scope, expr.mapping) - .cloned() - { - Some(mut prefix) => { - let res = Resource::get_resource(&prefix, &expr.suffix); - let (mut res, mut wtables) = - if res.as_ref().map(|r| r.context.is_some()).unwrap_or(false) { - drop(rtables); - let wtables = zwrite!(tables.tables); - (res.unwrap(), wtables) - } else { - let mut fullexpr = prefix.expr(); - fullexpr.push_str(expr.suffix.as_ref()); - log::debug!("Register peer queryable {}", fullexpr); - let mut matches = keyexpr::new(fullexpr.as_str()) - .map(|ke| Resource::get_matches(&rtables, ke)) - .unwrap_or_default(); - drop(rtables); - let mut wtables = zwrite!(tables.tables); - let mut res = - Resource::make_resource(&mut wtables, &mut prefix, expr.suffix.as_ref()); - matches.push(Arc::downgrade(&res)); - Resource::match_resource(&wtables, &mut res, matches); - (res, wtables) - }; - let mut face = Some(face); - register_peer_queryable(&mut wtables, face.as_deref_mut(), &mut res, qabl_info, peer); - if wtables.whatami == WhatAmI::Router { - let local_info = local_router_qabl_info(&wtables, &res); - let zid = wtables.zid; - register_router_queryable(&mut wtables, face, &mut res, &local_info, zid); - } - disable_matches_query_routes(&mut wtables, &mut res); - drop(wtables); - - let rtables = zread!(tables.tables); - let matches_query_routes = compute_matches_query_routes_(&rtables, &res); - drop(rtables); - - let wtables = zwrite!(tables.tables); - for (mut res, query_routes) in matches_query_routes { - 
get_mut_unchecked(&mut res) - .context_mut() - .update_query_routes(query_routes); - } - drop(wtables); - } - None => log::error!("Declare router queryable for unknown scope {}!", expr.scope), - } -} - fn register_client_queryable( _tables: &mut Tables, face: &mut Arc, @@ -583,38 +173,7 @@ fn declare_client_queryable( }; register_client_queryable(&mut wtables, face, &mut res, qabl_info); - - match wtables.whatami { - WhatAmI::Router => { - let local_details = local_router_qabl_info(&wtables, &res); - let zid = wtables.zid; - register_router_queryable( - &mut wtables, - Some(face), - &mut res, - &local_details, - zid, - ); - } - WhatAmI::Peer => { - if hat!(wtables).full_net(WhatAmI::Peer) { - let local_details = local_peer_qabl_info(&wtables, &res); - let zid = wtables.zid; - register_peer_queryable( - &mut wtables, - Some(face), - &mut res, - &local_details, - zid, - ); - } else { - propagate_simple_queryable(&mut wtables, &res, Some(face)); - } - } - _ => { - propagate_simple_queryable(&mut wtables, &res, Some(face)); - } - } + propagate_simple_queryable(&mut wtables, &res, Some(face)); disable_matches_query_routes(&mut wtables, &mut res); drop(wtables); @@ -634,24 +193,6 @@ fn declare_client_queryable( } } -#[inline] -fn remote_router_qabls(tables: &Tables, res: &Arc) -> bool { - res.context.is_some() - && res_hat!(res) - .router_qabls - .keys() - .any(|router| router != &tables.zid) -} - -#[inline] -fn remote_peer_qabls(tables: &Tables, res: &Arc) -> bool { - res.context.is_some() - && res_hat!(res) - .peer_qabls - .keys() - .any(|peer| peer != &tables.zid) -} - #[inline] fn client_qabls(res: &Arc) -> Vec> { res.session_ctxs @@ -666,46 +207,6 @@ fn client_qabls(res: &Arc) -> Vec> { .collect() } -#[inline] -fn send_forget_sourced_queryable_to_net_childs( - tables: &Tables, - net: &Network, - childs: &[NodeIndex], - res: &Arc, - src_face: Option<&Arc>, - routing_context: NodeId, -) { - for child in childs { - if net.graph.contains_node(*child) { - match tables.get_face(&net.graph[*child].zid).cloned() { - Some(mut someface) => { - if src_face.is_none() || someface.id != src_face.unwrap().id { - let wire_expr = Resource::decl_key(res, &mut someface); - - log::debug!("Send forget queryable {} on {}", res.expr(), someface); - - someface.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType { - node_id: routing_context, - }, - body: DeclareBody::UndeclareQueryable(UndeclareQueryable { - id: 0, // TODO - ext_wire_expr: WireExprType { wire_expr }, - }), - }, - res.expr(), - )); - } - } - None => log::trace!("Unable to find face for zid {}", net.graph[*child].zid), - } - } - } -} - fn propagate_forget_simple_queryable(tables: &mut Tables, res: &mut Arc) { for face in tables.faces.values_mut() { if face_hat!(face).local_qabls.contains_key(res) { @@ -728,225 +229,6 @@ fn propagate_forget_simple_queryable(tables: &mut Tables, res: &mut Arc) { - if !hat!(tables).full_net(WhatAmI::Peer) - && res_hat!(res).router_qabls.len() == 1 - && res_hat!(res).router_qabls.contains_key(&tables.zid) - { - for mut face in tables - .faces - .values() - .cloned() - .collect::>>() - { - if face.whatami == WhatAmI::Peer - && face_hat!(face).local_qabls.contains_key(res) - && !res.session_ctxs.values().any(|s| { - face.zid != s.face.zid - && s.qabl.is_some() - && (s.face.whatami == WhatAmI::Client - || (s.face.whatami == WhatAmI::Peer - && hat!(tables).failover_brokering(s.face.zid, face.zid))) - }) - { - let 
wire_expr = Resource::get_best_key(res, "", face.id); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::UndeclareQueryable(UndeclareQueryable { - id: 0, // TODO - ext_wire_expr: WireExprType { wire_expr }, - }), - }, - res.expr(), - )); - - face_hat_mut!(&mut face).local_qabls.remove(res); - } - } - } -} - -fn propagate_forget_sourced_queryable( - tables: &mut Tables, - res: &mut Arc, - src_face: Option<&Arc>, - source: &ZenohId, - net_type: WhatAmI, -) { - let net = hat!(tables).get_net(net_type).unwrap(); - match net.get_idx(source) { - Some(tree_sid) => { - if net.trees.len() > tree_sid.index() { - send_forget_sourced_queryable_to_net_childs( - tables, - net, - &net.trees[tree_sid.index()].childs, - res, - src_face, - tree_sid.index() as NodeId, - ); - } else { - log::trace!( - "Propagating forget qabl {}: tree for node {} sid:{} not yet ready", - res.expr(), - tree_sid.index(), - source - ); - } - } - None => log::error!( - "Error propagating forget qabl {}: cannot get index of {}!", - res.expr(), - source - ), - } -} - -fn unregister_router_queryable(tables: &mut Tables, res: &mut Arc, router: &ZenohId) { - log::debug!( - "Unregister router queryable {} (router: {})", - res.expr(), - router, - ); - res_hat_mut!(res).router_qabls.remove(router); - - if res_hat!(res).router_qabls.is_empty() { - hat_mut!(tables) - .router_qabls - .retain(|qabl| !Arc::ptr_eq(qabl, res)); - - if hat!(tables).full_net(WhatAmI::Peer) { - undeclare_peer_queryable(tables, None, res, &tables.zid.clone()); - } - propagate_forget_simple_queryable(tables, res); - } - - propagate_forget_simple_queryable_to_peers(tables, res); -} - -fn undeclare_router_queryable( - tables: &mut Tables, - face: Option<&Arc>, - res: &mut Arc, - router: &ZenohId, -) { - if res_hat!(res).router_qabls.contains_key(router) { - unregister_router_queryable(tables, res, router); - propagate_forget_sourced_queryable(tables, res, face, router, WhatAmI::Router); - } -} - -fn forget_router_queryable( - tables: &TablesLock, - rtables: RwLockReadGuard, - face: &mut Arc, - expr: &WireExpr, - router: &ZenohId, -) { - match rtables.get_mapping(face, &expr.scope, expr.mapping) { - Some(prefix) => match Resource::get_resource(prefix, expr.suffix.as_ref()) { - Some(mut res) => { - drop(rtables); - let mut wtables = zwrite!(tables.tables); - undeclare_router_queryable(&mut wtables, Some(face), &mut res, router); - disable_matches_query_routes(&mut wtables, &mut res); - drop(wtables); - - let rtables = zread!(tables.tables); - let matches_query_routes = compute_matches_query_routes_(&rtables, &res); - drop(rtables); - - let wtables = zwrite!(tables.tables); - for (mut res, query_routes) in matches_query_routes { - get_mut_unchecked(&mut res) - .context_mut() - .update_query_routes(query_routes); - } - Resource::clean(&mut res); - drop(wtables); - } - None => log::error!("Undeclare unknown router queryable!"), - }, - None => log::error!("Undeclare router queryable with unknown scope!"), - } -} - -fn unregister_peer_queryable(tables: &mut Tables, res: &mut Arc, peer: &ZenohId) { - log::debug!("Unregister peer queryable {} (peer: {})", res.expr(), peer,); - res_hat_mut!(res).peer_qabls.remove(peer); - - if res_hat!(res).peer_qabls.is_empty() { - hat_mut!(tables) - .peer_qabls - .retain(|qabl| !Arc::ptr_eq(qabl, res)); - - if tables.whatami == WhatAmI::Peer { - propagate_forget_simple_queryable(tables, res); - } - 
}
-}
-
-fn undeclare_peer_queryable(
-    tables: &mut Tables,
-    face: Option<&Arc<FaceState>>,
-    res: &mut Arc<Resource>,
-    peer: &ZenohId,
-) {
-    if res_hat!(res).peer_qabls.contains_key(peer) {
-        unregister_peer_queryable(tables, res, peer);
-        propagate_forget_sourced_queryable(tables, res, face, peer, WhatAmI::Peer);
-    }
-}
-
-fn forget_peer_queryable(
-    tables: &TablesLock,
-    rtables: RwLockReadGuard<Tables>,
-    face: &mut Arc<FaceState>,
-    expr: &WireExpr,
-    peer: &ZenohId,
-) {
-    match rtables.get_mapping(face, &expr.scope, expr.mapping) {
-        Some(prefix) => match Resource::get_resource(prefix, expr.suffix.as_ref()) {
-            Some(mut res) => {
-                drop(rtables);
-                let mut wtables = zwrite!(tables.tables);
-                undeclare_peer_queryable(&mut wtables, Some(face), &mut res, peer);
-
-                if wtables.whatami == WhatAmI::Router {
-                    let client_qabls = res.session_ctxs.values().any(|ctx| ctx.qabl.is_some());
-                    let peer_qabls = remote_peer_qabls(&wtables, &res);
-                    let zid = wtables.zid;
-                    if !client_qabls && !peer_qabls {
-                        undeclare_router_queryable(&mut wtables, None, &mut res, &zid);
-                    } else {
-                        let local_info = local_router_qabl_info(&wtables, &res);
-                        register_router_queryable(&mut wtables, None, &mut res, &local_info, zid);
-                    }
-                }
-                drop(wtables);
-
-                let rtables = zread!(tables.tables);
-                let matches_query_routes = compute_matches_query_routes_(&rtables, &res);
-                drop(rtables);
-
-                let wtables = zwrite!(tables.tables);
-                for (mut res, query_routes) in matches_query_routes {
-                    get_mut_unchecked(&mut res)
-                        .context_mut()
-                        .update_query_routes(query_routes);
-                }
-                Resource::clean(&mut res);
-                drop(wtables);
-            }
-            None => log::error!("Undeclare unknown peer queryable!"),
-        },
-        None => log::error!("Undeclare peer queryable with unknown scope!"),
-    }
-}
-
 pub(super) fn undeclare_client_queryable(
     tables: &mut Tables,
     face: &mut Arc<FaceState>,
@@ -961,43 +243,12 @@ pub(super) fn undeclare_client_queryable(
     }
 
     let mut client_qabls = client_qabls(res);
-    let router_qabls = remote_router_qabls(tables, res);
-    let peer_qabls = remote_peer_qabls(tables, res);
-
-    match tables.whatami {
WhatAmI::Router => { - if face.whatami == WhatAmI::Client { - for qabl in hat!(tables).router_qabls.iter() { - if qabl.context.is_some() { - let info = local_qabl_info(tables, qabl, face); - face_hat_mut!(face).local_qabls.insert(qabl.clone(), info); - let key_expr = Resource::decl_key(qabl, face); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareQueryable(DeclareQueryable { - id: 0, // TODO - wire_expr: key_expr, - ext_info: info, - }), - }, - qabl.expr(), - )); - } - } - } else if face.whatami == WhatAmI::Peer && !hat!(tables).full_net(WhatAmI::Peer) { - for qabl in hat!(tables).router_qabls.iter() { - if qabl.context.is_some() - && (res_hat!(qabl).router_qabls.keys().any(|r| *r != tables.zid) - || qabl.session_ctxs.values().any(|s| { - s.qabl.is_some() - && (s.face.whatami == WhatAmI::Client - || (s.face.whatami == WhatAmI::Peer - && hat!(tables) - .failover_brokering(s.face.zid, face.zid))) - })) - { - let info = local_qabl_info(tables, qabl, face); - face_hat_mut!(face).local_qabls.insert(qabl.clone(), info); - let key_expr = Resource::decl_key(qabl, face); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareQueryable(DeclareQueryable { - id: 0, // TODO - wire_expr: key_expr, - ext_info: info, - }), - }, - qabl.expr(), - )); - } - } - } - } - WhatAmI::Peer => { - if hat!(tables).full_net(WhatAmI::Peer) { - if face.whatami == WhatAmI::Client { - for qabl in &hat!(tables).peer_qabls { - if qabl.context.is_some() { - let info = local_qabl_info(tables, qabl, face); - face_hat_mut!(face).local_qabls.insert(qabl.clone(), info); - let key_expr = Resource::decl_key(qabl, face); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareQueryable(DeclareQueryable { - id: 0, // TODO - wire_expr: key_expr, - ext_info: info, - }), - }, - qabl.expr(), - )); - } - } - } - } else { - for face in tables - .faces - .values() - .cloned() - .collect::>>() - { - for qabl in face_hat!(face).remote_qabls.iter() { - propagate_simple_queryable(tables, qabl, Some(&mut face.clone())); - } - } - } - } - WhatAmI::Client => { - for face in tables - .faces - .values() - .cloned() - .collect::>>() - { - for qabl in face_hat!(face).remote_qabls.iter() { - propagate_simple_queryable(tables, qabl, Some(&mut face.clone())); - } - } - } - } -} - -pub(super) fn queries_remove_node(tables: &mut Tables, node: &ZenohId, net_type: WhatAmI) { - match net_type { - WhatAmI::Router => { - let mut qabls = vec![]; - for res in hat!(tables).router_qabls.iter() { - for qabl in res_hat!(res).router_qabls.keys() { - if qabl == node { - qabls.push(res.clone()); - } - } - } - for mut res in qabls { - unregister_router_queryable(tables, &mut res, node); - - let matches_query_routes = compute_matches_query_routes_(tables, &res); - for (mut res, query_routes) in matches_query_routes { - get_mut_unchecked(&mut res) - .context_mut() - .update_query_routes(query_routes); - } - Resource::clean(&mut res); - } - } - WhatAmI::Peer => { - let mut qabls = vec![]; - for res in hat!(tables).router_qabls.iter() { - for qabl in res_hat!(res).router_qabls.keys() { - if qabl == node { - qabls.push(res.clone()); 
- } - } - } - for mut res in qabls { - unregister_peer_queryable(tables, &mut res, node); - - if tables.whatami == WhatAmI::Router { - let client_qabls = res.session_ctxs.values().any(|ctx| ctx.qabl.is_some()); - let peer_qabls = remote_peer_qabls(tables, &res); - if !client_qabls && !peer_qabls { - undeclare_router_queryable(tables, None, &mut res, &tables.zid.clone()); - } else { - let local_info = local_router_qabl_info(tables, &res); - register_router_queryable(tables, None, &mut res, &local_info, tables.zid); - } - } - - let matches_query_routes = compute_matches_query_routes_(tables, &res); - for (mut res, query_routes) in matches_query_routes { - get_mut_unchecked(&mut res) - .context_mut() - .update_query_routes(query_routes); - } - Resource::clean(&mut res) - } - } - _ => (), - } -} - -pub(super) fn queries_linkstate_change(tables: &mut Tables, zid: &ZenohId, links: &[ZenohId]) { - if let Some(src_face) = tables.get_face(zid) { - if hat!(tables).router_peers_failover_brokering - && tables.whatami == WhatAmI::Router - && src_face.whatami == WhatAmI::Peer - { - for res in &face_hat!(src_face).remote_qabls { - let client_qabls = res - .session_ctxs - .values() - .any(|ctx| ctx.face.whatami == WhatAmI::Client && ctx.qabl.is_some()); - if !remote_router_qabls(tables, res) && !client_qabls { - for ctx in get_mut_unchecked(&mut res.clone()) - .session_ctxs - .values_mut() - { - let dst_face = &mut get_mut_unchecked(ctx).face; - if dst_face.whatami == WhatAmI::Peer && src_face.zid != dst_face.zid { - if face_hat!(dst_face).local_qabls.contains_key(res) { - let forget = !HatTables::failover_brokering_to(links, dst_face.zid) - && { - let ctx_links = hat!(tables) - .peers_net - .as_ref() - .map(|net| net.get_links(dst_face.zid)) - .unwrap_or_else(|| &[]); - res.session_ctxs.values().any(|ctx2| { - ctx2.face.whatami == WhatAmI::Peer - && ctx2.qabl.is_some() - && HatTables::failover_brokering_to( - ctx_links, - ctx2.face.zid, - ) - }) - }; - if forget { - let wire_expr = Resource::get_best_key(res, "", dst_face.id); - dst_face.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::UndeclareQueryable( - UndeclareQueryable { - id: 0, // TODO - ext_wire_expr: WireExprType { wire_expr }, - }, - ), - }, - res.expr(), - )); - - face_hat_mut!(dst_face).local_qabls.remove(res); - } - } else if HatTables::failover_brokering_to(links, ctx.face.zid) { - let dst_face = &mut get_mut_unchecked(ctx).face; - let info = local_qabl_info(tables, res, dst_face); - face_hat_mut!(dst_face) - .local_qabls - .insert(res.clone(), info); - let key_expr = Resource::decl_key(res, dst_face); - dst_face.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareQueryable(DeclareQueryable { - id: 0, // TODO - wire_expr: key_expr, - ext_info: info, - }), - }, - res.expr(), - )); - } - } - } - } - } - } - } -} - -pub(super) fn queries_tree_change( - tables: &mut Tables, - new_childs: &[Vec], - net_type: WhatAmI, -) { - // propagate qabls to new childs - for (tree_sid, tree_childs) in new_childs.iter().enumerate() { - if !tree_childs.is_empty() { - let net = hat!(tables).get_net(net_type).unwrap(); - let tree_idx = NodeIndex::new(tree_sid); - if net.graph.contains_node(tree_idx) { - let tree_id = net.graph[tree_idx].zid; - - let qabls_res = match net_type { - 
WhatAmI::Router => &hat!(tables).router_qabls, - _ => &hat!(tables).peer_qabls, - }; - - for res in qabls_res { - let qabls = match net_type { - WhatAmI::Router => &res_hat!(res).router_qabls, - _ => &res_hat!(res).peer_qabls, - }; - if let Some(qabl_info) = qabls.get(&tree_id) { - send_sourced_queryable_to_net_childs( - tables, - net, - tree_childs, - res, - qabl_info, - None, - tree_sid as NodeId, - ); - } - } - } +pub(super) fn queries_new_face(tables: &mut Tables, _face: &mut Arc) { + for face in tables + .faces + .values() + .cloned() + .collect::>>() + { + for qabl in face_hat!(face).remote_qabls.iter() { + propagate_simple_queryable(tables, qabl, Some(&mut face.clone())); } } - - // recompute routes - compute_query_routes_from(tables, &mut tables.root_res.clone()); -} - -#[inline] -#[allow(clippy::too_many_arguments)] -fn insert_target_for_qabls( - route: &mut QueryTargetQablSet, - expr: &mut RoutingExpr, - tables: &Tables, - net: &Network, - source: NodeId, - qabls: &HashMap, - complete: bool, -) { - if net.trees.len() > source as usize { - for (qabl, qabl_info) in qabls { - if let Some(qabl_idx) = net.get_idx(qabl) { - if net.trees[source as usize].directions.len() > qabl_idx.index() { - if let Some(direction) = net.trees[source as usize].directions[qabl_idx.index()] - { - if net.graph.contains_node(direction) { - if let Some(face) = tables.get_face(&net.graph[direction].zid) { - if net.distances.len() > qabl_idx.index() { - let key_expr = - Resource::get_best_key(expr.prefix, expr.suffix, face.id); - route.push(QueryTargetQabl { - direction: (face.clone(), key_expr.to_owned(), source), - complete: if complete { - qabl_info.complete as u64 - } else { - 0 - }, - distance: net.distances[qabl_idx.index()], - }); - } - } - } - } - } - } - } - } else { - log::trace!("Tree for node sid:{} not yet ready", source); - } } lazy_static::lazy_static! 
{ @@ -1400,28 +328,10 @@ impl HatQueriesTrait for HatCode { face: &mut Arc, expr: &WireExpr, qabl_info: &QueryableInfo, - node_id: NodeId, + _node_id: NodeId, ) { let rtables = zread!(tables.tables); - match (rtables.whatami, face.whatami) { - (WhatAmI::Router, WhatAmI::Router) => { - if let Some(router) = get_router(&rtables, face, node_id) { - declare_router_queryable(tables, rtables, face, expr, qabl_info, router) - } - } - (WhatAmI::Router, WhatAmI::Peer) - | (WhatAmI::Peer, WhatAmI::Router) - | (WhatAmI::Peer, WhatAmI::Peer) => { - if hat!(rtables).full_net(WhatAmI::Peer) { - if let Some(peer) = get_peer(&rtables, face, node_id) { - declare_peer_queryable(tables, rtables, face, expr, qabl_info, peer) - } - } else { - declare_client_queryable(tables, rtables, face, expr, qabl_info) - } - } - _ => declare_client_queryable(tables, rtables, face, expr, qabl_info), - } + declare_client_queryable(tables, rtables, face, expr, qabl_info); } fn forget_queryable( @@ -1429,28 +339,10 @@ impl HatQueriesTrait for HatCode { tables: &TablesLock, face: &mut Arc, expr: &WireExpr, - node_id: NodeId, + _node_id: NodeId, ) { let rtables = zread!(tables.tables); - match (rtables.whatami, face.whatami) { - (WhatAmI::Router, WhatAmI::Router) => { - if let Some(router) = get_router(&rtables, face, node_id) { - forget_router_queryable(tables, rtables, face, expr, &router) - } - } - (WhatAmI::Router, WhatAmI::Peer) - | (WhatAmI::Peer, WhatAmI::Router) - | (WhatAmI::Peer, WhatAmI::Peer) => { - if hat!(rtables).full_net(WhatAmI::Peer) { - if let Some(peer) = get_peer(&rtables, face, node_id) { - forget_peer_queryable(tables, rtables, face, expr, &peer) - } - } else { - forget_client_queryable(tables, rtables, face, expr) - } - } - _ => forget_client_queryable(tables, rtables, face, expr), - } + forget_client_queryable(tables, rtables, face, expr); } fn compute_query_route( @@ -1485,95 +377,21 @@ impl HatQueriesTrait for HatCode { .map(|ctx| Cow::from(&ctx.matches)) .unwrap_or_else(|| Cow::from(Resource::get_matches(tables, &key_expr))); - let master = tables.whatami != WhatAmI::Router - || !hat!(tables).full_net(WhatAmI::Peer) - || *hat!(tables).elect_router(&tables.zid, &key_expr, hat!(tables).shared_nodes.iter()) - == tables.zid; - for mres in matches.iter() { let mres = mres.upgrade().unwrap(); let complete = DEFAULT_INCLUDER.includes(mres.expr().as_bytes(), key_expr.as_bytes()); - if tables.whatami == WhatAmI::Router { - if master || source_type == WhatAmI::Router { - let net = hat!(tables).routers_net.as_ref().unwrap(); - let router_source = match source_type { - WhatAmI::Router => source, - _ => net.idx.index() as NodeId, - }; - insert_target_for_qabls( - &mut route, - expr, - tables, - net, - router_source, - &res_hat!(mres).router_qabls, - complete, - ); - } - - if (master || source_type != WhatAmI::Router) - && hat!(tables).full_net(WhatAmI::Peer) - { - let net = hat!(tables).peers_net.as_ref().unwrap(); - let peer_source = match source_type { - WhatAmI::Peer => source, - _ => net.idx.index() as NodeId, - }; - insert_target_for_qabls( - &mut route, - expr, - tables, - net, - peer_source, - &res_hat!(mres).peer_qabls, - complete, - ); - } - } - - if tables.whatami == WhatAmI::Peer && hat!(tables).full_net(WhatAmI::Peer) { - let net = hat!(tables).peers_net.as_ref().unwrap(); - let peer_source = match source_type { - WhatAmI::Router | WhatAmI::Peer => source, - _ => net.idx.index() as NodeId, - }; - insert_target_for_qabls( - &mut route, - expr, - tables, - net, - peer_source, - 
&res_hat!(mres).peer_qabls, - complete, - ); - } - - if tables.whatami != WhatAmI::Router || master || source_type == WhatAmI::Router { - for (sid, context) in &mres.session_ctxs { - if match tables.whatami { - WhatAmI::Router => context.face.whatami != WhatAmI::Router, - _ => { - source_type == WhatAmI::Client - || context.face.whatami == WhatAmI::Client - } - } { - let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, *sid); - if let Some(qabl_info) = context.qabl.as_ref() { - route.push(QueryTargetQabl { - direction: ( - context.face.clone(), - key_expr.to_owned(), - NodeId::default(), - ), - complete: if complete { - qabl_info.complete as u64 - } else { - 0 - }, - distance: 0.5, - }); - } - } + for (sid, context) in &mres.session_ctxs { + let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, *sid); + if let Some(qabl_info) = context.qabl.as_ref() { + route.push(QueryTargetQabl { + direction: (context.face.clone(), key_expr.to_owned(), NodeId::default()), + complete: if complete { + qabl_info.complete as u64 + } else { + 0 + }, + distance: 0.5, + }); } } } @@ -1610,11 +428,7 @@ impl HatQueriesTrait for HatCode { .unwrap_or_else(|| Cow::from(Resource::get_matches(tables, &key_expr))); for mres in matches.iter() { let mres = mres.upgrade().unwrap(); - if (mres.context.is_some() - && (!res_hat!(mres).router_subs.is_empty() - || !res_hat!(mres).peer_subs.is_empty())) - || mres.session_ctxs.values().any(|ctx| ctx.subs.is_some()) - { + if mres.session_ctxs.values().any(|ctx| ctx.subs.is_some()) { result.push((Resource::get_best_key(&mres, "", face.id), ZBuf::default())); } } @@ -1631,73 +445,8 @@ impl HatQueriesTrait for HatCode { client_query_route: None, }; let mut expr = RoutingExpr::new(res, ""); - if tables.whatami == WhatAmI::Router { - let indexes = hat!(tables) - .routers_net - .as_ref() - .unwrap() - .graph - .node_indices() - .collect::>(); - let max_idx = indexes.iter().max().unwrap(); - routes - .routers_query_routes - .resize_with(max_idx.index() + 1, || Arc::new(QueryTargetQablSet::new())); - - for idx in &indexes { - routes.routers_query_routes[idx.index()] = self.compute_query_route( - tables, - &mut expr, - idx.index() as NodeId, - WhatAmI::Router, - ); - } - - routes.peer_query_route = - Some(self.compute_query_route(tables, &mut expr, NodeId::default(), WhatAmI::Peer)); - } - if (tables.whatami == WhatAmI::Router || tables.whatami == WhatAmI::Peer) - && hat!(tables).full_net(WhatAmI::Peer) - { - let indexes = hat!(tables) - .peers_net - .as_ref() - .unwrap() - .graph - .node_indices() - .collect::>(); - let max_idx = indexes.iter().max().unwrap(); - routes - .peers_query_routes - .resize_with(max_idx.index() + 1, || Arc::new(QueryTargetQablSet::new())); - - for idx in &indexes { - routes.peers_query_routes[idx.index()] = self.compute_query_route( - tables, - &mut expr, - idx.index() as NodeId, - WhatAmI::Peer, - ); - } - } - if tables.whatami == WhatAmI::Peer && !hat!(tables).full_net(WhatAmI::Peer) { - routes.client_query_route = Some(self.compute_query_route( - tables, - &mut expr, - NodeId::default(), - WhatAmI::Client, - )); - routes.peer_query_route = - Some(self.compute_query_route(tables, &mut expr, NodeId::default(), WhatAmI::Peer)); - } - if tables.whatami == WhatAmI::Client { - routes.client_query_route = Some(self.compute_query_route( - tables, - &mut expr, - NodeId::default(), - WhatAmI::Client, - )); - } + routes.client_query_route = + Some(self.compute_query_route(tables, &mut expr, NodeId::default(), WhatAmI::Client)); routes } @@ -1706,83 
+455,12 @@ impl HatQueriesTrait for HatCode { let mut res_mut = res.clone(); let res_mut = get_mut_unchecked(&mut res_mut); let mut expr = RoutingExpr::new(res, ""); - if tables.whatami == WhatAmI::Router { - let indexes = hat!(tables) - .routers_net - .as_ref() - .unwrap() - .graph - .node_indices() - .collect::>(); - let max_idx = indexes.iter().max().unwrap(); - let routers_query_routes = &mut res_mut.context_mut().routers_query_routes; - routers_query_routes.clear(); - routers_query_routes - .resize_with(max_idx.index() + 1, || Arc::new(QueryTargetQablSet::new())); - - for idx in &indexes { - routers_query_routes[idx.index()] = self.compute_query_route( - tables, - &mut expr, - idx.index() as NodeId, - WhatAmI::Router, - ); - } - - res_mut.context_mut().peer_query_route = Some(self.compute_query_route( - tables, - &mut expr, - NodeId::default(), - WhatAmI::Peer, - )); - } - if (tables.whatami == WhatAmI::Router || tables.whatami == WhatAmI::Peer) - && hat!(tables).full_net(WhatAmI::Peer) - { - let indexes = hat!(tables) - .peers_net - .as_ref() - .unwrap() - .graph - .node_indices() - .collect::>(); - let max_idx = indexes.iter().max().unwrap(); - let peers_query_routes = &mut res_mut.context_mut().peers_query_routes; - peers_query_routes.clear(); - peers_query_routes - .resize_with(max_idx.index() + 1, || Arc::new(QueryTargetQablSet::new())); - - for idx in &indexes { - peers_query_routes[idx.index()] = self.compute_query_route( - tables, - &mut expr, - idx.index() as NodeId, - WhatAmI::Peer, - ); - } - } - if tables.whatami == WhatAmI::Peer && !hat!(tables).full_net(WhatAmI::Peer) { - res_mut.context_mut().client_query_route = Some(self.compute_query_route( - tables, - &mut expr, - NodeId::default(), - WhatAmI::Client, - )); - res_mut.context_mut().peer_query_route = Some(self.compute_query_route( - tables, - &mut expr, - NodeId::default(), - WhatAmI::Peer, - )); - } - if tables.whatami == WhatAmI::Client { - res_mut.context_mut().client_query_route = Some(self.compute_query_route( - tables, - &mut expr, - NodeId::default(), - WhatAmI::Client, - )); - } + res_mut.context_mut().client_query_route = Some(self.compute_query_route( + tables, + &mut expr, + NodeId::default(), + WhatAmI::Client, + )); } } } diff --git a/zenoh/src/net/routing/hat/linkstate_peer/mod.rs b/zenoh/src/net/routing/hat/linkstate_peer/mod.rs index 3cf318831b..4b20278d1d 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/mod.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/mod.rs @@ -18,13 +18,9 @@ //! //! [Click here for Zenoh's documentation](../zenoh/index.html) use self::{ - network::{shared_nodes, Network}, - pubsub::{ - pubsub_linkstate_change, pubsub_new_face, pubsub_remove_node, undeclare_client_subscription, - }, - queries::{ - queries_linkstate_change, queries_new_face, queries_remove_node, undeclare_client_queryable, - }, + network::Network, + pubsub::{pubsub_new_face, pubsub_remove_node, undeclare_client_subscription}, + queries::{queries_new_face, queries_remove_node, undeclare_client_queryable}, }; use super::{ super::dispatcher::{ @@ -42,8 +38,7 @@ use crate::{ use async_std::task::JoinHandle; use std::{ any::Any, - collections::{hash_map::DefaultHasher, HashMap, HashSet}, - hash::Hasher, + collections::{HashMap, HashSet}, sync::Arc, }; use zenoh_config::{unwrap_or_default, ModeDependent, WhatAmI, WhatAmIMatcher, ZenohId}; @@ -110,160 +105,41 @@ macro_rules! 
face_hat_mut { use face_hat_mut; struct HatTables { - router_subs: HashSet>, peer_subs: HashSet>, - router_qabls: HashSet>, peer_qabls: HashSet>, - routers_net: Option, peers_net: Option, - shared_nodes: Vec, - routers_trees_task: Option>, peers_trees_task: Option>, - router_peers_failover_brokering: bool, } impl HatTables { - fn new(router_peers_failover_brokering: bool) -> Self { + fn new() -> Self { Self { - router_subs: HashSet::new(), peer_subs: HashSet::new(), - router_qabls: HashSet::new(), peer_qabls: HashSet::new(), - routers_net: None, peers_net: None, - shared_nodes: vec![], - routers_trees_task: None, peers_trees_task: None, - router_peers_failover_brokering, - } - } - - #[inline] - fn get_net(&self, net_type: WhatAmI) -> Option<&Network> { - match net_type { - WhatAmI::Router => self.routers_net.as_ref(), - WhatAmI::Peer => self.peers_net.as_ref(), - _ => None, } } - #[inline] - fn full_net(&self, net_type: WhatAmI) -> bool { - match net_type { - WhatAmI::Router => self - .routers_net - .as_ref() - .map(|net| net.full_linkstate) - .unwrap_or(false), - WhatAmI::Peer => self - .peers_net - .as_ref() - .map(|net| net.full_linkstate) - .unwrap_or(false), - _ => false, - } - } - - #[inline] - fn get_router_links(&self, peer: ZenohId) -> impl Iterator + '_ { - self.peers_net - .as_ref() - .unwrap() - .get_links(peer) - .iter() - .filter(move |nid| { - if let Some(node) = self.routers_net.as_ref().unwrap().get_node(nid) { - node.whatami.unwrap_or(WhatAmI::Router) == WhatAmI::Router - } else { - false - } - }) - } - - #[inline] - fn elect_router<'a>( - &'a self, - self_zid: &'a ZenohId, - key_expr: &str, - mut routers: impl Iterator, - ) -> &'a ZenohId { - match routers.next() { - None => self_zid, - Some(router) => { - let hash = |r: &ZenohId| { - let mut hasher = DefaultHasher::new(); - for b in key_expr.as_bytes() { - hasher.write_u8(*b); - } - for b in &r.to_le_bytes()[..r.size()] { - hasher.write_u8(*b); - } - hasher.finish() - }; - let mut res = router; - let mut h = None; - for router2 in routers { - let h2 = hash(router2); - if h2 > *h.get_or_insert_with(|| hash(res)) { - res = router2; - h = Some(h2); - } - } - res - } - } - } - - #[inline] - fn failover_brokering_to(source_links: &[ZenohId], dest: ZenohId) -> bool { - // if source_links is empty then gossip is probably disabled in source peer - !source_links.is_empty() && !source_links.contains(&dest) - } - - #[inline] - fn failover_brokering(&self, peer1: ZenohId, peer2: ZenohId) -> bool { - self.router_peers_failover_brokering - && self - .peers_net - .as_ref() - .map(|net| HatTables::failover_brokering_to(net.get_links(peer1), peer2)) - .unwrap_or(false) - } - - fn schedule_compute_trees(&mut self, tables_ref: Arc, net_type: WhatAmI) { + fn schedule_compute_trees(&mut self, tables_ref: Arc) { log::trace!("Schedule computations"); - if (net_type == WhatAmI::Router && self.routers_trees_task.is_none()) - || (net_type == WhatAmI::Peer && self.peers_trees_task.is_none()) - { + if self.peers_trees_task.is_none() { let task = Some(async_std::task::spawn(async move { async_std::task::sleep(std::time::Duration::from_millis(*TREES_COMPUTATION_DELAY)) .await; let mut tables = zwrite!(tables_ref.tables); log::trace!("Compute trees"); - let new_childs = match net_type { - WhatAmI::Router => hat_mut!(tables) - .routers_net - .as_mut() - .unwrap() - .compute_trees(), - _ => hat_mut!(tables).peers_net.as_mut().unwrap().compute_trees(), - }; + let new_childs = hat_mut!(tables).peers_net.as_mut().unwrap().compute_trees(); 
log::trace!("Compute routes"); - pubsub::pubsub_tree_change(&mut tables, &new_childs, net_type); - queries::queries_tree_change(&mut tables, &new_childs, net_type); + pubsub::pubsub_tree_change(&mut tables, &new_childs); + queries::queries_tree_change(&mut tables, &new_childs); log::trace!("Computations completed"); - match net_type { - WhatAmI::Router => hat_mut!(tables).routers_trees_task = None, - _ => hat_mut!(tables).peers_trees_task = None, - }; + hat_mut!(tables).peers_trees_task = None; })); - match net_type { - WhatAmI::Router => self.routers_trees_task = task, - _ => self.peers_trees_task = task, - }; + self.peers_trees_task = task; } } } @@ -282,47 +158,26 @@ impl HatBaseTrait for HatCode { WhatAmIMatcher::empty() }; - let router_full_linkstate = whatami == WhatAmI::Router; let peer_full_linkstate = whatami != WhatAmI::Client && unwrap_or_default!(config.routing().peer().mode()) == *"linkstate"; let router_peers_failover_brokering = unwrap_or_default!(config.routing().router().peers_failover_brokering()); drop(config); - if router_full_linkstate | gossip { - hat_mut!(tables).routers_net = Some(Network::new( - "[Routers network]".to_string(), - tables.zid, - runtime.clone(), - router_full_linkstate, - router_peers_failover_brokering, - gossip, - gossip_multihop, - autoconnect, - )); - } - if peer_full_linkstate | gossip { - hat_mut!(tables).peers_net = Some(Network::new( - "[Peers network]".to_string(), - tables.zid, - runtime, - peer_full_linkstate, - router_peers_failover_brokering, - gossip, - gossip_multihop, - autoconnect, - )); - } - if router_full_linkstate && peer_full_linkstate { - hat_mut!(tables).shared_nodes = shared_nodes( - hat!(tables).routers_net.as_ref().unwrap(), - hat!(tables).peers_net.as_ref().unwrap(), - ); - } + hat_mut!(tables).peers_net = Some(Network::new( + "[Peers network]".to_string(), + tables.zid, + runtime, + peer_full_linkstate, + router_peers_failover_brokering, + gossip, + gossip_multihop, + autoconnect, + )); } - fn new_tables(&self, router_peers_failover_brokering: bool) -> Box { - Box::new(HatTables::new(router_peers_failover_brokering)) + fn new_tables(&self, _router_peers_failover_brokering: bool) -> Box { + Box::new(HatTables::new()) } fn new_face(&self) -> Box { @@ -351,47 +206,22 @@ impl HatBaseTrait for HatCode { face: &mut Face, transport: &TransportUnicast, ) -> ZResult<()> { - let link_id = match (tables.whatami, face.state.whatami) { - (WhatAmI::Router, WhatAmI::Router) => hat_mut!(tables) - .routers_net - .as_mut() - .unwrap() - .add_link(transport.clone()), - (WhatAmI::Router, WhatAmI::Peer) - | (WhatAmI::Peer, WhatAmI::Router) - | (WhatAmI::Peer, WhatAmI::Peer) => { - if let Some(net) = hat_mut!(tables).peers_net.as_mut() { - net.add_link(transport.clone()) - } else { - 0 - } + let link_id = if face.state.whatami != WhatAmI::Client { + if let Some(net) = hat_mut!(tables).peers_net.as_mut() { + net.add_link(transport.clone()) + } else { + 0 } - _ => 0, + } else { + 0 }; - if hat!(tables).full_net(WhatAmI::Router) && hat!(tables).full_net(WhatAmI::Peer) { - hat_mut!(tables).shared_nodes = shared_nodes( - hat!(tables).routers_net.as_ref().unwrap(), - hat!(tables).peers_net.as_ref().unwrap(), - ); - } - face_hat_mut!(&mut face.state).link_id = link_id; pubsub_new_face(tables, &mut face.state); queries_new_face(tables, &mut face.state); - match (tables.whatami, face.state.whatami) { - (WhatAmI::Router, WhatAmI::Router) => { - hat_mut!(tables).schedule_compute_trees(tables_ref.clone(), WhatAmI::Router); - } - (WhatAmI::Router, 
WhatAmI::Peer) - | (WhatAmI::Peer, WhatAmI::Router) - | (WhatAmI::Peer, WhatAmI::Peer) => { - if hat_mut!(tables).full_net(WhatAmI::Peer) { - hat_mut!(tables).schedule_compute_trees(tables_ref.clone(), WhatAmI::Peer); - } - } - _ => (), + if face.state.whatami != WhatAmI::Client { + hat_mut!(tables).schedule_compute_trees(tables_ref.clone()); } Ok(()) } @@ -515,74 +345,17 @@ impl HatBaseTrait for HatCode { let list: LinkStateList = codec.read(&mut reader).unwrap(); let whatami = transport.get_whatami()?; - match (tables.whatami, whatami) { - (WhatAmI::Router, WhatAmI::Router) => { - for (_, removed_node) in hat_mut!(tables) - .routers_net - .as_mut() - .unwrap() - .link_states(list.link_states, zid) - .removed_nodes - { - pubsub_remove_node(tables, &removed_node.zid, WhatAmI::Router); - queries_remove_node(tables, &removed_node.zid, WhatAmI::Router); - } + if whatami != WhatAmI::Client { + if let Some(net) = hat_mut!(tables).peers_net.as_mut() { + let changes = net.link_states(list.link_states, zid); - if hat!(tables).full_net(WhatAmI::Peer) { - hat_mut!(tables).shared_nodes = shared_nodes( - hat!(tables).routers_net.as_ref().unwrap(), - hat!(tables).peers_net.as_ref().unwrap(), - ); + for (_, removed_node) in changes.removed_nodes { + pubsub_remove_node(tables, &removed_node.zid); + queries_remove_node(tables, &removed_node.zid); } - hat_mut!(tables) - .schedule_compute_trees(tables_ref.clone(), WhatAmI::Router); + hat_mut!(tables).schedule_compute_trees(tables_ref.clone()); } - (WhatAmI::Router, WhatAmI::Peer) - | (WhatAmI::Peer, WhatAmI::Router) - | (WhatAmI::Peer, WhatAmI::Peer) => { - if let Some(net) = hat_mut!(tables).peers_net.as_mut() { - let changes = net.link_states(list.link_states, zid); - if hat!(tables).full_net(WhatAmI::Peer) { - for (_, removed_node) in changes.removed_nodes { - pubsub_remove_node( - tables, - &removed_node.zid, - WhatAmI::Peer, - ); - queries_remove_node( - tables, - &removed_node.zid, - WhatAmI::Peer, - ); - } - - if tables.whatami == WhatAmI::Router { - hat_mut!(tables).shared_nodes = shared_nodes( - hat!(tables).routers_net.as_ref().unwrap(), - hat!(tables).peers_net.as_ref().unwrap(), - ); - } - - hat_mut!(tables) - .schedule_compute_trees(tables_ref.clone(), WhatAmI::Peer); - } else { - for (_, updated_node) in changes.updated_nodes { - pubsub_linkstate_change( - tables, - &updated_node.zid, - &updated_node.links, - ); - queries_linkstate_change( - tables, - &updated_node.zid, - &updated_node.links, - ); - } - } - } - } - _ => (), }; } } @@ -597,39 +370,11 @@ impl HatBaseTrait for HatCode { face: &FaceState, routing_context: NodeId, ) -> NodeId { - match tables.whatami { - WhatAmI::Router => match face.whatami { - WhatAmI::Router => hat!(tables) - .routers_net - .as_ref() - .unwrap() - .get_local_context(routing_context, face_hat!(face).link_id), - WhatAmI::Peer => { - if hat!(tables).full_net(WhatAmI::Peer) { - hat!(tables) - .peers_net - .as_ref() - .unwrap() - .get_local_context(routing_context, face_hat!(face).link_id) - } else { - 0 - } - } - _ => 0, - }, - WhatAmI::Peer => { - if hat!(tables).full_net(WhatAmI::Peer) { - hat!(tables) - .peers_net - .as_ref() - .unwrap() - .get_local_context(routing_context, face_hat!(face).link_id) - } else { - 0 - } - } - _ => 0, - } + hat!(tables) + .peers_net + .as_ref() + .unwrap() + .get_local_context(routing_context, face_hat!(face).link_id) } fn closing( @@ -640,56 +385,18 @@ impl HatBaseTrait for HatCode { ) -> ZResult<()> { match (transport.get_zid(), transport.get_whatami()) { (Ok(zid), Ok(whatami)) => 
{ - match (tables.whatami, whatami) { - (WhatAmI::Router, WhatAmI::Router) => { - for (_, removed_node) in hat_mut!(tables) - .routers_net - .as_mut() - .unwrap() - .remove_link(&zid) - { - pubsub_remove_node(tables, &removed_node.zid, WhatAmI::Router); - queries_remove_node(tables, &removed_node.zid, WhatAmI::Router); - } - - if hat!(tables).full_net(WhatAmI::Peer) { - hat_mut!(tables).shared_nodes = shared_nodes( - hat!(tables).routers_net.as_ref().unwrap(), - hat!(tables).peers_net.as_ref().unwrap(), - ); - } - - hat_mut!(tables) - .schedule_compute_trees(tables_ref.clone(), WhatAmI::Router); + if whatami != WhatAmI::Client { + for (_, removed_node) in hat_mut!(tables) + .peers_net + .as_mut() + .unwrap() + .remove_link(&zid) + { + pubsub_remove_node(tables, &removed_node.zid); + queries_remove_node(tables, &removed_node.zid); } - (WhatAmI::Router, WhatAmI::Peer) - | (WhatAmI::Peer, WhatAmI::Router) - | (WhatAmI::Peer, WhatAmI::Peer) => { - if hat!(tables).full_net(WhatAmI::Peer) { - for (_, removed_node) in hat_mut!(tables) - .peers_net - .as_mut() - .unwrap() - .remove_link(&zid) - { - pubsub_remove_node(tables, &removed_node.zid, WhatAmI::Peer); - queries_remove_node(tables, &removed_node.zid, WhatAmI::Peer); - } - - if tables.whatami == WhatAmI::Router { - hat_mut!(tables).shared_nodes = shared_nodes( - hat!(tables).routers_net.as_ref().unwrap(), - hat!(tables).peers_net.as_ref().unwrap(), - ); - } - hat_mut!(tables) - .schedule_compute_trees(tables_ref.clone(), WhatAmI::Peer); - } else if let Some(net) = hat_mut!(tables).peers_net.as_mut() { - net.remove_link(&zid); - } - } - _ => (), + hat_mut!(tables).schedule_compute_trees(tables_ref.clone()); }; } (_, _) => log::error!("Closed transport in session closing!"), @@ -702,56 +409,29 @@ impl HatBaseTrait for HatCode { } #[inline] - fn ingress_filter(&self, tables: &Tables, face: &FaceState, expr: &mut RoutingExpr) -> bool { - tables.whatami != WhatAmI::Router - || face.whatami != WhatAmI::Peer - || hat!(tables).peers_net.is_none() - || tables.zid - == *hat!(tables).elect_router( - &tables.zid, - expr.full_expr(), - hat!(tables).get_router_links(face.zid), - ) + fn ingress_filter(&self, _tables: &Tables, _face: &FaceState, _expr: &mut RoutingExpr) -> bool { + true } #[inline] fn egress_filter( &self, - tables: &Tables, + _tables: &Tables, src_face: &FaceState, out_face: &Arc, - expr: &mut RoutingExpr, + _expr: &mut RoutingExpr, ) -> bool { - if src_face.id != out_face.id + src_face.id != out_face.id && match (src_face.mcast_group.as_ref(), out_face.mcast_group.as_ref()) { (Some(l), Some(r)) => l != r, _ => true, } - { - let dst_master = tables.whatami != WhatAmI::Router - || out_face.whatami != WhatAmI::Peer - || hat!(tables).peers_net.is_none() - || tables.zid - == *hat!(tables).elect_router( - &tables.zid, - expr.full_expr(), - hat!(tables).get_router_links(out_face.zid), - ); - - return dst_master - && (src_face.whatami != WhatAmI::Peer - || out_face.whatami != WhatAmI::Peer - || hat!(tables).full_net(WhatAmI::Peer) - || hat!(tables).failover_brokering(src_face.zid, out_face.zid)); - } - false } } struct HatContext { router_subs: HashSet, peer_subs: HashSet, - router_qabls: HashMap, peer_qabls: HashMap, } @@ -760,7 +440,6 @@ impl HatContext { Self { router_subs: HashSet::new(), peer_subs: HashSet::new(), - router_qabls: HashMap::new(), peer_qabls: HashMap::new(), } } @@ -786,33 +465,6 @@ impl HatFace { } } -fn get_router(tables: &Tables, face: &Arc, nodeid: NodeId) -> Option { - match hat!(tables) - .routers_net - .as_ref() - 
.unwrap() - .get_link(face_hat!(face).link_id) - { - Some(link) => match link.get_zid(&(nodeid as u64)) { - Some(router) => Some(*router), - None => { - log::error!( - "Received router declaration with unknown routing context id {}", - nodeid - ); - None - } - }, - None => { - log::error!( - "Could not find corresponding link in routers network for {}", - face - ); - None - } - } -} - fn get_peer(tables: &Tables, face: &Arc, nodeid: NodeId) -> Option { match hat!(tables) .peers_net diff --git a/zenoh/src/net/routing/hat/linkstate_peer/network.rs b/zenoh/src/net/routing/hat/linkstate_peer/network.rs index 421850dc87..ac2d068953 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/network.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/network.rs @@ -17,7 +17,7 @@ use crate::net::routing::dispatcher::tables::NodeId; use crate::net::runtime::Runtime; use async_std::task; use petgraph::graph::NodeIndex; -use petgraph::visit::{IntoNodeReferences, VisitMap, Visitable}; +use petgraph::visit::{VisitMap, Visitable}; use std::convert::TryInto; use vec_map::VecMap; use zenoh_buffers::writer::{DidntWrite, HasWriter}; @@ -168,11 +168,6 @@ impl Network { // ) // } - #[inline] - pub(super) fn get_node(&self, zid: &ZenohId) -> Option<&Node> { - self.graph.node_weights().find(|weight| weight.zid == *zid) - } - #[inline] pub(super) fn get_idx(&self, zid: &ZenohId) -> Option { self.graph @@ -984,24 +979,4 @@ impl Network { new_childs } - - #[inline] - pub(super) fn get_links(&self, node: ZenohId) -> &[ZenohId] { - self.get_node(&node) - .map(|node| &node.links[..]) - .unwrap_or_default() - } -} - -#[inline] -pub(super) fn shared_nodes(net1: &Network, net2: &Network) -> Vec { - net1.graph - .node_references() - .filter_map(|(_, node1)| { - net2.graph - .node_references() - .any(|(_, node2)| node1.zid == node2.zid) - .then_some(node1.zid) - }) - .collect() } diff --git a/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs b/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs index 189e6cb6e8..40bd6d24d7 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs @@ -13,7 +13,7 @@ // use super::network::Network; use super::{face_hat, face_hat_mut, hat, hat_mut, res_hat, res_hat_mut}; -use super::{get_peer, get_router, HatCode, HatContext, HatFace, HatTables}; +use super::{get_peer, HatCode, HatContext, HatFace, HatTables}; use crate::net::routing::dispatcher::face::FaceState; use crate::net::routing::dispatcher::pubsub::*; use crate::net::routing::dispatcher::resource::{NodeId, Resource, SessionContext}; @@ -80,35 +80,15 @@ fn send_sourced_subscription_to_net_childs( #[inline] fn propagate_simple_subscription_to( - tables: &mut Tables, + _tables: &mut Tables, dst_face: &mut Arc, res: &Arc, sub_info: &SubscriberInfo, src_face: &mut Arc, - full_peer_net: bool, ) { if (src_face.id != dst_face.id || res.expr().starts_with(PREFIX_LIVELINESS)) && !face_hat!(dst_face).local_subs.contains(res) - && match tables.whatami { - WhatAmI::Router => { - if full_peer_net { - dst_face.whatami == WhatAmI::Client - } else { - dst_face.whatami != WhatAmI::Router - && (src_face.whatami != WhatAmI::Peer - || dst_face.whatami != WhatAmI::Peer - || hat!(tables).failover_brokering(src_face.zid, dst_face.zid)) - } - } - WhatAmI::Peer => { - if full_peer_net { - dst_face.whatami == WhatAmI::Client - } else { - src_face.whatami == WhatAmI::Client || dst_face.whatami == WhatAmI::Client - } - } - _ => src_face.whatami == WhatAmI::Client || dst_face.whatami == WhatAmI::Client, - } 
+ && dst_face.whatami == WhatAmI::Client { face_hat_mut!(dst_face).local_subs.insert(res.clone()); let key_expr = Resource::decl_key(res, dst_face); @@ -134,21 +114,13 @@ fn propagate_simple_subscription( sub_info: &SubscriberInfo, src_face: &mut Arc, ) { - let full_peer_net = hat!(tables).full_net(WhatAmI::Peer); for mut dst_face in tables .faces .values() .cloned() .collect::>>() { - propagate_simple_subscription_to( - tables, - &mut dst_face, - res, - sub_info, - src_face, - full_peer_net, - ); + propagate_simple_subscription_to(tables, &mut dst_face, res, sub_info, src_face); } } @@ -158,9 +130,8 @@ fn propagate_sourced_subscription( sub_info: &SubscriberInfo, src_face: Option<&Arc>, source: &ZenohId, - net_type: WhatAmI, ) { - let net = hat!(tables).get_net(net_type).unwrap(); + let net = hat!(tables).peers_net.as_ref().unwrap(); match net.get_idx(source) { Some(tree_sid) => { if net.trees.len() > tree_sid.index() { @@ -190,93 +161,6 @@ fn propagate_sourced_subscription( } } -fn register_router_subscription( - tables: &mut Tables, - face: &mut Arc, - res: &mut Arc, - sub_info: &SubscriberInfo, - router: ZenohId, -) { - if !res_hat!(res).router_subs.contains(&router) { - // Register router subscription - { - log::debug!( - "Register router subscription {} (router: {})", - res.expr(), - router - ); - res_hat_mut!(res).router_subs.insert(router); - hat_mut!(tables).router_subs.insert(res.clone()); - } - - // Propagate subscription to routers - propagate_sourced_subscription(tables, res, sub_info, Some(face), &router, WhatAmI::Router); - } - // Propagate subscription to peers - if hat!(tables).full_net(WhatAmI::Peer) && face.whatami != WhatAmI::Peer { - register_peer_subscription(tables, face, res, sub_info, tables.zid) - } - - // Propagate subscription to clients - propagate_simple_subscription(tables, res, sub_info, face); -} - -fn declare_router_subscription( - tables: &TablesLock, - rtables: RwLockReadGuard, - face: &mut Arc, - expr: &WireExpr, - sub_info: &SubscriberInfo, - router: ZenohId, -) { - match rtables - .get_mapping(face, &expr.scope, expr.mapping) - .cloned() - { - Some(mut prefix) => { - let res = Resource::get_resource(&prefix, &expr.suffix); - let (mut res, mut wtables) = - if res.as_ref().map(|r| r.context.is_some()).unwrap_or(false) { - drop(rtables); - let wtables = zwrite!(tables.tables); - (res.unwrap(), wtables) - } else { - let mut fullexpr = prefix.expr(); - fullexpr.push_str(expr.suffix.as_ref()); - let mut matches = keyexpr::new(fullexpr.as_str()) - .map(|ke| Resource::get_matches(&rtables, ke)) - .unwrap_or_default(); - drop(rtables); - let mut wtables = zwrite!(tables.tables); - let mut res = - Resource::make_resource(&mut wtables, &mut prefix, expr.suffix.as_ref()); - matches.push(Arc::downgrade(&res)); - Resource::match_resource(&wtables, &mut res, matches); - (res, wtables) - }; - register_router_subscription(&mut wtables, face, &mut res, sub_info, router); - disable_matches_data_routes(&mut wtables, &mut res); - drop(wtables); - - let rtables = zread!(tables.tables); - let matches_data_routes = compute_matches_data_routes_(&rtables, &res); - drop(rtables); - - let wtables = zwrite!(tables.tables); - for (mut res, data_routes) in matches_data_routes { - get_mut_unchecked(&mut res) - .context_mut() - .update_data_routes(data_routes); - } - drop(wtables); - } - None => log::error!( - "Declare router subscription for unknown scope {}!", - expr.scope - ), - } -} - fn register_peer_subscription( tables: &mut Tables, face: &mut Arc, @@ -293,7 +177,7 @@ fn 
register_peer_subscription( } // Propagate subscription to peers - propagate_sourced_subscription(tables, res, sub_info, Some(face), &peer, WhatAmI::Peer); + propagate_sourced_subscription(tables, res, sub_info, Some(face), &peer); } if tables.whatami == WhatAmI::Peer { @@ -336,12 +220,6 @@ fn declare_peer_subscription( (res, wtables) }; register_peer_subscription(&mut wtables, face, &mut res, sub_info, peer); - if wtables.whatami == WhatAmI::Router { - let mut propa_sub_info = *sub_info; - propa_sub_info.mode = Mode::Push; - let zid = wtables.zid; - register_router_subscription(&mut wtables, face, &mut res, &propa_sub_info, zid); - } disable_matches_data_routes(&mut wtables, &mut res); drop(wtables); @@ -440,75 +318,8 @@ fn declare_client_subscription( register_client_subscription(&mut wtables, face, &mut res, sub_info); let mut propa_sub_info = *sub_info; propa_sub_info.mode = Mode::Push; - match wtables.whatami { - WhatAmI::Router => { - let zid = wtables.zid; - register_router_subscription( - &mut wtables, - face, - &mut res, - &propa_sub_info, - zid, - ); - } - WhatAmI::Peer => { - if hat!(wtables).full_net(WhatAmI::Peer) { - let zid = wtables.zid; - register_peer_subscription( - &mut wtables, - face, - &mut res, - &propa_sub_info, - zid, - ); - } else { - propagate_simple_subscription(&mut wtables, &res, &propa_sub_info, face); - // This introduced a buffer overflow on windows - // TODO: Let's deactivate this on windows until Fixed - #[cfg(not(windows))] - for mcast_group in &wtables.mcast_groups { - mcast_group - .primitives - .send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // TODO - wire_expr: res.expr().into(), - ext_info: *sub_info, - }), - }, - res.expr(), - )) - } - } - } - _ => { - propagate_simple_subscription(&mut wtables, &res, &propa_sub_info, face); - // This introduced a buffer overflow on windows - // TODO: Let's deactivate this on windows until Fixed - #[cfg(not(windows))] - for mcast_group in &wtables.mcast_groups { - mcast_group - .primitives - .send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // TODO - wire_expr: res.expr().into(), - ext_info: *sub_info, - }), - }, - res.expr(), - )) - } - } - } + let zid = wtables.zid; + register_peer_subscription(&mut wtables, face, &mut res, &propa_sub_info, zid); disable_matches_data_routes(&mut wtables, &mut res); drop(wtables); @@ -528,15 +339,6 @@ fn declare_client_subscription( } } -#[inline] -fn remote_router_subs(tables: &Tables, res: &Arc) -> bool { - res.context.is_some() - && res_hat!(res) - .router_subs - .iter() - .any(|peer| peer != &tables.zid) -} - #[inline] fn remote_peer_subs(tables: &Tables, res: &Arc) -> bool { res.context.is_some() @@ -621,55 +423,13 @@ fn propagate_forget_simple_subscription(tables: &mut Tables, res: &Arc } } -fn propagate_forget_simple_subscription_to_peers(tables: &mut Tables, res: &Arc) { - if !hat!(tables).full_net(WhatAmI::Peer) - && res_hat!(res).router_subs.len() == 1 - && res_hat!(res).router_subs.contains(&tables.zid) - { - for mut face in tables - .faces - .values() - .cloned() - .collect::>>() - { - if face.whatami == WhatAmI::Peer - && face_hat!(face).local_subs.contains(res) - && !res.session_ctxs.values().any(|s| { - face.zid != 
s.face.zid - && s.subs.is_some() - && (s.face.whatami == WhatAmI::Client - || (s.face.whatami == WhatAmI::Peer - && hat!(tables).failover_brokering(s.face.zid, face.zid))) - }) - { - let wire_expr = Resource::get_best_key(res, "", face.id); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id: 0, // TODO - ext_wire_expr: WireExprType { wire_expr }, - }), - }, - res.expr(), - )); - - face_hat_mut!(&mut face).local_subs.remove(res); - } - } - } -} - fn propagate_forget_sourced_subscription( tables: &Tables, res: &Arc, src_face: Option<&Arc>, source: &ZenohId, - net_type: WhatAmI, ) { - let net = hat!(tables).get_net(net_type).unwrap(); + let net = hat!(tables).peers_net.as_ref().unwrap(); match net.get_idx(source) { Some(tree_sid) => { if net.trees.len() > tree_sid.index() { @@ -698,74 +458,6 @@ fn propagate_forget_sourced_subscription( } } -fn unregister_router_subscription(tables: &mut Tables, res: &mut Arc, router: &ZenohId) { - log::debug!( - "Unregister router subscription {} (router: {})", - res.expr(), - router - ); - res_hat_mut!(res).router_subs.retain(|sub| sub != router); - - if res_hat!(res).router_subs.is_empty() { - hat_mut!(tables) - .router_subs - .retain(|sub| !Arc::ptr_eq(sub, res)); - - if hat_mut!(tables).full_net(WhatAmI::Peer) { - undeclare_peer_subscription(tables, None, res, &tables.zid.clone()); - } - propagate_forget_simple_subscription(tables, res); - } - - propagate_forget_simple_subscription_to_peers(tables, res); -} - -fn undeclare_router_subscription( - tables: &mut Tables, - face: Option<&Arc>, - res: &mut Arc, - router: &ZenohId, -) { - if res_hat!(res).router_subs.contains(router) { - unregister_router_subscription(tables, res, router); - propagate_forget_sourced_subscription(tables, res, face, router, WhatAmI::Router); - } -} - -fn forget_router_subscription( - tables: &TablesLock, - rtables: RwLockReadGuard, - face: &mut Arc, - expr: &WireExpr, - router: &ZenohId, -) { - match rtables.get_mapping(face, &expr.scope, expr.mapping) { - Some(prefix) => match Resource::get_resource(prefix, expr.suffix.as_ref()) { - Some(mut res) => { - drop(rtables); - let mut wtables = zwrite!(tables.tables); - undeclare_router_subscription(&mut wtables, Some(face), &mut res, router); - disable_matches_data_routes(&mut wtables, &mut res); - drop(wtables); - - let rtables = zread!(tables.tables); - let matches_data_routes = compute_matches_data_routes_(&rtables, &res); - drop(rtables); - let wtables = zwrite!(tables.tables); - for (mut res, data_routes) in matches_data_routes { - get_mut_unchecked(&mut res) - .context_mut() - .update_data_routes(data_routes); - } - Resource::clean(&mut res); - drop(wtables); - } - None => log::error!("Undeclare unknown router subscription!"), - }, - None => log::error!("Undeclare router subscription with unknown scope!"), - } -} - fn unregister_peer_subscription(tables: &mut Tables, res: &mut Arc, peer: &ZenohId) { log::debug!( "Unregister peer subscription {} (peer: {})", @@ -793,7 +485,7 @@ fn undeclare_peer_subscription( ) { if res_hat!(res).peer_subs.contains(peer) { unregister_peer_subscription(tables, res, peer); - propagate_forget_sourced_subscription(tables, res, face, peer, WhatAmI::Peer); + propagate_forget_sourced_subscription(tables, res, face, peer); } } @@ -810,14 +502,6 @@ fn forget_peer_subscription( drop(rtables); let mut wtables = 
zwrite!(tables.tables); undeclare_peer_subscription(&mut wtables, Some(face), &mut res, peer); - if wtables.whatami == WhatAmI::Router { - let client_subs = res.session_ctxs.values().any(|ctx| ctx.subs.is_some()); - let peer_subs = remote_peer_subs(&wtables, &res); - let zid = wtables.zid; - if !client_subs && !peer_subs { - undeclare_router_subscription(&mut wtables, None, &mut res, &zid); - } - } disable_matches_data_routes(&mut wtables, &mut res); drop(wtables); @@ -851,32 +535,11 @@ pub(super) fn undeclare_client_subscription( face_hat_mut!(face).remote_subs.remove(res); let mut client_subs = client_subs(res); - let router_subs = remote_router_subs(tables, res); let peer_subs = remote_peer_subs(tables, res); - match tables.whatami { - WhatAmI::Router => { - if client_subs.is_empty() && !peer_subs { - undeclare_router_subscription(tables, None, res, &tables.zid.clone()); - } else { - propagate_forget_simple_subscription_to_peers(tables, res); - } - } - WhatAmI::Peer => { - if client_subs.is_empty() { - if hat!(tables).full_net(WhatAmI::Peer) { - undeclare_peer_subscription(tables, None, res, &tables.zid.clone()); - } else { - propagate_forget_simple_subscription(tables, res); - } - } - } - _ => { - if client_subs.is_empty() { - propagate_forget_simple_subscription(tables, res); - } - } + if client_subs.is_empty() { + undeclare_peer_subscription(tables, None, res, &tables.zid.clone()); } - if client_subs.len() == 1 && !router_subs && !peer_subs { + if client_subs.len() == 1 && !peer_subs { let face = &mut client_subs[0]; if face_hat!(face).local_subs.contains(res) && !(face.whatami == WhatAmI::Client && res.expr().starts_with(PREFIX_LIVELINESS)) @@ -939,196 +602,62 @@ pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { reliability: Reliability::Reliable, // @TODO mode: Mode::Push, }; - match tables.whatami { - WhatAmI::Router => { - if face.whatami == WhatAmI::Client { - for sub in &hat!(tables).router_subs { - face_hat_mut!(face).local_subs.insert(sub.clone()); - let key_expr = Resource::decl_key(sub, face); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // TODO - wire_expr: key_expr, - ext_info: sub_info, - }), - }, - sub.expr(), - )); - } - } else if face.whatami == WhatAmI::Peer && !hat!(tables).full_net(WhatAmI::Peer) { - for sub in &hat!(tables).router_subs { - if sub.context.is_some() - && (res_hat!(sub).router_subs.iter().any(|r| *r != tables.zid) - || sub.session_ctxs.values().any(|s| { - s.subs.is_some() - && (s.face.whatami == WhatAmI::Client - || (s.face.whatami == WhatAmI::Peer - && hat!(tables) - .failover_brokering(s.face.zid, face.zid))) - })) - { - face_hat_mut!(face).local_subs.insert(sub.clone()); - let key_expr = Resource::decl_key(sub, face); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // TODO - wire_expr: key_expr, - ext_info: sub_info, - }), - }, - sub.expr(), - )); - } - } - } - } - WhatAmI::Peer => { - if hat!(tables).full_net(WhatAmI::Peer) { - if face.whatami == WhatAmI::Client { - for sub in &hat!(tables).peer_subs { - face_hat_mut!(face).local_subs.insert(sub.clone()); - let key_expr = Resource::decl_key(sub, face); - 
face.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // TODO - wire_expr: key_expr, - ext_info: sub_info, - }), - }, - sub.expr(), - )); - } - } - } else { - for src_face in tables - .faces - .values() - .cloned() - .collect::>>() - { - for sub in &face_hat!(src_face).remote_subs { - propagate_simple_subscription_to( - tables, - face, - sub, - &sub_info, - &mut src_face.clone(), - false, - ); - } - } - } - } - WhatAmI::Client => { - for src_face in tables - .faces - .values() - .cloned() - .collect::>>() - { - for sub in &face_hat!(src_face).remote_subs { - propagate_simple_subscription_to( - tables, - face, - sub, - &sub_info, - &mut src_face.clone(), - false, - ); - } - } + + if face.whatami == WhatAmI::Client { + for sub in &hat!(tables).peer_subs { + face_hat_mut!(face).local_subs.insert(sub.clone()); + let key_expr = Resource::decl_key(sub, face); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id: 0, // TODO + wire_expr: key_expr, + ext_info: sub_info, + }), + }, + sub.expr(), + )); } } } -pub(super) fn pubsub_remove_node(tables: &mut Tables, node: &ZenohId, net_type: WhatAmI) { - match net_type { - WhatAmI::Router => { - for mut res in hat!(tables) - .router_subs - .iter() - .filter(|res| res_hat!(res).router_subs.contains(node)) - .cloned() - .collect::>>() - { - unregister_router_subscription(tables, &mut res, node); - - let matches_data_routes = compute_matches_data_routes_(tables, &res); - for (mut res, data_routes) in matches_data_routes { - get_mut_unchecked(&mut res) - .context_mut() - .update_data_routes(data_routes); - } - Resource::clean(&mut res) - } - } - WhatAmI::Peer => { - for mut res in hat!(tables) - .peer_subs - .iter() - .filter(|res| res_hat!(res).peer_subs.contains(node)) - .cloned() - .collect::>>() - { - unregister_peer_subscription(tables, &mut res, node); - - if tables.whatami == WhatAmI::Router { - let client_subs = res.session_ctxs.values().any(|ctx| ctx.subs.is_some()); - let peer_subs = remote_peer_subs(tables, &res); - if !client_subs && !peer_subs { - undeclare_router_subscription(tables, None, &mut res, &tables.zid.clone()); - } - } +pub(super) fn pubsub_remove_node(tables: &mut Tables, node: &ZenohId) { + for mut res in hat!(tables) + .peer_subs + .iter() + .filter(|res| res_hat!(res).peer_subs.contains(node)) + .cloned() + .collect::>>() + { + unregister_peer_subscription(tables, &mut res, node); - // compute_matches_data_routes(tables, &mut res); - let matches_data_routes = compute_matches_data_routes_(tables, &res); - for (mut res, data_routes) in matches_data_routes { - get_mut_unchecked(&mut res) - .context_mut() - .update_data_routes(data_routes); - } - Resource::clean(&mut res) - } + // compute_matches_data_routes(tables, &mut res); + let matches_data_routes = compute_matches_data_routes_(tables, &res); + for (mut res, data_routes) in matches_data_routes { + get_mut_unchecked(&mut res) + .context_mut() + .update_data_routes(data_routes); } - _ => (), + Resource::clean(&mut res) } } -pub(super) fn pubsub_tree_change( - tables: &mut Tables, - new_childs: &[Vec], - net_type: WhatAmI, -) { +pub(super) fn pubsub_tree_change(tables: &mut Tables, new_childs: &[Vec]) { // 
propagate subs to new childs for (tree_sid, tree_childs) in new_childs.iter().enumerate() { if !tree_childs.is_empty() { - let net = hat!(tables).get_net(net_type).unwrap(); + let net = hat!(tables).peers_net.as_ref().unwrap(); let tree_idx = NodeIndex::new(tree_sid); if net.graph.contains_node(tree_idx) { let tree_id = net.graph[tree_idx].zid; - let subs_res = match net_type { - WhatAmI::Router => &hat!(tables).router_subs, - _ => &hat!(tables).peer_subs, - }; + let subs_res = &hat!(tables).peer_subs; for res in subs_res { - let subs = match net_type { - WhatAmI::Router => &res_hat!(res).router_subs, - _ => &res_hat!(res).peer_subs, - }; + let subs = &res_hat!(res).peer_subs; for sub in subs { if *sub == tree_id { let sub_info = SubscriberInfo { @@ -1155,90 +684,6 @@ pub(super) fn pubsub_tree_change( compute_data_routes_from(tables, &mut tables.root_res.clone()); } -pub(super) fn pubsub_linkstate_change(tables: &mut Tables, zid: &ZenohId, links: &[ZenohId]) { - if let Some(src_face) = tables.get_face(zid).cloned() { - if hat!(tables).router_peers_failover_brokering - && tables.whatami == WhatAmI::Router - && src_face.whatami == WhatAmI::Peer - { - for res in &face_hat!(src_face).remote_subs { - let client_subs = res - .session_ctxs - .values() - .any(|ctx| ctx.face.whatami == WhatAmI::Client && ctx.subs.is_some()); - if !remote_router_subs(tables, res) && !client_subs { - for ctx in get_mut_unchecked(&mut res.clone()) - .session_ctxs - .values_mut() - { - let dst_face = &mut get_mut_unchecked(ctx).face; - if dst_face.whatami == WhatAmI::Peer && src_face.zid != dst_face.zid { - if face_hat!(dst_face).local_subs.contains(res) { - let forget = !HatTables::failover_brokering_to(links, dst_face.zid) - && { - let ctx_links = hat!(tables) - .peers_net - .as_ref() - .map(|net| net.get_links(dst_face.zid)) - .unwrap_or_else(|| &[]); - res.session_ctxs.values().any(|ctx2| { - ctx2.face.whatami == WhatAmI::Peer - && ctx2.subs.is_some() - && HatTables::failover_brokering_to( - ctx_links, - ctx2.face.zid, - ) - }) - }; - if forget { - let wire_expr = Resource::get_best_key(res, "", dst_face.id); - dst_face.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::UndeclareSubscriber( - UndeclareSubscriber { - id: 0, // TODO - ext_wire_expr: WireExprType { wire_expr }, - }, - ), - }, - res.expr(), - )); - - face_hat_mut!(dst_face).local_subs.remove(res); - } - } else if HatTables::failover_brokering_to(links, ctx.face.zid) { - let dst_face = &mut get_mut_unchecked(ctx).face; - face_hat_mut!(dst_face).local_subs.insert(res.clone()); - let key_expr = Resource::decl_key(res, dst_face); - let sub_info = SubscriberInfo { - reliability: Reliability::Reliable, // TODO - mode: Mode::Push, - }; - dst_face.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // TODO - wire_expr: key_expr, - ext_info: sub_info, - }), - }, - res.expr(), - )); - } - } - } - } - } - } - } -} - #[inline] fn insert_faces_for_subs( route: &mut Route, @@ -1282,24 +727,12 @@ impl HatPubSubTrait for HatCode { node_id: NodeId, ) { let rtables = zread!(tables.tables); - match (rtables.whatami, face.whatami) { - (WhatAmI::Router, WhatAmI::Router) => { - if let Some(router) = get_router(&rtables, face, node_id) { - 
declare_router_subscription(tables, rtables, face, expr, sub_info, router) - } - } - (WhatAmI::Router, WhatAmI::Peer) - | (WhatAmI::Peer, WhatAmI::Router) - | (WhatAmI::Peer, WhatAmI::Peer) => { - if hat!(rtables).full_net(WhatAmI::Peer) { - if let Some(peer) = get_peer(&rtables, face, node_id) { - declare_peer_subscription(tables, rtables, face, expr, sub_info, peer) - } - } else { - declare_client_subscription(tables, rtables, face, expr, sub_info) - } + if face.whatami != WhatAmI::Client { + if let Some(peer) = get_peer(&rtables, face, node_id) { + declare_peer_subscription(tables, rtables, face, expr, sub_info, peer) } - _ => declare_client_subscription(tables, rtables, face, expr, sub_info), + } else { + declare_client_subscription(tables, rtables, face, expr, sub_info) } } @@ -1311,24 +744,12 @@ impl HatPubSubTrait for HatCode { node_id: NodeId, ) { let rtables = zread!(tables.tables); - match (rtables.whatami, face.whatami) { - (WhatAmI::Router, WhatAmI::Router) => { - if let Some(router) = get_router(&rtables, face, node_id) { - forget_router_subscription(tables, rtables, face, expr, &router) - } + if face.whatami != WhatAmI::Client { + if let Some(peer) = get_peer(&rtables, face, node_id) { + forget_peer_subscription(tables, rtables, face, expr, &peer); } - (WhatAmI::Router, WhatAmI::Peer) - | (WhatAmI::Peer, WhatAmI::Router) - | (WhatAmI::Peer, WhatAmI::Peer) => { - if hat!(rtables).full_net(WhatAmI::Peer) { - if let Some(peer) = get_peer(&rtables, face, node_id) { - forget_peer_subscription(tables, rtables, face, expr, &peer) - } - } else { - forget_client_subscription(tables, rtables, face, expr) - } - } - _ => forget_client_subscription(tables, rtables, face, expr), + } else { + forget_client_subscription(tables, rtables, face, expr); } } @@ -1364,82 +785,37 @@ impl HatPubSubTrait for HatCode { .map(|ctx| Cow::from(&ctx.matches)) .unwrap_or_else(|| Cow::from(Resource::get_matches(tables, &key_expr))); - let master = tables.whatami != WhatAmI::Router - || !hat!(tables).full_net(WhatAmI::Peer) - || *hat!(tables).elect_router(&tables.zid, &key_expr, hat!(tables).shared_nodes.iter()) - == tables.zid; - for mres in matches.iter() { let mres = mres.upgrade().unwrap(); - if tables.whatami == WhatAmI::Router { - if master || source_type == WhatAmI::Router { - let net = hat!(tables).routers_net.as_ref().unwrap(); - let router_source = match source_type { - WhatAmI::Router => source, - _ => net.idx.index() as NodeId, - }; - insert_faces_for_subs( - &mut route, - expr, - tables, - net, - router_source, - &res_hat!(mres).router_subs, - ); - } - if (master || source_type != WhatAmI::Router) - && hat!(tables).full_net(WhatAmI::Peer) - { - let net = hat!(tables).peers_net.as_ref().unwrap(); - let peer_source = match source_type { - WhatAmI::Peer => source, - _ => net.idx.index() as NodeId, - }; - insert_faces_for_subs( - &mut route, - expr, - tables, - net, - peer_source, - &res_hat!(mres).peer_subs, - ); - } - } - - if tables.whatami == WhatAmI::Peer && hat!(tables).full_net(WhatAmI::Peer) { - let net = hat!(tables).peers_net.as_ref().unwrap(); - let peer_source = match source_type { - WhatAmI::Router | WhatAmI::Peer => source, - _ => net.idx.index() as NodeId, - }; - insert_faces_for_subs( - &mut route, - expr, - tables, - net, - peer_source, - &res_hat!(mres).peer_subs, - ); - } + let net = hat!(tables).peers_net.as_ref().unwrap(); + let peer_source = match source_type { + WhatAmI::Router | WhatAmI::Peer => source, + _ => net.idx.index() as NodeId, + }; + insert_faces_for_subs( + &mut 
route, + expr, + tables, + net, + peer_source, + &res_hat!(mres).peer_subs, + ); - if tables.whatami != WhatAmI::Router || master || source_type == WhatAmI::Router { - for (sid, context) in &mres.session_ctxs { - if let Some(subinfo) = &context.subs { - if match tables.whatami { - WhatAmI::Router => context.face.whatami != WhatAmI::Router, - _ => { - source_type == WhatAmI::Client - || context.face.whatami == WhatAmI::Client - } - } && subinfo.mode == Mode::Push - { - route.entry(*sid).or_insert_with(|| { - let key_expr = - Resource::get_best_key(expr.prefix, expr.suffix, *sid); - (context.face.clone(), key_expr.to_owned(), NodeId::default()) - }); + for (sid, context) in &mres.session_ctxs { + if let Some(subinfo) = &context.subs { + if match tables.whatami { + WhatAmI::Router => context.face.whatami != WhatAmI::Router, + _ => { + source_type == WhatAmI::Client + || context.face.whatami == WhatAmI::Client } + } && subinfo.mode == Mode::Push + { + route.entry(*sid).or_insert_with(|| { + let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, *sid); + (context.face.clone(), key_expr.to_owned(), NodeId::default()) + }); } } } @@ -1493,34 +869,32 @@ impl HatPubSubTrait for HatCode { client_data_route: None, }; let mut expr = RoutingExpr::new(res, ""); - if tables.whatami == WhatAmI::Router { - let indexes = hat!(tables) - .routers_net - .as_ref() - .unwrap() - .graph - .node_indices() - .collect::>(); - let max_idx = indexes.iter().max().unwrap(); - routes - .routers_data_routes - .resize_with(max_idx.index() + 1, || Arc::new(HashMap::new())); - - for idx in &indexes { - routes.routers_data_routes[idx.index()] = self.compute_data_route( - tables, - &mut expr, - idx.index() as NodeId, - WhatAmI::Router, - ); - } + let indexes = hat!(tables) + .peers_net + .as_ref() + .unwrap() + .graph + .node_indices() + .collect::>(); + let max_idx = indexes.iter().max().unwrap(); + routes + .peers_data_routes + .resize_with(max_idx.index() + 1, || Arc::new(HashMap::new())); - routes.peer_data_route = - Some(self.compute_data_route(tables, &mut expr, NodeId::default(), WhatAmI::Peer)); + for idx in &indexes { + routes.peers_data_routes[idx.index()] = + self.compute_data_route(tables, &mut expr, idx.index() as NodeId, WhatAmI::Peer); } - if (tables.whatami == WhatAmI::Router || tables.whatami == WhatAmI::Peer) - && hat!(tables).full_net(WhatAmI::Peer) - { + + routes.matching_pulls = Some(self.compute_matching_pulls(tables, &mut expr)); + routes + } + + fn compute_data_routes(&self, tables: &mut Tables, res: &mut Arc) { + if res.context.is_some() { + let mut res_mut = res.clone(); + let res_mut = get_mut_unchecked(&mut res_mut); + let mut expr = RoutingExpr::new(res, ""); let indexes = hat!(tables) .peers_net .as_ref() @@ -1529,121 +903,19 @@ impl HatPubSubTrait for HatCode { .node_indices() .collect::>(); let max_idx = indexes.iter().max().unwrap(); - routes - .peers_data_routes - .resize_with(max_idx.index() + 1, || Arc::new(HashMap::new())); + let peers_data_routes = &mut res_mut.context_mut().peers_data_routes; + peers_data_routes.clear(); + peers_data_routes.resize_with(max_idx.index() + 1, || Arc::new(HashMap::new())); for idx in &indexes { - routes.peers_data_routes[idx.index()] = self.compute_data_route( + peers_data_routes[idx.index()] = self.compute_data_route( tables, &mut expr, idx.index() as NodeId, WhatAmI::Peer, ); } - } - if tables.whatami == WhatAmI::Peer && !hat!(tables).full_net(WhatAmI::Peer) { - routes.client_data_route = Some(self.compute_data_route( - tables, - &mut expr, - 
NodeId::default(), - WhatAmI::Client, - )); - routes.peer_data_route = - Some(self.compute_data_route(tables, &mut expr, NodeId::default(), WhatAmI::Peer)); - } - if tables.whatami == WhatAmI::Client { - routes.client_data_route = Some(self.compute_data_route( - tables, - &mut expr, - NodeId::default(), - WhatAmI::Client, - )); - } - routes.matching_pulls = Some(self.compute_matching_pulls(tables, &mut expr)); - routes - } - - fn compute_data_routes(&self, tables: &mut Tables, res: &mut Arc) { - if res.context.is_some() { - let mut res_mut = res.clone(); - let res_mut = get_mut_unchecked(&mut res_mut); - let mut expr = RoutingExpr::new(res, ""); - if tables.whatami == WhatAmI::Router { - let indexes = hat!(tables) - .routers_net - .as_ref() - .unwrap() - .graph - .node_indices() - .collect::>(); - let max_idx = indexes.iter().max().unwrap(); - let routers_data_routes = &mut res_mut.context_mut().routers_data_routes; - routers_data_routes.clear(); - routers_data_routes.resize_with(max_idx.index() + 1, || Arc::new(HashMap::new())); - - for idx in &indexes { - routers_data_routes[idx.index()] = self.compute_data_route( - tables, - &mut expr, - idx.index() as NodeId, - WhatAmI::Router, - ); - } - res_mut.context_mut().peer_data_route = Some(self.compute_data_route( - tables, - &mut expr, - NodeId::default(), - WhatAmI::Peer, - )); - } - if (tables.whatami == WhatAmI::Router || tables.whatami == WhatAmI::Peer) - && hat!(tables).full_net(WhatAmI::Peer) - { - let indexes = hat!(tables) - .peers_net - .as_ref() - .unwrap() - .graph - .node_indices() - .collect::>(); - let max_idx = indexes.iter().max().unwrap(); - let peers_data_routes = &mut res_mut.context_mut().peers_data_routes; - peers_data_routes.clear(); - peers_data_routes.resize_with(max_idx.index() + 1, || Arc::new(HashMap::new())); - - for idx in &indexes { - peers_data_routes[idx.index()] = self.compute_data_route( - tables, - &mut expr, - idx.index() as NodeId, - WhatAmI::Peer, - ); - } - } - if tables.whatami == WhatAmI::Peer && !hat!(tables).full_net(WhatAmI::Peer) { - res_mut.context_mut().client_data_route = Some(self.compute_data_route( - tables, - &mut expr, - NodeId::default(), - WhatAmI::Client, - )); - res_mut.context_mut().peer_data_route = Some(self.compute_data_route( - tables, - &mut expr, - NodeId::default(), - WhatAmI::Peer, - )); - } - if tables.whatami == WhatAmI::Client { - res_mut.context_mut().client_data_route = Some(self.compute_data_route( - tables, - &mut expr, - NodeId::default(), - WhatAmI::Client, - )); - } res_mut.context_mut().matching_pulls = self.compute_matching_pulls(tables, &mut expr); } } diff --git a/zenoh/src/net/routing/hat/linkstate_peer/queries.rs b/zenoh/src/net/routing/hat/linkstate_peer/queries.rs index c1093a8a00..42917b350c 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/queries.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/queries.rs @@ -13,7 +13,7 @@ // use super::network::Network; use super::{face_hat, face_hat_mut, hat, hat_mut, res_hat, res_hat_mut}; -use super::{get_peer, get_router, HatCode, HatContext, HatFace, HatTables}; +use super::{get_peer, HatCode, HatContext, HatFace, HatTables}; use crate::net::routing::dispatcher::face::FaceState; use crate::net::routing::dispatcher::queries::*; use crate::net::routing::dispatcher::resource::{NodeId, Resource, SessionContext}; @@ -56,65 +56,10 @@ fn merge_qabl_infos(mut this: QueryableInfo, info: &QueryableInfo) -> QueryableI this } -fn local_router_qabl_info(tables: &Tables, res: &Arc) -> QueryableInfo { - let info = if 
hat!(tables).full_net(WhatAmI::Peer) { - res.context.as_ref().and_then(|_| { - res_hat!(res) - .peer_qabls - .iter() - .fold(None, |accu, (zid, info)| { - if *zid != tables.zid { - Some(match accu { - Some(accu) => merge_qabl_infos(accu, info), - None => *info, - }) - } else { - accu - } - }) - }) - } else { - None - }; - res.session_ctxs - .values() - .fold(info, |accu, ctx| { - if let Some(info) = ctx.qabl.as_ref() { - Some(match accu { - Some(accu) => merge_qabl_infos(accu, info), - None => *info, - }) - } else { - accu - } - }) - .unwrap_or(QueryableInfo { - complete: 0, - distance: 0, - }) -} - -fn local_peer_qabl_info(tables: &Tables, res: &Arc) -> QueryableInfo { - let info = if tables.whatami == WhatAmI::Router && res.context.is_some() { - res_hat!(res) - .router_qabls - .iter() - .fold(None, |accu, (zid, info)| { - if *zid != tables.zid { - Some(match accu { - Some(accu) => merge_qabl_infos(accu, info), - None => *info, - }) - } else { - accu - } - }) - } else { - None - }; +fn local_peer_qabl_info(_tables: &Tables, res: &Arc) -> QueryableInfo { res.session_ctxs .values() - .fold(info, |accu, ctx| { + .fold(None, |accu, ctx| { if let Some(info) = ctx.qabl.as_ref() { Some(match accu { Some(accu) => merge_qabl_infos(accu, info), @@ -131,9 +76,9 @@ fn local_peer_qabl_info(tables: &Tables, res: &Arc) -> QueryableInfo { } fn local_qabl_info(tables: &Tables, res: &Arc, face: &Arc) -> QueryableInfo { - let mut info = if tables.whatami == WhatAmI::Router && res.context.is_some() { + let info = if res.context.is_some() { res_hat!(res) - .router_qabls + .peer_qabls .iter() .fold(None, |accu, (zid, info)| { if *zid != tables.zid { @@ -148,27 +93,11 @@ fn local_qabl_info(tables: &Tables, res: &Arc, face: &Arc) } else { None }; - if res.context.is_some() && hat!(tables).full_net(WhatAmI::Peer) { - info = res_hat!(res) - .peer_qabls - .iter() - .fold(info, |accu, (zid, info)| { - if *zid != tables.zid { - Some(match accu { - Some(accu) => merge_qabl_infos(accu, info), - None => *info, - }) - } else { - accu - } - }) - } res.session_ctxs .values() .fold(info, |accu, ctx| { if ctx.face.id != face.id && ctx.face.whatami != WhatAmI::Peer || face.whatami != WhatAmI::Peer - || hat!(tables).failover_brokering(ctx.face.zid, face.zid) { if let Some(info) = ctx.qabl.as_ref() { Some(match accu { @@ -236,43 +165,13 @@ fn propagate_simple_queryable( res: &Arc, src_face: Option<&mut Arc>, ) { - let full_peers_net = hat!(tables).full_net(WhatAmI::Peer); let faces = tables.faces.values().cloned(); for mut dst_face in faces { let info = local_qabl_info(tables, res, &dst_face); let current_info = face_hat!(dst_face).local_qabls.get(res); if (src_face.is_none() || src_face.as_ref().unwrap().id != dst_face.id) && (current_info.is_none() || *current_info.unwrap() != info) - && match tables.whatami { - WhatAmI::Router => { - if full_peers_net { - dst_face.whatami == WhatAmI::Client - } else { - dst_face.whatami != WhatAmI::Router - && (src_face.is_none() - || src_face.as_ref().unwrap().whatami != WhatAmI::Peer - || dst_face.whatami != WhatAmI::Peer - || hat!(tables).failover_brokering( - src_face.as_ref().unwrap().zid, - dst_face.zid, - )) - } - } - WhatAmI::Peer => { - if full_peers_net { - dst_face.whatami == WhatAmI::Client - } else { - src_face.is_none() - || src_face.as_ref().unwrap().whatami == WhatAmI::Client - || dst_face.whatami == WhatAmI::Client - } - } - _ => { - src_face.is_none() - || src_face.as_ref().unwrap().whatami == WhatAmI::Client - || dst_face.whatami == WhatAmI::Client - } - } + && 
dst_face.whatami == WhatAmI::Client { face_hat_mut!(&mut dst_face) .local_qabls @@ -301,9 +200,8 @@ fn propagate_sourced_queryable( qabl_info: &QueryableInfo, src_face: Option<&mut Arc>, source: &ZenohId, - net_type: WhatAmI, ) { - let net = hat!(tables).get_net(net_type).unwrap(); + let net = hat!(tables).peers_net.as_ref().unwrap(); match net.get_idx(source) { Some(tree_sid) => { if net.trees.len() > tree_sid.index() { @@ -333,103 +231,6 @@ fn propagate_sourced_queryable( } } -fn register_router_queryable( - tables: &mut Tables, - mut face: Option<&mut Arc>, - res: &mut Arc, - qabl_info: &QueryableInfo, - router: ZenohId, -) { - let current_info = res_hat!(res).router_qabls.get(&router); - if current_info.is_none() || current_info.unwrap() != qabl_info { - // Register router queryable - { - log::debug!( - "Register router queryable {} (router: {})", - res.expr(), - router, - ); - res_hat_mut!(res).router_qabls.insert(router, *qabl_info); - hat_mut!(tables).router_qabls.insert(res.clone()); - } - - // Propagate queryable to routers - propagate_sourced_queryable( - tables, - res, - qabl_info, - face.as_deref_mut(), - &router, - WhatAmI::Router, - ); - } - - if hat!(tables).full_net(WhatAmI::Peer) { - // Propagate queryable to peers - if face.is_none() || face.as_ref().unwrap().whatami != WhatAmI::Peer { - let local_info = local_peer_qabl_info(tables, res); - register_peer_queryable(tables, face.as_deref_mut(), res, &local_info, tables.zid) - } - } - - // Propagate queryable to clients - propagate_simple_queryable(tables, res, face); -} - -fn declare_router_queryable( - tables: &TablesLock, - rtables: RwLockReadGuard, - face: &mut Arc, - expr: &WireExpr, - qabl_info: &QueryableInfo, - router: ZenohId, -) { - match rtables - .get_mapping(face, &expr.scope, expr.mapping) - .cloned() - { - Some(mut prefix) => { - let res = Resource::get_resource(&prefix, &expr.suffix); - let (mut res, mut wtables) = - if res.as_ref().map(|r| r.context.is_some()).unwrap_or(false) { - drop(rtables); - let wtables = zwrite!(tables.tables); - (res.unwrap(), wtables) - } else { - let mut fullexpr = prefix.expr(); - fullexpr.push_str(expr.suffix.as_ref()); - log::debug!("Register router queryable {}", fullexpr); - let mut matches = keyexpr::new(fullexpr.as_str()) - .map(|ke| Resource::get_matches(&rtables, ke)) - .unwrap_or_default(); - drop(rtables); - let mut wtables = zwrite!(tables.tables); - let mut res = - Resource::make_resource(&mut wtables, &mut prefix, expr.suffix.as_ref()); - matches.push(Arc::downgrade(&res)); - Resource::match_resource(&wtables, &mut res, matches); - (res, wtables) - }; - register_router_queryable(&mut wtables, Some(face), &mut res, qabl_info, router); - disable_matches_query_routes(&mut wtables, &mut res); - drop(wtables); - - let rtables = zread!(tables.tables); - let matches_query_routes = compute_matches_query_routes_(&rtables, &res); - drop(rtables); - - let wtables = zwrite!(tables.tables); - for (mut res, query_routes) in matches_query_routes { - get_mut_unchecked(&mut res) - .context_mut() - .update_query_routes(query_routes); - } - drop(wtables); - } - None => log::error!("Declare router queryable for unknown scope {}!", expr.scope), - } -} - fn register_peer_queryable( tables: &mut Tables, mut face: Option<&mut Arc>, @@ -447,14 +248,7 @@ fn register_peer_queryable( } // Propagate queryable to peers - propagate_sourced_queryable( - tables, - res, - qabl_info, - face.as_deref_mut(), - &peer, - WhatAmI::Peer, - ); + propagate_sourced_queryable(tables, res, qabl_info, 
face.as_deref_mut(), &peer); } if tables.whatami == WhatAmI::Peer { @@ -497,13 +291,8 @@ fn declare_peer_queryable( Resource::match_resource(&wtables, &mut res, matches); (res, wtables) }; - let mut face = Some(face); - register_peer_queryable(&mut wtables, face.as_deref_mut(), &mut res, qabl_info, peer); - if wtables.whatami == WhatAmI::Router { - let local_info = local_router_qabl_info(&wtables, &res); - let zid = wtables.zid; - register_router_queryable(&mut wtables, face, &mut res, &local_info, zid); - } + let face = Some(face); + register_peer_queryable(&mut wtables, face, &mut res, qabl_info, peer); disable_matches_query_routes(&mut wtables, &mut res); drop(wtables); @@ -584,37 +373,9 @@ fn declare_client_queryable( register_client_queryable(&mut wtables, face, &mut res, qabl_info); - match wtables.whatami { - WhatAmI::Router => { - let local_details = local_router_qabl_info(&wtables, &res); - let zid = wtables.zid; - register_router_queryable( - &mut wtables, - Some(face), - &mut res, - &local_details, - zid, - ); - } - WhatAmI::Peer => { - if hat!(wtables).full_net(WhatAmI::Peer) { - let local_details = local_peer_qabl_info(&wtables, &res); - let zid = wtables.zid; - register_peer_queryable( - &mut wtables, - Some(face), - &mut res, - &local_details, - zid, - ); - } else { - propagate_simple_queryable(&mut wtables, &res, Some(face)); - } - } - _ => { - propagate_simple_queryable(&mut wtables, &res, Some(face)); - } - } + let local_details = local_peer_qabl_info(&wtables, &res); + let zid = wtables.zid; + register_peer_queryable(&mut wtables, Some(face), &mut res, &local_details, zid); disable_matches_query_routes(&mut wtables, &mut res); drop(wtables); @@ -634,15 +395,6 @@ fn declare_client_queryable( } } -#[inline] -fn remote_router_qabls(tables: &Tables, res: &Arc) -> bool { - res.context.is_some() - && res_hat!(res) - .router_qabls - .keys() - .any(|router| router != &tables.zid) -} - #[inline] fn remote_peer_qabls(tables: &Tables, res: &Arc) -> bool { res.context.is_some() @@ -728,55 +480,13 @@ fn propagate_forget_simple_queryable(tables: &mut Tables, res: &mut Arc) { - if !hat!(tables).full_net(WhatAmI::Peer) - && res_hat!(res).router_qabls.len() == 1 - && res_hat!(res).router_qabls.contains_key(&tables.zid) - { - for mut face in tables - .faces - .values() - .cloned() - .collect::>>() - { - if face.whatami == WhatAmI::Peer - && face_hat!(face).local_qabls.contains_key(res) - && !res.session_ctxs.values().any(|s| { - face.zid != s.face.zid - && s.qabl.is_some() - && (s.face.whatami == WhatAmI::Client - || (s.face.whatami == WhatAmI::Peer - && hat!(tables).failover_brokering(s.face.zid, face.zid))) - }) - { - let wire_expr = Resource::get_best_key(res, "", face.id); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::UndeclareQueryable(UndeclareQueryable { - id: 0, // TODO - ext_wire_expr: WireExprType { wire_expr }, - }), - }, - res.expr(), - )); - - face_hat_mut!(&mut face).local_qabls.remove(res); - } - } - } -} - fn propagate_forget_sourced_queryable( tables: &mut Tables, res: &mut Arc, src_face: Option<&Arc>, source: &ZenohId, - net_type: WhatAmI, ) { - let net = hat!(tables).get_net(net_type).unwrap(); + let net = hat!(tables).peers_net.as_ref().unwrap(); match net.get_idx(source) { Some(tree_sid) => { if net.trees.len() > tree_sid.index() { @@ -805,75 +515,6 @@ fn propagate_forget_sourced_queryable( } } -fn 
unregister_router_queryable(tables: &mut Tables, res: &mut Arc, router: &ZenohId) { - log::debug!( - "Unregister router queryable {} (router: {})", - res.expr(), - router, - ); - res_hat_mut!(res).router_qabls.remove(router); - - if res_hat!(res).router_qabls.is_empty() { - hat_mut!(tables) - .router_qabls - .retain(|qabl| !Arc::ptr_eq(qabl, res)); - - if hat!(tables).full_net(WhatAmI::Peer) { - undeclare_peer_queryable(tables, None, res, &tables.zid.clone()); - } - propagate_forget_simple_queryable(tables, res); - } - - propagate_forget_simple_queryable_to_peers(tables, res); -} - -fn undeclare_router_queryable( - tables: &mut Tables, - face: Option<&Arc>, - res: &mut Arc, - router: &ZenohId, -) { - if res_hat!(res).router_qabls.contains_key(router) { - unregister_router_queryable(tables, res, router); - propagate_forget_sourced_queryable(tables, res, face, router, WhatAmI::Router); - } -} - -fn forget_router_queryable( - tables: &TablesLock, - rtables: RwLockReadGuard, - face: &mut Arc, - expr: &WireExpr, - router: &ZenohId, -) { - match rtables.get_mapping(face, &expr.scope, expr.mapping) { - Some(prefix) => match Resource::get_resource(prefix, expr.suffix.as_ref()) { - Some(mut res) => { - drop(rtables); - let mut wtables = zwrite!(tables.tables); - undeclare_router_queryable(&mut wtables, Some(face), &mut res, router); - disable_matches_query_routes(&mut wtables, &mut res); - drop(wtables); - - let rtables = zread!(tables.tables); - let matches_query_routes = compute_matches_query_routes_(&rtables, &res); - drop(rtables); - - let wtables = zwrite!(tables.tables); - for (mut res, query_routes) in matches_query_routes { - get_mut_unchecked(&mut res) - .context_mut() - .update_query_routes(query_routes); - } - Resource::clean(&mut res); - drop(wtables); - } - None => log::error!("Undeclare unknown router queryable!"), - }, - None => log::error!("Undeclare router queryable with unknown scope!"), - } -} - fn unregister_peer_queryable(tables: &mut Tables, res: &mut Arc, peer: &ZenohId) { log::debug!("Unregister peer queryable {} (peer: {})", res.expr(), peer,); res_hat_mut!(res).peer_qabls.remove(peer); @@ -897,7 +538,7 @@ fn undeclare_peer_queryable( ) { if res_hat!(res).peer_qabls.contains_key(peer) { unregister_peer_queryable(tables, res, peer); - propagate_forget_sourced_queryable(tables, res, face, peer, WhatAmI::Peer); + propagate_forget_sourced_queryable(tables, res, face, peer); } } @@ -914,18 +555,6 @@ fn forget_peer_queryable( drop(rtables); let mut wtables = zwrite!(tables.tables); undeclare_peer_queryable(&mut wtables, Some(face), &mut res, peer); - - if wtables.whatami == WhatAmI::Router { - let client_qabls = res.session_ctxs.values().any(|ctx| ctx.qabl.is_some()); - let peer_qabls = remote_peer_qabls(&wtables, &res); - let zid = wtables.zid; - if !client_qabls && !peer_qabls { - undeclare_router_queryable(&mut wtables, None, &mut res, &zid); - } else { - let local_info = local_router_qabl_info(&wtables, &res); - register_router_queryable(&mut wtables, None, &mut res, &local_info, zid); - } - } drop(wtables); let rtables = zread!(tables.tables); @@ -961,43 +590,16 @@ pub(super) fn undeclare_client_queryable( } let mut client_qabls = client_qabls(res); - let router_qabls = remote_router_qabls(tables, res); let peer_qabls = remote_peer_qabls(tables, res); - match tables.whatami { - WhatAmI::Router => { - if client_qabls.is_empty() && !peer_qabls { - undeclare_router_queryable(tables, None, res, &tables.zid.clone()); - } else { - let local_info = local_router_qabl_info(tables, 
res); - register_router_queryable(tables, None, res, &local_info, tables.zid); - propagate_forget_simple_queryable_to_peers(tables, res); - } - } - WhatAmI::Peer => { - if hat!(tables).full_net(WhatAmI::Peer) { - if client_qabls.is_empty() { - undeclare_peer_queryable(tables, None, res, &tables.zid.clone()); - } else { - let local_info = local_peer_qabl_info(tables, res); - register_peer_queryable(tables, None, res, &local_info, tables.zid); - } - } else if client_qabls.is_empty() { - propagate_forget_simple_queryable(tables, res); - } else { - propagate_simple_queryable(tables, res, None); - } - } - _ => { - if client_qabls.is_empty() { - propagate_forget_simple_queryable(tables, res); - } else { - propagate_simple_queryable(tables, res, None); - } - } + if client_qabls.is_empty() { + undeclare_peer_queryable(tables, None, res, &tables.zid.clone()); + } else { + let local_info = local_peer_qabl_info(tables, res); + register_peer_queryable(tables, None, res, &local_info, tables.zid); } - if client_qabls.len() == 1 && !router_qabls && !peer_qabls { + if client_qabls.len() == 1 && !peer_qabls { let face = &mut client_qabls[0]; if face_hat!(face).local_qabls.contains_key(res) { let wire_expr = Resource::get_best_key(res, "", face.id); @@ -1054,278 +656,65 @@ fn forget_client_queryable( } pub(super) fn queries_new_face(tables: &mut Tables, face: &mut Arc) { - match tables.whatami { - WhatAmI::Router => { - if face.whatami == WhatAmI::Client { - for qabl in hat!(tables).router_qabls.iter() { - if qabl.context.is_some() { - let info = local_qabl_info(tables, qabl, face); - face_hat_mut!(face).local_qabls.insert(qabl.clone(), info); - let key_expr = Resource::decl_key(qabl, face); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareQueryable(DeclareQueryable { - id: 0, // TODO - wire_expr: key_expr, - ext_info: info, - }), - }, - qabl.expr(), - )); - } - } - } else if face.whatami == WhatAmI::Peer && !hat!(tables).full_net(WhatAmI::Peer) { - for qabl in hat!(tables).router_qabls.iter() { - if qabl.context.is_some() - && (res_hat!(qabl).router_qabls.keys().any(|r| *r != tables.zid) - || qabl.session_ctxs.values().any(|s| { - s.qabl.is_some() - && (s.face.whatami == WhatAmI::Client - || (s.face.whatami == WhatAmI::Peer - && hat!(tables) - .failover_brokering(s.face.zid, face.zid))) - })) - { - let info = local_qabl_info(tables, qabl, face); - face_hat_mut!(face).local_qabls.insert(qabl.clone(), info); - let key_expr = Resource::decl_key(qabl, face); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareQueryable(DeclareQueryable { - id: 0, // TODO - wire_expr: key_expr, - ext_info: info, - }), - }, - qabl.expr(), - )); - } - } - } - } - WhatAmI::Peer => { - if hat!(tables).full_net(WhatAmI::Peer) { - if face.whatami == WhatAmI::Client { - for qabl in &hat!(tables).peer_qabls { - if qabl.context.is_some() { - let info = local_qabl_info(tables, qabl, face); - face_hat_mut!(face).local_qabls.insert(qabl.clone(), info); - let key_expr = Resource::decl_key(qabl, face); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareQueryable(DeclareQueryable { - id: 0, 
// TODO - wire_expr: key_expr, - ext_info: info, - }), - }, - qabl.expr(), - )); - } - } - } - } else { - for face in tables - .faces - .values() - .cloned() - .collect::>>() - { - for qabl in face_hat!(face).remote_qabls.iter() { - propagate_simple_queryable(tables, qabl, Some(&mut face.clone())); - } - } - } - } - WhatAmI::Client => { - for face in tables - .faces - .values() - .cloned() - .collect::>>() - { - for qabl in face_hat!(face).remote_qabls.iter() { - propagate_simple_queryable(tables, qabl, Some(&mut face.clone())); - } + if face.whatami == WhatAmI::Client { + for qabl in &hat!(tables).peer_qabls { + if qabl.context.is_some() { + let info = local_qabl_info(tables, qabl, face); + face_hat_mut!(face).local_qabls.insert(qabl.clone(), info); + let key_expr = Resource::decl_key(qabl, face); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id: 0, // TODO + wire_expr: key_expr, + ext_info: info, + }), + }, + qabl.expr(), + )); } } } } -pub(super) fn queries_remove_node(tables: &mut Tables, node: &ZenohId, net_type: WhatAmI) { - match net_type { - WhatAmI::Router => { - let mut qabls = vec![]; - for res in hat!(tables).router_qabls.iter() { - for qabl in res_hat!(res).router_qabls.keys() { - if qabl == node { - qabls.push(res.clone()); - } - } - } - for mut res in qabls { - unregister_router_queryable(tables, &mut res, node); - - let matches_query_routes = compute_matches_query_routes_(tables, &res); - for (mut res, query_routes) in matches_query_routes { - get_mut_unchecked(&mut res) - .context_mut() - .update_query_routes(query_routes); - } - Resource::clean(&mut res); - } - } - WhatAmI::Peer => { - let mut qabls = vec![]; - for res in hat!(tables).router_qabls.iter() { - for qabl in res_hat!(res).router_qabls.keys() { - if qabl == node { - qabls.push(res.clone()); - } - } - } - for mut res in qabls { - unregister_peer_queryable(tables, &mut res, node); - - if tables.whatami == WhatAmI::Router { - let client_qabls = res.session_ctxs.values().any(|ctx| ctx.qabl.is_some()); - let peer_qabls = remote_peer_qabls(tables, &res); - if !client_qabls && !peer_qabls { - undeclare_router_queryable(tables, None, &mut res, &tables.zid.clone()); - } else { - let local_info = local_router_qabl_info(tables, &res); - register_router_queryable(tables, None, &mut res, &local_info, tables.zid); - } - } - - let matches_query_routes = compute_matches_query_routes_(tables, &res); - for (mut res, query_routes) in matches_query_routes { - get_mut_unchecked(&mut res) - .context_mut() - .update_query_routes(query_routes); - } - Resource::clean(&mut res) +pub(super) fn queries_remove_node(tables: &mut Tables, node: &ZenohId) { + let mut qabls = vec![]; + for res in hat!(tables).peer_qabls.iter() { + for qabl in res_hat!(res).peer_qabls.keys() { + if qabl == node { + qabls.push(res.clone()); } } - _ => (), } -} - -pub(super) fn queries_linkstate_change(tables: &mut Tables, zid: &ZenohId, links: &[ZenohId]) { - if let Some(src_face) = tables.get_face(zid) { - if hat!(tables).router_peers_failover_brokering - && tables.whatami == WhatAmI::Router - && src_face.whatami == WhatAmI::Peer - { - for res in &face_hat!(src_face).remote_qabls { - let client_qabls = res - .session_ctxs - .values() - .any(|ctx| ctx.face.whatami == WhatAmI::Client && ctx.qabl.is_some()); - if !remote_router_qabls(tables, res) && !client_qabls { - for ctx in 
get_mut_unchecked(&mut res.clone()) - .session_ctxs - .values_mut() - { - let dst_face = &mut get_mut_unchecked(ctx).face; - if dst_face.whatami == WhatAmI::Peer && src_face.zid != dst_face.zid { - if face_hat!(dst_face).local_qabls.contains_key(res) { - let forget = !HatTables::failover_brokering_to(links, dst_face.zid) - && { - let ctx_links = hat!(tables) - .peers_net - .as_ref() - .map(|net| net.get_links(dst_face.zid)) - .unwrap_or_else(|| &[]); - res.session_ctxs.values().any(|ctx2| { - ctx2.face.whatami == WhatAmI::Peer - && ctx2.qabl.is_some() - && HatTables::failover_brokering_to( - ctx_links, - ctx2.face.zid, - ) - }) - }; - if forget { - let wire_expr = Resource::get_best_key(res, "", dst_face.id); - dst_face.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::UndeclareQueryable( - UndeclareQueryable { - id: 0, // TODO - ext_wire_expr: WireExprType { wire_expr }, - }, - ), - }, - res.expr(), - )); + for mut res in qabls { + unregister_peer_queryable(tables, &mut res, node); - face_hat_mut!(dst_face).local_qabls.remove(res); - } - } else if HatTables::failover_brokering_to(links, ctx.face.zid) { - let dst_face = &mut get_mut_unchecked(ctx).face; - let info = local_qabl_info(tables, res, dst_face); - face_hat_mut!(dst_face) - .local_qabls - .insert(res.clone(), info); - let key_expr = Resource::decl_key(res, dst_face); - dst_face.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareQueryable(DeclareQueryable { - id: 0, // TODO - wire_expr: key_expr, - ext_info: info, - }), - }, - res.expr(), - )); - } - } - } - } - } + let matches_query_routes = compute_matches_query_routes_(tables, &res); + for (mut res, query_routes) in matches_query_routes { + get_mut_unchecked(&mut res) + .context_mut() + .update_query_routes(query_routes); } + Resource::clean(&mut res) } } -pub(super) fn queries_tree_change( - tables: &mut Tables, - new_childs: &[Vec], - net_type: WhatAmI, -) { +pub(super) fn queries_tree_change(tables: &mut Tables, new_childs: &[Vec]) { // propagate qabls to new childs for (tree_sid, tree_childs) in new_childs.iter().enumerate() { if !tree_childs.is_empty() { - let net = hat!(tables).get_net(net_type).unwrap(); + let net = hat!(tables).peers_net.as_ref().unwrap(); let tree_idx = NodeIndex::new(tree_sid); if net.graph.contains_node(tree_idx) { let tree_id = net.graph[tree_idx].zid; - let qabls_res = match net_type { - WhatAmI::Router => &hat!(tables).router_qabls, - _ => &hat!(tables).peer_qabls, - }; + let qabls_res = &hat!(tables).peer_qabls; for res in qabls_res { - let qabls = match net_type { - WhatAmI::Router => &res_hat!(res).router_qabls, - _ => &res_hat!(res).peer_qabls, - }; + let qabls = &res_hat!(res).peer_qabls; if let Some(qabl_info) = qabls.get(&tree_id) { send_sourced_queryable_to_net_childs( tables, @@ -1403,24 +792,12 @@ impl HatQueriesTrait for HatCode { node_id: NodeId, ) { let rtables = zread!(tables.tables); - match (rtables.whatami, face.whatami) { - (WhatAmI::Router, WhatAmI::Router) => { - if let Some(router) = get_router(&rtables, face, node_id) { - declare_router_queryable(tables, rtables, face, expr, qabl_info, router) - } + if face.whatami != WhatAmI::Client { + if let Some(peer) = get_peer(&rtables, face, node_id) { + declare_peer_queryable(tables, rtables, face, expr, 
qabl_info, peer); } - (WhatAmI::Router, WhatAmI::Peer) - | (WhatAmI::Peer, WhatAmI::Router) - | (WhatAmI::Peer, WhatAmI::Peer) => { - if hat!(rtables).full_net(WhatAmI::Peer) { - if let Some(peer) = get_peer(&rtables, face, node_id) { - declare_peer_queryable(tables, rtables, face, expr, qabl_info, peer) - } - } else { - declare_client_queryable(tables, rtables, face, expr, qabl_info) - } - } - _ => declare_client_queryable(tables, rtables, face, expr, qabl_info), + } else { + declare_client_queryable(tables, rtables, face, expr, qabl_info); } } @@ -1432,24 +809,13 @@ impl HatQueriesTrait for HatCode { node_id: NodeId, ) { let rtables = zread!(tables.tables); - match (rtables.whatami, face.whatami) { - (WhatAmI::Router, WhatAmI::Router) => { - if let Some(router) = get_router(&rtables, face, node_id) { - forget_router_queryable(tables, rtables, face, expr, &router) - } - } - (WhatAmI::Router, WhatAmI::Peer) - | (WhatAmI::Peer, WhatAmI::Router) - | (WhatAmI::Peer, WhatAmI::Peer) => { - if hat!(rtables).full_net(WhatAmI::Peer) { - if let Some(peer) = get_peer(&rtables, face, node_id) { - forget_peer_queryable(tables, rtables, face, expr, &peer) - } - } else { - forget_client_queryable(tables, rtables, face, expr) - } + + if face.whatami != WhatAmI::Client { + if let Some(peer) = get_peer(&rtables, face, node_id) { + forget_peer_queryable(tables, rtables, face, expr, &peer); } - _ => forget_client_queryable(tables, rtables, face, expr), + } else { + forget_client_queryable(tables, rtables, face, expr); } } @@ -1485,94 +851,45 @@ impl HatQueriesTrait for HatCode { .map(|ctx| Cow::from(&ctx.matches)) .unwrap_or_else(|| Cow::from(Resource::get_matches(tables, &key_expr))); - let master = tables.whatami != WhatAmI::Router - || !hat!(tables).full_net(WhatAmI::Peer) - || *hat!(tables).elect_router(&tables.zid, &key_expr, hat!(tables).shared_nodes.iter()) - == tables.zid; - for mres in matches.iter() { let mres = mres.upgrade().unwrap(); let complete = DEFAULT_INCLUDER.includes(mres.expr().as_bytes(), key_expr.as_bytes()); - if tables.whatami == WhatAmI::Router { - if master || source_type == WhatAmI::Router { - let net = hat!(tables).routers_net.as_ref().unwrap(); - let router_source = match source_type { - WhatAmI::Router => source, - _ => net.idx.index() as NodeId, - }; - insert_target_for_qabls( - &mut route, - expr, - tables, - net, - router_source, - &res_hat!(mres).router_qabls, - complete, - ); - } - if (master || source_type != WhatAmI::Router) - && hat!(tables).full_net(WhatAmI::Peer) - { - let net = hat!(tables).peers_net.as_ref().unwrap(); - let peer_source = match source_type { - WhatAmI::Peer => source, - _ => net.idx.index() as NodeId, - }; - insert_target_for_qabls( - &mut route, - expr, - tables, - net, - peer_source, - &res_hat!(mres).peer_qabls, - complete, - ); - } - } - - if tables.whatami == WhatAmI::Peer && hat!(tables).full_net(WhatAmI::Peer) { - let net = hat!(tables).peers_net.as_ref().unwrap(); - let peer_source = match source_type { - WhatAmI::Router | WhatAmI::Peer => source, - _ => net.idx.index() as NodeId, - }; - insert_target_for_qabls( - &mut route, - expr, - tables, - net, - peer_source, - &res_hat!(mres).peer_qabls, - complete, - ); - } + let net = hat!(tables).peers_net.as_ref().unwrap(); + let peer_source = match source_type { + WhatAmI::Router | WhatAmI::Peer => source, + _ => net.idx.index() as NodeId, + }; + insert_target_for_qabls( + &mut route, + expr, + tables, + net, + peer_source, + &res_hat!(mres).peer_qabls, + complete, + ); - if tables.whatami != 
WhatAmI::Router || master || source_type == WhatAmI::Router { - for (sid, context) in &mres.session_ctxs { - if match tables.whatami { - WhatAmI::Router => context.face.whatami != WhatAmI::Router, - _ => { - source_type == WhatAmI::Client - || context.face.whatami == WhatAmI::Client - } - } { - let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, *sid); - if let Some(qabl_info) = context.qabl.as_ref() { - route.push(QueryTargetQabl { - direction: ( - context.face.clone(), - key_expr.to_owned(), - NodeId::default(), - ), - complete: if complete { - qabl_info.complete as u64 - } else { - 0 - }, - distance: 0.5, - }); - } + for (sid, context) in &mres.session_ctxs { + if match tables.whatami { + WhatAmI::Router => context.face.whatami != WhatAmI::Router, + _ => source_type == WhatAmI::Client || context.face.whatami == WhatAmI::Client, + } { + let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, *sid); + if let Some(qabl_info) = context.qabl.as_ref() { + route.push(QueryTargetQabl { + direction: ( + context.face.clone(), + key_expr.to_owned(), + NodeId::default(), + ), + complete: if complete { + qabl_info.complete as u64 + } else { + 0 + }, + distance: 0.5, + }); } } } @@ -1631,34 +948,31 @@ impl HatQueriesTrait for HatCode { client_query_route: None, }; let mut expr = RoutingExpr::new(res, ""); - if tables.whatami == WhatAmI::Router { - let indexes = hat!(tables) - .routers_net - .as_ref() - .unwrap() - .graph - .node_indices() - .collect::>(); - let max_idx = indexes.iter().max().unwrap(); - routes - .routers_query_routes - .resize_with(max_idx.index() + 1, || Arc::new(QueryTargetQablSet::new())); - - for idx in &indexes { - routes.routers_query_routes[idx.index()] = self.compute_query_route( - tables, - &mut expr, - idx.index() as NodeId, - WhatAmI::Router, - ); - } + let indexes = hat!(tables) + .peers_net + .as_ref() + .unwrap() + .graph + .node_indices() + .collect::>(); + let max_idx = indexes.iter().max().unwrap(); + routes + .peers_query_routes + .resize_with(max_idx.index() + 1, || Arc::new(QueryTargetQablSet::new())); - routes.peer_query_route = - Some(self.compute_query_route(tables, &mut expr, NodeId::default(), WhatAmI::Peer)); + for idx in &indexes { + routes.peers_query_routes[idx.index()] = + self.compute_query_route(tables, &mut expr, idx.index() as NodeId, WhatAmI::Peer); } - if (tables.whatami == WhatAmI::Router || tables.whatami == WhatAmI::Peer) - && hat!(tables).full_net(WhatAmI::Peer) - { + routes + } + + fn compute_query_routes(&self, tables: &mut Tables, res: &mut Arc) { + if res.context.is_some() { + let mut res_mut = res.clone(); + let res_mut = get_mut_unchecked(&mut res_mut); + let mut expr = RoutingExpr::new(res, ""); + let indexes = hat!(tables) .peers_net .as_ref() @@ -1667,12 +981,13 @@ impl HatQueriesTrait for HatCode { .node_indices() .collect::>(); let max_idx = indexes.iter().max().unwrap(); - routes - .peers_query_routes + let peers_query_routes = &mut res_mut.context_mut().peers_query_routes; + peers_query_routes.clear(); + peers_query_routes .resize_with(max_idx.index() + 1, || Arc::new(QueryTargetQablSet::new())); for idx in &indexes { - routes.peers_query_routes[idx.index()] = self.compute_query_route( + peers_query_routes[idx.index()] = self.compute_query_route( tables, &mut expr, idx.index() as NodeId, @@ -1680,109 +995,5 @@ impl HatQueriesTrait for HatCode { ); } } - if tables.whatami == WhatAmI::Peer && !hat!(tables).full_net(WhatAmI::Peer) { - routes.client_query_route = Some(self.compute_query_route( - tables, - &mut expr, 
- NodeId::default(), - WhatAmI::Client, - )); - routes.peer_query_route = - Some(self.compute_query_route(tables, &mut expr, NodeId::default(), WhatAmI::Peer)); - } - if tables.whatami == WhatAmI::Client { - routes.client_query_route = Some(self.compute_query_route( - tables, - &mut expr, - NodeId::default(), - WhatAmI::Client, - )); - } - routes - } - - fn compute_query_routes(&self, tables: &mut Tables, res: &mut Arc) { - if res.context.is_some() { - let mut res_mut = res.clone(); - let res_mut = get_mut_unchecked(&mut res_mut); - let mut expr = RoutingExpr::new(res, ""); - if tables.whatami == WhatAmI::Router { - let indexes = hat!(tables) - .routers_net - .as_ref() - .unwrap() - .graph - .node_indices() - .collect::>(); - let max_idx = indexes.iter().max().unwrap(); - let routers_query_routes = &mut res_mut.context_mut().routers_query_routes; - routers_query_routes.clear(); - routers_query_routes - .resize_with(max_idx.index() + 1, || Arc::new(QueryTargetQablSet::new())); - - for idx in &indexes { - routers_query_routes[idx.index()] = self.compute_query_route( - tables, - &mut expr, - idx.index() as NodeId, - WhatAmI::Router, - ); - } - - res_mut.context_mut().peer_query_route = Some(self.compute_query_route( - tables, - &mut expr, - NodeId::default(), - WhatAmI::Peer, - )); - } - if (tables.whatami == WhatAmI::Router || tables.whatami == WhatAmI::Peer) - && hat!(tables).full_net(WhatAmI::Peer) - { - let indexes = hat!(tables) - .peers_net - .as_ref() - .unwrap() - .graph - .node_indices() - .collect::>(); - let max_idx = indexes.iter().max().unwrap(); - let peers_query_routes = &mut res_mut.context_mut().peers_query_routes; - peers_query_routes.clear(); - peers_query_routes - .resize_with(max_idx.index() + 1, || Arc::new(QueryTargetQablSet::new())); - - for idx in &indexes { - peers_query_routes[idx.index()] = self.compute_query_route( - tables, - &mut expr, - idx.index() as NodeId, - WhatAmI::Peer, - ); - } - } - if tables.whatami == WhatAmI::Peer && !hat!(tables).full_net(WhatAmI::Peer) { - res_mut.context_mut().client_query_route = Some(self.compute_query_route( - tables, - &mut expr, - NodeId::default(), - WhatAmI::Client, - )); - res_mut.context_mut().peer_query_route = Some(self.compute_query_route( - tables, - &mut expr, - NodeId::default(), - WhatAmI::Peer, - )); - } - if tables.whatami == WhatAmI::Client { - res_mut.context_mut().client_query_route = Some(self.compute_query_route( - tables, - &mut expr, - NodeId::default(), - WhatAmI::Client, - )); - } - } } } diff --git a/zenoh/src/net/routing/hat/p2p_peer/gossip.rs b/zenoh/src/net/routing/hat/p2p_peer/gossip.rs new file mode 100644 index 0000000000..75d97ae1dc --- /dev/null +++ b/zenoh/src/net/routing/hat/p2p_peer/gossip.rs @@ -0,0 +1,564 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
+// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// +use crate::net::codec::Zenoh080Routing; +use crate::net::protocol::linkstate::{LinkState, LinkStateList}; +use crate::net::runtime::Runtime; +use async_std::task; +use petgraph::graph::NodeIndex; +use std::convert::TryInto; +use vec_map::VecMap; +use zenoh_buffers::writer::{DidntWrite, HasWriter}; +use zenoh_buffers::ZBuf; +use zenoh_codec::WCodec; +use zenoh_link::Locator; +use zenoh_protocol::common::ZExtBody; +use zenoh_protocol::core::{WhatAmI, WhatAmIMatcher, ZenohId}; +use zenoh_protocol::network::oam::id::OAM_LINKSTATE; +use zenoh_protocol::network::{oam, NetworkBody, NetworkMessage, Oam}; +use zenoh_transport::TransportUnicast; + +#[derive(Clone)] +struct Details { + zid: bool, + locators: bool, + links: bool, +} + +#[derive(Clone)] +pub(super) struct Node { + pub(super) zid: ZenohId, + pub(super) whatami: Option, + pub(super) locators: Option>, + pub(super) sn: u64, + pub(super) links: Vec, +} + +impl std::fmt::Debug for Node { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + write!(f, "{}", self.zid) + } +} + +pub(super) struct Link { + pub(super) transport: TransportUnicast, + zid: ZenohId, + mappings: VecMap, + local_mappings: VecMap, +} + +impl Link { + fn new(transport: TransportUnicast) -> Self { + let zid = transport.get_zid().unwrap(); + Link { + transport, + zid, + mappings: VecMap::new(), + local_mappings: VecMap::new(), + } + } + + #[inline] + pub(super) fn set_zid_mapping(&mut self, psid: u64, zid: ZenohId) { + self.mappings.insert(psid.try_into().unwrap(), zid); + } + + #[inline] + pub(super) fn get_zid(&self, psid: &u64) -> Option<&ZenohId> { + self.mappings.get((*psid).try_into().unwrap()) + } + + #[inline] + pub(super) fn set_local_psid_mapping(&mut self, psid: u64, local_psid: u64) { + self.local_mappings + .insert(psid.try_into().unwrap(), local_psid); + } +} + +pub(super) struct Network { + pub(super) name: String, + pub(super) router_peers_failover_brokering: bool, + pub(super) gossip: bool, + pub(super) gossip_multihop: bool, + pub(super) autoconnect: WhatAmIMatcher, + pub(super) idx: NodeIndex, + pub(super) links: VecMap, + pub(super) graph: petgraph::stable_graph::StableUnGraph, + pub(super) runtime: Runtime, +} + +impl Network { + #[allow(clippy::too_many_arguments)] + pub(super) fn new( + name: String, + zid: ZenohId, + runtime: Runtime, + router_peers_failover_brokering: bool, + gossip: bool, + gossip_multihop: bool, + autoconnect: WhatAmIMatcher, + ) -> Self { + let mut graph = petgraph::stable_graph::StableGraph::default(); + log::debug!("{} Add node (self) {}", name, zid); + let idx = graph.add_node(Node { + zid, + whatami: Some(runtime.whatami), + locators: None, + sn: 1, + links: vec![], + }); + Network { + name, + router_peers_failover_brokering, + gossip, + gossip_multihop, + autoconnect, + idx, + links: VecMap::new(), + graph, + runtime, + } + } + + //noinspection ALL + // pub(super) fn dot(&self) -> String { + // std::format!( + // "{:?}", + // petgraph::dot::Dot::with_config(&self.graph, &[petgraph::dot::Config::EdgeNoLabel]) + // ) + // } + + #[inline] + pub(super) fn get_idx(&self, zid: &ZenohId) -> Option { + self.graph + .node_indices() + .find(|idx| self.graph[*idx].zid == *zid) + } + + #[inline] + pub(super) fn get_link_from_zid(&self, zid: &ZenohId) -> Option<&Link> { + self.links.values().find(|link| link.zid == *zid) + } + + fn add_node(&mut self, node: Node) -> NodeIndex { + let zid = node.zid; + let idx = 
self.graph.add_node(node); + for link in self.links.values_mut() { + if let Some((psid, _)) = link.mappings.iter().find(|(_, p)| **p == zid) { + link.local_mappings.insert(psid, idx.index() as u64); + } + } + idx + } + + fn make_link_state(&self, idx: NodeIndex, details: Details) -> LinkState { + let links = if details.links { + self.graph[idx] + .links + .iter() + .filter_map(|zid| { + if let Some(idx2) = self.get_idx(zid) { + Some(idx2.index().try_into().unwrap()) + } else { + log::error!( + "{} Internal error building link state: cannot get index of {}", + self.name, + zid + ); + None + } + }) + .collect() + } else { + vec![] + }; + LinkState { + psid: idx.index().try_into().unwrap(), + sn: self.graph[idx].sn, + zid: if details.zid { + Some(self.graph[idx].zid) + } else { + None + }, + whatami: self.graph[idx].whatami, + locators: if details.locators { + if idx == self.idx { + Some(self.runtime.get_locators()) + } else { + self.graph[idx].locators.clone() + } + } else { + None + }, + links, + } + } + + fn make_msg(&self, idxs: Vec<(NodeIndex, Details)>) -> Result { + let mut link_states = vec![]; + for (idx, details) in idxs { + link_states.push(self.make_link_state(idx, details)); + } + let codec = Zenoh080Routing::new(); + let mut buf = ZBuf::empty(); + codec.write(&mut buf.writer(), &LinkStateList { link_states })?; + Ok(NetworkBody::OAM(Oam { + id: OAM_LINKSTATE, + body: ZExtBody::ZBuf(buf), + ext_qos: oam::ext::QoSType::oam_default(), + ext_tstamp: None, + }) + .into()) + } + + fn send_on_link(&self, idxs: Vec<(NodeIndex, Details)>, transport: &TransportUnicast) { + if let Ok(msg) = self.make_msg(idxs) { + log::trace!("{} Send to {:?} {:?}", self.name, transport.get_zid(), msg); + if let Err(e) = transport.schedule(msg) { + log::debug!("{} Error sending LinkStateList: {}", self.name, e); + } + } else { + log::error!("Failed to encode Linkstate message"); + } + } + + fn send_on_links
<P>
(&self, idxs: Vec<(NodeIndex, Details)>, mut parameters: P) + where + P: FnMut(&Link) -> bool, + { + if let Ok(msg) = self.make_msg(idxs) { + for link in self.links.values() { + if parameters(link) { + log::trace!("{} Send to {} {:?}", self.name, link.zid, msg); + if let Err(e) = link.transport.schedule(msg.clone()) { + log::debug!("{} Error sending LinkStateList: {}", self.name, e); + } + } + } + } else { + log::error!("Failed to encode Linkstate message"); + } + } + + // Indicates if locators should be included when propagating Linkstate message + // from the given node. + // Returns true if gossip is enabled and if multihop gossip is enabled or + // the node is one of self neighbours. + fn propagate_locators(&self, idx: NodeIndex) -> bool { + self.gossip + && (self.gossip_multihop + || idx == self.idx + || self.links.values().any(|link| { + self.graph + .node_weight(idx) + .map(|node| link.zid == node.zid) + .unwrap_or(true) + })) + } + + pub(super) fn link_states(&mut self, link_states: Vec, src: ZenohId) { + log::trace!("{} Received from {} raw: {:?}", self.name, src, link_states); + + let graph = &self.graph; + let links = &mut self.links; + + let src_link = match links.values_mut().find(|link| link.zid == src) { + Some(link) => link, + None => { + log::error!( + "{} Received LinkStateList from unknown link {}", + self.name, + src + ); + return; + } + }; + + // register psid<->zid mappings & apply mapping to nodes + #[allow(clippy::needless_collect)] // need to release borrow on self + let link_states = link_states + .into_iter() + .filter_map(|link_state| { + if let Some(zid) = link_state.zid { + src_link.set_zid_mapping(link_state.psid, zid); + if let Some(idx) = graph.node_indices().find(|idx| graph[*idx].zid == zid) { + src_link.set_local_psid_mapping(link_state.psid, idx.index() as u64); + } + Some(( + zid, + link_state.whatami.unwrap_or(WhatAmI::Router), + link_state.locators, + link_state.sn, + link_state.links, + )) + } else { + match src_link.get_zid(&link_state.psid) { + Some(zid) => Some(( + *zid, + link_state.whatami.unwrap_or(WhatAmI::Router), + link_state.locators, + link_state.sn, + link_state.links, + )), + None => { + log::error!( + "Received LinkState from {} with unknown node mapping {}", + src, + link_state.psid + ); + None + } + } + } + }) + .collect::>(); + + // apply psid<->zid mapping to links + let src_link = self.get_link_from_zid(&src).unwrap(); + let link_states = link_states + .into_iter() + .map(|(zid, wai, locs, sn, links)| { + let links: Vec = links + .iter() + .filter_map(|l| { + if let Some(zid) = src_link.get_zid(l) { + Some(*zid) + } else { + log::error!( + "{} Received LinkState from {} with unknown link mapping {}", + self.name, + src, + l + ); + None + } + }) + .collect(); + (zid, wai, locs, sn, links) + }) + .collect::>(); + + // log::trace!( + // "{} Received from {} mapped: {:?}", + // self.name, + // src, + // link_states + // ); + for link_state in &link_states { + log::trace!( + "{} Received from {} mapped: {:?}", + self.name, + src, + link_state + ); + } + + for (zid, whatami, locators, sn, links) in link_states.into_iter() { + let idx = match self.get_idx(&zid) { + None => { + let idx = self.add_node(Node { + zid, + whatami: Some(whatami), + locators: locators.clone(), + sn, + links, + }); + locators.is_some().then_some(idx) + } + Some(idx) => { + let node = &mut self.graph[idx]; + let oldsn = node.sn; + (oldsn < sn) + .then(|| { + node.sn = sn; + node.links = links.clone(); + (node.locators != locators && locators.is_some()).then(|| { + 
node.locators = locators.clone(); + idx + }) + }) + .flatten() + } + }; + + if self.gossip { + if let Some(idx) = idx { + if self.gossip_multihop || self.links.values().any(|link| link.zid == zid) { + self.send_on_links( + vec![( + idx, + Details { + zid: true, + locators: true, + links: false, + }, + )], + |link| link.zid != zid, + ); + } + + if !self.autoconnect.is_empty() { + // Connect discovered peers + if task::block_on(self.runtime.manager().get_transport_unicast(&zid)) + .is_none() + && self.autoconnect.matches(whatami) + { + if let Some(locators) = locators { + let runtime = self.runtime.clone(); + self.runtime.spawn(async move { + // random backoff + async_std::task::sleep(std::time::Duration::from_millis( + rand::random::() % 100, + )) + .await; + runtime.connect_peer(&zid, &locators).await; + }); + } + } + } + } + } + } + } + + pub(super) fn add_link(&mut self, transport: TransportUnicast) -> usize { + let free_index = { + let mut i = 0; + while self.links.contains_key(i) { + i += 1; + } + i + }; + self.links.insert(free_index, Link::new(transport.clone())); + + let zid = transport.get_zid().unwrap(); + let whatami = transport.get_whatami().unwrap(); + + if self.router_peers_failover_brokering { + let (idx, new) = match self.get_idx(&zid) { + Some(idx) => (idx, false), + None => { + log::debug!("{} Add node (link) {}", self.name, zid); + ( + self.add_node(Node { + zid, + whatami: Some(whatami), + locators: None, + sn: 0, + links: vec![], + }), + true, + ) + } + }; + self.graph[self.idx].links.push(zid); + self.graph[self.idx].sn += 1; + + // Send updated self linkstate on all existing links except new one + self.links + .values() + .filter(|link| { + link.zid != zid + && link.transport.get_whatami().unwrap_or(WhatAmI::Peer) == WhatAmI::Router + }) + .for_each(|link| { + self.send_on_link( + if new || (!self.gossip_multihop) { + vec![ + ( + idx, + Details { + zid: true, + locators: false, + links: false, + }, + ), + ( + self.idx, + Details { + zid: false, + locators: self.propagate_locators(idx), + links: true, + }, + ), + ] + } else { + vec![( + self.idx, + Details { + zid: false, + locators: self.propagate_locators(idx), + links: true, + }, + )] + }, + &link.transport, + ) + }); + } + + // Send all nodes linkstate on new link + let idxs = self + .graph + .node_indices() + .filter_map(|idx| { + (self.gossip_multihop + || self.links.values().any(|link| link.zid == zid) + || (self.router_peers_failover_brokering + && idx == self.idx + && whatami == WhatAmI::Router)) + .then(|| { + ( + idx, + Details { + zid: true, + locators: self.propagate_locators(idx), + links: (self.router_peers_failover_brokering + && idx == self.idx + && whatami == WhatAmI::Router), + }, + ) + }) + }) + .collect(); + self.send_on_link(idxs, &transport); + free_index + } + + pub(super) fn remove_link(&mut self, zid: &ZenohId) -> Vec<(NodeIndex, Node)> { + log::trace!("{} remove_link {}", self.name, zid); + self.links.retain(|_, link| link.zid != *zid); + self.graph[self.idx].links.retain(|link| *link != *zid); + + if let Some(idx) = self.get_idx(zid) { + self.graph.remove_node(idx); + } + if self.router_peers_failover_brokering { + self.send_on_links( + vec![( + self.idx, + Details { + zid: false, + locators: self.gossip, + links: true, + }, + )], + |link| { + link.zid != *zid + && link.transport.get_whatami().unwrap_or(WhatAmI::Peer) == WhatAmI::Router + }, + ); + } + vec![] + } +} diff --git a/zenoh/src/net/routing/hat/p2p_peer/mod.rs b/zenoh/src/net/routing/hat/p2p_peer/mod.rs index 3cf318831b..3e74672903 
100644 --- a/zenoh/src/net/routing/hat/p2p_peer/mod.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/mod.rs @@ -17,14 +17,17 @@ //! This module is intended for Zenoh's internal use. //! //! [Click here for Zenoh's documentation](../zenoh/index.html) -use self::{ - network::{shared_nodes, Network}, - pubsub::{ - pubsub_linkstate_change, pubsub_new_face, pubsub_remove_node, undeclare_client_subscription, - }, - queries::{ - queries_linkstate_change, queries_new_face, queries_remove_node, undeclare_client_queryable, +use crate::{ + net::{ + codec::Zenoh080Routing, protocol::linkstate::LinkStateList, routing::dispatcher::face::Face, }, + runtime::Runtime, +}; + +use self::{ + gossip::Network, + pubsub::{pubsub_new_face, undeclare_client_subscription}, + queries::{queries_new_face, undeclare_client_queryable}, }; use super::{ super::dispatcher::{ @@ -33,43 +36,25 @@ use super::{ }, HatBaseTrait, HatTrait, }; -use crate::{ - net::{ - codec::Zenoh080Routing, protocol::linkstate::LinkStateList, routing::dispatcher::face::Face, - }, - runtime::Runtime, -}; -use async_std::task::JoinHandle; use std::{ any::Any, - collections::{hash_map::DefaultHasher, HashMap, HashSet}, - hash::Hasher, + collections::{HashMap, HashSet}, sync::Arc, }; -use zenoh_config::{unwrap_or_default, ModeDependent, WhatAmI, WhatAmIMatcher, ZenohId}; +use zenoh_config::{unwrap_or_default, ModeDependent, WhatAmI, WhatAmIMatcher}; +use zenoh_protocol::network::Oam; use zenoh_protocol::{ common::ZExtBody, - network::{declare::queryable::ext::QueryableInfo, oam::id::OAM_LINKSTATE, Oam}, + network::{declare::queryable::ext::QueryableInfo, oam::id::OAM_LINKSTATE}, }; use zenoh_result::ZResult; use zenoh_sync::get_mut_unchecked; use zenoh_transport::TransportUnicast; -mod network; +mod gossip; mod pubsub; mod queries; -zconfigurable! { - static ref TREES_COMPUTATION_DELAY: u64 = 100; -} - -macro_rules! hat { - ($t:expr) => { - $t.hat.downcast_ref::().unwrap() - }; -} -use hat; - macro_rules! hat_mut { ($t:expr) => { $t.hat.downcast_mut::().unwrap() @@ -77,24 +62,6 @@ macro_rules! hat_mut { } use hat_mut; -macro_rules! res_hat { - ($r:expr) => { - $r.context().hat.downcast_ref::().unwrap() - }; -} -use res_hat; - -macro_rules! res_hat_mut { - ($r:expr) => { - get_mut_unchecked($r) - .context_mut() - .hat - .downcast_mut::() - .unwrap() - }; -} -use res_hat_mut; - macro_rules! face_hat { ($f:expr) => { $f.hat.downcast_ref::().unwrap() @@ -110,161 +77,12 @@ macro_rules! 
face_hat_mut { use face_hat_mut; struct HatTables { - router_subs: HashSet>, - peer_subs: HashSet>, - router_qabls: HashSet>, - peer_qabls: HashSet>, - routers_net: Option, - peers_net: Option, - shared_nodes: Vec, - routers_trees_task: Option>, - peers_trees_task: Option>, - router_peers_failover_brokering: bool, + gossip: Option, } impl HatTables { - fn new(router_peers_failover_brokering: bool) -> Self { - Self { - router_subs: HashSet::new(), - peer_subs: HashSet::new(), - router_qabls: HashSet::new(), - peer_qabls: HashSet::new(), - routers_net: None, - peers_net: None, - shared_nodes: vec![], - routers_trees_task: None, - peers_trees_task: None, - router_peers_failover_brokering, - } - } - - #[inline] - fn get_net(&self, net_type: WhatAmI) -> Option<&Network> { - match net_type { - WhatAmI::Router => self.routers_net.as_ref(), - WhatAmI::Peer => self.peers_net.as_ref(), - _ => None, - } - } - - #[inline] - fn full_net(&self, net_type: WhatAmI) -> bool { - match net_type { - WhatAmI::Router => self - .routers_net - .as_ref() - .map(|net| net.full_linkstate) - .unwrap_or(false), - WhatAmI::Peer => self - .peers_net - .as_ref() - .map(|net| net.full_linkstate) - .unwrap_or(false), - _ => false, - } - } - - #[inline] - fn get_router_links(&self, peer: ZenohId) -> impl Iterator + '_ { - self.peers_net - .as_ref() - .unwrap() - .get_links(peer) - .iter() - .filter(move |nid| { - if let Some(node) = self.routers_net.as_ref().unwrap().get_node(nid) { - node.whatami.unwrap_or(WhatAmI::Router) == WhatAmI::Router - } else { - false - } - }) - } - - #[inline] - fn elect_router<'a>( - &'a self, - self_zid: &'a ZenohId, - key_expr: &str, - mut routers: impl Iterator, - ) -> &'a ZenohId { - match routers.next() { - None => self_zid, - Some(router) => { - let hash = |r: &ZenohId| { - let mut hasher = DefaultHasher::new(); - for b in key_expr.as_bytes() { - hasher.write_u8(*b); - } - for b in &r.to_le_bytes()[..r.size()] { - hasher.write_u8(*b); - } - hasher.finish() - }; - let mut res = router; - let mut h = None; - for router2 in routers { - let h2 = hash(router2); - if h2 > *h.get_or_insert_with(|| hash(res)) { - res = router2; - h = Some(h2); - } - } - res - } - } - } - - #[inline] - fn failover_brokering_to(source_links: &[ZenohId], dest: ZenohId) -> bool { - // if source_links is empty then gossip is probably disabled in source peer - !source_links.is_empty() && !source_links.contains(&dest) - } - - #[inline] - fn failover_brokering(&self, peer1: ZenohId, peer2: ZenohId) -> bool { - self.router_peers_failover_brokering - && self - .peers_net - .as_ref() - .map(|net| HatTables::failover_brokering_to(net.get_links(peer1), peer2)) - .unwrap_or(false) - } - - fn schedule_compute_trees(&mut self, tables_ref: Arc, net_type: WhatAmI) { - log::trace!("Schedule computations"); - if (net_type == WhatAmI::Router && self.routers_trees_task.is_none()) - || (net_type == WhatAmI::Peer && self.peers_trees_task.is_none()) - { - let task = Some(async_std::task::spawn(async move { - async_std::task::sleep(std::time::Duration::from_millis(*TREES_COMPUTATION_DELAY)) - .await; - let mut tables = zwrite!(tables_ref.tables); - - log::trace!("Compute trees"); - let new_childs = match net_type { - WhatAmI::Router => hat_mut!(tables) - .routers_net - .as_mut() - .unwrap() - .compute_trees(), - _ => hat_mut!(tables).peers_net.as_mut().unwrap().compute_trees(), - }; - - log::trace!("Compute routes"); - pubsub::pubsub_tree_change(&mut tables, &new_childs, net_type); - queries::queries_tree_change(&mut tables, &new_childs, 
net_type); - - log::trace!("Computations completed"); - match net_type { - WhatAmI::Router => hat_mut!(tables).routers_trees_task = None, - _ => hat_mut!(tables).peers_trees_task = None, - }; - })); - match net_type { - WhatAmI::Router => self.routers_trees_task = task, - _ => self.peers_trees_task = task, - }; - } + fn new() -> Self { + Self { gossip: None } } } @@ -281,48 +99,23 @@ impl HatBaseTrait for HatCode { } else { WhatAmIMatcher::empty() }; - - let router_full_linkstate = whatami == WhatAmI::Router; - let peer_full_linkstate = whatami != WhatAmI::Client - && unwrap_or_default!(config.routing().peer().mode()) == *"linkstate"; let router_peers_failover_brokering = unwrap_or_default!(config.routing().router().peers_failover_brokering()); drop(config); - if router_full_linkstate | gossip { - hat_mut!(tables).routers_net = Some(Network::new( - "[Routers network]".to_string(), - tables.zid, - runtime.clone(), - router_full_linkstate, - router_peers_failover_brokering, - gossip, - gossip_multihop, - autoconnect, - )); - } - if peer_full_linkstate | gossip { - hat_mut!(tables).peers_net = Some(Network::new( - "[Peers network]".to_string(), - tables.zid, - runtime, - peer_full_linkstate, - router_peers_failover_brokering, - gossip, - gossip_multihop, - autoconnect, - )); - } - if router_full_linkstate && peer_full_linkstate { - hat_mut!(tables).shared_nodes = shared_nodes( - hat!(tables).routers_net.as_ref().unwrap(), - hat!(tables).peers_net.as_ref().unwrap(), - ); - } + hat_mut!(tables).gossip = Some(Network::new( + "[Gossip]".to_string(), + tables.zid, + runtime, + router_peers_failover_brokering, + gossip, + gossip_multihop, + autoconnect, + )); } - fn new_tables(&self, router_peers_failover_brokering: bool) -> Box { - Box::new(HatTables::new(router_peers_failover_brokering)) + fn new_tables(&self, _router_peers_failover_brokering: bool) -> Box { + Box::new(HatTables::new()) } fn new_face(&self) -> Box { @@ -347,52 +140,17 @@ impl HatBaseTrait for HatCode { fn new_transport_unicast_face( &self, tables: &mut Tables, - tables_ref: &Arc, + _tables_ref: &Arc, face: &mut Face, transport: &TransportUnicast, ) -> ZResult<()> { - let link_id = match (tables.whatami, face.state.whatami) { - (WhatAmI::Router, WhatAmI::Router) => hat_mut!(tables) - .routers_net - .as_mut() - .unwrap() - .add_link(transport.clone()), - (WhatAmI::Router, WhatAmI::Peer) - | (WhatAmI::Peer, WhatAmI::Router) - | (WhatAmI::Peer, WhatAmI::Peer) => { - if let Some(net) = hat_mut!(tables).peers_net.as_mut() { - net.add_link(transport.clone()) - } else { - 0 - } + if face.state.whatami != WhatAmI::Client { + if let Some(net) = hat_mut!(tables).gossip.as_mut() { + net.add_link(transport.clone()); } - _ => 0, - }; - - if hat!(tables).full_net(WhatAmI::Router) && hat!(tables).full_net(WhatAmI::Peer) { - hat_mut!(tables).shared_nodes = shared_nodes( - hat!(tables).routers_net.as_ref().unwrap(), - hat!(tables).peers_net.as_ref().unwrap(), - ); } - - face_hat_mut!(&mut face.state).link_id = link_id; pubsub_new_face(tables, &mut face.state); queries_new_face(tables, &mut face.state); - - match (tables.whatami, face.state.whatami) { - (WhatAmI::Router, WhatAmI::Router) => { - hat_mut!(tables).schedule_compute_trees(tables_ref.clone(), WhatAmI::Router); - } - (WhatAmI::Router, WhatAmI::Peer) - | (WhatAmI::Peer, WhatAmI::Router) - | (WhatAmI::Peer, WhatAmI::Peer) => { - if hat_mut!(tables).full_net(WhatAmI::Peer) { - hat_mut!(tables).schedule_compute_trees(tables_ref.clone(), WhatAmI::Peer); - } - } - _ => (), - } Ok(()) } @@ 
-501,7 +259,7 @@ impl HatBaseTrait for HatCode { fn handle_oam( &self, tables: &mut Tables, - tables_ref: &Arc, + _tables_ref: &Arc, oam: Oam, transport: &TransportUnicast, ) -> ZResult<()> { @@ -515,74 +273,10 @@ impl HatBaseTrait for HatCode { let list: LinkStateList = codec.read(&mut reader).unwrap(); let whatami = transport.get_whatami()?; - match (tables.whatami, whatami) { - (WhatAmI::Router, WhatAmI::Router) => { - for (_, removed_node) in hat_mut!(tables) - .routers_net - .as_mut() - .unwrap() - .link_states(list.link_states, zid) - .removed_nodes - { - pubsub_remove_node(tables, &removed_node.zid, WhatAmI::Router); - queries_remove_node(tables, &removed_node.zid, WhatAmI::Router); - } - - if hat!(tables).full_net(WhatAmI::Peer) { - hat_mut!(tables).shared_nodes = shared_nodes( - hat!(tables).routers_net.as_ref().unwrap(), - hat!(tables).peers_net.as_ref().unwrap(), - ); - } - - hat_mut!(tables) - .schedule_compute_trees(tables_ref.clone(), WhatAmI::Router); + if whatami != WhatAmI::Client { + if let Some(net) = hat_mut!(tables).gossip.as_mut() { + net.link_states(list.link_states, zid); } - (WhatAmI::Router, WhatAmI::Peer) - | (WhatAmI::Peer, WhatAmI::Router) - | (WhatAmI::Peer, WhatAmI::Peer) => { - if let Some(net) = hat_mut!(tables).peers_net.as_mut() { - let changes = net.link_states(list.link_states, zid); - if hat!(tables).full_net(WhatAmI::Peer) { - for (_, removed_node) in changes.removed_nodes { - pubsub_remove_node( - tables, - &removed_node.zid, - WhatAmI::Peer, - ); - queries_remove_node( - tables, - &removed_node.zid, - WhatAmI::Peer, - ); - } - - if tables.whatami == WhatAmI::Router { - hat_mut!(tables).shared_nodes = shared_nodes( - hat!(tables).routers_net.as_ref().unwrap(), - hat!(tables).peers_net.as_ref().unwrap(), - ); - } - - hat_mut!(tables) - .schedule_compute_trees(tables_ref.clone(), WhatAmI::Peer); - } else { - for (_, updated_node) in changes.updated_nodes { - pubsub_linkstate_change( - tables, - &updated_node.zid, - &updated_node.links, - ); - queries_linkstate_change( - tables, - &updated_node.zid, - &updated_node.links, - ); - } - } - } - } - _ => (), }; } } @@ -593,103 +287,23 @@ impl HatBaseTrait for HatCode { fn map_routing_context( &self, - tables: &Tables, - face: &FaceState, - routing_context: NodeId, + _tables: &Tables, + _face: &FaceState, + _routing_context: NodeId, ) -> NodeId { - match tables.whatami { - WhatAmI::Router => match face.whatami { - WhatAmI::Router => hat!(tables) - .routers_net - .as_ref() - .unwrap() - .get_local_context(routing_context, face_hat!(face).link_id), - WhatAmI::Peer => { - if hat!(tables).full_net(WhatAmI::Peer) { - hat!(tables) - .peers_net - .as_ref() - .unwrap() - .get_local_context(routing_context, face_hat!(face).link_id) - } else { - 0 - } - } - _ => 0, - }, - WhatAmI::Peer => { - if hat!(tables).full_net(WhatAmI::Peer) { - hat!(tables) - .peers_net - .as_ref() - .unwrap() - .get_local_context(routing_context, face_hat!(face).link_id) - } else { - 0 - } - } - _ => 0, - } + 0 } fn closing( &self, tables: &mut Tables, - tables_ref: &Arc, + _tables_ref: &Arc, transport: &TransportUnicast, ) -> ZResult<()> { match (transport.get_zid(), transport.get_whatami()) { (Ok(zid), Ok(whatami)) => { - match (tables.whatami, whatami) { - (WhatAmI::Router, WhatAmI::Router) => { - for (_, removed_node) in hat_mut!(tables) - .routers_net - .as_mut() - .unwrap() - .remove_link(&zid) - { - pubsub_remove_node(tables, &removed_node.zid, WhatAmI::Router); - queries_remove_node(tables, &removed_node.zid, WhatAmI::Router); - } - - 
if hat!(tables).full_net(WhatAmI::Peer) { - hat_mut!(tables).shared_nodes = shared_nodes( - hat!(tables).routers_net.as_ref().unwrap(), - hat!(tables).peers_net.as_ref().unwrap(), - ); - } - - hat_mut!(tables) - .schedule_compute_trees(tables_ref.clone(), WhatAmI::Router); - } - (WhatAmI::Router, WhatAmI::Peer) - | (WhatAmI::Peer, WhatAmI::Router) - | (WhatAmI::Peer, WhatAmI::Peer) => { - if hat!(tables).full_net(WhatAmI::Peer) { - for (_, removed_node) in hat_mut!(tables) - .peers_net - .as_mut() - .unwrap() - .remove_link(&zid) - { - pubsub_remove_node(tables, &removed_node.zid, WhatAmI::Peer); - queries_remove_node(tables, &removed_node.zid, WhatAmI::Peer); - } - - if tables.whatami == WhatAmI::Router { - hat_mut!(tables).shared_nodes = shared_nodes( - hat!(tables).routers_net.as_ref().unwrap(), - hat!(tables).peers_net.as_ref().unwrap(), - ); - } - - hat_mut!(tables) - .schedule_compute_trees(tables_ref.clone(), WhatAmI::Peer); - } else if let Some(net) = hat_mut!(tables).peers_net.as_mut() { - net.remove_link(&zid); - } - } - _ => (), + if whatami != WhatAmI::Client { + hat_mut!(tables).gossip.as_mut().unwrap().remove_link(&zid); }; } (_, _) => log::error!("Closed transport in session closing!"), @@ -702,72 +316,35 @@ impl HatBaseTrait for HatCode { } #[inline] - fn ingress_filter(&self, tables: &Tables, face: &FaceState, expr: &mut RoutingExpr) -> bool { - tables.whatami != WhatAmI::Router - || face.whatami != WhatAmI::Peer - || hat!(tables).peers_net.is_none() - || tables.zid - == *hat!(tables).elect_router( - &tables.zid, - expr.full_expr(), - hat!(tables).get_router_links(face.zid), - ) + fn ingress_filter(&self, _tables: &Tables, _face: &FaceState, _expr: &mut RoutingExpr) -> bool { + true } #[inline] fn egress_filter( &self, - tables: &Tables, + _tables: &Tables, src_face: &FaceState, out_face: &Arc, - expr: &mut RoutingExpr, + _expr: &mut RoutingExpr, ) -> bool { - if src_face.id != out_face.id + src_face.id != out_face.id && match (src_face.mcast_group.as_ref(), out_face.mcast_group.as_ref()) { (Some(l), Some(r)) => l != r, _ => true, } - { - let dst_master = tables.whatami != WhatAmI::Router - || out_face.whatami != WhatAmI::Peer - || hat!(tables).peers_net.is_none() - || tables.zid - == *hat!(tables).elect_router( - &tables.zid, - expr.full_expr(), - hat!(tables).get_router_links(out_face.zid), - ); - - return dst_master - && (src_face.whatami != WhatAmI::Peer - || out_face.whatami != WhatAmI::Peer - || hat!(tables).full_net(WhatAmI::Peer) - || hat!(tables).failover_brokering(src_face.zid, out_face.zid)); - } - false } } -struct HatContext { - router_subs: HashSet, - peer_subs: HashSet, - router_qabls: HashMap, - peer_qabls: HashMap, -} +struct HatContext {} impl HatContext { fn new() -> Self { - Self { - router_subs: HashSet::new(), - peer_subs: HashSet::new(), - router_qabls: HashMap::new(), - peer_qabls: HashMap::new(), - } + Self {} } } struct HatFace { - link_id: usize, local_subs: HashSet>, remote_subs: HashSet>, local_qabls: HashMap, QueryableInfo>, @@ -777,7 +354,6 @@ struct HatFace { impl HatFace { fn new() -> Self { Self { - link_id: 0, local_subs: HashSet::new(), remote_subs: HashSet::new(), local_qabls: HashMap::new(), @@ -786,58 +362,4 @@ impl HatFace { } } -fn get_router(tables: &Tables, face: &Arc, nodeid: NodeId) -> Option { - match hat!(tables) - .routers_net - .as_ref() - .unwrap() - .get_link(face_hat!(face).link_id) - { - Some(link) => match link.get_zid(&(nodeid as u64)) { - Some(router) => Some(*router), - None => { - log::error!( - "Received router 
declaration with unknown routing context id {}", - nodeid - ); - None - } - }, - None => { - log::error!( - "Could not find corresponding link in routers network for {}", - face - ); - None - } - } -} - -fn get_peer(tables: &Tables, face: &Arc, nodeid: NodeId) -> Option { - match hat!(tables) - .peers_net - .as_ref() - .unwrap() - .get_link(face_hat!(face).link_id) - { - Some(link) => match link.get_zid(&(nodeid as u64)) { - Some(router) => Some(*router), - None => { - log::error!( - "Received peer declaration with unknown routing context id {}", - nodeid - ); - None - } - }, - None => { - log::error!( - "Could not find corresponding link in peers network for {}", - face - ); - None - } - } -} - impl HatTrait for HatCode {} diff --git a/zenoh/src/net/routing/hat/p2p_peer/network.rs b/zenoh/src/net/routing/hat/p2p_peer/network.rs deleted file mode 100644 index 421850dc87..0000000000 --- a/zenoh/src/net/routing/hat/p2p_peer/network.rs +++ /dev/null @@ -1,1007 +0,0 @@ -// -// Copyright (c) 2023 ZettaScale Technology -// -// This program and the accompanying materials are made available under the -// terms of the Eclipse Public License 2.0 which is available at -// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 -// which is available at https://www.apache.org/licenses/LICENSE-2.0. -// -// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 -// -// Contributors: -// ZettaScale Zenoh Team, -// -use crate::net::codec::Zenoh080Routing; -use crate::net::protocol::linkstate::{LinkState, LinkStateList}; -use crate::net::routing::dispatcher::tables::NodeId; -use crate::net::runtime::Runtime; -use async_std::task; -use petgraph::graph::NodeIndex; -use petgraph::visit::{IntoNodeReferences, VisitMap, Visitable}; -use std::convert::TryInto; -use vec_map::VecMap; -use zenoh_buffers::writer::{DidntWrite, HasWriter}; -use zenoh_buffers::ZBuf; -use zenoh_codec::WCodec; -use zenoh_link::Locator; -use zenoh_protocol::common::ZExtBody; -use zenoh_protocol::core::{WhatAmI, WhatAmIMatcher, ZenohId}; -use zenoh_protocol::network::oam::id::OAM_LINKSTATE; -use zenoh_protocol::network::{oam, NetworkBody, NetworkMessage, Oam}; -use zenoh_transport::TransportUnicast; - -#[derive(Clone)] -struct Details { - zid: bool, - locators: bool, - links: bool, -} - -#[derive(Clone)] -pub(super) struct Node { - pub(super) zid: ZenohId, - pub(super) whatami: Option, - pub(super) locators: Option>, - pub(super) sn: u64, - pub(super) links: Vec, -} - -impl std::fmt::Debug for Node { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(f, "{}", self.zid) - } -} - -pub(super) struct Link { - pub(super) transport: TransportUnicast, - zid: ZenohId, - mappings: VecMap, - local_mappings: VecMap, -} - -impl Link { - fn new(transport: TransportUnicast) -> Self { - let zid = transport.get_zid().unwrap(); - Link { - transport, - zid, - mappings: VecMap::new(), - local_mappings: VecMap::new(), - } - } - - #[inline] - pub(super) fn set_zid_mapping(&mut self, psid: u64, zid: ZenohId) { - self.mappings.insert(psid.try_into().unwrap(), zid); - } - - #[inline] - pub(super) fn get_zid(&self, psid: &u64) -> Option<&ZenohId> { - self.mappings.get((*psid).try_into().unwrap()) - } - - #[inline] - pub(super) fn set_local_psid_mapping(&mut self, psid: u64, local_psid: u64) { - self.local_mappings - .insert(psid.try_into().unwrap(), local_psid); - } - - #[inline] - pub(super) fn get_local_psid(&self, psid: &u64) -> Option<&u64> { - self.local_mappings.get((*psid).try_into().unwrap()) - } -} - -pub(super) struct 
Changes { - pub(super) updated_nodes: Vec<(NodeIndex, Node)>, - pub(super) removed_nodes: Vec<(NodeIndex, Node)>, -} - -#[derive(Clone)] -pub(super) struct Tree { - pub(super) parent: Option, - pub(super) childs: Vec, - pub(super) directions: Vec>, -} - -pub(super) struct Network { - pub(super) name: String, - pub(super) full_linkstate: bool, - pub(super) router_peers_failover_brokering: bool, - pub(super) gossip: bool, - pub(super) gossip_multihop: bool, - pub(super) autoconnect: WhatAmIMatcher, - pub(super) idx: NodeIndex, - pub(super) links: VecMap, - pub(super) trees: Vec, - pub(super) distances: Vec, - pub(super) graph: petgraph::stable_graph::StableUnGraph, - pub(super) runtime: Runtime, -} - -impl Network { - #[allow(clippy::too_many_arguments)] - pub(super) fn new( - name: String, - zid: ZenohId, - runtime: Runtime, - full_linkstate: bool, - router_peers_failover_brokering: bool, - gossip: bool, - gossip_multihop: bool, - autoconnect: WhatAmIMatcher, - ) -> Self { - let mut graph = petgraph::stable_graph::StableGraph::default(); - log::debug!("{} Add node (self) {}", name, zid); - let idx = graph.add_node(Node { - zid, - whatami: Some(runtime.whatami), - locators: None, - sn: 1, - links: vec![], - }); - Network { - name, - full_linkstate, - router_peers_failover_brokering, - gossip, - gossip_multihop, - autoconnect, - idx, - links: VecMap::new(), - trees: vec![Tree { - parent: None, - childs: vec![], - directions: vec![None], - }], - distances: vec![0.0], - graph, - runtime, - } - } - - //noinspection ALL - // pub(super) fn dot(&self) -> String { - // std::format!( - // "{:?}", - // petgraph::dot::Dot::with_config(&self.graph, &[petgraph::dot::Config::EdgeNoLabel]) - // ) - // } - - #[inline] - pub(super) fn get_node(&self, zid: &ZenohId) -> Option<&Node> { - self.graph.node_weights().find(|weight| weight.zid == *zid) - } - - #[inline] - pub(super) fn get_idx(&self, zid: &ZenohId) -> Option { - self.graph - .node_indices() - .find(|idx| self.graph[*idx].zid == *zid) - } - - #[inline] - pub(super) fn get_link(&self, id: usize) -> Option<&Link> { - self.links.get(id) - } - - #[inline] - pub(super) fn get_link_from_zid(&self, zid: &ZenohId) -> Option<&Link> { - self.links.values().find(|link| link.zid == *zid) - } - - #[inline] - pub(super) fn get_local_context(&self, context: NodeId, link_id: usize) -> NodeId { - match self.get_link(link_id) { - Some(link) => match link.get_local_psid(&(context as u64)) { - Some(psid) => (*psid).try_into().unwrap_or(0), - None => { - log::error!( - "Cannot find local psid for context {} on link {}", - context, - link_id - ); - 0 - } - }, - None => { - log::error!("Cannot find link {}", link_id); - 0 - } - } - } - - fn add_node(&mut self, node: Node) -> NodeIndex { - let zid = node.zid; - let idx = self.graph.add_node(node); - for link in self.links.values_mut() { - if let Some((psid, _)) = link.mappings.iter().find(|(_, p)| **p == zid) { - link.local_mappings.insert(psid, idx.index() as u64); - } - } - idx - } - - fn make_link_state(&self, idx: NodeIndex, details: Details) -> LinkState { - let links = if details.links { - self.graph[idx] - .links - .iter() - .filter_map(|zid| { - if let Some(idx2) = self.get_idx(zid) { - Some(idx2.index().try_into().unwrap()) - } else { - log::error!( - "{} Internal error building link state: cannot get index of {}", - self.name, - zid - ); - None - } - }) - .collect() - } else { - vec![] - }; - LinkState { - psid: idx.index().try_into().unwrap(), - sn: self.graph[idx].sn, - zid: if details.zid { - 
Some(self.graph[idx].zid) - } else { - None - }, - whatami: self.graph[idx].whatami, - locators: if details.locators { - if idx == self.idx { - Some(self.runtime.get_locators()) - } else { - self.graph[idx].locators.clone() - } - } else { - None - }, - links, - } - } - - fn make_msg(&self, idxs: Vec<(NodeIndex, Details)>) -> Result { - let mut link_states = vec![]; - for (idx, details) in idxs { - link_states.push(self.make_link_state(idx, details)); - } - let codec = Zenoh080Routing::new(); - let mut buf = ZBuf::empty(); - codec.write(&mut buf.writer(), &LinkStateList { link_states })?; - Ok(NetworkBody::OAM(Oam { - id: OAM_LINKSTATE, - body: ZExtBody::ZBuf(buf), - ext_qos: oam::ext::QoSType::oam_default(), - ext_tstamp: None, - }) - .into()) - } - - fn send_on_link(&self, idxs: Vec<(NodeIndex, Details)>, transport: &TransportUnicast) { - if let Ok(msg) = self.make_msg(idxs) { - log::trace!("{} Send to {:?} {:?}", self.name, transport.get_zid(), msg); - if let Err(e) = transport.schedule(msg) { - log::debug!("{} Error sending LinkStateList: {}", self.name, e); - } - } else { - log::error!("Failed to encode Linkstate message"); - } - } - - fn send_on_links
<P>
(&self, idxs: Vec<(NodeIndex, Details)>, mut parameters: P) - where - P: FnMut(&Link) -> bool, - { - if let Ok(msg) = self.make_msg(idxs) { - for link in self.links.values() { - if parameters(link) { - log::trace!("{} Send to {} {:?}", self.name, link.zid, msg); - if let Err(e) = link.transport.schedule(msg.clone()) { - log::debug!("{} Error sending LinkStateList: {}", self.name, e); - } - } - } - } else { - log::error!("Failed to encode Linkstate message"); - } - } - - // Indicates if locators should be included when propagating Linkstate message - // from the given node. - // Returns true if gossip is enabled and if multihop gossip is enabled or - // the node is one of self neighbours. - fn propagate_locators(&self, idx: NodeIndex) -> bool { - self.gossip - && (self.gossip_multihop - || idx == self.idx - || self.links.values().any(|link| { - self.graph - .node_weight(idx) - .map(|node| link.zid == node.zid) - .unwrap_or(true) - })) - } - - fn update_edge(&mut self, idx1: NodeIndex, idx2: NodeIndex) { - use std::hash::Hasher; - let mut hasher = std::collections::hash_map::DefaultHasher::default(); - if self.graph[idx1].zid > self.graph[idx2].zid { - hasher.write(&self.graph[idx2].zid.to_le_bytes()); - hasher.write(&self.graph[idx1].zid.to_le_bytes()); - } else { - hasher.write(&self.graph[idx1].zid.to_le_bytes()); - hasher.write(&self.graph[idx2].zid.to_le_bytes()); - } - let weight = 100.0 + ((hasher.finish() as u32) as f64) / u32::MAX as f64; - self.graph.update_edge(idx1, idx2, weight); - } - - pub(super) fn link_states(&mut self, link_states: Vec, src: ZenohId) -> Changes { - log::trace!("{} Received from {} raw: {:?}", self.name, src, link_states); - - let graph = &self.graph; - let links = &mut self.links; - - let src_link = match links.values_mut().find(|link| link.zid == src) { - Some(link) => link, - None => { - log::error!( - "{} Received LinkStateList from unknown link {}", - self.name, - src - ); - return Changes { - updated_nodes: vec![], - removed_nodes: vec![], - }; - } - }; - - // register psid<->zid mappings & apply mapping to nodes - #[allow(clippy::needless_collect)] // need to release borrow on self - let link_states = link_states - .into_iter() - .filter_map(|link_state| { - if let Some(zid) = link_state.zid { - src_link.set_zid_mapping(link_state.psid, zid); - if let Some(idx) = graph.node_indices().find(|idx| graph[*idx].zid == zid) { - src_link.set_local_psid_mapping(link_state.psid, idx.index() as u64); - } - Some(( - zid, - link_state.whatami.unwrap_or(WhatAmI::Router), - link_state.locators, - link_state.sn, - link_state.links, - )) - } else { - match src_link.get_zid(&link_state.psid) { - Some(zid) => Some(( - *zid, - link_state.whatami.unwrap_or(WhatAmI::Router), - link_state.locators, - link_state.sn, - link_state.links, - )), - None => { - log::error!( - "Received LinkState from {} with unknown node mapping {}", - src, - link_state.psid - ); - None - } - } - } - }) - .collect::>(); - - // apply psid<->zid mapping to links - let src_link = self.get_link_from_zid(&src).unwrap(); - let link_states = link_states - .into_iter() - .map(|(zid, wai, locs, sn, links)| { - let links: Vec = links - .iter() - .filter_map(|l| { - if let Some(zid) = src_link.get_zid(l) { - Some(*zid) - } else { - log::error!( - "{} Received LinkState from {} with unknown link mapping {}", - self.name, - src, - l - ); - None - } - }) - .collect(); - (zid, wai, locs, sn, links) - }) - .collect::>(); - - // log::trace!( - // "{} Received from {} mapped: {:?}", - // self.name, - // src, - // 
link_states - // ); - for link_state in &link_states { - log::trace!( - "{} Received from {} mapped: {:?}", - self.name, - src, - link_state - ); - } - - if !self.full_linkstate { - let mut changes = Changes { - updated_nodes: vec![], - removed_nodes: vec![], - }; - for (zid, whatami, locators, sn, links) in link_states.into_iter() { - let idx = match self.get_idx(&zid) { - None => { - let idx = self.add_node(Node { - zid, - whatami: Some(whatami), - locators: locators.clone(), - sn, - links, - }); - changes.updated_nodes.push((idx, self.graph[idx].clone())); - locators.is_some().then_some(idx) - } - Some(idx) => { - let node = &mut self.graph[idx]; - let oldsn = node.sn; - (oldsn < sn) - .then(|| { - node.sn = sn; - node.links = links.clone(); - changes.updated_nodes.push((idx, node.clone())); - (node.locators != locators && locators.is_some()).then(|| { - node.locators = locators.clone(); - idx - }) - }) - .flatten() - } - }; - - if self.gossip { - if let Some(idx) = idx { - if self.gossip_multihop || self.links.values().any(|link| link.zid == zid) { - self.send_on_links( - vec![( - idx, - Details { - zid: true, - locators: true, - links: false, - }, - )], - |link| link.zid != zid, - ); - } - - if !self.autoconnect.is_empty() { - // Connect discovered peers - if task::block_on(self.runtime.manager().get_transport_unicast(&zid)) - .is_none() - && self.autoconnect.matches(whatami) - { - if let Some(locators) = locators { - let runtime = self.runtime.clone(); - self.runtime.spawn(async move { - // random backoff - async_std::task::sleep(std::time::Duration::from_millis( - rand::random::() % 100, - )) - .await; - runtime.connect_peer(&zid, &locators).await; - }); - } - } - } - } - } - } - return changes; - } - - // Add nodes to graph & filter out up to date states - let mut link_states = link_states - .into_iter() - .filter_map( - |(zid, whatami, locators, sn, links)| match self.get_idx(&zid) { - Some(idx) => { - let node = &mut self.graph[idx]; - let oldsn = node.sn; - if oldsn < sn { - node.sn = sn; - node.links = links.clone(); - if locators.is_some() { - node.locators = locators; - } - if oldsn == 0 { - Some((links, idx, true)) - } else { - Some((links, idx, false)) - } - } else { - None - } - } - None => { - let node = Node { - zid, - whatami: Some(whatami), - locators, - sn, - links: links.clone(), - }; - log::debug!("{} Add node (state) {}", self.name, zid); - let idx = self.add_node(node); - Some((links, idx, true)) - } - }, - ) - .collect::, NodeIndex, bool)>>(); - - // Add/remove edges from graph - let mut reintroduced_nodes = vec![]; - for (links, idx1, _) in &link_states { - for link in links { - if let Some(idx2) = self.get_idx(link) { - if self.graph[idx2].links.contains(&self.graph[*idx1].zid) { - log::trace!( - "{} Update edge (state) {} {}", - self.name, - self.graph[*idx1].zid, - self.graph[idx2].zid - ); - self.update_edge(*idx1, idx2); - } - } else { - let node = Node { - zid: *link, - whatami: None, - locators: None, - sn: 0, - links: vec![], - }; - log::debug!("{} Add node (reintroduced) {}", self.name, link.clone()); - let idx = self.add_node(node); - reintroduced_nodes.push((vec![], idx, true)); - } - } - let mut edges = vec![]; - let mut neighbors = self.graph.neighbors_undirected(*idx1).detach(); - while let Some(edge) = neighbors.next(&self.graph) { - edges.push(edge); - } - for (eidx, idx2) in edges { - if !links.contains(&self.graph[idx2].zid) { - log::trace!( - "{} Remove edge (state) {} {}", - self.name, - self.graph[*idx1].zid, - self.graph[idx2].zid - ); - 
self.graph.remove_edge(eidx); - } - } - } - link_states.extend(reintroduced_nodes); - - let removed = self.remove_detached_nodes(); - let link_states = link_states - .into_iter() - .filter(|ls| !removed.iter().any(|(idx, _)| idx == &ls.1)) - .collect::, NodeIndex, bool)>>(); - - if !self.autoconnect.is_empty() { - // Connect discovered peers - for (_, idx, _) in &link_states { - let node = &self.graph[*idx]; - if let Some(whatami) = node.whatami { - if task::block_on(self.runtime.manager().get_transport_unicast(&node.zid)) - .is_none() - && self.autoconnect.matches(whatami) - { - if let Some(locators) = &node.locators { - let runtime = self.runtime.clone(); - let zid = node.zid; - let locators = locators.clone(); - self.runtime.spawn(async move { - // random backoff - async_std::task::sleep(std::time::Duration::from_millis( - rand::random::() % 100, - )) - .await; - runtime.connect_peer(&zid, &locators).await; - }); - } - } - } - } - } - - // Propagate link states - // Note: we need to send all states at once for each face - // to avoid premature node deletion on the other side - #[allow(clippy::type_complexity)] - if !link_states.is_empty() { - let (new_idxs, updated_idxs): ( - Vec<(Vec, NodeIndex, bool)>, - Vec<(Vec, NodeIndex, bool)>, - ) = link_states.into_iter().partition(|(_, _, new)| *new); - let new_idxs = new_idxs - .into_iter() - .map(|(_, idx1, _new_node)| { - ( - idx1, - Details { - zid: true, - locators: self.propagate_locators(idx1), - links: true, - }, - ) - }) - .collect::>(); - for link in self.links.values() { - if link.zid != src { - let updated_idxs: Vec<(NodeIndex, Details)> = updated_idxs - .clone() - .into_iter() - .filter_map(|(_, idx1, _)| { - if link.zid != self.graph[idx1].zid { - Some(( - idx1, - Details { - zid: false, - locators: self.propagate_locators(idx1), - links: true, - }, - )) - } else { - None - } - }) - .collect(); - if !new_idxs.is_empty() || !updated_idxs.is_empty() { - self.send_on_link( - [&new_idxs[..], &updated_idxs[..]].concat(), - &link.transport, - ); - } - } else if !new_idxs.is_empty() { - self.send_on_link(new_idxs.clone(), &link.transport); - } - } - } - Changes { - updated_nodes: vec![], - removed_nodes: removed, - } - } - - pub(super) fn add_link(&mut self, transport: TransportUnicast) -> usize { - let free_index = { - let mut i = 0; - while self.links.contains_key(i) { - i += 1; - } - i - }; - self.links.insert(free_index, Link::new(transport.clone())); - - let zid = transport.get_zid().unwrap(); - let whatami = transport.get_whatami().unwrap(); - - if self.full_linkstate || self.router_peers_failover_brokering { - let (idx, new) = match self.get_idx(&zid) { - Some(idx) => (idx, false), - None => { - log::debug!("{} Add node (link) {}", self.name, zid); - ( - self.add_node(Node { - zid, - whatami: Some(whatami), - locators: None, - sn: 0, - links: vec![], - }), - true, - ) - } - }; - if self.full_linkstate && self.graph[idx].links.contains(&self.graph[self.idx].zid) { - log::trace!("Update edge (link) {} {}", self.graph[self.idx].zid, zid); - self.update_edge(self.idx, idx); - } - self.graph[self.idx].links.push(zid); - self.graph[self.idx].sn += 1; - - // Send updated self linkstate on all existing links except new one - self.links - .values() - .filter(|link| { - link.zid != zid - && (self.full_linkstate - || link.transport.get_whatami().unwrap_or(WhatAmI::Peer) - == WhatAmI::Router) - }) - .for_each(|link| { - self.send_on_link( - if new || (!self.full_linkstate && !self.gossip_multihop) { - vec![ - ( - idx, - Details { - zid: 
true, - locators: false, - links: false, - }, - ), - ( - self.idx, - Details { - zid: false, - locators: self.propagate_locators(idx), - links: true, - }, - ), - ] - } else { - vec![( - self.idx, - Details { - zid: false, - locators: self.propagate_locators(idx), - links: true, - }, - )] - }, - &link.transport, - ) - }); - } - - // Send all nodes linkstate on new link - let idxs = self - .graph - .node_indices() - .filter_map(|idx| { - (self.full_linkstate - || self.gossip_multihop - || self.links.values().any(|link| link.zid == zid) - || (self.router_peers_failover_brokering - && idx == self.idx - && whatami == WhatAmI::Router)) - .then(|| { - ( - idx, - Details { - zid: true, - locators: self.propagate_locators(idx), - links: self.full_linkstate - || (self.router_peers_failover_brokering - && idx == self.idx - && whatami == WhatAmI::Router), - }, - ) - }) - }) - .collect(); - self.send_on_link(idxs, &transport); - free_index - } - - pub(super) fn remove_link(&mut self, zid: &ZenohId) -> Vec<(NodeIndex, Node)> { - log::trace!("{} remove_link {}", self.name, zid); - self.links.retain(|_, link| link.zid != *zid); - self.graph[self.idx].links.retain(|link| *link != *zid); - - if self.full_linkstate { - if let Some((edge, _)) = self - .get_idx(zid) - .and_then(|idx| self.graph.find_edge_undirected(self.idx, idx)) - { - self.graph.remove_edge(edge); - } - let removed = self.remove_detached_nodes(); - - self.graph[self.idx].sn += 1; - - self.send_on_links( - vec![( - self.idx, - Details { - zid: false, - locators: self.gossip, - links: true, - }, - )], - |_| true, - ); - - removed - } else { - if let Some(idx) = self.get_idx(zid) { - self.graph.remove_node(idx); - } - if self.router_peers_failover_brokering { - self.send_on_links( - vec![( - self.idx, - Details { - zid: false, - locators: self.gossip, - links: true, - }, - )], - |link| { - link.zid != *zid - && link.transport.get_whatami().unwrap_or(WhatAmI::Peer) - == WhatAmI::Router - }, - ); - } - vec![] - } - } - - fn remove_detached_nodes(&mut self) -> Vec<(NodeIndex, Node)> { - let mut dfs_stack = vec![self.idx]; - let mut visit_map = self.graph.visit_map(); - while let Some(node) = dfs_stack.pop() { - if visit_map.visit(node) { - for succzid in &self.graph[node].links { - if let Some(succ) = self.get_idx(succzid) { - if !visit_map.is_visited(&succ) { - dfs_stack.push(succ); - } - } - } - } - } - - let mut removed = vec![]; - for idx in self.graph.node_indices().collect::>() { - if !visit_map.is_visited(&idx) { - log::debug!("Remove node {}", &self.graph[idx].zid); - removed.push((idx, self.graph.remove_node(idx).unwrap())); - } - } - removed - } - - pub(super) fn compute_trees(&mut self) -> Vec> { - let indexes = self.graph.node_indices().collect::>(); - let max_idx = indexes.iter().max().unwrap(); - - let old_childs: Vec> = self.trees.iter().map(|t| t.childs.clone()).collect(); - - self.trees.clear(); - self.trees.resize_with(max_idx.index() + 1, || Tree { - parent: None, - childs: vec![], - directions: vec![], - }); - - for tree_root_idx in &indexes { - let paths = petgraph::algo::bellman_ford(&self.graph, *tree_root_idx).unwrap(); - - if tree_root_idx.index() == 0 { - self.distances = paths.distances; - } - - if log::log_enabled!(log::Level::Debug) { - let ps: Vec> = paths - .predecessors - .iter() - .enumerate() - .map(|(is, o)| { - o.map(|ip| { - format!( - "{} <- {}", - self.graph[ip].zid, - self.graph[NodeIndex::new(is)].zid - ) - }) - }) - .collect(); - log::debug!("Tree {} {:?}", self.graph[*tree_root_idx].zid, ps); - } - - 
self.trees[tree_root_idx.index()].parent = paths.predecessors[self.idx.index()]; - - for idx in &indexes { - if let Some(parent_idx) = paths.predecessors[idx.index()] { - if parent_idx == self.idx { - self.trees[tree_root_idx.index()].childs.push(*idx); - } - } - } - - self.trees[tree_root_idx.index()] - .directions - .resize_with(max_idx.index() + 1, || None); - let mut dfs = petgraph::algo::DfsSpace::new(&self.graph); - for destination in &indexes { - if self.idx != *destination - && petgraph::algo::has_path_connecting( - &self.graph, - self.idx, - *destination, - Some(&mut dfs), - ) - { - let mut direction = None; - let mut current = *destination; - while let Some(parent) = paths.predecessors[current.index()] { - if parent == self.idx { - direction = Some(current); - break; - } else { - current = parent; - } - } - - self.trees[tree_root_idx.index()].directions[destination.index()] = - match direction { - Some(direction) => Some(direction), - None => self.trees[tree_root_idx.index()].parent, - }; - } - } - } - - let mut new_childs = Vec::with_capacity(self.trees.len()); - new_childs.resize(self.trees.len(), vec![]); - - for i in 0..new_childs.len() { - new_childs[i] = if i < old_childs.len() { - self.trees[i] - .childs - .iter() - .filter(|idx| !old_childs[i].contains(idx)) - .cloned() - .collect() - } else { - self.trees[i].childs.clone() - }; - } - - new_childs - } - - #[inline] - pub(super) fn get_links(&self, node: ZenohId) -> &[ZenohId] { - self.get_node(&node) - .map(|node| &node.links[..]) - .unwrap_or_default() - } -} - -#[inline] -pub(super) fn shared_nodes(net1: &Network, net2: &Network) -> Vec { - net1.graph - .node_references() - .filter_map(|(_, node1)| { - net2.graph - .node_references() - .any(|(_, node2)| node1.zid == node2.zid) - .then_some(node1.zid) - }) - .collect() -} diff --git a/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs b/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs index 189e6cb6e8..8c501c5897 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs @@ -11,9 +11,8 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::network::Network; -use super::{face_hat, face_hat_mut, hat, hat_mut, res_hat, res_hat_mut}; -use super::{get_peer, get_router, HatCode, HatContext, HatFace, HatTables}; +use super::{face_hat, face_hat_mut}; +use super::{HatCode, HatFace}; use crate::net::routing::dispatcher::face::FaceState; use crate::net::routing::dispatcher::pubsub::*; use crate::net::routing::dispatcher::resource::{NodeId, Resource, SessionContext}; @@ -21,14 +20,13 @@ use crate::net::routing::dispatcher::tables::{DataRoutes, PullCaches, Route, Rou use crate::net::routing::dispatcher::tables::{Tables, TablesLock}; use crate::net::routing::hat::HatPubSubTrait; use crate::net::routing::{RoutingContext, PREFIX_LIVELINESS}; -use petgraph::graph::NodeIndex; use std::borrow::Cow; -use std::collections::{HashMap, HashSet}; +use std::collections::HashMap; use std::sync::{Arc, RwLockReadGuard}; use zenoh_core::zread; use zenoh_protocol::core::key_expr::OwnedKeyExpr; use zenoh_protocol::{ - core::{key_expr::keyexpr, Reliability, WhatAmI, WireExpr, ZenohId}, + core::{key_expr::keyexpr, Reliability, WhatAmI, WireExpr}, network::declare::{ common::ext::WireExprType, ext, subscriber::ext::SubscriberInfo, Declare, DeclareBody, DeclareSubscriber, Mode, UndeclareSubscriber, @@ -36,79 +34,18 @@ use zenoh_protocol::{ }; use zenoh_sync::get_mut_unchecked; -#[inline] -fn send_sourced_subscription_to_net_childs( - tables: &Tables, - net: 
&Network, - childs: &[NodeIndex], - res: &Arc, - src_face: Option<&Arc>, - sub_info: &SubscriberInfo, - routing_context: NodeId, -) { - for child in childs { - if net.graph.contains_node(*child) { - match tables.get_face(&net.graph[*child].zid).cloned() { - Some(mut someface) => { - if src_face.is_none() || someface.id != src_face.unwrap().id { - let key_expr = Resource::decl_key(res, &mut someface); - - log::debug!("Send subscription {} on {}", res.expr(), someface); - - someface.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType { - node_id: routing_context, - }, - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // TODO - wire_expr: key_expr, - ext_info: *sub_info, - }), - }, - res.expr(), - )); - } - } - None => log::trace!("Unable to find face for zid {}", net.graph[*child].zid), - } - } - } -} - #[inline] fn propagate_simple_subscription_to( - tables: &mut Tables, + _tables: &mut Tables, dst_face: &mut Arc, res: &Arc, sub_info: &SubscriberInfo, src_face: &mut Arc, - full_peer_net: bool, ) { if (src_face.id != dst_face.id || res.expr().starts_with(PREFIX_LIVELINESS)) && !face_hat!(dst_face).local_subs.contains(res) - && match tables.whatami { - WhatAmI::Router => { - if full_peer_net { - dst_face.whatami == WhatAmI::Client - } else { - dst_face.whatami != WhatAmI::Router - && (src_face.whatami != WhatAmI::Peer - || dst_face.whatami != WhatAmI::Peer - || hat!(tables).failover_brokering(src_face.zid, dst_face.zid)) - } - } - WhatAmI::Peer => { - if full_peer_net { - dst_face.whatami == WhatAmI::Client - } else { - src_face.whatami == WhatAmI::Client || dst_face.whatami == WhatAmI::Client - } - } - _ => src_face.whatami == WhatAmI::Client || dst_face.whatami == WhatAmI::Client, - } + && src_face.whatami == WhatAmI::Client + || dst_face.whatami == WhatAmI::Client { face_hat_mut!(dst_face).local_subs.insert(res.clone()); let key_expr = Resource::decl_key(res, dst_face); @@ -134,233 +71,13 @@ fn propagate_simple_subscription( sub_info: &SubscriberInfo, src_face: &mut Arc, ) { - let full_peer_net = hat!(tables).full_net(WhatAmI::Peer); for mut dst_face in tables .faces .values() .cloned() .collect::>>() { - propagate_simple_subscription_to( - tables, - &mut dst_face, - res, - sub_info, - src_face, - full_peer_net, - ); - } -} - -fn propagate_sourced_subscription( - tables: &Tables, - res: &Arc, - sub_info: &SubscriberInfo, - src_face: Option<&Arc>, - source: &ZenohId, - net_type: WhatAmI, -) { - let net = hat!(tables).get_net(net_type).unwrap(); - match net.get_idx(source) { - Some(tree_sid) => { - if net.trees.len() > tree_sid.index() { - send_sourced_subscription_to_net_childs( - tables, - net, - &net.trees[tree_sid.index()].childs, - res, - src_face, - sub_info, - tree_sid.index() as NodeId, - ); - } else { - log::trace!( - "Propagating sub {}: tree for node {} sid:{} not yet ready", - res.expr(), - tree_sid.index(), - source - ); - } - } - None => log::error!( - "Error propagating sub {}: cannot get index of {}!", - res.expr(), - source - ), - } -} - -fn register_router_subscription( - tables: &mut Tables, - face: &mut Arc, - res: &mut Arc, - sub_info: &SubscriberInfo, - router: ZenohId, -) { - if !res_hat!(res).router_subs.contains(&router) { - // Register router subscription - { - log::debug!( - "Register router subscription {} (router: {})", - res.expr(), - router - ); - res_hat_mut!(res).router_subs.insert(router); - 
hat_mut!(tables).router_subs.insert(res.clone()); - } - - // Propagate subscription to routers - propagate_sourced_subscription(tables, res, sub_info, Some(face), &router, WhatAmI::Router); - } - // Propagate subscription to peers - if hat!(tables).full_net(WhatAmI::Peer) && face.whatami != WhatAmI::Peer { - register_peer_subscription(tables, face, res, sub_info, tables.zid) - } - - // Propagate subscription to clients - propagate_simple_subscription(tables, res, sub_info, face); -} - -fn declare_router_subscription( - tables: &TablesLock, - rtables: RwLockReadGuard, - face: &mut Arc, - expr: &WireExpr, - sub_info: &SubscriberInfo, - router: ZenohId, -) { - match rtables - .get_mapping(face, &expr.scope, expr.mapping) - .cloned() - { - Some(mut prefix) => { - let res = Resource::get_resource(&prefix, &expr.suffix); - let (mut res, mut wtables) = - if res.as_ref().map(|r| r.context.is_some()).unwrap_or(false) { - drop(rtables); - let wtables = zwrite!(tables.tables); - (res.unwrap(), wtables) - } else { - let mut fullexpr = prefix.expr(); - fullexpr.push_str(expr.suffix.as_ref()); - let mut matches = keyexpr::new(fullexpr.as_str()) - .map(|ke| Resource::get_matches(&rtables, ke)) - .unwrap_or_default(); - drop(rtables); - let mut wtables = zwrite!(tables.tables); - let mut res = - Resource::make_resource(&mut wtables, &mut prefix, expr.suffix.as_ref()); - matches.push(Arc::downgrade(&res)); - Resource::match_resource(&wtables, &mut res, matches); - (res, wtables) - }; - register_router_subscription(&mut wtables, face, &mut res, sub_info, router); - disable_matches_data_routes(&mut wtables, &mut res); - drop(wtables); - - let rtables = zread!(tables.tables); - let matches_data_routes = compute_matches_data_routes_(&rtables, &res); - drop(rtables); - - let wtables = zwrite!(tables.tables); - for (mut res, data_routes) in matches_data_routes { - get_mut_unchecked(&mut res) - .context_mut() - .update_data_routes(data_routes); - } - drop(wtables); - } - None => log::error!( - "Declare router subscription for unknown scope {}!", - expr.scope - ), - } -} - -fn register_peer_subscription( - tables: &mut Tables, - face: &mut Arc, - res: &mut Arc, - sub_info: &SubscriberInfo, - peer: ZenohId, -) { - if !res_hat!(res).peer_subs.contains(&peer) { - // Register peer subscription - { - log::debug!("Register peer subscription {} (peer: {})", res.expr(), peer); - res_hat_mut!(res).peer_subs.insert(peer); - hat_mut!(tables).peer_subs.insert(res.clone()); - } - - // Propagate subscription to peers - propagate_sourced_subscription(tables, res, sub_info, Some(face), &peer, WhatAmI::Peer); - } - - if tables.whatami == WhatAmI::Peer { - // Propagate subscription to clients - propagate_simple_subscription(tables, res, sub_info, face); - } -} - -fn declare_peer_subscription( - tables: &TablesLock, - rtables: RwLockReadGuard, - face: &mut Arc, - expr: &WireExpr, - sub_info: &SubscriberInfo, - peer: ZenohId, -) { - match rtables - .get_mapping(face, &expr.scope, expr.mapping) - .cloned() - { - Some(mut prefix) => { - let res = Resource::get_resource(&prefix, &expr.suffix); - let (mut res, mut wtables) = - if res.as_ref().map(|r| r.context.is_some()).unwrap_or(false) { - drop(rtables); - let wtables = zwrite!(tables.tables); - (res.unwrap(), wtables) - } else { - let mut fullexpr = prefix.expr(); - fullexpr.push_str(expr.suffix.as_ref()); - let mut matches = keyexpr::new(fullexpr.as_str()) - .map(|ke| Resource::get_matches(&rtables, ke)) - .unwrap_or_default(); - drop(rtables); - let mut wtables = 
zwrite!(tables.tables); - let mut res = - Resource::make_resource(&mut wtables, &mut prefix, expr.suffix.as_ref()); - matches.push(Arc::downgrade(&res)); - Resource::match_resource(&wtables, &mut res, matches); - (res, wtables) - }; - register_peer_subscription(&mut wtables, face, &mut res, sub_info, peer); - if wtables.whatami == WhatAmI::Router { - let mut propa_sub_info = *sub_info; - propa_sub_info.mode = Mode::Push; - let zid = wtables.zid; - register_router_subscription(&mut wtables, face, &mut res, &propa_sub_info, zid); - } - disable_matches_data_routes(&mut wtables, &mut res); - drop(wtables); - - let rtables = zread!(tables.tables); - let matches_data_routes = compute_matches_data_routes_(&rtables, &res); - drop(rtables); - - let wtables = zwrite!(tables.tables); - for (mut res, data_routes) in matches_data_routes { - get_mut_unchecked(&mut res) - .context_mut() - .update_data_routes(data_routes); - } - drop(wtables); - } - None => log::error!( - "Declare router subscription for unknown scope {}!", - expr.scope - ), + propagate_simple_subscription_to(tables, &mut dst_face, res, sub_info, src_face); } } @@ -440,75 +157,29 @@ fn declare_client_subscription( register_client_subscription(&mut wtables, face, &mut res, sub_info); let mut propa_sub_info = *sub_info; propa_sub_info.mode = Mode::Push; - match wtables.whatami { - WhatAmI::Router => { - let zid = wtables.zid; - register_router_subscription( - &mut wtables, - face, - &mut res, - &propa_sub_info, - zid, - ); - } - WhatAmI::Peer => { - if hat!(wtables).full_net(WhatAmI::Peer) { - let zid = wtables.zid; - register_peer_subscription( - &mut wtables, - face, - &mut res, - &propa_sub_info, - zid, - ); - } else { - propagate_simple_subscription(&mut wtables, &res, &propa_sub_info, face); - // This introduced a buffer overflow on windows - // TODO: Let's deactivate this on windows until Fixed - #[cfg(not(windows))] - for mcast_group in &wtables.mcast_groups { - mcast_group - .primitives - .send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // TODO - wire_expr: res.expr().into(), - ext_info: *sub_info, - }), - }, - res.expr(), - )) - } - } - } - _ => { - propagate_simple_subscription(&mut wtables, &res, &propa_sub_info, face); - // This introduced a buffer overflow on windows - // TODO: Let's deactivate this on windows until Fixed - #[cfg(not(windows))] - for mcast_group in &wtables.mcast_groups { - mcast_group - .primitives - .send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // TODO - wire_expr: res.expr().into(), - ext_info: *sub_info, - }), - }, - res.expr(), - )) - } - } + + propagate_simple_subscription(&mut wtables, &res, &propa_sub_info, face); + // This introduced a buffer overflow on windows + // TODO: Let's deactivate this on windows until Fixed + #[cfg(not(windows))] + for mcast_group in &wtables.mcast_groups { + mcast_group + .primitives + .send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id: 0, // TODO + wire_expr: res.expr().into(), + ext_info: *sub_info, + }), + }, + res.expr(), + )) } + 
disable_matches_data_routes(&mut wtables, &mut res); drop(wtables); @@ -528,24 +199,6 @@ fn declare_client_subscription( } } -#[inline] -fn remote_router_subs(tables: &Tables, res: &Arc) -> bool { - res.context.is_some() - && res_hat!(res) - .router_subs - .iter() - .any(|peer| peer != &tables.zid) -} - -#[inline] -fn remote_peer_subs(tables: &Tables, res: &Arc) -> bool { - res.context.is_some() - && res_hat!(res) - .peer_subs - .iter() - .any(|peer| peer != &tables.zid) -} - #[inline] fn client_subs(res: &Arc) -> Vec> { res.session_ctxs @@ -560,46 +213,6 @@ fn client_subs(res: &Arc) -> Vec> { .collect() } -#[inline] -fn send_forget_sourced_subscription_to_net_childs( - tables: &Tables, - net: &Network, - childs: &[NodeIndex], - res: &Arc, - src_face: Option<&Arc>, - routing_context: Option, -) { - for child in childs { - if net.graph.contains_node(*child) { - match tables.get_face(&net.graph[*child].zid).cloned() { - Some(mut someface) => { - if src_face.is_none() || someface.id != src_face.unwrap().id { - let wire_expr = Resource::decl_key(res, &mut someface); - - log::debug!("Send forget subscription {} on {}", res.expr(), someface); - - someface.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType { - node_id: routing_context.unwrap_or(0), - }, - body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id: 0, // TODO - ext_wire_expr: WireExprType { wire_expr }, - }), - }, - res.expr(), - )); - } - } - None => log::trace!("Unable to find face for zid {}", net.graph[*child].zid), - } - } - } -} - fn propagate_forget_simple_subscription(tables: &mut Tables, res: &Arc) { for face in tables.faces.values_mut() { if face_hat!(face).local_subs.contains(res) { @@ -621,224 +234,6 @@ fn propagate_forget_simple_subscription(tables: &mut Tables, res: &Arc } } -fn propagate_forget_simple_subscription_to_peers(tables: &mut Tables, res: &Arc) { - if !hat!(tables).full_net(WhatAmI::Peer) - && res_hat!(res).router_subs.len() == 1 - && res_hat!(res).router_subs.contains(&tables.zid) - { - for mut face in tables - .faces - .values() - .cloned() - .collect::>>() - { - if face.whatami == WhatAmI::Peer - && face_hat!(face).local_subs.contains(res) - && !res.session_ctxs.values().any(|s| { - face.zid != s.face.zid - && s.subs.is_some() - && (s.face.whatami == WhatAmI::Client - || (s.face.whatami == WhatAmI::Peer - && hat!(tables).failover_brokering(s.face.zid, face.zid))) - }) - { - let wire_expr = Resource::get_best_key(res, "", face.id); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id: 0, // TODO - ext_wire_expr: WireExprType { wire_expr }, - }), - }, - res.expr(), - )); - - face_hat_mut!(&mut face).local_subs.remove(res); - } - } - } -} - -fn propagate_forget_sourced_subscription( - tables: &Tables, - res: &Arc, - src_face: Option<&Arc>, - source: &ZenohId, - net_type: WhatAmI, -) { - let net = hat!(tables).get_net(net_type).unwrap(); - match net.get_idx(source) { - Some(tree_sid) => { - if net.trees.len() > tree_sid.index() { - send_forget_sourced_subscription_to_net_childs( - tables, - net, - &net.trees[tree_sid.index()].childs, - res, - src_face, - Some(tree_sid.index() as NodeId), - ); - } else { - log::trace!( - "Propagating forget sub {}: tree for node {} sid:{} not yet ready", - res.expr(), - 
tree_sid.index(), - source - ); - } - } - None => log::error!( - "Error propagating forget sub {}: cannot get index of {}!", - res.expr(), - source - ), - } -} - -fn unregister_router_subscription(tables: &mut Tables, res: &mut Arc, router: &ZenohId) { - log::debug!( - "Unregister router subscription {} (router: {})", - res.expr(), - router - ); - res_hat_mut!(res).router_subs.retain(|sub| sub != router); - - if res_hat!(res).router_subs.is_empty() { - hat_mut!(tables) - .router_subs - .retain(|sub| !Arc::ptr_eq(sub, res)); - - if hat_mut!(tables).full_net(WhatAmI::Peer) { - undeclare_peer_subscription(tables, None, res, &tables.zid.clone()); - } - propagate_forget_simple_subscription(tables, res); - } - - propagate_forget_simple_subscription_to_peers(tables, res); -} - -fn undeclare_router_subscription( - tables: &mut Tables, - face: Option<&Arc>, - res: &mut Arc, - router: &ZenohId, -) { - if res_hat!(res).router_subs.contains(router) { - unregister_router_subscription(tables, res, router); - propagate_forget_sourced_subscription(tables, res, face, router, WhatAmI::Router); - } -} - -fn forget_router_subscription( - tables: &TablesLock, - rtables: RwLockReadGuard, - face: &mut Arc, - expr: &WireExpr, - router: &ZenohId, -) { - match rtables.get_mapping(face, &expr.scope, expr.mapping) { - Some(prefix) => match Resource::get_resource(prefix, expr.suffix.as_ref()) { - Some(mut res) => { - drop(rtables); - let mut wtables = zwrite!(tables.tables); - undeclare_router_subscription(&mut wtables, Some(face), &mut res, router); - disable_matches_data_routes(&mut wtables, &mut res); - drop(wtables); - - let rtables = zread!(tables.tables); - let matches_data_routes = compute_matches_data_routes_(&rtables, &res); - drop(rtables); - let wtables = zwrite!(tables.tables); - for (mut res, data_routes) in matches_data_routes { - get_mut_unchecked(&mut res) - .context_mut() - .update_data_routes(data_routes); - } - Resource::clean(&mut res); - drop(wtables); - } - None => log::error!("Undeclare unknown router subscription!"), - }, - None => log::error!("Undeclare router subscription with unknown scope!"), - } -} - -fn unregister_peer_subscription(tables: &mut Tables, res: &mut Arc, peer: &ZenohId) { - log::debug!( - "Unregister peer subscription {} (peer: {})", - res.expr(), - peer - ); - res_hat_mut!(res).peer_subs.retain(|sub| sub != peer); - - if res_hat!(res).peer_subs.is_empty() { - hat_mut!(tables) - .peer_subs - .retain(|sub| !Arc::ptr_eq(sub, res)); - - if tables.whatami == WhatAmI::Peer { - propagate_forget_simple_subscription(tables, res); - } - } -} - -fn undeclare_peer_subscription( - tables: &mut Tables, - face: Option<&Arc>, - res: &mut Arc, - peer: &ZenohId, -) { - if res_hat!(res).peer_subs.contains(peer) { - unregister_peer_subscription(tables, res, peer); - propagate_forget_sourced_subscription(tables, res, face, peer, WhatAmI::Peer); - } -} - -fn forget_peer_subscription( - tables: &TablesLock, - rtables: RwLockReadGuard, - face: &mut Arc, - expr: &WireExpr, - peer: &ZenohId, -) { - match rtables.get_mapping(face, &expr.scope, expr.mapping) { - Some(prefix) => match Resource::get_resource(prefix, expr.suffix.as_ref()) { - Some(mut res) => { - drop(rtables); - let mut wtables = zwrite!(tables.tables); - undeclare_peer_subscription(&mut wtables, Some(face), &mut res, peer); - if wtables.whatami == WhatAmI::Router { - let client_subs = res.session_ctxs.values().any(|ctx| ctx.subs.is_some()); - let peer_subs = remote_peer_subs(&wtables, &res); - let zid = wtables.zid; - if !client_subs && 
!peer_subs { - undeclare_router_subscription(&mut wtables, None, &mut res, &zid); - } - } - disable_matches_data_routes(&mut wtables, &mut res); - drop(wtables); - - let rtables = zread!(tables.tables); - let matches_data_routes = compute_matches_data_routes_(&rtables, &res); - drop(rtables); - let wtables = zwrite!(tables.tables); - for (mut res, data_routes) in matches_data_routes { - get_mut_unchecked(&mut res) - .context_mut() - .update_data_routes(data_routes); - } - Resource::clean(&mut res); - drop(wtables); - } - None => log::error!("Undeclare unknown peer subscription!"), - }, - None => log::error!("Undeclare peer subscription with unknown scope!"), - } -} - pub(super) fn undeclare_client_subscription( tables: &mut Tables, face: &mut Arc, @@ -851,32 +246,10 @@ pub(super) fn undeclare_client_subscription( face_hat_mut!(face).remote_subs.remove(res); let mut client_subs = client_subs(res); - let router_subs = remote_router_subs(tables, res); - let peer_subs = remote_peer_subs(tables, res); - match tables.whatami { - WhatAmI::Router => { - if client_subs.is_empty() && !peer_subs { - undeclare_router_subscription(tables, None, res, &tables.zid.clone()); - } else { - propagate_forget_simple_subscription_to_peers(tables, res); - } - } - WhatAmI::Peer => { - if client_subs.is_empty() { - if hat!(tables).full_net(WhatAmI::Peer) { - undeclare_peer_subscription(tables, None, res, &tables.zid.clone()); - } else { - propagate_forget_simple_subscription(tables, res); - } - } - } - _ => { - if client_subs.is_empty() { - propagate_forget_simple_subscription(tables, res); - } - } + if client_subs.is_empty() { + propagate_forget_simple_subscription(tables, res); } - if client_subs.len() == 1 && !router_subs && !peer_subs { + if client_subs.len() == 1 { let face = &mut client_subs[0]; if face_hat!(face).local_subs.contains(res) && !(face.whatami == WhatAmI::Client && res.expr().starts_with(PREFIX_LIVELINESS)) @@ -939,336 +312,15 @@ pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { reliability: Reliability::Reliable, // @TODO mode: Mode::Push, }; - match tables.whatami { - WhatAmI::Router => { - if face.whatami == WhatAmI::Client { - for sub in &hat!(tables).router_subs { - face_hat_mut!(face).local_subs.insert(sub.clone()); - let key_expr = Resource::decl_key(sub, face); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // TODO - wire_expr: key_expr, - ext_info: sub_info, - }), - }, - sub.expr(), - )); - } - } else if face.whatami == WhatAmI::Peer && !hat!(tables).full_net(WhatAmI::Peer) { - for sub in &hat!(tables).router_subs { - if sub.context.is_some() - && (res_hat!(sub).router_subs.iter().any(|r| *r != tables.zid) - || sub.session_ctxs.values().any(|s| { - s.subs.is_some() - && (s.face.whatami == WhatAmI::Client - || (s.face.whatami == WhatAmI::Peer - && hat!(tables) - .failover_brokering(s.face.zid, face.zid))) - })) - { - face_hat_mut!(face).local_subs.insert(sub.clone()); - let key_expr = Resource::decl_key(sub, face); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // TODO - wire_expr: key_expr, - ext_info: sub_info, - }), - }, - sub.expr(), - )); - } - } - } - } - WhatAmI::Peer => { - 
if hat!(tables).full_net(WhatAmI::Peer) { - if face.whatami == WhatAmI::Client { - for sub in &hat!(tables).peer_subs { - face_hat_mut!(face).local_subs.insert(sub.clone()); - let key_expr = Resource::decl_key(sub, face); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // TODO - wire_expr: key_expr, - ext_info: sub_info, - }), - }, - sub.expr(), - )); - } - } - } else { - for src_face in tables - .faces - .values() - .cloned() - .collect::>>() - { - for sub in &face_hat!(src_face).remote_subs { - propagate_simple_subscription_to( - tables, - face, - sub, - &sub_info, - &mut src_face.clone(), - false, - ); - } - } - } - } - WhatAmI::Client => { - for src_face in tables - .faces - .values() - .cloned() - .collect::>>() - { - for sub in &face_hat!(src_face).remote_subs { - propagate_simple_subscription_to( - tables, - face, - sub, - &sub_info, - &mut src_face.clone(), - false, - ); - } - } - } - } -} - -pub(super) fn pubsub_remove_node(tables: &mut Tables, node: &ZenohId, net_type: WhatAmI) { - match net_type { - WhatAmI::Router => { - for mut res in hat!(tables) - .router_subs - .iter() - .filter(|res| res_hat!(res).router_subs.contains(node)) - .cloned() - .collect::>>() - { - unregister_router_subscription(tables, &mut res, node); - - let matches_data_routes = compute_matches_data_routes_(tables, &res); - for (mut res, data_routes) in matches_data_routes { - get_mut_unchecked(&mut res) - .context_mut() - .update_data_routes(data_routes); - } - Resource::clean(&mut res) - } - } - WhatAmI::Peer => { - for mut res in hat!(tables) - .peer_subs - .iter() - .filter(|res| res_hat!(res).peer_subs.contains(node)) - .cloned() - .collect::>>() - { - unregister_peer_subscription(tables, &mut res, node); - - if tables.whatami == WhatAmI::Router { - let client_subs = res.session_ctxs.values().any(|ctx| ctx.subs.is_some()); - let peer_subs = remote_peer_subs(tables, &res); - if !client_subs && !peer_subs { - undeclare_router_subscription(tables, None, &mut res, &tables.zid.clone()); - } - } - - // compute_matches_data_routes(tables, &mut res); - let matches_data_routes = compute_matches_data_routes_(tables, &res); - for (mut res, data_routes) in matches_data_routes { - get_mut_unchecked(&mut res) - .context_mut() - .update_data_routes(data_routes); - } - Resource::clean(&mut res) - } - } - _ => (), - } -} - -pub(super) fn pubsub_tree_change( - tables: &mut Tables, - new_childs: &[Vec], - net_type: WhatAmI, -) { - // propagate subs to new childs - for (tree_sid, tree_childs) in new_childs.iter().enumerate() { - if !tree_childs.is_empty() { - let net = hat!(tables).get_net(net_type).unwrap(); - let tree_idx = NodeIndex::new(tree_sid); - if net.graph.contains_node(tree_idx) { - let tree_id = net.graph[tree_idx].zid; - - let subs_res = match net_type { - WhatAmI::Router => &hat!(tables).router_subs, - _ => &hat!(tables).peer_subs, - }; - - for res in subs_res { - let subs = match net_type { - WhatAmI::Router => &res_hat!(res).router_subs, - _ => &res_hat!(res).peer_subs, - }; - for sub in subs { - if *sub == tree_id { - let sub_info = SubscriberInfo { - reliability: Reliability::Reliable, // @TODO - mode: Mode::Push, - }; - send_sourced_subscription_to_net_childs( - tables, - net, - tree_childs, - res, - None, - &sub_info, - tree_sid as NodeId, - ); - } - } - } - } - } - } - - // recompute routes - 
compute_data_routes_from(tables, &mut tables.root_res.clone()); -} - -pub(super) fn pubsub_linkstate_change(tables: &mut Tables, zid: &ZenohId, links: &[ZenohId]) { - if let Some(src_face) = tables.get_face(zid).cloned() { - if hat!(tables).router_peers_failover_brokering - && tables.whatami == WhatAmI::Router - && src_face.whatami == WhatAmI::Peer - { - for res in &face_hat!(src_face).remote_subs { - let client_subs = res - .session_ctxs - .values() - .any(|ctx| ctx.face.whatami == WhatAmI::Client && ctx.subs.is_some()); - if !remote_router_subs(tables, res) && !client_subs { - for ctx in get_mut_unchecked(&mut res.clone()) - .session_ctxs - .values_mut() - { - let dst_face = &mut get_mut_unchecked(ctx).face; - if dst_face.whatami == WhatAmI::Peer && src_face.zid != dst_face.zid { - if face_hat!(dst_face).local_subs.contains(res) { - let forget = !HatTables::failover_brokering_to(links, dst_face.zid) - && { - let ctx_links = hat!(tables) - .peers_net - .as_ref() - .map(|net| net.get_links(dst_face.zid)) - .unwrap_or_else(|| &[]); - res.session_ctxs.values().any(|ctx2| { - ctx2.face.whatami == WhatAmI::Peer - && ctx2.subs.is_some() - && HatTables::failover_brokering_to( - ctx_links, - ctx2.face.zid, - ) - }) - }; - if forget { - let wire_expr = Resource::get_best_key(res, "", dst_face.id); - dst_face.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::UndeclareSubscriber( - UndeclareSubscriber { - id: 0, // TODO - ext_wire_expr: WireExprType { wire_expr }, - }, - ), - }, - res.expr(), - )); - - face_hat_mut!(dst_face).local_subs.remove(res); - } - } else if HatTables::failover_brokering_to(links, ctx.face.zid) { - let dst_face = &mut get_mut_unchecked(ctx).face; - face_hat_mut!(dst_face).local_subs.insert(res.clone()); - let key_expr = Resource::decl_key(res, dst_face); - let sub_info = SubscriberInfo { - reliability: Reliability::Reliable, // TODO - mode: Mode::Push, - }; - dst_face.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // TODO - wire_expr: key_expr, - ext_info: sub_info, - }), - }, - res.expr(), - )); - } - } - } - } - } - } - } -} - -#[inline] -fn insert_faces_for_subs( - route: &mut Route, - expr: &RoutingExpr, - tables: &Tables, - net: &Network, - source: NodeId, - subs: &HashSet, -) { - if net.trees.len() > source as usize { - for sub in subs { - if let Some(sub_idx) = net.get_idx(sub) { - if net.trees[source as usize].directions.len() > sub_idx.index() { - if let Some(direction) = net.trees[source as usize].directions[sub_idx.index()] - { - if net.graph.contains_node(direction) { - if let Some(face) = tables.get_face(&net.graph[direction].zid) { - route.entry(face.id).or_insert_with(|| { - let key_expr = - Resource::get_best_key(expr.prefix, expr.suffix, face.id); - (face.clone(), key_expr.to_owned(), source) - }); - } - } - } - } - } + for src_face in tables + .faces + .values() + .cloned() + .collect::>>() + { + for sub in &face_hat!(src_face).remote_subs { + propagate_simple_subscription_to(tables, face, sub, &sub_info, &mut src_face.clone()); } - } else { - log::trace!("Tree for node sid:{} not yet ready", source); } } @@ -1279,28 +331,10 @@ impl HatPubSubTrait for HatCode { face: &mut Arc, expr: &WireExpr, sub_info: &SubscriberInfo, - node_id: 
NodeId, + _node_id: NodeId, ) { let rtables = zread!(tables.tables); - match (rtables.whatami, face.whatami) { - (WhatAmI::Router, WhatAmI::Router) => { - if let Some(router) = get_router(&rtables, face, node_id) { - declare_router_subscription(tables, rtables, face, expr, sub_info, router) - } - } - (WhatAmI::Router, WhatAmI::Peer) - | (WhatAmI::Peer, WhatAmI::Router) - | (WhatAmI::Peer, WhatAmI::Peer) => { - if hat!(rtables).full_net(WhatAmI::Peer) { - if let Some(peer) = get_peer(&rtables, face, node_id) { - declare_peer_subscription(tables, rtables, face, expr, sub_info, peer) - } - } else { - declare_client_subscription(tables, rtables, face, expr, sub_info) - } - } - _ => declare_client_subscription(tables, rtables, face, expr, sub_info), - } + declare_client_subscription(tables, rtables, face, expr, sub_info); } fn forget_subscription( @@ -1308,28 +342,10 @@ impl HatPubSubTrait for HatCode { tables: &TablesLock, face: &mut Arc, expr: &WireExpr, - node_id: NodeId, + _node_id: NodeId, ) { let rtables = zread!(tables.tables); - match (rtables.whatami, face.whatami) { - (WhatAmI::Router, WhatAmI::Router) => { - if let Some(router) = get_router(&rtables, face, node_id) { - forget_router_subscription(tables, rtables, face, expr, &router) - } - } - (WhatAmI::Router, WhatAmI::Peer) - | (WhatAmI::Peer, WhatAmI::Router) - | (WhatAmI::Peer, WhatAmI::Peer) => { - if hat!(rtables).full_net(WhatAmI::Peer) { - if let Some(peer) = get_peer(&rtables, face, node_id) { - forget_peer_subscription(tables, rtables, face, expr, &peer) - } - } else { - forget_client_subscription(tables, rtables, face, expr) - } - } - _ => forget_client_subscription(tables, rtables, face, expr), - } + forget_client_subscription(tables, rtables, face, expr); } fn compute_data_route( @@ -1364,82 +380,23 @@ impl HatPubSubTrait for HatCode { .map(|ctx| Cow::from(&ctx.matches)) .unwrap_or_else(|| Cow::from(Resource::get_matches(tables, &key_expr))); - let master = tables.whatami != WhatAmI::Router - || !hat!(tables).full_net(WhatAmI::Peer) - || *hat!(tables).elect_router(&tables.zid, &key_expr, hat!(tables).shared_nodes.iter()) - == tables.zid; - for mres in matches.iter() { let mres = mres.upgrade().unwrap(); - if tables.whatami == WhatAmI::Router { - if master || source_type == WhatAmI::Router { - let net = hat!(tables).routers_net.as_ref().unwrap(); - let router_source = match source_type { - WhatAmI::Router => source, - _ => net.idx.index() as NodeId, - }; - insert_faces_for_subs( - &mut route, - expr, - tables, - net, - router_source, - &res_hat!(mres).router_subs, - ); - } - if (master || source_type != WhatAmI::Router) - && hat!(tables).full_net(WhatAmI::Peer) - { - let net = hat!(tables).peers_net.as_ref().unwrap(); - let peer_source = match source_type { - WhatAmI::Peer => source, - _ => net.idx.index() as NodeId, - }; - insert_faces_for_subs( - &mut route, - expr, - tables, - net, - peer_source, - &res_hat!(mres).peer_subs, - ); - } - } - - if tables.whatami == WhatAmI::Peer && hat!(tables).full_net(WhatAmI::Peer) { - let net = hat!(tables).peers_net.as_ref().unwrap(); - let peer_source = match source_type { - WhatAmI::Router | WhatAmI::Peer => source, - _ => net.idx.index() as NodeId, - }; - insert_faces_for_subs( - &mut route, - expr, - tables, - net, - peer_source, - &res_hat!(mres).peer_subs, - ); - } - - if tables.whatami != WhatAmI::Router || master || source_type == WhatAmI::Router { - for (sid, context) in &mres.session_ctxs { - if let Some(subinfo) = &context.subs { - if match tables.whatami { - 
WhatAmI::Router => context.face.whatami != WhatAmI::Router, - _ => { - source_type == WhatAmI::Client - || context.face.whatami == WhatAmI::Client - } - } && subinfo.mode == Mode::Push - { - route.entry(*sid).or_insert_with(|| { - let key_expr = - Resource::get_best_key(expr.prefix, expr.suffix, *sid); - (context.face.clone(), key_expr.to_owned(), NodeId::default()) - }); + for (sid, context) in &mres.session_ctxs { + if let Some(subinfo) = &context.subs { + if match tables.whatami { + WhatAmI::Router => context.face.whatami != WhatAmI::Router, + _ => { + source_type == WhatAmI::Client + || context.face.whatami == WhatAmI::Client } + } && subinfo.mode == Mode::Push + { + route.entry(*sid).or_insert_with(|| { + let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, *sid); + (context.face.clone(), key_expr.to_owned(), NodeId::default()) + }); } } } @@ -1493,73 +450,8 @@ impl HatPubSubTrait for HatCode { client_data_route: None, }; let mut expr = RoutingExpr::new(res, ""); - if tables.whatami == WhatAmI::Router { - let indexes = hat!(tables) - .routers_net - .as_ref() - .unwrap() - .graph - .node_indices() - .collect::>(); - let max_idx = indexes.iter().max().unwrap(); - routes - .routers_data_routes - .resize_with(max_idx.index() + 1, || Arc::new(HashMap::new())); - - for idx in &indexes { - routes.routers_data_routes[idx.index()] = self.compute_data_route( - tables, - &mut expr, - idx.index() as NodeId, - WhatAmI::Router, - ); - } - - routes.peer_data_route = - Some(self.compute_data_route(tables, &mut expr, NodeId::default(), WhatAmI::Peer)); - } - if (tables.whatami == WhatAmI::Router || tables.whatami == WhatAmI::Peer) - && hat!(tables).full_net(WhatAmI::Peer) - { - let indexes = hat!(tables) - .peers_net - .as_ref() - .unwrap() - .graph - .node_indices() - .collect::>(); - let max_idx = indexes.iter().max().unwrap(); - routes - .peers_data_routes - .resize_with(max_idx.index() + 1, || Arc::new(HashMap::new())); - - for idx in &indexes { - routes.peers_data_routes[idx.index()] = self.compute_data_route( - tables, - &mut expr, - idx.index() as NodeId, - WhatAmI::Peer, - ); - } - } - if tables.whatami == WhatAmI::Peer && !hat!(tables).full_net(WhatAmI::Peer) { - routes.client_data_route = Some(self.compute_data_route( - tables, - &mut expr, - NodeId::default(), - WhatAmI::Client, - )); - routes.peer_data_route = - Some(self.compute_data_route(tables, &mut expr, NodeId::default(), WhatAmI::Peer)); - } - if tables.whatami == WhatAmI::Client { - routes.client_data_route = Some(self.compute_data_route( - tables, - &mut expr, - NodeId::default(), - WhatAmI::Client, - )); - } + routes.client_data_route = + Some(self.compute_data_route(tables, &mut expr, NodeId::default(), WhatAmI::Client)); routes.matching_pulls = Some(self.compute_matching_pulls(tables, &mut expr)); routes } @@ -1569,81 +461,14 @@ impl HatPubSubTrait for HatCode { let mut res_mut = res.clone(); let res_mut = get_mut_unchecked(&mut res_mut); let mut expr = RoutingExpr::new(res, ""); - if tables.whatami == WhatAmI::Router { - let indexes = hat!(tables) - .routers_net - .as_ref() - .unwrap() - .graph - .node_indices() - .collect::>(); - let max_idx = indexes.iter().max().unwrap(); - let routers_data_routes = &mut res_mut.context_mut().routers_data_routes; - routers_data_routes.clear(); - routers_data_routes.resize_with(max_idx.index() + 1, || Arc::new(HashMap::new())); - - for idx in &indexes { - routers_data_routes[idx.index()] = self.compute_data_route( - tables, - &mut expr, - idx.index() as NodeId, - WhatAmI::Router, 
- ); - } - res_mut.context_mut().peer_data_route = Some(self.compute_data_route( - tables, - &mut expr, - NodeId::default(), - WhatAmI::Peer, - )); - } - if (tables.whatami == WhatAmI::Router || tables.whatami == WhatAmI::Peer) - && hat!(tables).full_net(WhatAmI::Peer) - { - let indexes = hat!(tables) - .peers_net - .as_ref() - .unwrap() - .graph - .node_indices() - .collect::>(); - let max_idx = indexes.iter().max().unwrap(); - let peers_data_routes = &mut res_mut.context_mut().peers_data_routes; - peers_data_routes.clear(); - peers_data_routes.resize_with(max_idx.index() + 1, || Arc::new(HashMap::new())); + res_mut.context_mut().client_data_route = Some(self.compute_data_route( + tables, + &mut expr, + NodeId::default(), + WhatAmI::Client, + )); - for idx in &indexes { - peers_data_routes[idx.index()] = self.compute_data_route( - tables, - &mut expr, - idx.index() as NodeId, - WhatAmI::Peer, - ); - } - } - if tables.whatami == WhatAmI::Peer && !hat!(tables).full_net(WhatAmI::Peer) { - res_mut.context_mut().client_data_route = Some(self.compute_data_route( - tables, - &mut expr, - NodeId::default(), - WhatAmI::Client, - )); - res_mut.context_mut().peer_data_route = Some(self.compute_data_route( - tables, - &mut expr, - NodeId::default(), - WhatAmI::Peer, - )); - } - if tables.whatami == WhatAmI::Client { - res_mut.context_mut().client_data_route = Some(self.compute_data_route( - tables, - &mut expr, - NodeId::default(), - WhatAmI::Client, - )); - } res_mut.context_mut().matching_pulls = self.compute_matching_pulls(tables, &mut expr); } } diff --git a/zenoh/src/net/routing/hat/p2p_peer/queries.rs b/zenoh/src/net/routing/hat/p2p_peer/queries.rs index c1093a8a00..41ef4feb0d 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/queries.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/queries.rs @@ -11,9 +11,8 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::network::Network; -use super::{face_hat, face_hat_mut, hat, hat_mut, res_hat, res_hat_mut}; -use super::{get_peer, get_router, HatCode, HatContext, HatFace, HatTables}; +use super::{face_hat, face_hat_mut}; +use super::{HatCode, HatFace}; use crate::net::routing::dispatcher::face::FaceState; use crate::net::routing::dispatcher::queries::*; use crate::net::routing::dispatcher::resource::{NodeId, Resource, SessionContext}; @@ -24,7 +23,6 @@ use crate::net::routing::dispatcher::tables::{Tables, TablesLock}; use crate::net::routing::hat::HatQueriesTrait; use crate::net::routing::{RoutingContext, PREFIX_LIVELINESS}; use ordered_float::OrderedFloat; -use petgraph::graph::NodeIndex; use std::borrow::Cow; use std::collections::HashMap; use std::sync::{Arc, RwLockReadGuard}; @@ -32,7 +30,7 @@ use zenoh_buffers::ZBuf; use zenoh_protocol::core::key_expr::include::{Includer, DEFAULT_INCLUDER}; use zenoh_protocol::core::key_expr::OwnedKeyExpr; use zenoh_protocol::{ - core::{key_expr::keyexpr, WhatAmI, WireExpr, ZenohId}, + core::{key_expr::keyexpr, WhatAmI, WireExpr}, network::declare::{ common::ext::WireExprType, ext, queryable::ext::QueryableInfo, Declare, DeclareBody, DeclareQueryable, UndeclareQueryable, @@ -56,120 +54,11 @@ fn merge_qabl_infos(mut this: QueryableInfo, info: &QueryableInfo) -> QueryableI this } -fn local_router_qabl_info(tables: &Tables, res: &Arc) -> QueryableInfo { - let info = if hat!(tables).full_net(WhatAmI::Peer) { - res.context.as_ref().and_then(|_| { - res_hat!(res) - .peer_qabls - .iter() - .fold(None, |accu, (zid, info)| { - if *zid != tables.zid { - Some(match accu { - Some(accu) => merge_qabl_infos(accu, info), - None 
=> *info, - }) - } else { - accu - } - }) - }) - } else { - None - }; - res.session_ctxs - .values() - .fold(info, |accu, ctx| { - if let Some(info) = ctx.qabl.as_ref() { - Some(match accu { - Some(accu) => merge_qabl_infos(accu, info), - None => *info, - }) - } else { - accu - } - }) - .unwrap_or(QueryableInfo { - complete: 0, - distance: 0, - }) -} - -fn local_peer_qabl_info(tables: &Tables, res: &Arc) -> QueryableInfo { - let info = if tables.whatami == WhatAmI::Router && res.context.is_some() { - res_hat!(res) - .router_qabls - .iter() - .fold(None, |accu, (zid, info)| { - if *zid != tables.zid { - Some(match accu { - Some(accu) => merge_qabl_infos(accu, info), - None => *info, - }) - } else { - accu - } - }) - } else { - None - }; - res.session_ctxs - .values() - .fold(info, |accu, ctx| { - if let Some(info) = ctx.qabl.as_ref() { - Some(match accu { - Some(accu) => merge_qabl_infos(accu, info), - None => *info, - }) - } else { - accu - } - }) - .unwrap_or(QueryableInfo { - complete: 0, - distance: 0, - }) -} - -fn local_qabl_info(tables: &Tables, res: &Arc, face: &Arc) -> QueryableInfo { - let mut info = if tables.whatami == WhatAmI::Router && res.context.is_some() { - res_hat!(res) - .router_qabls - .iter() - .fold(None, |accu, (zid, info)| { - if *zid != tables.zid { - Some(match accu { - Some(accu) => merge_qabl_infos(accu, info), - None => *info, - }) - } else { - accu - } - }) - } else { - None - }; - if res.context.is_some() && hat!(tables).full_net(WhatAmI::Peer) { - info = res_hat!(res) - .peer_qabls - .iter() - .fold(info, |accu, (zid, info)| { - if *zid != tables.zid { - Some(match accu { - Some(accu) => merge_qabl_infos(accu, info), - None => *info, - }) - } else { - accu - } - }) - } +fn local_qabl_info(_tables: &Tables, res: &Arc, face: &Arc) -> QueryableInfo { res.session_ctxs .values() - .fold(info, |accu, ctx| { - if ctx.face.id != face.id && ctx.face.whatami != WhatAmI::Peer - || face.whatami != WhatAmI::Peer - || hat!(tables).failover_brokering(ctx.face.zid, face.zid) - { + .fold(None, |accu, ctx| { + if ctx.face.id != face.id { if let Some(info) = ctx.qabl.as_ref() { Some(match accu { Some(accu) => merge_qabl_infos(accu, info), @@ -188,91 +77,20 @@ fn local_qabl_info(tables: &Tables, res: &Arc, face: &Arc) }) } -#[allow(clippy::too_many_arguments)] -#[inline] -fn send_sourced_queryable_to_net_childs( - tables: &Tables, - net: &Network, - childs: &[NodeIndex], - res: &Arc, - qabl_info: &QueryableInfo, - src_face: Option<&mut Arc>, - routing_context: NodeId, -) { - for child in childs { - if net.graph.contains_node(*child) { - match tables.get_face(&net.graph[*child].zid).cloned() { - Some(mut someface) => { - if src_face.is_none() || someface.id != src_face.as_ref().unwrap().id { - let key_expr = Resource::decl_key(res, &mut someface); - - log::debug!("Send queryable {} on {}", res.expr(), someface); - - someface.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType { - node_id: routing_context, - }, - body: DeclareBody::DeclareQueryable(DeclareQueryable { - id: 0, // TODO - wire_expr: key_expr, - ext_info: *qabl_info, - }), - }, - res.expr(), - )); - } - } - None => log::trace!("Unable to find face for zid {}", net.graph[*child].zid), - } - } - } -} - fn propagate_simple_queryable( tables: &mut Tables, res: &Arc, src_face: Option<&mut Arc>, ) { - let full_peers_net = hat!(tables).full_net(WhatAmI::Peer); let faces = tables.faces.values().cloned(); for mut dst_face 
in faces { let info = local_qabl_info(tables, res, &dst_face); let current_info = face_hat!(dst_face).local_qabls.get(res); if (src_face.is_none() || src_face.as_ref().unwrap().id != dst_face.id) && (current_info.is_none() || *current_info.unwrap() != info) - && match tables.whatami { - WhatAmI::Router => { - if full_peers_net { - dst_face.whatami == WhatAmI::Client - } else { - dst_face.whatami != WhatAmI::Router - && (src_face.is_none() - || src_face.as_ref().unwrap().whatami != WhatAmI::Peer - || dst_face.whatami != WhatAmI::Peer - || hat!(tables).failover_brokering( - src_face.as_ref().unwrap().zid, - dst_face.zid, - )) - } - } - WhatAmI::Peer => { - if full_peers_net { - dst_face.whatami == WhatAmI::Client - } else { - src_face.is_none() - || src_face.as_ref().unwrap().whatami == WhatAmI::Client - || dst_face.whatami == WhatAmI::Client - } - } - _ => { - src_face.is_none() - || src_face.as_ref().unwrap().whatami == WhatAmI::Client - || dst_face.whatami == WhatAmI::Client - } - } + && (src_face.is_none() + || src_face.as_ref().unwrap().whatami == WhatAmI::Client + || dst_face.whatami == WhatAmI::Client) { face_hat_mut!(&mut dst_face) .local_qabls @@ -295,234 +113,6 @@ fn propagate_simple_queryable( } } -fn propagate_sourced_queryable( - tables: &Tables, - res: &Arc, - qabl_info: &QueryableInfo, - src_face: Option<&mut Arc>, - source: &ZenohId, - net_type: WhatAmI, -) { - let net = hat!(tables).get_net(net_type).unwrap(); - match net.get_idx(source) { - Some(tree_sid) => { - if net.trees.len() > tree_sid.index() { - send_sourced_queryable_to_net_childs( - tables, - net, - &net.trees[tree_sid.index()].childs, - res, - qabl_info, - src_face, - tree_sid.index() as NodeId, - ); - } else { - log::trace!( - "Propagating qabl {}: tree for node {} sid:{} not yet ready", - res.expr(), - tree_sid.index(), - source - ); - } - } - None => log::error!( - "Error propagating qabl {}: cannot get index of {}!", - res.expr(), - source - ), - } -} - -fn register_router_queryable( - tables: &mut Tables, - mut face: Option<&mut Arc>, - res: &mut Arc, - qabl_info: &QueryableInfo, - router: ZenohId, -) { - let current_info = res_hat!(res).router_qabls.get(&router); - if current_info.is_none() || current_info.unwrap() != qabl_info { - // Register router queryable - { - log::debug!( - "Register router queryable {} (router: {})", - res.expr(), - router, - ); - res_hat_mut!(res).router_qabls.insert(router, *qabl_info); - hat_mut!(tables).router_qabls.insert(res.clone()); - } - - // Propagate queryable to routers - propagate_sourced_queryable( - tables, - res, - qabl_info, - face.as_deref_mut(), - &router, - WhatAmI::Router, - ); - } - - if hat!(tables).full_net(WhatAmI::Peer) { - // Propagate queryable to peers - if face.is_none() || face.as_ref().unwrap().whatami != WhatAmI::Peer { - let local_info = local_peer_qabl_info(tables, res); - register_peer_queryable(tables, face.as_deref_mut(), res, &local_info, tables.zid) - } - } - - // Propagate queryable to clients - propagate_simple_queryable(tables, res, face); -} - -fn declare_router_queryable( - tables: &TablesLock, - rtables: RwLockReadGuard, - face: &mut Arc, - expr: &WireExpr, - qabl_info: &QueryableInfo, - router: ZenohId, -) { - match rtables - .get_mapping(face, &expr.scope, expr.mapping) - .cloned() - { - Some(mut prefix) => { - let res = Resource::get_resource(&prefix, &expr.suffix); - let (mut res, mut wtables) = - if res.as_ref().map(|r| r.context.is_some()).unwrap_or(false) { - drop(rtables); - let wtables = zwrite!(tables.tables); - (res.unwrap(), 
wtables) - } else { - let mut fullexpr = prefix.expr(); - fullexpr.push_str(expr.suffix.as_ref()); - log::debug!("Register router queryable {}", fullexpr); - let mut matches = keyexpr::new(fullexpr.as_str()) - .map(|ke| Resource::get_matches(&rtables, ke)) - .unwrap_or_default(); - drop(rtables); - let mut wtables = zwrite!(tables.tables); - let mut res = - Resource::make_resource(&mut wtables, &mut prefix, expr.suffix.as_ref()); - matches.push(Arc::downgrade(&res)); - Resource::match_resource(&wtables, &mut res, matches); - (res, wtables) - }; - register_router_queryable(&mut wtables, Some(face), &mut res, qabl_info, router); - disable_matches_query_routes(&mut wtables, &mut res); - drop(wtables); - - let rtables = zread!(tables.tables); - let matches_query_routes = compute_matches_query_routes_(&rtables, &res); - drop(rtables); - - let wtables = zwrite!(tables.tables); - for (mut res, query_routes) in matches_query_routes { - get_mut_unchecked(&mut res) - .context_mut() - .update_query_routes(query_routes); - } - drop(wtables); - } - None => log::error!("Declare router queryable for unknown scope {}!", expr.scope), - } -} - -fn register_peer_queryable( - tables: &mut Tables, - mut face: Option<&mut Arc>, - res: &mut Arc, - qabl_info: &QueryableInfo, - peer: ZenohId, -) { - let current_info = res_hat!(res).peer_qabls.get(&peer); - if current_info.is_none() || current_info.unwrap() != qabl_info { - // Register peer queryable - { - log::debug!("Register peer queryable {} (peer: {})", res.expr(), peer,); - res_hat_mut!(res).peer_qabls.insert(peer, *qabl_info); - hat_mut!(tables).peer_qabls.insert(res.clone()); - } - - // Propagate queryable to peers - propagate_sourced_queryable( - tables, - res, - qabl_info, - face.as_deref_mut(), - &peer, - WhatAmI::Peer, - ); - } - - if tables.whatami == WhatAmI::Peer { - // Propagate queryable to clients - propagate_simple_queryable(tables, res, face); - } -} - -fn declare_peer_queryable( - tables: &TablesLock, - rtables: RwLockReadGuard, - face: &mut Arc, - expr: &WireExpr, - qabl_info: &QueryableInfo, - peer: ZenohId, -) { - match rtables - .get_mapping(face, &expr.scope, expr.mapping) - .cloned() - { - Some(mut prefix) => { - let res = Resource::get_resource(&prefix, &expr.suffix); - let (mut res, mut wtables) = - if res.as_ref().map(|r| r.context.is_some()).unwrap_or(false) { - drop(rtables); - let wtables = zwrite!(tables.tables); - (res.unwrap(), wtables) - } else { - let mut fullexpr = prefix.expr(); - fullexpr.push_str(expr.suffix.as_ref()); - log::debug!("Register peer queryable {}", fullexpr); - let mut matches = keyexpr::new(fullexpr.as_str()) - .map(|ke| Resource::get_matches(&rtables, ke)) - .unwrap_or_default(); - drop(rtables); - let mut wtables = zwrite!(tables.tables); - let mut res = - Resource::make_resource(&mut wtables, &mut prefix, expr.suffix.as_ref()); - matches.push(Arc::downgrade(&res)); - Resource::match_resource(&wtables, &mut res, matches); - (res, wtables) - }; - let mut face = Some(face); - register_peer_queryable(&mut wtables, face.as_deref_mut(), &mut res, qabl_info, peer); - if wtables.whatami == WhatAmI::Router { - let local_info = local_router_qabl_info(&wtables, &res); - let zid = wtables.zid; - register_router_queryable(&mut wtables, face, &mut res, &local_info, zid); - } - disable_matches_query_routes(&mut wtables, &mut res); - drop(wtables); - - let rtables = zread!(tables.tables); - let matches_query_routes = compute_matches_query_routes_(&rtables, &res); - drop(rtables); - - let wtables = zwrite!(tables.tables); 
- for (mut res, query_routes) in matches_query_routes { - get_mut_unchecked(&mut res) - .context_mut() - .update_query_routes(query_routes); - } - drop(wtables); - } - None => log::error!("Declare router queryable for unknown scope {}!", expr.scope), - } -} - fn register_client_queryable( _tables: &mut Tables, face: &mut Arc, @@ -583,38 +173,7 @@ fn declare_client_queryable( }; register_client_queryable(&mut wtables, face, &mut res, qabl_info); - - match wtables.whatami { - WhatAmI::Router => { - let local_details = local_router_qabl_info(&wtables, &res); - let zid = wtables.zid; - register_router_queryable( - &mut wtables, - Some(face), - &mut res, - &local_details, - zid, - ); - } - WhatAmI::Peer => { - if hat!(wtables).full_net(WhatAmI::Peer) { - let local_details = local_peer_qabl_info(&wtables, &res); - let zid = wtables.zid; - register_peer_queryable( - &mut wtables, - Some(face), - &mut res, - &local_details, - zid, - ); - } else { - propagate_simple_queryable(&mut wtables, &res, Some(face)); - } - } - _ => { - propagate_simple_queryable(&mut wtables, &res, Some(face)); - } - } + propagate_simple_queryable(&mut wtables, &res, Some(face)); disable_matches_query_routes(&mut wtables, &mut res); drop(wtables); @@ -634,24 +193,6 @@ fn declare_client_queryable( } } -#[inline] -fn remote_router_qabls(tables: &Tables, res: &Arc) -> bool { - res.context.is_some() - && res_hat!(res) - .router_qabls - .keys() - .any(|router| router != &tables.zid) -} - -#[inline] -fn remote_peer_qabls(tables: &Tables, res: &Arc) -> bool { - res.context.is_some() - && res_hat!(res) - .peer_qabls - .keys() - .any(|peer| peer != &tables.zid) -} - #[inline] fn client_qabls(res: &Arc) -> Vec> { res.session_ctxs @@ -666,46 +207,6 @@ fn client_qabls(res: &Arc) -> Vec> { .collect() } -#[inline] -fn send_forget_sourced_queryable_to_net_childs( - tables: &Tables, - net: &Network, - childs: &[NodeIndex], - res: &Arc, - src_face: Option<&Arc>, - routing_context: NodeId, -) { - for child in childs { - if net.graph.contains_node(*child) { - match tables.get_face(&net.graph[*child].zid).cloned() { - Some(mut someface) => { - if src_face.is_none() || someface.id != src_face.unwrap().id { - let wire_expr = Resource::decl_key(res, &mut someface); - - log::debug!("Send forget queryable {} on {}", res.expr(), someface); - - someface.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType { - node_id: routing_context, - }, - body: DeclareBody::UndeclareQueryable(UndeclareQueryable { - id: 0, // TODO - ext_wire_expr: WireExprType { wire_expr }, - }), - }, - res.expr(), - )); - } - } - None => log::trace!("Unable to find face for zid {}", net.graph[*child].zid), - } - } - } -} - fn propagate_forget_simple_queryable(tables: &mut Tables, res: &mut Arc) { for face in tables.faces.values_mut() { if face_hat!(face).local_qabls.contains_key(res) { @@ -728,225 +229,6 @@ fn propagate_forget_simple_queryable(tables: &mut Tables, res: &mut Arc) { - if !hat!(tables).full_net(WhatAmI::Peer) - && res_hat!(res).router_qabls.len() == 1 - && res_hat!(res).router_qabls.contains_key(&tables.zid) - { - for mut face in tables - .faces - .values() - .cloned() - .collect::>>() - { - if face.whatami == WhatAmI::Peer - && face_hat!(face).local_qabls.contains_key(res) - && !res.session_ctxs.values().any(|s| { - face.zid != s.face.zid - && s.qabl.is_some() - && (s.face.whatami == WhatAmI::Client - || (s.face.whatami == WhatAmI::Peer - && 
hat!(tables).failover_brokering(s.face.zid, face.zid))) - }) - { - let wire_expr = Resource::get_best_key(res, "", face.id); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::UndeclareQueryable(UndeclareQueryable { - id: 0, // TODO - ext_wire_expr: WireExprType { wire_expr }, - }), - }, - res.expr(), - )); - - face_hat_mut!(&mut face).local_qabls.remove(res); - } - } - } -} - -fn propagate_forget_sourced_queryable( - tables: &mut Tables, - res: &mut Arc, - src_face: Option<&Arc>, - source: &ZenohId, - net_type: WhatAmI, -) { - let net = hat!(tables).get_net(net_type).unwrap(); - match net.get_idx(source) { - Some(tree_sid) => { - if net.trees.len() > tree_sid.index() { - send_forget_sourced_queryable_to_net_childs( - tables, - net, - &net.trees[tree_sid.index()].childs, - res, - src_face, - tree_sid.index() as NodeId, - ); - } else { - log::trace!( - "Propagating forget qabl {}: tree for node {} sid:{} not yet ready", - res.expr(), - tree_sid.index(), - source - ); - } - } - None => log::error!( - "Error propagating forget qabl {}: cannot get index of {}!", - res.expr(), - source - ), - } -} - -fn unregister_router_queryable(tables: &mut Tables, res: &mut Arc, router: &ZenohId) { - log::debug!( - "Unregister router queryable {} (router: {})", - res.expr(), - router, - ); - res_hat_mut!(res).router_qabls.remove(router); - - if res_hat!(res).router_qabls.is_empty() { - hat_mut!(tables) - .router_qabls - .retain(|qabl| !Arc::ptr_eq(qabl, res)); - - if hat!(tables).full_net(WhatAmI::Peer) { - undeclare_peer_queryable(tables, None, res, &tables.zid.clone()); - } - propagate_forget_simple_queryable(tables, res); - } - - propagate_forget_simple_queryable_to_peers(tables, res); -} - -fn undeclare_router_queryable( - tables: &mut Tables, - face: Option<&Arc>, - res: &mut Arc, - router: &ZenohId, -) { - if res_hat!(res).router_qabls.contains_key(router) { - unregister_router_queryable(tables, res, router); - propagate_forget_sourced_queryable(tables, res, face, router, WhatAmI::Router); - } -} - -fn forget_router_queryable( - tables: &TablesLock, - rtables: RwLockReadGuard, - face: &mut Arc, - expr: &WireExpr, - router: &ZenohId, -) { - match rtables.get_mapping(face, &expr.scope, expr.mapping) { - Some(prefix) => match Resource::get_resource(prefix, expr.suffix.as_ref()) { - Some(mut res) => { - drop(rtables); - let mut wtables = zwrite!(tables.tables); - undeclare_router_queryable(&mut wtables, Some(face), &mut res, router); - disable_matches_query_routes(&mut wtables, &mut res); - drop(wtables); - - let rtables = zread!(tables.tables); - let matches_query_routes = compute_matches_query_routes_(&rtables, &res); - drop(rtables); - - let wtables = zwrite!(tables.tables); - for (mut res, query_routes) in matches_query_routes { - get_mut_unchecked(&mut res) - .context_mut() - .update_query_routes(query_routes); - } - Resource::clean(&mut res); - drop(wtables); - } - None => log::error!("Undeclare unknown router queryable!"), - }, - None => log::error!("Undeclare router queryable with unknown scope!"), - } -} - -fn unregister_peer_queryable(tables: &mut Tables, res: &mut Arc, peer: &ZenohId) { - log::debug!("Unregister peer queryable {} (peer: {})", res.expr(), peer,); - res_hat_mut!(res).peer_qabls.remove(peer); - - if res_hat!(res).peer_qabls.is_empty() { - hat_mut!(tables) - .peer_qabls - .retain(|qabl| !Arc::ptr_eq(qabl, res)); - - if tables.whatami == 
WhatAmI::Peer { - propagate_forget_simple_queryable(tables, res); - } - } -} - -fn undeclare_peer_queryable( - tables: &mut Tables, - face: Option<&Arc>, - res: &mut Arc, - peer: &ZenohId, -) { - if res_hat!(res).peer_qabls.contains_key(peer) { - unregister_peer_queryable(tables, res, peer); - propagate_forget_sourced_queryable(tables, res, face, peer, WhatAmI::Peer); - } -} - -fn forget_peer_queryable( - tables: &TablesLock, - rtables: RwLockReadGuard, - face: &mut Arc, - expr: &WireExpr, - peer: &ZenohId, -) { - match rtables.get_mapping(face, &expr.scope, expr.mapping) { - Some(prefix) => match Resource::get_resource(prefix, expr.suffix.as_ref()) { - Some(mut res) => { - drop(rtables); - let mut wtables = zwrite!(tables.tables); - undeclare_peer_queryable(&mut wtables, Some(face), &mut res, peer); - - if wtables.whatami == WhatAmI::Router { - let client_qabls = res.session_ctxs.values().any(|ctx| ctx.qabl.is_some()); - let peer_qabls = remote_peer_qabls(&wtables, &res); - let zid = wtables.zid; - if !client_qabls && !peer_qabls { - undeclare_router_queryable(&mut wtables, None, &mut res, &zid); - } else { - let local_info = local_router_qabl_info(&wtables, &res); - register_router_queryable(&mut wtables, None, &mut res, &local_info, zid); - } - } - drop(wtables); - - let rtables = zread!(tables.tables); - let matches_query_routes = compute_matches_query_routes_(&rtables, &res); - drop(rtables); - - let wtables = zwrite!(tables.tables); - for (mut res, query_routes) in matches_query_routes { - get_mut_unchecked(&mut res) - .context_mut() - .update_query_routes(query_routes); - } - Resource::clean(&mut res); - drop(wtables); - } - None => log::error!("Undeclare unknown peer queryable!"), - }, - None => log::error!("Undeclare peer queryable with unknown scope!"), - } -} - pub(super) fn undeclare_client_queryable( tables: &mut Tables, face: &mut Arc, @@ -961,43 +243,12 @@ pub(super) fn undeclare_client_queryable( } let mut client_qabls = client_qabls(res); - let router_qabls = remote_router_qabls(tables, res); - let peer_qabls = remote_peer_qabls(tables, res); - - match tables.whatami { - WhatAmI::Router => { - if client_qabls.is_empty() && !peer_qabls { - undeclare_router_queryable(tables, None, res, &tables.zid.clone()); - } else { - let local_info = local_router_qabl_info(tables, res); - register_router_queryable(tables, None, res, &local_info, tables.zid); - propagate_forget_simple_queryable_to_peers(tables, res); - } - } - WhatAmI::Peer => { - if hat!(tables).full_net(WhatAmI::Peer) { - if client_qabls.is_empty() { - undeclare_peer_queryable(tables, None, res, &tables.zid.clone()); - } else { - let local_info = local_peer_qabl_info(tables, res); - register_peer_queryable(tables, None, res, &local_info, tables.zid); - } - } else if client_qabls.is_empty() { - propagate_forget_simple_queryable(tables, res); - } else { - propagate_simple_queryable(tables, res, None); - } - } - _ => { - if client_qabls.is_empty() { - propagate_forget_simple_queryable(tables, res); - } else { - propagate_simple_queryable(tables, res, None); - } - } + if client_qabls.is_empty() { + propagate_forget_simple_queryable(tables, res); + } else { + propagate_simple_queryable(tables, res, None); } - - if client_qabls.len() == 1 && !router_qabls && !peer_qabls { + if client_qabls.len() == 1 { let face = &mut client_qabls[0]; if face_hat!(face).local_qabls.contains_key(res) { let wire_expr = Resource::get_best_key(res, "", face.id); @@ -1053,340 +304,17 @@ fn forget_client_queryable( } } -pub(super) fn 
queries_new_face(tables: &mut Tables, face: &mut Arc) { - match tables.whatami { - WhatAmI::Router => { - if face.whatami == WhatAmI::Client { - for qabl in hat!(tables).router_qabls.iter() { - if qabl.context.is_some() { - let info = local_qabl_info(tables, qabl, face); - face_hat_mut!(face).local_qabls.insert(qabl.clone(), info); - let key_expr = Resource::decl_key(qabl, face); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareQueryable(DeclareQueryable { - id: 0, // TODO - wire_expr: key_expr, - ext_info: info, - }), - }, - qabl.expr(), - )); - } - } - } else if face.whatami == WhatAmI::Peer && !hat!(tables).full_net(WhatAmI::Peer) { - for qabl in hat!(tables).router_qabls.iter() { - if qabl.context.is_some() - && (res_hat!(qabl).router_qabls.keys().any(|r| *r != tables.zid) - || qabl.session_ctxs.values().any(|s| { - s.qabl.is_some() - && (s.face.whatami == WhatAmI::Client - || (s.face.whatami == WhatAmI::Peer - && hat!(tables) - .failover_brokering(s.face.zid, face.zid))) - })) - { - let info = local_qabl_info(tables, qabl, face); - face_hat_mut!(face).local_qabls.insert(qabl.clone(), info); - let key_expr = Resource::decl_key(qabl, face); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareQueryable(DeclareQueryable { - id: 0, // TODO - wire_expr: key_expr, - ext_info: info, - }), - }, - qabl.expr(), - )); - } - } - } - } - WhatAmI::Peer => { - if hat!(tables).full_net(WhatAmI::Peer) { - if face.whatami == WhatAmI::Client { - for qabl in &hat!(tables).peer_qabls { - if qabl.context.is_some() { - let info = local_qabl_info(tables, qabl, face); - face_hat_mut!(face).local_qabls.insert(qabl.clone(), info); - let key_expr = Resource::decl_key(qabl, face); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareQueryable(DeclareQueryable { - id: 0, // TODO - wire_expr: key_expr, - ext_info: info, - }), - }, - qabl.expr(), - )); - } - } - } - } else { - for face in tables - .faces - .values() - .cloned() - .collect::>>() - { - for qabl in face_hat!(face).remote_qabls.iter() { - propagate_simple_queryable(tables, qabl, Some(&mut face.clone())); - } - } - } - } - WhatAmI::Client => { - for face in tables - .faces - .values() - .cloned() - .collect::>>() - { - for qabl in face_hat!(face).remote_qabls.iter() { - propagate_simple_queryable(tables, qabl, Some(&mut face.clone())); - } - } - } - } -} - -pub(super) fn queries_remove_node(tables: &mut Tables, node: &ZenohId, net_type: WhatAmI) { - match net_type { - WhatAmI::Router => { - let mut qabls = vec![]; - for res in hat!(tables).router_qabls.iter() { - for qabl in res_hat!(res).router_qabls.keys() { - if qabl == node { - qabls.push(res.clone()); - } - } - } - for mut res in qabls { - unregister_router_queryable(tables, &mut res, node); - - let matches_query_routes = compute_matches_query_routes_(tables, &res); - for (mut res, query_routes) in matches_query_routes { - get_mut_unchecked(&mut res) - .context_mut() - .update_query_routes(query_routes); - } - Resource::clean(&mut res); - } - } - WhatAmI::Peer => { - let mut qabls = vec![]; - for res in hat!(tables).router_qabls.iter() { - for qabl in 
res_hat!(res).router_qabls.keys() { - if qabl == node { - qabls.push(res.clone()); - } - } - } - for mut res in qabls { - unregister_peer_queryable(tables, &mut res, node); - - if tables.whatami == WhatAmI::Router { - let client_qabls = res.session_ctxs.values().any(|ctx| ctx.qabl.is_some()); - let peer_qabls = remote_peer_qabls(tables, &res); - if !client_qabls && !peer_qabls { - undeclare_router_queryable(tables, None, &mut res, &tables.zid.clone()); - } else { - let local_info = local_router_qabl_info(tables, &res); - register_router_queryable(tables, None, &mut res, &local_info, tables.zid); - } - } - - let matches_query_routes = compute_matches_query_routes_(tables, &res); - for (mut res, query_routes) in matches_query_routes { - get_mut_unchecked(&mut res) - .context_mut() - .update_query_routes(query_routes); - } - Resource::clean(&mut res) - } - } - _ => (), - } -} - -pub(super) fn queries_linkstate_change(tables: &mut Tables, zid: &ZenohId, links: &[ZenohId]) { - if let Some(src_face) = tables.get_face(zid) { - if hat!(tables).router_peers_failover_brokering - && tables.whatami == WhatAmI::Router - && src_face.whatami == WhatAmI::Peer - { - for res in &face_hat!(src_face).remote_qabls { - let client_qabls = res - .session_ctxs - .values() - .any(|ctx| ctx.face.whatami == WhatAmI::Client && ctx.qabl.is_some()); - if !remote_router_qabls(tables, res) && !client_qabls { - for ctx in get_mut_unchecked(&mut res.clone()) - .session_ctxs - .values_mut() - { - let dst_face = &mut get_mut_unchecked(ctx).face; - if dst_face.whatami == WhatAmI::Peer && src_face.zid != dst_face.zid { - if face_hat!(dst_face).local_qabls.contains_key(res) { - let forget = !HatTables::failover_brokering_to(links, dst_face.zid) - && { - let ctx_links = hat!(tables) - .peers_net - .as_ref() - .map(|net| net.get_links(dst_face.zid)) - .unwrap_or_else(|| &[]); - res.session_ctxs.values().any(|ctx2| { - ctx2.face.whatami == WhatAmI::Peer - && ctx2.qabl.is_some() - && HatTables::failover_brokering_to( - ctx_links, - ctx2.face.zid, - ) - }) - }; - if forget { - let wire_expr = Resource::get_best_key(res, "", dst_face.id); - dst_face.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::UndeclareQueryable( - UndeclareQueryable { - id: 0, // TODO - ext_wire_expr: WireExprType { wire_expr }, - }, - ), - }, - res.expr(), - )); - - face_hat_mut!(dst_face).local_qabls.remove(res); - } - } else if HatTables::failover_brokering_to(links, ctx.face.zid) { - let dst_face = &mut get_mut_unchecked(ctx).face; - let info = local_qabl_info(tables, res, dst_face); - face_hat_mut!(dst_face) - .local_qabls - .insert(res.clone(), info); - let key_expr = Resource::decl_key(res, dst_face); - dst_face.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareQueryable(DeclareQueryable { - id: 0, // TODO - wire_expr: key_expr, - ext_info: info, - }), - }, - res.expr(), - )); - } - } - } - } - } - } - } -} - -pub(super) fn queries_tree_change( - tables: &mut Tables, - new_childs: &[Vec], - net_type: WhatAmI, -) { - // propagate qabls to new childs - for (tree_sid, tree_childs) in new_childs.iter().enumerate() { - if !tree_childs.is_empty() { - let net = hat!(tables).get_net(net_type).unwrap(); - let tree_idx = NodeIndex::new(tree_sid); - if 
net.graph.contains_node(tree_idx) { - let tree_id = net.graph[tree_idx].zid; - - let qabls_res = match net_type { - WhatAmI::Router => &hat!(tables).router_qabls, - _ => &hat!(tables).peer_qabls, - }; - - for res in qabls_res { - let qabls = match net_type { - WhatAmI::Router => &res_hat!(res).router_qabls, - _ => &res_hat!(res).peer_qabls, - }; - if let Some(qabl_info) = qabls.get(&tree_id) { - send_sourced_queryable_to_net_childs( - tables, - net, - tree_childs, - res, - qabl_info, - None, - tree_sid as NodeId, - ); - } - } - } +pub(super) fn queries_new_face(tables: &mut Tables, _face: &mut Arc) { + for face in tables + .faces + .values() + .cloned() + .collect::>>() + { + for qabl in face_hat!(face).remote_qabls.iter() { + propagate_simple_queryable(tables, qabl, Some(&mut face.clone())); } } - - // recompute routes - compute_query_routes_from(tables, &mut tables.root_res.clone()); -} - -#[inline] -#[allow(clippy::too_many_arguments)] -fn insert_target_for_qabls( - route: &mut QueryTargetQablSet, - expr: &mut RoutingExpr, - tables: &Tables, - net: &Network, - source: NodeId, - qabls: &HashMap, - complete: bool, -) { - if net.trees.len() > source as usize { - for (qabl, qabl_info) in qabls { - if let Some(qabl_idx) = net.get_idx(qabl) { - if net.trees[source as usize].directions.len() > qabl_idx.index() { - if let Some(direction) = net.trees[source as usize].directions[qabl_idx.index()] - { - if net.graph.contains_node(direction) { - if let Some(face) = tables.get_face(&net.graph[direction].zid) { - if net.distances.len() > qabl_idx.index() { - let key_expr = - Resource::get_best_key(expr.prefix, expr.suffix, face.id); - route.push(QueryTargetQabl { - direction: (face.clone(), key_expr.to_owned(), source), - complete: if complete { - qabl_info.complete as u64 - } else { - 0 - }, - distance: net.distances[qabl_idx.index()], - }); - } - } - } - } - } - } - } - } else { - log::trace!("Tree for node sid:{} not yet ready", source); - } } lazy_static::lazy_static! 
{ @@ -1400,28 +328,10 @@ impl HatQueriesTrait for HatCode { face: &mut Arc, expr: &WireExpr, qabl_info: &QueryableInfo, - node_id: NodeId, + _node_id: NodeId, ) { let rtables = zread!(tables.tables); - match (rtables.whatami, face.whatami) { - (WhatAmI::Router, WhatAmI::Router) => { - if let Some(router) = get_router(&rtables, face, node_id) { - declare_router_queryable(tables, rtables, face, expr, qabl_info, router) - } - } - (WhatAmI::Router, WhatAmI::Peer) - | (WhatAmI::Peer, WhatAmI::Router) - | (WhatAmI::Peer, WhatAmI::Peer) => { - if hat!(rtables).full_net(WhatAmI::Peer) { - if let Some(peer) = get_peer(&rtables, face, node_id) { - declare_peer_queryable(tables, rtables, face, expr, qabl_info, peer) - } - } else { - declare_client_queryable(tables, rtables, face, expr, qabl_info) - } - } - _ => declare_client_queryable(tables, rtables, face, expr, qabl_info), - } + declare_client_queryable(tables, rtables, face, expr, qabl_info); } fn forget_queryable( @@ -1429,28 +339,10 @@ impl HatQueriesTrait for HatCode { tables: &TablesLock, face: &mut Arc, expr: &WireExpr, - node_id: NodeId, + _node_id: NodeId, ) { let rtables = zread!(tables.tables); - match (rtables.whatami, face.whatami) { - (WhatAmI::Router, WhatAmI::Router) => { - if let Some(router) = get_router(&rtables, face, node_id) { - forget_router_queryable(tables, rtables, face, expr, &router) - } - } - (WhatAmI::Router, WhatAmI::Peer) - | (WhatAmI::Peer, WhatAmI::Router) - | (WhatAmI::Peer, WhatAmI::Peer) => { - if hat!(rtables).full_net(WhatAmI::Peer) { - if let Some(peer) = get_peer(&rtables, face, node_id) { - forget_peer_queryable(tables, rtables, face, expr, &peer) - } - } else { - forget_client_queryable(tables, rtables, face, expr) - } - } - _ => forget_client_queryable(tables, rtables, face, expr), - } + forget_client_queryable(tables, rtables, face, expr); } fn compute_query_route( @@ -1485,95 +377,21 @@ impl HatQueriesTrait for HatCode { .map(|ctx| Cow::from(&ctx.matches)) .unwrap_or_else(|| Cow::from(Resource::get_matches(tables, &key_expr))); - let master = tables.whatami != WhatAmI::Router - || !hat!(tables).full_net(WhatAmI::Peer) - || *hat!(tables).elect_router(&tables.zid, &key_expr, hat!(tables).shared_nodes.iter()) - == tables.zid; - for mres in matches.iter() { let mres = mres.upgrade().unwrap(); let complete = DEFAULT_INCLUDER.includes(mres.expr().as_bytes(), key_expr.as_bytes()); - if tables.whatami == WhatAmI::Router { - if master || source_type == WhatAmI::Router { - let net = hat!(tables).routers_net.as_ref().unwrap(); - let router_source = match source_type { - WhatAmI::Router => source, - _ => net.idx.index() as NodeId, - }; - insert_target_for_qabls( - &mut route, - expr, - tables, - net, - router_source, - &res_hat!(mres).router_qabls, - complete, - ); - } - - if (master || source_type != WhatAmI::Router) - && hat!(tables).full_net(WhatAmI::Peer) - { - let net = hat!(tables).peers_net.as_ref().unwrap(); - let peer_source = match source_type { - WhatAmI::Peer => source, - _ => net.idx.index() as NodeId, - }; - insert_target_for_qabls( - &mut route, - expr, - tables, - net, - peer_source, - &res_hat!(mres).peer_qabls, - complete, - ); - } - } - - if tables.whatami == WhatAmI::Peer && hat!(tables).full_net(WhatAmI::Peer) { - let net = hat!(tables).peers_net.as_ref().unwrap(); - let peer_source = match source_type { - WhatAmI::Router | WhatAmI::Peer => source, - _ => net.idx.index() as NodeId, - }; - insert_target_for_qabls( - &mut route, - expr, - tables, - net, - peer_source, - 
&res_hat!(mres).peer_qabls, - complete, - ); - } - - if tables.whatami != WhatAmI::Router || master || source_type == WhatAmI::Router { - for (sid, context) in &mres.session_ctxs { - if match tables.whatami { - WhatAmI::Router => context.face.whatami != WhatAmI::Router, - _ => { - source_type == WhatAmI::Client - || context.face.whatami == WhatAmI::Client - } - } { - let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, *sid); - if let Some(qabl_info) = context.qabl.as_ref() { - route.push(QueryTargetQabl { - direction: ( - context.face.clone(), - key_expr.to_owned(), - NodeId::default(), - ), - complete: if complete { - qabl_info.complete as u64 - } else { - 0 - }, - distance: 0.5, - }); - } - } + for (sid, context) in &mres.session_ctxs { + let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, *sid); + if let Some(qabl_info) = context.qabl.as_ref() { + route.push(QueryTargetQabl { + direction: (context.face.clone(), key_expr.to_owned(), NodeId::default()), + complete: if complete { + qabl_info.complete as u64 + } else { + 0 + }, + distance: 0.5, + }); } } } @@ -1610,11 +428,7 @@ impl HatQueriesTrait for HatCode { .unwrap_or_else(|| Cow::from(Resource::get_matches(tables, &key_expr))); for mres in matches.iter() { let mres = mres.upgrade().unwrap(); - if (mres.context.is_some() - && (!res_hat!(mres).router_subs.is_empty() - || !res_hat!(mres).peer_subs.is_empty())) - || mres.session_ctxs.values().any(|ctx| ctx.subs.is_some()) - { + if mres.session_ctxs.values().any(|ctx| ctx.subs.is_some()) { result.push((Resource::get_best_key(&mres, "", face.id), ZBuf::default())); } } @@ -1631,73 +445,8 @@ impl HatQueriesTrait for HatCode { client_query_route: None, }; let mut expr = RoutingExpr::new(res, ""); - if tables.whatami == WhatAmI::Router { - let indexes = hat!(tables) - .routers_net - .as_ref() - .unwrap() - .graph - .node_indices() - .collect::>(); - let max_idx = indexes.iter().max().unwrap(); - routes - .routers_query_routes - .resize_with(max_idx.index() + 1, || Arc::new(QueryTargetQablSet::new())); - - for idx in &indexes { - routes.routers_query_routes[idx.index()] = self.compute_query_route( - tables, - &mut expr, - idx.index() as NodeId, - WhatAmI::Router, - ); - } - - routes.peer_query_route = - Some(self.compute_query_route(tables, &mut expr, NodeId::default(), WhatAmI::Peer)); - } - if (tables.whatami == WhatAmI::Router || tables.whatami == WhatAmI::Peer) - && hat!(tables).full_net(WhatAmI::Peer) - { - let indexes = hat!(tables) - .peers_net - .as_ref() - .unwrap() - .graph - .node_indices() - .collect::>(); - let max_idx = indexes.iter().max().unwrap(); - routes - .peers_query_routes - .resize_with(max_idx.index() + 1, || Arc::new(QueryTargetQablSet::new())); - - for idx in &indexes { - routes.peers_query_routes[idx.index()] = self.compute_query_route( - tables, - &mut expr, - idx.index() as NodeId, - WhatAmI::Peer, - ); - } - } - if tables.whatami == WhatAmI::Peer && !hat!(tables).full_net(WhatAmI::Peer) { - routes.client_query_route = Some(self.compute_query_route( - tables, - &mut expr, - NodeId::default(), - WhatAmI::Client, - )); - routes.peer_query_route = - Some(self.compute_query_route(tables, &mut expr, NodeId::default(), WhatAmI::Peer)); - } - if tables.whatami == WhatAmI::Client { - routes.client_query_route = Some(self.compute_query_route( - tables, - &mut expr, - NodeId::default(), - WhatAmI::Client, - )); - } + routes.client_query_route = + Some(self.compute_query_route(tables, &mut expr, NodeId::default(), WhatAmI::Client)); routes } @@ -1706,83 
+455,12 @@ impl HatQueriesTrait for HatCode { let mut res_mut = res.clone(); let res_mut = get_mut_unchecked(&mut res_mut); let mut expr = RoutingExpr::new(res, ""); - if tables.whatami == WhatAmI::Router { - let indexes = hat!(tables) - .routers_net - .as_ref() - .unwrap() - .graph - .node_indices() - .collect::>(); - let max_idx = indexes.iter().max().unwrap(); - let routers_query_routes = &mut res_mut.context_mut().routers_query_routes; - routers_query_routes.clear(); - routers_query_routes - .resize_with(max_idx.index() + 1, || Arc::new(QueryTargetQablSet::new())); - - for idx in &indexes { - routers_query_routes[idx.index()] = self.compute_query_route( - tables, - &mut expr, - idx.index() as NodeId, - WhatAmI::Router, - ); - } - - res_mut.context_mut().peer_query_route = Some(self.compute_query_route( - tables, - &mut expr, - NodeId::default(), - WhatAmI::Peer, - )); - } - if (tables.whatami == WhatAmI::Router || tables.whatami == WhatAmI::Peer) - && hat!(tables).full_net(WhatAmI::Peer) - { - let indexes = hat!(tables) - .peers_net - .as_ref() - .unwrap() - .graph - .node_indices() - .collect::>(); - let max_idx = indexes.iter().max().unwrap(); - let peers_query_routes = &mut res_mut.context_mut().peers_query_routes; - peers_query_routes.clear(); - peers_query_routes - .resize_with(max_idx.index() + 1, || Arc::new(QueryTargetQablSet::new())); - - for idx in &indexes { - peers_query_routes[idx.index()] = self.compute_query_route( - tables, - &mut expr, - idx.index() as NodeId, - WhatAmI::Peer, - ); - } - } - if tables.whatami == WhatAmI::Peer && !hat!(tables).full_net(WhatAmI::Peer) { - res_mut.context_mut().client_query_route = Some(self.compute_query_route( - tables, - &mut expr, - NodeId::default(), - WhatAmI::Client, - )); - res_mut.context_mut().peer_query_route = Some(self.compute_query_route( - tables, - &mut expr, - NodeId::default(), - WhatAmI::Peer, - )); - } - if tables.whatami == WhatAmI::Client { - res_mut.context_mut().client_query_route = Some(self.compute_query_route( - tables, - &mut expr, - NodeId::default(), - WhatAmI::Client, - )); - } + res_mut.context_mut().client_query_route = Some(self.compute_query_route( + tables, + &mut expr, + NodeId::default(), + WhatAmI::Client, + )); } } } diff --git a/zenoh/src/net/routing/hat/router/mod.rs b/zenoh/src/net/routing/hat/router/mod.rs index 3cf318831b..5f7ed70645 100644 --- a/zenoh/src/net/routing/hat/router/mod.rs +++ b/zenoh/src/net/routing/hat/router/mod.rs @@ -226,7 +226,11 @@ impl HatTables { && self .peers_net .as_ref() - .map(|net| HatTables::failover_brokering_to(net.get_links(peer1), peer2)) + .map(|net| { + let links = net.get_links(peer1); + log::debug!("failover_brokering {} {} ({:?})", peer1, peer2, links); + HatTables::failover_brokering_to(links, peer2) + }) .unwrap_or(false) } diff --git a/zenoh/src/net/routing/hat/router/pubsub.rs b/zenoh/src/net/routing/hat/router/pubsub.rs index 189e6cb6e8..babc9509af 100644 --- a/zenoh/src/net/routing/hat/router/pubsub.rs +++ b/zenoh/src/net/routing/hat/router/pubsub.rs @@ -89,25 +89,13 @@ fn propagate_simple_subscription_to( ) { if (src_face.id != dst_face.id || res.expr().starts_with(PREFIX_LIVELINESS)) && !face_hat!(dst_face).local_subs.contains(res) - && match tables.whatami { - WhatAmI::Router => { - if full_peer_net { - dst_face.whatami == WhatAmI::Client - } else { - dst_face.whatami != WhatAmI::Router - && (src_face.whatami != WhatAmI::Peer - || dst_face.whatami != WhatAmI::Peer - || hat!(tables).failover_brokering(src_face.zid, dst_face.zid)) - } - } - 
WhatAmI::Peer => { - if full_peer_net { - dst_face.whatami == WhatAmI::Client - } else { - src_face.whatami == WhatAmI::Client || dst_face.whatami == WhatAmI::Client - } - } - _ => src_face.whatami == WhatAmI::Client || dst_face.whatami == WhatAmI::Client, + && if full_peer_net { + dst_face.whatami == WhatAmI::Client + } else { + dst_face.whatami != WhatAmI::Router + && (src_face.whatami != WhatAmI::Peer + || dst_face.whatami != WhatAmI::Peer + || hat!(tables).failover_brokering(src_face.zid, dst_face.zid)) } { face_hat_mut!(dst_face).local_subs.insert(res.clone()); @@ -295,11 +283,6 @@ fn register_peer_subscription( // Propagate subscription to peers propagate_sourced_subscription(tables, res, sub_info, Some(face), &peer, WhatAmI::Peer); } - - if tables.whatami == WhatAmI::Peer { - // Propagate subscription to clients - propagate_simple_subscription(tables, res, sub_info, face); - } } fn declare_peer_subscription( @@ -336,12 +319,10 @@ fn declare_peer_subscription( (res, wtables) }; register_peer_subscription(&mut wtables, face, &mut res, sub_info, peer); - if wtables.whatami == WhatAmI::Router { - let mut propa_sub_info = *sub_info; - propa_sub_info.mode = Mode::Push; - let zid = wtables.zid; - register_router_subscription(&mut wtables, face, &mut res, &propa_sub_info, zid); - } + let mut propa_sub_info = *sub_info; + propa_sub_info.mode = Mode::Push; + let zid = wtables.zid; + register_router_subscription(&mut wtables, face, &mut res, &propa_sub_info, zid); disable_matches_data_routes(&mut wtables, &mut res); drop(wtables); @@ -440,75 +421,8 @@ fn declare_client_subscription( register_client_subscription(&mut wtables, face, &mut res, sub_info); let mut propa_sub_info = *sub_info; propa_sub_info.mode = Mode::Push; - match wtables.whatami { - WhatAmI::Router => { - let zid = wtables.zid; - register_router_subscription( - &mut wtables, - face, - &mut res, - &propa_sub_info, - zid, - ); - } - WhatAmI::Peer => { - if hat!(wtables).full_net(WhatAmI::Peer) { - let zid = wtables.zid; - register_peer_subscription( - &mut wtables, - face, - &mut res, - &propa_sub_info, - zid, - ); - } else { - propagate_simple_subscription(&mut wtables, &res, &propa_sub_info, face); - // This introduced a buffer overflow on windows - // TODO: Let's deactivate this on windows until Fixed - #[cfg(not(windows))] - for mcast_group in &wtables.mcast_groups { - mcast_group - .primitives - .send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // TODO - wire_expr: res.expr().into(), - ext_info: *sub_info, - }), - }, - res.expr(), - )) - } - } - } - _ => { - propagate_simple_subscription(&mut wtables, &res, &propa_sub_info, face); - // This introduced a buffer overflow on windows - // TODO: Let's deactivate this on windows until Fixed - #[cfg(not(windows))] - for mcast_group in &wtables.mcast_groups { - mcast_group - .primitives - .send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // TODO - wire_expr: res.expr().into(), - ext_info: *sub_info, - }), - }, - res.expr(), - )) - } - } - } + let zid = wtables.zid; + register_router_subscription(&mut wtables, face, &mut res, &propa_sub_info, zid); disable_matches_data_routes(&mut wtables, &mut res); drop(wtables); @@ -778,10 +692,6 @@ 
fn unregister_peer_subscription(tables: &mut Tables, res: &mut Arc, pe hat_mut!(tables) .peer_subs .retain(|sub| !Arc::ptr_eq(sub, res)); - - if tables.whatami == WhatAmI::Peer { - propagate_forget_simple_subscription(tables, res); - } } } @@ -810,13 +720,11 @@ fn forget_peer_subscription( drop(rtables); let mut wtables = zwrite!(tables.tables); undeclare_peer_subscription(&mut wtables, Some(face), &mut res, peer); - if wtables.whatami == WhatAmI::Router { - let client_subs = res.session_ctxs.values().any(|ctx| ctx.subs.is_some()); - let peer_subs = remote_peer_subs(&wtables, &res); - let zid = wtables.zid; - if !client_subs && !peer_subs { - undeclare_router_subscription(&mut wtables, None, &mut res, &zid); - } + let client_subs = res.session_ctxs.values().any(|ctx| ctx.subs.is_some()); + let peer_subs = remote_peer_subs(&wtables, &res); + let zid = wtables.zid; + if !client_subs && !peer_subs { + undeclare_router_subscription(&mut wtables, None, &mut res, &zid); } disable_matches_data_routes(&mut wtables, &mut res); drop(wtables); @@ -853,28 +761,10 @@ pub(super) fn undeclare_client_subscription( let mut client_subs = client_subs(res); let router_subs = remote_router_subs(tables, res); let peer_subs = remote_peer_subs(tables, res); - match tables.whatami { - WhatAmI::Router => { - if client_subs.is_empty() && !peer_subs { - undeclare_router_subscription(tables, None, res, &tables.zid.clone()); - } else { - propagate_forget_simple_subscription_to_peers(tables, res); - } - } - WhatAmI::Peer => { - if client_subs.is_empty() { - if hat!(tables).full_net(WhatAmI::Peer) { - undeclare_peer_subscription(tables, None, res, &tables.zid.clone()); - } else { - propagate_forget_simple_subscription(tables, res); - } - } - } - _ => { - if client_subs.is_empty() { - propagate_forget_simple_subscription(tables, res); - } - } + if client_subs.is_empty() && !peer_subs { + undeclare_router_subscription(tables, None, res, &tables.zid.clone()); + } else { + propagate_forget_simple_subscription_to_peers(tables, res); } if client_subs.len() == 1 && !router_subs && !peer_subs { let face = &mut client_subs[0]; @@ -939,115 +829,51 @@ pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { reliability: Reliability::Reliable, // @TODO mode: Mode::Push, }; - match tables.whatami { - WhatAmI::Router => { - if face.whatami == WhatAmI::Client { - for sub in &hat!(tables).router_subs { - face_hat_mut!(face).local_subs.insert(sub.clone()); - let key_expr = Resource::decl_key(sub, face); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // TODO - wire_expr: key_expr, - ext_info: sub_info, - }), - }, - sub.expr(), - )); - } - } else if face.whatami == WhatAmI::Peer && !hat!(tables).full_net(WhatAmI::Peer) { - for sub in &hat!(tables).router_subs { - if sub.context.is_some() - && (res_hat!(sub).router_subs.iter().any(|r| *r != tables.zid) - || sub.session_ctxs.values().any(|s| { - s.subs.is_some() - && (s.face.whatami == WhatAmI::Client - || (s.face.whatami == WhatAmI::Peer - && hat!(tables) - .failover_brokering(s.face.zid, face.zid))) - })) - { - face_hat_mut!(face).local_subs.insert(sub.clone()); - let key_expr = Resource::decl_key(sub, face); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - 
body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // TODO - wire_expr: key_expr, - ext_info: sub_info, - }), - }, - sub.expr(), - )); - } - } - } - } - WhatAmI::Peer => { - if hat!(tables).full_net(WhatAmI::Peer) { - if face.whatami == WhatAmI::Client { - for sub in &hat!(tables).peer_subs { - face_hat_mut!(face).local_subs.insert(sub.clone()); - let key_expr = Resource::decl_key(sub, face); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // TODO - wire_expr: key_expr, - ext_info: sub_info, - }), - }, - sub.expr(), - )); - } - } - } else { - for src_face in tables - .faces - .values() - .cloned() - .collect::>>() - { - for sub in &face_hat!(src_face).remote_subs { - propagate_simple_subscription_to( - tables, - face, - sub, - &sub_info, - &mut src_face.clone(), - false, - ); - } - } - } + + if face.whatami == WhatAmI::Client { + for sub in &hat!(tables).router_subs { + face_hat_mut!(face).local_subs.insert(sub.clone()); + let key_expr = Resource::decl_key(sub, face); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id: 0, // TODO + wire_expr: key_expr, + ext_info: sub_info, + }), + }, + sub.expr(), + )); } - WhatAmI::Client => { - for src_face in tables - .faces - .values() - .cloned() - .collect::>>() + } else if face.whatami == WhatAmI::Peer && !hat!(tables).full_net(WhatAmI::Peer) { + for sub in &hat!(tables).router_subs { + if sub.context.is_some() + && (res_hat!(sub).router_subs.iter().any(|r| *r != tables.zid) + || sub.session_ctxs.values().any(|s| { + s.subs.is_some() + && (s.face.whatami == WhatAmI::Client + || (s.face.whatami == WhatAmI::Peer + && hat!(tables).failover_brokering(s.face.zid, face.zid))) + })) { - for sub in &face_hat!(src_face).remote_subs { - propagate_simple_subscription_to( - tables, - face, - sub, - &sub_info, - &mut src_face.clone(), - false, - ); - } + face_hat_mut!(face).local_subs.insert(sub.clone()); + let key_expr = Resource::decl_key(sub, face); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id: 0, // TODO + wire_expr: key_expr, + ext_info: sub_info, + }), + }, + sub.expr(), + )); } } } @@ -1083,13 +909,10 @@ pub(super) fn pubsub_remove_node(tables: &mut Tables, node: &ZenohId, net_type: .collect::>>() { unregister_peer_subscription(tables, &mut res, node); - - if tables.whatami == WhatAmI::Router { - let client_subs = res.session_ctxs.values().any(|ctx| ctx.subs.is_some()); - let peer_subs = remote_peer_subs(tables, &res); - if !client_subs && !peer_subs { - undeclare_router_subscription(tables, None, &mut res, &tables.zid.clone()); - } + let client_subs = res.session_ctxs.values().any(|ctx| ctx.subs.is_some()); + let peer_subs = remote_peer_subs(tables, &res); + if !client_subs && !peer_subs { + undeclare_router_subscription(tables, None, &mut res, &tables.zid.clone()); } // compute_matches_data_routes(tables, &mut res); @@ -1157,10 +980,7 @@ pub(super) fn pubsub_tree_change( pub(super) fn pubsub_linkstate_change(tables: &mut Tables, zid: &ZenohId, links: &[ZenohId]) { if 
let Some(src_face) = tables.get_face(zid).cloned() { - if hat!(tables).router_peers_failover_brokering - && tables.whatami == WhatAmI::Router - && src_face.whatami == WhatAmI::Peer - { + if hat!(tables).router_peers_failover_brokering && src_face.whatami == WhatAmI::Peer { for res in &face_hat!(src_face).remote_subs { let client_subs = res .session_ctxs @@ -1282,15 +1102,13 @@ impl HatPubSubTrait for HatCode { node_id: NodeId, ) { let rtables = zread!(tables.tables); - match (rtables.whatami, face.whatami) { - (WhatAmI::Router, WhatAmI::Router) => { + match face.whatami { + WhatAmI::Router => { if let Some(router) = get_router(&rtables, face, node_id) { declare_router_subscription(tables, rtables, face, expr, sub_info, router) } } - (WhatAmI::Router, WhatAmI::Peer) - | (WhatAmI::Peer, WhatAmI::Router) - | (WhatAmI::Peer, WhatAmI::Peer) => { + WhatAmI::Peer => { if hat!(rtables).full_net(WhatAmI::Peer) { if let Some(peer) = get_peer(&rtables, face, node_id) { declare_peer_subscription(tables, rtables, face, expr, sub_info, peer) @@ -1311,15 +1129,13 @@ impl HatPubSubTrait for HatCode { node_id: NodeId, ) { let rtables = zread!(tables.tables); - match (rtables.whatami, face.whatami) { - (WhatAmI::Router, WhatAmI::Router) => { + match face.whatami { + WhatAmI::Router => { if let Some(router) = get_router(&rtables, face, node_id) { forget_router_subscription(tables, rtables, face, expr, &router) } } - (WhatAmI::Router, WhatAmI::Peer) - | (WhatAmI::Peer, WhatAmI::Router) - | (WhatAmI::Peer, WhatAmI::Peer) => { + WhatAmI::Peer => { if hat!(rtables).full_net(WhatAmI::Peer) { if let Some(peer) = get_peer(&rtables, face, node_id) { forget_peer_subscription(tables, rtables, face, expr, &peer) @@ -1364,53 +1180,33 @@ impl HatPubSubTrait for HatCode { .map(|ctx| Cow::from(&ctx.matches)) .unwrap_or_else(|| Cow::from(Resource::get_matches(tables, &key_expr))); - let master = tables.whatami != WhatAmI::Router - || !hat!(tables).full_net(WhatAmI::Peer) + let master = !hat!(tables).full_net(WhatAmI::Peer) || *hat!(tables).elect_router(&tables.zid, &key_expr, hat!(tables).shared_nodes.iter()) == tables.zid; for mres in matches.iter() { let mres = mres.upgrade().unwrap(); - if tables.whatami == WhatAmI::Router { - if master || source_type == WhatAmI::Router { - let net = hat!(tables).routers_net.as_ref().unwrap(); - let router_source = match source_type { - WhatAmI::Router => source, - _ => net.idx.index() as NodeId, - }; - insert_faces_for_subs( - &mut route, - expr, - tables, - net, - router_source, - &res_hat!(mres).router_subs, - ); - } - if (master || source_type != WhatAmI::Router) - && hat!(tables).full_net(WhatAmI::Peer) - { - let net = hat!(tables).peers_net.as_ref().unwrap(); - let peer_source = match source_type { - WhatAmI::Peer => source, - _ => net.idx.index() as NodeId, - }; - insert_faces_for_subs( - &mut route, - expr, - tables, - net, - peer_source, - &res_hat!(mres).peer_subs, - ); - } + if master || source_type == WhatAmI::Router { + let net = hat!(tables).routers_net.as_ref().unwrap(); + let router_source = match source_type { + WhatAmI::Router => source, + _ => net.idx.index() as NodeId, + }; + insert_faces_for_subs( + &mut route, + expr, + tables, + net, + router_source, + &res_hat!(mres).router_subs, + ); } - if tables.whatami == WhatAmI::Peer && hat!(tables).full_net(WhatAmI::Peer) { + if (master || source_type != WhatAmI::Router) && hat!(tables).full_net(WhatAmI::Peer) { let net = hat!(tables).peers_net.as_ref().unwrap(); let peer_source = match source_type { - WhatAmI::Router | 
WhatAmI::Peer => source, + WhatAmI::Peer => source, _ => net.idx.index() as NodeId, }; insert_faces_for_subs( @@ -1423,17 +1219,10 @@ impl HatPubSubTrait for HatCode { ); } - if tables.whatami != WhatAmI::Router || master || source_type == WhatAmI::Router { + if master || source_type == WhatAmI::Router { for (sid, context) in &mres.session_ctxs { if let Some(subinfo) = &context.subs { - if match tables.whatami { - WhatAmI::Router => context.face.whatami != WhatAmI::Router, - _ => { - source_type == WhatAmI::Client - || context.face.whatami == WhatAmI::Client - } - } && subinfo.mode == Mode::Push - { + if context.face.whatami != WhatAmI::Router && subinfo.mode == Mode::Push { route.entry(*sid).or_insert_with(|| { let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, *sid); @@ -1493,34 +1282,28 @@ impl HatPubSubTrait for HatCode { client_data_route: None, }; let mut expr = RoutingExpr::new(res, ""); - if tables.whatami == WhatAmI::Router { - let indexes = hat!(tables) - .routers_net - .as_ref() - .unwrap() - .graph - .node_indices() - .collect::>(); - let max_idx = indexes.iter().max().unwrap(); - routes - .routers_data_routes - .resize_with(max_idx.index() + 1, || Arc::new(HashMap::new())); - for idx in &indexes { - routes.routers_data_routes[idx.index()] = self.compute_data_route( - tables, - &mut expr, - idx.index() as NodeId, - WhatAmI::Router, - ); - } + let indexes = hat!(tables) + .routers_net + .as_ref() + .unwrap() + .graph + .node_indices() + .collect::>(); + let max_idx = indexes.iter().max().unwrap(); + routes + .routers_data_routes + .resize_with(max_idx.index() + 1, || Arc::new(HashMap::new())); - routes.peer_data_route = - Some(self.compute_data_route(tables, &mut expr, NodeId::default(), WhatAmI::Peer)); + for idx in &indexes { + routes.routers_data_routes[idx.index()] = + self.compute_data_route(tables, &mut expr, idx.index() as NodeId, WhatAmI::Router); } - if (tables.whatami == WhatAmI::Router || tables.whatami == WhatAmI::Peer) - && hat!(tables).full_net(WhatAmI::Peer) - { + + routes.peer_data_route = + Some(self.compute_data_route(tables, &mut expr, NodeId::default(), WhatAmI::Peer)); + + if hat!(tables).full_net(WhatAmI::Peer) { let indexes = hat!(tables) .peers_net .as_ref() @@ -1542,24 +1325,6 @@ impl HatPubSubTrait for HatCode { ); } } - if tables.whatami == WhatAmI::Peer && !hat!(tables).full_net(WhatAmI::Peer) { - routes.client_data_route = Some(self.compute_data_route( - tables, - &mut expr, - NodeId::default(), - WhatAmI::Client, - )); - routes.peer_data_route = - Some(self.compute_data_route(tables, &mut expr, NodeId::default(), WhatAmI::Peer)); - } - if tables.whatami == WhatAmI::Client { - routes.client_data_route = Some(self.compute_data_route( - tables, - &mut expr, - NodeId::default(), - WhatAmI::Client, - )); - } routes.matching_pulls = Some(self.compute_matching_pulls(tables, &mut expr)); routes } @@ -1569,38 +1334,32 @@ impl HatPubSubTrait for HatCode { let mut res_mut = res.clone(); let res_mut = get_mut_unchecked(&mut res_mut); let mut expr = RoutingExpr::new(res, ""); - if tables.whatami == WhatAmI::Router { - let indexes = hat!(tables) - .routers_net - .as_ref() - .unwrap() - .graph - .node_indices() - .collect::>(); - let max_idx = indexes.iter().max().unwrap(); - let routers_data_routes = &mut res_mut.context_mut().routers_data_routes; - routers_data_routes.clear(); - routers_data_routes.resize_with(max_idx.index() + 1, || Arc::new(HashMap::new())); - for idx in &indexes { - routers_data_routes[idx.index()] = self.compute_data_route( - 
tables, - &mut expr, - idx.index() as NodeId, - WhatAmI::Router, - ); - } + let indexes = hat!(tables) + .routers_net + .as_ref() + .unwrap() + .graph + .node_indices() + .collect::>(); + let max_idx = indexes.iter().max().unwrap(); + let routers_data_routes = &mut res_mut.context_mut().routers_data_routes; + routers_data_routes.clear(); + routers_data_routes.resize_with(max_idx.index() + 1, || Arc::new(HashMap::new())); - res_mut.context_mut().peer_data_route = Some(self.compute_data_route( + for idx in &indexes { + routers_data_routes[idx.index()] = self.compute_data_route( tables, &mut expr, - NodeId::default(), - WhatAmI::Peer, - )); + idx.index() as NodeId, + WhatAmI::Router, + ); } - if (tables.whatami == WhatAmI::Router || tables.whatami == WhatAmI::Peer) - && hat!(tables).full_net(WhatAmI::Peer) - { + + res_mut.context_mut().peer_data_route = + Some(self.compute_data_route(tables, &mut expr, NodeId::default(), WhatAmI::Peer)); + + if hat!(tables).full_net(WhatAmI::Peer) { let indexes = hat!(tables) .peers_net .as_ref() @@ -1622,28 +1381,6 @@ impl HatPubSubTrait for HatCode { ); } } - if tables.whatami == WhatAmI::Peer && !hat!(tables).full_net(WhatAmI::Peer) { - res_mut.context_mut().client_data_route = Some(self.compute_data_route( - tables, - &mut expr, - NodeId::default(), - WhatAmI::Client, - )); - res_mut.context_mut().peer_data_route = Some(self.compute_data_route( - tables, - &mut expr, - NodeId::default(), - WhatAmI::Peer, - )); - } - if tables.whatami == WhatAmI::Client { - res_mut.context_mut().client_data_route = Some(self.compute_data_route( - tables, - &mut expr, - NodeId::default(), - WhatAmI::Client, - )); - } res_mut.context_mut().matching_pulls = self.compute_matching_pulls(tables, &mut expr); } } diff --git a/zenoh/src/net/routing/hat/router/queries.rs b/zenoh/src/net/routing/hat/router/queries.rs index c1093a8a00..7cc7dc7e02 100644 --- a/zenoh/src/net/routing/hat/router/queries.rs +++ b/zenoh/src/net/routing/hat/router/queries.rs @@ -95,7 +95,7 @@ fn local_router_qabl_info(tables: &Tables, res: &Arc) -> QueryableInfo } fn local_peer_qabl_info(tables: &Tables, res: &Arc) -> QueryableInfo { - let info = if tables.whatami == WhatAmI::Router && res.context.is_some() { + let info = if res.context.is_some() { res_hat!(res) .router_qabls .iter() @@ -131,7 +131,7 @@ fn local_peer_qabl_info(tables: &Tables, res: &Arc) -> QueryableInfo { } fn local_qabl_info(tables: &Tables, res: &Arc, face: &Arc) -> QueryableInfo { - let mut info = if tables.whatami == WhatAmI::Router && res.context.is_some() { + let mut info = if res.context.is_some() { res_hat!(res) .router_qabls .iter() @@ -243,35 +243,15 @@ fn propagate_simple_queryable( let current_info = face_hat!(dst_face).local_qabls.get(res); if (src_face.is_none() || src_face.as_ref().unwrap().id != dst_face.id) && (current_info.is_none() || *current_info.unwrap() != info) - && match tables.whatami { - WhatAmI::Router => { - if full_peers_net { - dst_face.whatami == WhatAmI::Client - } else { - dst_face.whatami != WhatAmI::Router - && (src_face.is_none() - || src_face.as_ref().unwrap().whatami != WhatAmI::Peer - || dst_face.whatami != WhatAmI::Peer - || hat!(tables).failover_brokering( - src_face.as_ref().unwrap().zid, - dst_face.zid, - )) - } - } - WhatAmI::Peer => { - if full_peers_net { - dst_face.whatami == WhatAmI::Client - } else { - src_face.is_none() - || src_face.as_ref().unwrap().whatami == WhatAmI::Client - || dst_face.whatami == WhatAmI::Client - } - } - _ => { - src_face.is_none() - || 
src_face.as_ref().unwrap().whatami == WhatAmI::Client - || dst_face.whatami == WhatAmI::Client - } + && if full_peers_net { + dst_face.whatami == WhatAmI::Client + } else { + dst_face.whatami != WhatAmI::Router + && (src_face.is_none() + || src_face.as_ref().unwrap().whatami != WhatAmI::Peer + || dst_face.whatami != WhatAmI::Peer + || hat!(tables) + .failover_brokering(src_face.as_ref().unwrap().zid, dst_face.zid)) } { face_hat_mut!(&mut dst_face) @@ -432,7 +412,7 @@ fn declare_router_queryable( fn register_peer_queryable( tables: &mut Tables, - mut face: Option<&mut Arc>, + face: Option<&mut Arc>, res: &mut Arc, qabl_info: &QueryableInfo, peer: ZenohId, @@ -447,19 +427,7 @@ fn register_peer_queryable( } // Propagate queryable to peers - propagate_sourced_queryable( - tables, - res, - qabl_info, - face.as_deref_mut(), - &peer, - WhatAmI::Peer, - ); - } - - if tables.whatami == WhatAmI::Peer { - // Propagate queryable to clients - propagate_simple_queryable(tables, res, face); + propagate_sourced_queryable(tables, res, qabl_info, face, &peer, WhatAmI::Peer); } } @@ -499,11 +467,9 @@ fn declare_peer_queryable( }; let mut face = Some(face); register_peer_queryable(&mut wtables, face.as_deref_mut(), &mut res, qabl_info, peer); - if wtables.whatami == WhatAmI::Router { - let local_info = local_router_qabl_info(&wtables, &res); - let zid = wtables.zid; - register_router_queryable(&mut wtables, face, &mut res, &local_info, zid); - } + let local_info = local_router_qabl_info(&wtables, &res); + let zid = wtables.zid; + register_router_queryable(&mut wtables, face, &mut res, &local_info, zid); disable_matches_query_routes(&mut wtables, &mut res); drop(wtables); @@ -583,38 +549,9 @@ fn declare_client_queryable( }; register_client_queryable(&mut wtables, face, &mut res, qabl_info); - - match wtables.whatami { - WhatAmI::Router => { - let local_details = local_router_qabl_info(&wtables, &res); - let zid = wtables.zid; - register_router_queryable( - &mut wtables, - Some(face), - &mut res, - &local_details, - zid, - ); - } - WhatAmI::Peer => { - if hat!(wtables).full_net(WhatAmI::Peer) { - let local_details = local_peer_qabl_info(&wtables, &res); - let zid = wtables.zid; - register_peer_queryable( - &mut wtables, - Some(face), - &mut res, - &local_details, - zid, - ); - } else { - propagate_simple_queryable(&mut wtables, &res, Some(face)); - } - } - _ => { - propagate_simple_queryable(&mut wtables, &res, Some(face)); - } - } + let local_details = local_router_qabl_info(&wtables, &res); + let zid = wtables.zid; + register_router_queryable(&mut wtables, Some(face), &mut res, &local_details, zid); disable_matches_query_routes(&mut wtables, &mut res); drop(wtables); @@ -882,10 +819,6 @@ fn unregister_peer_queryable(tables: &mut Tables, res: &mut Arc, peer: hat_mut!(tables) .peer_qabls .retain(|qabl| !Arc::ptr_eq(qabl, res)); - - if tables.whatami == WhatAmI::Peer { - propagate_forget_simple_queryable(tables, res); - } } } @@ -915,16 +848,14 @@ fn forget_peer_queryable( let mut wtables = zwrite!(tables.tables); undeclare_peer_queryable(&mut wtables, Some(face), &mut res, peer); - if wtables.whatami == WhatAmI::Router { - let client_qabls = res.session_ctxs.values().any(|ctx| ctx.qabl.is_some()); - let peer_qabls = remote_peer_qabls(&wtables, &res); - let zid = wtables.zid; - if !client_qabls && !peer_qabls { - undeclare_router_queryable(&mut wtables, None, &mut res, &zid); - } else { - let local_info = local_router_qabl_info(&wtables, &res); - register_router_queryable(&mut wtables, None, &mut res, 
&local_info, zid); - } + let client_qabls = res.session_ctxs.values().any(|ctx| ctx.qabl.is_some()); + let peer_qabls = remote_peer_qabls(&wtables, &res); + let zid = wtables.zid; + if !client_qabls && !peer_qabls { + undeclare_router_queryable(&mut wtables, None, &mut res, &zid); + } else { + let local_info = local_router_qabl_info(&wtables, &res); + register_router_queryable(&mut wtables, None, &mut res, &local_info, zid); } drop(wtables); @@ -964,37 +895,12 @@ pub(super) fn undeclare_client_queryable( let router_qabls = remote_router_qabls(tables, res); let peer_qabls = remote_peer_qabls(tables, res); - match tables.whatami { - WhatAmI::Router => { - if client_qabls.is_empty() && !peer_qabls { - undeclare_router_queryable(tables, None, res, &tables.zid.clone()); - } else { - let local_info = local_router_qabl_info(tables, res); - register_router_queryable(tables, None, res, &local_info, tables.zid); - propagate_forget_simple_queryable_to_peers(tables, res); - } - } - WhatAmI::Peer => { - if hat!(tables).full_net(WhatAmI::Peer) { - if client_qabls.is_empty() { - undeclare_peer_queryable(tables, None, res, &tables.zid.clone()); - } else { - let local_info = local_peer_qabl_info(tables, res); - register_peer_queryable(tables, None, res, &local_info, tables.zid); - } - } else if client_qabls.is_empty() { - propagate_forget_simple_queryable(tables, res); - } else { - propagate_simple_queryable(tables, res, None); - } - } - _ => { - if client_qabls.is_empty() { - propagate_forget_simple_queryable(tables, res); - } else { - propagate_simple_queryable(tables, res, None); - } - } + if client_qabls.is_empty() && !peer_qabls { + undeclare_router_queryable(tables, None, res, &tables.zid.clone()); + } else { + let local_info = local_router_qabl_info(tables, res); + register_router_queryable(tables, None, res, &local_info, tables.zid); + propagate_forget_simple_queryable_to_peers(tables, res); } if client_qabls.len() == 1 && !router_qabls && !peer_qabls { @@ -1054,108 +960,54 @@ fn forget_client_queryable( } pub(super) fn queries_new_face(tables: &mut Tables, face: &mut Arc) { - match tables.whatami { - WhatAmI::Router => { - if face.whatami == WhatAmI::Client { - for qabl in hat!(tables).router_qabls.iter() { - if qabl.context.is_some() { - let info = local_qabl_info(tables, qabl, face); - face_hat_mut!(face).local_qabls.insert(qabl.clone(), info); - let key_expr = Resource::decl_key(qabl, face); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareQueryable(DeclareQueryable { - id: 0, // TODO - wire_expr: key_expr, - ext_info: info, - }), - }, - qabl.expr(), - )); - } - } - } else if face.whatami == WhatAmI::Peer && !hat!(tables).full_net(WhatAmI::Peer) { - for qabl in hat!(tables).router_qabls.iter() { - if qabl.context.is_some() - && (res_hat!(qabl).router_qabls.keys().any(|r| *r != tables.zid) - || qabl.session_ctxs.values().any(|s| { - s.qabl.is_some() - && (s.face.whatami == WhatAmI::Client - || (s.face.whatami == WhatAmI::Peer - && hat!(tables) - .failover_brokering(s.face.zid, face.zid))) - })) - { - let info = local_qabl_info(tables, qabl, face); - face_hat_mut!(face).local_qabls.insert(qabl.clone(), info); - let key_expr = Resource::decl_key(qabl, face); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: 
DeclareBody::DeclareQueryable(DeclareQueryable { - id: 0, // TODO - wire_expr: key_expr, - ext_info: info, - }), - }, - qabl.expr(), - )); - } - } - } - } - WhatAmI::Peer => { - if hat!(tables).full_net(WhatAmI::Peer) { - if face.whatami == WhatAmI::Client { - for qabl in &hat!(tables).peer_qabls { - if qabl.context.is_some() { - let info = local_qabl_info(tables, qabl, face); - face_hat_mut!(face).local_qabls.insert(qabl.clone(), info); - let key_expr = Resource::decl_key(qabl, face); - face.primitives.send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareQueryable(DeclareQueryable { - id: 0, // TODO - wire_expr: key_expr, - ext_info: info, - }), - }, - qabl.expr(), - )); - } - } - } - } else { - for face in tables - .faces - .values() - .cloned() - .collect::>>() - { - for qabl in face_hat!(face).remote_qabls.iter() { - propagate_simple_queryable(tables, qabl, Some(&mut face.clone())); - } - } + if face.whatami == WhatAmI::Client { + for qabl in hat!(tables).router_qabls.iter() { + if qabl.context.is_some() { + let info = local_qabl_info(tables, qabl, face); + face_hat_mut!(face).local_qabls.insert(qabl.clone(), info); + let key_expr = Resource::decl_key(qabl, face); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id: 0, // TODO + wire_expr: key_expr, + ext_info: info, + }), + }, + qabl.expr(), + )); } } - WhatAmI::Client => { - for face in tables - .faces - .values() - .cloned() - .collect::>>() + } else if face.whatami == WhatAmI::Peer && !hat!(tables).full_net(WhatAmI::Peer) { + for qabl in hat!(tables).router_qabls.iter() { + if qabl.context.is_some() + && (res_hat!(qabl).router_qabls.keys().any(|r| *r != tables.zid) + || qabl.session_ctxs.values().any(|s| { + s.qabl.is_some() + && (s.face.whatami == WhatAmI::Client + || (s.face.whatami == WhatAmI::Peer + && hat!(tables).failover_brokering(s.face.zid, face.zid))) + })) { - for qabl in face_hat!(face).remote_qabls.iter() { - propagate_simple_queryable(tables, qabl, Some(&mut face.clone())); - } + let info = local_qabl_info(tables, qabl, face); + face_hat_mut!(face).local_qabls.insert(qabl.clone(), info); + let key_expr = Resource::decl_key(qabl, face); + face.primitives.send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareQueryable(DeclareQueryable { + id: 0, // TODO + wire_expr: key_expr, + ext_info: info, + }), + }, + qabl.expr(), + )); } } } @@ -1196,15 +1048,13 @@ pub(super) fn queries_remove_node(tables: &mut Tables, node: &ZenohId, net_type: for mut res in qabls { unregister_peer_queryable(tables, &mut res, node); - if tables.whatami == WhatAmI::Router { - let client_qabls = res.session_ctxs.values().any(|ctx| ctx.qabl.is_some()); - let peer_qabls = remote_peer_qabls(tables, &res); - if !client_qabls && !peer_qabls { - undeclare_router_queryable(tables, None, &mut res, &tables.zid.clone()); - } else { - let local_info = local_router_qabl_info(tables, &res); - register_router_queryable(tables, None, &mut res, &local_info, tables.zid); - } + let client_qabls = res.session_ctxs.values().any(|ctx| ctx.qabl.is_some()); + let peer_qabls = remote_peer_qabls(tables, &res); + if !client_qabls 
&& !peer_qabls { + undeclare_router_queryable(tables, None, &mut res, &tables.zid.clone()); + } else { + let local_info = local_router_qabl_info(tables, &res); + register_router_queryable(tables, None, &mut res, &local_info, tables.zid); } let matches_query_routes = compute_matches_query_routes_(tables, &res); @@ -1222,10 +1072,7 @@ pub(super) fn queries_remove_node(tables: &mut Tables, node: &ZenohId, net_type: pub(super) fn queries_linkstate_change(tables: &mut Tables, zid: &ZenohId, links: &[ZenohId]) { if let Some(src_face) = tables.get_face(zid) { - if hat!(tables).router_peers_failover_brokering - && tables.whatami == WhatAmI::Router - && src_face.whatami == WhatAmI::Peer - { + if hat!(tables).router_peers_failover_brokering && src_face.whatami == WhatAmI::Peer { for res in &face_hat!(src_face).remote_qabls { let client_qabls = res .session_ctxs @@ -1403,15 +1250,13 @@ impl HatQueriesTrait for HatCode { node_id: NodeId, ) { let rtables = zread!(tables.tables); - match (rtables.whatami, face.whatami) { - (WhatAmI::Router, WhatAmI::Router) => { + match face.whatami { + WhatAmI::Router => { if let Some(router) = get_router(&rtables, face, node_id) { declare_router_queryable(tables, rtables, face, expr, qabl_info, router) } } - (WhatAmI::Router, WhatAmI::Peer) - | (WhatAmI::Peer, WhatAmI::Router) - | (WhatAmI::Peer, WhatAmI::Peer) => { + WhatAmI::Peer => { if hat!(rtables).full_net(WhatAmI::Peer) { if let Some(peer) = get_peer(&rtables, face, node_id) { declare_peer_queryable(tables, rtables, face, expr, qabl_info, peer) @@ -1432,15 +1277,13 @@ impl HatQueriesTrait for HatCode { node_id: NodeId, ) { let rtables = zread!(tables.tables); - match (rtables.whatami, face.whatami) { - (WhatAmI::Router, WhatAmI::Router) => { + match face.whatami { + WhatAmI::Router => { if let Some(router) = get_router(&rtables, face, node_id) { forget_router_queryable(tables, rtables, face, expr, &router) } } - (WhatAmI::Router, WhatAmI::Peer) - | (WhatAmI::Peer, WhatAmI::Router) - | (WhatAmI::Peer, WhatAmI::Peer) => { + WhatAmI::Peer => { if hat!(rtables).full_net(WhatAmI::Peer) { if let Some(peer) = get_peer(&rtables, face, node_id) { forget_peer_queryable(tables, rtables, face, expr, &peer) @@ -1485,56 +1328,34 @@ impl HatQueriesTrait for HatCode { .map(|ctx| Cow::from(&ctx.matches)) .unwrap_or_else(|| Cow::from(Resource::get_matches(tables, &key_expr))); - let master = tables.whatami != WhatAmI::Router - || !hat!(tables).full_net(WhatAmI::Peer) + let master = !hat!(tables).full_net(WhatAmI::Peer) || *hat!(tables).elect_router(&tables.zid, &key_expr, hat!(tables).shared_nodes.iter()) == tables.zid; for mres in matches.iter() { let mres = mres.upgrade().unwrap(); let complete = DEFAULT_INCLUDER.includes(mres.expr().as_bytes(), key_expr.as_bytes()); - if tables.whatami == WhatAmI::Router { - if master || source_type == WhatAmI::Router { - let net = hat!(tables).routers_net.as_ref().unwrap(); - let router_source = match source_type { - WhatAmI::Router => source, - _ => net.idx.index() as NodeId, - }; - insert_target_for_qabls( - &mut route, - expr, - tables, - net, - router_source, - &res_hat!(mres).router_qabls, - complete, - ); - } - - if (master || source_type != WhatAmI::Router) - && hat!(tables).full_net(WhatAmI::Peer) - { - let net = hat!(tables).peers_net.as_ref().unwrap(); - let peer_source = match source_type { - WhatAmI::Peer => source, - _ => net.idx.index() as NodeId, - }; - insert_target_for_qabls( - &mut route, - expr, - tables, - net, - peer_source, - &res_hat!(mres).peer_qabls, - complete, - ); 
- } + if master || source_type == WhatAmI::Router { + let net = hat!(tables).routers_net.as_ref().unwrap(); + let router_source = match source_type { + WhatAmI::Router => source, + _ => net.idx.index() as NodeId, + }; + insert_target_for_qabls( + &mut route, + expr, + tables, + net, + router_source, + &res_hat!(mres).router_qabls, + complete, + ); } - if tables.whatami == WhatAmI::Peer && hat!(tables).full_net(WhatAmI::Peer) { + if (master || source_type != WhatAmI::Router) && hat!(tables).full_net(WhatAmI::Peer) { let net = hat!(tables).peers_net.as_ref().unwrap(); let peer_source = match source_type { - WhatAmI::Router | WhatAmI::Peer => source, + WhatAmI::Peer => source, _ => net.idx.index() as NodeId, }; insert_target_for_qabls( @@ -1548,15 +1369,9 @@ impl HatQueriesTrait for HatCode { ); } - if tables.whatami != WhatAmI::Router || master || source_type == WhatAmI::Router { + if master || source_type == WhatAmI::Router { for (sid, context) in &mres.session_ctxs { - if match tables.whatami { - WhatAmI::Router => context.face.whatami != WhatAmI::Router, - _ => { - source_type == WhatAmI::Client - || context.face.whatami == WhatAmI::Client - } - } { + if context.face.whatami != WhatAmI::Router { let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, *sid); if let Some(qabl_info) = context.qabl.as_ref() { route.push(QueryTargetQabl { @@ -1631,34 +1446,28 @@ impl HatQueriesTrait for HatCode { client_query_route: None, }; let mut expr = RoutingExpr::new(res, ""); - if tables.whatami == WhatAmI::Router { - let indexes = hat!(tables) - .routers_net - .as_ref() - .unwrap() - .graph - .node_indices() - .collect::>(); - let max_idx = indexes.iter().max().unwrap(); - routes - .routers_query_routes - .resize_with(max_idx.index() + 1, || Arc::new(QueryTargetQablSet::new())); - for idx in &indexes { - routes.routers_query_routes[idx.index()] = self.compute_query_route( - tables, - &mut expr, - idx.index() as NodeId, - WhatAmI::Router, - ); - } + let indexes = hat!(tables) + .routers_net + .as_ref() + .unwrap() + .graph + .node_indices() + .collect::>(); + let max_idx = indexes.iter().max().unwrap(); + routes + .routers_query_routes + .resize_with(max_idx.index() + 1, || Arc::new(QueryTargetQablSet::new())); - routes.peer_query_route = - Some(self.compute_query_route(tables, &mut expr, NodeId::default(), WhatAmI::Peer)); + for idx in &indexes { + routes.routers_query_routes[idx.index()] = + self.compute_query_route(tables, &mut expr, idx.index() as NodeId, WhatAmI::Router); } - if (tables.whatami == WhatAmI::Router || tables.whatami == WhatAmI::Peer) - && hat!(tables).full_net(WhatAmI::Peer) - { + + routes.peer_query_route = + Some(self.compute_query_route(tables, &mut expr, NodeId::default(), WhatAmI::Peer)); + + if hat!(tables).full_net(WhatAmI::Peer) { let indexes = hat!(tables) .peers_net .as_ref() @@ -1680,24 +1489,6 @@ impl HatQueriesTrait for HatCode { ); } } - if tables.whatami == WhatAmI::Peer && !hat!(tables).full_net(WhatAmI::Peer) { - routes.client_query_route = Some(self.compute_query_route( - tables, - &mut expr, - NodeId::default(), - WhatAmI::Client, - )); - routes.peer_query_route = - Some(self.compute_query_route(tables, &mut expr, NodeId::default(), WhatAmI::Peer)); - } - if tables.whatami == WhatAmI::Client { - routes.client_query_route = Some(self.compute_query_route( - tables, - &mut expr, - NodeId::default(), - WhatAmI::Client, - )); - } routes } @@ -1706,39 +1497,33 @@ impl HatQueriesTrait for HatCode { let mut res_mut = res.clone(); let res_mut = 
get_mut_unchecked(&mut res_mut); let mut expr = RoutingExpr::new(res, ""); - if tables.whatami == WhatAmI::Router { - let indexes = hat!(tables) - .routers_net - .as_ref() - .unwrap() - .graph - .node_indices() - .collect::>(); - let max_idx = indexes.iter().max().unwrap(); - let routers_query_routes = &mut res_mut.context_mut().routers_query_routes; - routers_query_routes.clear(); - routers_query_routes - .resize_with(max_idx.index() + 1, || Arc::new(QueryTargetQablSet::new())); - for idx in &indexes { - routers_query_routes[idx.index()] = self.compute_query_route( - tables, - &mut expr, - idx.index() as NodeId, - WhatAmI::Router, - ); - } + let indexes = hat!(tables) + .routers_net + .as_ref() + .unwrap() + .graph + .node_indices() + .collect::>(); + let max_idx = indexes.iter().max().unwrap(); + let routers_query_routes = &mut res_mut.context_mut().routers_query_routes; + routers_query_routes.clear(); + routers_query_routes + .resize_with(max_idx.index() + 1, || Arc::new(QueryTargetQablSet::new())); - res_mut.context_mut().peer_query_route = Some(self.compute_query_route( + for idx in &indexes { + routers_query_routes[idx.index()] = self.compute_query_route( tables, &mut expr, - NodeId::default(), - WhatAmI::Peer, - )); + idx.index() as NodeId, + WhatAmI::Router, + ); } - if (tables.whatami == WhatAmI::Router || tables.whatami == WhatAmI::Peer) - && hat!(tables).full_net(WhatAmI::Peer) - { + + res_mut.context_mut().peer_query_route = + Some(self.compute_query_route(tables, &mut expr, NodeId::default(), WhatAmI::Peer)); + + if hat!(tables).full_net(WhatAmI::Peer) { let indexes = hat!(tables) .peers_net .as_ref() @@ -1761,28 +1546,6 @@ impl HatQueriesTrait for HatCode { ); } } - if tables.whatami == WhatAmI::Peer && !hat!(tables).full_net(WhatAmI::Peer) { - res_mut.context_mut().client_query_route = Some(self.compute_query_route( - tables, - &mut expr, - NodeId::default(), - WhatAmI::Client, - )); - res_mut.context_mut().peer_query_route = Some(self.compute_query_route( - tables, - &mut expr, - NodeId::default(), - WhatAmI::Peer, - )); - } - if tables.whatami == WhatAmI::Client { - res_mut.context_mut().client_query_route = Some(self.compute_query_route( - tables, - &mut expr, - NodeId::default(), - WhatAmI::Client, - )); - } } } } From 7c8732c11d9def3a26fcead1fcf4d93e23a875c2 Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Fri, 22 Dec 2023 13:56:56 +0100 Subject: [PATCH 022/122] Reintroduce routes precomputation --- zenoh/src/net/routing/dispatcher/pubsub.rs | 126 ++--------- zenoh/src/net/routing/dispatcher/queries.rs | 125 ++--------- zenoh/src/net/routing/dispatcher/resource.rs | 206 ++++++------------ zenoh/src/net/routing/hat/client/mod.rs | 12 +- zenoh/src/net/routing/hat/client/pubsub.rs | 49 +++-- zenoh/src/net/routing/hat/client/queries.rs | 51 +++-- .../src/net/routing/hat/linkstate_peer/mod.rs | 12 +- .../net/routing/hat/linkstate_peer/pubsub.rs | 49 +++-- .../net/routing/hat/linkstate_peer/queries.rs | 47 +++- zenoh/src/net/routing/hat/p2p_peer/mod.rs | 12 +- zenoh/src/net/routing/hat/p2p_peer/pubsub.rs | 54 +++-- zenoh/src/net/routing/hat/p2p_peer/queries.rs | 56 +++-- zenoh/src/net/routing/hat/router/mod.rs | 12 +- zenoh/src/net/routing/hat/router/pubsub.rs | 40 ++-- zenoh/src/net/routing/hat/router/queries.rs | 40 ++-- 15 files changed, 411 insertions(+), 480 deletions(-) diff --git a/zenoh/src/net/routing/dispatcher/pubsub.rs b/zenoh/src/net/routing/dispatcher/pubsub.rs index 33118be161..68291fcbd7 100644 --- a/zenoh/src/net/routing/dispatcher/pubsub.rs +++ 
b/zenoh/src/net/routing/dispatcher/pubsub.rs @@ -13,7 +13,7 @@ // use super::face::FaceState; use super::resource::{DataRoutes, Direction, PullCaches, Resource}; -use super::tables::{NodeId, RoutingExpr, Tables}; +use super::tables::{NodeId, Route, RoutingExpr, Tables}; use crate::net::routing::RoutingContext; use std::sync::Arc; use std::sync::RwLock; @@ -56,13 +56,13 @@ pub(crate) fn compute_matches_data_routes_<'a>( pub(crate) fn disable_matches_data_routes(_tables: &mut Tables, res: &mut Arc) { if res.context.is_some() { - get_mut_unchecked(res).context_mut().valid_data_routes = false; + get_mut_unchecked(res).context_mut().disable_data_routes(); for match_ in &res.context().matches { let mut match_ = match_.upgrade().unwrap(); if !Arc::ptr_eq(&match_, res) { get_mut_unchecked(&mut match_) .context_mut() - .valid_data_routes = false; + .disable_data_routes(); } } } @@ -104,95 +104,25 @@ macro_rules! treat_timestamp { } } -// #[inline] -// fn get_data_route( -// tables: &Tables, -// face: &FaceState, -// res: &Option>, -// expr: &mut RoutingExpr, -// routing_context: NodeId, -// ) -> Arc { -// let local_context = map_routing_context(tables, face, routing_context); -// match tables.whatami { -// WhatAmI::Router => match face.whatami { -// WhatAmI::Router => { -// res.as_ref() -// .and_then(|res| res.routers_data_route(local_context)) -// .unwrap_or_else(|| { -// compute_data_route(tables, expr, local_context, face.whatami) -// }) -// } -// WhatAmI::Peer => { -// if tables.hat.full_net(WhatAmI::Peer) { -// res.as_ref() -// .and_then(|res| res.peers_data_route(local_context)) -// .unwrap_or_else(|| { -// compute_data_route(tables, expr, local_context, face.whatami) -// }) -// } else { -// res.as_ref() -// .and_then(|res| res.peer_data_route()) -// .unwrap_or_else(|| { -// compute_data_route( -// tables, -// expr, -// NodeId::default(), -// face.whatami, -// ) -// }) -// } -// } -// _ => res -// .as_ref() -// .and_then(|res| res.routers_data_route(NodeId::default())) -// .unwrap_or_else(|| { -// compute_data_route(tables, expr, NodeId::default(), face.whatami) -// }), -// }, -// WhatAmI::Peer => { -// if tables.hat.full_net(WhatAmI::Peer) { -// match face.whatami { -// WhatAmI::Router | WhatAmI::Peer => { -// let peers_net = tables.hat.peers_net.as_ref().unwrap(); -// let local_context = -// peers_net.get_local_context(routing_context, face.link_id); -// res.as_ref() -// .and_then(|res| res.peers_data_route(local_context)) -// .unwrap_or_else(|| { -// compute_data_route(tables, expr, local_context, face.whatami) -// }) -// } -// _ => res -// .as_ref() -// .and_then(|res| res.peers_data_route(NodeId::default())) -// .unwrap_or_else(|| { -// compute_data_route( -// tables, -// expr, -// NodeId::default(), -// face.whatami, -// ) -// }), -// } -// } else { -// res.as_ref() -// .and_then(|res| match face.whatami { -// WhatAmI::Client => res.client_data_route(), -// _ => res.peer_data_route(), -// }) -// .unwrap_or_else(|| { -// compute_data_route(tables, expr, NodeId::default(), face.whatami) -// }) -// } -// } -// _ => res -// .as_ref() -// .and_then(|res| res.client_data_route()) -// .unwrap_or_else(|| { -// compute_data_route(tables, expr, NodeId::default(), face.whatami) -// }), -// } -// } +#[inline] +fn get_data_route( + tables: &Tables, + face: &FaceState, + res: &Option>, + expr: &mut RoutingExpr, + routing_context: NodeId, +) -> Arc { + let local_context = tables + .hat_code + .map_routing_context(tables, face, routing_context); + res.as_ref() + .and_then(|res| 
res.data_route(face.whatami, local_context)) + .unwrap_or_else(|| { + tables + .hat_code + .compute_data_route(tables, expr, local_context, face.whatami) + }) +} #[inline] fn get_matching_pulls( @@ -276,17 +206,7 @@ pub fn full_reentrant_route_data( if tables.hat_code.ingress_filter(&tables, face, &mut expr) { let res = Resource::get_resource(&prefix, expr.suffix); - // let route = get_data_route(&tables, face, &res, &mut expr, routing_context); - let local_context = - tables - .hat_code - .map_routing_context(&tables, face, routing_context); - let route = tables.hat_code.compute_data_route( - &tables, - &mut expr, - local_context, - face.whatami, - ); + let route = get_data_route(&tables, face, &res, &mut expr, routing_context); let matching_pulls = get_matching_pulls(&tables, &res, &mut expr); diff --git a/zenoh/src/net/routing/dispatcher/queries.rs b/zenoh/src/net/routing/dispatcher/queries.rs index b29cd36c22..5016005fe5 100644 --- a/zenoh/src/net/routing/dispatcher/queries.rs +++ b/zenoh/src/net/routing/dispatcher/queries.rs @@ -247,110 +247,37 @@ impl Timed for QueryCleanup { pub(crate) fn disable_matches_query_routes(_tables: &mut Tables, res: &mut Arc) { if res.context.is_some() { - get_mut_unchecked(res).context_mut().valid_query_routes = false; + get_mut_unchecked(res).context_mut().disable_query_routes(); for match_ in &res.context().matches { let mut match_ = match_.upgrade().unwrap(); if !Arc::ptr_eq(&match_, res) { get_mut_unchecked(&mut match_) .context_mut() - .valid_query_routes = false; + .disable_query_routes(); } } } } -// #[inline] -// fn get_query_route( -// tables: &Tables, -// face: &FaceState, -// res: &Option>, -// expr: &mut RoutingExpr, -// routing_context: NodeId, -// ) -> Arc { -// match tables.whatami { -// WhatAmI::Router => match face.whatami { -// WhatAmI::Router => { -// let routers_net = tables.hat.routers_net.as_ref().unwrap(); -// let local_context = routers_net.get_local_context(routing_context, face.link_id); -// res.as_ref() -// .and_then(|res| res.routers_query_route(local_context)) -// .unwrap_or_else(|| { -// compute_query_route(tables, expr, local_context, face.whatami) -// }) -// } -// WhatAmI::Peer => { -// if tables.hat.full_net(WhatAmI::Peer) { -// let peers_net = tables.hat.peers_net.as_ref().unwrap(); -// let local_context = peers_net.get_local_context(routing_context, face.link_id); -// res.as_ref() -// .and_then(|res| res.peers_query_route(local_context)) -// .unwrap_or_else(|| { -// compute_query_route(tables, expr, local_context, face.whatami) -// }) -// } else { -// res.as_ref() -// .and_then(|res| res.peer_query_route()) -// .unwrap_or_else(|| { -// compute_query_route( -// tables, -// expr, -// NodeId::default(), -// face.whatami, -// ) -// }) -// } -// } -// _ => res -// .as_ref() -// .and_then(|res| res.routers_query_route(NodeId::default())) -// .unwrap_or_else(|| { -// compute_query_route(tables, expr, NodeId::default(), face.whatami) -// }), -// }, -// WhatAmI::Peer => { -// if tables.hat.full_net(WhatAmI::Peer) { -// match face.whatami { -// WhatAmI::Router | WhatAmI::Peer => { -// let peers_net = tables.hat.peers_net.as_ref().unwrap(); -// let local_context = -// peers_net.get_local_context(routing_context, face.link_id); -// res.as_ref() -// .and_then(|res| res.peers_query_route(local_context)) -// .unwrap_or_else(|| { -// compute_query_route(tables, expr, local_context, face.whatami) -// }) -// } -// _ => res -// .as_ref() -// .and_then(|res| res.peers_query_route(NodeId::default())) -// .unwrap_or_else(|| { -// 
compute_query_route( -// tables, -// expr, -// NodeId::default(), -// face.whatami, -// ) -// }), -// } -// } else { -// res.as_ref() -// .and_then(|res| match face.whatami { -// WhatAmI::Client => res.client_query_route(), -// _ => res.peer_query_route(), -// }) -// .unwrap_or_else(|| { -// compute_query_route(tables, expr, NodeId::default(), face.whatami) -// }) -// } -// } -// _ => res -// .as_ref() -// .and_then(|res| res.client_query_route()) -// .unwrap_or_else(|| { -// compute_query_route(tables, expr, NodeId::default(), face.whatami) -// }), -// } -// } +#[inline] +fn get_query_route( + tables: &Tables, + face: &FaceState, + res: &Option>, + expr: &mut RoutingExpr, + routing_context: NodeId, +) -> Arc { + let local_context = tables + .hat_code + .map_routing_context(tables, face, routing_context); + res.as_ref() + .and_then(|res| res.query_route(face.whatami, local_context)) + .unwrap_or_else(|| { + tables + .hat_code + .compute_query_route(tables, expr, local_context, face.whatami) + }) +} #[cfg(feature = "stats")] macro_rules! inc_req_stats { @@ -450,13 +377,9 @@ pub fn route_query( } if rtables.hat_code.ingress_filter(&rtables, face, &mut expr) { - // let res = Resource::get_resource(&prefix, expr.suffix); - let route = rtables.hat_code.compute_query_route( - &rtables, - &mut expr, - routing_context, - face.whatami, - ); + let res = Resource::get_resource(&prefix, expr.suffix); + + let route = get_query_route(&rtables, face, &res, &mut expr, routing_context); let query = Arc::new(Query { src_face: face.clone(), diff --git a/zenoh/src/net/routing/dispatcher/resource.rs b/zenoh/src/net/routing/dispatcher/resource.rs index 202a104922..00b7b00667 100644 --- a/zenoh/src/net/routing/dispatcher/resource.rs +++ b/zenoh/src/net/routing/dispatcher/resource.rs @@ -20,6 +20,7 @@ use std::collections::HashMap; use std::convert::TryInto; use std::hash::{Hash, Hasher}; use std::sync::{Arc, Weak}; +use zenoh_config::WhatAmI; #[cfg(feature = "complete_n")] use zenoh_protocol::network::request::ext::TargetType; use zenoh_protocol::network::RequestId; @@ -61,19 +62,52 @@ pub(crate) struct SessionContext { pub(crate) last_values: HashMap, } +#[derive(Default)] pub(crate) struct DataRoutes { - pub(crate) matching_pulls: Option>, - pub(crate) routers_data_routes: Vec>, - pub(crate) peers_data_routes: Vec>, - pub(crate) peer_data_route: Option>, - pub(crate) client_data_route: Option>, + pub(crate) routers: Vec>, + pub(crate) peers: Vec>, + pub(crate) clients: Vec>, } +impl DataRoutes { + #[inline] + pub(crate) fn get_route(&self, whatami: WhatAmI, context: NodeId) -> Option> { + match whatami { + WhatAmI::Router => (self.routers.len() > context as usize) + .then(|| self.routers[context as usize].clone()), + WhatAmI::Peer => { + (self.peers.len() > context as usize).then(|| self.peers[context as usize].clone()) + } + WhatAmI::Client => (self.clients.len() > context as usize) + .then(|| self.clients[context as usize].clone()), + } + } +} + +#[derive(Default)] pub(crate) struct QueryRoutes { - pub(crate) routers_query_routes: Vec>, - pub(crate) peers_query_routes: Vec>, - pub(crate) peer_query_route: Option>, - pub(crate) client_query_route: Option>, + pub(crate) routers: Vec>, + pub(crate) peers: Vec>, + pub(crate) clients: Vec>, +} + +impl QueryRoutes { + #[inline] + pub(crate) fn get_route( + &self, + whatami: WhatAmI, + context: NodeId, + ) -> Option> { + match whatami { + WhatAmI::Router => (self.routers.len() > context as usize) + .then(|| self.routers[context as usize].clone()), + WhatAmI::Peer 
=> { + (self.peers.len() > context as usize).then(|| self.peers[context as usize].clone()) + } + WhatAmI::Client => (self.clients.len() > context as usize) + .then(|| self.clients[context as usize].clone()), + } + } } pub(crate) struct ResourceContext { @@ -81,15 +115,9 @@ pub(crate) struct ResourceContext { pub(crate) matching_pulls: Arc, pub(crate) hat: Box, pub(crate) valid_data_routes: bool, - pub(crate) routers_data_routes: Vec>, - pub(crate) peers_data_routes: Vec>, - pub(crate) peer_data_route: Option>, - pub(crate) client_data_route: Option>, + pub(crate) data_routes: DataRoutes, pub(crate) valid_query_routes: bool, - pub(crate) routers_query_routes: Vec>, - pub(crate) peers_query_routes: Vec>, - pub(crate) peer_query_route: Option>, - pub(crate) client_query_route: Option>, + pub(crate) query_routes: QueryRoutes, } impl ResourceContext { @@ -99,35 +127,31 @@ impl ResourceContext { matching_pulls: Arc::new(Vec::new()), hat, valid_data_routes: false, - routers_data_routes: Vec::new(), - peers_data_routes: Vec::new(), - peer_data_route: None, - client_data_route: None, + data_routes: DataRoutes::default(), valid_query_routes: false, - routers_query_routes: Vec::new(), - peers_query_routes: Vec::new(), - peer_query_route: None, - client_query_route: None, + query_routes: QueryRoutes::default(), } } pub(crate) fn update_data_routes(&mut self, data_routes: DataRoutes) { self.valid_data_routes = true; - if let Some(matching_pulls) = data_routes.matching_pulls { - self.matching_pulls = matching_pulls; - } - self.routers_data_routes = data_routes.routers_data_routes; - self.peers_data_routes = data_routes.peers_data_routes; - self.peer_data_route = data_routes.peer_data_route; - self.client_data_route = data_routes.client_data_route; + // if let Some(matching_pulls) = data_routes.matching_pulls { + // self.matching_pulls = matching_pulls; + // } + self.data_routes = data_routes; + } + + pub(crate) fn disable_data_routes(&mut self) { + self.valid_data_routes = false; } pub(crate) fn update_query_routes(&mut self, query_routes: QueryRoutes) { self.valid_query_routes = true; - self.routers_query_routes = query_routes.routers_query_routes; - self.peers_query_routes = query_routes.peers_query_routes; - self.peer_query_route = query_routes.peer_query_route; - self.client_query_route = query_routes.client_query_route; + self.query_routes = query_routes + } + + pub(crate) fn disable_query_routes(&mut self) { + self.valid_query_routes = false; } } @@ -206,57 +230,31 @@ impl Resource { } } - #[inline(always)] - pub fn routers_data_route(&self, context: NodeId) -> Option> { - match &self.context { - Some(ctx) => { - if ctx.valid_data_routes { - (ctx.routers_data_routes.len() > context as usize) - .then(|| ctx.routers_data_routes[context as usize].clone()) - } else { - None - } - } - - None => None, - } - } - - #[inline(always)] - pub fn peers_data_route(&self, context: NodeId) -> Option> { + #[inline] + pub(crate) fn data_route(&self, whatami: WhatAmI, context: NodeId) -> Option> { match &self.context { Some(ctx) => { if ctx.valid_data_routes { - (ctx.peers_data_routes.len() > context as usize) - .then(|| ctx.peers_data_routes[context as usize].clone()) + ctx.data_routes.get_route(whatami, context) } else { None } } - None => None, - } - } - #[inline(always)] - pub fn peer_data_route(&self) -> Option> { - match &self.context { - Some(ctx) => { - if ctx.valid_data_routes { - ctx.peer_data_route.clone() - } else { - None - } - } None => None, } } #[inline(always)] - pub fn client_data_route(&self) -> 
Option> { + pub(crate) fn query_route( + &self, + whatami: WhatAmI, + context: NodeId, + ) -> Option> { match &self.context { Some(ctx) => { - if ctx.valid_data_routes { - ctx.client_data_route.clone() + if ctx.valid_query_routes { + ctx.query_routes.get_route(whatami, context) } else { None } @@ -265,70 +263,6 @@ impl Resource { } } - // #[inline(always)] - // pub(crate) fn routers_query_route( - // &self, - // context: NodeId, - // ) -> Option> { - // match &self.context { - // Some(ctx) => { - // if ctx.valid_query_routes { - // (ctx.routers_query_routes.len() > context as usize) - // .then(|| ctx.routers_query_routes[context as usize].clone()) - // } else { - // None - // } - // } - // None => None, - // } - // } - - // #[inline(always)] - // pub(crate) fn peers_query_route( - // &self, - // context: NodeId, - // ) -> Option> { - // match &self.context { - // Some(ctx) => { - // if ctx.valid_query_routes { - // (ctx.peers_query_routes.len() > context as usize) - // .then(|| ctx.peers_query_routes[context as usize].clone()) - // } else { - // None - // } - // } - // None => None, - // } - // } - - // #[inline(always)] - // pub(crate) fn peer_query_route(&self) -> Option> { - // match &self.context { - // Some(ctx) => { - // if ctx.valid_query_routes { - // ctx.peer_query_route.clone() - // } else { - // None - // } - // } - // None => None, - // } - // } - - // #[inline(always)] - // pub(crate) fn client_query_route(&self) -> Option> { - // match &self.context { - // Some(ctx) => { - // if ctx.valid_query_routes { - // ctx.client_query_route.clone() - // } else { - // None - // } - // } - // None => None, - // } - // } - pub fn root() -> Arc { Arc::new(Resource { parent: None, diff --git a/zenoh/src/net/routing/hat/client/mod.rs b/zenoh/src/net/routing/hat/client/mod.rs index 615954da60..0a7179e59d 100644 --- a/zenoh/src/net/routing/hat/client/mod.rs +++ b/zenoh/src/net/routing/hat/client/mod.rs @@ -142,11 +142,13 @@ impl HatBaseTrait for HatCode { if !Arc::ptr_eq(&match_, &res) { get_mut_unchecked(&mut match_) .context_mut() - .valid_data_routes = false; + .disable_data_routes(); subs_matches.push(match_); } } - get_mut_unchecked(&mut res).context_mut().valid_data_routes = false; + get_mut_unchecked(&mut res) + .context_mut() + .disable_data_routes(); subs_matches.push(res); } } @@ -168,11 +170,13 @@ impl HatBaseTrait for HatCode { if !Arc::ptr_eq(&match_, &res) { get_mut_unchecked(&mut match_) .context_mut() - .valid_query_routes = false; + .disable_query_routes(); qabls_matches.push(match_); } } - get_mut_unchecked(&mut res).context_mut().valid_query_routes = false; + get_mut_unchecked(&mut res) + .context_mut() + .disable_query_routes(); qabls_matches.push(res); } } diff --git a/zenoh/src/net/routing/hat/client/pubsub.rs b/zenoh/src/net/routing/hat/client/pubsub.rs index 8c501c5897..9bbb6f9619 100644 --- a/zenoh/src/net/routing/hat/client/pubsub.rs +++ b/zenoh/src/net/routing/hat/client/pubsub.rs @@ -442,17 +442,21 @@ impl HatPubSubTrait for HatCode { } fn compute_data_routes_(&self, tables: &Tables, res: &Arc) -> DataRoutes { - let mut routes = DataRoutes { - matching_pulls: None, - routers_data_routes: vec![], - peers_data_routes: vec![], - peer_data_route: None, - client_data_route: None, - }; + let mut routes = DataRoutes::default(); let mut expr = RoutingExpr::new(res, ""); - routes.client_data_route = - Some(self.compute_data_route(tables, &mut expr, NodeId::default(), WhatAmI::Client)); - routes.matching_pulls = Some(self.compute_matching_pulls(tables, &mut expr)); + + let 
route = self.compute_data_route(tables, &mut expr, NodeId::default(), WhatAmI::Client); + + routes.routers.resize_with(1, || Arc::new(HashMap::new())); + routes.routers[0] = route.clone(); + + routes.peers.resize_with(1, || Arc::new(HashMap::new())); + routes.peers[0] = route.clone(); + + routes.clients.resize_with(1, || Arc::new(HashMap::new())); + routes.clients[0] = route; + + // routes.matching_pulls = Some(self.compute_matching_pulls(tables, &mut expr)); routes } @@ -462,14 +466,25 @@ impl HatPubSubTrait for HatCode { let res_mut = get_mut_unchecked(&mut res_mut); let mut expr = RoutingExpr::new(res, ""); - res_mut.context_mut().client_data_route = Some(self.compute_data_route( - tables, - &mut expr, - NodeId::default(), - WhatAmI::Client, - )); + let route = + self.compute_data_route(tables, &mut expr, NodeId::default(), WhatAmI::Client); + + let routers_data_routes = &mut res_mut.context_mut().data_routes.routers; + routers_data_routes.clear(); + routers_data_routes.resize_with(1, || Arc::new(HashMap::new())); + routers_data_routes[0] = route.clone(); + + let peers_data_routes = &mut res_mut.context_mut().data_routes.peers; + peers_data_routes.clear(); + peers_data_routes.resize_with(1, || Arc::new(HashMap::new())); + peers_data_routes[0] = route.clone(); + + let clients_data_routes = &mut res_mut.context_mut().data_routes.clients; + clients_data_routes.clear(); + clients_data_routes.resize_with(1, || Arc::new(HashMap::new())); + clients_data_routes[0] = route; - res_mut.context_mut().matching_pulls = self.compute_matching_pulls(tables, &mut expr); + // res_mut.context_mut().matching_pulls = self.compute_matching_pulls(tables, &mut expr); } } } diff --git a/zenoh/src/net/routing/hat/client/queries.rs b/zenoh/src/net/routing/hat/client/queries.rs index 41ef4feb0d..55e2bf7765 100644 --- a/zenoh/src/net/routing/hat/client/queries.rs +++ b/zenoh/src/net/routing/hat/client/queries.rs @@ -438,15 +438,26 @@ impl HatQueriesTrait for HatCode { } fn compute_query_routes_(&self, tables: &Tables, res: &Arc) -> QueryRoutes { - let mut routes = QueryRoutes { - routers_query_routes: vec![], - peers_query_routes: vec![], - peer_query_route: None, - client_query_route: None, - }; + let mut routes = QueryRoutes::default(); let mut expr = RoutingExpr::new(res, ""); - routes.client_query_route = - Some(self.compute_query_route(tables, &mut expr, NodeId::default(), WhatAmI::Client)); + + let route = self.compute_query_route(tables, &mut expr, NodeId::default(), WhatAmI::Client); + + routes + .routers + .resize_with(1, || Arc::new(QueryTargetQablSet::new())); + routes.routers[0] = route.clone(); + + routes + .peers + .resize_with(1, || Arc::new(QueryTargetQablSet::new())); + routes.peers[0] = route.clone(); + + routes + .clients + .resize_with(1, || Arc::new(QueryTargetQablSet::new())); + routes.clients[0] = route; + routes } @@ -455,12 +466,24 @@ impl HatQueriesTrait for HatCode { let mut res_mut = res.clone(); let res_mut = get_mut_unchecked(&mut res_mut); let mut expr = RoutingExpr::new(res, ""); - res_mut.context_mut().client_query_route = Some(self.compute_query_route( - tables, - &mut expr, - NodeId::default(), - WhatAmI::Client, - )); + + let route = + self.compute_query_route(tables, &mut expr, NodeId::default(), WhatAmI::Client); + + let routers_query_routes = &mut res_mut.context_mut().query_routes.routers; + routers_query_routes.clear(); + routers_query_routes.resize_with(1, || Arc::new(QueryTargetQablSet::new())); + routers_query_routes[0] = route.clone(); + + let peers_query_routes = &mut 
res_mut.context_mut().query_routes.peers; + peers_query_routes.clear(); + peers_query_routes.resize_with(1, || Arc::new(QueryTargetQablSet::new())); + peers_query_routes[0] = route.clone(); + + let clients_query_routes = &mut res_mut.context_mut().query_routes.clients; + clients_query_routes.clear(); + clients_query_routes.resize_with(1, || Arc::new(QueryTargetQablSet::new())); + clients_query_routes[0] = route; } } } diff --git a/zenoh/src/net/routing/hat/linkstate_peer/mod.rs b/zenoh/src/net/routing/hat/linkstate_peer/mod.rs index 4b20278d1d..5bdc1a3f6a 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/mod.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/mod.rs @@ -258,11 +258,13 @@ impl HatBaseTrait for HatCode { if !Arc::ptr_eq(&match_, &res) { get_mut_unchecked(&mut match_) .context_mut() - .valid_data_routes = false; + .disable_data_routes(); subs_matches.push(match_); } } - get_mut_unchecked(&mut res).context_mut().valid_data_routes = false; + get_mut_unchecked(&mut res) + .context_mut() + .disable_data_routes(); subs_matches.push(res); } } @@ -284,11 +286,13 @@ impl HatBaseTrait for HatCode { if !Arc::ptr_eq(&match_, &res) { get_mut_unchecked(&mut match_) .context_mut() - .valid_query_routes = false; + .disable_query_routes(); qabls_matches.push(match_); } } - get_mut_unchecked(&mut res).context_mut().valid_query_routes = false; + get_mut_unchecked(&mut res) + .context_mut() + .disable_query_routes(); qabls_matches.push(res); } } diff --git a/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs b/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs index 40bd6d24d7..4089aba970 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs @@ -861,13 +861,7 @@ impl HatPubSubTrait for HatCode { } fn compute_data_routes_(&self, tables: &Tables, res: &Arc) -> DataRoutes { - let mut routes = DataRoutes { - matching_pulls: None, - routers_data_routes: vec![], - peers_data_routes: vec![], - peer_data_route: None, - client_data_route: None, - }; + let mut routes = DataRoutes::default(); let mut expr = RoutingExpr::new(res, ""); let indexes = hat!(tables) .peers_net @@ -878,15 +872,22 @@ impl HatPubSubTrait for HatCode { .collect::>(); let max_idx = indexes.iter().max().unwrap(); routes - .peers_data_routes + .routers + .resize_with(max_idx.index() + 1, || Arc::new(HashMap::new())); + routes + .peers .resize_with(max_idx.index() + 1, || Arc::new(HashMap::new())); for idx in &indexes { - routes.peers_data_routes[idx.index()] = + let route = self.compute_data_route(tables, &mut expr, idx.index() as NodeId, WhatAmI::Peer); + routes.routers[idx.index()] = route.clone(); + routes.peers[idx.index()] = route; } + routes.clients.resize_with(1, || Arc::new(HashMap::new())); + routes.clients[0] = self.compute_data_route(tables, &mut expr, 0, WhatAmI::Peer); - routes.matching_pulls = Some(self.compute_matching_pulls(tables, &mut expr)); + // routes.matching_pulls = Some(self.compute_matching_pulls(tables, &mut expr)); routes } @@ -903,20 +904,38 @@ impl HatPubSubTrait for HatCode { .node_indices() .collect::>(); let max_idx = indexes.iter().max().unwrap(); - let peers_data_routes = &mut res_mut.context_mut().peers_data_routes; - peers_data_routes.clear(); - peers_data_routes.resize_with(max_idx.index() + 1, || Arc::new(HashMap::new())); + + res_mut.context_mut().data_routes.routers.clear(); + res_mut + .context_mut() + .data_routes + .routers + .resize_with(max_idx.index() + 1, || Arc::new(HashMap::new())); + + 
res_mut.context_mut().data_routes.peers.clear(); + res_mut + .context_mut() + .data_routes + .peers + .resize_with(max_idx.index() + 1, || Arc::new(HashMap::new())); for idx in &indexes { - peers_data_routes[idx.index()] = self.compute_data_route( + let route = self.compute_data_route( tables, &mut expr, idx.index() as NodeId, WhatAmI::Peer, ); + res_mut.context_mut().data_routes.routers[idx.index()] = route.clone(); + res_mut.context_mut().data_routes.peers[idx.index()] = route; } - res_mut.context_mut().matching_pulls = self.compute_matching_pulls(tables, &mut expr); + let clients_data_routes = &mut res_mut.context_mut().data_routes.clients; + clients_data_routes.clear(); + clients_data_routes.resize_with(1, || Arc::new(HashMap::new())); + clients_data_routes[0] = self.compute_data_route(tables, &mut expr, 0, WhatAmI::Peer); + + // res_mut.context_mut().matching_pulls = self.compute_matching_pulls(tables, &mut expr); } } } diff --git a/zenoh/src/net/routing/hat/linkstate_peer/queries.rs b/zenoh/src/net/routing/hat/linkstate_peer/queries.rs index 42917b350c..da7b94f945 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/queries.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/queries.rs @@ -941,12 +941,7 @@ impl HatQueriesTrait for HatCode { } fn compute_query_routes_(&self, tables: &Tables, res: &Arc) -> QueryRoutes { - let mut routes = QueryRoutes { - routers_query_routes: vec![], - peers_query_routes: vec![], - peer_query_route: None, - client_query_route: None, - }; + let mut routes = QueryRoutes::default(); let mut expr = RoutingExpr::new(res, ""); let indexes = hat!(tables) .peers_net @@ -956,14 +951,25 @@ impl HatQueriesTrait for HatCode { .node_indices() .collect::>(); let max_idx = indexes.iter().max().unwrap(); + + routes + .routers + .resize_with(max_idx.index() + 1, || Arc::new(QueryTargetQablSet::new())); routes - .peers_query_routes + .peers .resize_with(max_idx.index() + 1, || Arc::new(QueryTargetQablSet::new())); for idx in &indexes { - routes.peers_query_routes[idx.index()] = + let route = self.compute_query_route(tables, &mut expr, idx.index() as NodeId, WhatAmI::Peer); + routes.routers[idx.index()] = route.clone(); + routes.peers[idx.index()] = route; } + routes + .clients + .resize_with(1, || Arc::new(QueryTargetQablSet::new())); + routes.clients[0] = self.compute_query_route(tables, &mut expr, 0, WhatAmI::Peer); + routes } @@ -981,19 +987,36 @@ impl HatQueriesTrait for HatCode { .node_indices() .collect::>(); let max_idx = indexes.iter().max().unwrap(); - let peers_query_routes = &mut res_mut.context_mut().peers_query_routes; - peers_query_routes.clear(); - peers_query_routes + + res_mut.context_mut().query_routes.routers.clear(); + res_mut + .context_mut() + .query_routes + .routers + .resize_with(max_idx.index() + 1, || Arc::new(QueryTargetQablSet::new())); + + res_mut.context_mut().query_routes.peers.clear(); + res_mut + .context_mut() + .query_routes + .peers .resize_with(max_idx.index() + 1, || Arc::new(QueryTargetQablSet::new())); for idx in &indexes { - peers_query_routes[idx.index()] = self.compute_query_route( + let route = self.compute_query_route( tables, &mut expr, idx.index() as NodeId, WhatAmI::Peer, ); + res_mut.context_mut().query_routes.routers[idx.index()] = route.clone(); + res_mut.context_mut().query_routes.peers[idx.index()] = route; } + + let clients_query_routes = &mut res_mut.context_mut().query_routes.clients; + clients_query_routes.clear(); + clients_query_routes.resize_with(1, || Arc::new(QueryTargetQablSet::new())); + 
clients_query_routes[0] = self.compute_query_route(tables, &mut expr, 0, WhatAmI::Peer); } } } diff --git a/zenoh/src/net/routing/hat/p2p_peer/mod.rs b/zenoh/src/net/routing/hat/p2p_peer/mod.rs index 3e74672903..b03ef98dc0 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/mod.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/mod.rs @@ -186,11 +186,13 @@ impl HatBaseTrait for HatCode { if !Arc::ptr_eq(&match_, &res) { get_mut_unchecked(&mut match_) .context_mut() - .valid_data_routes = false; + .disable_data_routes(); subs_matches.push(match_); } } - get_mut_unchecked(&mut res).context_mut().valid_data_routes = false; + get_mut_unchecked(&mut res) + .context_mut() + .disable_data_routes(); subs_matches.push(res); } } @@ -212,11 +214,13 @@ impl HatBaseTrait for HatCode { if !Arc::ptr_eq(&match_, &res) { get_mut_unchecked(&mut match_) .context_mut() - .valid_query_routes = false; + .disable_query_routes(); qabls_matches.push(match_); } } - get_mut_unchecked(&mut res).context_mut().valid_query_routes = false; + get_mut_unchecked(&mut res) + .context_mut() + .disable_query_routes(); qabls_matches.push(res); } } diff --git a/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs b/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs index 8c501c5897..abf7ee9948 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs @@ -442,17 +442,23 @@ impl HatPubSubTrait for HatCode { } fn compute_data_routes_(&self, tables: &Tables, res: &Arc) -> DataRoutes { - let mut routes = DataRoutes { - matching_pulls: None, - routers_data_routes: vec![], - peers_data_routes: vec![], - peer_data_route: None, - client_data_route: None, - }; + let mut routes = DataRoutes::default(); let mut expr = RoutingExpr::new(res, ""); - routes.client_data_route = - Some(self.compute_data_route(tables, &mut expr, NodeId::default(), WhatAmI::Client)); - routes.matching_pulls = Some(self.compute_matching_pulls(tables, &mut expr)); + + let route = self.compute_data_route(tables, &mut expr, NodeId::default(), WhatAmI::Peer); + + routes.routers.resize_with(1, || Arc::new(HashMap::new())); + routes.routers[0] = route.clone(); + + routes.peers.resize_with(1, || Arc::new(HashMap::new())); + routes.peers[0] = route; + + let route = self.compute_data_route(tables, &mut expr, NodeId::default(), WhatAmI::Client); + + routes.clients.resize_with(1, || Arc::new(HashMap::new())); + routes.clients[0] = route; + + // routes.matching_pulls = Some(self.compute_matching_pulls(tables, &mut expr)); routes } @@ -462,14 +468,28 @@ impl HatPubSubTrait for HatCode { let res_mut = get_mut_unchecked(&mut res_mut); let mut expr = RoutingExpr::new(res, ""); - res_mut.context_mut().client_data_route = Some(self.compute_data_route( - tables, - &mut expr, - NodeId::default(), - WhatAmI::Client, - )); + let route = + self.compute_data_route(tables, &mut expr, NodeId::default(), WhatAmI::Peer); + + let routers_data_routes = &mut res_mut.context_mut().data_routes.routers; + routers_data_routes.clear(); + routers_data_routes.resize_with(1, || Arc::new(HashMap::new())); + routers_data_routes[0] = route.clone(); + + let peers_data_routes = &mut res_mut.context_mut().data_routes.peers; + peers_data_routes.clear(); + peers_data_routes.resize_with(1, || Arc::new(HashMap::new())); + peers_data_routes[0] = route; + + let route = + self.compute_data_route(tables, &mut expr, NodeId::default(), WhatAmI::Client); + + let clients_data_routes = &mut res_mut.context_mut().data_routes.clients; + clients_data_routes.clear(); + 
clients_data_routes.resize_with(1, || Arc::new(HashMap::new())); + clients_data_routes[0] = route; - res_mut.context_mut().matching_pulls = self.compute_matching_pulls(tables, &mut expr); + // res_mut.context_mut().matching_pulls = self.compute_matching_pulls(tables, &mut expr); } } } diff --git a/zenoh/src/net/routing/hat/p2p_peer/queries.rs b/zenoh/src/net/routing/hat/p2p_peer/queries.rs index 41ef4feb0d..db1041f359 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/queries.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/queries.rs @@ -438,15 +438,28 @@ impl HatQueriesTrait for HatCode { } fn compute_query_routes_(&self, tables: &Tables, res: &Arc) -> QueryRoutes { - let mut routes = QueryRoutes { - routers_query_routes: vec![], - peers_query_routes: vec![], - peer_query_route: None, - client_query_route: None, - }; + let mut routes = QueryRoutes::default(); let mut expr = RoutingExpr::new(res, ""); - routes.client_query_route = - Some(self.compute_query_route(tables, &mut expr, NodeId::default(), WhatAmI::Client)); + + let route = self.compute_query_route(tables, &mut expr, NodeId::default(), WhatAmI::Peer); + + routes + .routers + .resize_with(1, || Arc::new(QueryTargetQablSet::new())); + routes.routers[0] = route.clone(); + + routes + .peers + .resize_with(1, || Arc::new(QueryTargetQablSet::new())); + routes.peers[0] = route; + + let route = self.compute_query_route(tables, &mut expr, NodeId::default(), WhatAmI::Client); + + routes + .clients + .resize_with(1, || Arc::new(QueryTargetQablSet::new())); + routes.clients[0] = route; + routes } @@ -455,12 +468,27 @@ impl HatQueriesTrait for HatCode { let mut res_mut = res.clone(); let res_mut = get_mut_unchecked(&mut res_mut); let mut expr = RoutingExpr::new(res, ""); - res_mut.context_mut().client_query_route = Some(self.compute_query_route( - tables, - &mut expr, - NodeId::default(), - WhatAmI::Client, - )); + + let route = + self.compute_query_route(tables, &mut expr, NodeId::default(), WhatAmI::Peer); + + let routers_query_routes = &mut res_mut.context_mut().query_routes.routers; + routers_query_routes.clear(); + routers_query_routes.resize_with(1, || Arc::new(QueryTargetQablSet::new())); + routers_query_routes[0] = route.clone(); + + let peers_query_routes = &mut res_mut.context_mut().query_routes.peers; + peers_query_routes.clear(); + peers_query_routes.resize_with(1, || Arc::new(QueryTargetQablSet::new())); + peers_query_routes[0] = route; + + let route = + self.compute_query_route(tables, &mut expr, NodeId::default(), WhatAmI::Client); + + let clients_query_routes = &mut res_mut.context_mut().query_routes.clients; + clients_query_routes.clear(); + clients_query_routes.resize_with(1, || Arc::new(QueryTargetQablSet::new())); + clients_query_routes[0] = route; } } } diff --git a/zenoh/src/net/routing/hat/router/mod.rs b/zenoh/src/net/routing/hat/router/mod.rs index 5f7ed70645..e485a1d880 100644 --- a/zenoh/src/net/routing/hat/router/mod.rs +++ b/zenoh/src/net/routing/hat/router/mod.rs @@ -432,11 +432,13 @@ impl HatBaseTrait for HatCode { if !Arc::ptr_eq(&match_, &res) { get_mut_unchecked(&mut match_) .context_mut() - .valid_data_routes = false; + .disable_data_routes(); subs_matches.push(match_); } } - get_mut_unchecked(&mut res).context_mut().valid_data_routes = false; + get_mut_unchecked(&mut res) + .context_mut() + .disable_data_routes(); subs_matches.push(res); } } @@ -458,11 +460,13 @@ impl HatBaseTrait for HatCode { if !Arc::ptr_eq(&match_, &res) { get_mut_unchecked(&mut match_) .context_mut() - .valid_query_routes = false; + 
.disable_query_routes(); qabls_matches.push(match_); } } - get_mut_unchecked(&mut res).context_mut().valid_query_routes = false; + get_mut_unchecked(&mut res) + .context_mut() + .disable_query_routes(); qabls_matches.push(res); } } diff --git a/zenoh/src/net/routing/hat/router/pubsub.rs b/zenoh/src/net/routing/hat/router/pubsub.rs index babc9509af..332f1af7fa 100644 --- a/zenoh/src/net/routing/hat/router/pubsub.rs +++ b/zenoh/src/net/routing/hat/router/pubsub.rs @@ -1274,13 +1274,7 @@ impl HatPubSubTrait for HatCode { } fn compute_data_routes_(&self, tables: &Tables, res: &Arc) -> DataRoutes { - let mut routes = DataRoutes { - matching_pulls: None, - routers_data_routes: vec![], - peers_data_routes: vec![], - peer_data_route: None, - client_data_route: None, - }; + let mut routes = DataRoutes::default(); let mut expr = RoutingExpr::new(res, ""); let indexes = hat!(tables) @@ -1292,16 +1286,17 @@ impl HatPubSubTrait for HatCode { .collect::>(); let max_idx = indexes.iter().max().unwrap(); routes - .routers_data_routes + .routers .resize_with(max_idx.index() + 1, || Arc::new(HashMap::new())); for idx in &indexes { - routes.routers_data_routes[idx.index()] = + routes.routers[idx.index()] = self.compute_data_route(tables, &mut expr, idx.index() as NodeId, WhatAmI::Router); } - routes.peer_data_route = - Some(self.compute_data_route(tables, &mut expr, NodeId::default(), WhatAmI::Peer)); + routes.peers.resize_with(1, || Arc::new(HashMap::new())); + routes.peers[0] = + self.compute_data_route(tables, &mut expr, NodeId::default(), WhatAmI::Peer); if hat!(tables).full_net(WhatAmI::Peer) { let indexes = hat!(tables) @@ -1313,11 +1308,11 @@ impl HatPubSubTrait for HatCode { .collect::>(); let max_idx = indexes.iter().max().unwrap(); routes - .peers_data_routes + .peers .resize_with(max_idx.index() + 1, || Arc::new(HashMap::new())); for idx in &indexes { - routes.peers_data_routes[idx.index()] = self.compute_data_route( + routes.peers[idx.index()] = self.compute_data_route( tables, &mut expr, idx.index() as NodeId, @@ -1325,7 +1320,11 @@ impl HatPubSubTrait for HatCode { ); } } - routes.matching_pulls = Some(self.compute_matching_pulls(tables, &mut expr)); + routes.clients.resize_with(1, || Arc::new(HashMap::new())); + routes.clients[0] = + self.compute_data_route(tables, &mut expr, NodeId::default(), WhatAmI::Client); + + // routes.matching_pulls = Some(self.compute_matching_pulls(tables, &mut expr)); routes } @@ -1343,7 +1342,7 @@ impl HatPubSubTrait for HatCode { .node_indices() .collect::>(); let max_idx = indexes.iter().max().unwrap(); - let routers_data_routes = &mut res_mut.context_mut().routers_data_routes; + let routers_data_routes = &mut res_mut.context_mut().data_routes.routers; routers_data_routes.clear(); routers_data_routes.resize_with(max_idx.index() + 1, || Arc::new(HashMap::new())); @@ -1356,8 +1355,11 @@ impl HatPubSubTrait for HatCode { ); } - res_mut.context_mut().peer_data_route = - Some(self.compute_data_route(tables, &mut expr, NodeId::default(), WhatAmI::Peer)); + let peers_data_routes = &mut res_mut.context_mut().data_routes.peers; + peers_data_routes.clear(); + peers_data_routes.resize_with(max_idx.index() + 1, || Arc::new(HashMap::new())); + peers_data_routes[0] = + self.compute_data_route(tables, &mut expr, NodeId::default(), WhatAmI::Peer); if hat!(tables).full_net(WhatAmI::Peer) { let indexes = hat!(tables) @@ -1368,7 +1370,7 @@ impl HatPubSubTrait for HatCode { .node_indices() .collect::>(); let max_idx = indexes.iter().max().unwrap(); - let peers_data_routes = &mut 
res_mut.context_mut().peers_data_routes; + let peers_data_routes = &mut res_mut.context_mut().data_routes.peers; peers_data_routes.clear(); peers_data_routes.resize_with(max_idx.index() + 1, || Arc::new(HashMap::new())); @@ -1381,7 +1383,7 @@ impl HatPubSubTrait for HatCode { ); } } - res_mut.context_mut().matching_pulls = self.compute_matching_pulls(tables, &mut expr); + // res_mut.context_mut().matching_pulls = self.compute_matching_pulls(tables, &mut expr); } } } diff --git a/zenoh/src/net/routing/hat/router/queries.rs b/zenoh/src/net/routing/hat/router/queries.rs index 7cc7dc7e02..b0c716ad8d 100644 --- a/zenoh/src/net/routing/hat/router/queries.rs +++ b/zenoh/src/net/routing/hat/router/queries.rs @@ -1439,12 +1439,7 @@ impl HatQueriesTrait for HatCode { } fn compute_query_routes_(&self, tables: &Tables, res: &Arc) -> QueryRoutes { - let mut routes = QueryRoutes { - routers_query_routes: vec![], - peers_query_routes: vec![], - peer_query_route: None, - client_query_route: None, - }; + let mut routes = QueryRoutes::default(); let mut expr = RoutingExpr::new(res, ""); let indexes = hat!(tables) @@ -1456,16 +1451,19 @@ impl HatQueriesTrait for HatCode { .collect::>(); let max_idx = indexes.iter().max().unwrap(); routes - .routers_query_routes + .routers .resize_with(max_idx.index() + 1, || Arc::new(QueryTargetQablSet::new())); for idx in &indexes { - routes.routers_query_routes[idx.index()] = + routes.routers[idx.index()] = self.compute_query_route(tables, &mut expr, idx.index() as NodeId, WhatAmI::Router); } - routes.peer_query_route = - Some(self.compute_query_route(tables, &mut expr, NodeId::default(), WhatAmI::Peer)); + routes + .peers + .resize_with(1, || Arc::new(QueryTargetQablSet::new())); + routes.peers[0] = + self.compute_query_route(tables, &mut expr, NodeId::default(), WhatAmI::Peer); if hat!(tables).full_net(WhatAmI::Peer) { let indexes = hat!(tables) @@ -1477,11 +1475,11 @@ impl HatQueriesTrait for HatCode { .collect::>(); let max_idx = indexes.iter().max().unwrap(); routes - .peers_query_routes + .peers .resize_with(max_idx.index() + 1, || Arc::new(QueryTargetQablSet::new())); for idx in &indexes { - routes.peers_query_routes[idx.index()] = self.compute_query_route( + routes.peers[idx.index()] = self.compute_query_route( tables, &mut expr, idx.index() as NodeId, @@ -1489,6 +1487,12 @@ impl HatQueriesTrait for HatCode { ); } } + routes + .clients + .resize_with(1, || Arc::new(QueryTargetQablSet::new())); + routes.clients[0] = + self.compute_query_route(tables, &mut expr, NodeId::default(), WhatAmI::Client); + routes } @@ -1506,7 +1510,7 @@ impl HatQueriesTrait for HatCode { .node_indices() .collect::>(); let max_idx = indexes.iter().max().unwrap(); - let routers_query_routes = &mut res_mut.context_mut().routers_query_routes; + let routers_query_routes = &mut res_mut.context_mut().query_routes.routers; routers_query_routes.clear(); routers_query_routes .resize_with(max_idx.index() + 1, || Arc::new(QueryTargetQablSet::new())); @@ -1520,8 +1524,12 @@ impl HatQueriesTrait for HatCode { ); } - res_mut.context_mut().peer_query_route = - Some(self.compute_query_route(tables, &mut expr, NodeId::default(), WhatAmI::Peer)); + let peers_query_routes = &mut res_mut.context_mut().query_routes.peers; + peers_query_routes.clear(); + peers_query_routes + .resize_with(max_idx.index() + 1, || Arc::new(QueryTargetQablSet::new())); + peers_query_routes[0] = + self.compute_query_route(tables, &mut expr, NodeId::default(), WhatAmI::Peer); if hat!(tables).full_net(WhatAmI::Peer) { let indexes = 
hat!(tables) @@ -1532,7 +1540,7 @@ impl HatQueriesTrait for HatCode { .node_indices() .collect::>(); let max_idx = indexes.iter().max().unwrap(); - let peers_query_routes = &mut res_mut.context_mut().peers_query_routes; + let peers_query_routes = &mut res_mut.context_mut().query_routes.peers; peers_query_routes.clear(); peers_query_routes .resize_with(max_idx.index() + 1, || Arc::new(QueryTargetQablSet::new())); From b2a7ee2ddd4897c80e036de63ae6520af81753d3 Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Fri, 22 Dec 2023 21:42:26 +0100 Subject: [PATCH 023/122] Improve routes precomputation --- zenoh/src/net/routing/dispatcher/pubsub.rs | 24 +++- zenoh/src/net/routing/dispatcher/queries.rs | 18 ++- zenoh/src/net/routing/hat/client/mod.rs | 4 +- zenoh/src/net/routing/hat/client/pubsub.rs | 71 ++++------- zenoh/src/net/routing/hat/client/queries.rs | 44 ++----- .../src/net/routing/hat/linkstate_peer/mod.rs | 4 +- .../net/routing/hat/linkstate_peer/pubsub.rs | 92 ++++---------- .../net/routing/hat/linkstate_peer/queries.rs | 77 ++---------- zenoh/src/net/routing/hat/mod.rs | 54 +++++++- zenoh/src/net/routing/hat/p2p_peer/mod.rs | 4 +- zenoh/src/net/routing/hat/p2p_peer/pubsub.rs | 52 ++------ zenoh/src/net/routing/hat/p2p_peer/queries.rs | 49 ++------ zenoh/src/net/routing/hat/router/mod.rs | 4 +- zenoh/src/net/routing/hat/router/pubsub.rs | 116 +++--------------- zenoh/src/net/routing/hat/router/queries.rs | 114 +++-------------- zenoh/src/net/routing/router.rs | 4 +- 16 files changed, 221 insertions(+), 510 deletions(-) diff --git a/zenoh/src/net/routing/dispatcher/pubsub.rs b/zenoh/src/net/routing/dispatcher/pubsub.rs index 68291fcbd7..85bd72aec5 100644 --- a/zenoh/src/net/routing/dispatcher/pubsub.rs +++ b/zenoh/src/net/routing/dispatcher/pubsub.rs @@ -25,15 +25,15 @@ use zenoh_protocol::{ }; use zenoh_sync::get_mut_unchecked; -pub(crate) fn compute_data_routes_from(tables: &mut Tables, res: &mut Arc) { - tables.hat_code.clone().compute_data_routes(tables, res); +pub(crate) fn update_data_routes_from(tables: &mut Tables, res: &mut Arc) { + tables.hat_code.clone().update_data_routes(tables, res); let res = get_mut_unchecked(res); for child in res.childs.values_mut() { - compute_data_routes_from(tables, child); + update_data_routes_from(tables, child); } } -pub(crate) fn compute_matches_data_routes_<'a>( +pub(crate) fn compute_matches_data_routes<'a>( tables: &'a Tables, res: &'a Arc, ) -> Vec<(Arc, DataRoutes)> { @@ -41,12 +41,12 @@ pub(crate) fn compute_matches_data_routes_<'a>( if res.context.is_some() { routes.push(( res.clone(), - tables.hat_code.compute_data_routes_(tables, res), + tables.hat_code.compute_data_routes(tables, res), )); for match_ in &res.context().matches { let match_ = match_.upgrade().unwrap(); if !Arc::ptr_eq(&match_, res) { - let match_routes = tables.hat_code.compute_data_routes_(tables, &match_); + let match_routes = tables.hat_code.compute_data_routes(tables, &match_); routes.push((match_, match_routes)); } } @@ -54,6 +54,18 @@ pub(crate) fn compute_matches_data_routes_<'a>( routes } +pub(crate) fn update_matches_data_routes<'a>(tables: &'a mut Tables, res: &'a mut Arc) { + if res.context.is_some() { + tables.hat_code.update_data_routes(tables, res); + for match_ in &res.context().matches { + let match_ = match_.upgrade().unwrap(); + if !Arc::ptr_eq(&match_, res) { + tables.hat_code.update_data_routes(tables, &match_); + } + } + } +} + pub(crate) fn disable_matches_data_routes(_tables: &mut Tables, res: &mut Arc) { if res.context.is_some() { 
get_mut_unchecked(res).context_mut().disable_data_routes(); diff --git a/zenoh/src/net/routing/dispatcher/queries.rs b/zenoh/src/net/routing/dispatcher/queries.rs index 5016005fe5..7f62913d08 100644 --- a/zenoh/src/net/routing/dispatcher/queries.rs +++ b/zenoh/src/net/routing/dispatcher/queries.rs @@ -45,7 +45,7 @@ pub(crate) fn compute_query_routes_from(tables: &mut Tables, res: &mut Arc, ) -> Vec<(Arc, QueryRoutes)> { @@ -53,12 +53,12 @@ pub(crate) fn compute_matches_query_routes_( if res.context.is_some() { routes.push(( res.clone(), - tables.hat_code.compute_query_routes_(tables, res), + tables.hat_code.compute_query_routes(tables, res), )); for match_ in &res.context().matches { let match_ = match_.upgrade().unwrap(); if !Arc::ptr_eq(&match_, res) { - let match_routes = tables.hat_code.compute_query_routes_(tables, &match_); + let match_routes = tables.hat_code.compute_query_routes(tables, &match_); routes.push((match_, match_routes)); } } @@ -66,6 +66,18 @@ pub(crate) fn compute_matches_query_routes_( routes } +pub(crate) fn update_matches_query_routes(tables: &Tables, res: &Arc) { + if res.context.is_some() { + tables.hat_code.update_query_routes(tables, res); + for match_ in &res.context().matches { + let match_ = match_.upgrade().unwrap(); + if !Arc::ptr_eq(&match_, res) { + tables.hat_code.update_query_routes(tables, &match_); + } + } + } +} + #[inline] fn insert_pending_query(outface: &mut Arc, query: Arc) -> RequestId { let outface_mut = get_mut_unchecked(outface); diff --git a/zenoh/src/net/routing/hat/client/mod.rs b/zenoh/src/net/routing/hat/client/mod.rs index 0a7179e59d..67c56ebdf3 100644 --- a/zenoh/src/net/routing/hat/client/mod.rs +++ b/zenoh/src/net/routing/hat/client/mod.rs @@ -188,13 +188,13 @@ impl HatBaseTrait for HatCode { for _match in subs_matches.drain(..) { matches_data_routes.push(( _match.clone(), - rtables.hat_code.compute_data_routes_(&rtables, &_match), + rtables.hat_code.compute_data_routes(&rtables, &_match), )); } for _match in qabls_matches.drain(..) 
{ matches_query_routes.push(( _match.clone(), - rtables.hat_code.compute_query_routes_(&rtables, &_match), + rtables.hat_code.compute_query_routes(&rtables, &_match), )); } drop(rtables); diff --git a/zenoh/src/net/routing/hat/client/pubsub.rs b/zenoh/src/net/routing/hat/client/pubsub.rs index 9bbb6f9619..78291381b2 100644 --- a/zenoh/src/net/routing/hat/client/pubsub.rs +++ b/zenoh/src/net/routing/hat/client/pubsub.rs @@ -184,7 +184,7 @@ fn declare_client_subscription( drop(wtables); let rtables = zread!(tables.tables); - let matches_data_routes = compute_matches_data_routes_(&rtables, &res); + let matches_data_routes = compute_matches_data_routes(&rtables, &res); drop(rtables); let wtables = zwrite!(tables.tables); @@ -289,7 +289,7 @@ fn forget_client_subscription( drop(wtables); let rtables = zread!(tables.tables); - let matches_data_routes = compute_matches_data_routes_(&rtables, &res); + let matches_data_routes = compute_matches_data_routes(&rtables, &res); drop(rtables); let wtables = zwrite!(tables.tables); @@ -441,50 +441,27 @@ impl HatPubSubTrait for HatCode { Arc::new(pull_caches) } - fn compute_data_routes_(&self, tables: &Tables, res: &Arc) -> DataRoutes { - let mut routes = DataRoutes::default(); - let mut expr = RoutingExpr::new(res, ""); - - let route = self.compute_data_route(tables, &mut expr, NodeId::default(), WhatAmI::Client); - - routes.routers.resize_with(1, || Arc::new(HashMap::new())); - routes.routers[0] = route.clone(); - - routes.peers.resize_with(1, || Arc::new(HashMap::new())); - routes.peers[0] = route.clone(); - - routes.clients.resize_with(1, || Arc::new(HashMap::new())); - routes.clients[0] = route; - - // routes.matching_pulls = Some(self.compute_matching_pulls(tables, &mut expr)); - routes - } - - fn compute_data_routes(&self, tables: &mut Tables, res: &mut Arc) { - if res.context.is_some() { - let mut res_mut = res.clone(); - let res_mut = get_mut_unchecked(&mut res_mut); - let mut expr = RoutingExpr::new(res, ""); - - let route = - self.compute_data_route(tables, &mut expr, NodeId::default(), WhatAmI::Client); - - let routers_data_routes = &mut res_mut.context_mut().data_routes.routers; - routers_data_routes.clear(); - routers_data_routes.resize_with(1, || Arc::new(HashMap::new())); - routers_data_routes[0] = route.clone(); - - let peers_data_routes = &mut res_mut.context_mut().data_routes.peers; - peers_data_routes.clear(); - peers_data_routes.resize_with(1, || Arc::new(HashMap::new())); - peers_data_routes[0] = route.clone(); - - let clients_data_routes = &mut res_mut.context_mut().data_routes.clients; - clients_data_routes.clear(); - clients_data_routes.resize_with(1, || Arc::new(HashMap::new())); - clients_data_routes[0] = route; - - // res_mut.context_mut().matching_pulls = self.compute_matching_pulls(tables, &mut expr); - } + fn compute_data_routes_( + &self, + tables: &Tables, + data_routes: &mut DataRoutes, + expr: &mut RoutingExpr, + ) { + let route = self.compute_data_route(tables, expr, NodeId::default(), WhatAmI::Client); + + let routers_data_routes = &mut data_routes.routers; + routers_data_routes.clear(); + routers_data_routes.resize_with(1, || Arc::new(HashMap::new())); + routers_data_routes[0] = route.clone(); + + let peers_data_routes = &mut data_routes.peers; + peers_data_routes.clear(); + peers_data_routes.resize_with(1, || Arc::new(HashMap::new())); + peers_data_routes[0] = route.clone(); + + let clients_data_routes = &mut data_routes.clients; + clients_data_routes.clear(); + clients_data_routes.resize_with(1, || 
Arc::new(HashMap::new())); + clients_data_routes[0] = route; } } diff --git a/zenoh/src/net/routing/hat/client/queries.rs b/zenoh/src/net/routing/hat/client/queries.rs index 55e2bf7765..1e503478c7 100644 --- a/zenoh/src/net/routing/hat/client/queries.rs +++ b/zenoh/src/net/routing/hat/client/queries.rs @@ -178,7 +178,7 @@ fn declare_client_queryable( drop(wtables); let rtables = zread!(tables.tables); - let matches_query_routes = compute_matches_query_routes_(&rtables, &res); + let matches_query_routes = compute_matches_query_routes(&rtables, &res); drop(rtables); let wtables = zwrite!(tables.tables); @@ -286,7 +286,7 @@ fn forget_client_queryable( drop(wtables); let rtables = zread!(tables.tables); - let matches_query_routes = compute_matches_query_routes_(&rtables, &res); + let matches_query_routes = compute_matches_query_routes(&rtables, &res); drop(rtables); let wtables = zwrite!(tables.tables); @@ -437,11 +437,13 @@ impl HatQueriesTrait for HatCode { result } - fn compute_query_routes_(&self, tables: &Tables, res: &Arc) -> QueryRoutes { - let mut routes = QueryRoutes::default(); - let mut expr = RoutingExpr::new(res, ""); - - let route = self.compute_query_route(tables, &mut expr, NodeId::default(), WhatAmI::Client); + fn compute_query_routes_( + &self, + tables: &Tables, + routes: &mut QueryRoutes, + expr: &mut RoutingExpr, + ) { + let route = self.compute_query_route(tables, expr, NodeId::default(), WhatAmI::Client); routes .routers @@ -457,33 +459,5 @@ impl HatQueriesTrait for HatCode { .clients .resize_with(1, || Arc::new(QueryTargetQablSet::new())); routes.clients[0] = route; - - routes - } - - fn compute_query_routes(&self, tables: &mut Tables, res: &mut Arc) { - if res.context.is_some() { - let mut res_mut = res.clone(); - let res_mut = get_mut_unchecked(&mut res_mut); - let mut expr = RoutingExpr::new(res, ""); - - let route = - self.compute_query_route(tables, &mut expr, NodeId::default(), WhatAmI::Client); - - let routers_query_routes = &mut res_mut.context_mut().query_routes.routers; - routers_query_routes.clear(); - routers_query_routes.resize_with(1, || Arc::new(QueryTargetQablSet::new())); - routers_query_routes[0] = route.clone(); - - let peers_query_routes = &mut res_mut.context_mut().query_routes.peers; - peers_query_routes.clear(); - peers_query_routes.resize_with(1, || Arc::new(QueryTargetQablSet::new())); - peers_query_routes[0] = route.clone(); - - let clients_query_routes = &mut res_mut.context_mut().query_routes.clients; - clients_query_routes.clear(); - clients_query_routes.resize_with(1, || Arc::new(QueryTargetQablSet::new())); - clients_query_routes[0] = route; - } } } diff --git a/zenoh/src/net/routing/hat/linkstate_peer/mod.rs b/zenoh/src/net/routing/hat/linkstate_peer/mod.rs index 5bdc1a3f6a..21d2afdec8 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/mod.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/mod.rs @@ -304,13 +304,13 @@ impl HatBaseTrait for HatCode { for _match in subs_matches.drain(..) { matches_data_routes.push(( _match.clone(), - rtables.hat_code.compute_data_routes_(&rtables, &_match), + rtables.hat_code.compute_data_routes(&rtables, &_match), )); } for _match in qabls_matches.drain(..) 
{ matches_query_routes.push(( _match.clone(), - rtables.hat_code.compute_query_routes_(&rtables, &_match), + rtables.hat_code.compute_query_routes(&rtables, &_match), )); } drop(rtables); diff --git a/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs b/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs index 4089aba970..647c864010 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs @@ -224,7 +224,7 @@ fn declare_peer_subscription( drop(wtables); let rtables = zread!(tables.tables); - let matches_data_routes = compute_matches_data_routes_(&rtables, &res); + let matches_data_routes = compute_matches_data_routes(&rtables, &res); drop(rtables); let wtables = zwrite!(tables.tables); @@ -324,7 +324,7 @@ fn declare_client_subscription( drop(wtables); let rtables = zread!(tables.tables); - let matches_data_routes = compute_matches_data_routes_(&rtables, &res); + let matches_data_routes = compute_matches_data_routes(&rtables, &res); drop(rtables); let wtables = zwrite!(tables.tables); @@ -506,7 +506,7 @@ fn forget_peer_subscription( drop(wtables); let rtables = zread!(tables.tables); - let matches_data_routes = compute_matches_data_routes_(&rtables, &res); + let matches_data_routes = compute_matches_data_routes(&rtables, &res); drop(rtables); let wtables = zwrite!(tables.tables); for (mut res, data_routes) in matches_data_routes { @@ -579,7 +579,7 @@ fn forget_client_subscription( drop(wtables); let rtables = zread!(tables.tables); - let matches_data_routes = compute_matches_data_routes_(&rtables, &res); + let matches_data_routes = compute_matches_data_routes(&rtables, &res); drop(rtables); let wtables = zwrite!(tables.tables); @@ -634,13 +634,7 @@ pub(super) fn pubsub_remove_node(tables: &mut Tables, node: &ZenohId) { { unregister_peer_subscription(tables, &mut res, node); - // compute_matches_data_routes(tables, &mut res); - let matches_data_routes = compute_matches_data_routes_(tables, &res); - for (mut res, data_routes) in matches_data_routes { - get_mut_unchecked(&mut res) - .context_mut() - .update_data_routes(data_routes); - } + update_matches_data_routes(tables, &mut res); Resource::clean(&mut res) } } @@ -681,7 +675,7 @@ pub(super) fn pubsub_tree_change(tables: &mut Tables, new_childs: &[Vec) -> DataRoutes { - let mut routes = DataRoutes::default(); - let mut expr = RoutingExpr::new(res, ""); + fn compute_data_routes_( + &self, + tables: &Tables, + routes: &mut DataRoutes, + expr: &mut RoutingExpr, + ) { let indexes = hat!(tables) .peers_net .as_ref() @@ -871,71 +868,28 @@ impl HatPubSubTrait for HatCode { .node_indices() .collect::>(); let max_idx = indexes.iter().max().unwrap(); + + routes.routers.clear(); + routes .routers .resize_with(max_idx.index() + 1, || Arc::new(HashMap::new())); + + routes.peers.clear(); + routes .peers .resize_with(max_idx.index() + 1, || Arc::new(HashMap::new())); for idx in &indexes { - let route = - self.compute_data_route(tables, &mut expr, idx.index() as NodeId, WhatAmI::Peer); + let route = self.compute_data_route(tables, expr, idx.index() as NodeId, WhatAmI::Peer); routes.routers[idx.index()] = route.clone(); routes.peers[idx.index()] = route; } - routes.clients.resize_with(1, || Arc::new(HashMap::new())); - routes.clients[0] = self.compute_data_route(tables, &mut expr, 0, WhatAmI::Peer); - // routes.matching_pulls = Some(self.compute_matching_pulls(tables, &mut expr)); - routes - } - - fn compute_data_routes(&self, tables: &mut Tables, res: &mut Arc) { - if res.context.is_some() { - 
let mut res_mut = res.clone(); - let res_mut = get_mut_unchecked(&mut res_mut); - let mut expr = RoutingExpr::new(res, ""); - let indexes = hat!(tables) - .peers_net - .as_ref() - .unwrap() - .graph - .node_indices() - .collect::>(); - let max_idx = indexes.iter().max().unwrap(); - - res_mut.context_mut().data_routes.routers.clear(); - res_mut - .context_mut() - .data_routes - .routers - .resize_with(max_idx.index() + 1, || Arc::new(HashMap::new())); - - res_mut.context_mut().data_routes.peers.clear(); - res_mut - .context_mut() - .data_routes - .peers - .resize_with(max_idx.index() + 1, || Arc::new(HashMap::new())); - - for idx in &indexes { - let route = self.compute_data_route( - tables, - &mut expr, - idx.index() as NodeId, - WhatAmI::Peer, - ); - res_mut.context_mut().data_routes.routers[idx.index()] = route.clone(); - res_mut.context_mut().data_routes.peers[idx.index()] = route; - } - - let clients_data_routes = &mut res_mut.context_mut().data_routes.clients; - clients_data_routes.clear(); - clients_data_routes.resize_with(1, || Arc::new(HashMap::new())); - clients_data_routes[0] = self.compute_data_route(tables, &mut expr, 0, WhatAmI::Peer); - - // res_mut.context_mut().matching_pulls = self.compute_matching_pulls(tables, &mut expr); - } + let clients_data_routes = &mut routes.clients; + clients_data_routes.clear(); + clients_data_routes.resize_with(1, || Arc::new(HashMap::new())); + clients_data_routes[0] = self.compute_data_route(tables, expr, 0, WhatAmI::Peer); } } diff --git a/zenoh/src/net/routing/hat/linkstate_peer/queries.rs b/zenoh/src/net/routing/hat/linkstate_peer/queries.rs index da7b94f945..23295f8948 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/queries.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/queries.rs @@ -297,7 +297,7 @@ fn declare_peer_queryable( drop(wtables); let rtables = zread!(tables.tables); - let matches_query_routes = compute_matches_query_routes_(&rtables, &res); + let matches_query_routes = compute_matches_query_routes(&rtables, &res); drop(rtables); let wtables = zwrite!(tables.tables); @@ -380,7 +380,7 @@ fn declare_client_queryable( drop(wtables); let rtables = zread!(tables.tables); - let matches_query_routes = compute_matches_query_routes_(&rtables, &res); + let matches_query_routes = compute_matches_query_routes(&rtables, &res); drop(rtables); let wtables = zwrite!(tables.tables); @@ -558,7 +558,7 @@ fn forget_peer_queryable( drop(wtables); let rtables = zread!(tables.tables); - let matches_query_routes = compute_matches_query_routes_(&rtables, &res); + let matches_query_routes = compute_matches_query_routes(&rtables, &res); drop(rtables); let wtables = zwrite!(tables.tables); @@ -637,7 +637,7 @@ fn forget_client_queryable( drop(wtables); let rtables = zread!(tables.tables); - let matches_query_routes = compute_matches_query_routes_(&rtables, &res); + let matches_query_routes = compute_matches_query_routes(&rtables, &res); drop(rtables); let wtables = zwrite!(tables.tables); @@ -692,12 +692,7 @@ pub(super) fn queries_remove_node(tables: &mut Tables, node: &ZenohId) { for mut res in qabls { unregister_peer_queryable(tables, &mut res, node); - let matches_query_routes = compute_matches_query_routes_(tables, &res); - for (mut res, query_routes) in matches_query_routes { - get_mut_unchecked(&mut res) - .context_mut() - .update_query_routes(query_routes); - } + update_matches_query_routes(tables, &res); Resource::clean(&mut res) } } @@ -940,9 +935,12 @@ impl HatQueriesTrait for HatCode { result } - fn compute_query_routes_(&self, tables: 
&Tables, res: &Arc) -> QueryRoutes { - let mut routes = QueryRoutes::default(); - let mut expr = RoutingExpr::new(res, ""); + fn compute_query_routes_( + &self, + tables: &Tables, + routes: &mut QueryRoutes, + expr: &mut RoutingExpr, + ) { let indexes = hat!(tables) .peers_net .as_ref() @@ -961,62 +959,13 @@ impl HatQueriesTrait for HatCode { for idx in &indexes { let route = - self.compute_query_route(tables, &mut expr, idx.index() as NodeId, WhatAmI::Peer); + self.compute_query_route(tables, expr, idx.index() as NodeId, WhatAmI::Peer); routes.routers[idx.index()] = route.clone(); routes.peers[idx.index()] = route; } routes .clients .resize_with(1, || Arc::new(QueryTargetQablSet::new())); - routes.clients[0] = self.compute_query_route(tables, &mut expr, 0, WhatAmI::Peer); - - routes - } - - fn compute_query_routes(&self, tables: &mut Tables, res: &mut Arc) { - if res.context.is_some() { - let mut res_mut = res.clone(); - let res_mut = get_mut_unchecked(&mut res_mut); - let mut expr = RoutingExpr::new(res, ""); - - let indexes = hat!(tables) - .peers_net - .as_ref() - .unwrap() - .graph - .node_indices() - .collect::>(); - let max_idx = indexes.iter().max().unwrap(); - - res_mut.context_mut().query_routes.routers.clear(); - res_mut - .context_mut() - .query_routes - .routers - .resize_with(max_idx.index() + 1, || Arc::new(QueryTargetQablSet::new())); - - res_mut.context_mut().query_routes.peers.clear(); - res_mut - .context_mut() - .query_routes - .peers - .resize_with(max_idx.index() + 1, || Arc::new(QueryTargetQablSet::new())); - - for idx in &indexes { - let route = self.compute_query_route( - tables, - &mut expr, - idx.index() as NodeId, - WhatAmI::Peer, - ); - res_mut.context_mut().query_routes.routers[idx.index()] = route.clone(); - res_mut.context_mut().query_routes.peers[idx.index()] = route; - } - - let clients_query_routes = &mut res_mut.context_mut().query_routes.clients; - clients_query_routes.clear(); - clients_query_routes.resize_with(1, || Arc::new(QueryTargetQablSet::new())); - clients_query_routes[0] = self.compute_query_route(tables, &mut expr, 0, WhatAmI::Peer); - } + routes.clients[0] = self.compute_query_route(tables, expr, 0, WhatAmI::Peer); } } diff --git a/zenoh/src/net/routing/hat/mod.rs b/zenoh/src/net/routing/hat/mod.rs index 69b0ecf877..f6b9aadfd6 100644 --- a/zenoh/src/net/routing/hat/mod.rs +++ b/zenoh/src/net/routing/hat/mod.rs @@ -36,6 +36,7 @@ use zenoh_protocol::{ }, }; use zenoh_result::ZResult; +use zenoh_sync::get_mut_unchecked; use zenoh_transport::TransportUnicast; mod client; @@ -138,9 +139,30 @@ pub(crate) trait HatPubSubTrait { fn compute_matching_pulls(&self, tables: &Tables, expr: &mut RoutingExpr) -> Arc; - fn compute_data_routes_(&self, tables: &Tables, res: &Arc) -> DataRoutes; + fn compute_data_routes_( + &self, + tables: &Tables, + routes: &mut DataRoutes, + expr: &mut RoutingExpr, + ); + + fn compute_data_routes(&self, tables: &Tables, res: &Arc) -> DataRoutes { + let mut routes = DataRoutes::default(); + self.compute_data_routes_(tables, &mut routes, &mut RoutingExpr::new(res, "")); + routes + } - fn compute_data_routes(&self, tables: &mut Tables, res: &mut Arc); + fn update_data_routes(&self, tables: &Tables, res: &Arc) { + if res.context.is_some() { + let mut res_mut = res.clone(); + let res_mut = get_mut_unchecked(&mut res_mut); + self.compute_data_routes_( + tables, + &mut res_mut.context_mut().data_routes, + &mut RoutingExpr::new(res, ""), + ); + } + } } pub(crate) trait HatQueriesTrait { @@ -166,8 +188,32 @@ pub(crate) trait 
HatQueriesTrait { source: NodeId, source_type: WhatAmI, ) -> Arc; - fn compute_query_routes(&self, tables: &mut Tables, res: &mut Arc); - fn compute_query_routes_(&self, tables: &Tables, res: &Arc) -> QueryRoutes; + + fn compute_query_routes_( + &self, + tables: &Tables, + routes: &mut QueryRoutes, + expr: &mut RoutingExpr, + ); + + fn compute_query_routes(&self, tables: &Tables, res: &Arc) -> QueryRoutes { + let mut routes = QueryRoutes::default(); + self.compute_query_routes_(tables, &mut routes, &mut RoutingExpr::new(res, "")); + routes + } + + fn update_query_routes(&self, tables: &Tables, res: &Arc) { + if res.context.is_some() { + let mut res_mut = res.clone(); + let res_mut = get_mut_unchecked(&mut res_mut); + self.compute_query_routes_( + tables, + &mut res_mut.context_mut().query_routes, + &mut RoutingExpr::new(res, ""), + ); + } + } + fn compute_local_replies( &self, tables: &Tables, diff --git a/zenoh/src/net/routing/hat/p2p_peer/mod.rs b/zenoh/src/net/routing/hat/p2p_peer/mod.rs index b03ef98dc0..1bca2f335b 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/mod.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/mod.rs @@ -232,13 +232,13 @@ impl HatBaseTrait for HatCode { for _match in subs_matches.drain(..) { matches_data_routes.push(( _match.clone(), - rtables.hat_code.compute_data_routes_(&rtables, &_match), + rtables.hat_code.compute_data_routes(&rtables, &_match), )); } for _match in qabls_matches.drain(..) { matches_query_routes.push(( _match.clone(), - rtables.hat_code.compute_query_routes_(&rtables, &_match), + rtables.hat_code.compute_query_routes(&rtables, &_match), )); } drop(rtables); diff --git a/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs b/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs index abf7ee9948..1e3fdd3a6b 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs @@ -184,7 +184,7 @@ fn declare_client_subscription( drop(wtables); let rtables = zread!(tables.tables); - let matches_data_routes = compute_matches_data_routes_(&rtables, &res); + let matches_data_routes = compute_matches_data_routes(&rtables, &res); drop(rtables); let wtables = zwrite!(tables.tables); @@ -289,7 +289,7 @@ fn forget_client_subscription( drop(wtables); let rtables = zread!(tables.tables); - let matches_data_routes = compute_matches_data_routes_(&rtables, &res); + let matches_data_routes = compute_matches_data_routes(&rtables, &res); drop(rtables); let wtables = zwrite!(tables.tables); @@ -441,11 +441,13 @@ impl HatPubSubTrait for HatCode { Arc::new(pull_caches) } - fn compute_data_routes_(&self, tables: &Tables, res: &Arc) -> DataRoutes { - let mut routes = DataRoutes::default(); - let mut expr = RoutingExpr::new(res, ""); - - let route = self.compute_data_route(tables, &mut expr, NodeId::default(), WhatAmI::Peer); + fn compute_data_routes_( + &self, + tables: &Tables, + routes: &mut DataRoutes, + expr: &mut RoutingExpr, + ) { + let route = self.compute_data_route(tables, expr, NodeId::default(), WhatAmI::Peer); routes.routers.resize_with(1, || Arc::new(HashMap::new())); routes.routers[0] = route.clone(); @@ -453,43 +455,9 @@ impl HatPubSubTrait for HatCode { routes.peers.resize_with(1, || Arc::new(HashMap::new())); routes.peers[0] = route; - let route = self.compute_data_route(tables, &mut expr, NodeId::default(), WhatAmI::Client); + let route = self.compute_data_route(tables, expr, NodeId::default(), WhatAmI::Client); routes.clients.resize_with(1, || Arc::new(HashMap::new())); routes.clients[0] = route; - - // routes.matching_pulls = 
Some(self.compute_matching_pulls(tables, &mut expr)); - routes - } - - fn compute_data_routes(&self, tables: &mut Tables, res: &mut Arc) { - if res.context.is_some() { - let mut res_mut = res.clone(); - let res_mut = get_mut_unchecked(&mut res_mut); - let mut expr = RoutingExpr::new(res, ""); - - let route = - self.compute_data_route(tables, &mut expr, NodeId::default(), WhatAmI::Peer); - - let routers_data_routes = &mut res_mut.context_mut().data_routes.routers; - routers_data_routes.clear(); - routers_data_routes.resize_with(1, || Arc::new(HashMap::new())); - routers_data_routes[0] = route.clone(); - - let peers_data_routes = &mut res_mut.context_mut().data_routes.peers; - peers_data_routes.clear(); - peers_data_routes.resize_with(1, || Arc::new(HashMap::new())); - peers_data_routes[0] = route; - - let route = - self.compute_data_route(tables, &mut expr, NodeId::default(), WhatAmI::Client); - - let clients_data_routes = &mut res_mut.context_mut().data_routes.clients; - clients_data_routes.clear(); - clients_data_routes.resize_with(1, || Arc::new(HashMap::new())); - clients_data_routes[0] = route; - - // res_mut.context_mut().matching_pulls = self.compute_matching_pulls(tables, &mut expr); - } } } diff --git a/zenoh/src/net/routing/hat/p2p_peer/queries.rs b/zenoh/src/net/routing/hat/p2p_peer/queries.rs index db1041f359..c9dda3c4a4 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/queries.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/queries.rs @@ -178,7 +178,7 @@ fn declare_client_queryable( drop(wtables); let rtables = zread!(tables.tables); - let matches_query_routes = compute_matches_query_routes_(&rtables, &res); + let matches_query_routes = compute_matches_query_routes(&rtables, &res); drop(rtables); let wtables = zwrite!(tables.tables); @@ -286,7 +286,7 @@ fn forget_client_queryable( drop(wtables); let rtables = zread!(tables.tables); - let matches_query_routes = compute_matches_query_routes_(&rtables, &res); + let matches_query_routes = compute_matches_query_routes(&rtables, &res); drop(rtables); let wtables = zwrite!(tables.tables); @@ -437,11 +437,13 @@ impl HatQueriesTrait for HatCode { result } - fn compute_query_routes_(&self, tables: &Tables, res: &Arc) -> QueryRoutes { - let mut routes = QueryRoutes::default(); - let mut expr = RoutingExpr::new(res, ""); - - let route = self.compute_query_route(tables, &mut expr, NodeId::default(), WhatAmI::Peer); + fn compute_query_routes_( + &self, + tables: &Tables, + routes: &mut QueryRoutes, + expr: &mut RoutingExpr, + ) { + let route = self.compute_query_route(tables, expr, NodeId::default(), WhatAmI::Peer); routes .routers @@ -453,42 +455,11 @@ impl HatQueriesTrait for HatCode { .resize_with(1, || Arc::new(QueryTargetQablSet::new())); routes.peers[0] = route; - let route = self.compute_query_route(tables, &mut expr, NodeId::default(), WhatAmI::Client); + let route = self.compute_query_route(tables, expr, NodeId::default(), WhatAmI::Client); routes .clients .resize_with(1, || Arc::new(QueryTargetQablSet::new())); routes.clients[0] = route; - - routes - } - - fn compute_query_routes(&self, tables: &mut Tables, res: &mut Arc) { - if res.context.is_some() { - let mut res_mut = res.clone(); - let res_mut = get_mut_unchecked(&mut res_mut); - let mut expr = RoutingExpr::new(res, ""); - - let route = - self.compute_query_route(tables, &mut expr, NodeId::default(), WhatAmI::Peer); - - let routers_query_routes = &mut res_mut.context_mut().query_routes.routers; - routers_query_routes.clear(); - routers_query_routes.resize_with(1, || 
Arc::new(QueryTargetQablSet::new())); - routers_query_routes[0] = route.clone(); - - let peers_query_routes = &mut res_mut.context_mut().query_routes.peers; - peers_query_routes.clear(); - peers_query_routes.resize_with(1, || Arc::new(QueryTargetQablSet::new())); - peers_query_routes[0] = route; - - let route = - self.compute_query_route(tables, &mut expr, NodeId::default(), WhatAmI::Client); - - let clients_query_routes = &mut res_mut.context_mut().query_routes.clients; - clients_query_routes.clear(); - clients_query_routes.resize_with(1, || Arc::new(QueryTargetQablSet::new())); - clients_query_routes[0] = route; - } } } diff --git a/zenoh/src/net/routing/hat/router/mod.rs b/zenoh/src/net/routing/hat/router/mod.rs index e485a1d880..dea3667479 100644 --- a/zenoh/src/net/routing/hat/router/mod.rs +++ b/zenoh/src/net/routing/hat/router/mod.rs @@ -478,13 +478,13 @@ impl HatBaseTrait for HatCode { for _match in subs_matches.drain(..) { matches_data_routes.push(( _match.clone(), - rtables.hat_code.compute_data_routes_(&rtables, &_match), + rtables.hat_code.compute_data_routes(&rtables, &_match), )); } for _match in qabls_matches.drain(..) { matches_query_routes.push(( _match.clone(), - rtables.hat_code.compute_query_routes_(&rtables, &_match), + rtables.hat_code.compute_query_routes(&rtables, &_match), )); } drop(rtables); diff --git a/zenoh/src/net/routing/hat/router/pubsub.rs b/zenoh/src/net/routing/hat/router/pubsub.rs index 332f1af7fa..14b9a2afda 100644 --- a/zenoh/src/net/routing/hat/router/pubsub.rs +++ b/zenoh/src/net/routing/hat/router/pubsub.rs @@ -247,7 +247,7 @@ fn declare_router_subscription( drop(wtables); let rtables = zread!(tables.tables); - let matches_data_routes = compute_matches_data_routes_(&rtables, &res); + let matches_data_routes = compute_matches_data_routes(&rtables, &res); drop(rtables); let wtables = zwrite!(tables.tables); @@ -327,7 +327,7 @@ fn declare_peer_subscription( drop(wtables); let rtables = zread!(tables.tables); - let matches_data_routes = compute_matches_data_routes_(&rtables, &res); + let matches_data_routes = compute_matches_data_routes(&rtables, &res); drop(rtables); let wtables = zwrite!(tables.tables); @@ -427,7 +427,7 @@ fn declare_client_subscription( drop(wtables); let rtables = zread!(tables.tables); - let matches_data_routes = compute_matches_data_routes_(&rtables, &res); + let matches_data_routes = compute_matches_data_routes(&rtables, &res); drop(rtables); let wtables = zwrite!(tables.tables); @@ -663,7 +663,7 @@ fn forget_router_subscription( drop(wtables); let rtables = zread!(tables.tables); - let matches_data_routes = compute_matches_data_routes_(&rtables, &res); + let matches_data_routes = compute_matches_data_routes(&rtables, &res); drop(rtables); let wtables = zwrite!(tables.tables); for (mut res, data_routes) in matches_data_routes { @@ -730,7 +730,7 @@ fn forget_peer_subscription( drop(wtables); let rtables = zread!(tables.tables); - let matches_data_routes = compute_matches_data_routes_(&rtables, &res); + let matches_data_routes = compute_matches_data_routes(&rtables, &res); drop(rtables); let wtables = zwrite!(tables.tables); for (mut res, data_routes) in matches_data_routes { @@ -806,7 +806,7 @@ fn forget_client_subscription( drop(wtables); let rtables = zread!(tables.tables); - let matches_data_routes = compute_matches_data_routes_(&rtables, &res); + let matches_data_routes = compute_matches_data_routes(&rtables, &res); drop(rtables); let wtables = zwrite!(tables.tables); @@ -891,12 +891,7 @@ pub(super) fn 
pubsub_remove_node(tables: &mut Tables, node: &ZenohId, net_type: { unregister_router_subscription(tables, &mut res, node); - let matches_data_routes = compute_matches_data_routes_(tables, &res); - for (mut res, data_routes) in matches_data_routes { - get_mut_unchecked(&mut res) - .context_mut() - .update_data_routes(data_routes); - } + update_matches_data_routes(tables, &mut res); Resource::clean(&mut res) } } @@ -915,13 +910,7 @@ pub(super) fn pubsub_remove_node(tables: &mut Tables, node: &ZenohId, net_type: undeclare_router_subscription(tables, None, &mut res, &tables.zid.clone()); } - // compute_matches_data_routes(tables, &mut res); - let matches_data_routes = compute_matches_data_routes_(tables, &res); - for (mut res, data_routes) in matches_data_routes { - get_mut_unchecked(&mut res) - .context_mut() - .update_data_routes(data_routes); - } + update_matches_data_routes(tables, &mut res); Resource::clean(&mut res) } } @@ -975,7 +964,7 @@ pub(super) fn pubsub_tree_change( } // recompute routes - compute_data_routes_from(tables, &mut tables.root_res.clone()); + update_data_routes_from(tables, &mut tables.root_res.clone()); } pub(super) fn pubsub_linkstate_change(tables: &mut Tables, zid: &ZenohId, links: &[ZenohId]) { @@ -1273,10 +1262,12 @@ impl HatPubSubTrait for HatCode { Arc::new(pull_caches) } - fn compute_data_routes_(&self, tables: &Tables, res: &Arc) -> DataRoutes { - let mut routes = DataRoutes::default(); - let mut expr = RoutingExpr::new(res, ""); - + fn compute_data_routes_( + &self, + tables: &Tables, + routes: &mut DataRoutes, + expr: &mut RoutingExpr, + ) { let indexes = hat!(tables) .routers_net .as_ref() @@ -1291,12 +1282,11 @@ impl HatPubSubTrait for HatCode { for idx in &indexes { routes.routers[idx.index()] = - self.compute_data_route(tables, &mut expr, idx.index() as NodeId, WhatAmI::Router); + self.compute_data_route(tables, expr, idx.index() as NodeId, WhatAmI::Router); } routes.peers.resize_with(1, || Arc::new(HashMap::new())); - routes.peers[0] = - self.compute_data_route(tables, &mut expr, NodeId::default(), WhatAmI::Peer); + routes.peers[0] = self.compute_data_route(tables, expr, NodeId::default(), WhatAmI::Peer); if hat!(tables).full_net(WhatAmI::Peer) { let indexes = hat!(tables) @@ -1312,78 +1302,12 @@ impl HatPubSubTrait for HatCode { .resize_with(max_idx.index() + 1, || Arc::new(HashMap::new())); for idx in &indexes { - routes.peers[idx.index()] = self.compute_data_route( - tables, - &mut expr, - idx.index() as NodeId, - WhatAmI::Peer, - ); + routes.peers[idx.index()] = + self.compute_data_route(tables, expr, idx.index() as NodeId, WhatAmI::Peer); } } routes.clients.resize_with(1, || Arc::new(HashMap::new())); routes.clients[0] = - self.compute_data_route(tables, &mut expr, NodeId::default(), WhatAmI::Client); - - // routes.matching_pulls = Some(self.compute_matching_pulls(tables, &mut expr)); - routes - } - - fn compute_data_routes(&self, tables: &mut Tables, res: &mut Arc) { - if res.context.is_some() { - let mut res_mut = res.clone(); - let res_mut = get_mut_unchecked(&mut res_mut); - let mut expr = RoutingExpr::new(res, ""); - - let indexes = hat!(tables) - .routers_net - .as_ref() - .unwrap() - .graph - .node_indices() - .collect::>(); - let max_idx = indexes.iter().max().unwrap(); - let routers_data_routes = &mut res_mut.context_mut().data_routes.routers; - routers_data_routes.clear(); - routers_data_routes.resize_with(max_idx.index() + 1, || Arc::new(HashMap::new())); - - for idx in &indexes { - routers_data_routes[idx.index()] = 
self.compute_data_route( - tables, - &mut expr, - idx.index() as NodeId, - WhatAmI::Router, - ); - } - - let peers_data_routes = &mut res_mut.context_mut().data_routes.peers; - peers_data_routes.clear(); - peers_data_routes.resize_with(max_idx.index() + 1, || Arc::new(HashMap::new())); - peers_data_routes[0] = - self.compute_data_route(tables, &mut expr, NodeId::default(), WhatAmI::Peer); - - if hat!(tables).full_net(WhatAmI::Peer) { - let indexes = hat!(tables) - .peers_net - .as_ref() - .unwrap() - .graph - .node_indices() - .collect::>(); - let max_idx = indexes.iter().max().unwrap(); - let peers_data_routes = &mut res_mut.context_mut().data_routes.peers; - peers_data_routes.clear(); - peers_data_routes.resize_with(max_idx.index() + 1, || Arc::new(HashMap::new())); - - for idx in &indexes { - peers_data_routes[idx.index()] = self.compute_data_route( - tables, - &mut expr, - idx.index() as NodeId, - WhatAmI::Peer, - ); - } - } - // res_mut.context_mut().matching_pulls = self.compute_matching_pulls(tables, &mut expr); - } + self.compute_data_route(tables, expr, NodeId::default(), WhatAmI::Client); } } diff --git a/zenoh/src/net/routing/hat/router/queries.rs b/zenoh/src/net/routing/hat/router/queries.rs index b0c716ad8d..2834dde8b2 100644 --- a/zenoh/src/net/routing/hat/router/queries.rs +++ b/zenoh/src/net/routing/hat/router/queries.rs @@ -395,7 +395,7 @@ fn declare_router_queryable( drop(wtables); let rtables = zread!(tables.tables); - let matches_query_routes = compute_matches_query_routes_(&rtables, &res); + let matches_query_routes = compute_matches_query_routes(&rtables, &res); drop(rtables); let wtables = zwrite!(tables.tables); @@ -474,7 +474,7 @@ fn declare_peer_queryable( drop(wtables); let rtables = zread!(tables.tables); - let matches_query_routes = compute_matches_query_routes_(&rtables, &res); + let matches_query_routes = compute_matches_query_routes(&rtables, &res); drop(rtables); let wtables = zwrite!(tables.tables); @@ -556,7 +556,7 @@ fn declare_client_queryable( drop(wtables); let rtables = zread!(tables.tables); - let matches_query_routes = compute_matches_query_routes_(&rtables, &res); + let matches_query_routes = compute_matches_query_routes(&rtables, &res); drop(rtables); let wtables = zwrite!(tables.tables); @@ -793,7 +793,7 @@ fn forget_router_queryable( drop(wtables); let rtables = zread!(tables.tables); - let matches_query_routes = compute_matches_query_routes_(&rtables, &res); + let matches_query_routes = compute_matches_query_routes(&rtables, &res); drop(rtables); let wtables = zwrite!(tables.tables); @@ -860,7 +860,7 @@ fn forget_peer_queryable( drop(wtables); let rtables = zread!(tables.tables); - let matches_query_routes = compute_matches_query_routes_(&rtables, &res); + let matches_query_routes = compute_matches_query_routes(&rtables, &res); drop(rtables); let wtables = zwrite!(tables.tables); @@ -941,7 +941,7 @@ fn forget_client_queryable( drop(wtables); let rtables = zread!(tables.tables); - let matches_query_routes = compute_matches_query_routes_(&rtables, &res); + let matches_query_routes = compute_matches_query_routes(&rtables, &res); drop(rtables); let wtables = zwrite!(tables.tables); @@ -1027,12 +1027,7 @@ pub(super) fn queries_remove_node(tables: &mut Tables, node: &ZenohId, net_type: for mut res in qabls { unregister_router_queryable(tables, &mut res, node); - let matches_query_routes = compute_matches_query_routes_(tables, &res); - for (mut res, query_routes) in matches_query_routes { - get_mut_unchecked(&mut res) - .context_mut() - 
.update_query_routes(query_routes); - } + update_matches_query_routes(tables, &res); Resource::clean(&mut res); } } @@ -1057,12 +1052,7 @@ pub(super) fn queries_remove_node(tables: &mut Tables, node: &ZenohId, net_type: register_router_queryable(tables, None, &mut res, &local_info, tables.zid); } - let matches_query_routes = compute_matches_query_routes_(tables, &res); - for (mut res, query_routes) in matches_query_routes { - get_mut_unchecked(&mut res) - .context_mut() - .update_query_routes(query_routes); - } + update_matches_query_routes(tables, &res); Resource::clean(&mut res) } } @@ -1438,10 +1428,12 @@ impl HatQueriesTrait for HatCode { result } - fn compute_query_routes_(&self, tables: &Tables, res: &Arc) -> QueryRoutes { - let mut routes = QueryRoutes::default(); - let mut expr = RoutingExpr::new(res, ""); - + fn compute_query_routes_( + &self, + tables: &Tables, + routes: &mut QueryRoutes, + expr: &mut RoutingExpr, + ) { let indexes = hat!(tables) .routers_net .as_ref() @@ -1456,14 +1448,13 @@ impl HatQueriesTrait for HatCode { for idx in &indexes { routes.routers[idx.index()] = - self.compute_query_route(tables, &mut expr, idx.index() as NodeId, WhatAmI::Router); + self.compute_query_route(tables, expr, idx.index() as NodeId, WhatAmI::Router); } routes .peers .resize_with(1, || Arc::new(QueryTargetQablSet::new())); - routes.peers[0] = - self.compute_query_route(tables, &mut expr, NodeId::default(), WhatAmI::Peer); + routes.peers[0] = self.compute_query_route(tables, expr, NodeId::default(), WhatAmI::Peer); if hat!(tables).full_net(WhatAmI::Peer) { let indexes = hat!(tables) @@ -1479,81 +1470,14 @@ impl HatQueriesTrait for HatCode { .resize_with(max_idx.index() + 1, || Arc::new(QueryTargetQablSet::new())); for idx in &indexes { - routes.peers[idx.index()] = self.compute_query_route( - tables, - &mut expr, - idx.index() as NodeId, - WhatAmI::Peer, - ); + routes.peers[idx.index()] = + self.compute_query_route(tables, expr, idx.index() as NodeId, WhatAmI::Peer); } } routes .clients .resize_with(1, || Arc::new(QueryTargetQablSet::new())); routes.clients[0] = - self.compute_query_route(tables, &mut expr, NodeId::default(), WhatAmI::Client); - - routes - } - - fn compute_query_routes(&self, tables: &mut Tables, res: &mut Arc) { - if res.context.is_some() { - let mut res_mut = res.clone(); - let res_mut = get_mut_unchecked(&mut res_mut); - let mut expr = RoutingExpr::new(res, ""); - - let indexes = hat!(tables) - .routers_net - .as_ref() - .unwrap() - .graph - .node_indices() - .collect::>(); - let max_idx = indexes.iter().max().unwrap(); - let routers_query_routes = &mut res_mut.context_mut().query_routes.routers; - routers_query_routes.clear(); - routers_query_routes - .resize_with(max_idx.index() + 1, || Arc::new(QueryTargetQablSet::new())); - - for idx in &indexes { - routers_query_routes[idx.index()] = self.compute_query_route( - tables, - &mut expr, - idx.index() as NodeId, - WhatAmI::Router, - ); - } - - let peers_query_routes = &mut res_mut.context_mut().query_routes.peers; - peers_query_routes.clear(); - peers_query_routes - .resize_with(max_idx.index() + 1, || Arc::new(QueryTargetQablSet::new())); - peers_query_routes[0] = - self.compute_query_route(tables, &mut expr, NodeId::default(), WhatAmI::Peer); - - if hat!(tables).full_net(WhatAmI::Peer) { - let indexes = hat!(tables) - .peers_net - .as_ref() - .unwrap() - .graph - .node_indices() - .collect::>(); - let max_idx = indexes.iter().max().unwrap(); - let peers_query_routes = &mut res_mut.context_mut().query_routes.peers; - 
peers_query_routes.clear(); - peers_query_routes - .resize_with(max_idx.index() + 1, || Arc::new(QueryTargetQablSet::new())); - - for idx in &indexes { - peers_query_routes[idx.index()] = self.compute_query_route( - tables, - &mut expr, - idx.index() as NodeId, - WhatAmI::Peer, - ); - } - } - } + self.compute_query_route(tables, expr, NodeId::default(), WhatAmI::Client); } } diff --git a/zenoh/src/net/routing/router.rs b/zenoh/src/net/routing/router.rs index c97ce8225e..12c420ec03 100644 --- a/zenoh/src/net/routing/router.rs +++ b/zenoh/src/net/routing/router.rs @@ -200,7 +200,7 @@ impl Router { // recompute routes let mut root_res = tables.root_res.clone(); - compute_data_routes_from(&mut tables, &mut root_res); + update_data_routes_from(&mut tables, &mut root_res); Ok(()) } @@ -234,7 +234,7 @@ impl Router { // recompute routes let mut root_res = tables.root_res.clone(); - compute_data_routes_from(&mut tables, &mut root_res); + update_data_routes_from(&mut tables, &mut root_res); Ok(Arc::new(DeMux::new( Face { tables: self.tables.clone(), From 43fcb78f7196dda3e2edc83d05f190704c2ab3f8 Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Fri, 22 Dec 2023 22:42:18 +0100 Subject: [PATCH 024/122] Reintroduce matching pulls precomputation --- zenoh/src/net/routing/dispatcher/pubsub.rs | 24 +++++++---- zenoh/src/net/routing/dispatcher/resource.rs | 15 ++++--- zenoh/src/net/routing/dispatcher/tables.rs | 2 +- zenoh/src/net/routing/hat/client/mod.rs | 9 +++- zenoh/src/net/routing/hat/client/pubsub.rs | 21 +++++++--- .../src/net/routing/hat/linkstate_peer/mod.rs | 9 +++- .../net/routing/hat/linkstate_peer/pubsub.rs | 31 ++++++++++---- zenoh/src/net/routing/hat/mod.rs | 34 +++++++++++++-- zenoh/src/net/routing/hat/p2p_peer/mod.rs | 9 +++- zenoh/src/net/routing/hat/p2p_peer/pubsub.rs | 21 +++++++--- zenoh/src/net/routing/hat/router/mod.rs | 9 +++- zenoh/src/net/routing/hat/router/pubsub.rs | 41 ++++++++++++++----- 12 files changed, 170 insertions(+), 55 deletions(-) diff --git a/zenoh/src/net/routing/dispatcher/pubsub.rs b/zenoh/src/net/routing/dispatcher/pubsub.rs index 85bd72aec5..e699ea3892 100644 --- a/zenoh/src/net/routing/dispatcher/pubsub.rs +++ b/zenoh/src/net/routing/dispatcher/pubsub.rs @@ -27,6 +27,7 @@ use zenoh_sync::get_mut_unchecked; pub(crate) fn update_data_routes_from(tables: &mut Tables, res: &mut Arc) { tables.hat_code.clone().update_data_routes(tables, res); + tables.hat_code.clone().update_matching_pulls(tables, res); let res = get_mut_unchecked(res); for child in res.childs.values_mut() { update_data_routes_from(tables, child); @@ -36,18 +37,22 @@ pub(crate) fn update_data_routes_from(tables: &mut Tables, res: &mut Arc( tables: &'a Tables, res: &'a Arc, -) -> Vec<(Arc, DataRoutes)> { +) -> Vec<(Arc, DataRoutes, Arc)> { let mut routes = vec![]; if res.context.is_some() { + let mut expr = RoutingExpr::new(res, ""); routes.push(( res.clone(), - tables.hat_code.compute_data_routes(tables, res), + tables.hat_code.compute_data_routes(tables, &mut expr), + tables.hat_code.compute_matching_pulls(tables, &mut expr), )); for match_ in &res.context().matches { let match_ = match_.upgrade().unwrap(); if !Arc::ptr_eq(&match_, res) { - let match_routes = tables.hat_code.compute_data_routes(tables, &match_); - routes.push((match_, match_routes)); + let mut expr = RoutingExpr::new(&match_, ""); + let match_routes = tables.hat_code.compute_data_routes(tables, &mut expr); + let matching_pulls = tables.hat_code.compute_matching_pulls(tables, &mut expr); + routes.push((match_, match_routes, 
matching_pulls)); } } } @@ -57,10 +62,12 @@ pub(crate) fn compute_matches_data_routes<'a>( pub(crate) fn update_matches_data_routes<'a>(tables: &'a mut Tables, res: &'a mut Arc) { if res.context.is_some() { tables.hat_code.update_data_routes(tables, res); + tables.hat_code.update_matching_pulls(tables, res); for match_ in &res.context().matches { - let match_ = match_.upgrade().unwrap(); + let mut match_ = match_.upgrade().unwrap(); if !Arc::ptr_eq(&match_, res) { - tables.hat_code.update_data_routes(tables, &match_); + tables.hat_code.update_data_routes(tables, &mut match_); + tables.hat_code.update_matching_pulls(tables, &mut match_); } } } @@ -75,6 +82,9 @@ pub(crate) fn disable_matches_data_routes(_tables: &mut Tables, res: &mut Arc Arc { res.as_ref() .and_then(|res| res.context.as_ref()) - .map(|ctx| ctx.matching_pulls.clone()) + .and_then(|ctx| ctx.matching_pulls.clone()) .unwrap_or_else(|| tables.hat_code.compute_matching_pulls(tables, expr)) } diff --git a/zenoh/src/net/routing/dispatcher/resource.rs b/zenoh/src/net/routing/dispatcher/resource.rs index 00b7b00667..37e3a5b91b 100644 --- a/zenoh/src/net/routing/dispatcher/resource.rs +++ b/zenoh/src/net/routing/dispatcher/resource.rs @@ -112,7 +112,7 @@ impl QueryRoutes { pub(crate) struct ResourceContext { pub(crate) matches: Vec>, - pub(crate) matching_pulls: Arc, + pub(crate) matching_pulls: Option>, pub(crate) hat: Box, pub(crate) valid_data_routes: bool, pub(crate) data_routes: DataRoutes, @@ -124,7 +124,7 @@ impl ResourceContext { fn new(hat: Box) -> ResourceContext { ResourceContext { matches: Vec::new(), - matching_pulls: Arc::new(Vec::new()), + matching_pulls: None, hat, valid_data_routes: false, data_routes: DataRoutes::default(), @@ -135,9 +135,6 @@ impl ResourceContext { pub(crate) fn update_data_routes(&mut self, data_routes: DataRoutes) { self.valid_data_routes = true; - // if let Some(matching_pulls) = data_routes.matching_pulls { - // self.matching_pulls = matching_pulls; - // } self.data_routes = data_routes; } @@ -153,6 +150,14 @@ impl ResourceContext { pub(crate) fn disable_query_routes(&mut self) { self.valid_query_routes = false; } + + pub(crate) fn update_matching_pulls(&mut self, pulls: Arc) { + self.matching_pulls = Some(pulls); + } + + pub(crate) fn disable_matching_pulls(&mut self) { + self.matching_pulls = None; + } } pub struct Resource { diff --git a/zenoh/src/net/routing/dispatcher/tables.rs b/zenoh/src/net/routing/dispatcher/tables.rs index 787689e1e6..2b10f41b5d 100644 --- a/zenoh/src/net/routing/dispatcher/tables.rs +++ b/zenoh/src/net/routing/dispatcher/tables.rs @@ -134,7 +134,7 @@ impl Tables { } fn compute_routes(&mut self, res: &mut Arc) { - self.hat_code.clone().compute_data_routes(self, res); + self.hat_code.clone().update_data_routes(self, res); self.hat_code.clone().compute_query_routes(self, res); } diff --git a/zenoh/src/net/routing/hat/client/mod.rs b/zenoh/src/net/routing/hat/client/mod.rs index 67c56ebdf3..f8f24ad86c 100644 --- a/zenoh/src/net/routing/hat/client/mod.rs +++ b/zenoh/src/net/routing/hat/client/mod.rs @@ -186,9 +186,11 @@ impl HatBaseTrait for HatCode { let mut matches_query_routes = vec![]; let rtables = zread!(tables.tables); for _match in subs_matches.drain(..) 
{ + let mut expr = RoutingExpr::new(&_match, ""); matches_data_routes.push(( _match.clone(), - rtables.hat_code.compute_data_routes(&rtables, &_match), + rtables.hat_code.compute_data_routes(&rtables, &mut expr), + rtables.hat_code.compute_matching_pulls(&rtables, &mut expr), )); } for _match in qabls_matches.drain(..) { @@ -200,10 +202,13 @@ impl HatBaseTrait for HatCode { drop(rtables); let mut wtables = zwrite!(tables.tables); - for (mut res, data_routes) in matches_data_routes { + for (mut res, data_routes, matching_pulls) in matches_data_routes { get_mut_unchecked(&mut res) .context_mut() .update_data_routes(data_routes); + get_mut_unchecked(&mut res) + .context_mut() + .update_matching_pulls(matching_pulls); Resource::clean(&mut res); } for (mut res, query_routes) in matches_query_routes { diff --git a/zenoh/src/net/routing/hat/client/pubsub.rs b/zenoh/src/net/routing/hat/client/pubsub.rs index 78291381b2..97c5f4b927 100644 --- a/zenoh/src/net/routing/hat/client/pubsub.rs +++ b/zenoh/src/net/routing/hat/client/pubsub.rs @@ -188,10 +188,13 @@ fn declare_client_subscription( drop(rtables); let wtables = zwrite!(tables.tables); - for (mut res, data_routes) in matches_data_routes { + for (mut res, data_routes, matching_pulls) in matches_data_routes { get_mut_unchecked(&mut res) .context_mut() .update_data_routes(data_routes); + get_mut_unchecked(&mut res) + .context_mut() + .update_matching_pulls(matching_pulls); } drop(wtables); } @@ -293,10 +296,13 @@ fn forget_client_subscription( drop(rtables); let wtables = zwrite!(tables.tables); - for (mut res, data_routes) in matches_data_routes { + for (mut res, data_routes, matching_pulls) in matches_data_routes { get_mut_unchecked(&mut res) .context_mut() .update_data_routes(data_routes); + get_mut_unchecked(&mut res) + .context_mut() + .update_matching_pulls(matching_pulls); } Resource::clean(&mut res); drop(wtables); @@ -414,12 +420,16 @@ impl HatPubSubTrait for HatCode { Arc::new(route) } - fn compute_matching_pulls(&self, tables: &Tables, expr: &mut RoutingExpr) -> Arc { - let mut pull_caches = vec![]; + fn compute_matching_pulls_( + &self, + tables: &Tables, + pull_caches: &mut PullCaches, + expr: &mut RoutingExpr, + ) { let ke = if let Ok(ke) = OwnedKeyExpr::try_from(expr.full_expr()) { ke } else { - return Arc::new(pull_caches); + return; }; let res = Resource::get_resource(expr.prefix, expr.suffix); let matches = res @@ -438,7 +448,6 @@ impl HatPubSubTrait for HatCode { } } } - Arc::new(pull_caches) } fn compute_data_routes_( diff --git a/zenoh/src/net/routing/hat/linkstate_peer/mod.rs b/zenoh/src/net/routing/hat/linkstate_peer/mod.rs index 21d2afdec8..fad6b98384 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/mod.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/mod.rs @@ -302,9 +302,11 @@ impl HatBaseTrait for HatCode { let mut matches_query_routes = vec![]; let rtables = zread!(tables.tables); for _match in subs_matches.drain(..) { + let mut expr = RoutingExpr::new(&_match, ""); matches_data_routes.push(( _match.clone(), - rtables.hat_code.compute_data_routes(&rtables, &_match), + rtables.hat_code.compute_data_routes(&rtables, &mut expr), + rtables.hat_code.compute_matching_pulls(&rtables, &mut expr), )); } for _match in qabls_matches.drain(..) 
{ @@ -316,10 +318,13 @@ impl HatBaseTrait for HatCode { drop(rtables); let mut wtables = zwrite!(tables.tables); - for (mut res, data_routes) in matches_data_routes { + for (mut res, data_routes, matching_pulls) in matches_data_routes { get_mut_unchecked(&mut res) .context_mut() .update_data_routes(data_routes); + get_mut_unchecked(&mut res) + .context_mut() + .update_matching_pulls(matching_pulls); Resource::clean(&mut res); } for (mut res, query_routes) in matches_query_routes { diff --git a/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs b/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs index 647c864010..b8aadd50b6 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs @@ -228,10 +228,13 @@ fn declare_peer_subscription( drop(rtables); let wtables = zwrite!(tables.tables); - for (mut res, data_routes) in matches_data_routes { + for (mut res, data_routes, matching_pulls) in matches_data_routes { get_mut_unchecked(&mut res) .context_mut() .update_data_routes(data_routes); + get_mut_unchecked(&mut res) + .context_mut() + .update_matching_pulls(matching_pulls); } drop(wtables); } @@ -328,10 +331,13 @@ fn declare_client_subscription( drop(rtables); let wtables = zwrite!(tables.tables); - for (mut res, data_routes) in matches_data_routes { + for (mut res, data_routes, matching_pulls) in matches_data_routes { get_mut_unchecked(&mut res) .context_mut() .update_data_routes(data_routes); + get_mut_unchecked(&mut res) + .context_mut() + .update_matching_pulls(matching_pulls); } drop(wtables); } @@ -509,10 +515,13 @@ fn forget_peer_subscription( let matches_data_routes = compute_matches_data_routes(&rtables, &res); drop(rtables); let wtables = zwrite!(tables.tables); - for (mut res, data_routes) in matches_data_routes { + for (mut res, data_routes, matching_pulls) in matches_data_routes { get_mut_unchecked(&mut res) .context_mut() .update_data_routes(data_routes); + get_mut_unchecked(&mut res) + .context_mut() + .update_matching_pulls(matching_pulls); } Resource::clean(&mut res); drop(wtables); @@ -583,10 +592,13 @@ fn forget_client_subscription( drop(rtables); let wtables = zwrite!(tables.tables); - for (mut res, data_routes) in matches_data_routes { + for (mut res, data_routes, matching_pulls) in matches_data_routes { get_mut_unchecked(&mut res) .context_mut() .update_data_routes(data_routes); + get_mut_unchecked(&mut res) + .context_mut() + .update_matching_pulls(matching_pulls); } Resource::clean(&mut res); drop(wtables); @@ -827,12 +839,16 @@ impl HatPubSubTrait for HatCode { Arc::new(route) } - fn compute_matching_pulls(&self, tables: &Tables, expr: &mut RoutingExpr) -> Arc { - let mut pull_caches = vec![]; + fn compute_matching_pulls_( + &self, + tables: &Tables, + pull_caches: &mut PullCaches, + expr: &mut RoutingExpr, + ) { let ke = if let Ok(ke) = OwnedKeyExpr::try_from(expr.full_expr()) { ke } else { - return Arc::new(pull_caches); + return; }; let res = Resource::get_resource(expr.prefix, expr.suffix); let matches = res @@ -851,7 +867,6 @@ impl HatPubSubTrait for HatCode { } } } - Arc::new(pull_caches) } fn compute_data_routes_( diff --git a/zenoh/src/net/routing/hat/mod.rs b/zenoh/src/net/routing/hat/mod.rs index f6b9aadfd6..a14d029f56 100644 --- a/zenoh/src/net/routing/hat/mod.rs +++ b/zenoh/src/net/routing/hat/mod.rs @@ -137,7 +137,33 @@ pub(crate) trait HatPubSubTrait { source_type: WhatAmI, ) -> Arc; - fn compute_matching_pulls(&self, tables: &Tables, expr: &mut RoutingExpr) -> Arc; + fn compute_matching_pulls_( 
+ &self, + tables: &Tables, + pull_caches: &mut PullCaches, + expr: &mut RoutingExpr, + ); + + fn compute_matching_pulls(&self, tables: &Tables, expr: &mut RoutingExpr) -> Arc { + let mut pull_caches = PullCaches::default(); + self.compute_matching_pulls_(tables, &mut pull_caches, expr); + Arc::new(pull_caches) + } + + fn update_matching_pulls(&self, tables: &Tables, res: &mut Arc) { + if res.context.is_some() { + let mut res_mut = res.clone(); + let res_mut = get_mut_unchecked(&mut res_mut); + if res_mut.context_mut().matching_pulls.is_none() { + res_mut.context_mut().matching_pulls = Some(Arc::new(PullCaches::default())); + } + self.compute_matching_pulls_( + tables, + get_mut_unchecked(res_mut.context_mut().matching_pulls.as_mut().unwrap()), + &mut RoutingExpr::new(res, ""), + ); + } + } fn compute_data_routes_( &self, @@ -146,13 +172,13 @@ pub(crate) trait HatPubSubTrait { expr: &mut RoutingExpr, ); - fn compute_data_routes(&self, tables: &Tables, res: &Arc) -> DataRoutes { + fn compute_data_routes(&self, tables: &Tables, expr: &mut RoutingExpr) -> DataRoutes { let mut routes = DataRoutes::default(); - self.compute_data_routes_(tables, &mut routes, &mut RoutingExpr::new(res, "")); + self.compute_data_routes_(tables, &mut routes, expr); routes } - fn update_data_routes(&self, tables: &Tables, res: &Arc) { + fn update_data_routes(&self, tables: &Tables, res: &mut Arc) { if res.context.is_some() { let mut res_mut = res.clone(); let res_mut = get_mut_unchecked(&mut res_mut); diff --git a/zenoh/src/net/routing/hat/p2p_peer/mod.rs b/zenoh/src/net/routing/hat/p2p_peer/mod.rs index 1bca2f335b..2e761acf39 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/mod.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/mod.rs @@ -230,9 +230,11 @@ impl HatBaseTrait for HatCode { let mut matches_query_routes = vec![]; let rtables = zread!(tables.tables); for _match in subs_matches.drain(..) { + let mut expr = RoutingExpr::new(&_match, ""); matches_data_routes.push(( _match.clone(), - rtables.hat_code.compute_data_routes(&rtables, &_match), + rtables.hat_code.compute_data_routes(&rtables, &mut expr), + rtables.hat_code.compute_matching_pulls(&rtables, &mut expr), )); } for _match in qabls_matches.drain(..) 
{ @@ -244,10 +246,13 @@ impl HatBaseTrait for HatCode { drop(rtables); let mut wtables = zwrite!(tables.tables); - for (mut res, data_routes) in matches_data_routes { + for (mut res, data_routes, matching_pulls) in matches_data_routes { get_mut_unchecked(&mut res) .context_mut() .update_data_routes(data_routes); + get_mut_unchecked(&mut res) + .context_mut() + .update_matching_pulls(matching_pulls); Resource::clean(&mut res); } for (mut res, query_routes) in matches_query_routes { diff --git a/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs b/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs index 1e3fdd3a6b..e3685d66f2 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs @@ -188,10 +188,13 @@ fn declare_client_subscription( drop(rtables); let wtables = zwrite!(tables.tables); - for (mut res, data_routes) in matches_data_routes { + for (mut res, data_routes, matching_pulls) in matches_data_routes { get_mut_unchecked(&mut res) .context_mut() .update_data_routes(data_routes); + get_mut_unchecked(&mut res) + .context_mut() + .update_matching_pulls(matching_pulls); } drop(wtables); } @@ -293,10 +296,13 @@ fn forget_client_subscription( drop(rtables); let wtables = zwrite!(tables.tables); - for (mut res, data_routes) in matches_data_routes { + for (mut res, data_routes, matching_pulls) in matches_data_routes { get_mut_unchecked(&mut res) .context_mut() .update_data_routes(data_routes); + get_mut_unchecked(&mut res) + .context_mut() + .update_matching_pulls(matching_pulls); } Resource::clean(&mut res); drop(wtables); @@ -414,12 +420,16 @@ impl HatPubSubTrait for HatCode { Arc::new(route) } - fn compute_matching_pulls(&self, tables: &Tables, expr: &mut RoutingExpr) -> Arc { - let mut pull_caches = vec![]; + fn compute_matching_pulls_( + &self, + tables: &Tables, + pull_caches: &mut PullCaches, + expr: &mut RoutingExpr, + ) { let ke = if let Ok(ke) = OwnedKeyExpr::try_from(expr.full_expr()) { ke } else { - return Arc::new(pull_caches); + return; }; let res = Resource::get_resource(expr.prefix, expr.suffix); let matches = res @@ -438,7 +448,6 @@ impl HatPubSubTrait for HatCode { } } } - Arc::new(pull_caches) } fn compute_data_routes_( diff --git a/zenoh/src/net/routing/hat/router/mod.rs b/zenoh/src/net/routing/hat/router/mod.rs index dea3667479..b6adabe2a7 100644 --- a/zenoh/src/net/routing/hat/router/mod.rs +++ b/zenoh/src/net/routing/hat/router/mod.rs @@ -476,9 +476,11 @@ impl HatBaseTrait for HatCode { let mut matches_query_routes = vec![]; let rtables = zread!(tables.tables); for _match in subs_matches.drain(..) { + let mut expr = RoutingExpr::new(&_match, ""); matches_data_routes.push(( _match.clone(), - rtables.hat_code.compute_data_routes(&rtables, &_match), + rtables.hat_code.compute_data_routes(&rtables, &mut expr), + rtables.hat_code.compute_matching_pulls(&rtables, &mut expr), )); } for _match in qabls_matches.drain(..) 
{ @@ -490,10 +492,13 @@ impl HatBaseTrait for HatCode { drop(rtables); let mut wtables = zwrite!(tables.tables); - for (mut res, data_routes) in matches_data_routes { + for (mut res, data_routes, matching_pulls) in matches_data_routes { get_mut_unchecked(&mut res) .context_mut() .update_data_routes(data_routes); + get_mut_unchecked(&mut res) + .context_mut() + .update_matching_pulls(matching_pulls); Resource::clean(&mut res); } for (mut res, query_routes) in matches_query_routes { diff --git a/zenoh/src/net/routing/hat/router/pubsub.rs b/zenoh/src/net/routing/hat/router/pubsub.rs index 14b9a2afda..b6a133c2eb 100644 --- a/zenoh/src/net/routing/hat/router/pubsub.rs +++ b/zenoh/src/net/routing/hat/router/pubsub.rs @@ -251,10 +251,13 @@ fn declare_router_subscription( drop(rtables); let wtables = zwrite!(tables.tables); - for (mut res, data_routes) in matches_data_routes { + for (mut res, data_routes, matching_pulls) in matches_data_routes { get_mut_unchecked(&mut res) .context_mut() .update_data_routes(data_routes); + get_mut_unchecked(&mut res) + .context_mut() + .update_matching_pulls(matching_pulls); } drop(wtables); } @@ -331,10 +334,13 @@ fn declare_peer_subscription( drop(rtables); let wtables = zwrite!(tables.tables); - for (mut res, data_routes) in matches_data_routes { + for (mut res, data_routes, matching_pulls) in matches_data_routes { get_mut_unchecked(&mut res) .context_mut() .update_data_routes(data_routes); + get_mut_unchecked(&mut res) + .context_mut() + .update_matching_pulls(matching_pulls); } drop(wtables); } @@ -431,10 +437,13 @@ fn declare_client_subscription( drop(rtables); let wtables = zwrite!(tables.tables); - for (mut res, data_routes) in matches_data_routes { + for (mut res, data_routes, matching_pulls) in matches_data_routes { get_mut_unchecked(&mut res) .context_mut() .update_data_routes(data_routes); + get_mut_unchecked(&mut res) + .context_mut() + .update_matching_pulls(matching_pulls); } drop(wtables); } @@ -666,10 +675,13 @@ fn forget_router_subscription( let matches_data_routes = compute_matches_data_routes(&rtables, &res); drop(rtables); let wtables = zwrite!(tables.tables); - for (mut res, data_routes) in matches_data_routes { + for (mut res, data_routes, matching_pulls) in matches_data_routes { get_mut_unchecked(&mut res) .context_mut() .update_data_routes(data_routes); + get_mut_unchecked(&mut res) + .context_mut() + .update_matching_pulls(matching_pulls); } Resource::clean(&mut res); drop(wtables); @@ -733,10 +745,13 @@ fn forget_peer_subscription( let matches_data_routes = compute_matches_data_routes(&rtables, &res); drop(rtables); let wtables = zwrite!(tables.tables); - for (mut res, data_routes) in matches_data_routes { + for (mut res, data_routes, matching_pulls) in matches_data_routes { get_mut_unchecked(&mut res) .context_mut() .update_data_routes(data_routes); + get_mut_unchecked(&mut res) + .context_mut() + .update_matching_pulls(matching_pulls); } Resource::clean(&mut res); drop(wtables); @@ -810,10 +825,13 @@ fn forget_client_subscription( drop(rtables); let wtables = zwrite!(tables.tables); - for (mut res, data_routes) in matches_data_routes { + for (mut res, data_routes, matching_pulls) in matches_data_routes { get_mut_unchecked(&mut res) .context_mut() .update_data_routes(data_routes); + get_mut_unchecked(&mut res) + .context_mut() + .update_matching_pulls(matching_pulls); } Resource::clean(&mut res); drop(wtables); @@ -1235,12 +1253,16 @@ impl HatPubSubTrait for HatCode { Arc::new(route) } - fn compute_matching_pulls(&self, tables: &Tables, 
expr: &mut RoutingExpr) -> Arc { - let mut pull_caches = vec![]; + fn compute_matching_pulls_( + &self, + tables: &Tables, + pull_caches: &mut PullCaches, + expr: &mut RoutingExpr, + ) { let ke = if let Ok(ke) = OwnedKeyExpr::try_from(expr.full_expr()) { ke } else { - return Arc::new(pull_caches); + return; }; let res = Resource::get_resource(expr.prefix, expr.suffix); let matches = res @@ -1259,7 +1281,6 @@ impl HatPubSubTrait for HatCode { } } } - Arc::new(pull_caches) } fn compute_data_routes_( From 7cd513ad7b4850b58ea7d33beacf2ebe09030af7 Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Fri, 5 Jan 2024 11:46:33 +0100 Subject: [PATCH 025/122] Perf improvements --- zenoh/src/net/primitives/demux.rs | 23 ++++-- zenoh/src/net/routing/dispatcher/face.rs | 1 + zenoh/src/net/routing/hat/client/mod.rs | 1 + .../src/net/routing/hat/linkstate_peer/mod.rs | 1 + zenoh/src/net/routing/hat/p2p_peer/mod.rs | 1 + zenoh/src/net/routing/hat/router/mod.rs | 1 + zenoh/src/net/routing/router.rs | 72 ++----------------- zenoh/src/net/runtime/mod.rs | 4 +- 8 files changed, 30 insertions(+), 74 deletions(-) diff --git a/zenoh/src/net/primitives/demux.rs b/zenoh/src/net/primitives/demux.rs index f9694a16f7..003d50d8bd 100644 --- a/zenoh/src/net/primitives/demux.rs +++ b/zenoh/src/net/primitives/demux.rs @@ -17,20 +17,30 @@ use std::any::Any; use zenoh_link::Link; use zenoh_protocol::network::{NetworkBody, NetworkMessage}; use zenoh_result::ZResult; -use zenoh_transport::TransportPeerEventHandler; +use zenoh_transport::{TransportPeerEventHandler, TransportUnicast}; pub struct DeMux { face: Face, + pub(crate) transport: Option, pub(crate) intercept: IngressIntercept, } impl DeMux { - pub(crate) fn new(face: Face, intercept: IngressIntercept) -> Self { - Self { face, intercept } + pub(crate) fn new( + face: Face, + transport: Option, + intercept: IngressIntercept, + ) -> Self { + Self { + face, + transport, + intercept, + } } } impl TransportPeerEventHandler for DeMux { + #[inline] fn handle_message(&self, msg: NetworkMessage) -> ZResult<()> { let ctx = RoutingContext::with_face(msg, self.face.clone()); let ctx = match self.intercept.intercept(ctx) { @@ -39,8 +49,8 @@ impl TransportPeerEventHandler for DeMux { }; match ctx.msg.body { - NetworkBody::Declare(m) => self.face.send_declare(m), NetworkBody::Push(m) => self.face.send_push(m), + NetworkBody::Declare(m) => self.face.send_declare(m), NetworkBody::Request(m) => self.face.send_request(m), NetworkBody::Response(m) => self.face.send_response(m), NetworkBody::ResponseFinal(m) => self.face.send_response_final(m), @@ -65,6 +75,11 @@ impl TransportPeerEventHandler for DeMux { fn closing(&self) { self.face.send_close(); + if let Some(transport) = self.transport.as_ref() { + let ctrl_lock = zlock!(self.face.tables.ctrl_lock); + let mut tables = zwrite!(self.face.tables.tables); + let _ = ctrl_lock.closing(&mut tables, &self.face.tables, transport); + } } fn closed(&self) {} diff --git a/zenoh/src/net/routing/dispatcher/face.rs b/zenoh/src/net/routing/dispatcher/face.rs index 4a8bfe64bf..b9562ca74a 100644 --- a/zenoh/src/net/routing/dispatcher/face.rs +++ b/zenoh/src/net/routing/dispatcher/face.rs @@ -156,6 +156,7 @@ impl Primitives for Face { drop(ctrl_lock); } + #[inline] fn send_push(&self, msg: Push) { full_reentrant_route_data( &self.tables.tables, diff --git a/zenoh/src/net/routing/hat/client/mod.rs b/zenoh/src/net/routing/hat/client/mod.rs index f8f24ad86c..b6480aa863 100644 --- a/zenoh/src/net/routing/hat/client/mod.rs +++ 
b/zenoh/src/net/routing/hat/client/mod.rs @@ -231,6 +231,7 @@ impl HatBaseTrait for HatCode { Ok(()) } + #[inline] fn map_routing_context( &self, _tables: &Tables, diff --git a/zenoh/src/net/routing/hat/linkstate_peer/mod.rs b/zenoh/src/net/routing/hat/linkstate_peer/mod.rs index fad6b98384..2f1e293237 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/mod.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/mod.rs @@ -373,6 +373,7 @@ impl HatBaseTrait for HatCode { Ok(()) } + #[inline] fn map_routing_context( &self, tables: &Tables, diff --git a/zenoh/src/net/routing/hat/p2p_peer/mod.rs b/zenoh/src/net/routing/hat/p2p_peer/mod.rs index 2e761acf39..98f591059a 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/mod.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/mod.rs @@ -294,6 +294,7 @@ impl HatBaseTrait for HatCode { Ok(()) } + #[inline] fn map_routing_context( &self, _tables: &Tables, diff --git a/zenoh/src/net/routing/hat/router/mod.rs b/zenoh/src/net/routing/hat/router/mod.rs index b6adabe2a7..656a9217bf 100644 --- a/zenoh/src/net/routing/hat/router/mod.rs +++ b/zenoh/src/net/routing/hat/router/mod.rs @@ -604,6 +604,7 @@ impl HatBaseTrait for HatCode { Ok(()) } + #[inline] fn map_routing_context( &self, tables: &Tables, diff --git a/zenoh/src/net/routing/router.rs b/zenoh/src/net/routing/router.rs index 12c420ec03..60fcfccfa9 100644 --- a/zenoh/src/net/routing/router.rs +++ b/zenoh/src/net/routing/router.rs @@ -27,18 +27,13 @@ use crate::net::primitives::EPrimitives; use crate::net::primitives::McastMux; use crate::net::primitives::Mux; use crate::net::routing::interceptor::IngressIntercept; -use std::any::Any; use std::str::FromStr; use std::sync::Arc; use std::sync::{Mutex, RwLock}; use uhlc::HLC; use zenoh_config::Config; -use zenoh_link::Link; use zenoh_protocol::core::{WhatAmI, ZenohId}; -use zenoh_protocol::network::{NetworkBody, NetworkMessage}; -use zenoh_transport::{ - TransportMulticast, TransportPeer, TransportPeerEventHandler, TransportUnicast, -}; +use zenoh_transport::{TransportMulticast, TransportPeer, TransportUnicast}; // use zenoh_collections::Timer; use zenoh_result::ZResult; @@ -106,10 +101,7 @@ impl Router { Arc::new(face) } - pub fn new_transport_unicast( - &self, - transport: TransportUnicast, - ) -> ZResult> { + pub fn new_transport_unicast(&self, transport: TransportUnicast) -> ZResult> { let ctrl_lock = zlock!(self.tables.ctrl_lock); let mut tables = zwrite!(self.tables.tables); @@ -162,12 +154,7 @@ impl Router { ctrl_lock.new_transport_unicast_face(&mut tables, &self.tables, &mut face, &transport)?; - Ok(Arc::new(LinkStateInterceptor::new( - transport.clone(), - self.tables.clone(), - face, - ingress, - ))) + Ok(Arc::new(DeMux::new(face, Some(transport), ingress))) } pub fn new_transport_multicast(&self, transport: TransportMulticast) -> ZResult<()> { @@ -240,59 +227,8 @@ impl Router { tables: self.tables.clone(), state: face_state, }, + None, intercept, ))) } } - -pub struct LinkStateInterceptor { - pub(crate) transport: TransportUnicast, - pub(crate) tables: Arc, - pub(crate) demux: DeMux, -} - -impl LinkStateInterceptor { - fn new( - transport: TransportUnicast, - tables: Arc, - face: Face, - ingress: IngressIntercept, - ) -> Self { - LinkStateInterceptor { - transport, - tables, - demux: DeMux::new(face, ingress), - } - } -} - -impl TransportPeerEventHandler for LinkStateInterceptor { - fn handle_message(&self, msg: NetworkMessage) -> ZResult<()> { - log::trace!("Recv {:?}", msg); - match msg.body { - NetworkBody::OAM(oam) => { - let ctrl_lock = 
zlock!(self.tables.ctrl_lock); - let mut tables = zwrite!(self.tables.tables); - ctrl_lock.handle_oam(&mut tables, &self.tables, oam, &self.transport) - } - _ => self.demux.handle_message(msg), - } - } - - fn new_link(&self, _link: Link) {} - - fn del_link(&self, _link: Link) {} - - fn closing(&self) { - self.demux.closing(); - let ctrl_lock = zlock!(self.tables.ctrl_lock); - let mut tables = zwrite!(self.tables.tables); - let _ = ctrl_lock.closing(&mut tables, &self.tables, &self.transport); - } - - fn closed(&self) {} - - fn as_any(&self) -> &dyn Any { - self - } -} diff --git a/zenoh/src/net/runtime/mod.rs b/zenoh/src/net/runtime/mod.rs index a3574914ea..3573b093d3 100644 --- a/zenoh/src/net/runtime/mod.rs +++ b/zenoh/src/net/runtime/mod.rs @@ -22,7 +22,7 @@ pub mod orchestrator; use super::primitives::DeMux; use super::routing; -use super::routing::router::{LinkStateInterceptor, Router}; +use super::routing::router::Router; use crate::config::{unwrap_or_default, Config, ModeDependent, Notifier}; use crate::GIT_VERSION; pub use adminspace::AdminSpace; @@ -236,7 +236,7 @@ impl TransportEventHandler for RuntimeTransportEventHandler { pub(super) struct RuntimeSession { pub(super) runtime: Runtime, pub(super) endpoint: std::sync::RwLock>, - pub(super) main_handler: Arc, + pub(super) main_handler: Arc, pub(super) slave_handlers: Vec>, } From b530da20fc2a4a23303fe79f8e650400189a8fd2 Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Fri, 5 Jan 2024 21:00:27 +0100 Subject: [PATCH 026/122] Perf improvements --- zenoh/src/net/primitives/demux.rs | 27 +++++++---- zenoh/src/net/primitives/mux.rs | 40 ++++++++++------ zenoh/src/net/routing/dispatcher/face.rs | 15 +++++- zenoh/src/net/routing/dispatcher/pubsub.rs | 28 +++++++---- zenoh/src/net/routing/dispatcher/tables.rs | 14 ++++++ zenoh/src/net/routing/mod.rs | 54 +++++++++++++++++++++- zenoh/src/net/routing/router.rs | 16 +++---- 7 files changed, 148 insertions(+), 46 deletions(-) diff --git a/zenoh/src/net/primitives/demux.rs b/zenoh/src/net/primitives/demux.rs index 003d50d8bd..545b2040cb 100644 --- a/zenoh/src/net/primitives/demux.rs +++ b/zenoh/src/net/primitives/demux.rs @@ -12,7 +12,11 @@ // ZettaScale Zenoh Team, // use super::Primitives; -use crate::net::routing::{dispatcher::face::Face, interceptor::IngressIntercept, RoutingContext}; +use crate::net::routing::{ + dispatcher::face::Face, + interceptor::{InterceptTrait, InterceptsChain}, + RoutingContext, +}; use std::any::Any; use zenoh_link::Link; use zenoh_protocol::network::{NetworkBody, NetworkMessage}; @@ -22,14 +26,14 @@ use zenoh_transport::{TransportPeerEventHandler, TransportUnicast}; pub struct DeMux { face: Face, pub(crate) transport: Option, - pub(crate) intercept: IngressIntercept, + pub(crate) intercept: InterceptsChain, } impl DeMux { pub(crate) fn new( face: Face, transport: Option, - intercept: IngressIntercept, + intercept: InterceptsChain, ) -> Self { Self { face, @@ -41,14 +45,17 @@ impl DeMux { impl TransportPeerEventHandler for DeMux { #[inline] - fn handle_message(&self, msg: NetworkMessage) -> ZResult<()> { - let ctx = RoutingContext::with_face(msg, self.face.clone()); - let ctx = match self.intercept.intercept(ctx) { - Some(ctx) => ctx, - None => return Ok(()), - }; + fn handle_message(&self, mut msg: NetworkMessage) -> ZResult<()> { + if !self.intercept.intercepts.is_empty() { + let ctx = RoutingContext::new_in(msg, self.face.clone()); + let ctx = match self.intercept.intercept(ctx) { + Some(ctx) => ctx, + None => return Ok(()), + }; + msg = ctx.msg; + } - match 
ctx.msg.body { + match msg.body { NetworkBody::Push(m) => self.face.send_push(m), NetworkBody::Declare(m) => self.face.send_declare(m), NetworkBody::Request(m) => self.face.send_request(m), diff --git a/zenoh/src/net/primitives/mux.rs b/zenoh/src/net/primitives/mux.rs index 67077004a1..3eefc6f2eb 100644 --- a/zenoh/src/net/primitives/mux.rs +++ b/zenoh/src/net/primitives/mux.rs @@ -16,7 +16,7 @@ use std::sync::Arc; use super::{EPrimitives, Primitives}; use crate::net::routing::{ dispatcher::{face::Face, tables::TablesLock}, - interceptor::EgressIntercept, + interceptor::{InterceptTrait, InterceptsChain}, RoutingContext, }; use zenoh_protocol::network::{ @@ -28,7 +28,7 @@ pub struct Mux { pub handler: TransportUnicast, pub(crate) fid: usize, pub(crate) tables: Arc, - pub(crate) intercept: EgressIntercept, + pub(crate) intercept: InterceptsChain, } impl Mux { @@ -36,7 +36,7 @@ impl Mux { handler: TransportUnicast, fid: usize, tables: Arc, - intercept: EgressIntercept, + intercept: InterceptsChain, ) -> Mux { Mux { handler, @@ -58,7 +58,7 @@ impl Primitives for Mux { let face = tables.faces.get(&self.fid).cloned(); drop(tables); if let Some(face) = face { - let ctx = RoutingContext::with_face( + let ctx = RoutingContext::new_in( msg, Face { tables: self.tables.clone(), @@ -81,7 +81,7 @@ impl Primitives for Mux { let face = tables.faces.get(&self.fid).cloned(); drop(tables); if let Some(face) = face { - let ctx = RoutingContext::with_face( + let ctx = RoutingContext::new_in( msg, Face { tables: self.tables.clone(), @@ -104,7 +104,7 @@ impl Primitives for Mux { let face = tables.faces.get(&self.fid).cloned(); drop(tables); if let Some(face) = face { - let ctx = RoutingContext::with_face( + let ctx = RoutingContext::new_in( msg, Face { tables: self.tables.clone(), @@ -127,7 +127,7 @@ impl Primitives for Mux { let face = tables.faces.get(&self.fid).cloned(); drop(tables); if let Some(face) = face { - let ctx = RoutingContext::with_face( + let ctx = RoutingContext::new_in( msg, Face { tables: self.tables.clone(), @@ -150,7 +150,7 @@ impl Primitives for Mux { let face = tables.faces.get(&self.fid).cloned(); drop(tables); if let Some(face) = face { - let ctx = RoutingContext::with_face( + let ctx = RoutingContext::new_in( msg, Face { tables: self.tables.clone(), @@ -177,6 +177,7 @@ impl EPrimitives for Mux { size: None, }, inface: ctx.inface, + outface: ctx.outface, prefix: ctx.prefix, full_expr: ctx.full_expr, }; @@ -193,6 +194,7 @@ impl EPrimitives for Mux { size: None, }, inface: ctx.inface, + outface: ctx.outface, prefix: ctx.prefix, full_expr: ctx.full_expr, }; @@ -209,6 +211,7 @@ impl EPrimitives for Mux { size: None, }, inface: ctx.inface, + outface: ctx.outface, prefix: ctx.prefix, full_expr: ctx.full_expr, }; @@ -225,6 +228,7 @@ impl EPrimitives for Mux { size: None, }, inface: ctx.inface, + outface: ctx.outface, prefix: ctx.prefix, full_expr: ctx.full_expr, }; @@ -241,6 +245,7 @@ impl EPrimitives for Mux { size: None, }, inface: ctx.inface, + outface: ctx.outface, prefix: ctx.prefix, full_expr: ctx.full_expr, }; @@ -258,7 +263,7 @@ pub struct McastMux { pub handler: TransportMulticast, pub(crate) fid: usize, pub(crate) tables: Arc, - pub(crate) intercept: EgressIntercept, + pub(crate) intercept: InterceptsChain, } impl McastMux { @@ -266,7 +271,7 @@ impl McastMux { handler: TransportMulticast, fid: usize, tables: Arc, - intercept: EgressIntercept, + intercept: InterceptsChain, ) -> McastMux { McastMux { handler, @@ -285,7 +290,7 @@ impl Primitives for McastMux { size: None, }; if let 
Some(face) = zread!(self.tables.tables).faces.get(&self.fid).cloned() { - let ctx = RoutingContext::with_face( + let ctx = RoutingContext::new_in( msg, Face { tables: self.tables.clone(), @@ -305,7 +310,7 @@ impl Primitives for McastMux { size: None, }; if let Some(face) = zread!(self.tables.tables).faces.get(&self.fid).cloned() { - let ctx = RoutingContext::with_face( + let ctx = RoutingContext::new_in( msg, Face { tables: self.tables.clone(), @@ -325,7 +330,7 @@ impl Primitives for McastMux { size: None, }; if let Some(face) = zread!(self.tables.tables).faces.get(&self.fid).cloned() { - let ctx = RoutingContext::with_face( + let ctx = RoutingContext::new_in( msg, Face { tables: self.tables.clone(), @@ -345,7 +350,7 @@ impl Primitives for McastMux { size: None, }; if let Some(face) = zread!(self.tables.tables).faces.get(&self.fid).cloned() { - let ctx = RoutingContext::with_face( + let ctx = RoutingContext::new_in( msg, Face { tables: self.tables.clone(), @@ -365,7 +370,7 @@ impl Primitives for McastMux { size: None, }; if let Some(face) = zread!(self.tables.tables).faces.get(&self.fid).cloned() { - let ctx = RoutingContext::with_face( + let ctx = RoutingContext::new_in( msg, Face { tables: self.tables.clone(), @@ -392,6 +397,7 @@ impl EPrimitives for McastMux { size: None, }, inface: ctx.inface, + outface: ctx.outface, prefix: ctx.prefix, full_expr: ctx.full_expr, }; @@ -408,6 +414,7 @@ impl EPrimitives for McastMux { size: None, }, inface: ctx.inface, + outface: ctx.outface, prefix: ctx.prefix, full_expr: ctx.full_expr, }; @@ -424,6 +431,7 @@ impl EPrimitives for McastMux { size: None, }, inface: ctx.inface, + outface: ctx.outface, prefix: ctx.prefix, full_expr: ctx.full_expr, }; @@ -440,6 +448,7 @@ impl EPrimitives for McastMux { size: None, }, inface: ctx.inface, + outface: ctx.outface, prefix: ctx.prefix, full_expr: ctx.full_expr, }; @@ -456,6 +465,7 @@ impl EPrimitives for McastMux { size: None, }, inface: ctx.inface, + outface: ctx.outface, prefix: ctx.prefix, full_expr: ctx.full_expr, }; diff --git a/zenoh/src/net/routing/dispatcher/face.rs b/zenoh/src/net/routing/dispatcher/face.rs index b9562ca74a..9bb2cb30aa 100644 --- a/zenoh/src/net/routing/dispatcher/face.rs +++ b/zenoh/src/net/routing/dispatcher/face.rs @@ -82,6 +82,19 @@ impl FaceState { } } + #[inline] + #[allow(clippy::trivially_copy_pass_by_ref)] + pub(crate) fn get_sent_mapping( + &self, + prefixid: &ExprId, + mapping: Mapping, + ) -> Option<&std::sync::Arc> { + match mapping { + Mapping::Sender => self.local_mappings.get(prefixid), + Mapping::Receiver => self.remote_mappings.get(prefixid), + } + } + pub(crate) fn get_next_local_id(&self) -> ExprId { let mut id = 1; while self.local_mappings.get(&id).is_some() || self.remote_mappings.get(&id).is_some() { @@ -159,7 +172,7 @@ impl Primitives for Face { #[inline] fn send_push(&self, msg: Push) { full_reentrant_route_data( - &self.tables.tables, + &self.tables, &self.state, &msg.wire_expr, msg.ext_qos, diff --git a/zenoh/src/net/routing/dispatcher/pubsub.rs b/zenoh/src/net/routing/dispatcher/pubsub.rs index e699ea3892..ff6ebd58aa 100644 --- a/zenoh/src/net/routing/dispatcher/pubsub.rs +++ b/zenoh/src/net/routing/dispatcher/pubsub.rs @@ -13,7 +13,8 @@ // use super::face::FaceState; use super::resource::{DataRoutes, Direction, PullCaches, Resource}; -use super::tables::{NodeId, Route, RoutingExpr, Tables}; +use super::tables::{NodeId, Route, RoutingExpr, Tables, TablesLock}; +use crate::net::routing::dispatcher::face::Face; use crate::net::routing::RoutingContext; use 
std::sync::Arc; use std::sync::RwLock; @@ -199,14 +200,14 @@ macro_rules! inc_stats { #[allow(clippy::too_many_arguments)] pub fn full_reentrant_route_data( - tables_ref: &RwLock, + tables_ref: &Arc, face: &FaceState, expr: &WireExpr, ext_qos: ext::QoSType, mut payload: PushBody, routing_context: NodeId, ) { - let tables = zread!(tables_ref); + let tables = zread!(tables_ref.tables); match tables.get_mapping(face, &expr.scope, expr.mapping).cloned() { Some(prefix) => { log::trace!( @@ -249,7 +250,7 @@ pub fn full_reentrant_route_data( inc_stats!(face, tx, admin, payload) } - outface.primitives.send_push(RoutingContext::with_expr( + outface.primitives.send_push(RoutingContext::new_out( Push { wire_expr: key_expr.into(), ext_qos, @@ -257,7 +258,10 @@ pub fn full_reentrant_route_data( ext_nodeid: ext::NodeIdType { node_id: *context }, payload, }, - expr.full_expr().to_string(), + Face { + tables: tables_ref.clone(), + state: outface.clone(), + }, )) } } else { @@ -287,7 +291,7 @@ pub fn full_reentrant_route_data( inc_stats!(face, tx, admin, payload) } - outface.primitives.send_push(RoutingContext::with_expr( + outface.primitives.send_push(RoutingContext::new_out( Push { wire_expr: key_expr, ext_qos, @@ -295,7 +299,10 @@ pub fn full_reentrant_route_data( ext_nodeid: ext::NodeIdType { node_id: context }, payload: payload.clone(), }, - expr.full_expr().to_string(), + Face { + tables: tables_ref.clone(), + state: outface.clone(), + }, )) } } else { @@ -317,7 +324,7 @@ pub fn full_reentrant_route_data( inc_stats!(face, tx, admin, payload) } - outface.primitives.send_push(RoutingContext::with_expr( + outface.primitives.send_push(RoutingContext::new_out( Push { wire_expr: key_expr.into(), ext_qos, @@ -325,7 +332,10 @@ pub fn full_reentrant_route_data( ext_nodeid: ext::NodeIdType { node_id: *context }, payload: payload.clone(), }, - expr.full_expr().to_string(), + Face { + tables: tables_ref.clone(), + state: outface.clone(), + }, )) } } diff --git a/zenoh/src/net/routing/dispatcher/tables.rs b/zenoh/src/net/routing/dispatcher/tables.rs index 2b10f41b5d..08e2f0a446 100644 --- a/zenoh/src/net/routing/dispatcher/tables.rs +++ b/zenoh/src/net/routing/dispatcher/tables.rs @@ -128,6 +128,20 @@ impl Tables { } } + #[inline] + #[allow(clippy::trivially_copy_pass_by_ref)] + pub(crate) fn get_sent_mapping<'a>( + &'a self, + face: &'a FaceState, + expr_id: &ExprId, + mapping: Mapping, + ) -> Option<&'a Arc> { + match expr_id { + 0 => Some(&self.root_res), + expr_id => face.get_sent_mapping(expr_id, mapping), + } + } + #[inline] pub(crate) fn get_face(&self, zid: &ZenohId) -> Option<&Arc> { self.faces.values().find(|face| face.zid == *zid) diff --git a/zenoh/src/net/routing/mod.rs b/zenoh/src/net/routing/mod.rs index 527ee229a1..0b069c1337 100644 --- a/zenoh/src/net/routing/mod.rs +++ b/zenoh/src/net/routing/mod.rs @@ -35,32 +35,65 @@ pub(crate) static PREFIX_LIVELINESS: &str = "@/liveliness"; pub(crate) struct RoutingContext { pub(crate) msg: Msg, pub(crate) inface: OnceCell, + pub(crate) outface: OnceCell, pub(crate) prefix: OnceCell>, pub(crate) full_expr: OnceCell, } impl RoutingContext { - pub(crate) fn with_face(msg: Msg, inface: Face) -> Self { + #[allow(dead_code)] + pub(crate) fn new(msg: Msg) -> Self { + Self { + msg, + inface: OnceCell::new(), + outface: OnceCell::new(), + prefix: OnceCell::new(), + full_expr: OnceCell::new(), + } + } + + #[allow(dead_code)] + pub(crate) fn new_in(msg: Msg, inface: Face) -> Self { Self { msg, inface: OnceCell::from(inface), + outface: OnceCell::new(), + prefix: 
OnceCell::new(), + full_expr: OnceCell::new(), + } + } + + #[allow(dead_code)] + pub(crate) fn new_out(msg: Msg, outface: Face) -> Self { + Self { + msg, + inface: OnceCell::new(), + outface: OnceCell::from(outface), prefix: OnceCell::new(), full_expr: OnceCell::new(), } } + #[allow(dead_code)] pub(crate) fn with_expr(msg: Msg, expr: String) -> Self { Self { msg, inface: OnceCell::new(), + outface: OnceCell::new(), prefix: OnceCell::new(), full_expr: OnceCell::from(expr), } } + #[allow(dead_code)] pub(crate) fn inface(&self) -> Option<&Face> { self.inface.get() } + + #[allow(dead_code)] + pub(crate) fn outface(&self) -> Option<&Face> { + self.outface.get() + } } impl RoutingContext { @@ -95,6 +128,25 @@ impl RoutingContext { if self.full_expr.get().is_some() { return Some(self.full_expr.get().as_ref().unwrap()); } + if let Some(face) = self.outface.get() { + if let Some(wire_expr) = self.wire_expr() { + let wire_expr = wire_expr.to_owned(); + if self.prefix.get().is_none() { + if let Some(prefix) = zread!(face.tables.tables) + .get_sent_mapping(&face.state, &wire_expr.scope, wire_expr.mapping) + .cloned() + { + let _ = self.prefix.set(prefix); + } + } + if let Some(prefix) = self.prefix.get().cloned() { + let _ = self + .full_expr + .set(prefix.expr() + wire_expr.suffix.as_ref()); + return Some(self.full_expr.get().as_ref().unwrap()); + } + } + } if let Some(face) = self.inface.get() { if let Some(wire_expr) = self.wire_expr() { let wire_expr = wire_expr.to_owned(); diff --git a/zenoh/src/net/routing/router.rs b/zenoh/src/net/routing/router.rs index 60fcfccfa9..966dec3670 100644 --- a/zenoh/src/net/routing/router.rs +++ b/zenoh/src/net/routing/router.rs @@ -117,12 +117,8 @@ impl Router { .map(|itor| itor.new_transport_unicast(&transport)) .unzip(); let (ingress, egress) = ( - Box::new(InterceptsChain::from( - ingress.into_iter().flatten().collect::>(), - )), - Box::new(InterceptsChain::from( - egress.into_iter().flatten().collect::>(), - )), + InterceptsChain::from(ingress.into_iter().flatten().collect::>()), + InterceptsChain::from(egress.into_iter().flatten().collect::>()), ); let newface = tables .faces @@ -162,13 +158,13 @@ impl Router { let mut tables = zwrite!(self.tables.tables); let fid = tables.face_counter; tables.face_counter += 1; - let intercept = Box::new(InterceptsChain::from( + let intercept = InterceptsChain::from( tables .interceptors .iter() .filter_map(|itor| itor.new_transport_multicast(&transport)) .collect::>(), - )); + ); tables.mcast_groups.push(FaceState::new( fid, ZenohId::from_str("1").unwrap(), @@ -200,13 +196,13 @@ impl Router { let mut tables = zwrite!(self.tables.tables); let fid = tables.face_counter; tables.face_counter += 1; - let intercept = Box::new(InterceptsChain::from( + let intercept = InterceptsChain::from( tables .interceptors .iter() .filter_map(|itor| itor.new_peer_multicast(&transport)) .collect::>(), - )); + ); let face_state = FaceState::new( fid, peer.zid, From d5fcdfa23a1501a3df3579d8b30b292d52c43783 Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Tue, 9 Jan 2024 16:04:34 +0100 Subject: [PATCH 027/122] Remove files wrongly reintroduced by merge --- zenoh/src/net/routing/face.rs | 452 ------ zenoh/src/net/routing/pubsub.rs | 1957 ------------------------- zenoh/src/net/routing/queries.rs | 2352 ------------------------------ 3 files changed, 4761 deletions(-) delete mode 100644 zenoh/src/net/routing/face.rs delete mode 100644 zenoh/src/net/routing/pubsub.rs delete mode 100644 zenoh/src/net/routing/queries.rs diff --git 
a/zenoh/src/net/routing/face.rs b/zenoh/src/net/routing/face.rs deleted file mode 100644 index 0d2ee926d1..0000000000 --- a/zenoh/src/net/routing/face.rs +++ /dev/null @@ -1,452 +0,0 @@ -// -// Copyright (c) 2023 ZettaScale Technology -// -// This program and the accompanying materials are made available under the -// terms of the Eclipse Public License 2.0 which is available at -// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 -// which is available at https://www.apache.org/licenses/LICENSE-2.0. -// -// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 -// -// Contributors: -// ZettaScale Zenoh Team, -// -use super::router::*; -use std::collections::{HashMap, HashSet}; -use std::fmt; -use std::sync::Arc; -use zenoh_protocol::zenoh::RequestBody; -use zenoh_protocol::{ - core::{ExprId, WhatAmI, ZenohId}, - network::{ - declare::queryable::ext::QueryableInfo, Mapping, Push, Request, RequestId, Response, - ResponseFinal, - }, -}; -#[cfg(feature = "stats")] -use zenoh_transport::stats::TransportStats; -use zenoh_transport::{multicast::TransportMulticast, primitives::Primitives}; - -pub struct FaceState { - pub(super) id: usize, - pub(super) zid: ZenohId, - pub(super) whatami: WhatAmI, - pub(super) local: bool, - #[cfg(feature = "stats")] - pub(super) stats: Option>, - pub(super) primitives: Arc, - pub(super) link_id: usize, - pub(super) local_mappings: HashMap>, - pub(super) remote_mappings: HashMap>, - pub(super) local_subs: HashSet>, - pub(super) remote_subs: HashSet>, - pub(super) local_qabls: HashMap, QueryableInfo>, - pub(super) remote_qabls: HashSet>, - pub(super) next_qid: RequestId, - pub(super) pending_queries: HashMap>, - pub(super) mcast_group: Option, -} - -impl FaceState { - #[allow(clippy::too_many_arguments)] - pub(crate) fn new( - id: usize, - zid: ZenohId, - whatami: WhatAmI, - local: bool, - #[cfg(feature = "stats")] stats: Option>, - primitives: Arc, - link_id: usize, - mcast_group: Option, - ) -> Arc { - Arc::new(FaceState { - id, - zid, - whatami, - local, - #[cfg(feature = "stats")] - stats, - primitives, - link_id, - local_mappings: HashMap::new(), - remote_mappings: HashMap::new(), - local_subs: HashSet::new(), - remote_subs: HashSet::new(), - local_qabls: HashMap::new(), - remote_qabls: HashSet::new(), - next_qid: 0, - pending_queries: HashMap::new(), - mcast_group, - }) - } - - #[inline] - pub fn is_local(&self) -> bool { - self.local - } - - #[inline] - #[allow(clippy::trivially_copy_pass_by_ref)] - pub(super) fn get_mapping( - &self, - prefixid: &ExprId, - mapping: Mapping, - ) -> Option<&std::sync::Arc> { - match mapping { - Mapping::Sender => self.remote_mappings.get(prefixid), - Mapping::Receiver => self.local_mappings.get(prefixid), - } - } - - pub(super) fn get_next_local_id(&self) -> ExprId { - let mut id = 1; - while self.local_mappings.get(&id).is_some() || self.remote_mappings.get(&id).is_some() { - id += 1; - } - id - } - - pub(super) fn get_router(&self, tables: &Tables, nodeid: &u64) -> Option { - match tables.routers_net.as_ref().unwrap().get_link(self.link_id) { - Some(link) => match link.get_zid(nodeid) { - Some(router) => Some(*router), - None => { - log::error!( - "Received router declaration with unknown routing context id {}", - nodeid - ); - None - } - }, - None => { - log::error!( - "Could not find corresponding link in routers network for {}", - self - ); - None - } - } - } - - pub(super) fn get_peer(&self, tables: &Tables, nodeid: &u64) -> Option { - match tables.peers_net.as_ref().unwrap().get_link(self.link_id) { - 
Some(link) => match link.get_zid(nodeid) { - Some(router) => Some(*router), - None => { - log::error!( - "Received peer declaration with unknown routing context id {}", - nodeid - ); - None - } - }, - None => { - log::error!( - "Could not find corresponding link in peers network for {}", - self - ); - None - } - } - } -} - -impl fmt::Display for FaceState { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "Face{{{}, {}}}", self.id, self.zid) - } -} - -#[derive(Clone)] -pub struct Face { - pub(crate) tables: Arc, - pub(crate) state: Arc, -} - -impl Primitives for Face { - fn send_declare(&self, msg: zenoh_protocol::network::Declare) { - let ctrl_lock = zlock!(self.tables.ctrl_lock); - match msg.body { - zenoh_protocol::network::DeclareBody::DeclareKeyExpr(m) => { - register_expr(&self.tables, &mut self.state.clone(), m.id, &m.wire_expr); - } - zenoh_protocol::network::DeclareBody::UndeclareKeyExpr(m) => { - unregister_expr(&self.tables, &mut self.state.clone(), m.id); - } - zenoh_protocol::network::DeclareBody::DeclareSubscriber(m) => { - let rtables = zread!(self.tables.tables); - match (rtables.whatami, self.state.whatami) { - (WhatAmI::Router, WhatAmI::Router) => { - if let Some(router) = self - .state - .get_router(&rtables, &(msg.ext_nodeid.node_id as u64)) - { - declare_router_subscription( - &self.tables, - rtables, - &mut self.state.clone(), - &m.wire_expr, - &m.ext_info, - router, - ) - } - } - (WhatAmI::Router, WhatAmI::Peer) - | (WhatAmI::Peer, WhatAmI::Router) - | (WhatAmI::Peer, WhatAmI::Peer) => { - if rtables.full_net(WhatAmI::Peer) { - if let Some(peer) = self - .state - .get_peer(&rtables, &(msg.ext_nodeid.node_id as u64)) - { - declare_peer_subscription( - &self.tables, - rtables, - &mut self.state.clone(), - &m.wire_expr, - &m.ext_info, - peer, - ) - } - } else { - declare_client_subscription( - &self.tables, - rtables, - &mut self.state.clone(), - &m.wire_expr, - &m.ext_info, - ) - } - } - _ => declare_client_subscription( - &self.tables, - rtables, - &mut self.state.clone(), - &m.wire_expr, - &m.ext_info, - ), - } - } - zenoh_protocol::network::DeclareBody::UndeclareSubscriber(m) => { - let rtables = zread!(self.tables.tables); - match (rtables.whatami, self.state.whatami) { - (WhatAmI::Router, WhatAmI::Router) => { - if let Some(router) = self - .state - .get_router(&rtables, &(msg.ext_nodeid.node_id as u64)) - { - forget_router_subscription( - &self.tables, - rtables, - &mut self.state.clone(), - &m.ext_wire_expr.wire_expr, - &router, - ) - } - } - (WhatAmI::Router, WhatAmI::Peer) - | (WhatAmI::Peer, WhatAmI::Router) - | (WhatAmI::Peer, WhatAmI::Peer) => { - if rtables.full_net(WhatAmI::Peer) { - if let Some(peer) = self - .state - .get_peer(&rtables, &(msg.ext_nodeid.node_id as u64)) - { - forget_peer_subscription( - &self.tables, - rtables, - &mut self.state.clone(), - &m.ext_wire_expr.wire_expr, - &peer, - ) - } - } else { - forget_client_subscription( - &self.tables, - rtables, - &mut self.state.clone(), - &m.ext_wire_expr.wire_expr, - ) - } - } - _ => forget_client_subscription( - &self.tables, - rtables, - &mut self.state.clone(), - &m.ext_wire_expr.wire_expr, - ), - } - } - zenoh_protocol::network::DeclareBody::DeclareQueryable(m) => { - let rtables = zread!(self.tables.tables); - match (rtables.whatami, self.state.whatami) { - (WhatAmI::Router, WhatAmI::Router) => { - if let Some(router) = self - .state - .get_router(&rtables, &(msg.ext_nodeid.node_id as u64)) - { - declare_router_queryable( - &self.tables, - rtables, - &mut 
self.state.clone(), - &m.wire_expr, - &m.ext_info, - router, - ) - } - } - (WhatAmI::Router, WhatAmI::Peer) - | (WhatAmI::Peer, WhatAmI::Router) - | (WhatAmI::Peer, WhatAmI::Peer) => { - if rtables.full_net(WhatAmI::Peer) { - if let Some(peer) = self - .state - .get_peer(&rtables, &(msg.ext_nodeid.node_id as u64)) - { - declare_peer_queryable( - &self.tables, - rtables, - &mut self.state.clone(), - &m.wire_expr, - &m.ext_info, - peer, - ) - } - } else { - declare_client_queryable( - &self.tables, - rtables, - &mut self.state.clone(), - &m.wire_expr, - &m.ext_info, - ) - } - } - _ => declare_client_queryable( - &self.tables, - rtables, - &mut self.state.clone(), - &m.wire_expr, - &m.ext_info, - ), - } - } - zenoh_protocol::network::DeclareBody::UndeclareQueryable(m) => { - let rtables = zread!(self.tables.tables); - match (rtables.whatami, self.state.whatami) { - (WhatAmI::Router, WhatAmI::Router) => { - if let Some(router) = self - .state - .get_router(&rtables, &(msg.ext_nodeid.node_id as u64)) - { - forget_router_queryable( - &self.tables, - rtables, - &mut self.state.clone(), - &m.ext_wire_expr.wire_expr, - &router, - ) - } - } - (WhatAmI::Router, WhatAmI::Peer) - | (WhatAmI::Peer, WhatAmI::Router) - | (WhatAmI::Peer, WhatAmI::Peer) => { - if rtables.full_net(WhatAmI::Peer) { - if let Some(peer) = self - .state - .get_peer(&rtables, &(msg.ext_nodeid.node_id as u64)) - { - forget_peer_queryable( - &self.tables, - rtables, - &mut self.state.clone(), - &m.ext_wire_expr.wire_expr, - &peer, - ) - } - } else { - forget_client_queryable( - &self.tables, - rtables, - &mut self.state.clone(), - &m.ext_wire_expr.wire_expr, - ) - } - } - _ => forget_client_queryable( - &self.tables, - rtables, - &mut self.state.clone(), - &m.ext_wire_expr.wire_expr, - ), - } - } - zenoh_protocol::network::DeclareBody::DeclareToken(_m) => todo!(), - zenoh_protocol::network::DeclareBody::UndeclareToken(_m) => todo!(), - zenoh_protocol::network::DeclareBody::DeclareInterest(_m) => todo!(), - zenoh_protocol::network::DeclareBody::FinalInterest(_m) => todo!(), - zenoh_protocol::network::DeclareBody::UndeclareInterest(_m) => todo!(), - } - drop(ctrl_lock); - } - - fn send_push(&self, msg: Push) { - full_reentrant_route_data( - &self.tables.tables, - &self.state, - &msg.wire_expr, - msg.ext_qos, - msg.payload, - msg.ext_nodeid.node_id as u64, - ); - } - - fn send_request(&self, msg: Request) { - match msg.payload { - RequestBody::Query(_) => { - route_query( - &self.tables, - &self.state, - &msg.wire_expr, - // parameters, - msg.id, - msg.ext_target, - // consolidation, - msg.payload, - msg.ext_nodeid.node_id as u64, - ); - } - RequestBody::Pull(_) => { - pull_data(&self.tables.tables, &self.state.clone(), msg.wire_expr); - } - _ => { - log::error!("Unsupported request"); - } - } - } - - fn send_response(&self, msg: Response) { - route_send_response( - &self.tables, - &mut self.state.clone(), - msg.rid, - msg.ext_respid, - msg.wire_expr, - msg.payload, - ); - } - - fn send_response_final(&self, msg: ResponseFinal) { - route_send_response_final(&self.tables, &mut self.state.clone(), msg.rid); - } - - fn send_close(&self) { - super::router::close_face(&self.tables, &Arc::downgrade(&self.state)); - } -} - -impl fmt::Display for Face { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - self.state.fmt(f) - } -} diff --git a/zenoh/src/net/routing/pubsub.rs b/zenoh/src/net/routing/pubsub.rs deleted file mode 100644 index e9dc80b024..0000000000 --- a/zenoh/src/net/routing/pubsub.rs +++ /dev/null @@ -1,1957 +0,0 @@ -// 
-// Copyright (c) 2023 ZettaScale Technology -// -// This program and the accompanying materials are made available under the -// terms of the Eclipse Public License 2.0 which is available at -// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 -// which is available at https://www.apache.org/licenses/LICENSE-2.0. -// -// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 -// -// Contributors: -// ZettaScale Zenoh Team, -// -use super::face::FaceState; -use super::network::Network; -use super::resource::{ - DataRoutes, Direction, PullCaches, Resource, Route, RoutingContext, SessionContext, -}; -use super::router::{RoutingExpr, Tables, TablesLock}; -use petgraph::graph::NodeIndex; -use std::borrow::Cow; -use std::collections::{HashMap, HashSet}; -use std::convert::TryFrom; -use std::sync::RwLock; -use std::sync::{Arc, RwLockReadGuard}; -use zenoh_core::zread; -use zenoh_protocol::{ - core::{ - key_expr::{keyexpr, OwnedKeyExpr}, - Reliability, WhatAmI, WireExpr, ZenohId, - }, - network::{ - declare::{ - common::ext::WireExprType, ext, subscriber::ext::SubscriberInfo, Declare, DeclareBody, - DeclareSubscriber, Mode, UndeclareSubscriber, - }, - Push, - }, - zenoh::PushBody, -}; -use zenoh_sync::get_mut_unchecked; - -#[inline] -fn send_sourced_subscription_to_net_childs( - tables: &Tables, - net: &Network, - childs: &[NodeIndex], - res: &Arc, - src_face: Option<&Arc>, - sub_info: &SubscriberInfo, - routing_context: Option, -) { - for child in childs { - if net.graph.contains_node(*child) { - match tables.get_face(&net.graph[*child].zid).cloned() { - Some(mut someface) => { - if src_face.is_none() || someface.id != src_face.unwrap().id { - let key_expr = Resource::decl_key(res, &mut someface); - - log::debug!("Send subscription {} on {}", res.expr(), someface); - - someface.primitives.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType { - node_id: routing_context.unwrap_or(0), - }, - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // TODO - wire_expr: key_expr, - ext_info: *sub_info, - }), - }); - } - } - None => log::trace!("Unable to find face for zid {}", net.graph[*child].zid), - } - } - } -} - -#[inline] -fn propagate_simple_subscription_to( - tables: &mut Tables, - dst_face: &mut Arc, - res: &Arc, - sub_info: &SubscriberInfo, - src_face: &mut Arc, - full_peer_net: bool, -) { - if (src_face.id != dst_face.id || res.expr().starts_with(super::PREFIX_LIVELINESS)) - && !dst_face.local_subs.contains(res) - && match tables.whatami { - WhatAmI::Router => { - if full_peer_net { - dst_face.whatami == WhatAmI::Client - } else { - dst_face.whatami != WhatAmI::Router - && (src_face.whatami != WhatAmI::Peer - || dst_face.whatami != WhatAmI::Peer - || tables.failover_brokering(src_face.zid, dst_face.zid)) - } - } - WhatAmI::Peer => { - if full_peer_net { - dst_face.whatami == WhatAmI::Client - } else { - src_face.whatami == WhatAmI::Client || dst_face.whatami == WhatAmI::Client - } - } - _ => src_face.whatami == WhatAmI::Client || dst_face.whatami == WhatAmI::Client, - } - { - get_mut_unchecked(dst_face).local_subs.insert(res.clone()); - let key_expr = Resource::decl_key(res, dst_face); - dst_face.primitives.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // TODO - wire_expr: key_expr, - ext_info: *sub_info, - }), - }); - } -} - -fn 
propagate_simple_subscription( - tables: &mut Tables, - res: &Arc, - sub_info: &SubscriberInfo, - src_face: &mut Arc, -) { - let full_peer_net = tables.full_net(WhatAmI::Peer); - for mut dst_face in tables - .faces - .values() - .cloned() - .collect::>>() - { - propagate_simple_subscription_to( - tables, - &mut dst_face, - res, - sub_info, - src_face, - full_peer_net, - ); - } -} - -fn propagate_sourced_subscription( - tables: &Tables, - res: &Arc, - sub_info: &SubscriberInfo, - src_face: Option<&Arc>, - source: &ZenohId, - net_type: WhatAmI, -) { - let net = tables.get_net(net_type).unwrap(); - match net.get_idx(source) { - Some(tree_sid) => { - if net.trees.len() > tree_sid.index() { - send_sourced_subscription_to_net_childs( - tables, - net, - &net.trees[tree_sid.index()].childs, - res, - src_face, - sub_info, - Some(tree_sid.index() as u16), - ); - } else { - log::trace!( - "Propagating sub {}: tree for node {} sid:{} not yet ready", - res.expr(), - tree_sid.index(), - source - ); - } - } - None => log::error!( - "Error propagating sub {}: cannot get index of {}!", - res.expr(), - source - ), - } -} - -fn register_router_subscription( - tables: &mut Tables, - face: &mut Arc, - res: &mut Arc, - sub_info: &SubscriberInfo, - router: ZenohId, -) { - if !res.context().router_subs.contains(&router) { - // Register router subscription - { - log::debug!( - "Register router subscription {} (router: {})", - res.expr(), - router - ); - get_mut_unchecked(res) - .context_mut() - .router_subs - .insert(router); - tables.router_subs.insert(res.clone()); - } - - // Propagate subscription to routers - propagate_sourced_subscription(tables, res, sub_info, Some(face), &router, WhatAmI::Router); - } - // Propagate subscription to peers - if tables.full_net(WhatAmI::Peer) && face.whatami != WhatAmI::Peer { - register_peer_subscription(tables, face, res, sub_info, tables.zid) - } - - // Propagate subscription to clients - propagate_simple_subscription(tables, res, sub_info, face); -} - -pub fn declare_router_subscription( - tables: &TablesLock, - rtables: RwLockReadGuard, - face: &mut Arc, - expr: &WireExpr, - sub_info: &SubscriberInfo, - router: ZenohId, -) { - match rtables - .get_mapping(face, &expr.scope, expr.mapping) - .cloned() - { - Some(mut prefix) => { - let res = Resource::get_resource(&prefix, &expr.suffix); - let (mut res, mut wtables) = - if res.as_ref().map(|r| r.context.is_some()).unwrap_or(false) { - drop(rtables); - let wtables = zwrite!(tables.tables); - (res.unwrap(), wtables) - } else { - let mut fullexpr = prefix.expr(); - fullexpr.push_str(expr.suffix.as_ref()); - let mut matches = keyexpr::new(fullexpr.as_str()) - .map(|ke| Resource::get_matches(&rtables, ke)) - .unwrap_or_default(); - drop(rtables); - let mut wtables = zwrite!(tables.tables); - let mut res = - Resource::make_resource(&mut wtables, &mut prefix, expr.suffix.as_ref()); - matches.push(Arc::downgrade(&res)); - Resource::match_resource(&wtables, &mut res, matches); - (res, wtables) - }; - register_router_subscription(&mut wtables, face, &mut res, sub_info, router); - disable_matches_data_routes(&mut wtables, &mut res); - drop(wtables); - - let rtables = zread!(tables.tables); - let matches_data_routes = compute_matches_data_routes_(&rtables, &res); - drop(rtables); - - let wtables = zwrite!(tables.tables); - for (mut res, data_routes) in matches_data_routes { - get_mut_unchecked(&mut res) - .context_mut() - .update_data_routes(data_routes); - } - drop(wtables); - } - None => log::error!( - "Declare router subscription for 
unknown scope {}!", - expr.scope - ), - } -} - -fn register_peer_subscription( - tables: &mut Tables, - face: &mut Arc, - res: &mut Arc, - sub_info: &SubscriberInfo, - peer: ZenohId, -) { - if !res.context().peer_subs.contains(&peer) { - // Register peer subscription - { - log::debug!("Register peer subscription {} (peer: {})", res.expr(), peer); - get_mut_unchecked(res).context_mut().peer_subs.insert(peer); - tables.peer_subs.insert(res.clone()); - } - - // Propagate subscription to peers - propagate_sourced_subscription(tables, res, sub_info, Some(face), &peer, WhatAmI::Peer); - } - - if tables.whatami == WhatAmI::Peer { - // Propagate subscription to clients - propagate_simple_subscription(tables, res, sub_info, face); - } -} - -pub fn declare_peer_subscription( - tables: &TablesLock, - rtables: RwLockReadGuard, - face: &mut Arc, - expr: &WireExpr, - sub_info: &SubscriberInfo, - peer: ZenohId, -) { - match rtables - .get_mapping(face, &expr.scope, expr.mapping) - .cloned() - { - Some(mut prefix) => { - let res = Resource::get_resource(&prefix, &expr.suffix); - let (mut res, mut wtables) = - if res.as_ref().map(|r| r.context.is_some()).unwrap_or(false) { - drop(rtables); - let wtables = zwrite!(tables.tables); - (res.unwrap(), wtables) - } else { - let mut fullexpr = prefix.expr(); - fullexpr.push_str(expr.suffix.as_ref()); - let mut matches = keyexpr::new(fullexpr.as_str()) - .map(|ke| Resource::get_matches(&rtables, ke)) - .unwrap_or_default(); - drop(rtables); - let mut wtables = zwrite!(tables.tables); - let mut res = - Resource::make_resource(&mut wtables, &mut prefix, expr.suffix.as_ref()); - matches.push(Arc::downgrade(&res)); - Resource::match_resource(&wtables, &mut res, matches); - (res, wtables) - }; - register_peer_subscription(&mut wtables, face, &mut res, sub_info, peer); - if wtables.whatami == WhatAmI::Router { - let mut propa_sub_info = *sub_info; - propa_sub_info.mode = Mode::Push; - let zid = wtables.zid; - register_router_subscription(&mut wtables, face, &mut res, &propa_sub_info, zid); - } - disable_matches_data_routes(&mut wtables, &mut res); - drop(wtables); - - let rtables = zread!(tables.tables); - let matches_data_routes = compute_matches_data_routes_(&rtables, &res); - drop(rtables); - - let wtables = zwrite!(tables.tables); - for (mut res, data_routes) in matches_data_routes { - get_mut_unchecked(&mut res) - .context_mut() - .update_data_routes(data_routes); - } - drop(wtables); - } - None => log::error!( - "Declare router subscription for unknown scope {}!", - expr.scope - ), - } -} - -fn register_client_subscription( - _tables: &mut Tables, - face: &mut Arc, - res: &mut Arc, - sub_info: &SubscriberInfo, -) { - // Register subscription - { - let res = get_mut_unchecked(res); - log::debug!("Register subscription {} for {}", res.expr(), face); - match res.session_ctxs.get_mut(&face.id) { - Some(ctx) => match &ctx.subs { - Some(info) => { - if Mode::Pull == info.mode { - get_mut_unchecked(ctx).subs = Some(*sub_info); - } - } - None => { - get_mut_unchecked(ctx).subs = Some(*sub_info); - } - }, - None => { - res.session_ctxs.insert( - face.id, - Arc::new(SessionContext { - face: face.clone(), - local_expr_id: None, - remote_expr_id: None, - subs: Some(*sub_info), - qabl: None, - last_values: HashMap::new(), - }), - ); - } - } - } - get_mut_unchecked(face).remote_subs.insert(res.clone()); -} - -pub fn declare_client_subscription( - tables: &TablesLock, - rtables: RwLockReadGuard, - face: &mut Arc, - expr: &WireExpr, - sub_info: &SubscriberInfo, -) { - 
log::debug!("Register client subscription"); - match rtables - .get_mapping(face, &expr.scope, expr.mapping) - .cloned() - { - Some(mut prefix) => { - let res = Resource::get_resource(&prefix, &expr.suffix); - let (mut res, mut wtables) = - if res.as_ref().map(|r| r.context.is_some()).unwrap_or(false) { - drop(rtables); - let wtables = zwrite!(tables.tables); - (res.unwrap(), wtables) - } else { - let mut fullexpr = prefix.expr(); - fullexpr.push_str(expr.suffix.as_ref()); - let mut matches = keyexpr::new(fullexpr.as_str()) - .map(|ke| Resource::get_matches(&rtables, ke)) - .unwrap_or_default(); - drop(rtables); - let mut wtables = zwrite!(tables.tables); - let mut res = - Resource::make_resource(&mut wtables, &mut prefix, expr.suffix.as_ref()); - matches.push(Arc::downgrade(&res)); - Resource::match_resource(&wtables, &mut res, matches); - (res, wtables) - }; - - register_client_subscription(&mut wtables, face, &mut res, sub_info); - let mut propa_sub_info = *sub_info; - propa_sub_info.mode = Mode::Push; - match wtables.whatami { - WhatAmI::Router => { - let zid = wtables.zid; - register_router_subscription( - &mut wtables, - face, - &mut res, - &propa_sub_info, - zid, - ); - } - WhatAmI::Peer => { - if wtables.full_net(WhatAmI::Peer) { - let zid = wtables.zid; - register_peer_subscription( - &mut wtables, - face, - &mut res, - &propa_sub_info, - zid, - ); - } else { - propagate_simple_subscription(&mut wtables, &res, &propa_sub_info, face); - // This introduced a buffer overflow on windows - // TODO: Let's deactivate this on windows until Fixed - #[cfg(not(windows))] - for mcast_group in &wtables.mcast_groups { - mcast_group.primitives.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // TODO - wire_expr: res.expr().into(), - ext_info: *sub_info, - }), - }) - } - } - } - _ => { - propagate_simple_subscription(&mut wtables, &res, &propa_sub_info, face); - // This introduced a buffer overflow on windows - // TODO: Let's deactivate this on windows until Fixed - #[cfg(not(windows))] - for mcast_group in &wtables.mcast_groups { - mcast_group.primitives.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // TODO - wire_expr: res.expr().into(), - ext_info: *sub_info, - }), - }) - } - } - } - disable_matches_data_routes(&mut wtables, &mut res); - drop(wtables); - - let rtables = zread!(tables.tables); - let matches_data_routes = compute_matches_data_routes_(&rtables, &res); - drop(rtables); - - let wtables = zwrite!(tables.tables); - for (mut res, data_routes) in matches_data_routes { - get_mut_unchecked(&mut res) - .context_mut() - .update_data_routes(data_routes); - } - drop(wtables); - } - None => log::error!("Declare subscription for unknown scope {}!", expr.scope), - } -} - -#[inline] -fn remote_router_subs(tables: &Tables, res: &Arc) -> bool { - res.context.is_some() - && res - .context() - .router_subs - .iter() - .any(|peer| peer != &tables.zid) -} - -#[inline] -fn remote_peer_subs(tables: &Tables, res: &Arc) -> bool { - res.context.is_some() - && res - .context() - .peer_subs - .iter() - .any(|peer| peer != &tables.zid) -} - -#[inline] -fn client_subs(res: &Arc) -> Vec> { - res.session_ctxs - .values() - .filter_map(|ctx| { - if ctx.subs.is_some() { - Some(ctx.face.clone()) - } else { - None - } - }) 
- .collect() -} - -#[inline] -fn send_forget_sourced_subscription_to_net_childs( - tables: &Tables, - net: &Network, - childs: &[NodeIndex], - res: &Arc, - src_face: Option<&Arc>, - routing_context: Option, -) { - for child in childs { - if net.graph.contains_node(*child) { - match tables.get_face(&net.graph[*child].zid).cloned() { - Some(mut someface) => { - if src_face.is_none() || someface.id != src_face.unwrap().id { - let wire_expr = Resource::decl_key(res, &mut someface); - - log::debug!("Send forget subscription {} on {}", res.expr(), someface); - - someface.primitives.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType { - node_id: routing_context.unwrap_or(0), - }, - body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id: 0, // TODO - ext_wire_expr: WireExprType { wire_expr }, - }), - }); - } - } - None => log::trace!("Unable to find face for zid {}", net.graph[*child].zid), - } - } - } -} - -fn propagate_forget_simple_subscription(tables: &mut Tables, res: &Arc) { - for face in tables.faces.values_mut() { - if face.local_subs.contains(res) { - let wire_expr = Resource::get_best_key(res, "", face.id); - face.primitives.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id: 0, // TODO - ext_wire_expr: WireExprType { wire_expr }, - }), - }); - get_mut_unchecked(face).local_subs.remove(res); - } - } -} - -fn propagate_forget_simple_subscription_to_peers(tables: &mut Tables, res: &Arc) { - if !tables.full_net(WhatAmI::Peer) - && res.context().router_subs.len() == 1 - && res.context().router_subs.contains(&tables.zid) - { - for mut face in tables - .faces - .values() - .cloned() - .collect::>>() - { - if face.whatami == WhatAmI::Peer - && face.local_subs.contains(res) - && !res.session_ctxs.values().any(|s| { - face.zid != s.face.zid - && s.subs.is_some() - && (s.face.whatami == WhatAmI::Client - || (s.face.whatami == WhatAmI::Peer - && tables.failover_brokering(s.face.zid, face.zid))) - }) - { - let wire_expr = Resource::get_best_key(res, "", face.id); - face.primitives.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id: 0, // TODO - ext_wire_expr: WireExprType { wire_expr }, - }), - }); - - get_mut_unchecked(&mut face).local_subs.remove(res); - } - } - } -} - -fn propagate_forget_sourced_subscription( - tables: &Tables, - res: &Arc, - src_face: Option<&Arc>, - source: &ZenohId, - net_type: WhatAmI, -) { - let net = tables.get_net(net_type).unwrap(); - match net.get_idx(source) { - Some(tree_sid) => { - if net.trees.len() > tree_sid.index() { - send_forget_sourced_subscription_to_net_childs( - tables, - net, - &net.trees[tree_sid.index()].childs, - res, - src_face, - Some(tree_sid.index() as u16), - ); - } else { - log::trace!( - "Propagating forget sub {}: tree for node {} sid:{} not yet ready", - res.expr(), - tree_sid.index(), - source - ); - } - } - None => log::error!( - "Error propagating forget sub {}: cannot get index of {}!", - res.expr(), - source - ), - } -} - -fn unregister_router_subscription(tables: &mut Tables, res: &mut Arc, router: &ZenohId) { - log::debug!( - "Unregister router subscription {} (router: {})", - res.expr(), - router - ); - get_mut_unchecked(res) - .context_mut() - .router_subs - .retain(|sub| 
sub != router); - - if res.context().router_subs.is_empty() { - tables.router_subs.retain(|sub| !Arc::ptr_eq(sub, res)); - - if tables.full_net(WhatAmI::Peer) { - undeclare_peer_subscription(tables, None, res, &tables.zid.clone()); - } - propagate_forget_simple_subscription(tables, res); - } - - propagate_forget_simple_subscription_to_peers(tables, res); -} - -fn undeclare_router_subscription( - tables: &mut Tables, - face: Option<&Arc>, - res: &mut Arc, - router: &ZenohId, -) { - if res.context().router_subs.contains(router) { - unregister_router_subscription(tables, res, router); - propagate_forget_sourced_subscription(tables, res, face, router, WhatAmI::Router); - } -} - -pub fn forget_router_subscription( - tables: &TablesLock, - rtables: RwLockReadGuard, - face: &mut Arc, - expr: &WireExpr, - router: &ZenohId, -) { - match rtables.get_mapping(face, &expr.scope, expr.mapping) { - Some(prefix) => match Resource::get_resource(prefix, expr.suffix.as_ref()) { - Some(mut res) => { - drop(rtables); - let mut wtables = zwrite!(tables.tables); - undeclare_router_subscription(&mut wtables, Some(face), &mut res, router); - disable_matches_data_routes(&mut wtables, &mut res); - drop(wtables); - - let rtables = zread!(tables.tables); - let matches_data_routes = compute_matches_data_routes_(&rtables, &res); - drop(rtables); - let wtables = zwrite!(tables.tables); - for (mut res, data_routes) in matches_data_routes { - get_mut_unchecked(&mut res) - .context_mut() - .update_data_routes(data_routes); - } - Resource::clean(&mut res); - drop(wtables); - } - None => log::error!("Undeclare unknown router subscription!"), - }, - None => log::error!("Undeclare router subscription with unknown scope!"), - } -} - -fn unregister_peer_subscription(tables: &mut Tables, res: &mut Arc, peer: &ZenohId) { - log::debug!( - "Unregister peer subscription {} (peer: {})", - res.expr(), - peer - ); - get_mut_unchecked(res) - .context_mut() - .peer_subs - .retain(|sub| sub != peer); - - if res.context().peer_subs.is_empty() { - tables.peer_subs.retain(|sub| !Arc::ptr_eq(sub, res)); - - if tables.whatami == WhatAmI::Peer { - propagate_forget_simple_subscription(tables, res); - } - } -} - -fn undeclare_peer_subscription( - tables: &mut Tables, - face: Option<&Arc>, - res: &mut Arc, - peer: &ZenohId, -) { - if res.context().peer_subs.contains(peer) { - unregister_peer_subscription(tables, res, peer); - propagate_forget_sourced_subscription(tables, res, face, peer, WhatAmI::Peer); - } -} - -pub fn forget_peer_subscription( - tables: &TablesLock, - rtables: RwLockReadGuard, - face: &mut Arc, - expr: &WireExpr, - peer: &ZenohId, -) { - match rtables.get_mapping(face, &expr.scope, expr.mapping) { - Some(prefix) => match Resource::get_resource(prefix, expr.suffix.as_ref()) { - Some(mut res) => { - drop(rtables); - let mut wtables = zwrite!(tables.tables); - undeclare_peer_subscription(&mut wtables, Some(face), &mut res, peer); - if wtables.whatami == WhatAmI::Router { - let client_subs = res.session_ctxs.values().any(|ctx| ctx.subs.is_some()); - let peer_subs = remote_peer_subs(&wtables, &res); - let zid = wtables.zid; - if !client_subs && !peer_subs { - undeclare_router_subscription(&mut wtables, None, &mut res, &zid); - } - } - disable_matches_data_routes(&mut wtables, &mut res); - drop(wtables); - - let rtables = zread!(tables.tables); - let matches_data_routes = compute_matches_data_routes_(&rtables, &res); - drop(rtables); - let wtables = zwrite!(tables.tables); - for (mut res, data_routes) in matches_data_routes { - 
get_mut_unchecked(&mut res) - .context_mut() - .update_data_routes(data_routes); - } - Resource::clean(&mut res); - drop(wtables); - } - None => log::error!("Undeclare unknown peer subscription!"), - }, - None => log::error!("Undeclare peer subscription with unknown scope!"), - } -} - -pub(crate) fn undeclare_client_subscription( - tables: &mut Tables, - face: &mut Arc, - res: &mut Arc, -) { - log::debug!("Unregister client subscription {} for {}", res.expr(), face); - if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) { - get_mut_unchecked(ctx).subs = None; - } - get_mut_unchecked(face).remote_subs.remove(res); - - let mut client_subs = client_subs(res); - let router_subs = remote_router_subs(tables, res); - let peer_subs = remote_peer_subs(tables, res); - match tables.whatami { - WhatAmI::Router => { - if client_subs.is_empty() && !peer_subs { - undeclare_router_subscription(tables, None, res, &tables.zid.clone()); - } else { - propagate_forget_simple_subscription_to_peers(tables, res); - } - } - WhatAmI::Peer => { - if client_subs.is_empty() { - if tables.full_net(WhatAmI::Peer) { - undeclare_peer_subscription(tables, None, res, &tables.zid.clone()); - } else { - propagate_forget_simple_subscription(tables, res); - } - } - } - _ => { - if client_subs.is_empty() { - propagate_forget_simple_subscription(tables, res); - } - } - } - if client_subs.len() == 1 && !router_subs && !peer_subs { - let face = &mut client_subs[0]; - if face.local_subs.contains(res) - && !(face.whatami == WhatAmI::Client - && res.expr().starts_with(super::PREFIX_LIVELINESS)) - { - let wire_expr = Resource::get_best_key(res, "", face.id); - face.primitives.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::UndeclareSubscriber(UndeclareSubscriber { - id: 0, // TODO - ext_wire_expr: WireExprType { wire_expr }, - }), - }); - - get_mut_unchecked(face).local_subs.remove(res); - } - } -} - -pub fn forget_client_subscription( - tables: &TablesLock, - rtables: RwLockReadGuard, - face: &mut Arc, - expr: &WireExpr, -) { - match rtables.get_mapping(face, &expr.scope, expr.mapping) { - Some(prefix) => match Resource::get_resource(prefix, expr.suffix.as_ref()) { - Some(mut res) => { - drop(rtables); - let mut wtables = zwrite!(tables.tables); - undeclare_client_subscription(&mut wtables, face, &mut res); - disable_matches_data_routes(&mut wtables, &mut res); - drop(wtables); - - let rtables = zread!(tables.tables); - let matches_data_routes = compute_matches_data_routes_(&rtables, &res); - drop(rtables); - - let wtables = zwrite!(tables.tables); - for (mut res, data_routes) in matches_data_routes { - get_mut_unchecked(&mut res) - .context_mut() - .update_data_routes(data_routes); - } - Resource::clean(&mut res); - drop(wtables); - } - None => log::error!("Undeclare unknown subscription!"), - }, - None => log::error!("Undeclare subscription with unknown scope!"), - } -} - -pub(crate) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { - let sub_info = SubscriberInfo { - reliability: Reliability::Reliable, // @TODO - mode: Mode::Push, - }; - match tables.whatami { - WhatAmI::Router => { - if face.whatami == WhatAmI::Client { - for sub in &tables.router_subs { - get_mut_unchecked(face).local_subs.insert(sub.clone()); - let key_expr = Resource::decl_key(sub, face); - face.primitives.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), 
- body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // TODO - wire_expr: key_expr, - ext_info: sub_info, - }), - }); - } - } else if face.whatami == WhatAmI::Peer && !tables.full_net(WhatAmI::Peer) { - for sub in &tables.router_subs { - if sub.context.is_some() - && (sub.context().router_subs.iter().any(|r| *r != tables.zid) - || sub.session_ctxs.values().any(|s| { - s.subs.is_some() - && (s.face.whatami == WhatAmI::Client - || (s.face.whatami == WhatAmI::Peer - && tables.failover_brokering(s.face.zid, face.zid))) - })) - { - get_mut_unchecked(face).local_subs.insert(sub.clone()); - let key_expr = Resource::decl_key(sub, face); - face.primitives.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // TODO - wire_expr: key_expr, - ext_info: sub_info, - }), - }); - } - } - } - } - WhatAmI::Peer => { - if tables.full_net(WhatAmI::Peer) { - if face.whatami == WhatAmI::Client { - for sub in &tables.peer_subs { - get_mut_unchecked(face).local_subs.insert(sub.clone()); - let key_expr = Resource::decl_key(sub, face); - face.primitives.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // TODO - wire_expr: key_expr, - ext_info: sub_info, - }), - }); - } - } - } else { - for src_face in tables - .faces - .values() - .cloned() - .collect::>>() - { - for sub in &src_face.remote_subs { - propagate_simple_subscription_to( - tables, - face, - sub, - &sub_info, - &mut src_face.clone(), - false, - ); - } - } - } - } - WhatAmI::Client => { - for src_face in tables - .faces - .values() - .cloned() - .collect::>>() - { - for sub in &src_face.remote_subs { - propagate_simple_subscription_to( - tables, - face, - sub, - &sub_info, - &mut src_face.clone(), - false, - ); - } - } - } - } -} - -pub(crate) fn pubsub_remove_node(tables: &mut Tables, node: &ZenohId, net_type: WhatAmI) { - match net_type { - WhatAmI::Router => { - for mut res in tables - .router_subs - .iter() - .filter(|res| res.context().router_subs.contains(node)) - .cloned() - .collect::>>() - { - unregister_router_subscription(tables, &mut res, node); - - let matches_data_routes = compute_matches_data_routes_(tables, &res); - for (mut res, data_routes) in matches_data_routes { - get_mut_unchecked(&mut res) - .context_mut() - .update_data_routes(data_routes); - } - Resource::clean(&mut res) - } - } - WhatAmI::Peer => { - for mut res in tables - .peer_subs - .iter() - .filter(|res| res.context().peer_subs.contains(node)) - .cloned() - .collect::>>() - { - unregister_peer_subscription(tables, &mut res, node); - - if tables.whatami == WhatAmI::Router { - let client_subs = res.session_ctxs.values().any(|ctx| ctx.subs.is_some()); - let peer_subs = remote_peer_subs(tables, &res); - if !client_subs && !peer_subs { - undeclare_router_subscription(tables, None, &mut res, &tables.zid.clone()); - } - } - - // compute_matches_data_routes(tables, &mut res); - let matches_data_routes = compute_matches_data_routes_(tables, &res); - for (mut res, data_routes) in matches_data_routes { - get_mut_unchecked(&mut res) - .context_mut() - .update_data_routes(data_routes); - } - Resource::clean(&mut res) - } - } - _ => (), - } -} - -pub(crate) fn pubsub_tree_change( - tables: &mut Tables, - new_childs: &[Vec], - net_type: WhatAmI, -) { - // propagate subs to new childs - 
for (tree_sid, tree_childs) in new_childs.iter().enumerate() { - if !tree_childs.is_empty() { - let net = tables.get_net(net_type).unwrap(); - let tree_idx = NodeIndex::new(tree_sid); - if net.graph.contains_node(tree_idx) { - let tree_id = net.graph[tree_idx].zid; - - let subs_res = match net_type { - WhatAmI::Router => &tables.router_subs, - _ => &tables.peer_subs, - }; - - for res in subs_res { - let subs = match net_type { - WhatAmI::Router => &res.context().router_subs, - _ => &res.context().peer_subs, - }; - for sub in subs { - if *sub == tree_id { - let sub_info = SubscriberInfo { - reliability: Reliability::Reliable, // @TODO - mode: Mode::Push, - }; - send_sourced_subscription_to_net_childs( - tables, - net, - tree_childs, - res, - None, - &sub_info, - Some(tree_sid as u16), - ); - } - } - } - } - } - } - - // recompute routes - compute_data_routes_from(tables, &mut tables.root_res.clone()); -} - -pub(crate) fn pubsub_linkstate_change(tables: &mut Tables, zid: &ZenohId, links: &[ZenohId]) { - if let Some(src_face) = tables.get_face(zid).cloned() { - if tables.router_peers_failover_brokering - && tables.whatami == WhatAmI::Router - && src_face.whatami == WhatAmI::Peer - { - for res in &src_face.remote_subs { - let client_subs = res - .session_ctxs - .values() - .any(|ctx| ctx.face.whatami == WhatAmI::Client && ctx.subs.is_some()); - if !remote_router_subs(tables, res) && !client_subs { - for ctx in get_mut_unchecked(&mut res.clone()) - .session_ctxs - .values_mut() - { - let dst_face = &mut get_mut_unchecked(ctx).face; - if dst_face.whatami == WhatAmI::Peer && src_face.zid != dst_face.zid { - if dst_face.local_subs.contains(res) { - let forget = !Tables::failover_brokering_to(links, dst_face.zid) - && { - let ctx_links = tables - .peers_net - .as_ref() - .map(|net| net.get_links(dst_face.zid)) - .unwrap_or_else(|| &[]); - res.session_ctxs.values().any(|ctx2| { - ctx2.face.whatami == WhatAmI::Peer - && ctx2.subs.is_some() - && Tables::failover_brokering_to( - ctx_links, - ctx2.face.zid, - ) - }) - }; - if forget { - let wire_expr = Resource::get_best_key(res, "", dst_face.id); - dst_face.primitives.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::UndeclareSubscriber( - UndeclareSubscriber { - id: 0, // TODO - ext_wire_expr: WireExprType { wire_expr }, - }, - ), - }); - - get_mut_unchecked(dst_face).local_subs.remove(res); - } - } else if Tables::failover_brokering_to(links, ctx.face.zid) { - let dst_face = &mut get_mut_unchecked(ctx).face; - get_mut_unchecked(dst_face).local_subs.insert(res.clone()); - let key_expr = Resource::decl_key(res, dst_face); - let sub_info = SubscriberInfo { - reliability: Reliability::Reliable, // TODO - mode: Mode::Push, - }; - dst_face.primitives.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // TODO - wire_expr: key_expr, - ext_info: sub_info, - }), - }); - } - } - } - } - } - } - } -} - -#[inline] -fn insert_faces_for_subs( - route: &mut Route, - expr: &RoutingExpr, - tables: &Tables, - net: &Network, - source: usize, - subs: &HashSet, -) { - if net.trees.len() > source { - for sub in subs { - if let Some(sub_idx) = net.get_idx(sub) { - if net.trees[source].directions.len() > sub_idx.index() { - if let Some(direction) = net.trees[source].directions[sub_idx.index()] { - if 
net.graph.contains_node(direction) { - if let Some(face) = tables.get_face(&net.graph[direction].zid) { - route.entry(face.id).or_insert_with(|| { - let key_expr = - Resource::get_best_key(expr.prefix, expr.suffix, face.id); - ( - face.clone(), - key_expr.to_owned(), - if source != 0 { - Some(source as u16) - } else { - None - }, - ) - }); - } - } - } - } - } - } - } else { - log::trace!("Tree for node sid:{} not yet ready", source); - } -} - -fn compute_data_route( - tables: &Tables, - expr: &mut RoutingExpr, - source: Option, - source_type: WhatAmI, -) -> Arc { - let mut route = HashMap::new(); - let key_expr = expr.full_expr(); - if key_expr.ends_with('/') { - return Arc::new(route); - } - log::trace!( - "compute_data_route({}, {:?}, {:?})", - key_expr, - source, - source_type - ); - let key_expr = match OwnedKeyExpr::try_from(key_expr) { - Ok(ke) => ke, - Err(e) => { - log::warn!("Invalid KE reached the system: {}", e); - return Arc::new(route); - } - }; - let res = Resource::get_resource(expr.prefix, expr.suffix); - let matches = res - .as_ref() - .and_then(|res| res.context.as_ref()) - .map(|ctx| Cow::from(&ctx.matches)) - .unwrap_or_else(|| Cow::from(Resource::get_matches(tables, &key_expr))); - - let master = tables.whatami != WhatAmI::Router - || !tables.full_net(WhatAmI::Peer) - || *tables.elect_router(&key_expr, tables.shared_nodes.iter()) == tables.zid; - - for mres in matches.iter() { - let mres = mres.upgrade().unwrap(); - if tables.whatami == WhatAmI::Router { - if master || source_type == WhatAmI::Router { - let net = tables.routers_net.as_ref().unwrap(); - let router_source = match source_type { - WhatAmI::Router => source.unwrap(), - _ => net.idx.index(), - }; - insert_faces_for_subs( - &mut route, - expr, - tables, - net, - router_source, - &mres.context().router_subs, - ); - } - - if (master || source_type != WhatAmI::Router) && tables.full_net(WhatAmI::Peer) { - let net = tables.peers_net.as_ref().unwrap(); - let peer_source = match source_type { - WhatAmI::Peer => source.unwrap(), - _ => net.idx.index(), - }; - insert_faces_for_subs( - &mut route, - expr, - tables, - net, - peer_source, - &mres.context().peer_subs, - ); - } - } - - if tables.whatami == WhatAmI::Peer && tables.full_net(WhatAmI::Peer) { - let net = tables.peers_net.as_ref().unwrap(); - let peer_source = match source_type { - WhatAmI::Router | WhatAmI::Peer => source.unwrap(), - _ => net.idx.index(), - }; - insert_faces_for_subs( - &mut route, - expr, - tables, - net, - peer_source, - &mres.context().peer_subs, - ); - } - - if tables.whatami != WhatAmI::Router || master || source_type == WhatAmI::Router { - for (sid, context) in &mres.session_ctxs { - if let Some(subinfo) = &context.subs { - if match tables.whatami { - WhatAmI::Router => context.face.whatami != WhatAmI::Router, - _ => { - source_type == WhatAmI::Client - || context.face.whatami == WhatAmI::Client - } - } && subinfo.mode == Mode::Push - { - route.entry(*sid).or_insert_with(|| { - let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, *sid); - (context.face.clone(), key_expr.to_owned(), None) - }); - } - } - } - } - } - for mcast_group in &tables.mcast_groups { - route.insert( - mcast_group.id, - ( - mcast_group.clone(), - expr.full_expr().to_string().into(), - None, - ), - ); - } - Arc::new(route) -} - -fn compute_matching_pulls(tables: &Tables, expr: &mut RoutingExpr) -> Arc { - let mut pull_caches = vec![]; - let ke = if let Ok(ke) = OwnedKeyExpr::try_from(expr.full_expr()) { - ke - } else { - return Arc::new(pull_caches); - }; 
- let res = Resource::get_resource(expr.prefix, expr.suffix); - let matches = res - .as_ref() - .and_then(|res| res.context.as_ref()) - .map(|ctx| Cow::from(&ctx.matches)) - .unwrap_or_else(|| Cow::from(Resource::get_matches(tables, &ke))); - - for mres in matches.iter() { - let mres = mres.upgrade().unwrap(); - for context in mres.session_ctxs.values() { - if let Some(subinfo) = &context.subs { - if subinfo.mode == Mode::Pull { - pull_caches.push(context.clone()); - } - } - } - } - Arc::new(pull_caches) -} - -pub(super) fn compute_data_routes_(tables: &Tables, res: &Arc) -> DataRoutes { - let mut routes = DataRoutes { - matching_pulls: None, - routers_data_routes: vec![], - peers_data_routes: vec![], - peer_data_route: None, - client_data_route: None, - }; - let mut expr = RoutingExpr::new(res, ""); - if tables.whatami == WhatAmI::Router { - let indexes = tables - .routers_net - .as_ref() - .unwrap() - .graph - .node_indices() - .collect::>(); - let max_idx = indexes.iter().max().unwrap(); - routes - .routers_data_routes - .resize_with(max_idx.index() + 1, || Arc::new(HashMap::new())); - - for idx in &indexes { - routes.routers_data_routes[idx.index()] = - compute_data_route(tables, &mut expr, Some(idx.index()), WhatAmI::Router); - } - - if !tables.full_net(WhatAmI::Peer) { - routes.peer_data_route = - Some(compute_data_route(tables, &mut expr, None, WhatAmI::Peer)); - } - } - if (tables.whatami == WhatAmI::Router || tables.whatami == WhatAmI::Peer) - && tables.full_net(WhatAmI::Peer) - { - let indexes = tables - .peers_net - .as_ref() - .unwrap() - .graph - .node_indices() - .collect::>(); - let max_idx = indexes.iter().max().unwrap(); - routes - .peers_data_routes - .resize_with(max_idx.index() + 1, || Arc::new(HashMap::new())); - - for idx in &indexes { - routes.peers_data_routes[idx.index()] = - compute_data_route(tables, &mut expr, Some(idx.index()), WhatAmI::Peer); - } - } - if tables.whatami == WhatAmI::Peer && !tables.full_net(WhatAmI::Peer) { - routes.client_data_route = - Some(compute_data_route(tables, &mut expr, None, WhatAmI::Client)); - routes.peer_data_route = Some(compute_data_route(tables, &mut expr, None, WhatAmI::Peer)); - } - if tables.whatami == WhatAmI::Client { - routes.client_data_route = - Some(compute_data_route(tables, &mut expr, None, WhatAmI::Client)); - } - routes.matching_pulls = Some(compute_matching_pulls(tables, &mut expr)); - routes -} - -pub(crate) fn compute_data_routes(tables: &mut Tables, res: &mut Arc) { - if res.context.is_some() { - let mut res_mut = res.clone(); - let res_mut = get_mut_unchecked(&mut res_mut); - let mut expr = RoutingExpr::new(res, ""); - if tables.whatami == WhatAmI::Router { - let indexes = tables - .routers_net - .as_ref() - .unwrap() - .graph - .node_indices() - .collect::>(); - let max_idx = indexes.iter().max().unwrap(); - let routers_data_routes = &mut res_mut.context_mut().routers_data_routes; - routers_data_routes.clear(); - routers_data_routes.resize_with(max_idx.index() + 1, || Arc::new(HashMap::new())); - - for idx in &indexes { - routers_data_routes[idx.index()] = - compute_data_route(tables, &mut expr, Some(idx.index()), WhatAmI::Router); - } - - if !tables.full_net(WhatAmI::Peer) { - res_mut.context_mut().peer_data_route = - Some(compute_data_route(tables, &mut expr, None, WhatAmI::Peer)); - } - } - if (tables.whatami == WhatAmI::Router || tables.whatami == WhatAmI::Peer) - && tables.full_net(WhatAmI::Peer) - { - let indexes = tables - .peers_net - .as_ref() - .unwrap() - .graph - .node_indices() - .collect::>(); - 
let max_idx = indexes.iter().max().unwrap(); - let peers_data_routes = &mut res_mut.context_mut().peers_data_routes; - peers_data_routes.clear(); - peers_data_routes.resize_with(max_idx.index() + 1, || Arc::new(HashMap::new())); - - for idx in &indexes { - peers_data_routes[idx.index()] = - compute_data_route(tables, &mut expr, Some(idx.index()), WhatAmI::Peer); - } - } - if tables.whatami == WhatAmI::Peer && !tables.full_net(WhatAmI::Peer) { - res_mut.context_mut().client_data_route = - Some(compute_data_route(tables, &mut expr, None, WhatAmI::Client)); - res_mut.context_mut().peer_data_route = - Some(compute_data_route(tables, &mut expr, None, WhatAmI::Peer)); - } - if tables.whatami == WhatAmI::Client { - res_mut.context_mut().client_data_route = - Some(compute_data_route(tables, &mut expr, None, WhatAmI::Client)); - } - res_mut.context_mut().matching_pulls = compute_matching_pulls(tables, &mut expr); - } -} - -pub(super) fn compute_data_routes_from(tables: &mut Tables, res: &mut Arc) { - compute_data_routes(tables, res); - let res = get_mut_unchecked(res); - for child in res.childs.values_mut() { - compute_data_routes_from(tables, child); - } -} - -pub(super) fn compute_matches_data_routes_<'a>( - tables: &'a Tables, - res: &'a Arc, -) -> Vec<(Arc, DataRoutes)> { - let mut routes = vec![]; - if res.context.is_some() { - routes.push((res.clone(), compute_data_routes_(tables, res))); - for match_ in &res.context().matches { - let match_ = match_.upgrade().unwrap(); - if !Arc::ptr_eq(&match_, res) { - let match_routes = compute_data_routes_(tables, &match_); - routes.push((match_, match_routes)); - } - } - } - routes -} - -pub(super) fn disable_matches_data_routes(_tables: &mut Tables, res: &mut Arc) { - if res.context.is_some() { - get_mut_unchecked(res).context_mut().valid_data_routes = false; - for match_ in &res.context().matches { - let mut match_ = match_.upgrade().unwrap(); - if !Arc::ptr_eq(&match_, res) { - get_mut_unchecked(&mut match_) - .context_mut() - .valid_data_routes = false; - } - } - } -} - -macro_rules! treat_timestamp { - ($hlc:expr, $payload:expr, $drop:expr) => { - // if an HLC was configured (via Config.add_timestamp), - // check DataInfo and add a timestamp if there isn't - if let Some(hlc) = $hlc { - if let PushBody::Put(data) = &mut $payload { - if let Some(ref ts) = data.timestamp { - // Timestamp is present; update HLC with it (possibly raising error if delta exceed) - match hlc.update_with_timestamp(ts) { - Ok(()) => (), - Err(e) => { - if $drop { - log::error!( - "Error treating timestamp for received Data ({}). Drop it!", - e - ); - return; - } else { - data.timestamp = Some(hlc.new_timestamp()); - log::error!( - "Error treating timestamp for received Data ({}). 
Replace timestamp: {:?}", - e, - data.timestamp); - } - } - } - } else { - // Timestamp not present; add one - data.timestamp = Some(hlc.new_timestamp()); - log::trace!("Adding timestamp to DataInfo: {:?}", data.timestamp); - } - } - } - } -} - -#[inline] -pub(crate) fn get_data_route( - tables: &Tables, - whatami: WhatAmI, - link_id: usize, - res: &Option>, - expr: &mut RoutingExpr, - routing_context: u64, -) -> Arc { - match tables.whatami { - WhatAmI::Router => match whatami { - WhatAmI::Router => { - let routers_net = tables.routers_net.as_ref().unwrap(); - let local_context = routers_net.get_local_context(routing_context, link_id); - res.as_ref() - .and_then(|res| res.routers_data_route(local_context)) - .unwrap_or_else(|| { - compute_data_route(tables, expr, Some(local_context), whatami) - }) - } - WhatAmI::Peer => { - if tables.full_net(WhatAmI::Peer) { - let peers_net = tables.peers_net.as_ref().unwrap(); - let local_context = peers_net.get_local_context(routing_context, link_id); - res.as_ref() - .and_then(|res| res.peers_data_route(local_context)) - .unwrap_or_else(|| { - compute_data_route(tables, expr, Some(local_context), whatami) - }) - } else { - res.as_ref() - .and_then(|res| res.peer_data_route()) - .unwrap_or_else(|| compute_data_route(tables, expr, None, whatami)) - } - } - _ => res - .as_ref() - .and_then(|res| res.routers_data_route(0)) - .unwrap_or_else(|| compute_data_route(tables, expr, None, whatami)), - }, - WhatAmI::Peer => { - if tables.full_net(WhatAmI::Peer) { - match whatami { - WhatAmI::Router | WhatAmI::Peer => { - let peers_net = tables.peers_net.as_ref().unwrap(); - let local_context = peers_net.get_local_context(routing_context, link_id); - res.as_ref() - .and_then(|res| res.peers_data_route(local_context)) - .unwrap_or_else(|| { - compute_data_route(tables, expr, Some(local_context), whatami) - }) - } - _ => res - .as_ref() - .and_then(|res| res.peers_data_route(0)) - .unwrap_or_else(|| compute_data_route(tables, expr, None, whatami)), - } - } else { - res.as_ref() - .and_then(|res| match whatami { - WhatAmI::Client => res.client_data_route(), - _ => res.peer_data_route(), - }) - .unwrap_or_else(|| compute_data_route(tables, expr, None, whatami)) - } - } - _ => res - .as_ref() - .and_then(|res| res.client_data_route()) - .unwrap_or_else(|| compute_data_route(tables, expr, None, whatami)), - } -} - -#[inline] -fn get_matching_pulls( - tables: &Tables, - res: &Option>, - expr: &mut RoutingExpr, -) -> Arc { - res.as_ref() - .and_then(|res| res.context.as_ref()) - .map(|ctx| ctx.matching_pulls.clone()) - .unwrap_or_else(|| compute_matching_pulls(tables, expr)) -} - -macro_rules! 
cache_data { - ( - $matching_pulls:expr, - $expr:expr, - $payload:expr - ) => { - for context in $matching_pulls.iter() { - get_mut_unchecked(&mut context.clone()) - .last_values - .insert($expr.full_expr().to_string(), $payload.clone()); - } - }; -} - -#[inline] -fn should_route( - tables: &Tables, - src_face: &FaceState, - outface: &Arc, - expr: &mut RoutingExpr, -) -> bool { - if src_face.id != outface.id - && match (src_face.mcast_group.as_ref(), outface.mcast_group.as_ref()) { - (Some(l), Some(r)) => l != r, - _ => true, - } - { - let dst_master = tables.whatami != WhatAmI::Router - || outface.whatami != WhatAmI::Peer - || tables.peers_net.is_none() - || tables.zid - == *tables.elect_router(expr.full_expr(), tables.get_router_links(outface.zid)); - - return dst_master - && (src_face.whatami != WhatAmI::Peer - || outface.whatami != WhatAmI::Peer - || tables.full_net(WhatAmI::Peer) - || tables.failover_brokering(src_face.zid, outface.zid)); - } - false -} - -#[cfg(feature = "stats")] -macro_rules! inc_stats { - ( - $face:expr, - $txrx:ident, - $space:ident, - $body:expr - ) => { - paste::paste! { - if let Some(stats) = $face.stats.as_ref() { - use zenoh_buffers::buffer::Buffer; - match &$body { - PushBody::Put(p) => { - stats.[<$txrx _z_put_msgs>].[](1); - stats.[<$txrx _z_put_pl_bytes>].[](p.payload.len()); - } - PushBody::Del(_) => { - stats.[<$txrx _z_del_msgs>].[](1); - } - } - } - } - }; -} - -#[allow(clippy::too_many_arguments)] -pub fn full_reentrant_route_data( - tables_ref: &RwLock, - face: &FaceState, - expr: &WireExpr, - ext_qos: ext::QoSType, - mut payload: PushBody, - routing_context: u64, -) { - let tables = zread!(tables_ref); - match tables.get_mapping(face, &expr.scope, expr.mapping).cloned() { - Some(prefix) => { - log::trace!( - "Route data for res {}{}", - prefix.expr(), - expr.suffix.as_ref() - ); - let mut expr = RoutingExpr::new(&prefix, expr.suffix.as_ref()); - - #[cfg(feature = "stats")] - let admin = expr.full_expr().starts_with("@/"); - #[cfg(feature = "stats")] - if !admin { - inc_stats!(face, rx, user, payload) - } else { - inc_stats!(face, rx, admin, payload) - } - - if tables.whatami != WhatAmI::Router - || face.whatami != WhatAmI::Peer - || tables.peers_net.is_none() - || tables.zid - == *tables.elect_router(expr.full_expr(), tables.get_router_links(face.zid)) - { - let res = Resource::get_resource(&prefix, expr.suffix); - let route = get_data_route( - &tables, - face.whatami, - face.link_id, - &res, - &mut expr, - routing_context, - ); - let matching_pulls = get_matching_pulls(&tables, &res, &mut expr); - - if !(route.is_empty() && matching_pulls.is_empty()) { - treat_timestamp!(&tables.hlc, payload, tables.drop_future_timestamp); - - if route.len() == 1 && matching_pulls.len() == 0 { - let (outface, key_expr, context) = route.values().next().unwrap(); - if should_route(&tables, face, outface, &mut expr) { - drop(tables); - #[cfg(feature = "stats")] - if !admin { - inc_stats!(face, tx, user, payload) - } else { - inc_stats!(face, tx, admin, payload) - } - - outface.primitives.send_push(Push { - wire_expr: key_expr.into(), - ext_qos, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType { - node_id: context.unwrap_or(0), - }, - payload, - }) - } - } else { - if !matching_pulls.is_empty() { - let lock = zlock!(tables.pull_caches_lock); - cache_data!(matching_pulls, expr, payload); - drop(lock); - } - - if tables.whatami == WhatAmI::Router { - let route = route - .values() - .filter(|(outface, _key_expr, _context)| { - should_route(&tables, face, outface, &mut 
expr) - }) - .cloned() - .collect::>(); - - drop(tables); - for (outface, key_expr, context) in route { - #[cfg(feature = "stats")] - if !admin { - inc_stats!(face, tx, user, payload) - } else { - inc_stats!(face, tx, admin, payload) - } - - outface.primitives.send_push(Push { - wire_expr: key_expr, - ext_qos, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType { - node_id: context.unwrap_or(0), - }, - payload: payload.clone(), - }) - } - } else { - drop(tables); - for (outface, key_expr, context) in route.values() { - if face.id != outface.id - && match ( - face.mcast_group.as_ref(), - outface.mcast_group.as_ref(), - ) { - (Some(l), Some(r)) => l != r, - _ => true, - } - { - #[cfg(feature = "stats")] - if !admin { - inc_stats!(face, tx, user, payload) - } else { - inc_stats!(face, tx, admin, payload) - } - - outface.primitives.send_push(Push { - wire_expr: key_expr.into(), - ext_qos, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType { - node_id: context.unwrap_or(0), - }, - payload: payload.clone(), - }) - } - } - } - } - } - } - } - None => { - log::error!("Route data with unknown scope {}!", expr.scope); - } - } -} - -pub fn pull_data(tables_ref: &RwLock, face: &Arc, expr: WireExpr) { - let tables = zread!(tables_ref); - match tables.get_mapping(face, &expr.scope, expr.mapping) { - Some(prefix) => match Resource::get_resource(prefix, expr.suffix.as_ref()) { - Some(mut res) => { - let res = get_mut_unchecked(&mut res); - match res.session_ctxs.get_mut(&face.id) { - Some(ctx) => match &ctx.subs { - Some(_subinfo) => { - // let reliability = subinfo.reliability; - let lock = zlock!(tables.pull_caches_lock); - let route = get_mut_unchecked(ctx) - .last_values - .drain() - .map(|(name, sample)| { - ( - Resource::get_best_key(&tables.root_res, &name, face.id) - .to_owned(), - sample, - ) - }) - .collect::>(); - drop(lock); - drop(tables); - for (key_expr, payload) in route { - face.primitives.send_push(Push { - wire_expr: key_expr, - ext_qos: ext::QoSType::push_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - payload, - }); - } - } - None => { - log::error!( - "Pull data for unknown subscription {} (no info)!", - prefix.expr() + expr.suffix.as_ref() - ); - } - }, - None => { - log::error!( - "Pull data for unknown subscription {} (no context)!", - prefix.expr() + expr.suffix.as_ref() - ); - } - } - } - None => { - log::error!( - "Pull data for unknown subscription {} (no resource)!", - prefix.expr() + expr.suffix.as_ref() - ); - } - }, - None => { - log::error!("Pull data with unknown scope {}!", expr.scope); - } - }; -} diff --git a/zenoh/src/net/routing/queries.rs b/zenoh/src/net/routing/queries.rs deleted file mode 100644 index c2496b5ff8..0000000000 --- a/zenoh/src/net/routing/queries.rs +++ /dev/null @@ -1,2352 +0,0 @@ -// -// Copyright (c) 2023 ZettaScale Technology -// -// This program and the accompanying materials are made available under the -// terms of the Eclipse Public License 2.0 which is available at -// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 -// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
-// -// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 -// -// Contributors: -// ZettaScale Zenoh Team, -// -use super::face::FaceState; -use super::network::Network; -use super::resource::{ - QueryRoute, QueryRoutes, QueryTargetQabl, QueryTargetQablSet, Resource, RoutingContext, - SessionContext, -}; -use super::router::{RoutingExpr, Tables, TablesLock}; -use async_trait::async_trait; -use ordered_float::OrderedFloat; -use petgraph::graph::NodeIndex; -use std::borrow::Cow; -use std::collections::HashMap; -use std::convert::TryFrom; -use std::sync::{Arc, RwLockReadGuard, Weak}; -use zenoh_buffers::ZBuf; -use zenoh_protocol::{ - core::{ - key_expr::{ - include::{Includer, DEFAULT_INCLUDER}, - keyexpr, OwnedKeyExpr, - }, - Encoding, WhatAmI, WireExpr, ZenohId, - }, - network::{ - declare::{ - common::ext::WireExprType, ext, queryable::ext::QueryableInfo, Declare, DeclareBody, - DeclareQueryable, UndeclareQueryable, - }, - request::{ext::TargetType, Request, RequestId}, - response::{self, ext::ResponderIdType, Response, ResponseFinal}, - }, - zenoh::{reply::ext::ConsolidationType, Reply, RequestBody, ResponseBody}, -}; -use zenoh_sync::get_mut_unchecked; -use zenoh_util::Timed; - -pub(crate) struct Query { - src_face: Arc, - src_qid: RequestId, -} - -#[cfg(feature = "complete_n")] -#[inline] -fn merge_qabl_infos(mut this: QueryableInfo, info: &QueryableInfo) -> QueryableInfo { - this.complete += info.complete; - this.distance = std::cmp::min(this.distance, info.distance); - this -} - -#[cfg(not(feature = "complete_n"))] -#[inline] -fn merge_qabl_infos(mut this: QueryableInfo, info: &QueryableInfo) -> QueryableInfo { - this.complete = u8::from(this.complete != 0 || info.complete != 0); - this.distance = std::cmp::min(this.distance, info.distance); - this -} - -fn local_router_qabl_info(tables: &Tables, res: &Arc) -> QueryableInfo { - let info = if tables.full_net(WhatAmI::Peer) { - res.context.as_ref().and_then(|ctx| { - ctx.peer_qabls.iter().fold(None, |accu, (zid, info)| { - if *zid != tables.zid { - Some(match accu { - Some(accu) => merge_qabl_infos(accu, info), - None => *info, - }) - } else { - accu - } - }) - }) - } else { - None - }; - res.session_ctxs - .values() - .fold(info, |accu, ctx| { - if let Some(info) = ctx.qabl.as_ref() { - Some(match accu { - Some(accu) => merge_qabl_infos(accu, info), - None => *info, - }) - } else { - accu - } - }) - .unwrap_or(QueryableInfo { - complete: 0, - distance: 0, - }) -} - -fn local_peer_qabl_info(tables: &Tables, res: &Arc) -> QueryableInfo { - let info = if tables.whatami == WhatAmI::Router && res.context.is_some() { - res.context() - .router_qabls - .iter() - .fold(None, |accu, (zid, info)| { - if *zid != tables.zid { - Some(match accu { - Some(accu) => merge_qabl_infos(accu, info), - None => *info, - }) - } else { - accu - } - }) - } else { - None - }; - res.session_ctxs - .values() - .fold(info, |accu, ctx| { - if let Some(info) = ctx.qabl.as_ref() { - Some(match accu { - Some(accu) => merge_qabl_infos(accu, info), - None => *info, - }) - } else { - accu - } - }) - .unwrap_or(QueryableInfo { - complete: 0, - distance: 0, - }) -} - -fn local_qabl_info(tables: &Tables, res: &Arc, face: &Arc) -> QueryableInfo { - let mut info = if tables.whatami == WhatAmI::Router && res.context.is_some() { - res.context() - .router_qabls - .iter() - .fold(None, |accu, (zid, info)| { - if *zid != tables.zid { - Some(match accu { - Some(accu) => merge_qabl_infos(accu, info), - None => *info, - }) - } else { - accu - } - }) - } else { - None - }; - if 
res.context.is_some() && tables.full_net(WhatAmI::Peer) { - info = res - .context() - .peer_qabls - .iter() - .fold(info, |accu, (zid, info)| { - if *zid != tables.zid { - Some(match accu { - Some(accu) => merge_qabl_infos(accu, info), - None => *info, - }) - } else { - accu - } - }) - } - res.session_ctxs - .values() - .fold(info, |accu, ctx| { - if ctx.face.id != face.id && ctx.face.whatami != WhatAmI::Peer - || face.whatami != WhatAmI::Peer - || tables.failover_brokering(ctx.face.zid, face.zid) - { - if let Some(info) = ctx.qabl.as_ref() { - Some(match accu { - Some(accu) => merge_qabl_infos(accu, info), - None => *info, - }) - } else { - accu - } - } else { - accu - } - }) - .unwrap_or(QueryableInfo { - complete: 0, - distance: 0, - }) -} - -#[allow(clippy::too_many_arguments)] -#[inline] -fn send_sourced_queryable_to_net_childs( - tables: &Tables, - net: &Network, - childs: &[NodeIndex], - res: &Arc, - qabl_info: &QueryableInfo, - src_face: Option<&mut Arc>, - routing_context: Option, -) { - for child in childs { - if net.graph.contains_node(*child) { - match tables.get_face(&net.graph[*child].zid).cloned() { - Some(mut someface) => { - if src_face.is_none() || someface.id != src_face.as_ref().unwrap().id { - let key_expr = Resource::decl_key(res, &mut someface); - - log::debug!("Send queryable {} on {}", res.expr(), someface); - - someface.primitives.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType { - node_id: routing_context.unwrap_or(0), - }, - body: DeclareBody::DeclareQueryable(DeclareQueryable { - id: 0, // TODO - wire_expr: key_expr, - ext_info: *qabl_info, - }), - }); - } - } - None => log::trace!("Unable to find face for zid {}", net.graph[*child].zid), - } - } - } -} - -fn propagate_simple_queryable( - tables: &mut Tables, - res: &Arc, - src_face: Option<&mut Arc>, -) { - let full_peers_net = tables.full_net(WhatAmI::Peer); - let faces = tables.faces.values().cloned(); - for mut dst_face in faces { - let info = local_qabl_info(tables, res, &dst_face); - let current_info = dst_face.local_qabls.get(res); - if (src_face.is_none() || src_face.as_ref().unwrap().id != dst_face.id) - && (current_info.is_none() || *current_info.unwrap() != info) - && match tables.whatami { - WhatAmI::Router => { - if full_peers_net { - dst_face.whatami == WhatAmI::Client - } else { - dst_face.whatami != WhatAmI::Router - && (src_face.is_none() - || src_face.as_ref().unwrap().whatami != WhatAmI::Peer - || dst_face.whatami != WhatAmI::Peer - || tables.failover_brokering( - src_face.as_ref().unwrap().zid, - dst_face.zid, - )) - } - } - WhatAmI::Peer => { - if full_peers_net { - dst_face.whatami == WhatAmI::Client - } else { - src_face.is_none() - || src_face.as_ref().unwrap().whatami == WhatAmI::Client - || dst_face.whatami == WhatAmI::Client - } - } - _ => { - src_face.is_none() - || src_face.as_ref().unwrap().whatami == WhatAmI::Client - || dst_face.whatami == WhatAmI::Client - } - } - { - get_mut_unchecked(&mut dst_face) - .local_qabls - .insert(res.clone(), info); - let key_expr = Resource::decl_key(res, &mut dst_face); - dst_face.primitives.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareQueryable(DeclareQueryable { - id: 0, // TODO - wire_expr: key_expr, - ext_info: info, - }), - }); - } - } -} - -fn propagate_sourced_queryable( - tables: &Tables, - res: &Arc, - qabl_info: &QueryableInfo, - src_face: Option<&mut Arc>, - 
source: &ZenohId, - net_type: WhatAmI, -) { - let net = tables.get_net(net_type).unwrap(); - match net.get_idx(source) { - Some(tree_sid) => { - if net.trees.len() > tree_sid.index() { - send_sourced_queryable_to_net_childs( - tables, - net, - &net.trees[tree_sid.index()].childs, - res, - qabl_info, - src_face, - Some(tree_sid.index() as u16), - ); - } else { - log::trace!( - "Propagating qabl {}: tree for node {} sid:{} not yet ready", - res.expr(), - tree_sid.index(), - source - ); - } - } - None => log::error!( - "Error propagating qabl {}: cannot get index of {}!", - res.expr(), - source - ), - } -} - -fn register_router_queryable( - tables: &mut Tables, - mut face: Option<&mut Arc>, - res: &mut Arc, - qabl_info: &QueryableInfo, - router: ZenohId, -) { - let current_info = res.context().router_qabls.get(&router); - if current_info.is_none() || current_info.unwrap() != qabl_info { - // Register router queryable - { - log::debug!( - "Register router queryable {} (router: {})", - res.expr(), - router, - ); - get_mut_unchecked(res) - .context_mut() - .router_qabls - .insert(router, *qabl_info); - tables.router_qabls.insert(res.clone()); - } - - // Propagate queryable to routers - propagate_sourced_queryable( - tables, - res, - qabl_info, - face.as_deref_mut(), - &router, - WhatAmI::Router, - ); - } - - if tables.full_net(WhatAmI::Peer) { - // Propagate queryable to peers - if face.is_none() || face.as_ref().unwrap().whatami != WhatAmI::Peer { - let local_info = local_peer_qabl_info(tables, res); - register_peer_queryable(tables, face.as_deref_mut(), res, &local_info, tables.zid) - } - } - - // Propagate queryable to clients - propagate_simple_queryable(tables, res, face); -} - -pub fn declare_router_queryable( - tables: &TablesLock, - rtables: RwLockReadGuard, - face: &mut Arc, - expr: &WireExpr, - qabl_info: &QueryableInfo, - router: ZenohId, -) { - match rtables - .get_mapping(face, &expr.scope, expr.mapping) - .cloned() - { - Some(mut prefix) => { - let res = Resource::get_resource(&prefix, &expr.suffix); - let (mut res, mut wtables) = - if res.as_ref().map(|r| r.context.is_some()).unwrap_or(false) { - drop(rtables); - let wtables = zwrite!(tables.tables); - (res.unwrap(), wtables) - } else { - let mut fullexpr = prefix.expr(); - fullexpr.push_str(expr.suffix.as_ref()); - log::debug!("Register router queryable {}", fullexpr); - let mut matches = keyexpr::new(fullexpr.as_str()) - .map(|ke| Resource::get_matches(&rtables, ke)) - .unwrap_or_default(); - drop(rtables); - let mut wtables = zwrite!(tables.tables); - let mut res = - Resource::make_resource(&mut wtables, &mut prefix, expr.suffix.as_ref()); - matches.push(Arc::downgrade(&res)); - Resource::match_resource(&wtables, &mut res, matches); - (res, wtables) - }; - register_router_queryable(&mut wtables, Some(face), &mut res, qabl_info, router); - disable_matches_query_routes(&mut wtables, &mut res); - drop(wtables); - - let rtables = zread!(tables.tables); - let matches_query_routes = compute_matches_query_routes_(&rtables, &res); - drop(rtables); - - let wtables = zwrite!(tables.tables); - for (mut res, query_routes) in matches_query_routes { - get_mut_unchecked(&mut res) - .context_mut() - .update_query_routes(query_routes); - } - drop(wtables); - } - None => log::error!("Declare router queryable for unknown scope {}!", expr.scope), - } -} - -fn register_peer_queryable( - tables: &mut Tables, - mut face: Option<&mut Arc>, - res: &mut Arc, - qabl_info: &QueryableInfo, - peer: ZenohId, -) { - let current_info = 
res.context().peer_qabls.get(&peer); - if current_info.is_none() || current_info.unwrap() != qabl_info { - // Register peer queryable - { - log::debug!("Register peer queryable {} (peer: {})", res.expr(), peer,); - get_mut_unchecked(res) - .context_mut() - .peer_qabls - .insert(peer, *qabl_info); - tables.peer_qabls.insert(res.clone()); - } - - // Propagate queryable to peers - propagate_sourced_queryable( - tables, - res, - qabl_info, - face.as_deref_mut(), - &peer, - WhatAmI::Peer, - ); - } - - if tables.whatami == WhatAmI::Peer { - // Propagate queryable to clients - propagate_simple_queryable(tables, res, face); - } -} - -pub fn declare_peer_queryable( - tables: &TablesLock, - rtables: RwLockReadGuard, - face: &mut Arc, - expr: &WireExpr, - qabl_info: &QueryableInfo, - peer: ZenohId, -) { - match rtables - .get_mapping(face, &expr.scope, expr.mapping) - .cloned() - { - Some(mut prefix) => { - let res = Resource::get_resource(&prefix, &expr.suffix); - let (mut res, mut wtables) = - if res.as_ref().map(|r| r.context.is_some()).unwrap_or(false) { - drop(rtables); - let wtables = zwrite!(tables.tables); - (res.unwrap(), wtables) - } else { - let mut fullexpr = prefix.expr(); - fullexpr.push_str(expr.suffix.as_ref()); - log::debug!("Register peer queryable {}", fullexpr); - let mut matches = keyexpr::new(fullexpr.as_str()) - .map(|ke| Resource::get_matches(&rtables, ke)) - .unwrap_or_default(); - drop(rtables); - let mut wtables = zwrite!(tables.tables); - let mut res = - Resource::make_resource(&mut wtables, &mut prefix, expr.suffix.as_ref()); - matches.push(Arc::downgrade(&res)); - Resource::match_resource(&wtables, &mut res, matches); - (res, wtables) - }; - let mut face = Some(face); - register_peer_queryable(&mut wtables, face.as_deref_mut(), &mut res, qabl_info, peer); - if wtables.whatami == WhatAmI::Router { - let local_info = local_router_qabl_info(&wtables, &res); - let zid = wtables.zid; - register_router_queryable(&mut wtables, face, &mut res, &local_info, zid); - } - disable_matches_query_routes(&mut wtables, &mut res); - drop(wtables); - - let rtables = zread!(tables.tables); - let matches_query_routes = compute_matches_query_routes_(&rtables, &res); - drop(rtables); - - let wtables = zwrite!(tables.tables); - for (mut res, query_routes) in matches_query_routes { - get_mut_unchecked(&mut res) - .context_mut() - .update_query_routes(query_routes); - } - drop(wtables); - } - None => log::error!("Declare router queryable for unknown scope {}!", expr.scope), - } -} - -fn register_client_queryable( - _tables: &mut Tables, - face: &mut Arc, - res: &mut Arc, - qabl_info: &QueryableInfo, -) { - // Register queryable - { - let res = get_mut_unchecked(res); - log::debug!("Register queryable {} (face: {})", res.expr(), face,); - get_mut_unchecked(res.session_ctxs.entry(face.id).or_insert_with(|| { - Arc::new(SessionContext { - face: face.clone(), - local_expr_id: None, - remote_expr_id: None, - subs: None, - qabl: None, - last_values: HashMap::new(), - }) - })) - .qabl = Some(*qabl_info); - } - get_mut_unchecked(face).remote_qabls.insert(res.clone()); -} - -pub fn declare_client_queryable( - tables: &TablesLock, - rtables: RwLockReadGuard, - face: &mut Arc, - expr: &WireExpr, - qabl_info: &QueryableInfo, -) { - match rtables - .get_mapping(face, &expr.scope, expr.mapping) - .cloned() - { - Some(mut prefix) => { - let res = Resource::get_resource(&prefix, &expr.suffix); - let (mut res, mut wtables) = - if res.as_ref().map(|r| r.context.is_some()).unwrap_or(false) { - drop(rtables); - let 
wtables = zwrite!(tables.tables); - (res.unwrap(), wtables) - } else { - let mut fullexpr = prefix.expr(); - fullexpr.push_str(expr.suffix.as_ref()); - log::debug!("Register client queryable {}", fullexpr); - let mut matches = keyexpr::new(fullexpr.as_str()) - .map(|ke| Resource::get_matches(&rtables, ke)) - .unwrap_or_default(); - drop(rtables); - let mut wtables = zwrite!(tables.tables); - let mut res = - Resource::make_resource(&mut wtables, &mut prefix, expr.suffix.as_ref()); - matches.push(Arc::downgrade(&res)); - Resource::match_resource(&wtables, &mut res, matches); - (res, wtables) - }; - - register_client_queryable(&mut wtables, face, &mut res, qabl_info); - - match wtables.whatami { - WhatAmI::Router => { - let local_details = local_router_qabl_info(&wtables, &res); - let zid = wtables.zid; - register_router_queryable( - &mut wtables, - Some(face), - &mut res, - &local_details, - zid, - ); - } - WhatAmI::Peer => { - if wtables.full_net(WhatAmI::Peer) { - let local_details = local_peer_qabl_info(&wtables, &res); - let zid = wtables.zid; - register_peer_queryable( - &mut wtables, - Some(face), - &mut res, - &local_details, - zid, - ); - } else { - propagate_simple_queryable(&mut wtables, &res, Some(face)); - } - } - _ => { - propagate_simple_queryable(&mut wtables, &res, Some(face)); - } - } - disable_matches_query_routes(&mut wtables, &mut res); - drop(wtables); - - let rtables = zread!(tables.tables); - let matches_query_routes = compute_matches_query_routes_(&rtables, &res); - drop(rtables); - - let wtables = zwrite!(tables.tables); - for (mut res, query_routes) in matches_query_routes { - get_mut_unchecked(&mut res) - .context_mut() - .update_query_routes(query_routes); - } - drop(wtables); - } - None => log::error!("Declare queryable for unknown scope {}!", expr.scope), - } -} - -#[inline] -fn remote_router_qabls(tables: &Tables, res: &Arc) -> bool { - res.context.is_some() - && res - .context() - .router_qabls - .keys() - .any(|router| router != &tables.zid) -} - -#[inline] -fn remote_peer_qabls(tables: &Tables, res: &Arc) -> bool { - res.context.is_some() - && res - .context() - .peer_qabls - .keys() - .any(|peer| peer != &tables.zid) -} - -#[inline] -fn client_qabls(res: &Arc) -> Vec> { - res.session_ctxs - .values() - .filter_map(|ctx| { - if ctx.qabl.is_some() { - Some(ctx.face.clone()) - } else { - None - } - }) - .collect() -} - -#[inline] -fn send_forget_sourced_queryable_to_net_childs( - tables: &Tables, - net: &Network, - childs: &[NodeIndex], - res: &Arc, - src_face: Option<&Arc>, - routing_context: Option, -) { - for child in childs { - if net.graph.contains_node(*child) { - match tables.get_face(&net.graph[*child].zid).cloned() { - Some(mut someface) => { - if src_face.is_none() || someface.id != src_face.unwrap().id { - let wire_expr = Resource::decl_key(res, &mut someface); - - log::debug!("Send forget queryable {} on {}", res.expr(), someface); - - someface.primitives.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType { - node_id: routing_context.unwrap_or(0), - }, - body: DeclareBody::UndeclareQueryable(UndeclareQueryable { - id: 0, // TODO - ext_wire_expr: WireExprType { wire_expr }, - }), - }); - } - } - None => log::trace!("Unable to find face for zid {}", net.graph[*child].zid), - } - } - } -} - -fn propagate_forget_simple_queryable(tables: &mut Tables, res: &mut Arc) { - for face in tables.faces.values_mut() { - if face.local_qabls.contains_key(res) { - let wire_expr = 
Resource::get_best_key(res, "", face.id); - face.primitives.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::UndeclareQueryable(UndeclareQueryable { - id: 0, // TODO - ext_wire_expr: WireExprType { wire_expr }, - }), - }); - - get_mut_unchecked(face).local_qabls.remove(res); - } - } -} - -fn propagate_forget_simple_queryable_to_peers(tables: &mut Tables, res: &mut Arc) { - if !tables.full_net(WhatAmI::Peer) - && res.context().router_qabls.len() == 1 - && res.context().router_qabls.contains_key(&tables.zid) - { - for mut face in tables - .faces - .values() - .cloned() - .collect::>>() - { - if face.whatami == WhatAmI::Peer - && face.local_qabls.contains_key(res) - && !res.session_ctxs.values().any(|s| { - face.zid != s.face.zid - && s.qabl.is_some() - && (s.face.whatami == WhatAmI::Client - || (s.face.whatami == WhatAmI::Peer - && tables.failover_brokering(s.face.zid, face.zid))) - }) - { - let wire_expr = Resource::get_best_key(res, "", face.id); - face.primitives.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::UndeclareQueryable(UndeclareQueryable { - id: 0, // TODO - ext_wire_expr: WireExprType { wire_expr }, - }), - }); - - get_mut_unchecked(&mut face).local_qabls.remove(res); - } - } - } -} - -fn propagate_forget_sourced_queryable( - tables: &mut Tables, - res: &mut Arc, - src_face: Option<&Arc>, - source: &ZenohId, - net_type: WhatAmI, -) { - let net = tables.get_net(net_type).unwrap(); - match net.get_idx(source) { - Some(tree_sid) => { - if net.trees.len() > tree_sid.index() { - send_forget_sourced_queryable_to_net_childs( - tables, - net, - &net.trees[tree_sid.index()].childs, - res, - src_face, - Some(tree_sid.index() as u16), - ); - } else { - log::trace!( - "Propagating forget qabl {}: tree for node {} sid:{} not yet ready", - res.expr(), - tree_sid.index(), - source - ); - } - } - None => log::error!( - "Error propagating forget qabl {}: cannot get index of {}!", - res.expr(), - source - ), - } -} - -fn unregister_router_queryable(tables: &mut Tables, res: &mut Arc, router: &ZenohId) { - log::debug!( - "Unregister router queryable {} (router: {})", - res.expr(), - router, - ); - get_mut_unchecked(res) - .context_mut() - .router_qabls - .remove(router); - - if res.context().router_qabls.is_empty() { - tables.router_qabls.retain(|qabl| !Arc::ptr_eq(qabl, res)); - - if tables.full_net(WhatAmI::Peer) { - undeclare_peer_queryable(tables, None, res, &tables.zid.clone()); - } - propagate_forget_simple_queryable(tables, res); - } - - propagate_forget_simple_queryable_to_peers(tables, res); -} - -fn undeclare_router_queryable( - tables: &mut Tables, - face: Option<&Arc>, - res: &mut Arc, - router: &ZenohId, -) { - if res.context().router_qabls.contains_key(router) { - unregister_router_queryable(tables, res, router); - propagate_forget_sourced_queryable(tables, res, face, router, WhatAmI::Router); - } -} - -pub fn forget_router_queryable( - tables: &TablesLock, - rtables: RwLockReadGuard, - face: &mut Arc, - expr: &WireExpr, - router: &ZenohId, -) { - match rtables.get_mapping(face, &expr.scope, expr.mapping) { - Some(prefix) => match Resource::get_resource(prefix, expr.suffix.as_ref()) { - Some(mut res) => { - drop(rtables); - let mut wtables = zwrite!(tables.tables); - undeclare_router_queryable(&mut wtables, Some(face), &mut res, router); - disable_matches_query_routes(&mut wtables, 
&mut res); - drop(wtables); - - let rtables = zread!(tables.tables); - let matches_query_routes = compute_matches_query_routes_(&rtables, &res); - drop(rtables); - - let wtables = zwrite!(tables.tables); - for (mut res, query_routes) in matches_query_routes { - get_mut_unchecked(&mut res) - .context_mut() - .update_query_routes(query_routes); - } - Resource::clean(&mut res); - drop(wtables); - } - None => log::error!("Undeclare unknown router queryable!"), - }, - None => log::error!("Undeclare router queryable with unknown scope!"), - } -} - -fn unregister_peer_queryable(tables: &mut Tables, res: &mut Arc, peer: &ZenohId) { - log::debug!("Unregister peer queryable {} (peer: {})", res.expr(), peer,); - get_mut_unchecked(res).context_mut().peer_qabls.remove(peer); - - if res.context().peer_qabls.is_empty() { - tables.peer_qabls.retain(|qabl| !Arc::ptr_eq(qabl, res)); - - if tables.whatami == WhatAmI::Peer { - propagate_forget_simple_queryable(tables, res); - } - } -} - -fn undeclare_peer_queryable( - tables: &mut Tables, - face: Option<&Arc>, - res: &mut Arc, - peer: &ZenohId, -) { - if res.context().peer_qabls.contains_key(peer) { - unregister_peer_queryable(tables, res, peer); - propagate_forget_sourced_queryable(tables, res, face, peer, WhatAmI::Peer); - } -} - -pub fn forget_peer_queryable( - tables: &TablesLock, - rtables: RwLockReadGuard, - face: &mut Arc, - expr: &WireExpr, - peer: &ZenohId, -) { - match rtables.get_mapping(face, &expr.scope, expr.mapping) { - Some(prefix) => match Resource::get_resource(prefix, expr.suffix.as_ref()) { - Some(mut res) => { - drop(rtables); - let mut wtables = zwrite!(tables.tables); - undeclare_peer_queryable(&mut wtables, Some(face), &mut res, peer); - - if wtables.whatami == WhatAmI::Router { - let client_qabls = res.session_ctxs.values().any(|ctx| ctx.qabl.is_some()); - let peer_qabls = remote_peer_qabls(&wtables, &res); - let zid = wtables.zid; - if !client_qabls && !peer_qabls { - undeclare_router_queryable(&mut wtables, None, &mut res, &zid); - } else { - let local_info = local_router_qabl_info(&wtables, &res); - register_router_queryable(&mut wtables, None, &mut res, &local_info, zid); - } - } - drop(wtables); - - let rtables = zread!(tables.tables); - let matches_query_routes = compute_matches_query_routes_(&rtables, &res); - drop(rtables); - - let wtables = zwrite!(tables.tables); - for (mut res, query_routes) in matches_query_routes { - get_mut_unchecked(&mut res) - .context_mut() - .update_query_routes(query_routes); - } - Resource::clean(&mut res); - drop(wtables); - } - None => log::error!("Undeclare unknown peer queryable!"), - }, - None => log::error!("Undeclare peer queryable with unknown scope!"), - } -} - -pub(crate) fn undeclare_client_queryable( - tables: &mut Tables, - face: &mut Arc, - res: &mut Arc, -) { - log::debug!("Unregister client queryable {} for {}", res.expr(), face); - if let Some(ctx) = get_mut_unchecked(res).session_ctxs.get_mut(&face.id) { - get_mut_unchecked(ctx).qabl = None; - if ctx.qabl.is_none() { - get_mut_unchecked(face).remote_qabls.remove(res); - } - } - - let mut client_qabls = client_qabls(res); - let router_qabls = remote_router_qabls(tables, res); - let peer_qabls = remote_peer_qabls(tables, res); - - match tables.whatami { - WhatAmI::Router => { - if client_qabls.is_empty() && !peer_qabls { - undeclare_router_queryable(tables, None, res, &tables.zid.clone()); - } else { - let local_info = local_router_qabl_info(tables, res); - register_router_queryable(tables, None, res, &local_info, tables.zid); - 
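                // A queryable is still registered on this resource, so the
                // router-level declaration was refreshed with the recomputed
                // local info above; peers that only held a simple declaration
                // for failover brokering may no longer need it, so it is
                // withdrawn from them below.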
propagate_forget_simple_queryable_to_peers(tables, res); - } - } - WhatAmI::Peer => { - if tables.full_net(WhatAmI::Peer) { - if client_qabls.is_empty() { - undeclare_peer_queryable(tables, None, res, &tables.zid.clone()); - } else { - let local_info = local_peer_qabl_info(tables, res); - register_peer_queryable(tables, None, res, &local_info, tables.zid); - } - } else if client_qabls.is_empty() { - propagate_forget_simple_queryable(tables, res); - } else { - propagate_simple_queryable(tables, res, None); - } - } - _ => { - if client_qabls.is_empty() { - propagate_forget_simple_queryable(tables, res); - } else { - propagate_simple_queryable(tables, res, None); - } - } - } - - if client_qabls.len() == 1 && !router_qabls && !peer_qabls { - let face = &mut client_qabls[0]; - if face.local_qabls.contains_key(res) { - let wire_expr = Resource::get_best_key(res, "", face.id); - face.primitives.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::UndeclareQueryable(UndeclareQueryable { - id: 0, // TODO - ext_wire_expr: WireExprType { wire_expr }, - }), - }); - - get_mut_unchecked(face).local_qabls.remove(res); - } - } -} - -pub fn forget_client_queryable( - tables: &TablesLock, - rtables: RwLockReadGuard, - face: &mut Arc, - expr: &WireExpr, -) { - match rtables.get_mapping(face, &expr.scope, expr.mapping) { - Some(prefix) => match Resource::get_resource(prefix, expr.suffix.as_ref()) { - Some(mut res) => { - drop(rtables); - let mut wtables = zwrite!(tables.tables); - undeclare_client_queryable(&mut wtables, face, &mut res); - disable_matches_query_routes(&mut wtables, &mut res); - drop(wtables); - - let rtables = zread!(tables.tables); - let matches_query_routes = compute_matches_query_routes_(&rtables, &res); - drop(rtables); - - let wtables = zwrite!(tables.tables); - for (mut res, query_routes) in matches_query_routes { - get_mut_unchecked(&mut res) - .context_mut() - .update_query_routes(query_routes); - } - Resource::clean(&mut res); - drop(wtables); - } - None => log::error!("Undeclare unknown queryable!"), - }, - None => log::error!("Undeclare queryable with unknown scope!"), - } -} - -pub(crate) fn queries_new_face(tables: &mut Tables, face: &mut Arc) { - match tables.whatami { - WhatAmI::Router => { - if face.whatami == WhatAmI::Client { - for qabl in tables.router_qabls.iter() { - if qabl.context.is_some() { - let info = local_qabl_info(tables, qabl, face); - get_mut_unchecked(face) - .local_qabls - .insert(qabl.clone(), info); - let key_expr = Resource::decl_key(qabl, face); - face.primitives.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareQueryable(DeclareQueryable { - id: 0, // TODO - wire_expr: key_expr, - ext_info: info, - }), - }); - } - } - } else if face.whatami == WhatAmI::Peer && !tables.full_net(WhatAmI::Peer) { - for qabl in tables.router_qabls.iter() { - if qabl.context.is_some() - && (qabl.context().router_qabls.keys().any(|r| *r != tables.zid) - || qabl.session_ctxs.values().any(|s| { - s.qabl.is_some() - && (s.face.whatami == WhatAmI::Client - || (s.face.whatami == WhatAmI::Peer - && tables.failover_brokering(s.face.zid, face.zid))) - })) - { - let info = local_qabl_info(tables, qabl, face); - get_mut_unchecked(face) - .local_qabls - .insert(qabl.clone(), info); - let key_expr = Resource::decl_key(qabl, face); - face.primitives.send_declare(Declare { - ext_qos: 
ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareQueryable(DeclareQueryable { - id: 0, // TODO - wire_expr: key_expr, - ext_info: info, - }), - }); - } - } - } - } - WhatAmI::Peer => { - if tables.full_net(WhatAmI::Peer) { - if face.whatami == WhatAmI::Client { - for qabl in &tables.peer_qabls { - if qabl.context.is_some() { - let info = local_qabl_info(tables, qabl, face); - get_mut_unchecked(face) - .local_qabls - .insert(qabl.clone(), info); - let key_expr = Resource::decl_key(qabl, face); - face.primitives.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareQueryable(DeclareQueryable { - id: 0, // TODO - wire_expr: key_expr, - ext_info: info, - }), - }); - } - } - } - } else { - for face in tables - .faces - .values() - .cloned() - .collect::>>() - { - for qabl in face.remote_qabls.iter() { - propagate_simple_queryable(tables, qabl, Some(&mut face.clone())); - } - } - } - } - WhatAmI::Client => { - for face in tables - .faces - .values() - .cloned() - .collect::>>() - { - for qabl in face.remote_qabls.iter() { - propagate_simple_queryable(tables, qabl, Some(&mut face.clone())); - } - } - } - } -} - -pub(crate) fn queries_remove_node(tables: &mut Tables, node: &ZenohId, net_type: WhatAmI) { - match net_type { - WhatAmI::Router => { - let mut qabls = vec![]; - for res in tables.router_qabls.iter() { - for qabl in res.context().router_qabls.keys() { - if qabl == node { - qabls.push(res.clone()); - } - } - } - for mut res in qabls { - unregister_router_queryable(tables, &mut res, node); - - let matches_query_routes = compute_matches_query_routes_(tables, &res); - for (mut res, query_routes) in matches_query_routes { - get_mut_unchecked(&mut res) - .context_mut() - .update_query_routes(query_routes); - } - Resource::clean(&mut res); - } - } - WhatAmI::Peer => { - let mut qabls = vec![]; - for res in tables.router_qabls.iter() { - for qabl in res.context().router_qabls.keys() { - if qabl == node { - qabls.push(res.clone()); - } - } - } - for mut res in qabls { - unregister_peer_queryable(tables, &mut res, node); - - if tables.whatami == WhatAmI::Router { - let client_qabls = res.session_ctxs.values().any(|ctx| ctx.qabl.is_some()); - let peer_qabls = remote_peer_qabls(tables, &res); - if !client_qabls && !peer_qabls { - undeclare_router_queryable(tables, None, &mut res, &tables.zid.clone()); - } else { - let local_info = local_router_qabl_info(tables, &res); - register_router_queryable(tables, None, &mut res, &local_info, tables.zid); - } - } - - let matches_query_routes = compute_matches_query_routes_(tables, &res); - for (mut res, query_routes) in matches_query_routes { - get_mut_unchecked(&mut res) - .context_mut() - .update_query_routes(query_routes); - } - Resource::clean(&mut res) - } - } - _ => (), - } -} - -pub(crate) fn queries_linkstate_change(tables: &mut Tables, zid: &ZenohId, links: &[ZenohId]) { - if let Some(src_face) = tables.get_face(zid) { - if tables.router_peers_failover_brokering - && tables.whatami == WhatAmI::Router - && src_face.whatami == WhatAmI::Peer - { - for res in &src_face.remote_qabls { - let client_qabls = res - .session_ctxs - .values() - .any(|ctx| ctx.face.whatami == WhatAmI::Client && ctx.qabl.is_some()); - if !remote_router_qabls(tables, res) && !client_qabls { - for ctx in get_mut_unchecked(&mut res.clone()) - .session_ctxs - .values_mut() - { - let dst_face = &mut 
get_mut_unchecked(ctx).face; - if dst_face.whatami == WhatAmI::Peer && src_face.zid != dst_face.zid { - if dst_face.local_qabls.contains_key(res) { - let forget = !Tables::failover_brokering_to(links, dst_face.zid) - && { - let ctx_links = tables - .peers_net - .as_ref() - .map(|net| net.get_links(dst_face.zid)) - .unwrap_or_else(|| &[]); - res.session_ctxs.values().any(|ctx2| { - ctx2.face.whatami == WhatAmI::Peer - && ctx2.qabl.is_some() - && Tables::failover_brokering_to( - ctx_links, - ctx2.face.zid, - ) - }) - }; - if forget { - let wire_expr = Resource::get_best_key(res, "", dst_face.id); - dst_face.primitives.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::UndeclareQueryable(UndeclareQueryable { - id: 0, // TODO - ext_wire_expr: WireExprType { wire_expr }, - }), - }); - - get_mut_unchecked(dst_face).local_qabls.remove(res); - } - } else if Tables::failover_brokering_to(links, ctx.face.zid) { - let dst_face = &mut get_mut_unchecked(ctx).face; - let info = local_qabl_info(tables, res, dst_face); - get_mut_unchecked(dst_face) - .local_qabls - .insert(res.clone(), info); - let key_expr = Resource::decl_key(res, dst_face); - dst_face.primitives.send_declare(Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareQueryable(DeclareQueryable { - id: 0, // TODO - wire_expr: key_expr, - ext_info: info, - }), - }); - } - } - } - } - } - } - } -} - -pub(crate) fn queries_tree_change( - tables: &mut Tables, - new_childs: &[Vec], - net_type: WhatAmI, -) { - // propagate qabls to new childs - for (tree_sid, tree_childs) in new_childs.iter().enumerate() { - if !tree_childs.is_empty() { - let net = tables.get_net(net_type).unwrap(); - let tree_idx = NodeIndex::new(tree_sid); - if net.graph.contains_node(tree_idx) { - let tree_id = net.graph[tree_idx].zid; - - let qabls_res = match net_type { - WhatAmI::Router => &tables.router_qabls, - _ => &tables.peer_qabls, - }; - - for res in qabls_res { - let qabls = match net_type { - WhatAmI::Router => &res.context().router_qabls, - _ => &res.context().peer_qabls, - }; - if let Some(qabl_info) = qabls.get(&tree_id) { - send_sourced_queryable_to_net_childs( - tables, - net, - tree_childs, - res, - qabl_info, - None, - Some(tree_sid as u16), - ); - } - } - } - } - } - - // recompute routes - compute_query_routes_from(tables, &mut tables.root_res.clone()); -} - -#[inline] -#[allow(clippy::too_many_arguments)] -fn insert_target_for_qabls( - route: &mut QueryTargetQablSet, - expr: &mut RoutingExpr, - tables: &Tables, - net: &Network, - source: usize, - qabls: &HashMap, - complete: bool, -) { - if net.trees.len() > source { - for (qabl, qabl_info) in qabls { - if let Some(qabl_idx) = net.get_idx(qabl) { - if net.trees[source].directions.len() > qabl_idx.index() { - if let Some(direction) = net.trees[source].directions[qabl_idx.index()] { - if net.graph.contains_node(direction) { - if let Some(face) = tables.get_face(&net.graph[direction].zid) { - if net.distances.len() > qabl_idx.index() { - let key_expr = - Resource::get_best_key(expr.prefix, expr.suffix, face.id); - route.push(QueryTargetQabl { - direction: ( - face.clone(), - key_expr.to_owned(), - if source != 0 { - Some(source as u16) - } else { - None - }, - ), - complete: if complete { - qabl_info.complete as u64 - } else { - 0 - }, - distance: net.distances[qabl_idx.index()], - }); - } - } - } - } - } - } - } - } else { 
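        // The routing tree for this source has not been computed yet, so no
        // targets can be inserted for it; only trace the fact. Routes are
        // recomputed once the trees are updated (see queries_tree_change).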
- log::trace!("Tree for node sid:{} not yet ready", source); - } -} - -lazy_static::lazy_static! { - static ref EMPTY_ROUTE: Arc = Arc::new(Vec::new()); -} -fn compute_query_route( - tables: &Tables, - expr: &mut RoutingExpr, - source: Option, - source_type: WhatAmI, -) -> Arc { - let mut route = QueryTargetQablSet::new(); - let key_expr = expr.full_expr(); - if key_expr.ends_with('/') { - return EMPTY_ROUTE.clone(); - } - log::trace!( - "compute_query_route({}, {:?}, {:?})", - key_expr, - source, - source_type - ); - let key_expr = match OwnedKeyExpr::try_from(key_expr) { - Ok(ke) => ke, - Err(e) => { - log::warn!("Invalid KE reached the system: {}", e); - return EMPTY_ROUTE.clone(); - } - }; - let res = Resource::get_resource(expr.prefix, expr.suffix); - let matches = res - .as_ref() - .and_then(|res| res.context.as_ref()) - .map(|ctx| Cow::from(&ctx.matches)) - .unwrap_or_else(|| Cow::from(Resource::get_matches(tables, &key_expr))); - - let master = tables.whatami != WhatAmI::Router - || !tables.full_net(WhatAmI::Peer) - || *tables.elect_router(&key_expr, tables.shared_nodes.iter()) == tables.zid; - - for mres in matches.iter() { - let mres = mres.upgrade().unwrap(); - let complete = DEFAULT_INCLUDER.includes(mres.expr().as_bytes(), key_expr.as_bytes()); - if tables.whatami == WhatAmI::Router { - if master || source_type == WhatAmI::Router { - let net = tables.routers_net.as_ref().unwrap(); - let router_source = match source_type { - WhatAmI::Router => source.unwrap(), - _ => net.idx.index(), - }; - insert_target_for_qabls( - &mut route, - expr, - tables, - net, - router_source, - &mres.context().router_qabls, - complete, - ); - } - - if (master || source_type != WhatAmI::Router) && tables.full_net(WhatAmI::Peer) { - let net = tables.peers_net.as_ref().unwrap(); - let peer_source = match source_type { - WhatAmI::Peer => source.unwrap(), - _ => net.idx.index(), - }; - insert_target_for_qabls( - &mut route, - expr, - tables, - net, - peer_source, - &mres.context().peer_qabls, - complete, - ); - } - } - - if tables.whatami == WhatAmI::Peer && tables.full_net(WhatAmI::Peer) { - let net = tables.peers_net.as_ref().unwrap(); - let peer_source = match source_type { - WhatAmI::Router | WhatAmI::Peer => source.unwrap(), - _ => net.idx.index(), - }; - insert_target_for_qabls( - &mut route, - expr, - tables, - net, - peer_source, - &mres.context().peer_qabls, - complete, - ); - } - - if tables.whatami != WhatAmI::Router || master || source_type == WhatAmI::Router { - for (sid, context) in &mres.session_ctxs { - if match tables.whatami { - WhatAmI::Router => context.face.whatami != WhatAmI::Router, - _ => source_type == WhatAmI::Client || context.face.whatami == WhatAmI::Client, - } { - let key_expr = Resource::get_best_key(expr.prefix, expr.suffix, *sid); - if let Some(qabl_info) = context.qabl.as_ref() { - route.push(QueryTargetQabl { - direction: (context.face.clone(), key_expr.to_owned(), None), - complete: if complete { - qabl_info.complete as u64 - } else { - 0 - }, - distance: 0.5, - }); - } - } - } - } - } - route.sort_by_key(|qabl| OrderedFloat(qabl.distance)); - Arc::new(route) -} - -pub(super) fn compute_query_routes_(tables: &Tables, res: &Arc) -> QueryRoutes { - let mut routes = QueryRoutes { - routers_query_routes: vec![], - peers_query_routes: vec![], - peer_query_route: None, - client_query_route: None, - }; - let mut expr = RoutingExpr::new(res, ""); - if tables.whatami == WhatAmI::Router { - let indexes = tables - .routers_net - .as_ref() - .unwrap() - .graph - .node_indices() - 
.collect::>(); - let max_idx = indexes.iter().max().unwrap(); - routes - .routers_query_routes - .resize_with(max_idx.index() + 1, || Arc::new(QueryTargetQablSet::new())); - - for idx in &indexes { - routes.routers_query_routes[idx.index()] = - compute_query_route(tables, &mut expr, Some(idx.index()), WhatAmI::Router); - } - - if !tables.full_net(WhatAmI::Peer) { - routes.peer_query_route = - Some(compute_query_route(tables, &mut expr, None, WhatAmI::Peer)); - } - } - if (tables.whatami == WhatAmI::Router || tables.whatami == WhatAmI::Peer) - && tables.full_net(WhatAmI::Peer) - { - let indexes = tables - .peers_net - .as_ref() - .unwrap() - .graph - .node_indices() - .collect::>(); - let max_idx = indexes.iter().max().unwrap(); - routes - .peers_query_routes - .resize_with(max_idx.index() + 1, || Arc::new(QueryTargetQablSet::new())); - - for idx in &indexes { - routes.peers_query_routes[idx.index()] = - compute_query_route(tables, &mut expr, Some(idx.index()), WhatAmI::Peer); - } - } - if tables.whatami == WhatAmI::Peer && !tables.full_net(WhatAmI::Peer) { - routes.client_query_route = Some(compute_query_route( - tables, - &mut expr, - None, - WhatAmI::Client, - )); - routes.peer_query_route = Some(compute_query_route(tables, &mut expr, None, WhatAmI::Peer)); - } - if tables.whatami == WhatAmI::Client { - routes.client_query_route = Some(compute_query_route( - tables, - &mut expr, - None, - WhatAmI::Client, - )); - } - routes -} - -pub(crate) fn compute_query_routes(tables: &mut Tables, res: &mut Arc) { - if res.context.is_some() { - let mut res_mut = res.clone(); - let res_mut = get_mut_unchecked(&mut res_mut); - let mut expr = RoutingExpr::new(res, ""); - if tables.whatami == WhatAmI::Router { - let indexes = tables - .routers_net - .as_ref() - .unwrap() - .graph - .node_indices() - .collect::>(); - let max_idx = indexes.iter().max().unwrap(); - let routers_query_routes = &mut res_mut.context_mut().routers_query_routes; - routers_query_routes.clear(); - routers_query_routes - .resize_with(max_idx.index() + 1, || Arc::new(QueryTargetQablSet::new())); - - for idx in &indexes { - routers_query_routes[idx.index()] = - compute_query_route(tables, &mut expr, Some(idx.index()), WhatAmI::Router); - } - - if !tables.full_net(WhatAmI::Peer) { - res_mut.context_mut().peer_query_route = - Some(compute_query_route(tables, &mut expr, None, WhatAmI::Peer)); - } - } - if (tables.whatami == WhatAmI::Router || tables.whatami == WhatAmI::Peer) - && tables.full_net(WhatAmI::Peer) - { - let indexes = tables - .peers_net - .as_ref() - .unwrap() - .graph - .node_indices() - .collect::>(); - let max_idx = indexes.iter().max().unwrap(); - let peers_query_routes = &mut res_mut.context_mut().peers_query_routes; - peers_query_routes.clear(); - peers_query_routes - .resize_with(max_idx.index() + 1, || Arc::new(QueryTargetQablSet::new())); - - for idx in &indexes { - peers_query_routes[idx.index()] = - compute_query_route(tables, &mut expr, Some(idx.index()), WhatAmI::Peer); - } - } - if tables.whatami == WhatAmI::Peer && !tables.full_net(WhatAmI::Peer) { - res_mut.context_mut().client_query_route = Some(compute_query_route( - tables, - &mut expr, - None, - WhatAmI::Client, - )); - res_mut.context_mut().peer_query_route = - Some(compute_query_route(tables, &mut expr, None, WhatAmI::Peer)); - } - if tables.whatami == WhatAmI::Client { - res_mut.context_mut().client_query_route = Some(compute_query_route( - tables, - &mut expr, - None, - WhatAmI::Client, - )); - } - } -} - -fn compute_query_routes_from(tables: &mut 
Tables, res: &mut Arc) { - compute_query_routes(tables, res); - let res = get_mut_unchecked(res); - for child in res.childs.values_mut() { - compute_query_routes_from(tables, child); - } -} - -pub(super) fn compute_matches_query_routes_( - tables: &Tables, - res: &Arc, -) -> Vec<(Arc, QueryRoutes)> { - let mut routes = vec![]; - if res.context.is_some() { - routes.push((res.clone(), compute_query_routes_(tables, res))); - for match_ in &res.context().matches { - let match_ = match_.upgrade().unwrap(); - if !Arc::ptr_eq(&match_, res) { - let match_routes = compute_query_routes_(tables, &match_); - routes.push((match_, match_routes)); - } - } - } - routes -} - -#[inline] -fn insert_pending_query(outface: &mut Arc, query: Arc) -> RequestId { - let outface_mut = get_mut_unchecked(outface); - outface_mut.next_qid += 1; - let qid = outface_mut.next_qid; - outface_mut.pending_queries.insert(qid, query); - qid -} - -#[inline] -fn should_route( - tables: &Tables, - src_face: &FaceState, - outface: &Arc, - expr: &mut RoutingExpr, -) -> bool { - if src_face.id != outface.id { - let dst_master = tables.whatami != WhatAmI::Router - || outface.whatami != WhatAmI::Peer - || tables.peers_net.is_none() - || tables.zid - == *tables.elect_router(expr.full_expr(), tables.get_router_links(outface.zid)); - - return dst_master - && (src_face.whatami != WhatAmI::Peer - || outface.whatami != WhatAmI::Peer - || tables.full_net(WhatAmI::Peer) - || tables.failover_brokering(src_face.zid, outface.zid)); - } - false -} - -#[inline] -fn compute_final_route( - tables: &Tables, - qabls: &Arc, - src_face: &Arc, - expr: &mut RoutingExpr, - target: &TargetType, - query: Arc, -) -> QueryRoute { - match target { - TargetType::All => { - let mut route = HashMap::new(); - for qabl in qabls.iter() { - if should_route(tables, src_face, &qabl.direction.0, expr) { - #[cfg(feature = "complete_n")] - { - route.entry(qabl.direction.0.id).or_insert_with(|| { - let mut direction = qabl.direction.clone(); - let qid = insert_pending_query(&mut direction.0, query.clone()); - (direction, qid, *target) - }); - } - #[cfg(not(feature = "complete_n"))] - { - route.entry(qabl.direction.0.id).or_insert_with(|| { - let mut direction = qabl.direction.clone(); - let qid = insert_pending_query(&mut direction.0, query.clone()); - (direction, qid) - }); - } - } - } - route - } - TargetType::AllComplete => { - let mut route = HashMap::new(); - for qabl in qabls.iter() { - if qabl.complete > 0 && should_route(tables, src_face, &qabl.direction.0, expr) { - #[cfg(feature = "complete_n")] - { - route.entry(qabl.direction.0.id).or_insert_with(|| { - let mut direction = qabl.direction.clone(); - let qid = insert_pending_query(&mut direction.0, query.clone()); - (direction, qid, *target) - }); - } - #[cfg(not(feature = "complete_n"))] - { - route.entry(qabl.direction.0.id).or_insert_with(|| { - let mut direction = qabl.direction.clone(); - let qid = insert_pending_query(&mut direction.0, query.clone()); - (direction, qid) - }); - } - } - } - route - } - #[cfg(feature = "complete_n")] - TargetType::Complete(n) => { - let mut route = HashMap::new(); - let mut remaining = *n; - if src_face.whatami == WhatAmI::Peer && !tables.full_net(WhatAmI::Peer) { - let source_links = tables - .peers_net - .as_ref() - .map(|net| net.get_links(src_face.zid)) - .unwrap_or_default(); - for qabl in qabls.iter() { - if qabl.direction.0.id != src_face.id - && qabl.complete > 0 - && (qabl.direction.0.whatami != WhatAmI::Peer - || (tables.router_peers_failover_brokering - && 
Tables::failover_brokering_to( - source_links, - qabl.direction.0.zid, - ))) - { - let nb = std::cmp::min(qabl.complete, remaining); - route.entry(qabl.direction.0.id).or_insert_with(|| { - let mut direction = qabl.direction.clone(); - let qid = insert_pending_query(&mut direction.0, query.clone()); - (direction, qid, TargetType::Complete(nb)) - }); - remaining -= nb; - if remaining == 0 { - break; - } - } - } - } else { - for qabl in qabls.iter() { - if qabl.direction.0.id != src_face.id && qabl.complete > 0 { - let nb = std::cmp::min(qabl.complete, remaining); - route.entry(qabl.direction.0.id).or_insert_with(|| { - let mut direction = qabl.direction.clone(); - let qid = insert_pending_query(&mut direction.0, query.clone()); - (direction, qid, TargetType::Complete(nb)) - }); - remaining -= nb; - if remaining == 0 { - break; - } - } - } - } - route - } - TargetType::BestMatching => { - if let Some(qabl) = qabls - .iter() - .find(|qabl| qabl.direction.0.id != src_face.id && qabl.complete > 0) - { - let mut route = HashMap::new(); - #[cfg(feature = "complete_n")] - { - let mut direction = qabl.direction.clone(); - let qid = insert_pending_query(&mut direction.0, query); - route.insert(direction.0.id, (direction, qid, *target)); - } - #[cfg(not(feature = "complete_n"))] - { - let mut direction = qabl.direction.clone(); - let qid = insert_pending_query(&mut direction.0, query); - route.insert(direction.0.id, (direction, qid)); - } - route - } else { - compute_final_route(tables, qabls, src_face, expr, &TargetType::All, query) - } - } - } -} - -#[inline] -fn compute_local_replies( - tables: &Tables, - prefix: &Arc, - suffix: &str, - face: &Arc, -) -> Vec<(WireExpr<'static>, ZBuf)> { - let mut result = vec![]; - // Only the first routing point in the query route - // should return the liveliness tokens - if face.whatami == WhatAmI::Client { - let key_expr = prefix.expr() + suffix; - let key_expr = match OwnedKeyExpr::try_from(key_expr) { - Ok(ke) => ke, - Err(e) => { - log::warn!("Invalid KE reached the system: {}", e); - return result; - } - }; - if key_expr.starts_with(super::PREFIX_LIVELINESS) { - let res = Resource::get_resource(prefix, suffix); - let matches = res - .as_ref() - .and_then(|res| res.context.as_ref()) - .map(|ctx| Cow::from(&ctx.matches)) - .unwrap_or_else(|| Cow::from(Resource::get_matches(tables, &key_expr))); - for mres in matches.iter() { - let mres = mres.upgrade().unwrap(); - if (mres.context.is_some() - && (!mres.context().router_subs.is_empty() - || !mres.context().peer_subs.is_empty())) - || mres.session_ctxs.values().any(|ctx| ctx.subs.is_some()) - { - result.push((Resource::get_best_key(&mres, "", face.id), ZBuf::default())); - } - } - } - } - result -} - -#[derive(Clone)] -struct QueryCleanup { - tables: Arc, - face: Weak, - qid: RequestId, -} - -#[async_trait] -impl Timed for QueryCleanup { - async fn run(&mut self) { - if let Some(mut face) = self.face.upgrade() { - let tables_lock = zwrite!(self.tables.tables); - if let Some(query) = get_mut_unchecked(&mut face) - .pending_queries - .remove(&self.qid) - { - drop(tables_lock); - log::warn!( - "Didn't receive final reply {}:{} from {}: Timeout!", - query.src_face, - self.qid, - face - ); - finalize_pending_query(query); - } - } - } -} - -pub(super) fn disable_matches_query_routes(_tables: &mut Tables, res: &mut Arc) { - if res.context.is_some() { - get_mut_unchecked(res).context_mut().valid_query_routes = false; - for match_ in &res.context().matches { - let mut match_ = match_.upgrade().unwrap(); - if 
!Arc::ptr_eq(&match_, res) { - get_mut_unchecked(&mut match_) - .context_mut() - .valid_query_routes = false; - } - } - } -} - -#[inline] -fn get_query_route( - tables: &Tables, - face: &FaceState, - res: &Option>, - expr: &mut RoutingExpr, - routing_context: u64, -) -> Arc { - match tables.whatami { - WhatAmI::Router => match face.whatami { - WhatAmI::Router => { - let routers_net = tables.routers_net.as_ref().unwrap(); - let local_context = routers_net.get_local_context(routing_context, face.link_id); - res.as_ref() - .and_then(|res| res.routers_query_route(local_context)) - .unwrap_or_else(|| { - compute_query_route(tables, expr, Some(local_context), face.whatami) - }) - } - WhatAmI::Peer => { - if tables.full_net(WhatAmI::Peer) { - let peers_net = tables.peers_net.as_ref().unwrap(); - let local_context = peers_net.get_local_context(routing_context, face.link_id); - res.as_ref() - .and_then(|res| res.peers_query_route(local_context)) - .unwrap_or_else(|| { - compute_query_route(tables, expr, Some(local_context), face.whatami) - }) - } else { - res.as_ref() - .and_then(|res| res.peer_query_route()) - .unwrap_or_else(|| compute_query_route(tables, expr, None, face.whatami)) - } - } - _ => res - .as_ref() - .and_then(|res| res.routers_query_route(0)) - .unwrap_or_else(|| compute_query_route(tables, expr, None, face.whatami)), - }, - WhatAmI::Peer => { - if tables.full_net(WhatAmI::Peer) { - match face.whatami { - WhatAmI::Router | WhatAmI::Peer => { - let peers_net = tables.peers_net.as_ref().unwrap(); - let local_context = - peers_net.get_local_context(routing_context, face.link_id); - res.as_ref() - .and_then(|res| res.peers_query_route(local_context)) - .unwrap_or_else(|| { - compute_query_route(tables, expr, Some(local_context), face.whatami) - }) - } - _ => res - .as_ref() - .and_then(|res| res.peers_query_route(0)) - .unwrap_or_else(|| compute_query_route(tables, expr, None, face.whatami)), - } - } else { - res.as_ref() - .and_then(|res| match face.whatami { - WhatAmI::Client => res.client_query_route(), - _ => res.peer_query_route(), - }) - .unwrap_or_else(|| compute_query_route(tables, expr, None, face.whatami)) - } - } - _ => res - .as_ref() - .and_then(|res| res.client_query_route()) - .unwrap_or_else(|| compute_query_route(tables, expr, None, face.whatami)), - } -} - -#[cfg(feature = "stats")] -macro_rules! inc_req_stats { - ( - $face:expr, - $txrx:ident, - $space:ident, - $body:expr - ) => { - paste::paste! { - if let Some(stats) = $face.stats.as_ref() { - use zenoh_buffers::buffer::Buffer; - match &$body { - RequestBody::Put(p) => { - stats.[<$txrx _z_put_msgs>].[](1); - stats.[<$txrx _z_put_pl_bytes>].[](p.payload.len()); - } - RequestBody::Del(_) => { - stats.[<$txrx _z_del_msgs>].[](1); - } - RequestBody::Query(q) => { - stats.[<$txrx _z_query_msgs>].[](1); - stats.[<$txrx _z_query_pl_bytes>].[]( - q.ext_body.as_ref().map(|b| b.payload.len()).unwrap_or(0), - ); - } - RequestBody::Pull(_) => (), - } - } - } - }; -} - -#[cfg(feature = "stats")] -macro_rules! inc_res_stats { - ( - $face:expr, - $txrx:ident, - $space:ident, - $body:expr - ) => { - paste::paste! 
{ - if let Some(stats) = $face.stats.as_ref() { - use zenoh_buffers::buffer::Buffer; - match &$body { - ResponseBody::Put(p) => { - stats.[<$txrx _z_put_msgs>].[](1); - stats.[<$txrx _z_put_pl_bytes>].[](p.payload.len()); - } - ResponseBody::Reply(r) => { - stats.[<$txrx _z_reply_msgs>].[](1); - stats.[<$txrx _z_reply_pl_bytes>].[](r.payload.len()); - } - ResponseBody::Err(e) => { - stats.[<$txrx _z_reply_msgs>].[](1); - stats.[<$txrx _z_reply_pl_bytes>].[]( - e.ext_body.as_ref().map(|b| b.payload.len()).unwrap_or(0), - ); - } - ResponseBody::Ack(_) => (), - } - } - } - }; -} - -#[allow(clippy::too_many_arguments)] -pub fn route_query( - tables_ref: &Arc, - face: &Arc, - expr: &WireExpr, - qid: RequestId, - target: TargetType, - body: RequestBody, - routing_context: u64, -) { - let rtables = zread!(tables_ref.tables); - match rtables.get_mapping(face, &expr.scope, expr.mapping) { - Some(prefix) => { - log::debug!( - "Route query {}:{} for res {}{}", - face, - qid, - prefix.expr(), - expr.suffix.as_ref(), - ); - let prefix = prefix.clone(); - let mut expr = RoutingExpr::new(&prefix, expr.suffix.as_ref()); - - #[cfg(feature = "stats")] - let admin = expr.full_expr().starts_with("@/"); - #[cfg(feature = "stats")] - if !admin { - inc_req_stats!(face, rx, user, body) - } else { - inc_req_stats!(face, rx, admin, body) - } - - if rtables.whatami != WhatAmI::Router - || face.whatami != WhatAmI::Peer - || rtables.peers_net.is_none() - || rtables.zid - == *rtables.elect_router(expr.full_expr(), rtables.get_router_links(face.zid)) - { - let res = Resource::get_resource(&prefix, expr.suffix); - let route = get_query_route(&rtables, face, &res, &mut expr, routing_context); - - let query = Arc::new(Query { - src_face: face.clone(), - src_qid: qid, - }); - - let queries_lock = zwrite!(tables_ref.queries_lock); - let route = compute_final_route(&rtables, &route, face, &mut expr, &target, query); - let local_replies = compute_local_replies(&rtables, &prefix, expr.suffix, face); - let zid = rtables.zid; - - drop(queries_lock); - drop(rtables); - - for (expr, payload) in local_replies { - let payload = ResponseBody::Reply(Reply { - timestamp: None, - encoding: Encoding::default(), - ext_sinfo: None, - ext_consolidation: ConsolidationType::default(), - #[cfg(feature = "shared-memory")] - ext_shm: None, - ext_attachment: None, // @TODO: expose it in the API - ext_unknown: vec![], - payload, - }); - #[cfg(feature = "stats")] - if !admin { - inc_res_stats!(face, tx, user, payload) - } else { - inc_res_stats!(face, tx, admin, payload) - } - - face.primitives.clone().send_response(Response { - rid: qid, - wire_expr: expr, - payload, - ext_qos: response::ext::QoSType::declare_default(), - ext_tstamp: None, - ext_respid: Some(response::ext::ResponderIdType { - zid, - eid: 0, // TODO - }), - }); - } - - if route.is_empty() { - log::debug!( - "Send final reply {}:{} (no matching queryables or not master)", - face, - qid - ); - face.primitives.clone().send_response_final(ResponseFinal { - rid: qid, - ext_qos: response::ext::QoSType::response_final_default(), - ext_tstamp: None, - }); - } else { - // let timer = tables.timer.clone(); - // let timeout = tables.queries_default_timeout; - #[cfg(feature = "complete_n")] - { - for ((outface, key_expr, context), qid, t) in route.values() { - // timer.add(TimedEvent::once( - // Instant::now() + timeout, - // QueryCleanup { - // tables: tables_ref.clone(), - // face: Arc::downgrade(&outface), - // *qid, - // }, - // )); - #[cfg(feature = "stats")] - if !admin { - 
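                            // With the stats feature enabled, account the
                            // forwarded request on the egress face before
                            // sending it.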
inc_req_stats!(outface, tx, user, body) - } else { - inc_req_stats!(outface, tx, admin, body) - } - - log::trace!("Propagate query {}:{} to {}", face, qid, outface); - outface.primitives.send_request(Request { - id: *qid, - wire_expr: key_expr.into(), - ext_qos: ext::QoSType::request_default(), // TODO - ext_tstamp: None, - ext_nodeid: ext::NodeIdType { - node_id: context.unwrap_or(0), - }, - ext_target: *t, - ext_budget: None, - ext_timeout: None, - payload: body.clone(), - }); - } - } - - #[cfg(not(feature = "complete_n"))] - { - for ((outface, key_expr, context), qid) in route.values() { - // timer.add(TimedEvent::once( - // Instant::now() + timeout, - // QueryCleanup { - // tables: tables_ref.clone(), - // face: Arc::downgrade(&outface), - // *qid, - // }, - // )); - #[cfg(feature = "stats")] - if !admin { - inc_req_stats!(outface, tx, user, body) - } else { - inc_req_stats!(outface, tx, admin, body) - } - - log::trace!("Propagate query {}:{} to {}", face, qid, outface); - outface.primitives.send_request(Request { - id: *qid, - wire_expr: key_expr.into(), - ext_qos: ext::QoSType::request_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType { - node_id: context.unwrap_or(0), - }, - ext_target: target, - ext_budget: None, - ext_timeout: None, - payload: body.clone(), - }); - } - } - } - } else { - log::debug!("Send final reply {}:{} (not master)", face, qid); - drop(rtables); - face.primitives.clone().send_response_final(ResponseFinal { - rid: qid, - ext_qos: response::ext::QoSType::response_final_default(), - ext_tstamp: None, - }); - } - } - None => { - log::error!( - "Route query with unknown scope {}! Send final reply.", - expr.scope - ); - drop(rtables); - face.primitives.clone().send_response_final(ResponseFinal { - rid: qid, - ext_qos: response::ext::QoSType::response_final_default(), - ext_tstamp: None, - }); - } - } -} - -#[allow(clippy::too_many_arguments)] -pub(crate) fn route_send_response( - tables_ref: &Arc, - face: &mut Arc, - qid: RequestId, - ext_respid: Option, - key_expr: WireExpr, - body: ResponseBody, -) { - let queries_lock = zread!(tables_ref.queries_lock); - #[cfg(feature = "stats")] - let admin = key_expr.as_str().starts_with("@/"); - #[cfg(feature = "stats")] - if !admin { - inc_res_stats!(face, rx, user, body) - } else { - inc_res_stats!(face, rx, admin, body) - } - - match face.pending_queries.get(&qid) { - Some(query) => { - drop(queries_lock); - - #[cfg(feature = "stats")] - if !admin { - inc_res_stats!(query.src_face, tx, user, body) - } else { - inc_res_stats!(query.src_face, tx, admin, body) - } - - query.src_face.primitives.clone().send_response(Response { - rid: query.src_qid, - wire_expr: key_expr.to_owned(), - payload: body, - ext_qos: response::ext::QoSType::response_default(), - ext_tstamp: None, - ext_respid, - }); - } - None => log::warn!( - "Route reply {}:{} from {}: Query nof found!", - face, - qid, - face - ), - } -} - -pub(crate) fn route_send_response_final( - tables_ref: &Arc, - face: &mut Arc, - qid: RequestId, -) { - let queries_lock = zwrite!(tables_ref.queries_lock); - match get_mut_unchecked(face).pending_queries.remove(&qid) { - Some(query) => { - drop(queries_lock); - log::debug!( - "Received final reply {}:{} from {}", - query.src_face, - qid, - face - ); - finalize_pending_query(query); - } - None => log::warn!( - "Route final reply {}:{} from {}: Query nof found!", - face, - qid, - face - ), - } -} - -pub(crate) fn finalize_pending_queries(tables_ref: &TablesLock, face: &mut Arc) { - let queries_lock = 
zwrite!(tables_ref.queries_lock); - for (_, query) in get_mut_unchecked(face).pending_queries.drain() { - finalize_pending_query(query); - } - drop(queries_lock); -} - -pub(crate) fn finalize_pending_query(query: Arc) { - if let Some(query) = Arc::into_inner(query) { - log::debug!("Propagate final reply {}:{}", query.src_face, query.src_qid); - query - .src_face - .primitives - .clone() - .send_response_final(ResponseFinal { - rid: query.src_qid, - ext_qos: response::ext::QoSType::response_final_default(), - ext_tstamp: None, - }); - } -} From 331cb3f89a4aac863755c312cf38517a8cd98719 Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Tue, 9 Jan 2024 16:57:05 +0100 Subject: [PATCH 028/122] Fix complete_n build --- zenoh/src/net/routing/dispatcher/queries.rs | 81 ++++++++------------- 1 file changed, 29 insertions(+), 52 deletions(-) diff --git a/zenoh/src/net/routing/dispatcher/queries.rs b/zenoh/src/net/routing/dispatcher/queries.rs index 6ecc23b5bc..d80e44d330 100644 --- a/zenoh/src/net/routing/dispatcher/queries.rs +++ b/zenoh/src/net/routing/dispatcher/queries.rs @@ -156,47 +156,21 @@ fn compute_final_route( TargetType::Complete(n) => { let mut route = HashMap::new(); let mut remaining = *n; - if src_face.whatami == WhatAmI::Peer && !tables.full_net(WhatAmI::Peer) { - let source_links = tables - .peers_net - .as_ref() - .map(|net| net.get_links(src_face.zid)) - .unwrap_or_default(); - for qabl in qabls.iter() { - if qabl.direction.0.id != src_face.id - && qabl.complete > 0 - && (qabl.direction.0.whatami != WhatAmI::Peer - || (tables.router_peers_failover_brokering - && Tables::failover_brokering_to( - source_links, - qabl.direction.0.zid, - ))) - { - let nb = std::cmp::min(qabl.complete, remaining); - route.entry(qabl.direction.0.id).or_insert_with(|| { - let mut direction = qabl.direction.clone(); - let qid = insert_pending_query(&mut direction.0, query.clone()); - (direction, qid, TargetType::Complete(nb)) - }); - remaining -= nb; - if remaining == 0 { - break; - } - } - } - } else { - for qabl in qabls.iter() { - if qabl.direction.0.id != src_face.id && qabl.complete > 0 { - let nb = std::cmp::min(qabl.complete, remaining); - route.entry(qabl.direction.0.id).or_insert_with(|| { - let mut direction = qabl.direction.clone(); - let qid = insert_pending_query(&mut direction.0, query.clone()); - (direction, qid, TargetType::Complete(nb)) - }); - remaining -= nb; - if remaining == 0 { - break; - } + for qabl in qabls.iter() { + if qabl.complete > 0 + && tables + .hat_code + .egress_filter(tables, src_face, &qabl.direction.0, expr) + { + let nb = std::cmp::min(qabl.complete, remaining); + route.entry(qabl.direction.0.id).or_insert_with(|| { + let mut direction = qabl.direction.clone(); + let qid = insert_pending_query(&mut direction.0, query.clone()); + (direction, qid, TargetType::Complete(nb)) + }); + remaining -= nb; + if remaining == 0 { + break; } } } @@ -484,17 +458,20 @@ pub fn route_query( } log::trace!("Propagate query {}:{} to {}", face, qid, outface); - outface.primitives.send_request(Request { - id: *qid, - wire_expr: key_expr.into(), - ext_qos: ext::QoSType::request_default(), // TODO - ext_tstamp: None, - ext_nodeid: ext::NodeIdType { node_id: *context }, - ext_target: *t, - ext_budget: None, - ext_timeout: None, - payload: body.clone(), - }); + outface.primitives.send_request(RoutingContext::with_expr( + Request { + id: *qid, + wire_expr: key_expr.into(), + ext_qos: ext::QoSType::request_default(), // TODO + ext_tstamp: None, + ext_nodeid: ext::NodeIdType { node_id: *context 
}, + ext_target: *t, + ext_budget: None, + ext_timeout: None, + payload: body.clone(), + }, + expr.full_expr().to_string(), + )); } } From 458e64cc551eb1d03c76e51f945f1472e4beedc7 Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Tue, 9 Jan 2024 17:28:51 +0100 Subject: [PATCH 029/122] Remove useless checks --- zenoh/src/net/routing/hat/router/mod.rs | 84 +++++++++---------------- 1 file changed, 28 insertions(+), 56 deletions(-) diff --git a/zenoh/src/net/routing/hat/router/mod.rs b/zenoh/src/net/routing/hat/router/mod.rs index b3ec50c6fc..4f7d38da4c 100644 --- a/zenoh/src/net/routing/hat/router/mod.rs +++ b/zenoh/src/net/routing/hat/router/mod.rs @@ -355,15 +355,13 @@ impl HatBaseTrait for HatCode { face: &mut Face, transport: &TransportUnicast, ) -> ZResult<()> { - let link_id = match (tables.whatami, face.state.whatami) { - (WhatAmI::Router, WhatAmI::Router) => hat_mut!(tables) + let link_id = match face.state.whatami { + WhatAmI::Router => hat_mut!(tables) .routers_net .as_mut() .unwrap() .add_link(transport.clone()), - (WhatAmI::Router, WhatAmI::Peer) - | (WhatAmI::Peer, WhatAmI::Router) - | (WhatAmI::Peer, WhatAmI::Peer) => { + WhatAmI::Peer => { if let Some(net) = hat_mut!(tables).peers_net.as_mut() { net.add_link(transport.clone()) } else { @@ -384,13 +382,11 @@ impl HatBaseTrait for HatCode { pubsub_new_face(tables, &mut face.state); queries_new_face(tables, &mut face.state); - match (tables.whatami, face.state.whatami) { - (WhatAmI::Router, WhatAmI::Router) => { + match face.state.whatami { + WhatAmI::Router => { hat_mut!(tables).schedule_compute_trees(tables_ref.clone(), WhatAmI::Router); } - (WhatAmI::Router, WhatAmI::Peer) - | (WhatAmI::Peer, WhatAmI::Router) - | (WhatAmI::Peer, WhatAmI::Peer) => { + WhatAmI::Peer => { if hat_mut!(tables).full_net(WhatAmI::Peer) { hat_mut!(tables).schedule_compute_trees(tables_ref.clone(), WhatAmI::Peer); } @@ -528,8 +524,8 @@ impl HatBaseTrait for HatCode { let list: LinkStateList = codec.read(&mut reader).unwrap(); let whatami = transport.get_whatami()?; - match (tables.whatami, whatami) { - (WhatAmI::Router, WhatAmI::Router) => { + match whatami { + WhatAmI::Router => { for (_, removed_node) in hat_mut!(tables) .routers_net .as_mut() @@ -551,9 +547,7 @@ impl HatBaseTrait for HatCode { hat_mut!(tables) .schedule_compute_trees(tables_ref.clone(), WhatAmI::Router); } - (WhatAmI::Router, WhatAmI::Peer) - | (WhatAmI::Peer, WhatAmI::Router) - | (WhatAmI::Peer, WhatAmI::Peer) => { + WhatAmI::Peer => { if let Some(net) = hat_mut!(tables).peers_net.as_mut() { let changes = net.link_states(list.link_states, zid); if hat!(tables).full_net(WhatAmI::Peer) { @@ -570,12 +564,10 @@ impl HatBaseTrait for HatCode { ); } - if tables.whatami == WhatAmI::Router { - hat_mut!(tables).shared_nodes = shared_nodes( - hat!(tables).routers_net.as_ref().unwrap(), - hat!(tables).peers_net.as_ref().unwrap(), - ); - } + hat_mut!(tables).shared_nodes = shared_nodes( + hat!(tables).routers_net.as_ref().unwrap(), + hat!(tables).peers_net.as_ref().unwrap(), + ); hat_mut!(tables) .schedule_compute_trees(tables_ref.clone(), WhatAmI::Peer); @@ -611,26 +603,12 @@ impl HatBaseTrait for HatCode { face: &FaceState, routing_context: NodeId, ) -> NodeId { - match tables.whatami { - WhatAmI::Router => match face.whatami { - WhatAmI::Router => hat!(tables) - .routers_net - .as_ref() - .unwrap() - .get_local_context(routing_context, face_hat!(face).link_id), - WhatAmI::Peer => { - if hat!(tables).full_net(WhatAmI::Peer) { - hat!(tables) - .peers_net - .as_ref() - .unwrap() - 
.get_local_context(routing_context, face_hat!(face).link_id) - } else { - 0 - } - } - _ => 0, - }, + match face.whatami { + WhatAmI::Router => hat!(tables) + .routers_net + .as_ref() + .unwrap() + .get_local_context(routing_context, face_hat!(face).link_id), WhatAmI::Peer => { if hat!(tables).full_net(WhatAmI::Peer) { hat!(tables) @@ -654,8 +632,8 @@ impl HatBaseTrait for HatCode { ) -> ZResult<()> { match (transport.get_zid(), transport.get_whatami()) { (Ok(zid), Ok(whatami)) => { - match (tables.whatami, whatami) { - (WhatAmI::Router, WhatAmI::Router) => { + match whatami { + WhatAmI::Router => { for (_, removed_node) in hat_mut!(tables) .routers_net .as_mut() @@ -676,9 +654,7 @@ impl HatBaseTrait for HatCode { hat_mut!(tables) .schedule_compute_trees(tables_ref.clone(), WhatAmI::Router); } - (WhatAmI::Router, WhatAmI::Peer) - | (WhatAmI::Peer, WhatAmI::Router) - | (WhatAmI::Peer, WhatAmI::Peer) => { + WhatAmI::Peer => { if hat!(tables).full_net(WhatAmI::Peer) { for (_, removed_node) in hat_mut!(tables) .peers_net @@ -690,12 +666,10 @@ impl HatBaseTrait for HatCode { queries_remove_node(tables, &removed_node.zid, WhatAmI::Peer); } - if tables.whatami == WhatAmI::Router { - hat_mut!(tables).shared_nodes = shared_nodes( - hat!(tables).routers_net.as_ref().unwrap(), - hat!(tables).peers_net.as_ref().unwrap(), - ); - } + hat_mut!(tables).shared_nodes = shared_nodes( + hat!(tables).routers_net.as_ref().unwrap(), + hat!(tables).peers_net.as_ref().unwrap(), + ); hat_mut!(tables) .schedule_compute_trees(tables_ref.clone(), WhatAmI::Peer); @@ -717,8 +691,7 @@ impl HatBaseTrait for HatCode { #[inline] fn ingress_filter(&self, tables: &Tables, face: &FaceState, expr: &mut RoutingExpr) -> bool { - tables.whatami != WhatAmI::Router - || face.whatami != WhatAmI::Peer + face.whatami != WhatAmI::Peer || hat!(tables).peers_net.is_none() || tables.zid == *hat!(tables).elect_router( @@ -742,8 +715,7 @@ impl HatBaseTrait for HatCode { _ => true, } { - let dst_master = tables.whatami != WhatAmI::Router - || out_face.whatami != WhatAmI::Peer + let dst_master = out_face.whatami != WhatAmI::Peer || hat!(tables).peers_net.is_none() || tables.zid == *hat!(tables).elect_router( From b69ae82882e53a9596af66e4e49d2940fc5b6c43 Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Wed, 10 Jan 2024 10:35:34 +0100 Subject: [PATCH 030/122] Fix OAM handling --- zenoh/src/net/primitives/demux.rs | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/zenoh/src/net/primitives/demux.rs b/zenoh/src/net/primitives/demux.rs index 6c4a272995..e162550c86 100644 --- a/zenoh/src/net/primitives/demux.rs +++ b/zenoh/src/net/primitives/demux.rs @@ -62,7 +62,13 @@ impl TransportPeerEventHandler for DeMux { NetworkBody::Request(m) => self.face.send_request(m), NetworkBody::Response(m) => self.face.send_response(m), NetworkBody::ResponseFinal(m) => self.face.send_response_final(m), - NetworkBody::OAM(_m) => (), + NetworkBody::OAM(m) => { + if let Some(transport) = self.transport.as_ref() { + let ctrl_lock = zlock!(self.face.tables.ctrl_lock); + let mut tables = zwrite!(self.face.tables.tables); + ctrl_lock.handle_oam(&mut tables, &self.face.tables, m, transport)? 
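                    // OAM messages (e.g. linkstate) are now handed to the hat
                    // code under the control lock instead of being dropped.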
+ } + } } // match ctx.msg.body { From 60f0ed017679cd2deecba70f685ec9fd6ec489b7 Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Wed, 10 Jan 2024 10:37:23 +0100 Subject: [PATCH 031/122] Remove commented code --- zenoh/src/net/primitives/demux.rs | 9 --------- 1 file changed, 9 deletions(-) diff --git a/zenoh/src/net/primitives/demux.rs b/zenoh/src/net/primitives/demux.rs index e162550c86..9ea371d0cf 100644 --- a/zenoh/src/net/primitives/demux.rs +++ b/zenoh/src/net/primitives/demux.rs @@ -71,15 +71,6 @@ impl TransportPeerEventHandler for DeMux { } } - // match ctx.msg.body { - // NetworkBody::Declare(m) => self.face.send_declare(RoutingContext::new(m, ctx.inface)), - // NetworkBody::Push(m) => self.face.send_push(RoutingContext::new(m, ctx.inface)), - // NetworkBody::Request(m) => self.face.send_request(RoutingContext::new(m, ctx.inface)), - // NetworkBody::Response(m) => self.face.send_response(RoutingContext::new(m, ctx.inface)), - // NetworkBody::ResponseFinal(m) => self.face.send_response_final(RoutingContext::new(m, ctx.inface)), - // NetworkBody::OAM(_m) => (), - // } - Ok(()) } From 27e19747a30a2af7b0d50f9f3fbc0d013466cf52 Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Thu, 11 Jan 2024 15:17:24 +0100 Subject: [PATCH 032/122] Simplified routes computation hats api --- zenoh/src/net/routing/dispatcher/pubsub.rs | 69 ++++++++++++++++-- zenoh/src/net/routing/dispatcher/queries.rs | 72 ++++++++++++++++--- zenoh/src/net/routing/dispatcher/resource.rs | 7 ++ zenoh/src/net/routing/dispatcher/tables.rs | 4 +- zenoh/src/net/routing/hat/client/mod.rs | 24 +++++-- zenoh/src/net/routing/hat/client/pubsub.rs | 29 ++------ zenoh/src/net/routing/hat/client/queries.rs | 31 ++------ .../src/net/routing/hat/linkstate_peer/mod.rs | 31 ++++++-- .../net/routing/hat/linkstate_peer/pubsub.rs | 44 ++---------- .../net/routing/hat/linkstate_peer/queries.rs | 41 ++--------- zenoh/src/net/routing/hat/mod.rs | 63 +++------------- zenoh/src/net/routing/hat/p2p_peer/mod.rs | 23 ++++-- zenoh/src/net/routing/hat/p2p_peer/pubsub.rs | 25 ++----- zenoh/src/net/routing/hat/p2p_peer/queries.rs | 33 ++------- zenoh/src/net/routing/hat/router/mod.rs | 43 +++++++++-- zenoh/src/net/routing/hat/router/pubsub.rs | 56 ++------------- zenoh/src/net/routing/hat/router/queries.rs | 62 ++-------------- 17 files changed, 284 insertions(+), 373 deletions(-) diff --git a/zenoh/src/net/routing/dispatcher/pubsub.rs b/zenoh/src/net/routing/dispatcher/pubsub.rs index 1e6643513c..c7ddd58504 100644 --- a/zenoh/src/net/routing/dispatcher/pubsub.rs +++ b/zenoh/src/net/routing/dispatcher/pubsub.rs @@ -16,6 +16,7 @@ use super::resource::{DataRoutes, Direction, PullCaches, Resource}; use super::tables::{NodeId, Route, RoutingExpr, Tables, TablesLock}; use crate::net::routing::dispatcher::face::Face; use crate::net::routing::RoutingContext; +use std::collections::HashMap; use std::sync::Arc; use std::sync::RwLock; use zenoh_core::zread; @@ -26,8 +27,66 @@ use zenoh_protocol::{ }; use zenoh_sync::get_mut_unchecked; +fn compute_data_routes_(tables: &Tables, routes: &mut DataRoutes, expr: &mut RoutingExpr) { + let indexes = tables.hat_code.get_data_routes_entries(tables); + + let max_idx = indexes.routers.iter().max().unwrap(); + routes + .routers + .resize_with((*max_idx as usize) + 1, || Arc::new(HashMap::new())); + + for idx in indexes.routers { + routes.routers[idx as usize] = + tables + .hat_code + .compute_data_route(tables, expr, idx, WhatAmI::Router); + } + + let max_idx = indexes.peers.iter().max().unwrap(); + routes + .peers + 
.resize_with((*max_idx as usize) + 1, || Arc::new(HashMap::new())); + + for idx in indexes.peers { + routes.peers[idx as usize] = + tables + .hat_code + .compute_data_route(tables, expr, idx, WhatAmI::Peer); + } + + let max_idx = indexes.clients.iter().max().unwrap(); + routes + .clients + .resize_with((*max_idx as usize) + 1, || Arc::new(HashMap::new())); + + for idx in indexes.clients { + routes.clients[idx as usize] = + tables + .hat_code + .compute_data_route(tables, expr, idx, WhatAmI::Client); + } +} + +pub(crate) fn compute_data_routes(tables: &Tables, expr: &mut RoutingExpr) -> DataRoutes { + let mut routes = DataRoutes::default(); + compute_data_routes_(tables, &mut routes, expr); + routes +} + +pub(crate) fn update_data_routes(tables: &Tables, res: &mut Arc) { + if res.context.is_some() { + let mut res_mut = res.clone(); + let res_mut = get_mut_unchecked(&mut res_mut); + compute_data_routes_( + tables, + &mut res_mut.context_mut().data_routes, + &mut RoutingExpr::new(res, ""), + ); + } +} + pub(crate) fn update_data_routes_from(tables: &mut Tables, res: &mut Arc) { - tables.hat_code.clone().update_data_routes(tables, res); + update_data_routes(tables, res); tables.hat_code.clone().update_matching_pulls(tables, res); let res = get_mut_unchecked(res); for child in res.childs.values_mut() { @@ -44,14 +103,14 @@ pub(crate) fn compute_matches_data_routes<'a>( let mut expr = RoutingExpr::new(res, ""); routes.push(( res.clone(), - tables.hat_code.compute_data_routes(tables, &mut expr), + compute_data_routes(tables, &mut expr), tables.hat_code.compute_matching_pulls(tables, &mut expr), )); for match_ in &res.context().matches { let match_ = match_.upgrade().unwrap(); if !Arc::ptr_eq(&match_, res) { let mut expr = RoutingExpr::new(&match_, ""); - let match_routes = tables.hat_code.compute_data_routes(tables, &mut expr); + let match_routes = compute_data_routes(tables, &mut expr); let matching_pulls = tables.hat_code.compute_matching_pulls(tables, &mut expr); routes.push((match_, match_routes, matching_pulls)); } @@ -62,12 +121,12 @@ pub(crate) fn compute_matches_data_routes<'a>( pub(crate) fn update_matches_data_routes<'a>(tables: &'a mut Tables, res: &'a mut Arc) { if res.context.is_some() { - tables.hat_code.update_data_routes(tables, res); + update_data_routes(tables, res); tables.hat_code.update_matching_pulls(tables, res); for match_ in &res.context().matches { let mut match_ = match_.upgrade().unwrap(); if !Arc::ptr_eq(&match_, res) { - tables.hat_code.update_data_routes(tables, &mut match_); + update_data_routes(tables, &mut match_); tables.hat_code.update_matching_pulls(tables, &mut match_); } } diff --git a/zenoh/src/net/routing/dispatcher/queries.rs b/zenoh/src/net/routing/dispatcher/queries.rs index d80e44d330..7b3c4b3a1e 100644 --- a/zenoh/src/net/routing/dispatcher/queries.rs +++ b/zenoh/src/net/routing/dispatcher/queries.rs @@ -20,6 +20,7 @@ use super::tables::{RoutingExpr, Tables, TablesLock}; use async_trait::async_trait; use std::collections::HashMap; use std::sync::{Arc, Weak}; +use zenoh_config::WhatAmI; use zenoh_protocol::{ core::{Encoding, WireExpr}, network::{ @@ -37,8 +38,66 @@ pub(crate) struct Query { src_qid: RequestId, } +fn compute_query_routes_(tables: &Tables, routes: &mut QueryRoutes, expr: &mut RoutingExpr) { + let indexes = tables.hat_code.get_data_routes_entries(tables); + + let max_idx = indexes.routers.iter().max().unwrap(); + routes.routers.resize_with((*max_idx as usize) + 1, || { + Arc::new(QueryTargetQablSet::new()) + }); + + for idx in 
indexes.routers { + routes.routers[idx as usize] = + tables + .hat_code + .compute_query_route(tables, expr, idx, WhatAmI::Router); + } + + let max_idx = indexes.peers.iter().max().unwrap(); + routes.peers.resize_with((*max_idx as usize) + 1, || { + Arc::new(QueryTargetQablSet::new()) + }); + + for idx in indexes.peers { + routes.peers[idx as usize] = + tables + .hat_code + .compute_query_route(tables, expr, idx, WhatAmI::Peer); + } + + let max_idx = indexes.clients.iter().max().unwrap(); + routes.clients.resize_with((*max_idx as usize) + 1, || { + Arc::new(QueryTargetQablSet::new()) + }); + + for idx in indexes.clients { + routes.clients[idx as usize] = + tables + .hat_code + .compute_query_route(tables, expr, idx, WhatAmI::Client); + } +} + +pub(crate) fn compute_query_routes(tables: &Tables, res: &Arc) -> QueryRoutes { + let mut routes = QueryRoutes::default(); + compute_query_routes_(tables, &mut routes, &mut RoutingExpr::new(res, "")); + routes +} + +pub(crate) fn update_query_routes(tables: &Tables, res: &Arc) { + if res.context.is_some() { + let mut res_mut = res.clone(); + let res_mut = get_mut_unchecked(&mut res_mut); + compute_query_routes_( + tables, + &mut res_mut.context_mut().query_routes, + &mut RoutingExpr::new(res, ""), + ); + } +} + pub(crate) fn compute_query_routes_from(tables: &mut Tables, res: &mut Arc) { - tables.hat_code.clone().compute_query_routes(tables, res); + compute_query_routes(tables, res); let res = get_mut_unchecked(res); for child in res.childs.values_mut() { compute_query_routes_from(tables, child); @@ -51,14 +110,11 @@ pub(crate) fn compute_matches_query_routes( ) -> Vec<(Arc, QueryRoutes)> { let mut routes = vec![]; if res.context.is_some() { - routes.push(( - res.clone(), - tables.hat_code.compute_query_routes(tables, res), - )); + routes.push((res.clone(), compute_query_routes(tables, res))); for match_ in &res.context().matches { let match_ = match_.upgrade().unwrap(); if !Arc::ptr_eq(&match_, res) { - let match_routes = tables.hat_code.compute_query_routes(tables, &match_); + let match_routes = compute_query_routes(tables, &match_); routes.push((match_, match_routes)); } } @@ -68,11 +124,11 @@ pub(crate) fn compute_matches_query_routes( pub(crate) fn update_matches_query_routes(tables: &Tables, res: &Arc) { if res.context.is_some() { - tables.hat_code.update_query_routes(tables, res); + update_query_routes(tables, res); for match_ in &res.context().matches { let match_ = match_.upgrade().unwrap(); if !Arc::ptr_eq(&match_, res) { - tables.hat_code.update_query_routes(tables, &match_); + update_query_routes(tables, &match_); } } } diff --git a/zenoh/src/net/routing/dispatcher/resource.rs b/zenoh/src/net/routing/dispatcher/resource.rs index 37e3a5b91b..8a210ed13b 100644 --- a/zenoh/src/net/routing/dispatcher/resource.rs +++ b/zenoh/src/net/routing/dispatcher/resource.rs @@ -62,6 +62,13 @@ pub(crate) struct SessionContext { pub(crate) last_values: HashMap, } +#[derive(Default)] +pub(crate) struct RoutesIndexes { + pub(crate) routers: Vec, + pub(crate) peers: Vec, + pub(crate) clients: Vec, +} + #[derive(Default)] pub(crate) struct DataRoutes { pub(crate) routers: Vec>, diff --git a/zenoh/src/net/routing/dispatcher/tables.rs b/zenoh/src/net/routing/dispatcher/tables.rs index 47e894d0f0..c8b988f99f 100644 --- a/zenoh/src/net/routing/dispatcher/tables.rs +++ b/zenoh/src/net/routing/dispatcher/tables.rs @@ -146,8 +146,8 @@ impl Tables { } fn compute_routes(&mut self, res: &mut Arc) { - self.hat_code.clone().update_data_routes(self, res); - 
self.hat_code.clone().compute_query_routes(self, res); + update_data_routes(self, res); + compute_query_routes(self, res); } pub(crate) fn compute_matches_routes(&mut self, res: &mut Arc) { diff --git a/zenoh/src/net/routing/hat/client/mod.rs b/zenoh/src/net/routing/hat/client/mod.rs index b2dfe0dc80..36a576177a 100644 --- a/zenoh/src/net/routing/hat/client/mod.rs +++ b/zenoh/src/net/routing/hat/client/mod.rs @@ -17,7 +17,13 @@ //! This module is intended for Zenoh's internal use. //! //! [Click here for Zenoh's documentation](../zenoh/index.html) -use crate::{net::routing::dispatcher::face::Face, runtime::Runtime}; +use crate::{ + net::routing::{ + dispatcher::face::Face, + router::{compute_data_routes, compute_query_routes, RoutesIndexes}, + }, + runtime::Runtime, +}; use self::{ pubsub::{pubsub_new_face, undeclare_client_subscription}, @@ -189,15 +195,12 @@ impl HatBaseTrait for HatCode { let mut expr = RoutingExpr::new(&_match, ""); matches_data_routes.push(( _match.clone(), - rtables.hat_code.compute_data_routes(&rtables, &mut expr), + compute_data_routes(&rtables, &mut expr), rtables.hat_code.compute_matching_pulls(&rtables, &mut expr), )); } for _match in qabls_matches.drain(..) { - matches_query_routes.push(( - _match.clone(), - rtables.hat_code.compute_query_routes(&rtables, &_match), - )); + matches_query_routes.push((_match.clone(), compute_query_routes(&rtables, &_match))); } drop(rtables); @@ -302,3 +305,12 @@ impl HatFace { } impl HatTrait for HatCode {} + +#[inline] +fn get_routes_entries() -> RoutesIndexes { + RoutesIndexes { + routers: vec![0], + peers: vec![0], + clients: vec![0], + } +} diff --git a/zenoh/src/net/routing/hat/client/pubsub.rs b/zenoh/src/net/routing/hat/client/pubsub.rs index 97c5f4b927..a6162500a8 100644 --- a/zenoh/src/net/routing/hat/client/pubsub.rs +++ b/zenoh/src/net/routing/hat/client/pubsub.rs @@ -11,14 +11,15 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::{face_hat, face_hat_mut}; +use super::{face_hat, face_hat_mut, get_routes_entries}; use super::{HatCode, HatFace}; use crate::net::routing::dispatcher::face::FaceState; use crate::net::routing::dispatcher::pubsub::*; use crate::net::routing::dispatcher::resource::{NodeId, Resource, SessionContext}; -use crate::net::routing::dispatcher::tables::{DataRoutes, PullCaches, Route, RoutingExpr}; +use crate::net::routing::dispatcher::tables::{PullCaches, Route, RoutingExpr}; use crate::net::routing::dispatcher::tables::{Tables, TablesLock}; use crate::net::routing::hat::HatPubSubTrait; +use crate::net::routing::router::RoutesIndexes; use crate::net::routing::{RoutingContext, PREFIX_LIVELINESS}; use std::borrow::Cow; use std::collections::HashMap; @@ -450,27 +451,7 @@ impl HatPubSubTrait for HatCode { } } - fn compute_data_routes_( - &self, - tables: &Tables, - data_routes: &mut DataRoutes, - expr: &mut RoutingExpr, - ) { - let route = self.compute_data_route(tables, expr, NodeId::default(), WhatAmI::Client); - - let routers_data_routes = &mut data_routes.routers; - routers_data_routes.clear(); - routers_data_routes.resize_with(1, || Arc::new(HashMap::new())); - routers_data_routes[0] = route.clone(); - - let peers_data_routes = &mut data_routes.peers; - peers_data_routes.clear(); - peers_data_routes.resize_with(1, || Arc::new(HashMap::new())); - peers_data_routes[0] = route.clone(); - - let clients_data_routes = &mut data_routes.clients; - clients_data_routes.clear(); - clients_data_routes.resize_with(1, || Arc::new(HashMap::new())); - clients_data_routes[0] = route; + fn 
get_data_routes_entries(&self, _tables: &Tables) -> RoutesIndexes { + get_routes_entries() } } diff --git a/zenoh/src/net/routing/hat/client/queries.rs b/zenoh/src/net/routing/hat/client/queries.rs index 1e503478c7..90404b3d99 100644 --- a/zenoh/src/net/routing/hat/client/queries.rs +++ b/zenoh/src/net/routing/hat/client/queries.rs @@ -11,16 +11,15 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::{face_hat, face_hat_mut}; +use super::{face_hat, face_hat_mut, get_routes_entries}; use super::{HatCode, HatFace}; use crate::net::routing::dispatcher::face::FaceState; use crate::net::routing::dispatcher::queries::*; use crate::net::routing::dispatcher::resource::{NodeId, Resource, SessionContext}; -use crate::net::routing::dispatcher::tables::{ - QueryRoutes, QueryTargetQabl, QueryTargetQablSet, RoutingExpr, -}; +use crate::net::routing::dispatcher::tables::{QueryTargetQabl, QueryTargetQablSet, RoutingExpr}; use crate::net::routing::dispatcher::tables::{Tables, TablesLock}; use crate::net::routing::hat::HatQueriesTrait; +use crate::net::routing::router::RoutesIndexes; use crate::net::routing::{RoutingContext, PREFIX_LIVELINESS}; use ordered_float::OrderedFloat; use std::borrow::Cow; @@ -437,27 +436,7 @@ impl HatQueriesTrait for HatCode { result } - fn compute_query_routes_( - &self, - tables: &Tables, - routes: &mut QueryRoutes, - expr: &mut RoutingExpr, - ) { - let route = self.compute_query_route(tables, expr, NodeId::default(), WhatAmI::Client); - - routes - .routers - .resize_with(1, || Arc::new(QueryTargetQablSet::new())); - routes.routers[0] = route.clone(); - - routes - .peers - .resize_with(1, || Arc::new(QueryTargetQablSet::new())); - routes.peers[0] = route.clone(); - - routes - .clients - .resize_with(1, || Arc::new(QueryTargetQablSet::new())); - routes.clients[0] = route; + fn get_query_routes_entries(&self, _tables: &Tables) -> RoutesIndexes { + get_routes_entries() } } diff --git a/zenoh/src/net/routing/hat/linkstate_peer/mod.rs b/zenoh/src/net/routing/hat/linkstate_peer/mod.rs index 0b626e247c..ae0a3c577a 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/mod.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/mod.rs @@ -31,7 +31,12 @@ use super::{ }; use crate::{ net::{ - codec::Zenoh080Routing, protocol::linkstate::LinkStateList, routing::dispatcher::face::Face, + codec::Zenoh080Routing, + protocol::linkstate::LinkStateList, + routing::{ + dispatcher::face::Face, + router::{compute_data_routes, compute_query_routes, RoutesIndexes}, + }, }, runtime::Runtime, }; @@ -305,15 +310,12 @@ impl HatBaseTrait for HatCode { let mut expr = RoutingExpr::new(&_match, ""); matches_data_routes.push(( _match.clone(), - rtables.hat_code.compute_data_routes(&rtables, &mut expr), + compute_data_routes(&rtables, &mut expr), rtables.hat_code.compute_matching_pulls(&rtables, &mut expr), )); } for _match in qabls_matches.drain(..) 
{ - matches_query_routes.push(( - _match.clone(), - rtables.hat_code.compute_query_routes(&rtables, &_match), - )); + matches_query_routes.push((_match.clone(), compute_query_routes(&rtables, &_match))); } drop(rtables); @@ -503,3 +505,20 @@ fn get_peer(tables: &Tables, face: &Arc, nodeid: NodeId) -> Option RoutesIndexes { + let indexes = hat!(tables) + .peers_net + .as_ref() + .unwrap() + .graph + .node_indices() + .map(|i| i.index() as NodeId) + .collect::>(); + RoutesIndexes { + routers: indexes.clone(), + peers: indexes, + clients: vec![0], + } +} diff --git a/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs b/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs index b8aadd50b6..09cb08fa8a 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs @@ -12,14 +12,15 @@ // ZettaScale Zenoh Team, // use super::network::Network; -use super::{face_hat, face_hat_mut, hat, hat_mut, res_hat, res_hat_mut}; +use super::{face_hat, face_hat_mut, get_routes_entries, hat, hat_mut, res_hat, res_hat_mut}; use super::{get_peer, HatCode, HatContext, HatFace, HatTables}; use crate::net::routing::dispatcher::face::FaceState; use crate::net::routing::dispatcher::pubsub::*; use crate::net::routing::dispatcher::resource::{NodeId, Resource, SessionContext}; -use crate::net::routing::dispatcher::tables::{DataRoutes, PullCaches, Route, RoutingExpr}; +use crate::net::routing::dispatcher::tables::{PullCaches, Route, RoutingExpr}; use crate::net::routing::dispatcher::tables::{Tables, TablesLock}; use crate::net::routing::hat::HatPubSubTrait; +use crate::net::routing::router::RoutesIndexes; use crate::net::routing::{RoutingContext, PREFIX_LIVELINESS}; use petgraph::graph::NodeIndex; use std::borrow::Cow; @@ -869,42 +870,7 @@ impl HatPubSubTrait for HatCode { } } - fn compute_data_routes_( - &self, - tables: &Tables, - routes: &mut DataRoutes, - expr: &mut RoutingExpr, - ) { - let indexes = hat!(tables) - .peers_net - .as_ref() - .unwrap() - .graph - .node_indices() - .collect::>(); - let max_idx = indexes.iter().max().unwrap(); - - routes.routers.clear(); - - routes - .routers - .resize_with(max_idx.index() + 1, || Arc::new(HashMap::new())); - - routes.peers.clear(); - - routes - .peers - .resize_with(max_idx.index() + 1, || Arc::new(HashMap::new())); - - for idx in &indexes { - let route = self.compute_data_route(tables, expr, idx.index() as NodeId, WhatAmI::Peer); - routes.routers[idx.index()] = route.clone(); - routes.peers[idx.index()] = route; - } - - let clients_data_routes = &mut routes.clients; - clients_data_routes.clear(); - clients_data_routes.resize_with(1, || Arc::new(HashMap::new())); - clients_data_routes[0] = self.compute_data_route(tables, expr, 0, WhatAmI::Peer); + fn get_data_routes_entries(&self, tables: &Tables) -> RoutesIndexes { + get_routes_entries(tables) } } diff --git a/zenoh/src/net/routing/hat/linkstate_peer/queries.rs b/zenoh/src/net/routing/hat/linkstate_peer/queries.rs index 23295f8948..e0ddf456b4 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/queries.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/queries.rs @@ -12,16 +12,15 @@ // ZettaScale Zenoh Team, // use super::network::Network; -use super::{face_hat, face_hat_mut, hat, hat_mut, res_hat, res_hat_mut}; +use super::{face_hat, face_hat_mut, get_routes_entries, hat, hat_mut, res_hat, res_hat_mut}; use super::{get_peer, HatCode, HatContext, HatFace, HatTables}; use crate::net::routing::dispatcher::face::FaceState; use crate::net::routing::dispatcher::queries::*; 
use crate::net::routing::dispatcher::resource::{NodeId, Resource, SessionContext}; -use crate::net::routing::dispatcher::tables::{ - QueryRoutes, QueryTargetQabl, QueryTargetQablSet, RoutingExpr, -}; +use crate::net::routing::dispatcher::tables::{QueryTargetQabl, QueryTargetQablSet, RoutingExpr}; use crate::net::routing::dispatcher::tables::{Tables, TablesLock}; use crate::net::routing::hat::HatQueriesTrait; +use crate::net::routing::router::RoutesIndexes; use crate::net::routing::{RoutingContext, PREFIX_LIVELINESS}; use ordered_float::OrderedFloat; use petgraph::graph::NodeIndex; @@ -935,37 +934,7 @@ impl HatQueriesTrait for HatCode { result } - fn compute_query_routes_( - &self, - tables: &Tables, - routes: &mut QueryRoutes, - expr: &mut RoutingExpr, - ) { - let indexes = hat!(tables) - .peers_net - .as_ref() - .unwrap() - .graph - .node_indices() - .collect::>(); - let max_idx = indexes.iter().max().unwrap(); - - routes - .routers - .resize_with(max_idx.index() + 1, || Arc::new(QueryTargetQablSet::new())); - routes - .peers - .resize_with(max_idx.index() + 1, || Arc::new(QueryTargetQablSet::new())); - - for idx in &indexes { - let route = - self.compute_query_route(tables, expr, idx.index() as NodeId, WhatAmI::Peer); - routes.routers[idx.index()] = route.clone(); - routes.peers[idx.index()] = route; - } - routes - .clients - .resize_with(1, || Arc::new(QueryTargetQablSet::new())); - routes.clients[0] = self.compute_query_route(tables, expr, 0, WhatAmI::Peer); + fn get_query_routes_entries(&self, tables: &Tables) -> RoutesIndexes { + get_routes_entries(tables) } } diff --git a/zenoh/src/net/routing/hat/mod.rs b/zenoh/src/net/routing/hat/mod.rs index 552cac3357..bf2b2bc9d7 100644 --- a/zenoh/src/net/routing/hat/mod.rs +++ b/zenoh/src/net/routing/hat/mod.rs @@ -17,12 +17,15 @@ //! This module is intended for Zenoh's internal use. //! //! 
[Click here for Zenoh's documentation](../zenoh/index.html) -use super::dispatcher::{ - face::{Face, FaceState}, - tables::{ - DataRoutes, NodeId, PullCaches, QueryRoutes, QueryTargetQablSet, Resource, Route, - RoutingExpr, Tables, TablesLock, +use super::{ + dispatcher::{ + face::{Face, FaceState}, + tables::{ + NodeId, PullCaches, QueryTargetQablSet, Resource, Route, RoutingExpr, Tables, + TablesLock, + }, }, + router::RoutesIndexes, }; use crate::runtime::Runtime; use std::{any::Any, sync::Arc}; @@ -165,30 +168,7 @@ pub(crate) trait HatPubSubTrait { } } - fn compute_data_routes_( - &self, - tables: &Tables, - routes: &mut DataRoutes, - expr: &mut RoutingExpr, - ); - - fn compute_data_routes(&self, tables: &Tables, expr: &mut RoutingExpr) -> DataRoutes { - let mut routes = DataRoutes::default(); - self.compute_data_routes_(tables, &mut routes, expr); - routes - } - - fn update_data_routes(&self, tables: &Tables, res: &mut Arc) { - if res.context.is_some() { - let mut res_mut = res.clone(); - let res_mut = get_mut_unchecked(&mut res_mut); - self.compute_data_routes_( - tables, - &mut res_mut.context_mut().data_routes, - &mut RoutingExpr::new(res, ""), - ); - } - } + fn get_data_routes_entries(&self, tables: &Tables) -> RoutesIndexes; } pub(crate) trait HatQueriesTrait { @@ -215,30 +195,7 @@ pub(crate) trait HatQueriesTrait { source_type: WhatAmI, ) -> Arc; - fn compute_query_routes_( - &self, - tables: &Tables, - routes: &mut QueryRoutes, - expr: &mut RoutingExpr, - ); - - fn compute_query_routes(&self, tables: &Tables, res: &Arc) -> QueryRoutes { - let mut routes = QueryRoutes::default(); - self.compute_query_routes_(tables, &mut routes, &mut RoutingExpr::new(res, "")); - routes - } - - fn update_query_routes(&self, tables: &Tables, res: &Arc) { - if res.context.is_some() { - let mut res_mut = res.clone(); - let res_mut = get_mut_unchecked(&mut res_mut); - self.compute_query_routes_( - tables, - &mut res_mut.context_mut().query_routes, - &mut RoutingExpr::new(res, ""), - ); - } - } + fn get_query_routes_entries(&self, tables: &Tables) -> RoutesIndexes; fn compute_local_replies( &self, diff --git a/zenoh/src/net/routing/hat/p2p_peer/mod.rs b/zenoh/src/net/routing/hat/p2p_peer/mod.rs index 36a023720a..148877a2f4 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/mod.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/mod.rs @@ -19,7 +19,12 @@ //! [Click here for Zenoh's documentation](../zenoh/index.html) use crate::{ net::{ - codec::Zenoh080Routing, protocol::linkstate::LinkStateList, routing::dispatcher::face::Face, + codec::Zenoh080Routing, + protocol::linkstate::LinkStateList, + routing::{ + dispatcher::face::Face, + router::{compute_data_routes, compute_query_routes, RoutesIndexes}, + }, }, runtime::Runtime, }; @@ -233,15 +238,12 @@ impl HatBaseTrait for HatCode { let mut expr = RoutingExpr::new(&_match, ""); matches_data_routes.push(( _match.clone(), - rtables.hat_code.compute_data_routes(&rtables, &mut expr), + compute_data_routes(&rtables, &mut expr), rtables.hat_code.compute_matching_pulls(&rtables, &mut expr), )); } for _match in qabls_matches.drain(..) 
{ - matches_query_routes.push(( - _match.clone(), - rtables.hat_code.compute_query_routes(&rtables, &_match), - )); + matches_query_routes.push((_match.clone(), compute_query_routes(&rtables, &_match))); } drop(rtables); @@ -373,3 +375,12 @@ impl HatFace { } impl HatTrait for HatCode {} + +#[inline] +fn get_routes_entries() -> RoutesIndexes { + RoutesIndexes { + routers: vec![0], + peers: vec![0], + clients: vec![0], + } +} diff --git a/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs b/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs index e3685d66f2..a6162500a8 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs @@ -11,14 +11,15 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::{face_hat, face_hat_mut}; +use super::{face_hat, face_hat_mut, get_routes_entries}; use super::{HatCode, HatFace}; use crate::net::routing::dispatcher::face::FaceState; use crate::net::routing::dispatcher::pubsub::*; use crate::net::routing::dispatcher::resource::{NodeId, Resource, SessionContext}; -use crate::net::routing::dispatcher::tables::{DataRoutes, PullCaches, Route, RoutingExpr}; +use crate::net::routing::dispatcher::tables::{PullCaches, Route, RoutingExpr}; use crate::net::routing::dispatcher::tables::{Tables, TablesLock}; use crate::net::routing::hat::HatPubSubTrait; +use crate::net::routing::router::RoutesIndexes; use crate::net::routing::{RoutingContext, PREFIX_LIVELINESS}; use std::borrow::Cow; use std::collections::HashMap; @@ -450,23 +451,7 @@ impl HatPubSubTrait for HatCode { } } - fn compute_data_routes_( - &self, - tables: &Tables, - routes: &mut DataRoutes, - expr: &mut RoutingExpr, - ) { - let route = self.compute_data_route(tables, expr, NodeId::default(), WhatAmI::Peer); - - routes.routers.resize_with(1, || Arc::new(HashMap::new())); - routes.routers[0] = route.clone(); - - routes.peers.resize_with(1, || Arc::new(HashMap::new())); - routes.peers[0] = route; - - let route = self.compute_data_route(tables, expr, NodeId::default(), WhatAmI::Client); - - routes.clients.resize_with(1, || Arc::new(HashMap::new())); - routes.clients[0] = route; + fn get_data_routes_entries(&self, _tables: &Tables) -> RoutesIndexes { + get_routes_entries() } } diff --git a/zenoh/src/net/routing/hat/p2p_peer/queries.rs b/zenoh/src/net/routing/hat/p2p_peer/queries.rs index c9dda3c4a4..90404b3d99 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/queries.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/queries.rs @@ -11,16 +11,15 @@ // Contributors: // ZettaScale Zenoh Team, // -use super::{face_hat, face_hat_mut}; +use super::{face_hat, face_hat_mut, get_routes_entries}; use super::{HatCode, HatFace}; use crate::net::routing::dispatcher::face::FaceState; use crate::net::routing::dispatcher::queries::*; use crate::net::routing::dispatcher::resource::{NodeId, Resource, SessionContext}; -use crate::net::routing::dispatcher::tables::{ - QueryRoutes, QueryTargetQabl, QueryTargetQablSet, RoutingExpr, -}; +use crate::net::routing::dispatcher::tables::{QueryTargetQabl, QueryTargetQablSet, RoutingExpr}; use crate::net::routing::dispatcher::tables::{Tables, TablesLock}; use crate::net::routing::hat::HatQueriesTrait; +use crate::net::routing::router::RoutesIndexes; use crate::net::routing::{RoutingContext, PREFIX_LIVELINESS}; use ordered_float::OrderedFloat; use std::borrow::Cow; @@ -437,29 +436,7 @@ impl HatQueriesTrait for HatCode { result } - fn compute_query_routes_( - &self, - tables: &Tables, - routes: &mut QueryRoutes, - expr: &mut RoutingExpr, - ) { - let route = 
self.compute_query_route(tables, expr, NodeId::default(), WhatAmI::Peer); - - routes - .routers - .resize_with(1, || Arc::new(QueryTargetQablSet::new())); - routes.routers[0] = route.clone(); - - routes - .peers - .resize_with(1, || Arc::new(QueryTargetQablSet::new())); - routes.peers[0] = route; - - let route = self.compute_query_route(tables, expr, NodeId::default(), WhatAmI::Client); - - routes - .clients - .resize_with(1, || Arc::new(QueryTargetQablSet::new())); - routes.clients[0] = route; + fn get_query_routes_entries(&self, _tables: &Tables) -> RoutesIndexes { + get_routes_entries() } } diff --git a/zenoh/src/net/routing/hat/router/mod.rs b/zenoh/src/net/routing/hat/router/mod.rs index 4f7d38da4c..62cdbc8a37 100644 --- a/zenoh/src/net/routing/hat/router/mod.rs +++ b/zenoh/src/net/routing/hat/router/mod.rs @@ -35,7 +35,12 @@ use super::{ }; use crate::{ net::{ - codec::Zenoh080Routing, protocol::linkstate::LinkStateList, routing::dispatcher::face::Face, + codec::Zenoh080Routing, + protocol::linkstate::LinkStateList, + routing::{ + dispatcher::face::Face, + router::{compute_data_routes, compute_query_routes, RoutesIndexes}, + }, }, runtime::Runtime, }; @@ -475,15 +480,12 @@ impl HatBaseTrait for HatCode { let mut expr = RoutingExpr::new(&_match, ""); matches_data_routes.push(( _match.clone(), - rtables.hat_code.compute_data_routes(&rtables, &mut expr), + compute_data_routes(&rtables, &mut expr), rtables.hat_code.compute_matching_pulls(&rtables, &mut expr), )); } for _match in qabls_matches.drain(..) { - matches_query_routes.push(( - _match.clone(), - rtables.hat_code.compute_query_routes(&rtables, &_match), - )); + matches_query_routes.push((_match.clone(), compute_query_routes(&rtables, &_match))); } drop(rtables); @@ -827,3 +829,32 @@ fn get_peer(tables: &Tables, face: &Arc, nodeid: NodeId) -> Option RoutesIndexes { + let routers_indexes = hat!(tables) + .routers_net + .as_ref() + .unwrap() + .graph + .node_indices() + .map(|i| i.index() as NodeId) + .collect::>(); + let peers_indexes = if hat!(tables).full_net(WhatAmI::Peer) { + hat!(tables) + .peers_net + .as_ref() + .unwrap() + .graph + .node_indices() + .map(|i| i.index() as NodeId) + .collect::>() + } else { + vec![0] + }; + RoutesIndexes { + routers: routers_indexes, + peers: peers_indexes, + clients: vec![0], + } +} diff --git a/zenoh/src/net/routing/hat/router/pubsub.rs b/zenoh/src/net/routing/hat/router/pubsub.rs index 3ee7a40573..b6de714723 100644 --- a/zenoh/src/net/routing/hat/router/pubsub.rs +++ b/zenoh/src/net/routing/hat/router/pubsub.rs @@ -12,14 +12,15 @@ // ZettaScale Zenoh Team, // use super::network::Network; -use super::{face_hat, face_hat_mut, hat, hat_mut, res_hat, res_hat_mut}; +use super::{face_hat, face_hat_mut, get_routes_entries, hat, hat_mut, res_hat, res_hat_mut}; use super::{get_peer, get_router, HatCode, HatContext, HatFace, HatTables}; use crate::net::routing::dispatcher::face::FaceState; use crate::net::routing::dispatcher::pubsub::*; use crate::net::routing::dispatcher::resource::{NodeId, Resource, SessionContext}; -use crate::net::routing::dispatcher::tables::{DataRoutes, PullCaches, Route, RoutingExpr}; +use crate::net::routing::dispatcher::tables::{PullCaches, Route, RoutingExpr}; use crate::net::routing::dispatcher::tables::{Tables, TablesLock}; use crate::net::routing::hat::HatPubSubTrait; +use crate::net::routing::router::RoutesIndexes; use crate::net::routing::{RoutingContext, PREFIX_LIVELINESS}; use petgraph::graph::NodeIndex; use std::borrow::Cow; @@ -1283,54 +1284,7 @@ impl 
HatPubSubTrait for HatCode { } } - fn compute_data_routes_( - &self, - tables: &Tables, - routes: &mut DataRoutes, - expr: &mut RoutingExpr, - ) { - let indexes = hat!(tables) - .routers_net - .as_ref() - .unwrap() - .graph - .node_indices() - .collect::>(); - let max_idx = indexes.iter().max().unwrap(); - routes - .routers - .resize_with(max_idx.index() + 1, || Arc::new(HashMap::new())); - - for idx in &indexes { - routes.routers[idx.index()] = - self.compute_data_route(tables, expr, idx.index() as NodeId, WhatAmI::Router); - } - - if hat!(tables).full_net(WhatAmI::Peer) { - let indexes = hat!(tables) - .peers_net - .as_ref() - .unwrap() - .graph - .node_indices() - .collect::>(); - let max_idx = indexes.iter().max().unwrap(); - routes - .peers - .resize_with(max_idx.index() + 1, || Arc::new(HashMap::new())); - - for idx in &indexes { - routes.peers[idx.index()] = - self.compute_data_route(tables, expr, idx.index() as NodeId, WhatAmI::Peer); - } - } else { - routes.peers.resize_with(1, || Arc::new(HashMap::new())); - routes.peers[0] = - self.compute_data_route(tables, expr, NodeId::default(), WhatAmI::Peer); - } - - routes.clients.resize_with(1, || Arc::new(HashMap::new())); - routes.clients[0] = - self.compute_data_route(tables, expr, NodeId::default(), WhatAmI::Client); + fn get_data_routes_entries(&self, tables: &Tables) -> RoutesIndexes { + get_routes_entries(tables) } } diff --git a/zenoh/src/net/routing/hat/router/queries.rs b/zenoh/src/net/routing/hat/router/queries.rs index e881375085..fad19d36fe 100644 --- a/zenoh/src/net/routing/hat/router/queries.rs +++ b/zenoh/src/net/routing/hat/router/queries.rs @@ -12,16 +12,15 @@ // ZettaScale Zenoh Team, // use super::network::Network; -use super::{face_hat, face_hat_mut, hat, hat_mut, res_hat, res_hat_mut}; +use super::{face_hat, face_hat_mut, get_routes_entries, hat, hat_mut, res_hat, res_hat_mut}; use super::{get_peer, get_router, HatCode, HatContext, HatFace, HatTables}; use crate::net::routing::dispatcher::face::FaceState; use crate::net::routing::dispatcher::queries::*; use crate::net::routing::dispatcher::resource::{NodeId, Resource, SessionContext}; -use crate::net::routing::dispatcher::tables::{ - QueryRoutes, QueryTargetQabl, QueryTargetQablSet, RoutingExpr, -}; +use crate::net::routing::dispatcher::tables::{QueryTargetQabl, QueryTargetQablSet, RoutingExpr}; use crate::net::routing::dispatcher::tables::{Tables, TablesLock}; use crate::net::routing::hat::HatQueriesTrait; +use crate::net::routing::router::RoutesIndexes; use crate::net::routing::{RoutingContext, PREFIX_LIVELINESS}; use ordered_float::OrderedFloat; use petgraph::graph::NodeIndex; @@ -1428,58 +1427,7 @@ impl HatQueriesTrait for HatCode { result } - fn compute_query_routes_( - &self, - tables: &Tables, - routes: &mut QueryRoutes, - expr: &mut RoutingExpr, - ) { - let indexes = hat!(tables) - .routers_net - .as_ref() - .unwrap() - .graph - .node_indices() - .collect::>(); - let max_idx = indexes.iter().max().unwrap(); - routes - .routers - .resize_with(max_idx.index() + 1, || Arc::new(QueryTargetQablSet::new())); - - for idx in &indexes { - routes.routers[idx.index()] = - self.compute_query_route(tables, expr, idx.index() as NodeId, WhatAmI::Router); - } - - if hat!(tables).full_net(WhatAmI::Peer) { - let indexes = hat!(tables) - .peers_net - .as_ref() - .unwrap() - .graph - .node_indices() - .collect::>(); - let max_idx = indexes.iter().max().unwrap(); - routes - .peers - .resize_with(max_idx.index() + 1, || Arc::new(QueryTargetQablSet::new())); - - for idx in 
&indexes { - routes.peers[idx.index()] = - self.compute_query_route(tables, expr, idx.index() as NodeId, WhatAmI::Peer); - } - } else { - routes - .peers - .resize_with(1, || Arc::new(QueryTargetQablSet::new())); - routes.peers[0] = - self.compute_query_route(tables, expr, NodeId::default(), WhatAmI::Peer); - } - - routes - .clients - .resize_with(1, || Arc::new(QueryTargetQablSet::new())); - routes.clients[0] = - self.compute_query_route(tables, expr, NodeId::default(), WhatAmI::Client); + fn get_query_routes_entries(&self, tables: &Tables) -> RoutesIndexes { + get_routes_entries(tables) } } From 7cdfc4440003337ceba73f473539025bfcf4d9cf Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Thu, 11 Jan 2024 15:33:55 +0100 Subject: [PATCH 033/122] Move matching pulls computation out of hats --- zenoh/src/net/routing/dispatcher/pubsub.rs | 61 +++++++++++++++++-- zenoh/src/net/routing/hat/client/mod.rs | 6 +- zenoh/src/net/routing/hat/client/pubsub.rs | 32 +--------- .../src/net/routing/hat/linkstate_peer/mod.rs | 6 +- .../net/routing/hat/linkstate_peer/pubsub.rs | 32 +--------- zenoh/src/net/routing/hat/mod.rs | 34 +---------- zenoh/src/net/routing/hat/p2p_peer/mod.rs | 6 +- zenoh/src/net/routing/hat/p2p_peer/pubsub.rs | 32 +--------- zenoh/src/net/routing/hat/router/mod.rs | 6 +- zenoh/src/net/routing/hat/router/pubsub.rs | 32 +--------- 10 files changed, 76 insertions(+), 171 deletions(-) diff --git a/zenoh/src/net/routing/dispatcher/pubsub.rs b/zenoh/src/net/routing/dispatcher/pubsub.rs index c7ddd58504..c9fdce912c 100644 --- a/zenoh/src/net/routing/dispatcher/pubsub.rs +++ b/zenoh/src/net/routing/dispatcher/pubsub.rs @@ -16,10 +16,13 @@ use super::resource::{DataRoutes, Direction, PullCaches, Resource}; use super::tables::{NodeId, Route, RoutingExpr, Tables, TablesLock}; use crate::net::routing::dispatcher::face::Face; use crate::net::routing::RoutingContext; +use std::borrow::Cow; use std::collections::HashMap; use std::sync::Arc; use std::sync::RwLock; use zenoh_core::zread; +use zenoh_protocol::core::key_expr::OwnedKeyExpr; +use zenoh_protocol::network::declare::Mode; use zenoh_protocol::{ core::{WhatAmI, WireExpr}, network::{declare::ext, Push}, @@ -87,7 +90,7 @@ pub(crate) fn update_data_routes(tables: &Tables, res: &mut Arc) { pub(crate) fn update_data_routes_from(tables: &mut Tables, res: &mut Arc) { update_data_routes(tables, res); - tables.hat_code.clone().update_matching_pulls(tables, res); + update_matching_pulls(tables, res); let res = get_mut_unchecked(res); for child in res.childs.values_mut() { update_data_routes_from(tables, child); @@ -104,14 +107,14 @@ pub(crate) fn compute_matches_data_routes<'a>( routes.push(( res.clone(), compute_data_routes(tables, &mut expr), - tables.hat_code.compute_matching_pulls(tables, &mut expr), + compute_matching_pulls(tables, &mut expr), )); for match_ in &res.context().matches { let match_ = match_.upgrade().unwrap(); if !Arc::ptr_eq(&match_, res) { let mut expr = RoutingExpr::new(&match_, ""); let match_routes = compute_data_routes(tables, &mut expr); - let matching_pulls = tables.hat_code.compute_matching_pulls(tables, &mut expr); + let matching_pulls = compute_matching_pulls(tables, &mut expr); routes.push((match_, match_routes, matching_pulls)); } } @@ -122,12 +125,12 @@ pub(crate) fn compute_matches_data_routes<'a>( pub(crate) fn update_matches_data_routes<'a>(tables: &'a mut Tables, res: &'a mut Arc) { if res.context.is_some() { update_data_routes(tables, res); - tables.hat_code.update_matching_pulls(tables, res); + 
update_matching_pulls(tables, res); for match_ in &res.context().matches { let mut match_ = match_.upgrade().unwrap(); if !Arc::ptr_eq(&match_, res) { update_data_routes(tables, &mut match_); - tables.hat_code.update_matching_pulls(tables, &mut match_); + update_matching_pulls(tables, &mut match_); } } } @@ -222,6 +225,52 @@ pub(crate) fn get_local_data_route( }) } +fn compute_matching_pulls_(tables: &Tables, pull_caches: &mut PullCaches, expr: &mut RoutingExpr) { + let ke = if let Ok(ke) = OwnedKeyExpr::try_from(expr.full_expr()) { + ke + } else { + return; + }; + let res = Resource::get_resource(expr.prefix, expr.suffix); + let matches = res + .as_ref() + .and_then(|res| res.context.as_ref()) + .map(|ctx| Cow::from(&ctx.matches)) + .unwrap_or_else(|| Cow::from(Resource::get_matches(tables, &ke))); + + for mres in matches.iter() { + let mres = mres.upgrade().unwrap(); + for context in mres.session_ctxs.values() { + if let Some(subinfo) = &context.subs { + if subinfo.mode == Mode::Pull { + pull_caches.push(context.clone()); + } + } + } + } +} + +pub(crate) fn compute_matching_pulls(tables: &Tables, expr: &mut RoutingExpr) -> Arc { + let mut pull_caches = PullCaches::default(); + compute_matching_pulls_(tables, &mut pull_caches, expr); + Arc::new(pull_caches) +} + +pub(crate) fn update_matching_pulls(tables: &Tables, res: &mut Arc) { + if res.context.is_some() { + let mut res_mut = res.clone(); + let res_mut = get_mut_unchecked(&mut res_mut); + if res_mut.context_mut().matching_pulls.is_none() { + res_mut.context_mut().matching_pulls = Some(Arc::new(PullCaches::default())); + } + compute_matching_pulls_( + tables, + get_mut_unchecked(res_mut.context_mut().matching_pulls.as_mut().unwrap()), + &mut RoutingExpr::new(res, ""), + ); + } +} + #[inline] fn get_matching_pulls( tables: &Tables, @@ -231,7 +280,7 @@ fn get_matching_pulls( res.as_ref() .and_then(|res| res.context.as_ref()) .and_then(|ctx| ctx.matching_pulls.clone()) - .unwrap_or_else(|| tables.hat_code.compute_matching_pulls(tables, expr)) + .unwrap_or_else(|| compute_matching_pulls(tables, expr)) } macro_rules! cache_data { diff --git a/zenoh/src/net/routing/hat/client/mod.rs b/zenoh/src/net/routing/hat/client/mod.rs index 36a576177a..d5d05dca7e 100644 --- a/zenoh/src/net/routing/hat/client/mod.rs +++ b/zenoh/src/net/routing/hat/client/mod.rs @@ -20,7 +20,9 @@ use crate::{ net::routing::{ dispatcher::face::Face, - router::{compute_data_routes, compute_query_routes, RoutesIndexes}, + router::{ + compute_data_routes, compute_matching_pulls, compute_query_routes, RoutesIndexes, + }, }, runtime::Runtime, }; @@ -196,7 +198,7 @@ impl HatBaseTrait for HatCode { matches_data_routes.push(( _match.clone(), compute_data_routes(&rtables, &mut expr), - rtables.hat_code.compute_matching_pulls(&rtables, &mut expr), + compute_matching_pulls(&rtables, &mut expr), )); } for _match in qabls_matches.drain(..) 
{ diff --git a/zenoh/src/net/routing/hat/client/pubsub.rs b/zenoh/src/net/routing/hat/client/pubsub.rs index a6162500a8..7bd10c295a 100644 --- a/zenoh/src/net/routing/hat/client/pubsub.rs +++ b/zenoh/src/net/routing/hat/client/pubsub.rs @@ -16,7 +16,7 @@ use super::{HatCode, HatFace}; use crate::net::routing::dispatcher::face::FaceState; use crate::net::routing::dispatcher::pubsub::*; use crate::net::routing::dispatcher::resource::{NodeId, Resource, SessionContext}; -use crate::net::routing::dispatcher::tables::{PullCaches, Route, RoutingExpr}; +use crate::net::routing::dispatcher::tables::{Route, RoutingExpr}; use crate::net::routing::dispatcher::tables::{Tables, TablesLock}; use crate::net::routing::hat::HatPubSubTrait; use crate::net::routing::router::RoutesIndexes; @@ -421,36 +421,6 @@ impl HatPubSubTrait for HatCode { Arc::new(route) } - fn compute_matching_pulls_( - &self, - tables: &Tables, - pull_caches: &mut PullCaches, - expr: &mut RoutingExpr, - ) { - let ke = if let Ok(ke) = OwnedKeyExpr::try_from(expr.full_expr()) { - ke - } else { - return; - }; - let res = Resource::get_resource(expr.prefix, expr.suffix); - let matches = res - .as_ref() - .and_then(|res| res.context.as_ref()) - .map(|ctx| Cow::from(&ctx.matches)) - .unwrap_or_else(|| Cow::from(Resource::get_matches(tables, &ke))); - - for mres in matches.iter() { - let mres = mres.upgrade().unwrap(); - for context in mres.session_ctxs.values() { - if let Some(subinfo) = &context.subs { - if subinfo.mode == Mode::Pull { - pull_caches.push(context.clone()); - } - } - } - } - } - fn get_data_routes_entries(&self, _tables: &Tables) -> RoutesIndexes { get_routes_entries() } diff --git a/zenoh/src/net/routing/hat/linkstate_peer/mod.rs b/zenoh/src/net/routing/hat/linkstate_peer/mod.rs index ae0a3c577a..a1e2e82c57 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/mod.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/mod.rs @@ -35,7 +35,9 @@ use crate::{ protocol::linkstate::LinkStateList, routing::{ dispatcher::face::Face, - router::{compute_data_routes, compute_query_routes, RoutesIndexes}, + router::{ + compute_data_routes, compute_matching_pulls, compute_query_routes, RoutesIndexes, + }, }, }, runtime::Runtime, @@ -311,7 +313,7 @@ impl HatBaseTrait for HatCode { matches_data_routes.push(( _match.clone(), compute_data_routes(&rtables, &mut expr), - rtables.hat_code.compute_matching_pulls(&rtables, &mut expr), + compute_matching_pulls(&rtables, &mut expr), )); } for _match in qabls_matches.drain(..) 
{ diff --git a/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs b/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs index 09cb08fa8a..6ddaae86d2 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs @@ -17,7 +17,7 @@ use super::{get_peer, HatCode, HatContext, HatFace, HatTables}; use crate::net::routing::dispatcher::face::FaceState; use crate::net::routing::dispatcher::pubsub::*; use crate::net::routing::dispatcher::resource::{NodeId, Resource, SessionContext}; -use crate::net::routing::dispatcher::tables::{PullCaches, Route, RoutingExpr}; +use crate::net::routing::dispatcher::tables::{Route, RoutingExpr}; use crate::net::routing::dispatcher::tables::{Tables, TablesLock}; use crate::net::routing::hat::HatPubSubTrait; use crate::net::routing::router::RoutesIndexes; @@ -840,36 +840,6 @@ impl HatPubSubTrait for HatCode { Arc::new(route) } - fn compute_matching_pulls_( - &self, - tables: &Tables, - pull_caches: &mut PullCaches, - expr: &mut RoutingExpr, - ) { - let ke = if let Ok(ke) = OwnedKeyExpr::try_from(expr.full_expr()) { - ke - } else { - return; - }; - let res = Resource::get_resource(expr.prefix, expr.suffix); - let matches = res - .as_ref() - .and_then(|res| res.context.as_ref()) - .map(|ctx| Cow::from(&ctx.matches)) - .unwrap_or_else(|| Cow::from(Resource::get_matches(tables, &ke))); - - for mres in matches.iter() { - let mres = mres.upgrade().unwrap(); - for context in mres.session_ctxs.values() { - if let Some(subinfo) = &context.subs { - if subinfo.mode == Mode::Pull { - pull_caches.push(context.clone()); - } - } - } - } - } - fn get_data_routes_entries(&self, tables: &Tables) -> RoutesIndexes { get_routes_entries(tables) } diff --git a/zenoh/src/net/routing/hat/mod.rs b/zenoh/src/net/routing/hat/mod.rs index bf2b2bc9d7..cdbc7488e5 100644 --- a/zenoh/src/net/routing/hat/mod.rs +++ b/zenoh/src/net/routing/hat/mod.rs @@ -20,10 +20,7 @@ use super::{ dispatcher::{ face::{Face, FaceState}, - tables::{ - NodeId, PullCaches, QueryTargetQablSet, Resource, Route, RoutingExpr, Tables, - TablesLock, - }, + tables::{NodeId, QueryTargetQablSet, Resource, Route, RoutingExpr, Tables, TablesLock}, }, router::RoutesIndexes, }; @@ -39,7 +36,6 @@ use zenoh_protocol::{ }, }; use zenoh_result::ZResult; -use zenoh_sync::get_mut_unchecked; use zenoh_transport::unicast::TransportUnicast; mod client; @@ -140,34 +136,6 @@ pub(crate) trait HatPubSubTrait { source_type: WhatAmI, ) -> Arc; - fn compute_matching_pulls_( - &self, - tables: &Tables, - pull_caches: &mut PullCaches, - expr: &mut RoutingExpr, - ); - - fn compute_matching_pulls(&self, tables: &Tables, expr: &mut RoutingExpr) -> Arc { - let mut pull_caches = PullCaches::default(); - self.compute_matching_pulls_(tables, &mut pull_caches, expr); - Arc::new(pull_caches) - } - - fn update_matching_pulls(&self, tables: &Tables, res: &mut Arc) { - if res.context.is_some() { - let mut res_mut = res.clone(); - let res_mut = get_mut_unchecked(&mut res_mut); - if res_mut.context_mut().matching_pulls.is_none() { - res_mut.context_mut().matching_pulls = Some(Arc::new(PullCaches::default())); - } - self.compute_matching_pulls_( - tables, - get_mut_unchecked(res_mut.context_mut().matching_pulls.as_mut().unwrap()), - &mut RoutingExpr::new(res, ""), - ); - } - } - fn get_data_routes_entries(&self, tables: &Tables) -> RoutesIndexes; } diff --git a/zenoh/src/net/routing/hat/p2p_peer/mod.rs b/zenoh/src/net/routing/hat/p2p_peer/mod.rs index 148877a2f4..c59491e4f1 100644 --- 
a/zenoh/src/net/routing/hat/p2p_peer/mod.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/mod.rs @@ -23,7 +23,9 @@ use crate::{ protocol::linkstate::LinkStateList, routing::{ dispatcher::face::Face, - router::{compute_data_routes, compute_query_routes, RoutesIndexes}, + router::{ + compute_data_routes, compute_matching_pulls, compute_query_routes, RoutesIndexes, + }, }, }, runtime::Runtime, @@ -239,7 +241,7 @@ impl HatBaseTrait for HatCode { matches_data_routes.push(( _match.clone(), compute_data_routes(&rtables, &mut expr), - rtables.hat_code.compute_matching_pulls(&rtables, &mut expr), + compute_matching_pulls(&rtables, &mut expr), )); } for _match in qabls_matches.drain(..) { diff --git a/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs b/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs index a6162500a8..7bd10c295a 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs @@ -16,7 +16,7 @@ use super::{HatCode, HatFace}; use crate::net::routing::dispatcher::face::FaceState; use crate::net::routing::dispatcher::pubsub::*; use crate::net::routing::dispatcher::resource::{NodeId, Resource, SessionContext}; -use crate::net::routing::dispatcher::tables::{PullCaches, Route, RoutingExpr}; +use crate::net::routing::dispatcher::tables::{Route, RoutingExpr}; use crate::net::routing::dispatcher::tables::{Tables, TablesLock}; use crate::net::routing::hat::HatPubSubTrait; use crate::net::routing::router::RoutesIndexes; @@ -421,36 +421,6 @@ impl HatPubSubTrait for HatCode { Arc::new(route) } - fn compute_matching_pulls_( - &self, - tables: &Tables, - pull_caches: &mut PullCaches, - expr: &mut RoutingExpr, - ) { - let ke = if let Ok(ke) = OwnedKeyExpr::try_from(expr.full_expr()) { - ke - } else { - return; - }; - let res = Resource::get_resource(expr.prefix, expr.suffix); - let matches = res - .as_ref() - .and_then(|res| res.context.as_ref()) - .map(|ctx| Cow::from(&ctx.matches)) - .unwrap_or_else(|| Cow::from(Resource::get_matches(tables, &ke))); - - for mres in matches.iter() { - let mres = mres.upgrade().unwrap(); - for context in mres.session_ctxs.values() { - if let Some(subinfo) = &context.subs { - if subinfo.mode == Mode::Pull { - pull_caches.push(context.clone()); - } - } - } - } - } - fn get_data_routes_entries(&self, _tables: &Tables) -> RoutesIndexes { get_routes_entries() } diff --git a/zenoh/src/net/routing/hat/router/mod.rs b/zenoh/src/net/routing/hat/router/mod.rs index 62cdbc8a37..c7d5d6c2d4 100644 --- a/zenoh/src/net/routing/hat/router/mod.rs +++ b/zenoh/src/net/routing/hat/router/mod.rs @@ -39,7 +39,9 @@ use crate::{ protocol::linkstate::LinkStateList, routing::{ dispatcher::face::Face, - router::{compute_data_routes, compute_query_routes, RoutesIndexes}, + router::{ + compute_data_routes, compute_matching_pulls, compute_query_routes, RoutesIndexes, + }, }, }, runtime::Runtime, @@ -481,7 +483,7 @@ impl HatBaseTrait for HatCode { matches_data_routes.push(( _match.clone(), compute_data_routes(&rtables, &mut expr), - rtables.hat_code.compute_matching_pulls(&rtables, &mut expr), + compute_matching_pulls(&rtables, &mut expr), )); } for _match in qabls_matches.drain(..) 
{ diff --git a/zenoh/src/net/routing/hat/router/pubsub.rs b/zenoh/src/net/routing/hat/router/pubsub.rs index b6de714723..d8599132ec 100644 --- a/zenoh/src/net/routing/hat/router/pubsub.rs +++ b/zenoh/src/net/routing/hat/router/pubsub.rs @@ -17,7 +17,7 @@ use super::{get_peer, get_router, HatCode, HatContext, HatFace, HatTables}; use crate::net::routing::dispatcher::face::FaceState; use crate::net::routing::dispatcher::pubsub::*; use crate::net::routing::dispatcher::resource::{NodeId, Resource, SessionContext}; -use crate::net::routing::dispatcher::tables::{PullCaches, Route, RoutingExpr}; +use crate::net::routing::dispatcher::tables::{Route, RoutingExpr}; use crate::net::routing::dispatcher::tables::{Tables, TablesLock}; use crate::net::routing::hat::HatPubSubTrait; use crate::net::routing::router::RoutesIndexes; @@ -1254,36 +1254,6 @@ impl HatPubSubTrait for HatCode { Arc::new(route) } - fn compute_matching_pulls_( - &self, - tables: &Tables, - pull_caches: &mut PullCaches, - expr: &mut RoutingExpr, - ) { - let ke = if let Ok(ke) = OwnedKeyExpr::try_from(expr.full_expr()) { - ke - } else { - return; - }; - let res = Resource::get_resource(expr.prefix, expr.suffix); - let matches = res - .as_ref() - .and_then(|res| res.context.as_ref()) - .map(|ctx| Cow::from(&ctx.matches)) - .unwrap_or_else(|| Cow::from(Resource::get_matches(tables, &ke))); - - for mres in matches.iter() { - let mres = mres.upgrade().unwrap(); - for context in mres.session_ctxs.values() { - if let Some(subinfo) = &context.subs { - if subinfo.mode == Mode::Pull { - pull_caches.push(context.clone()); - } - } - } - } - } - fn get_data_routes_entries(&self, tables: &Tables) -> RoutesIndexes { get_routes_entries(tables) } From 567feb97c3cf0fd6fb1d606dee40d06d2391d81a Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Thu, 11 Jan 2024 16:12:01 +0100 Subject: [PATCH 034/122] Fix query routes update --- zenoh/src/net/routing/dispatcher/queries.rs | 6 +++--- zenoh/src/net/routing/dispatcher/resource.rs | 2 +- zenoh/src/net/routing/dispatcher/tables.rs | 10 +++++----- zenoh/src/net/routing/hat/linkstate_peer/queries.rs | 2 +- zenoh/src/net/routing/hat/router/queries.rs | 2 +- 5 files changed, 11 insertions(+), 11 deletions(-) diff --git a/zenoh/src/net/routing/dispatcher/queries.rs b/zenoh/src/net/routing/dispatcher/queries.rs index 7b3c4b3a1e..536e811e02 100644 --- a/zenoh/src/net/routing/dispatcher/queries.rs +++ b/zenoh/src/net/routing/dispatcher/queries.rs @@ -96,11 +96,11 @@ pub(crate) fn update_query_routes(tables: &Tables, res: &Arc) { } } -pub(crate) fn compute_query_routes_from(tables: &mut Tables, res: &mut Arc) { - compute_query_routes(tables, res); +pub(crate) fn update_query_routes_from(tables: &mut Tables, res: &mut Arc) { + update_query_routes(tables, res); let res = get_mut_unchecked(res); for child in res.childs.values_mut() { - compute_query_routes_from(tables, child); + update_query_routes_from(tables, child); } } diff --git a/zenoh/src/net/routing/dispatcher/resource.rs b/zenoh/src/net/routing/dispatcher/resource.rs index 8a210ed13b..8a183088d6 100644 --- a/zenoh/src/net/routing/dispatcher/resource.rs +++ b/zenoh/src/net/routing/dispatcher/resource.rs @@ -693,7 +693,7 @@ pub fn register_expr( get_mut_unchecked(face) .remote_mappings .insert(expr_id, res.clone()); - wtables.compute_matches_routes(&mut res); + wtables.update_matches_routes(&mut res); drop(wtables); } }, diff --git a/zenoh/src/net/routing/dispatcher/tables.rs b/zenoh/src/net/routing/dispatcher/tables.rs index c8b988f99f..1b02812a39 100644 --- 
a/zenoh/src/net/routing/dispatcher/tables.rs +++ b/zenoh/src/net/routing/dispatcher/tables.rs @@ -145,20 +145,20 @@ impl Tables { self.faces.values().find(|face| face.zid == *zid) } - fn compute_routes(&mut self, res: &mut Arc) { + fn update_routes(&mut self, res: &mut Arc) { update_data_routes(self, res); - compute_query_routes(self, res); + update_query_routes(self, res); } - pub(crate) fn compute_matches_routes(&mut self, res: &mut Arc) { + pub(crate) fn update_matches_routes(&mut self, res: &mut Arc) { if res.context.is_some() { - self.compute_routes(res); + self.update_routes(res); let resclone = res.clone(); for match_ in &mut get_mut_unchecked(res).context_mut().matches { let match_ = &mut match_.upgrade().unwrap(); if !Arc::ptr_eq(match_, &resclone) && match_.context.is_some() { - self.compute_routes(match_); + self.update_routes(match_); } } } diff --git a/zenoh/src/net/routing/hat/linkstate_peer/queries.rs b/zenoh/src/net/routing/hat/linkstate_peer/queries.rs index e0ddf456b4..e2a1ce7a0a 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/queries.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/queries.rs @@ -726,7 +726,7 @@ pub(super) fn queries_tree_change(tables: &mut Tables, new_childs: &[Vec Date: Fri, 12 Jan 2024 15:37:45 +0100 Subject: [PATCH 035/122] Fix copy-paste error --- zenoh/src/net/routing/dispatcher/queries.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/zenoh/src/net/routing/dispatcher/queries.rs b/zenoh/src/net/routing/dispatcher/queries.rs index 536e811e02..6d5fb3b848 100644 --- a/zenoh/src/net/routing/dispatcher/queries.rs +++ b/zenoh/src/net/routing/dispatcher/queries.rs @@ -39,7 +39,7 @@ pub(crate) struct Query { } fn compute_query_routes_(tables: &Tables, routes: &mut QueryRoutes, expr: &mut RoutingExpr) { - let indexes = tables.hat_code.get_data_routes_entries(tables); + let indexes = tables.hat_code.get_query_routes_entries(tables); let max_idx = indexes.routers.iter().max().unwrap(); routes.routers.resize_with((*max_idx as usize) + 1, || { From 62049b1745b045b40f8939df35d56aff64fca76b Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Mon, 15 Jan 2024 10:11:15 +0100 Subject: [PATCH 036/122] Renaming --- zenoh/src/net/primitives/demux.rs | 12 ++--- zenoh/src/net/primitives/mux.rs | 32 ++++++------- zenoh/src/net/routing/dispatcher/tables.rs | 8 ++-- zenoh/src/net/routing/interceptor/mod.rs | 55 ++++++++++++---------- zenoh/src/net/routing/router.rs | 22 ++++----- 5 files changed, 66 insertions(+), 63 deletions(-) diff --git a/zenoh/src/net/primitives/demux.rs b/zenoh/src/net/primitives/demux.rs index 9ea371d0cf..95b89268df 100644 --- a/zenoh/src/net/primitives/demux.rs +++ b/zenoh/src/net/primitives/demux.rs @@ -14,7 +14,7 @@ use super::Primitives; use crate::net::routing::{ dispatcher::face::Face, - interceptor::{InterceptTrait, InterceptsChain}, + interceptor::{InterceptorTrait, InterceptorsChain}, RoutingContext, }; use std::any::Any; @@ -27,19 +27,19 @@ use zenoh_transport::TransportPeerEventHandler; pub struct DeMux { face: Face, pub(crate) transport: Option, - pub(crate) intercept: InterceptsChain, + pub(crate) interceptor: InterceptorsChain, } impl DeMux { pub(crate) fn new( face: Face, transport: Option, - intercept: InterceptsChain, + interceptor: InterceptorsChain, ) -> Self { Self { face, transport, - intercept, + interceptor, } } } @@ -47,9 +47,9 @@ impl DeMux { impl TransportPeerEventHandler for DeMux { #[inline] fn handle_message(&self, mut msg: NetworkMessage) -> ZResult<()> { - if 
!self.intercept.intercepts.is_empty() { + if !self.interceptor.interceptors.is_empty() { let ctx = RoutingContext::new_in(msg, self.face.clone()); - let ctx = match self.intercept.intercept(ctx) { + let ctx = match self.interceptor.intercept(ctx) { Some(ctx) => ctx, None => return Ok(()), }; diff --git a/zenoh/src/net/primitives/mux.rs b/zenoh/src/net/primitives/mux.rs index a31100e1d7..935d74ac5a 100644 --- a/zenoh/src/net/primitives/mux.rs +++ b/zenoh/src/net/primitives/mux.rs @@ -16,7 +16,7 @@ use std::sync::Arc; use super::{EPrimitives, Primitives}; use crate::net::routing::{ dispatcher::{face::Face, tables::TablesLock}, - interceptor::{InterceptTrait, InterceptsChain}, + interceptor::{InterceptorTrait, InterceptorsChain}, RoutingContext, }; use zenoh_protocol::network::{ @@ -28,7 +28,7 @@ pub struct Mux { pub handler: TransportUnicast, pub(crate) fid: usize, pub(crate) tables: Arc, - pub(crate) intercept: InterceptsChain, + pub(crate) interceptor: InterceptorsChain, } impl Mux { @@ -36,13 +36,13 @@ impl Mux { handler: TransportUnicast, fid: usize, tables: Arc, - intercept: InterceptsChain, + interceptor: InterceptorsChain, ) -> Mux { Mux { handler, fid, tables, - intercept, + interceptor, } } } @@ -65,7 +65,7 @@ impl Primitives for Mux { state: face.clone(), }, ); - if let Some(ctx) = self.intercept.intercept(ctx) { + if let Some(ctx) = self.interceptor.intercept(ctx) { let _ = self.handler.schedule(ctx.msg); } } @@ -88,7 +88,7 @@ impl Primitives for Mux { state: face.clone(), }, ); - if let Some(ctx) = self.intercept.intercept(ctx) { + if let Some(ctx) = self.interceptor.intercept(ctx) { let _ = self.handler.schedule(ctx.msg); } } @@ -111,7 +111,7 @@ impl Primitives for Mux { state: face.clone(), }, ); - if let Some(ctx) = self.intercept.intercept(ctx) { + if let Some(ctx) = self.interceptor.intercept(ctx) { let _ = self.handler.schedule(ctx.msg); } } @@ -134,7 +134,7 @@ impl Primitives for Mux { state: face.clone(), }, ); - if let Some(ctx) = self.intercept.intercept(ctx) { + if let Some(ctx) = self.interceptor.intercept(ctx) { let _ = self.handler.schedule(ctx.msg); } } @@ -157,7 +157,7 @@ impl Primitives for Mux { state: face.clone(), }, ); - if let Some(ctx) = self.intercept.intercept(ctx) { + if let Some(ctx) = self.interceptor.intercept(ctx) { let _ = self.handler.schedule(ctx.msg); } } @@ -181,7 +181,7 @@ impl EPrimitives for Mux { prefix: ctx.prefix, full_expr: ctx.full_expr, }; - if let Some(ctx) = self.intercept.intercept(ctx) { + if let Some(ctx) = self.interceptor.intercept(ctx) { let _ = self.handler.schedule(ctx.msg); } } @@ -198,7 +198,7 @@ impl EPrimitives for Mux { prefix: ctx.prefix, full_expr: ctx.full_expr, }; - if let Some(ctx) = self.intercept.intercept(ctx) { + if let Some(ctx) = self.interceptor.intercept(ctx) { let _ = self.handler.schedule(ctx.msg); } } @@ -215,7 +215,7 @@ impl EPrimitives for Mux { prefix: ctx.prefix, full_expr: ctx.full_expr, }; - if let Some(ctx) = self.intercept.intercept(ctx) { + if let Some(ctx) = self.interceptor.intercept(ctx) { let _ = self.handler.schedule(ctx.msg); } } @@ -232,7 +232,7 @@ impl EPrimitives for Mux { prefix: ctx.prefix, full_expr: ctx.full_expr, }; - if let Some(ctx) = self.intercept.intercept(ctx) { + if let Some(ctx) = self.interceptor.intercept(ctx) { let _ = self.handler.schedule(ctx.msg); } } @@ -249,7 +249,7 @@ impl EPrimitives for Mux { prefix: ctx.prefix, full_expr: ctx.full_expr, }; - if let Some(ctx) = self.intercept.intercept(ctx) { + if let Some(ctx) = self.interceptor.intercept(ctx) { let _ = 
self.handler.schedule(ctx.msg); } } @@ -263,7 +263,7 @@ pub struct McastMux { pub handler: TransportMulticast, pub(crate) fid: usize, pub(crate) tables: Arc, - pub(crate) intercept: InterceptsChain, + pub(crate) intercept: InterceptorsChain, } impl McastMux { @@ -271,7 +271,7 @@ impl McastMux { handler: TransportMulticast, fid: usize, tables: Arc, - intercept: InterceptsChain, + intercept: InterceptorsChain, ) -> McastMux { McastMux { handler, diff --git a/zenoh/src/net/routing/dispatcher/tables.rs b/zenoh/src/net/routing/dispatcher/tables.rs index 1b02812a39..2fcfdf27c7 100644 --- a/zenoh/src/net/routing/dispatcher/tables.rs +++ b/zenoh/src/net/routing/dispatcher/tables.rs @@ -17,8 +17,8 @@ pub use super::queries::*; pub use super::resource::*; use crate::net::routing::hat; use crate::net::routing::hat::HatTrait; -use crate::net::routing::interceptor::interceptors; -use crate::net::routing::interceptor::Interceptor; +use crate::net::routing::interceptor::interceptor_factories; +use crate::net::routing::interceptor::InterceptorFactory; use std::any::Any; use std::collections::HashMap; use std::sync::{Arc, Weak}; @@ -69,7 +69,7 @@ pub struct Tables { pub(crate) faces: HashMap>, pub(crate) mcast_groups: Vec>, pub(crate) mcast_faces: Vec>, - pub(crate) interceptors: Vec, + pub(crate) interceptors: Vec, pub(crate) pull_caches_lock: Mutex<()>, pub(crate) hat: Box, pub(crate) hat_code: Arc, // TODO make this a Box @@ -96,7 +96,7 @@ impl Tables { faces: HashMap::new(), mcast_groups: vec![], mcast_faces: vec![], - interceptors: interceptors(config), + interceptors: interceptor_factories(config), pull_caches_lock: Mutex::new(()), hat: hat_code.new_tables(router_peers_failover_brokering), hat_code: hat_code.into(), diff --git a/zenoh/src/net/routing/interceptor/mod.rs b/zenoh/src/net/routing/interceptor/mod.rs index 171d93b021..22e0e4e549 100644 --- a/zenoh/src/net/routing/interceptor/mod.rs +++ b/zenoh/src/net/routing/interceptor/mod.rs @@ -22,58 +22,61 @@ use zenoh_config::Config; use zenoh_protocol::network::NetworkMessage; use zenoh_transport::{multicast::TransportMulticast, unicast::TransportUnicast}; -pub(crate) trait InterceptTrait { +pub(crate) trait InterceptorTrait { fn intercept( &self, ctx: RoutingContext, ) -> Option>; } -pub(crate) type Intercept = Box; -pub(crate) type IngressIntercept = Intercept; -pub(crate) type EgressIntercept = Intercept; +pub(crate) type Interceptor = Box; +pub(crate) type IngressInterceptor = Interceptor; +pub(crate) type EgressInterceptor = Interceptor; -pub(crate) trait InterceptorTrait { +pub(crate) trait InterceptorFactoryTrait { fn new_transport_unicast( &self, transport: &TransportUnicast, - ) -> (Option, Option); - fn new_transport_multicast(&self, transport: &TransportMulticast) -> Option; - fn new_peer_multicast(&self, transport: &TransportMulticast) -> Option; + ) -> (Option, Option); + fn new_transport_multicast(&self, transport: &TransportMulticast) -> Option; + fn new_peer_multicast(&self, transport: &TransportMulticast) -> Option; } -pub(crate) type Interceptor = Box; +pub(crate) type InterceptorFactory = Box; -pub(crate) fn interceptors(_config: &Config) -> Vec { +pub(crate) fn interceptor_factories(_config: &Config) -> Vec { // Add interceptors here + // TODO build the list of intercetors with the correct order from the config // vec![Box::new(LoggerInterceptor {})] vec![] } -pub(crate) struct InterceptsChain { - pub(crate) intercepts: Vec, +pub(crate) struct InterceptorsChain { + pub(crate) interceptors: Vec, } -impl InterceptsChain { +impl 
InterceptorsChain { #[allow(dead_code)] pub(crate) fn empty() -> Self { - Self { intercepts: vec![] } + Self { + interceptors: vec![], + } } } -impl From> for InterceptsChain { - fn from(intercepts: Vec) -> Self { - InterceptsChain { intercepts } +impl From> for InterceptorsChain { + fn from(interceptors: Vec) -> Self { + InterceptorsChain { interceptors } } } -impl InterceptTrait for InterceptsChain { +impl InterceptorTrait for InterceptorsChain { fn intercept( &self, mut ctx: RoutingContext, ) -> Option> { - for intercept in &self.intercepts { - match intercept.intercept(ctx) { + for interceptor in &self.interceptors { + match interceptor.intercept(ctx) { Some(newctx) => ctx = newctx, None => { log::trace!("Msg intercepted!"); @@ -87,7 +90,7 @@ impl InterceptTrait for InterceptsChain { pub(crate) struct IngressMsgLogger {} -impl InterceptTrait for IngressMsgLogger { +impl InterceptorTrait for IngressMsgLogger { fn intercept( &self, ctx: RoutingContext, @@ -105,7 +108,7 @@ impl InterceptTrait for IngressMsgLogger { } pub(crate) struct EgressMsgLogger {} -impl InterceptTrait for EgressMsgLogger { +impl InterceptorTrait for EgressMsgLogger { fn intercept( &self, ctx: RoutingContext, @@ -117,11 +120,11 @@ impl InterceptTrait for EgressMsgLogger { pub(crate) struct LoggerInterceptor {} -impl InterceptorTrait for LoggerInterceptor { +impl InterceptorFactoryTrait for LoggerInterceptor { fn new_transport_unicast( &self, transport: &TransportUnicast, - ) -> (Option, Option) { + ) -> (Option, Option) { log::debug!("New transport unicast {:?}", transport); ( Some(Box::new(IngressMsgLogger {})), @@ -129,12 +132,12 @@ impl InterceptorTrait for LoggerInterceptor { ) } - fn new_transport_multicast(&self, transport: &TransportMulticast) -> Option { + fn new_transport_multicast(&self, transport: &TransportMulticast) -> Option { log::debug!("New transport multicast {:?}", transport); Some(Box::new(EgressMsgLogger {})) } - fn new_peer_multicast(&self, transport: &TransportMulticast) -> Option { + fn new_peer_multicast(&self, transport: &TransportMulticast) -> Option { log::debug!("New peer multicast {:?}", transport); Some(Box::new(IngressMsgLogger {})) } diff --git a/zenoh/src/net/routing/router.rs b/zenoh/src/net/routing/router.rs index 3b6e8eba9c..79bfe70025 100644 --- a/zenoh/src/net/routing/router.rs +++ b/zenoh/src/net/routing/router.rs @@ -18,15 +18,15 @@ pub use super::dispatcher::resource::*; use super::dispatcher::tables::Tables; use super::dispatcher::tables::TablesLock; use super::hat; -use super::interceptor::EgressIntercept; -use super::interceptor::InterceptsChain; +use super::interceptor::EgressInterceptor; +use super::interceptor::InterceptorsChain; use super::runtime::Runtime; use crate::net::primitives::DeMux; use crate::net::primitives::DummyPrimitives; use crate::net::primitives::EPrimitives; use crate::net::primitives::McastMux; use crate::net::primitives::Mux; -use crate::net::routing::interceptor::IngressIntercept; +use crate::net::routing::interceptor::IngressInterceptor; use std::str::FromStr; use std::sync::Arc; use std::sync::{Mutex, RwLock}; @@ -120,8 +120,8 @@ impl Router { .map(|itor| itor.new_transport_unicast(&transport)) .unzip(); let (ingress, egress) = ( - InterceptsChain::from(ingress.into_iter().flatten().collect::>()), - InterceptsChain::from(egress.into_iter().flatten().collect::>()), + InterceptorsChain::from(ingress.into_iter().flatten().collect::>()), + InterceptorsChain::from(egress.into_iter().flatten().collect::>()), ); let newface = tables .faces @@ -162,12 
+162,12 @@ impl Router { let mut tables = zwrite!(self.tables.tables); let fid = tables.face_counter; tables.face_counter += 1; - let intercept = InterceptsChain::from( + let interceptor = InterceptorsChain::from( tables .interceptors .iter() .filter_map(|itor| itor.new_transport_multicast(&transport)) - .collect::>(), + .collect::>(), ); tables.mcast_groups.push(FaceState::new( fid, @@ -180,7 +180,7 @@ impl Router { transport.clone(), fid, self.tables.clone(), - intercept, + interceptor, )), Some(transport), ctrl_lock.new_face(), @@ -201,12 +201,12 @@ impl Router { let mut tables = zwrite!(self.tables.tables); let fid = tables.face_counter; tables.face_counter += 1; - let intercept = InterceptsChain::from( + let interceptor = InterceptorsChain::from( tables .interceptors .iter() .filter_map(|itor| itor.new_peer_multicast(&transport)) - .collect::>(), + .collect::>(), ); let face_state = FaceState::new( fid, @@ -230,7 +230,7 @@ impl Router { state: face_state, }, None, - intercept, + interceptor, ))) } } From a1301d52bf0d400d761d23f6db06bd0f645790c5 Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Wed, 17 Jan 2024 17:02:15 +0100 Subject: [PATCH 037/122] Add missing query routes deactivations --- zenoh/src/net/routing/hat/linkstate_peer/queries.rs | 1 + zenoh/src/net/routing/hat/router/queries.rs | 1 + 2 files changed, 2 insertions(+) diff --git a/zenoh/src/net/routing/hat/linkstate_peer/queries.rs b/zenoh/src/net/routing/hat/linkstate_peer/queries.rs index e2a1ce7a0a..c56ccadc5b 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/queries.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/queries.rs @@ -554,6 +554,7 @@ fn forget_peer_queryable( drop(rtables); let mut wtables = zwrite!(tables.tables); undeclare_peer_queryable(&mut wtables, Some(face), &mut res, peer); + disable_matches_query_routes(&mut wtables, &mut res); drop(wtables); let rtables = zread!(tables.tables); diff --git a/zenoh/src/net/routing/hat/router/queries.rs b/zenoh/src/net/routing/hat/router/queries.rs index 56cbe2b37f..d4125e2561 100644 --- a/zenoh/src/net/routing/hat/router/queries.rs +++ b/zenoh/src/net/routing/hat/router/queries.rs @@ -856,6 +856,7 @@ fn forget_peer_queryable( let local_info = local_router_qabl_info(&wtables, &res); register_router_queryable(&mut wtables, None, &mut res, &local_info, zid); } + disable_matches_query_routes(&mut wtables, &mut res); drop(wtables); let rtables = zread!(tables.tables); From 6d9f2f6015ef05bb43c741a0ecdf77fbbdf4e7a0 Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Wed, 17 Jan 2024 21:57:41 +0100 Subject: [PATCH 038/122] Refactor code --- zenoh/src/net/routing/dispatcher/face.rs | 12 +- zenoh/src/net/routing/dispatcher/pubsub.rs | 108 +++++- zenoh/src/net/routing/dispatcher/queries.rs | 100 +++++- zenoh/src/net/routing/hat/client/pubsub.rs | 159 ++------ zenoh/src/net/routing/hat/client/queries.rs | 108 +----- .../net/routing/hat/linkstate_peer/pubsub.rs | 217 ++--------- .../net/routing/hat/linkstate_peer/queries.rs | 206 ++--------- zenoh/src/net/routing/hat/mod.rs | 20 +- zenoh/src/net/routing/hat/p2p_peer/pubsub.rs | 158 ++------ zenoh/src/net/routing/hat/p2p_peer/queries.rs | 108 +----- zenoh/src/net/routing/hat/router/pubsub.rs | 340 +++--------------- zenoh/src/net/routing/hat/router/queries.rs | 323 +++-------------- 12 files changed, 502 insertions(+), 1357 deletions(-) diff --git a/zenoh/src/net/routing/dispatcher/face.rs b/zenoh/src/net/routing/dispatcher/face.rs index 54251f04b9..35b2a641af 100644 --- a/zenoh/src/net/routing/dispatcher/face.rs +++ 
b/zenoh/src/net/routing/dispatcher/face.rs @@ -136,7 +136,8 @@ impl Primitives for Face { unregister_expr(&self.tables, &mut self.state.clone(), m.id); } zenoh_protocol::network::DeclareBody::DeclareSubscriber(m) => { - ctrl_lock.declare_subscription( + declare_subscription( + &ctrl_lock, &self.tables, &mut self.state.clone(), &m.wire_expr, @@ -145,7 +146,8 @@ impl Primitives for Face { ); } zenoh_protocol::network::DeclareBody::UndeclareSubscriber(m) => { - ctrl_lock.forget_subscription( + undeclare_subscription( + &ctrl_lock, &self.tables, &mut self.state.clone(), &m.ext_wire_expr.wire_expr, @@ -153,7 +155,8 @@ impl Primitives for Face { ); } zenoh_protocol::network::DeclareBody::DeclareQueryable(m) => { - ctrl_lock.declare_queryable( + declare_queryable( + &ctrl_lock, &self.tables, &mut self.state.clone(), &m.wire_expr, @@ -162,7 +165,8 @@ impl Primitives for Face { ); } zenoh_protocol::network::DeclareBody::UndeclareQueryable(m) => { - ctrl_lock.forget_queryable( + undeclare_queryable( + &ctrl_lock, &self.tables, &mut self.state.clone(), &m.ext_wire_expr.wire_expr, diff --git a/zenoh/src/net/routing/dispatcher/pubsub.rs b/zenoh/src/net/routing/dispatcher/pubsub.rs index c9fdce912c..2cdeb7d06f 100644 --- a/zenoh/src/net/routing/dispatcher/pubsub.rs +++ b/zenoh/src/net/routing/dispatcher/pubsub.rs @@ -15,13 +15,15 @@ use super::face::FaceState; use super::resource::{DataRoutes, Direction, PullCaches, Resource}; use super::tables::{NodeId, Route, RoutingExpr, Tables, TablesLock}; use crate::net::routing::dispatcher::face::Face; +use crate::net::routing::hat::HatTrait; use crate::net::routing::RoutingContext; use std::borrow::Cow; use std::collections::HashMap; -use std::sync::Arc; use std::sync::RwLock; +use std::sync::{Arc, MutexGuard}; use zenoh_core::zread; -use zenoh_protocol::core::key_expr::OwnedKeyExpr; +use zenoh_protocol::core::key_expr::{keyexpr, OwnedKeyExpr}; +use zenoh_protocol::network::declare::subscriber::ext::SubscriberInfo; use zenoh_protocol::network::declare::Mode; use zenoh_protocol::{ core::{WhatAmI, WireExpr}, @@ -30,6 +32,108 @@ use zenoh_protocol::{ }; use zenoh_sync::get_mut_unchecked; +pub(crate) fn declare_subscription( + hat_code: &MutexGuard<'_, Box>, + tables: &TablesLock, + face: &mut Arc, + expr: &WireExpr, + sub_info: &SubscriberInfo, + node_id: NodeId, +) { + log::debug!("Declare subscription {}", face); + let rtables = zread!(tables.tables); + match rtables + .get_mapping(face, &expr.scope, expr.mapping) + .cloned() + { + Some(mut prefix) => { + let res = Resource::get_resource(&prefix, &expr.suffix); + let (mut res, mut wtables) = + if res.as_ref().map(|r| r.context.is_some()).unwrap_or(false) { + drop(rtables); + let wtables = zwrite!(tables.tables); + (res.unwrap(), wtables) + } else { + let mut fullexpr = prefix.expr(); + fullexpr.push_str(expr.suffix.as_ref()); + let mut matches = keyexpr::new(fullexpr.as_str()) + .map(|ke| Resource::get_matches(&rtables, ke)) + .unwrap_or_default(); + drop(rtables); + let mut wtables = zwrite!(tables.tables); + let mut res = + Resource::make_resource(&mut wtables, &mut prefix, expr.suffix.as_ref()); + matches.push(Arc::downgrade(&res)); + Resource::match_resource(&wtables, &mut res, matches); + (res, wtables) + }; + + hat_code.declare_subscription(&mut wtables, face, &mut res, sub_info, node_id); + + disable_matches_data_routes(&mut wtables, &mut res); + drop(wtables); + + let rtables = zread!(tables.tables); + let matches_data_routes = compute_matches_data_routes(&rtables, &res); + drop(rtables); + + let wtables 
= zwrite!(tables.tables); + for (mut res, data_routes, matching_pulls) in matches_data_routes { + get_mut_unchecked(&mut res) + .context_mut() + .update_data_routes(data_routes); + get_mut_unchecked(&mut res) + .context_mut() + .update_matching_pulls(matching_pulls); + } + drop(wtables); + } + None => log::error!("Declare subscription for unknown scope {}!", expr.scope), + } +} + +pub(crate) fn undeclare_subscription( + hat_code: &MutexGuard<'_, Box>, + tables: &TablesLock, + face: &mut Arc, + expr: &WireExpr, + node_id: NodeId, +) { + log::debug!("Undeclare subscription {}", face); + let rtables = zread!(tables.tables); + match rtables.get_mapping(face, &expr.scope, expr.mapping) { + Some(prefix) => match Resource::get_resource(prefix, expr.suffix.as_ref()) { + Some(mut res) => { + drop(rtables); + let mut wtables = zwrite!(tables.tables); + + hat_code.undeclare_subscription(&mut wtables, face, &mut res, node_id); + + disable_matches_data_routes(&mut wtables, &mut res); + drop(wtables); + + let rtables = zread!(tables.tables); + let matches_data_routes = compute_matches_data_routes(&rtables, &res); + drop(rtables); + + let wtables = zwrite!(tables.tables); + for (mut res, data_routes, matching_pulls) in matches_data_routes { + get_mut_unchecked(&mut res) + .context_mut() + .update_data_routes(data_routes); + get_mut_unchecked(&mut res) + .context_mut() + .update_matching_pulls(matching_pulls); + } + Resource::clean(&mut res); + drop(wtables); + } + None => log::error!("Undeclare unknown subscription!"), + }, + None => log::error!("Undeclare subscription with unknown scope!"), + } +} + fn compute_data_routes_(tables: &Tables, routes: &mut DataRoutes, expr: &mut RoutingExpr) { let indexes = tables.hat_code.get_data_routes_entries(tables); diff --git a/zenoh/src/net/routing/dispatcher/queries.rs b/zenoh/src/net/routing/dispatcher/queries.rs index 6d5fb3b848..1c6781fac2 100644 --- a/zenoh/src/net/routing/dispatcher/queries.rs +++ b/zenoh/src/net/routing/dispatcher/queries.rs @@ -1,3 +1,4 @@ +use crate::net::routing::hat::HatTrait; use crate::net::routing::RoutingContext; // @@ -19,8 +20,10 @@ use super::tables::NodeId; use super::tables::{RoutingExpr, Tables, TablesLock}; use async_trait::async_trait; use std::collections::HashMap; -use std::sync::{Arc, Weak}; +use std::sync::{Arc, MutexGuard, Weak}; use zenoh_config::WhatAmI; +use zenoh_protocol::core::key_expr::keyexpr; +use zenoh_protocol::network::declare::queryable::ext::QueryableInfo; use zenoh_protocol::{ core::{Encoding, WireExpr}, network::{ @@ -38,6 +41,101 @@ pub(crate) struct Query { src_qid: RequestId, } +pub(crate) fn declare_queryable( + hat_code: &MutexGuard<'_, Box>, + tables: &TablesLock, + face: &mut Arc, + expr: &WireExpr, + qabl_info: &QueryableInfo, + node_id: NodeId, +) { + log::debug!("Register queryable {}", face); + let rtables = zread!(tables.tables); + match rtables + .get_mapping(face, &expr.scope, expr.mapping) + .cloned() + { + Some(mut prefix) => { + let res = Resource::get_resource(&prefix, &expr.suffix); + let (mut res, mut wtables) = + if res.as_ref().map(|r| r.context.is_some()).unwrap_or(false) { + drop(rtables); + let wtables = zwrite!(tables.tables); + (res.unwrap(), wtables) + } else { + let mut fullexpr = prefix.expr(); + fullexpr.push_str(expr.suffix.as_ref()); + let mut matches = keyexpr::new(fullexpr.as_str()) + .map(|ke| Resource::get_matches(&rtables, ke)) + .unwrap_or_default(); + drop(rtables); + let mut wtables = zwrite!(tables.tables); + let mut res = + Resource::make_resource(&mut wtables, 
&mut prefix, expr.suffix.as_ref()); + matches.push(Arc::downgrade(&res)); + Resource::match_resource(&wtables, &mut res, matches); + (res, wtables) + }; + + hat_code.declare_queryable(&mut wtables, face, &mut res, qabl_info, node_id); + + disable_matches_query_routes(&mut wtables, &mut res); + drop(wtables); + + let rtables = zread!(tables.tables); + let matches_query_routes = compute_matches_query_routes(&rtables, &res); + drop(rtables); + + let wtables = zwrite!(tables.tables); + for (mut res, query_routes) in matches_query_routes { + get_mut_unchecked(&mut res) + .context_mut() + .update_query_routes(query_routes); + } + drop(wtables); + } + None => log::error!("Declare queryable for unknown scope {}!", expr.scope), + } +} + +pub(crate) fn undeclare_queryable( + hat_code: &MutexGuard<'_, Box>, + tables: &TablesLock, + face: &mut Arc, + expr: &WireExpr, + node_id: NodeId, +) { + let rtables = zread!(tables.tables); + match rtables.get_mapping(face, &expr.scope, expr.mapping) { + Some(prefix) => match Resource::get_resource(prefix, expr.suffix.as_ref()) { + Some(mut res) => { + drop(rtables); + let mut wtables = zwrite!(tables.tables); + + hat_code.undeclare_queryable(&mut wtables, face, &mut res, node_id); + + disable_matches_query_routes(&mut wtables, &mut res); + drop(wtables); + + let rtables = zread!(tables.tables); + let matches_query_routes = compute_matches_query_routes(&rtables, &res); + drop(rtables); + + let wtables = zwrite!(tables.tables); + for (mut res, query_routes) in matches_query_routes { + get_mut_unchecked(&mut res) + .context_mut() + .update_query_routes(query_routes); + } + Resource::clean(&mut res); + drop(wtables); + } + None => log::error!("Undeclare unknown queryable!"), + }, + None => log::error!("Undeclare queryable with unknown scope!"), + } +} + fn compute_query_routes_(tables: &Tables, routes: &mut QueryRoutes, expr: &mut RoutingExpr) { let indexes = tables.hat_code.get_query_routes_entries(tables); diff --git a/zenoh/src/net/routing/hat/client/pubsub.rs b/zenoh/src/net/routing/hat/client/pubsub.rs index 7bd10c295a..4a1b0add4b 100644 --- a/zenoh/src/net/routing/hat/client/pubsub.rs +++ b/zenoh/src/net/routing/hat/client/pubsub.rs @@ -14,20 +14,18 @@ use super::{face_hat, face_hat_mut, get_routes_entries}; use super::{HatCode, HatFace}; use crate::net::routing::dispatcher::face::FaceState; -use crate::net::routing::dispatcher::pubsub::*; use crate::net::routing::dispatcher::resource::{NodeId, Resource, SessionContext}; +use crate::net::routing::dispatcher::tables::Tables; use crate::net::routing::dispatcher::tables::{Route, RoutingExpr}; -use crate::net::routing::dispatcher::tables::{Tables, TablesLock}; use crate::net::routing::hat::HatPubSubTrait; use crate::net::routing::router::RoutesIndexes; use crate::net::routing::{RoutingContext, PREFIX_LIVELINESS}; use std::borrow::Cow; use std::collections::HashMap; -use std::sync::{Arc, RwLockReadGuard}; -use zenoh_core::zread; +use std::sync::Arc; use zenoh_protocol::core::key_expr::OwnedKeyExpr; use zenoh_protocol::{ - core::{key_expr::keyexpr, Reliability, WhatAmI, WireExpr}, + core::{Reliability, WhatAmI}, network::declare::{ common::ext::WireExprType, ext, subscriber::ext::SubscriberInfo, Declare, DeclareBody, DeclareSubscriber, Mode, UndeclareSubscriber, @@ -122,84 +120,35 @@ fn register_client_subscription( } fn declare_client_subscription( - tables: &TablesLock, - rtables: RwLockReadGuard, + tables: &mut Tables, face: &mut Arc, - expr: &WireExpr, + res: &mut Arc, sub_info: &SubscriberInfo, ) { - 
log::debug!("Register client subscription"); - match rtables - .get_mapping(face, &expr.scope, expr.mapping) - .cloned() - { - Some(mut prefix) => { - let res = Resource::get_resource(&prefix, &expr.suffix); - let (mut res, mut wtables) = - if res.as_ref().map(|r| r.context.is_some()).unwrap_or(false) { - drop(rtables); - let wtables = zwrite!(tables.tables); - (res.unwrap(), wtables) - } else { - let mut fullexpr = prefix.expr(); - fullexpr.push_str(expr.suffix.as_ref()); - let mut matches = keyexpr::new(fullexpr.as_str()) - .map(|ke| Resource::get_matches(&rtables, ke)) - .unwrap_or_default(); - drop(rtables); - let mut wtables = zwrite!(tables.tables); - let mut res = - Resource::make_resource(&mut wtables, &mut prefix, expr.suffix.as_ref()); - matches.push(Arc::downgrade(&res)); - Resource::match_resource(&wtables, &mut res, matches); - (res, wtables) - }; - - register_client_subscription(&mut wtables, face, &mut res, sub_info); - let mut propa_sub_info = *sub_info; - propa_sub_info.mode = Mode::Push; - - propagate_simple_subscription(&mut wtables, &res, &propa_sub_info, face); - // This introduced a buffer overflow on windows - // TODO: Let's deactivate this on windows until Fixed - #[cfg(not(windows))] - for mcast_group in &wtables.mcast_groups { - mcast_group - .primitives - .send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // TODO - wire_expr: res.expr().into(), - ext_info: *sub_info, - }), - }, - res.expr(), - )) - } + register_client_subscription(tables, face, res, sub_info); + let mut propa_sub_info = *sub_info; + propa_sub_info.mode = Mode::Push; - disable_matches_data_routes(&mut wtables, &mut res); - drop(wtables); - - let rtables = zread!(tables.tables); - let matches_data_routes = compute_matches_data_routes(&rtables, &res); - drop(rtables); - - let wtables = zwrite!(tables.tables); - for (mut res, data_routes, matching_pulls) in matches_data_routes { - get_mut_unchecked(&mut res) - .context_mut() - .update_data_routes(data_routes); - get_mut_unchecked(&mut res) - .context_mut() - .update_matching_pulls(matching_pulls); - } - drop(wtables); - } - None => log::error!("Declare subscription for unknown scope {}!", expr.scope), + propagate_simple_subscription(tables, res, &propa_sub_info, face); + // This introduced a buffer overflow on windows + // TODO: Let's deactivate this on windows until Fixed + #[cfg(not(windows))] + for mcast_group in &tables.mcast_groups { + mcast_group + .primitives + .send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id: 0, // TODO + wire_expr: res.expr().into(), + ext_info: *sub_info, + }), + }, + res.expr(), + )) } } @@ -276,42 +225,12 @@ pub(super) fn undeclare_client_subscription( } } } - fn forget_client_subscription( - tables: &TablesLock, - rtables: RwLockReadGuard, + tables: &mut Tables, face: &mut Arc, - expr: &WireExpr, + res: &mut Arc, ) { - match rtables.get_mapping(face, &expr.scope, expr.mapping) { - Some(prefix) => match Resource::get_resource(prefix, expr.suffix.as_ref()) { - Some(mut res) => { - drop(rtables); - let mut wtables = zwrite!(tables.tables); - undeclare_client_subscription(&mut wtables, face, &mut res); - disable_matches_data_routes(&mut wtables, &mut res); - drop(wtables); - - let 
rtables = zread!(tables.tables); - let matches_data_routes = compute_matches_data_routes(&rtables, &res); - drop(rtables); - - let wtables = zwrite!(tables.tables); - for (mut res, data_routes, matching_pulls) in matches_data_routes { - get_mut_unchecked(&mut res) - .context_mut() - .update_data_routes(data_routes); - get_mut_unchecked(&mut res) - .context_mut() - .update_matching_pulls(matching_pulls); - } - Resource::clean(&mut res); - drop(wtables); - } - None => log::error!("Undeclare unknown subscription!"), - }, - None => log::error!("Undeclare subscription with unknown scope!"), - } + undeclare_client_subscription(tables, face, res); } pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { @@ -334,25 +253,23 @@ pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { impl HatPubSubTrait for HatCode { fn declare_subscription( &self, - tables: &TablesLock, + tables: &mut Tables, face: &mut Arc, - expr: &WireExpr, + res: &mut Arc, sub_info: &SubscriberInfo, _node_id: NodeId, ) { - let rtables = zread!(tables.tables); - declare_client_subscription(tables, rtables, face, expr, sub_info); + declare_client_subscription(tables, face, res, sub_info); } - fn forget_subscription( + fn undeclare_subscription( &self, - tables: &TablesLock, + tables: &mut Tables, face: &mut Arc, - expr: &WireExpr, + res: &mut Arc, _node_id: NodeId, ) { - let rtables = zread!(tables.tables); - forget_client_subscription(tables, rtables, face, expr); + forget_client_subscription(tables, face, res); } fn compute_data_route( diff --git a/zenoh/src/net/routing/hat/client/queries.rs b/zenoh/src/net/routing/hat/client/queries.rs index 90404b3d99..7815871bdf 100644 --- a/zenoh/src/net/routing/hat/client/queries.rs +++ b/zenoh/src/net/routing/hat/client/queries.rs @@ -14,22 +14,21 @@ use super::{face_hat, face_hat_mut, get_routes_entries}; use super::{HatCode, HatFace}; use crate::net::routing::dispatcher::face::FaceState; -use crate::net::routing::dispatcher::queries::*; use crate::net::routing::dispatcher::resource::{NodeId, Resource, SessionContext}; +use crate::net::routing::dispatcher::tables::Tables; use crate::net::routing::dispatcher::tables::{QueryTargetQabl, QueryTargetQablSet, RoutingExpr}; -use crate::net::routing::dispatcher::tables::{Tables, TablesLock}; use crate::net::routing::hat::HatQueriesTrait; use crate::net::routing::router::RoutesIndexes; use crate::net::routing::{RoutingContext, PREFIX_LIVELINESS}; use ordered_float::OrderedFloat; use std::borrow::Cow; use std::collections::HashMap; -use std::sync::{Arc, RwLockReadGuard}; +use std::sync::Arc; use zenoh_buffers::ZBuf; use zenoh_protocol::core::key_expr::include::{Includer, DEFAULT_INCLUDER}; use zenoh_protocol::core::key_expr::OwnedKeyExpr; use zenoh_protocol::{ - core::{key_expr::keyexpr, WhatAmI, WireExpr}, + core::{WhatAmI, WireExpr}, network::declare::{ common::ext::WireExprType, ext, queryable::ext::QueryableInfo, Declare, DeclareBody, DeclareQueryable, UndeclareQueryable, @@ -138,58 +137,13 @@ fn register_client_queryable( } fn declare_client_queryable( - tables: &TablesLock, - rtables: RwLockReadGuard, + tables: &mut Tables, face: &mut Arc, - expr: &WireExpr, + res: &mut Arc, qabl_info: &QueryableInfo, ) { - match rtables - .get_mapping(face, &expr.scope, expr.mapping) - .cloned() - { - Some(mut prefix) => { - let res = Resource::get_resource(&prefix, &expr.suffix); - let (mut res, mut wtables) = - if res.as_ref().map(|r| r.context.is_some()).unwrap_or(false) { - drop(rtables); - let wtables = zwrite!(tables.tables); 
- (res.unwrap(), wtables) - } else { - let mut fullexpr = prefix.expr(); - fullexpr.push_str(expr.suffix.as_ref()); - log::debug!("Register client queryable {}", fullexpr); - let mut matches = keyexpr::new(fullexpr.as_str()) - .map(|ke| Resource::get_matches(&rtables, ke)) - .unwrap_or_default(); - drop(rtables); - let mut wtables = zwrite!(tables.tables); - let mut res = - Resource::make_resource(&mut wtables, &mut prefix, expr.suffix.as_ref()); - matches.push(Arc::downgrade(&res)); - Resource::match_resource(&wtables, &mut res, matches); - (res, wtables) - }; - - register_client_queryable(&mut wtables, face, &mut res, qabl_info); - propagate_simple_queryable(&mut wtables, &res, Some(face)); - disable_matches_query_routes(&mut wtables, &mut res); - drop(wtables); - - let rtables = zread!(tables.tables); - let matches_query_routes = compute_matches_query_routes(&rtables, &res); - drop(rtables); - - let wtables = zwrite!(tables.tables); - for (mut res, query_routes) in matches_query_routes { - get_mut_unchecked(&mut res) - .context_mut() - .update_query_routes(query_routes); - } - drop(wtables); - } - None => log::error!("Declare queryable for unknown scope {}!", expr.scope), - } + register_client_queryable(tables, face, res, qabl_info); + propagate_simple_queryable(tables, res, Some(face)); } #[inline] @@ -270,37 +224,11 @@ pub(super) fn undeclare_client_queryable( } fn forget_client_queryable( - tables: &TablesLock, - rtables: RwLockReadGuard, + tables: &mut Tables, face: &mut Arc, - expr: &WireExpr, + res: &mut Arc, ) { - match rtables.get_mapping(face, &expr.scope, expr.mapping) { - Some(prefix) => match Resource::get_resource(prefix, expr.suffix.as_ref()) { - Some(mut res) => { - drop(rtables); - let mut wtables = zwrite!(tables.tables); - undeclare_client_queryable(&mut wtables, face, &mut res); - disable_matches_query_routes(&mut wtables, &mut res); - drop(wtables); - - let rtables = zread!(tables.tables); - let matches_query_routes = compute_matches_query_routes(&rtables, &res); - drop(rtables); - - let wtables = zwrite!(tables.tables); - for (mut res, query_routes) in matches_query_routes { - get_mut_unchecked(&mut res) - .context_mut() - .update_query_routes(query_routes); - } - Resource::clean(&mut res); - drop(wtables); - } - None => log::error!("Undeclare unknown queryable!"), - }, - None => log::error!("Undeclare queryable with unknown scope!"), - } + undeclare_client_queryable(tables, face, res); } pub(super) fn queries_new_face(tables: &mut Tables, _face: &mut Arc) { @@ -323,25 +251,23 @@ lazy_static::lazy_static! 
{ impl HatQueriesTrait for HatCode { fn declare_queryable( &self, - tables: &TablesLock, + tables: &mut Tables, face: &mut Arc, - expr: &WireExpr, + res: &mut Arc, qabl_info: &QueryableInfo, _node_id: NodeId, ) { - let rtables = zread!(tables.tables); - declare_client_queryable(tables, rtables, face, expr, qabl_info); + declare_client_queryable(tables, face, res, qabl_info); } - fn forget_queryable( + fn undeclare_queryable( &self, - tables: &TablesLock, + tables: &mut Tables, face: &mut Arc, - expr: &WireExpr, + res: &mut Arc, _node_id: NodeId, ) { - let rtables = zread!(tables.tables); - forget_client_queryable(tables, rtables, face, expr); + forget_client_queryable(tables, face, res); } fn compute_query_route( diff --git a/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs b/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs index 6ddaae86d2..49f10556c5 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs @@ -17,19 +17,18 @@ use super::{get_peer, HatCode, HatContext, HatFace, HatTables}; use crate::net::routing::dispatcher::face::FaceState; use crate::net::routing::dispatcher::pubsub::*; use crate::net::routing::dispatcher::resource::{NodeId, Resource, SessionContext}; +use crate::net::routing::dispatcher::tables::Tables; use crate::net::routing::dispatcher::tables::{Route, RoutingExpr}; -use crate::net::routing::dispatcher::tables::{Tables, TablesLock}; use crate::net::routing::hat::HatPubSubTrait; use crate::net::routing::router::RoutesIndexes; use crate::net::routing::{RoutingContext, PREFIX_LIVELINESS}; use petgraph::graph::NodeIndex; use std::borrow::Cow; use std::collections::{HashMap, HashSet}; -use std::sync::{Arc, RwLockReadGuard}; -use zenoh_core::zread; +use std::sync::Arc; use zenoh_protocol::core::key_expr::OwnedKeyExpr; use zenoh_protocol::{ - core::{key_expr::keyexpr, Reliability, WhatAmI, WireExpr, ZenohId}, + core::{Reliability, WhatAmI, ZenohId}, network::declare::{ common::ext::WireExprType, ext, subscriber::ext::SubscriberInfo, Declare, DeclareBody, DeclareSubscriber, Mode, UndeclareSubscriber, @@ -188,62 +187,13 @@ fn register_peer_subscription( } fn declare_peer_subscription( - tables: &TablesLock, - rtables: RwLockReadGuard, + tables: &mut Tables, face: &mut Arc, - expr: &WireExpr, + res: &mut Arc, sub_info: &SubscriberInfo, peer: ZenohId, ) { - match rtables - .get_mapping(face, &expr.scope, expr.mapping) - .cloned() - { - Some(mut prefix) => { - let res = Resource::get_resource(&prefix, &expr.suffix); - let (mut res, mut wtables) = - if res.as_ref().map(|r| r.context.is_some()).unwrap_or(false) { - drop(rtables); - let wtables = zwrite!(tables.tables); - (res.unwrap(), wtables) - } else { - let mut fullexpr = prefix.expr(); - fullexpr.push_str(expr.suffix.as_ref()); - let mut matches = keyexpr::new(fullexpr.as_str()) - .map(|ke| Resource::get_matches(&rtables, ke)) - .unwrap_or_default(); - drop(rtables); - let mut wtables = zwrite!(tables.tables); - let mut res = - Resource::make_resource(&mut wtables, &mut prefix, expr.suffix.as_ref()); - matches.push(Arc::downgrade(&res)); - Resource::match_resource(&wtables, &mut res, matches); - (res, wtables) - }; - register_peer_subscription(&mut wtables, face, &mut res, sub_info, peer); - disable_matches_data_routes(&mut wtables, &mut res); - drop(wtables); - - let rtables = zread!(tables.tables); - let matches_data_routes = compute_matches_data_routes(&rtables, &res); - drop(rtables); - - let wtables = zwrite!(tables.tables); - for (mut res, 
data_routes, matching_pulls) in matches_data_routes { - get_mut_unchecked(&mut res) - .context_mut() - .update_data_routes(data_routes); - get_mut_unchecked(&mut res) - .context_mut() - .update_matching_pulls(matching_pulls); - } - drop(wtables); - } - None => log::error!( - "Declare router subscription for unknown scope {}!", - expr.scope - ), - } + register_peer_subscription(tables, face, res, sub_info, peer); } fn register_client_subscription( @@ -286,64 +236,16 @@ fn register_client_subscription( } fn declare_client_subscription( - tables: &TablesLock, - rtables: RwLockReadGuard, + tables: &mut Tables, face: &mut Arc, - expr: &WireExpr, + res: &mut Arc, sub_info: &SubscriberInfo, ) { - log::debug!("Register client subscription"); - match rtables - .get_mapping(face, &expr.scope, expr.mapping) - .cloned() - { - Some(mut prefix) => { - let res = Resource::get_resource(&prefix, &expr.suffix); - let (mut res, mut wtables) = - if res.as_ref().map(|r| r.context.is_some()).unwrap_or(false) { - drop(rtables); - let wtables = zwrite!(tables.tables); - (res.unwrap(), wtables) - } else { - let mut fullexpr = prefix.expr(); - fullexpr.push_str(expr.suffix.as_ref()); - let mut matches = keyexpr::new(fullexpr.as_str()) - .map(|ke| Resource::get_matches(&rtables, ke)) - .unwrap_or_default(); - drop(rtables); - let mut wtables = zwrite!(tables.tables); - let mut res = - Resource::make_resource(&mut wtables, &mut prefix, expr.suffix.as_ref()); - matches.push(Arc::downgrade(&res)); - Resource::match_resource(&wtables, &mut res, matches); - (res, wtables) - }; - - register_client_subscription(&mut wtables, face, &mut res, sub_info); - let mut propa_sub_info = *sub_info; - propa_sub_info.mode = Mode::Push; - let zid = wtables.zid; - register_peer_subscription(&mut wtables, face, &mut res, &propa_sub_info, zid); - disable_matches_data_routes(&mut wtables, &mut res); - drop(wtables); - - let rtables = zread!(tables.tables); - let matches_data_routes = compute_matches_data_routes(&rtables, &res); - drop(rtables); - - let wtables = zwrite!(tables.tables); - for (mut res, data_routes, matching_pulls) in matches_data_routes { - get_mut_unchecked(&mut res) - .context_mut() - .update_data_routes(data_routes); - get_mut_unchecked(&mut res) - .context_mut() - .update_matching_pulls(matching_pulls); - } - drop(wtables); - } - None => log::error!("Declare subscription for unknown scope {}!", expr.scope), - } + register_client_subscription(tables, face, res, sub_info); + let mut propa_sub_info = *sub_info; + propa_sub_info.mode = Mode::Push; + let zid = tables.zid; + register_peer_subscription(tables, face, res, &propa_sub_info, zid); } #[inline] @@ -497,40 +399,12 @@ fn undeclare_peer_subscription( } fn forget_peer_subscription( - tables: &TablesLock, - rtables: RwLockReadGuard, + tables: &mut Tables, face: &mut Arc, - expr: &WireExpr, + res: &mut Arc, peer: &ZenohId, ) { - match rtables.get_mapping(face, &expr.scope, expr.mapping) { - Some(prefix) => match Resource::get_resource(prefix, expr.suffix.as_ref()) { - Some(mut res) => { - drop(rtables); - let mut wtables = zwrite!(tables.tables); - undeclare_peer_subscription(&mut wtables, Some(face), &mut res, peer); - disable_matches_data_routes(&mut wtables, &mut res); - drop(wtables); - - let rtables = zread!(tables.tables); - let matches_data_routes = compute_matches_data_routes(&rtables, &res); - drop(rtables); - let wtables = zwrite!(tables.tables); - for (mut res, data_routes, matching_pulls) in matches_data_routes { - get_mut_unchecked(&mut res) - .context_mut() 
- .update_data_routes(data_routes); - get_mut_unchecked(&mut res) - .context_mut() - .update_matching_pulls(matching_pulls); - } - Resource::clean(&mut res); - drop(wtables); - } - None => log::error!("Undeclare unknown peer subscription!"), - }, - None => log::error!("Undeclare peer subscription with unknown scope!"), - } + undeclare_peer_subscription(tables, Some(face), res, peer); } pub(super) fn undeclare_client_subscription( @@ -574,40 +448,11 @@ pub(super) fn undeclare_client_subscription( } fn forget_client_subscription( - tables: &TablesLock, - rtables: RwLockReadGuard, + tables: &mut Tables, face: &mut Arc, - expr: &WireExpr, + res: &mut Arc, ) { - match rtables.get_mapping(face, &expr.scope, expr.mapping) { - Some(prefix) => match Resource::get_resource(prefix, expr.suffix.as_ref()) { - Some(mut res) => { - drop(rtables); - let mut wtables = zwrite!(tables.tables); - undeclare_client_subscription(&mut wtables, face, &mut res); - disable_matches_data_routes(&mut wtables, &mut res); - drop(wtables); - - let rtables = zread!(tables.tables); - let matches_data_routes = compute_matches_data_routes(&rtables, &res); - drop(rtables); - - let wtables = zwrite!(tables.tables); - for (mut res, data_routes, matching_pulls) in matches_data_routes { - get_mut_unchecked(&mut res) - .context_mut() - .update_data_routes(data_routes); - get_mut_unchecked(&mut res) - .context_mut() - .update_matching_pulls(matching_pulls); - } - Resource::clean(&mut res); - drop(wtables); - } - None => log::error!("Undeclare unknown subscription!"), - }, - None => log::error!("Undeclare subscription with unknown scope!"), - } + undeclare_client_subscription(tables, face, res); } pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { @@ -727,36 +572,34 @@ fn insert_faces_for_subs( impl HatPubSubTrait for HatCode { fn declare_subscription( &self, - tables: &TablesLock, + tables: &mut Tables, face: &mut Arc, - expr: &WireExpr, + res: &mut Arc, sub_info: &SubscriberInfo, node_id: NodeId, ) { - let rtables = zread!(tables.tables); if face.whatami != WhatAmI::Client { - if let Some(peer) = get_peer(&rtables, face, node_id) { - declare_peer_subscription(tables, rtables, face, expr, sub_info, peer) + if let Some(peer) = get_peer(tables, face, node_id) { + declare_peer_subscription(tables, face, res, sub_info, peer) } } else { - declare_client_subscription(tables, rtables, face, expr, sub_info) + declare_client_subscription(tables, face, res, sub_info) } } - fn forget_subscription( + fn undeclare_subscription( &self, - tables: &TablesLock, + tables: &mut Tables, face: &mut Arc, - expr: &WireExpr, + res: &mut Arc, node_id: NodeId, ) { - let rtables = zread!(tables.tables); if face.whatami != WhatAmI::Client { - if let Some(peer) = get_peer(&rtables, face, node_id) { - forget_peer_subscription(tables, rtables, face, expr, &peer); + if let Some(peer) = get_peer(tables, face, node_id) { + forget_peer_subscription(tables, face, res, &peer); } } else { - forget_client_subscription(tables, rtables, face, expr); + forget_client_subscription(tables, face, res); } } diff --git a/zenoh/src/net/routing/hat/linkstate_peer/queries.rs b/zenoh/src/net/routing/hat/linkstate_peer/queries.rs index c56ccadc5b..8f32cbfcac 100644 --- a/zenoh/src/net/routing/hat/linkstate_peer/queries.rs +++ b/zenoh/src/net/routing/hat/linkstate_peer/queries.rs @@ -17,8 +17,8 @@ use super::{get_peer, HatCode, HatContext, HatFace, HatTables}; use crate::net::routing::dispatcher::face::FaceState; use crate::net::routing::dispatcher::queries::*; use 
crate::net::routing::dispatcher::resource::{NodeId, Resource, SessionContext}; +use crate::net::routing::dispatcher::tables::Tables; use crate::net::routing::dispatcher::tables::{QueryTargetQabl, QueryTargetQablSet, RoutingExpr}; -use crate::net::routing::dispatcher::tables::{Tables, TablesLock}; use crate::net::routing::hat::HatQueriesTrait; use crate::net::routing::router::RoutesIndexes; use crate::net::routing::{RoutingContext, PREFIX_LIVELINESS}; @@ -26,12 +26,12 @@ use ordered_float::OrderedFloat; use petgraph::graph::NodeIndex; use std::borrow::Cow; use std::collections::HashMap; -use std::sync::{Arc, RwLockReadGuard}; +use std::sync::Arc; use zenoh_buffers::ZBuf; use zenoh_protocol::core::key_expr::include::{Includer, DEFAULT_INCLUDER}; use zenoh_protocol::core::key_expr::OwnedKeyExpr; use zenoh_protocol::{ - core::{key_expr::keyexpr, WhatAmI, WireExpr, ZenohId}, + core::{WhatAmI, WireExpr, ZenohId}, network::declare::{ common::ext::WireExprType, ext, queryable::ext::QueryableInfo, Declare, DeclareBody, DeclareQueryable, UndeclareQueryable, @@ -257,58 +257,14 @@ fn register_peer_queryable( } fn declare_peer_queryable( - tables: &TablesLock, - rtables: RwLockReadGuard, + tables: &mut Tables, face: &mut Arc, - expr: &WireExpr, + res: &mut Arc, qabl_info: &QueryableInfo, peer: ZenohId, ) { - match rtables - .get_mapping(face, &expr.scope, expr.mapping) - .cloned() - { - Some(mut prefix) => { - let res = Resource::get_resource(&prefix, &expr.suffix); - let (mut res, mut wtables) = - if res.as_ref().map(|r| r.context.is_some()).unwrap_or(false) { - drop(rtables); - let wtables = zwrite!(tables.tables); - (res.unwrap(), wtables) - } else { - let mut fullexpr = prefix.expr(); - fullexpr.push_str(expr.suffix.as_ref()); - log::debug!("Register peer queryable {}", fullexpr); - let mut matches = keyexpr::new(fullexpr.as_str()) - .map(|ke| Resource::get_matches(&rtables, ke)) - .unwrap_or_default(); - drop(rtables); - let mut wtables = zwrite!(tables.tables); - let mut res = - Resource::make_resource(&mut wtables, &mut prefix, expr.suffix.as_ref()); - matches.push(Arc::downgrade(&res)); - Resource::match_resource(&wtables, &mut res, matches); - (res, wtables) - }; - let face = Some(face); - register_peer_queryable(&mut wtables, face, &mut res, qabl_info, peer); - disable_matches_query_routes(&mut wtables, &mut res); - drop(wtables); - - let rtables = zread!(tables.tables); - let matches_query_routes = compute_matches_query_routes(&rtables, &res); - drop(rtables); - - let wtables = zwrite!(tables.tables); - for (mut res, query_routes) in matches_query_routes { - get_mut_unchecked(&mut res) - .context_mut() - .update_query_routes(query_routes); - } - drop(wtables); - } - None => log::error!("Declare router queryable for unknown scope {}!", expr.scope), - } + let face = Some(face); + register_peer_queryable(tables, face, res, qabl_info, peer); } fn register_client_queryable( @@ -337,61 +293,16 @@ fn register_client_queryable( } fn declare_client_queryable( - tables: &TablesLock, - rtables: RwLockReadGuard, + tables: &mut Tables, face: &mut Arc, - expr: &WireExpr, + res: &mut Arc, qabl_info: &QueryableInfo, ) { - match rtables - .get_mapping(face, &expr.scope, expr.mapping) - .cloned() - { - Some(mut prefix) => { - let res = Resource::get_resource(&prefix, &expr.suffix); - let (mut res, mut wtables) = - if res.as_ref().map(|r| r.context.is_some()).unwrap_or(false) { - drop(rtables); - let wtables = zwrite!(tables.tables); - (res.unwrap(), wtables) - } else { - let mut fullexpr = prefix.expr(); - 
fullexpr.push_str(expr.suffix.as_ref()); - log::debug!("Register client queryable {}", fullexpr); - let mut matches = keyexpr::new(fullexpr.as_str()) - .map(|ke| Resource::get_matches(&rtables, ke)) - .unwrap_or_default(); - drop(rtables); - let mut wtables = zwrite!(tables.tables); - let mut res = - Resource::make_resource(&mut wtables, &mut prefix, expr.suffix.as_ref()); - matches.push(Arc::downgrade(&res)); - Resource::match_resource(&wtables, &mut res, matches); - (res, wtables) - }; - - register_client_queryable(&mut wtables, face, &mut res, qabl_info); - - let local_details = local_peer_qabl_info(&wtables, &res); - let zid = wtables.zid; - register_peer_queryable(&mut wtables, Some(face), &mut res, &local_details, zid); - disable_matches_query_routes(&mut wtables, &mut res); - drop(wtables); - - let rtables = zread!(tables.tables); - let matches_query_routes = compute_matches_query_routes(&rtables, &res); - drop(rtables); - - let wtables = zwrite!(tables.tables); - for (mut res, query_routes) in matches_query_routes { - get_mut_unchecked(&mut res) - .context_mut() - .update_query_routes(query_routes); - } - drop(wtables); - } - None => log::error!("Declare queryable for unknown scope {}!", expr.scope), - } + register_client_queryable(tables, face, res, qabl_info); + + let local_details = local_peer_qabl_info(tables, res); + let zid = tables.zid; + register_peer_queryable(tables, Some(face), res, &local_details, zid); } #[inline] @@ -542,38 +453,12 @@ fn undeclare_peer_queryable( } fn forget_peer_queryable( - tables: &TablesLock, - rtables: RwLockReadGuard, + tables: &mut Tables, face: &mut Arc, - expr: &WireExpr, + res: &mut Arc, peer: &ZenohId, ) { - match rtables.get_mapping(face, &expr.scope, expr.mapping) { - Some(prefix) => match Resource::get_resource(prefix, expr.suffix.as_ref()) { - Some(mut res) => { - drop(rtables); - let mut wtables = zwrite!(tables.tables); - undeclare_peer_queryable(&mut wtables, Some(face), &mut res, peer); - disable_matches_query_routes(&mut wtables, &mut res); - drop(wtables); - - let rtables = zread!(tables.tables); - let matches_query_routes = compute_matches_query_routes(&rtables, &res); - drop(rtables); - - let wtables = zwrite!(tables.tables); - for (mut res, query_routes) in matches_query_routes { - get_mut_unchecked(&mut res) - .context_mut() - .update_query_routes(query_routes); - } - Resource::clean(&mut res); - drop(wtables); - } - None => log::error!("Undeclare unknown peer queryable!"), - }, - None => log::error!("Undeclare peer queryable with unknown scope!"), - } + undeclare_peer_queryable(tables, Some(face), res, peer); } pub(super) fn undeclare_client_queryable( @@ -622,37 +507,11 @@ pub(super) fn undeclare_client_queryable( } fn forget_client_queryable( - tables: &TablesLock, - rtables: RwLockReadGuard, + tables: &mut Tables, face: &mut Arc, - expr: &WireExpr, + res: &mut Arc, ) { - match rtables.get_mapping(face, &expr.scope, expr.mapping) { - Some(prefix) => match Resource::get_resource(prefix, expr.suffix.as_ref()) { - Some(mut res) => { - drop(rtables); - let mut wtables = zwrite!(tables.tables); - undeclare_client_queryable(&mut wtables, face, &mut res); - disable_matches_query_routes(&mut wtables, &mut res); - drop(wtables); - - let rtables = zread!(tables.tables); - let matches_query_routes = compute_matches_query_routes(&rtables, &res); - drop(rtables); - - let wtables = zwrite!(tables.tables); - for (mut res, query_routes) in matches_query_routes { - get_mut_unchecked(&mut res) - .context_mut() - 
.update_query_routes(query_routes); - } - Resource::clean(&mut res); - drop(wtables); - } - None => log::error!("Undeclare unknown queryable!"), - }, - None => log::error!("Undeclare queryable with unknown scope!"), - } + undeclare_client_queryable(tables, face, res); } pub(super) fn queries_new_face(tables: &mut Tables, face: &mut Arc) { @@ -780,37 +639,34 @@ lazy_static::lazy_static! { impl HatQueriesTrait for HatCode { fn declare_queryable( &self, - tables: &TablesLock, + tables: &mut Tables, face: &mut Arc, - expr: &WireExpr, + res: &mut Arc, qabl_info: &QueryableInfo, node_id: NodeId, ) { - let rtables = zread!(tables.tables); if face.whatami != WhatAmI::Client { - if let Some(peer) = get_peer(&rtables, face, node_id) { - declare_peer_queryable(tables, rtables, face, expr, qabl_info, peer); + if let Some(peer) = get_peer(tables, face, node_id) { + declare_peer_queryable(tables, face, res, qabl_info, peer); } } else { - declare_client_queryable(tables, rtables, face, expr, qabl_info); + declare_client_queryable(tables, face, res, qabl_info); } } - fn forget_queryable( + fn undeclare_queryable( &self, - tables: &TablesLock, + tables: &mut Tables, face: &mut Arc, - expr: &WireExpr, + res: &mut Arc, node_id: NodeId, ) { - let rtables = zread!(tables.tables); - if face.whatami != WhatAmI::Client { - if let Some(peer) = get_peer(&rtables, face, node_id) { - forget_peer_queryable(tables, rtables, face, expr, &peer); + if let Some(peer) = get_peer(tables, face, node_id) { + forget_peer_queryable(tables, face, res, &peer); } } else { - forget_client_queryable(tables, rtables, face, expr); + forget_client_queryable(tables, face, res); } } diff --git a/zenoh/src/net/routing/hat/mod.rs b/zenoh/src/net/routing/hat/mod.rs index cdbc7488e5..83064136df 100644 --- a/zenoh/src/net/routing/hat/mod.rs +++ b/zenoh/src/net/routing/hat/mod.rs @@ -114,17 +114,17 @@ pub(crate) trait HatBaseTrait { pub(crate) trait HatPubSubTrait { fn declare_subscription( &self, - tables: &TablesLock, + tables: &mut Tables, face: &mut Arc, - expr: &WireExpr, + res: &mut Arc, sub_info: &SubscriberInfo, node_id: NodeId, ); - fn forget_subscription( + fn undeclare_subscription( &self, - tables: &TablesLock, + tables: &mut Tables, face: &mut Arc, - expr: &WireExpr, + res: &mut Arc, node_id: NodeId, ); @@ -142,17 +142,17 @@ pub(crate) trait HatPubSubTrait { pub(crate) trait HatQueriesTrait { fn declare_queryable( &self, - tables: &TablesLock, + tables: &mut Tables, face: &mut Arc, - expr: &WireExpr, + res: &mut Arc, qabl_info: &QueryableInfo, node_id: NodeId, ); - fn forget_queryable( + fn undeclare_queryable( &self, - tables: &TablesLock, + tables: &mut Tables, face: &mut Arc, - expr: &WireExpr, + res: &mut Arc, node_id: NodeId, ); fn compute_query_route( diff --git a/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs b/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs index 7bd10c295a..986b9cab25 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs @@ -14,20 +14,18 @@ use super::{face_hat, face_hat_mut, get_routes_entries}; use super::{HatCode, HatFace}; use crate::net::routing::dispatcher::face::FaceState; -use crate::net::routing::dispatcher::pubsub::*; use crate::net::routing::dispatcher::resource::{NodeId, Resource, SessionContext}; +use crate::net::routing::dispatcher::tables::Tables; use crate::net::routing::dispatcher::tables::{Route, RoutingExpr}; -use crate::net::routing::dispatcher::tables::{Tables, TablesLock}; use crate::net::routing::hat::HatPubSubTrait; use 
crate::net::routing::router::RoutesIndexes; use crate::net::routing::{RoutingContext, PREFIX_LIVELINESS}; use std::borrow::Cow; use std::collections::HashMap; -use std::sync::{Arc, RwLockReadGuard}; -use zenoh_core::zread; +use std::sync::Arc; use zenoh_protocol::core::key_expr::OwnedKeyExpr; use zenoh_protocol::{ - core::{key_expr::keyexpr, Reliability, WhatAmI, WireExpr}, + core::{Reliability, WhatAmI}, network::declare::{ common::ext::WireExprType, ext, subscriber::ext::SubscriberInfo, Declare, DeclareBody, DeclareSubscriber, Mode, UndeclareSubscriber, @@ -122,84 +120,35 @@ fn register_client_subscription( } fn declare_client_subscription( - tables: &TablesLock, - rtables: RwLockReadGuard, + tables: &mut Tables, face: &mut Arc, - expr: &WireExpr, + res: &mut Arc, sub_info: &SubscriberInfo, ) { - log::debug!("Register client subscription"); - match rtables - .get_mapping(face, &expr.scope, expr.mapping) - .cloned() - { - Some(mut prefix) => { - let res = Resource::get_resource(&prefix, &expr.suffix); - let (mut res, mut wtables) = - if res.as_ref().map(|r| r.context.is_some()).unwrap_or(false) { - drop(rtables); - let wtables = zwrite!(tables.tables); - (res.unwrap(), wtables) - } else { - let mut fullexpr = prefix.expr(); - fullexpr.push_str(expr.suffix.as_ref()); - let mut matches = keyexpr::new(fullexpr.as_str()) - .map(|ke| Resource::get_matches(&rtables, ke)) - .unwrap_or_default(); - drop(rtables); - let mut wtables = zwrite!(tables.tables); - let mut res = - Resource::make_resource(&mut wtables, &mut prefix, expr.suffix.as_ref()); - matches.push(Arc::downgrade(&res)); - Resource::match_resource(&wtables, &mut res, matches); - (res, wtables) - }; - - register_client_subscription(&mut wtables, face, &mut res, sub_info); - let mut propa_sub_info = *sub_info; - propa_sub_info.mode = Mode::Push; - - propagate_simple_subscription(&mut wtables, &res, &propa_sub_info, face); - // This introduced a buffer overflow on windows - // TODO: Let's deactivate this on windows until Fixed - #[cfg(not(windows))] - for mcast_group in &wtables.mcast_groups { - mcast_group - .primitives - .send_declare(RoutingContext::with_expr( - Declare { - ext_qos: ext::QoSType::declare_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - body: DeclareBody::DeclareSubscriber(DeclareSubscriber { - id: 0, // TODO - wire_expr: res.expr().into(), - ext_info: *sub_info, - }), - }, - res.expr(), - )) - } + register_client_subscription(tables, face, res, sub_info); + let mut propa_sub_info = *sub_info; + propa_sub_info.mode = Mode::Push; - disable_matches_data_routes(&mut wtables, &mut res); - drop(wtables); - - let rtables = zread!(tables.tables); - let matches_data_routes = compute_matches_data_routes(&rtables, &res); - drop(rtables); - - let wtables = zwrite!(tables.tables); - for (mut res, data_routes, matching_pulls) in matches_data_routes { - get_mut_unchecked(&mut res) - .context_mut() - .update_data_routes(data_routes); - get_mut_unchecked(&mut res) - .context_mut() - .update_matching_pulls(matching_pulls); - } - drop(wtables); - } - None => log::error!("Declare subscription for unknown scope {}!", expr.scope), + propagate_simple_subscription(tables, res, &propa_sub_info, face); + // This introduced a buffer overflow on windows + // TODO: Let's deactivate this on windows until Fixed + #[cfg(not(windows))] + for mcast_group in &tables.mcast_groups { + mcast_group + .primitives + .send_declare(RoutingContext::with_expr( + Declare { + ext_qos: ext::QoSType::declare_default(), + ext_tstamp: None, 
+ ext_nodeid: ext::NodeIdType::default(), + body: DeclareBody::DeclareSubscriber(DeclareSubscriber { + id: 0, // TODO + wire_expr: res.expr().into(), + ext_info: *sub_info, + }), + }, + res.expr(), + )) } } @@ -278,40 +227,11 @@ pub(super) fn undeclare_client_subscription( } fn forget_client_subscription( - tables: &TablesLock, - rtables: RwLockReadGuard, + tables: &mut Tables, face: &mut Arc, - expr: &WireExpr, + res: &mut Arc, ) { - match rtables.get_mapping(face, &expr.scope, expr.mapping) { - Some(prefix) => match Resource::get_resource(prefix, expr.suffix.as_ref()) { - Some(mut res) => { - drop(rtables); - let mut wtables = zwrite!(tables.tables); - undeclare_client_subscription(&mut wtables, face, &mut res); - disable_matches_data_routes(&mut wtables, &mut res); - drop(wtables); - - let rtables = zread!(tables.tables); - let matches_data_routes = compute_matches_data_routes(&rtables, &res); - drop(rtables); - - let wtables = zwrite!(tables.tables); - for (mut res, data_routes, matching_pulls) in matches_data_routes { - get_mut_unchecked(&mut res) - .context_mut() - .update_data_routes(data_routes); - get_mut_unchecked(&mut res) - .context_mut() - .update_matching_pulls(matching_pulls); - } - Resource::clean(&mut res); - drop(wtables); - } - None => log::error!("Undeclare unknown subscription!"), - }, - None => log::error!("Undeclare subscription with unknown scope!"), - } + undeclare_client_subscription(tables, face, res); } pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { @@ -334,25 +254,23 @@ pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { impl HatPubSubTrait for HatCode { fn declare_subscription( &self, - tables: &TablesLock, + tables: &mut Tables, face: &mut Arc, - expr: &WireExpr, + res: &mut Arc, sub_info: &SubscriberInfo, _node_id: NodeId, ) { - let rtables = zread!(tables.tables); - declare_client_subscription(tables, rtables, face, expr, sub_info); + declare_client_subscription(tables, face, res, sub_info); } - fn forget_subscription( + fn undeclare_subscription( &self, - tables: &TablesLock, + tables: &mut Tables, face: &mut Arc, - expr: &WireExpr, + res: &mut Arc, _node_id: NodeId, ) { - let rtables = zread!(tables.tables); - forget_client_subscription(tables, rtables, face, expr); + forget_client_subscription(tables, face, res); } fn compute_data_route( diff --git a/zenoh/src/net/routing/hat/p2p_peer/queries.rs b/zenoh/src/net/routing/hat/p2p_peer/queries.rs index 90404b3d99..7815871bdf 100644 --- a/zenoh/src/net/routing/hat/p2p_peer/queries.rs +++ b/zenoh/src/net/routing/hat/p2p_peer/queries.rs @@ -14,22 +14,21 @@ use super::{face_hat, face_hat_mut, get_routes_entries}; use super::{HatCode, HatFace}; use crate::net::routing::dispatcher::face::FaceState; -use crate::net::routing::dispatcher::queries::*; use crate::net::routing::dispatcher::resource::{NodeId, Resource, SessionContext}; +use crate::net::routing::dispatcher::tables::Tables; use crate::net::routing::dispatcher::tables::{QueryTargetQabl, QueryTargetQablSet, RoutingExpr}; -use crate::net::routing::dispatcher::tables::{Tables, TablesLock}; use crate::net::routing::hat::HatQueriesTrait; use crate::net::routing::router::RoutesIndexes; use crate::net::routing::{RoutingContext, PREFIX_LIVELINESS}; use ordered_float::OrderedFloat; use std::borrow::Cow; use std::collections::HashMap; -use std::sync::{Arc, RwLockReadGuard}; +use std::sync::Arc; use zenoh_buffers::ZBuf; use zenoh_protocol::core::key_expr::include::{Includer, DEFAULT_INCLUDER}; use 
zenoh_protocol::core::key_expr::OwnedKeyExpr; use zenoh_protocol::{ - core::{key_expr::keyexpr, WhatAmI, WireExpr}, + core::{WhatAmI, WireExpr}, network::declare::{ common::ext::WireExprType, ext, queryable::ext::QueryableInfo, Declare, DeclareBody, DeclareQueryable, UndeclareQueryable, @@ -138,58 +137,13 @@ fn register_client_queryable( } fn declare_client_queryable( - tables: &TablesLock, - rtables: RwLockReadGuard, + tables: &mut Tables, face: &mut Arc, - expr: &WireExpr, + res: &mut Arc, qabl_info: &QueryableInfo, ) { - match rtables - .get_mapping(face, &expr.scope, expr.mapping) - .cloned() - { - Some(mut prefix) => { - let res = Resource::get_resource(&prefix, &expr.suffix); - let (mut res, mut wtables) = - if res.as_ref().map(|r| r.context.is_some()).unwrap_or(false) { - drop(rtables); - let wtables = zwrite!(tables.tables); - (res.unwrap(), wtables) - } else { - let mut fullexpr = prefix.expr(); - fullexpr.push_str(expr.suffix.as_ref()); - log::debug!("Register client queryable {}", fullexpr); - let mut matches = keyexpr::new(fullexpr.as_str()) - .map(|ke| Resource::get_matches(&rtables, ke)) - .unwrap_or_default(); - drop(rtables); - let mut wtables = zwrite!(tables.tables); - let mut res = - Resource::make_resource(&mut wtables, &mut prefix, expr.suffix.as_ref()); - matches.push(Arc::downgrade(&res)); - Resource::match_resource(&wtables, &mut res, matches); - (res, wtables) - }; - - register_client_queryable(&mut wtables, face, &mut res, qabl_info); - propagate_simple_queryable(&mut wtables, &res, Some(face)); - disable_matches_query_routes(&mut wtables, &mut res); - drop(wtables); - - let rtables = zread!(tables.tables); - let matches_query_routes = compute_matches_query_routes(&rtables, &res); - drop(rtables); - - let wtables = zwrite!(tables.tables); - for (mut res, query_routes) in matches_query_routes { - get_mut_unchecked(&mut res) - .context_mut() - .update_query_routes(query_routes); - } - drop(wtables); - } - None => log::error!("Declare queryable for unknown scope {}!", expr.scope), - } + register_client_queryable(tables, face, res, qabl_info); + propagate_simple_queryable(tables, res, Some(face)); } #[inline] @@ -270,37 +224,11 @@ pub(super) fn undeclare_client_queryable( } fn forget_client_queryable( - tables: &TablesLock, - rtables: RwLockReadGuard, + tables: &mut Tables, face: &mut Arc, - expr: &WireExpr, + res: &mut Arc, ) { - match rtables.get_mapping(face, &expr.scope, expr.mapping) { - Some(prefix) => match Resource::get_resource(prefix, expr.suffix.as_ref()) { - Some(mut res) => { - drop(rtables); - let mut wtables = zwrite!(tables.tables); - undeclare_client_queryable(&mut wtables, face, &mut res); - disable_matches_query_routes(&mut wtables, &mut res); - drop(wtables); - - let rtables = zread!(tables.tables); - let matches_query_routes = compute_matches_query_routes(&rtables, &res); - drop(rtables); - - let wtables = zwrite!(tables.tables); - for (mut res, query_routes) in matches_query_routes { - get_mut_unchecked(&mut res) - .context_mut() - .update_query_routes(query_routes); - } - Resource::clean(&mut res); - drop(wtables); - } - None => log::error!("Undeclare unknown queryable!"), - }, - None => log::error!("Undeclare queryable with unknown scope!"), - } + undeclare_client_queryable(tables, face, res); } pub(super) fn queries_new_face(tables: &mut Tables, _face: &mut Arc) { @@ -323,25 +251,23 @@ lazy_static::lazy_static! 
{ impl HatQueriesTrait for HatCode { fn declare_queryable( &self, - tables: &TablesLock, + tables: &mut Tables, face: &mut Arc, - expr: &WireExpr, + res: &mut Arc, qabl_info: &QueryableInfo, _node_id: NodeId, ) { - let rtables = zread!(tables.tables); - declare_client_queryable(tables, rtables, face, expr, qabl_info); + declare_client_queryable(tables, face, res, qabl_info); } - fn forget_queryable( + fn undeclare_queryable( &self, - tables: &TablesLock, + tables: &mut Tables, face: &mut Arc, - expr: &WireExpr, + res: &mut Arc, _node_id: NodeId, ) { - let rtables = zread!(tables.tables); - forget_client_queryable(tables, rtables, face, expr); + forget_client_queryable(tables, face, res); } fn compute_query_route( diff --git a/zenoh/src/net/routing/hat/router/pubsub.rs b/zenoh/src/net/routing/hat/router/pubsub.rs index d8599132ec..866186d96e 100644 --- a/zenoh/src/net/routing/hat/router/pubsub.rs +++ b/zenoh/src/net/routing/hat/router/pubsub.rs @@ -17,19 +17,18 @@ use super::{get_peer, get_router, HatCode, HatContext, HatFace, HatTables}; use crate::net::routing::dispatcher::face::FaceState; use crate::net::routing::dispatcher::pubsub::*; use crate::net::routing::dispatcher::resource::{NodeId, Resource, SessionContext}; +use crate::net::routing::dispatcher::tables::Tables; use crate::net::routing::dispatcher::tables::{Route, RoutingExpr}; -use crate::net::routing::dispatcher::tables::{Tables, TablesLock}; use crate::net::routing::hat::HatPubSubTrait; use crate::net::routing::router::RoutesIndexes; use crate::net::routing::{RoutingContext, PREFIX_LIVELINESS}; use petgraph::graph::NodeIndex; use std::borrow::Cow; use std::collections::{HashMap, HashSet}; -use std::sync::{Arc, RwLockReadGuard}; -use zenoh_core::zread; +use std::sync::Arc; use zenoh_protocol::core::key_expr::OwnedKeyExpr; use zenoh_protocol::{ - core::{key_expr::keyexpr, Reliability, WhatAmI, WireExpr, ZenohId}, + core::{Reliability, WhatAmI, ZenohId}, network::declare::{ common::ext::WireExprType, ext, subscriber::ext::SubscriberInfo, Declare, DeclareBody, DeclareSubscriber, Mode, UndeclareSubscriber, @@ -211,62 +210,13 @@ fn register_router_subscription( } fn declare_router_subscription( - tables: &TablesLock, - rtables: RwLockReadGuard, + tables: &mut Tables, face: &mut Arc, - expr: &WireExpr, + res: &mut Arc, sub_info: &SubscriberInfo, router: ZenohId, ) { - match rtables - .get_mapping(face, &expr.scope, expr.mapping) - .cloned() - { - Some(mut prefix) => { - let res = Resource::get_resource(&prefix, &expr.suffix); - let (mut res, mut wtables) = - if res.as_ref().map(|r| r.context.is_some()).unwrap_or(false) { - drop(rtables); - let wtables = zwrite!(tables.tables); - (res.unwrap(), wtables) - } else { - let mut fullexpr = prefix.expr(); - fullexpr.push_str(expr.suffix.as_ref()); - let mut matches = keyexpr::new(fullexpr.as_str()) - .map(|ke| Resource::get_matches(&rtables, ke)) - .unwrap_or_default(); - drop(rtables); - let mut wtables = zwrite!(tables.tables); - let mut res = - Resource::make_resource(&mut wtables, &mut prefix, expr.suffix.as_ref()); - matches.push(Arc::downgrade(&res)); - Resource::match_resource(&wtables, &mut res, matches); - (res, wtables) - }; - register_router_subscription(&mut wtables, face, &mut res, sub_info, router); - disable_matches_data_routes(&mut wtables, &mut res); - drop(wtables); - - let rtables = zread!(tables.tables); - let matches_data_routes = compute_matches_data_routes(&rtables, &res); - drop(rtables); - - let wtables = zwrite!(tables.tables); - for (mut res, data_routes, 
matching_pulls) in matches_data_routes { - get_mut_unchecked(&mut res) - .context_mut() - .update_data_routes(data_routes); - get_mut_unchecked(&mut res) - .context_mut() - .update_matching_pulls(matching_pulls); - } - drop(wtables); - } - None => log::error!( - "Declare router subscription for unknown scope {}!", - expr.scope - ), - } + register_router_subscription(tables, face, res, sub_info, router); } fn register_peer_subscription( @@ -290,66 +240,17 @@ fn register_peer_subscription( } fn declare_peer_subscription( - tables: &TablesLock, - rtables: RwLockReadGuard, + tables: &mut Tables, face: &mut Arc, - expr: &WireExpr, + res: &mut Arc, sub_info: &SubscriberInfo, peer: ZenohId, ) { - match rtables - .get_mapping(face, &expr.scope, expr.mapping) - .cloned() - { - Some(mut prefix) => { - let res = Resource::get_resource(&prefix, &expr.suffix); - let (mut res, mut wtables) = - if res.as_ref().map(|r| r.context.is_some()).unwrap_or(false) { - drop(rtables); - let wtables = zwrite!(tables.tables); - (res.unwrap(), wtables) - } else { - let mut fullexpr = prefix.expr(); - fullexpr.push_str(expr.suffix.as_ref()); - let mut matches = keyexpr::new(fullexpr.as_str()) - .map(|ke| Resource::get_matches(&rtables, ke)) - .unwrap_or_default(); - drop(rtables); - let mut wtables = zwrite!(tables.tables); - let mut res = - Resource::make_resource(&mut wtables, &mut prefix, expr.suffix.as_ref()); - matches.push(Arc::downgrade(&res)); - Resource::match_resource(&wtables, &mut res, matches); - (res, wtables) - }; - register_peer_subscription(&mut wtables, face, &mut res, sub_info, peer); - let mut propa_sub_info = *sub_info; - propa_sub_info.mode = Mode::Push; - let zid = wtables.zid; - register_router_subscription(&mut wtables, face, &mut res, &propa_sub_info, zid); - disable_matches_data_routes(&mut wtables, &mut res); - drop(wtables); - - let rtables = zread!(tables.tables); - let matches_data_routes = compute_matches_data_routes(&rtables, &res); - drop(rtables); - - let wtables = zwrite!(tables.tables); - for (mut res, data_routes, matching_pulls) in matches_data_routes { - get_mut_unchecked(&mut res) - .context_mut() - .update_data_routes(data_routes); - get_mut_unchecked(&mut res) - .context_mut() - .update_matching_pulls(matching_pulls); - } - drop(wtables); - } - None => log::error!( - "Declare router subscription for unknown scope {}!", - expr.scope - ), - } + register_peer_subscription(tables, face, res, sub_info, peer); + let mut propa_sub_info = *sub_info; + propa_sub_info.mode = Mode::Push; + let zid = tables.zid; + register_router_subscription(tables, face, res, &propa_sub_info, zid); } fn register_client_subscription( @@ -392,64 +293,16 @@ fn register_client_subscription( } fn declare_client_subscription( - tables: &TablesLock, - rtables: RwLockReadGuard, + tables: &mut Tables, face: &mut Arc, - expr: &WireExpr, + res: &mut Arc, sub_info: &SubscriberInfo, ) { - log::debug!("Register client subscription"); - match rtables - .get_mapping(face, &expr.scope, expr.mapping) - .cloned() - { - Some(mut prefix) => { - let res = Resource::get_resource(&prefix, &expr.suffix); - let (mut res, mut wtables) = - if res.as_ref().map(|r| r.context.is_some()).unwrap_or(false) { - drop(rtables); - let wtables = zwrite!(tables.tables); - (res.unwrap(), wtables) - } else { - let mut fullexpr = prefix.expr(); - fullexpr.push_str(expr.suffix.as_ref()); - let mut matches = keyexpr::new(fullexpr.as_str()) - .map(|ke| Resource::get_matches(&rtables, ke)) - .unwrap_or_default(); - drop(rtables); - let mut wtables = 
zwrite!(tables.tables); - let mut res = - Resource::make_resource(&mut wtables, &mut prefix, expr.suffix.as_ref()); - matches.push(Arc::downgrade(&res)); - Resource::match_resource(&wtables, &mut res, matches); - (res, wtables) - }; - - register_client_subscription(&mut wtables, face, &mut res, sub_info); - let mut propa_sub_info = *sub_info; - propa_sub_info.mode = Mode::Push; - let zid = wtables.zid; - register_router_subscription(&mut wtables, face, &mut res, &propa_sub_info, zid); - disable_matches_data_routes(&mut wtables, &mut res); - drop(wtables); - - let rtables = zread!(tables.tables); - let matches_data_routes = compute_matches_data_routes(&rtables, &res); - drop(rtables); - - let wtables = zwrite!(tables.tables); - for (mut res, data_routes, matching_pulls) in matches_data_routes { - get_mut_unchecked(&mut res) - .context_mut() - .update_data_routes(data_routes); - get_mut_unchecked(&mut res) - .context_mut() - .update_matching_pulls(matching_pulls); - } - drop(wtables); - } - None => log::error!("Declare subscription for unknown scope {}!", expr.scope), - } + register_client_subscription(tables, face, res, sub_info); + let mut propa_sub_info = *sub_info; + propa_sub_info.mode = Mode::Push; + let zid = tables.zid; + register_router_subscription(tables, face, res, &propa_sub_info, zid); } #[inline] @@ -657,40 +510,12 @@ fn undeclare_router_subscription( } fn forget_router_subscription( - tables: &TablesLock, - rtables: RwLockReadGuard, + tables: &mut Tables, face: &mut Arc, - expr: &WireExpr, + res: &mut Arc, router: &ZenohId, ) { - match rtables.get_mapping(face, &expr.scope, expr.mapping) { - Some(prefix) => match Resource::get_resource(prefix, expr.suffix.as_ref()) { - Some(mut res) => { - drop(rtables); - let mut wtables = zwrite!(tables.tables); - undeclare_router_subscription(&mut wtables, Some(face), &mut res, router); - disable_matches_data_routes(&mut wtables, &mut res); - drop(wtables); - - let rtables = zread!(tables.tables); - let matches_data_routes = compute_matches_data_routes(&rtables, &res); - drop(rtables); - let wtables = zwrite!(tables.tables); - for (mut res, data_routes, matching_pulls) in matches_data_routes { - get_mut_unchecked(&mut res) - .context_mut() - .update_data_routes(data_routes); - get_mut_unchecked(&mut res) - .context_mut() - .update_matching_pulls(matching_pulls); - } - Resource::clean(&mut res); - drop(wtables); - } - None => log::error!("Undeclare unknown router subscription!"), - }, - None => log::error!("Undeclare router subscription with unknown scope!"), - } + undeclare_router_subscription(tables, Some(face), res, router); } fn unregister_peer_subscription(tables: &mut Tables, res: &mut Arc, peer: &ZenohId) { @@ -721,45 +546,17 @@ fn undeclare_peer_subscription( } fn forget_peer_subscription( - tables: &TablesLock, - rtables: RwLockReadGuard, + tables: &mut Tables, face: &mut Arc, - expr: &WireExpr, + res: &mut Arc, peer: &ZenohId, ) { - match rtables.get_mapping(face, &expr.scope, expr.mapping) { - Some(prefix) => match Resource::get_resource(prefix, expr.suffix.as_ref()) { - Some(mut res) => { - drop(rtables); - let mut wtables = zwrite!(tables.tables); - undeclare_peer_subscription(&mut wtables, Some(face), &mut res, peer); - let client_subs = res.session_ctxs.values().any(|ctx| ctx.subs.is_some()); - let peer_subs = remote_peer_subs(&wtables, &res); - let zid = wtables.zid; - if !client_subs && !peer_subs { - undeclare_router_subscription(&mut wtables, None, &mut res, &zid); - } - disable_matches_data_routes(&mut wtables, &mut 
res); - drop(wtables); - - let rtables = zread!(tables.tables); - let matches_data_routes = compute_matches_data_routes(&rtables, &res); - drop(rtables); - let wtables = zwrite!(tables.tables); - for (mut res, data_routes, matching_pulls) in matches_data_routes { - get_mut_unchecked(&mut res) - .context_mut() - .update_data_routes(data_routes); - get_mut_unchecked(&mut res) - .context_mut() - .update_matching_pulls(matching_pulls); - } - Resource::clean(&mut res); - drop(wtables); - } - None => log::error!("Undeclare unknown peer subscription!"), - }, - None => log::error!("Undeclare peer subscription with unknown scope!"), + undeclare_peer_subscription(tables, Some(face), res, peer); + let client_subs = res.session_ctxs.values().any(|ctx| ctx.subs.is_some()); + let peer_subs = remote_peer_subs(tables, res); + let zid = tables.zid; + if !client_subs && !peer_subs { + undeclare_router_subscription(tables, None, res, &zid); } } @@ -807,40 +604,11 @@ pub(super) fn undeclare_client_subscription( } fn forget_client_subscription( - tables: &TablesLock, - rtables: RwLockReadGuard, + tables: &mut Tables, face: &mut Arc, - expr: &WireExpr, + res: &mut Arc, ) { - match rtables.get_mapping(face, &expr.scope, expr.mapping) { - Some(prefix) => match Resource::get_resource(prefix, expr.suffix.as_ref()) { - Some(mut res) => { - drop(rtables); - let mut wtables = zwrite!(tables.tables); - undeclare_client_subscription(&mut wtables, face, &mut res); - disable_matches_data_routes(&mut wtables, &mut res); - drop(wtables); - - let rtables = zread!(tables.tables); - let matches_data_routes = compute_matches_data_routes(&rtables, &res); - drop(rtables); - - let wtables = zwrite!(tables.tables); - for (mut res, data_routes, matching_pulls) in matches_data_routes { - get_mut_unchecked(&mut res) - .context_mut() - .update_data_routes(data_routes); - get_mut_unchecked(&mut res) - .context_mut() - .update_matching_pulls(matching_pulls); - } - Resource::clean(&mut res); - drop(wtables); - } - None => log::error!("Undeclare unknown subscription!"), - }, - None => log::error!("Undeclare subscription with unknown scope!"), - } + undeclare_client_subscription(tables, face, res); } pub(super) fn pubsub_new_face(tables: &mut Tables, face: &mut Arc) { @@ -1103,56 +871,54 @@ fn insert_faces_for_subs( impl HatPubSubTrait for HatCode { fn declare_subscription( &self, - tables: &TablesLock, + tables: &mut Tables, face: &mut Arc, - expr: &WireExpr, + res: &mut Arc, sub_info: &SubscriberInfo, node_id: NodeId, ) { - let rtables = zread!(tables.tables); match face.whatami { WhatAmI::Router => { - if let Some(router) = get_router(&rtables, face, node_id) { - declare_router_subscription(tables, rtables, face, expr, sub_info, router) + if let Some(router) = get_router(tables, face, node_id) { + declare_router_subscription(tables, face, res, sub_info, router) } } WhatAmI::Peer => { - if hat!(rtables).full_net(WhatAmI::Peer) { - if let Some(peer) = get_peer(&rtables, face, node_id) { - declare_peer_subscription(tables, rtables, face, expr, sub_info, peer) + if hat!(tables).full_net(WhatAmI::Peer) { + if let Some(peer) = get_peer(tables, face, node_id) { + declare_peer_subscription(tables, face, res, sub_info, peer) } } else { - declare_client_subscription(tables, rtables, face, expr, sub_info) + declare_client_subscription(tables, face, res, sub_info) } } - _ => declare_client_subscription(tables, rtables, face, expr, sub_info), + _ => declare_client_subscription(tables, face, res, sub_info), } } - fn forget_subscription( + fn 
undeclare_subscription( &self, - tables: &TablesLock, + tables: &mut Tables, face: &mut Arc, - expr: &WireExpr, + res: &mut Arc, node_id: NodeId, ) { - let rtables = zread!(tables.tables); match face.whatami { WhatAmI::Router => { - if let Some(router) = get_router(&rtables, face, node_id) { - forget_router_subscription(tables, rtables, face, expr, &router) + if let Some(router) = get_router(tables, face, node_id) { + forget_router_subscription(tables, face, res, &router) } } WhatAmI::Peer => { - if hat!(rtables).full_net(WhatAmI::Peer) { - if let Some(peer) = get_peer(&rtables, face, node_id) { - forget_peer_subscription(tables, rtables, face, expr, &peer) + if hat!(tables).full_net(WhatAmI::Peer) { + if let Some(peer) = get_peer(tables, face, node_id) { + forget_peer_subscription(tables, face, res, &peer) } } else { - forget_client_subscription(tables, rtables, face, expr) + forget_client_subscription(tables, face, res) } } - _ => forget_client_subscription(tables, rtables, face, expr), + _ => forget_client_subscription(tables, face, res), } } diff --git a/zenoh/src/net/routing/hat/router/queries.rs b/zenoh/src/net/routing/hat/router/queries.rs index d4125e2561..e84d927d01 100644 --- a/zenoh/src/net/routing/hat/router/queries.rs +++ b/zenoh/src/net/routing/hat/router/queries.rs @@ -17,8 +17,8 @@ use super::{get_peer, get_router, HatCode, HatContext, HatFace, HatTables}; use crate::net::routing::dispatcher::face::FaceState; use crate::net::routing::dispatcher::queries::*; use crate::net::routing::dispatcher::resource::{NodeId, Resource, SessionContext}; +use crate::net::routing::dispatcher::tables::Tables; use crate::net::routing::dispatcher::tables::{QueryTargetQabl, QueryTargetQablSet, RoutingExpr}; -use crate::net::routing::dispatcher::tables::{Tables, TablesLock}; use crate::net::routing::hat::HatQueriesTrait; use crate::net::routing::router::RoutesIndexes; use crate::net::routing::{RoutingContext, PREFIX_LIVELINESS}; @@ -26,12 +26,12 @@ use ordered_float::OrderedFloat; use petgraph::graph::NodeIndex; use std::borrow::Cow; use std::collections::HashMap; -use std::sync::{Arc, RwLockReadGuard}; +use std::sync::Arc; use zenoh_buffers::ZBuf; use zenoh_protocol::core::key_expr::include::{Includer, DEFAULT_INCLUDER}; use zenoh_protocol::core::key_expr::OwnedKeyExpr; use zenoh_protocol::{ - core::{key_expr::keyexpr, WhatAmI, WireExpr, ZenohId}, + core::{WhatAmI, WireExpr, ZenohId}, network::declare::{ common::ext::WireExprType, ext, queryable::ext::QueryableInfo, Declare, DeclareBody, DeclareQueryable, UndeclareQueryable, @@ -356,57 +356,13 @@ fn register_router_queryable( } fn declare_router_queryable( - tables: &TablesLock, - rtables: RwLockReadGuard, + tables: &mut Tables, face: &mut Arc, - expr: &WireExpr, + res: &mut Arc, qabl_info: &QueryableInfo, router: ZenohId, ) { - match rtables - .get_mapping(face, &expr.scope, expr.mapping) - .cloned() - { - Some(mut prefix) => { - let res = Resource::get_resource(&prefix, &expr.suffix); - let (mut res, mut wtables) = - if res.as_ref().map(|r| r.context.is_some()).unwrap_or(false) { - drop(rtables); - let wtables = zwrite!(tables.tables); - (res.unwrap(), wtables) - } else { - let mut fullexpr = prefix.expr(); - fullexpr.push_str(expr.suffix.as_ref()); - log::debug!("Register router queryable {}", fullexpr); - let mut matches = keyexpr::new(fullexpr.as_str()) - .map(|ke| Resource::get_matches(&rtables, ke)) - .unwrap_or_default(); - drop(rtables); - let mut wtables = zwrite!(tables.tables); - let mut res = - Resource::make_resource(&mut wtables, 
&mut prefix, expr.suffix.as_ref()); - matches.push(Arc::downgrade(&res)); - Resource::match_resource(&wtables, &mut res, matches); - (res, wtables) - }; - register_router_queryable(&mut wtables, Some(face), &mut res, qabl_info, router); - disable_matches_query_routes(&mut wtables, &mut res); - drop(wtables); - - let rtables = zread!(tables.tables); - let matches_query_routes = compute_matches_query_routes(&rtables, &res); - drop(rtables); - - let wtables = zwrite!(tables.tables); - for (mut res, query_routes) in matches_query_routes { - get_mut_unchecked(&mut res) - .context_mut() - .update_query_routes(query_routes); - } - drop(wtables); - } - None => log::error!("Declare router queryable for unknown scope {}!", expr.scope), - } + register_router_queryable(tables, Some(face), res, qabl_info, router); } fn register_peer_queryable( @@ -431,61 +387,17 @@ fn register_peer_queryable( } fn declare_peer_queryable( - tables: &TablesLock, - rtables: RwLockReadGuard, + tables: &mut Tables, face: &mut Arc, - expr: &WireExpr, + res: &mut Arc, qabl_info: &QueryableInfo, peer: ZenohId, ) { - match rtables - .get_mapping(face, &expr.scope, expr.mapping) - .cloned() - { - Some(mut prefix) => { - let res = Resource::get_resource(&prefix, &expr.suffix); - let (mut res, mut wtables) = - if res.as_ref().map(|r| r.context.is_some()).unwrap_or(false) { - drop(rtables); - let wtables = zwrite!(tables.tables); - (res.unwrap(), wtables) - } else { - let mut fullexpr = prefix.expr(); - fullexpr.push_str(expr.suffix.as_ref()); - log::debug!("Register peer queryable {}", fullexpr); - let mut matches = keyexpr::new(fullexpr.as_str()) - .map(|ke| Resource::get_matches(&rtables, ke)) - .unwrap_or_default(); - drop(rtables); - let mut wtables = zwrite!(tables.tables); - let mut res = - Resource::make_resource(&mut wtables, &mut prefix, expr.suffix.as_ref()); - matches.push(Arc::downgrade(&res)); - Resource::match_resource(&wtables, &mut res, matches); - (res, wtables) - }; - let mut face = Some(face); - register_peer_queryable(&mut wtables, face.as_deref_mut(), &mut res, qabl_info, peer); - let local_info = local_router_qabl_info(&wtables, &res); - let zid = wtables.zid; - register_router_queryable(&mut wtables, face, &mut res, &local_info, zid); - disable_matches_query_routes(&mut wtables, &mut res); - drop(wtables); - - let rtables = zread!(tables.tables); - let matches_query_routes = compute_matches_query_routes(&rtables, &res); - drop(rtables); - - let wtables = zwrite!(tables.tables); - for (mut res, query_routes) in matches_query_routes { - get_mut_unchecked(&mut res) - .context_mut() - .update_query_routes(query_routes); - } - drop(wtables); - } - None => log::error!("Declare router queryable for unknown scope {}!", expr.scope), - } + let mut face = Some(face); + register_peer_queryable(tables, face.as_deref_mut(), res, qabl_info, peer); + let local_info = local_router_qabl_info(tables, res); + let zid = tables.zid; + register_router_queryable(tables, face, res, &local_info, zid); } fn register_client_queryable( @@ -514,60 +426,15 @@ fn register_client_queryable( } fn declare_client_queryable( - tables: &TablesLock, - rtables: RwLockReadGuard, + tables: &mut Tables, face: &mut Arc, - expr: &WireExpr, + res: &mut Arc, qabl_info: &QueryableInfo, ) { - match rtables - .get_mapping(face, &expr.scope, expr.mapping) - .cloned() - { - Some(mut prefix) => { - let res = Resource::get_resource(&prefix, &expr.suffix); - let (mut res, mut wtables) = - if res.as_ref().map(|r| r.context.is_some()).unwrap_or(false) { - 
drop(rtables); - let wtables = zwrite!(tables.tables); - (res.unwrap(), wtables) - } else { - let mut fullexpr = prefix.expr(); - fullexpr.push_str(expr.suffix.as_ref()); - log::debug!("Register client queryable {}", fullexpr); - let mut matches = keyexpr::new(fullexpr.as_str()) - .map(|ke| Resource::get_matches(&rtables, ke)) - .unwrap_or_default(); - drop(rtables); - let mut wtables = zwrite!(tables.tables); - let mut res = - Resource::make_resource(&mut wtables, &mut prefix, expr.suffix.as_ref()); - matches.push(Arc::downgrade(&res)); - Resource::match_resource(&wtables, &mut res, matches); - (res, wtables) - }; - - register_client_queryable(&mut wtables, face, &mut res, qabl_info); - let local_details = local_router_qabl_info(&wtables, &res); - let zid = wtables.zid; - register_router_queryable(&mut wtables, Some(face), &mut res, &local_details, zid); - disable_matches_query_routes(&mut wtables, &mut res); - drop(wtables); - - let rtables = zread!(tables.tables); - let matches_query_routes = compute_matches_query_routes(&rtables, &res); - drop(rtables); - - let wtables = zwrite!(tables.tables); - for (mut res, query_routes) in matches_query_routes { - get_mut_unchecked(&mut res) - .context_mut() - .update_query_routes(query_routes); - } - drop(wtables); - } - None => log::error!("Declare queryable for unknown scope {}!", expr.scope), - } + register_client_queryable(tables, face, res, qabl_info); + let local_details = local_router_qabl_info(tables, res); + let zid = tables.zid; + register_router_queryable(tables, Some(face), res, &local_details, zid); } #[inline] @@ -776,38 +643,12 @@ fn undeclare_router_queryable( } fn forget_router_queryable( - tables: &TablesLock, - rtables: RwLockReadGuard, + tables: &mut Tables, face: &mut Arc, - expr: &WireExpr, + res: &mut Arc, router: &ZenohId, ) { - match rtables.get_mapping(face, &expr.scope, expr.mapping) { - Some(prefix) => match Resource::get_resource(prefix, expr.suffix.as_ref()) { - Some(mut res) => { - drop(rtables); - let mut wtables = zwrite!(tables.tables); - undeclare_router_queryable(&mut wtables, Some(face), &mut res, router); - disable_matches_query_routes(&mut wtables, &mut res); - drop(wtables); - - let rtables = zread!(tables.tables); - let matches_query_routes = compute_matches_query_routes(&rtables, &res); - drop(rtables); - - let wtables = zwrite!(tables.tables); - for (mut res, query_routes) in matches_query_routes { - get_mut_unchecked(&mut res) - .context_mut() - .update_query_routes(query_routes); - } - Resource::clean(&mut res); - drop(wtables); - } - None => log::error!("Undeclare unknown router queryable!"), - }, - None => log::error!("Undeclare router queryable with unknown scope!"), - } + undeclare_router_queryable(tables, Some(face), res, router); } fn unregister_peer_queryable(tables: &mut Tables, res: &mut Arc, peer: &ZenohId) { @@ -834,47 +675,21 @@ fn undeclare_peer_queryable( } fn forget_peer_queryable( - tables: &TablesLock, - rtables: RwLockReadGuard, + tables: &mut Tables, face: &mut Arc, - expr: &WireExpr, + res: &mut Arc, peer: &ZenohId, ) { - match rtables.get_mapping(face, &expr.scope, expr.mapping) { - Some(prefix) => match Resource::get_resource(prefix, expr.suffix.as_ref()) { - Some(mut res) => { - drop(rtables); - let mut wtables = zwrite!(tables.tables); - undeclare_peer_queryable(&mut wtables, Some(face), &mut res, peer); + undeclare_peer_queryable(tables, Some(face), res, peer); - let client_qabls = res.session_ctxs.values().any(|ctx| ctx.qabl.is_some()); - let peer_qabls = 
remote_peer_qabls(&wtables, &res); - let zid = wtables.zid; - if !client_qabls && !peer_qabls { - undeclare_router_queryable(&mut wtables, None, &mut res, &zid); - } else { - let local_info = local_router_qabl_info(&wtables, &res); - register_router_queryable(&mut wtables, None, &mut res, &local_info, zid); - } - disable_matches_query_routes(&mut wtables, &mut res); - drop(wtables); - - let rtables = zread!(tables.tables); - let matches_query_routes = compute_matches_query_routes(&rtables, &res); - drop(rtables); - - let wtables = zwrite!(tables.tables); - for (mut res, query_routes) in matches_query_routes { - get_mut_unchecked(&mut res) - .context_mut() - .update_query_routes(query_routes); - } - Resource::clean(&mut res); - drop(wtables); - } - None => log::error!("Undeclare unknown peer queryable!"), - }, - None => log::error!("Undeclare peer queryable with unknown scope!"), + let client_qabls = res.session_ctxs.values().any(|ctx| ctx.qabl.is_some()); + let peer_qabls = remote_peer_qabls(tables, res); + let zid = tables.zid; + if !client_qabls && !peer_qabls { + undeclare_router_queryable(tables, None, res, &zid); + } else { + let local_info = local_router_qabl_info(tables, res); + register_router_queryable(tables, None, res, &local_info, zid); } } @@ -926,37 +741,11 @@ pub(super) fn undeclare_client_queryable( } fn forget_client_queryable( - tables: &TablesLock, - rtables: RwLockReadGuard, + tables: &mut Tables, face: &mut Arc, - expr: &WireExpr, + res: &mut Arc, ) { - match rtables.get_mapping(face, &expr.scope, expr.mapping) { - Some(prefix) => match Resource::get_resource(prefix, expr.suffix.as_ref()) { - Some(mut res) => { - drop(rtables); - let mut wtables = zwrite!(tables.tables); - undeclare_client_queryable(&mut wtables, face, &mut res); - disable_matches_query_routes(&mut wtables, &mut res); - drop(wtables); - - let rtables = zread!(tables.tables); - let matches_query_routes = compute_matches_query_routes(&rtables, &res); - drop(rtables); - - let wtables = zwrite!(tables.tables); - for (mut res, query_routes) in matches_query_routes { - get_mut_unchecked(&mut res) - .context_mut() - .update_query_routes(query_routes); - } - Resource::clean(&mut res); - drop(wtables); - } - None => log::error!("Undeclare unknown queryable!"), - }, - None => log::error!("Undeclare queryable with unknown scope!"), - } + undeclare_client_queryable(tables, face, res); } pub(super) fn queries_new_face(tables: &mut Tables, face: &mut Arc) { @@ -1233,56 +1022,54 @@ lazy_static::lazy_static! 
{ impl HatQueriesTrait for HatCode { fn declare_queryable( &self, - tables: &TablesLock, + tables: &mut Tables, face: &mut Arc, - expr: &WireExpr, + res: &mut Arc, qabl_info: &QueryableInfo, node_id: NodeId, ) { - let rtables = zread!(tables.tables); match face.whatami { WhatAmI::Router => { - if let Some(router) = get_router(&rtables, face, node_id) { - declare_router_queryable(tables, rtables, face, expr, qabl_info, router) + if let Some(router) = get_router(tables, face, node_id) { + declare_router_queryable(tables, face, res, qabl_info, router) } } WhatAmI::Peer => { - if hat!(rtables).full_net(WhatAmI::Peer) { - if let Some(peer) = get_peer(&rtables, face, node_id) { - declare_peer_queryable(tables, rtables, face, expr, qabl_info, peer) + if hat!(tables).full_net(WhatAmI::Peer) { + if let Some(peer) = get_peer(tables, face, node_id) { + declare_peer_queryable(tables, face, res, qabl_info, peer) } } else { - declare_client_queryable(tables, rtables, face, expr, qabl_info) + declare_client_queryable(tables, face, res, qabl_info) } } - _ => declare_client_queryable(tables, rtables, face, expr, qabl_info), + _ => declare_client_queryable(tables, face, res, qabl_info), } } - fn forget_queryable( + fn undeclare_queryable( &self, - tables: &TablesLock, + tables: &mut Tables, face: &mut Arc, - expr: &WireExpr, + res: &mut Arc, node_id: NodeId, ) { - let rtables = zread!(tables.tables); match face.whatami { WhatAmI::Router => { - if let Some(router) = get_router(&rtables, face, node_id) { - forget_router_queryable(tables, rtables, face, expr, &router) + if let Some(router) = get_router(tables, face, node_id) { + forget_router_queryable(tables, face, res, &router) } } WhatAmI::Peer => { - if hat!(rtables).full_net(WhatAmI::Peer) { - if let Some(peer) = get_peer(&rtables, face, node_id) { - forget_peer_queryable(tables, rtables, face, expr, &peer) + if hat!(tables).full_net(WhatAmI::Peer) { + if let Some(peer) = get_peer(tables, face, node_id) { + forget_peer_queryable(tables, face, res, &peer) } } else { - forget_client_queryable(tables, rtables, face, expr) + forget_client_queryable(tables, face, res) } } - _ => forget_client_queryable(tables, rtables, face, expr), + _ => forget_client_queryable(tables, face, res), } } From 7af8480a988e1452622e1dbc8da427e9c68a18cf Mon Sep 17 00:00:00 2001 From: OlivierHecart Date: Thu, 18 Jan 2024 17:14:45 +0100 Subject: [PATCH 039/122] Improve perfromances --- zenoh/src/net/primitives/mod.rs | 4 +- zenoh/src/net/primitives/mux.rs | 262 +++++++++------------ zenoh/src/net/routing/dispatcher/pubsub.rs | 79 +++---- zenoh/src/net/routing/router.rs | 27 +-- zenoh/src/net/runtime/adminspace.rs | 4 +- zenoh/src/session.rs | 4 +- 6 files changed, 154 insertions(+), 226 deletions(-) diff --git a/zenoh/src/net/primitives/mod.rs b/zenoh/src/net/primitives/mod.rs index 7c0ea9f938..cbfa2e3716 100644 --- a/zenoh/src/net/primitives/mod.rs +++ b/zenoh/src/net/primitives/mod.rs @@ -37,7 +37,7 @@ pub trait Primitives: Send + Sync { pub(crate) trait EPrimitives: Send + Sync { fn send_declare(&self, ctx: RoutingContext); - fn send_push(&self, ctx: RoutingContext); + fn send_push(&self, msg: Push); fn send_request(&self, ctx: RoutingContext); @@ -68,7 +68,7 @@ impl Primitives for DummyPrimitives { impl EPrimitives for DummyPrimitives { fn send_declare(&self, _ctx: RoutingContext) {} - fn send_push(&self, _ctx: RoutingContext) {} + fn send_push(&self, _msg: Push) {} fn send_request(&self, _ctx: RoutingContext) {} diff --git a/zenoh/src/net/primitives/mux.rs 
b/zenoh/src/net/primitives/mux.rs index 935d74ac5a..17aad11311 100644 --- a/zenoh/src/net/primitives/mux.rs +++ b/zenoh/src/net/primitives/mux.rs @@ -1,5 +1,3 @@ -use std::sync::Arc; - // // Copyright (c) 2023 ZettaScale Technology // @@ -15,10 +13,11 @@ use std::sync::Arc; // use super::{EPrimitives, Primitives}; use crate::net::routing::{ - dispatcher::{face::Face, tables::TablesLock}, + dispatcher::face::Face, interceptor::{InterceptorTrait, InterceptorsChain}, RoutingContext, }; +use std::sync::OnceLock; use zenoh_protocol::network::{ Declare, NetworkBody, NetworkMessage, Push, Request, Response, ResponseFinal, }; @@ -26,22 +25,15 @@ use zenoh_transport::{multicast::TransportMulticast, unicast::TransportUnicast}; pub struct Mux { pub handler: TransportUnicast, - pub(crate) fid: usize, - pub(crate) tables: Arc, + pub(crate) face: OnceLock, pub(crate) interceptor: InterceptorsChain, } impl Mux { - pub(crate) fn new( - handler: TransportUnicast, - fid: usize, - tables: Arc, - interceptor: InterceptorsChain, - ) -> Mux { + pub(crate) fn new(handler: TransportUnicast, interceptor: InterceptorsChain) -> Mux { Mux { handler, - fid, - tables, + face: OnceLock::new(), interceptor, } } @@ -54,20 +46,15 @@ impl Primitives for Mux { #[cfg(feature = "stats")] size: None, }; - let tables = zread!(self.tables.tables); - let face = tables.faces.get(&self.fid).cloned(); - drop(tables); - if let Some(face) = face { - let ctx = RoutingContext::new_in( - msg, - Face { - tables: self.tables.clone(), - state: face.clone(), - }, - ); + if self.interceptor.interceptors.is_empty() { + let _ = self.handler.schedule(msg); + } else if let Some(face) = self.face.get() { + let ctx = RoutingContext::new_out(msg, face.clone()); if let Some(ctx) = self.interceptor.intercept(ctx) { let _ = self.handler.schedule(ctx.msg); } + } else { + log::error!("Uninitialized multiplexer!"); } } @@ -77,20 +64,15 @@ impl Primitives for Mux { #[cfg(feature = "stats")] size: None, }; - let tables = zread!(self.tables.tables); - let face = tables.faces.get(&self.fid).cloned(); - drop(tables); - if let Some(face) = face { - let ctx = RoutingContext::new_in( - msg, - Face { - tables: self.tables.clone(), - state: face.clone(), - }, - ); + if self.interceptor.interceptors.is_empty() { + let _ = self.handler.schedule(msg); + } else if let Some(face) = self.face.get() { + let ctx = RoutingContext::new_out(msg, face.clone()); if let Some(ctx) = self.interceptor.intercept(ctx) { let _ = self.handler.schedule(ctx.msg); } + } else { + log::error!("Uninitialized multiplexer!"); } } @@ -100,20 +82,15 @@ impl Primitives for Mux { #[cfg(feature = "stats")] size: None, }; - let tables = zread!(self.tables.tables); - let face = tables.faces.get(&self.fid).cloned(); - drop(tables); - if let Some(face) = face { - let ctx = RoutingContext::new_in( - msg, - Face { - tables: self.tables.clone(), - state: face.clone(), - }, - ); + if self.interceptor.interceptors.is_empty() { + let _ = self.handler.schedule(msg); + } else if let Some(face) = self.face.get() { + let ctx = RoutingContext::new_out(msg, face.clone()); if let Some(ctx) = self.interceptor.intercept(ctx) { let _ = self.handler.schedule(ctx.msg); } + } else { + log::error!("Uninitialized multiplexer!"); } } @@ -123,20 +100,15 @@ impl Primitives for Mux { #[cfg(feature = "stats")] size: None, }; - let tables = zread!(self.tables.tables); - let face = tables.faces.get(&self.fid).cloned(); - drop(tables); - if let Some(face) = face { - let ctx = RoutingContext::new_in( - msg, - Face { - tables: 
self.tables.clone(), - state: face.clone(), - }, - ); + if self.interceptor.interceptors.is_empty() { + let _ = self.handler.schedule(msg); + } else if let Some(face) = self.face.get() { + let ctx = RoutingContext::new_out(msg, face.clone()); if let Some(ctx) = self.interceptor.intercept(ctx) { let _ = self.handler.schedule(ctx.msg); } + } else { + log::error!("Uninitialized multiplexer!"); } } @@ -146,20 +118,15 @@ impl Primitives for Mux { #[cfg(feature = "stats")] size: None, }; - let tables = zread!(self.tables.tables); - let face = tables.faces.get(&self.fid).cloned(); - drop(tables); - if let Some(face) = face { - let ctx = RoutingContext::new_in( - msg, - Face { - tables: self.tables.clone(), - state: face.clone(), - }, - ); + if self.interceptor.interceptors.is_empty() { + let _ = self.handler.schedule(msg); + } else if let Some(face) = self.face.get() { + let ctx = RoutingContext::new_out(msg, face.clone()); if let Some(ctx) = self.interceptor.intercept(ctx) { let _ = self.handler.schedule(ctx.msg); } + } else { + log::error!("Uninitialized multiplexer!"); } } @@ -186,20 +153,21 @@ impl EPrimitives for Mux { } } - fn send_push(&self, ctx: RoutingContext) { - let ctx = RoutingContext { - msg: NetworkMessage { - body: NetworkBody::Push(ctx.msg), - #[cfg(feature = "stats")] - size: None, - }, - inface: ctx.inface, - outface: ctx.outface, - prefix: ctx.prefix, - full_expr: ctx.full_expr, + fn send_push(&self, msg: Push) { + let msg = NetworkMessage { + body: NetworkBody::Push(msg), + #[cfg(feature = "stats")] + size: None, }; - if let Some(ctx) = self.interceptor.intercept(ctx) { - let _ = self.handler.schedule(ctx.msg); + if self.interceptor.interceptors.is_empty() { + let _ = self.handler.schedule(msg); + } else if let Some(face) = self.face.get() { + let ctx = RoutingContext::new_out(msg, face.clone()); + if let Some(ctx) = self.interceptor.intercept(ctx) { + let _ = self.handler.schedule(ctx.msg); + } + } else { + log::error!("Uninitialized multiplexer!"); } } @@ -261,23 +229,16 @@ impl EPrimitives for Mux { pub struct McastMux { pub handler: TransportMulticast, - pub(crate) fid: usize, - pub(crate) tables: Arc, - pub(crate) intercept: InterceptorsChain, + pub(crate) face: OnceLock, + pub(crate) interceptor: InterceptorsChain, } impl McastMux { - pub(crate) fn new( - handler: TransportMulticast, - fid: usize, - tables: Arc, - intercept: InterceptorsChain, - ) -> McastMux { + pub(crate) fn new(handler: TransportMulticast, interceptor: InterceptorsChain) -> McastMux { McastMux { handler, - fid, - tables, - intercept, + face: OnceLock::new(), + interceptor, } } } @@ -289,17 +250,15 @@ impl Primitives for McastMux { #[cfg(feature = "stats")] size: None, }; - if let Some(face) = zread!(self.tables.tables).faces.get(&self.fid).cloned() { - let ctx = RoutingContext::new_in( - msg, - Face { - tables: self.tables.clone(), - state: face.clone(), - }, - ); - if let Some(ctx) = self.intercept.intercept(ctx) { + if self.interceptor.interceptors.is_empty() { + let _ = self.handler.schedule(msg); + } else if let Some(face) = self.face.get() { + let ctx = RoutingContext::new_out(msg, face.clone()); + if let Some(ctx) = self.interceptor.intercept(ctx) { let _ = self.handler.schedule(ctx.msg); } + } else { + log::error!("Uninitialized multiplexer!"); } } @@ -309,17 +268,15 @@ impl Primitives for McastMux { #[cfg(feature = "stats")] size: None, }; - if let Some(face) = zread!(self.tables.tables).faces.get(&self.fid).cloned() { - let ctx = RoutingContext::new_in( - msg, - Face { - tables: 
self.tables.clone(), - state: face.clone(), - }, - ); - if let Some(ctx) = self.intercept.intercept(ctx) { + if self.interceptor.interceptors.is_empty() { + let _ = self.handler.schedule(msg); + } else if let Some(face) = self.face.get() { + let ctx = RoutingContext::new_out(msg, face.clone()); + if let Some(ctx) = self.interceptor.intercept(ctx) { let _ = self.handler.schedule(ctx.msg); } + } else { + log::error!("Uninitialized multiplexer!"); } } @@ -329,17 +286,15 @@ impl Primitives for McastMux { #[cfg(feature = "stats")] size: None, }; - if let Some(face) = zread!(self.tables.tables).faces.get(&self.fid).cloned() { - let ctx = RoutingContext::new_in( - msg, - Face { - tables: self.tables.clone(), - state: face.clone(), - }, - ); - if let Some(ctx) = self.intercept.intercept(ctx) { + if self.interceptor.interceptors.is_empty() { + let _ = self.handler.schedule(msg); + } else if let Some(face) = self.face.get() { + let ctx = RoutingContext::new_out(msg, face.clone()); + if let Some(ctx) = self.interceptor.intercept(ctx) { let _ = self.handler.schedule(ctx.msg); } + } else { + log::error!("Uninitialized multiplexer!"); } } @@ -349,17 +304,15 @@ impl Primitives for McastMux { #[cfg(feature = "stats")] size: None, }; - if let Some(face) = zread!(self.tables.tables).faces.get(&self.fid).cloned() { - let ctx = RoutingContext::new_in( - msg, - Face { - tables: self.tables.clone(), - state: face.clone(), - }, - ); - if let Some(ctx) = self.intercept.intercept(ctx) { + if self.interceptor.interceptors.is_empty() { + let _ = self.handler.schedule(msg); + } else if let Some(face) = self.face.get() { + let ctx = RoutingContext::new_out(msg, face.clone()); + if let Some(ctx) = self.interceptor.intercept(ctx) { let _ = self.handler.schedule(ctx.msg); } + } else { + log::error!("Uninitialized multiplexer!"); } } @@ -369,17 +322,15 @@ impl Primitives for McastMux { #[cfg(feature = "stats")] size: None, }; - if let Some(face) = zread!(self.tables.tables).faces.get(&self.fid).cloned() { - let ctx = RoutingContext::new_in( - msg, - Face { - tables: self.tables.clone(), - state: face.clone(), - }, - ); - if let Some(ctx) = self.intercept.intercept(ctx) { + if self.interceptor.interceptors.is_empty() { + let _ = self.handler.schedule(msg); + } else if let Some(face) = self.face.get() { + let ctx = RoutingContext::new_out(msg, face.clone()); + if let Some(ctx) = self.interceptor.intercept(ctx) { let _ = self.handler.schedule(ctx.msg); } + } else { + log::error!("Uninitialized multiplexer!"); } } @@ -401,25 +352,26 @@ impl EPrimitives for McastMux { prefix: ctx.prefix, full_expr: ctx.full_expr, }; - if let Some(ctx) = self.intercept.intercept(ctx) { + if let Some(ctx) = self.interceptor.intercept(ctx) { let _ = self.handler.schedule(ctx.msg); } } - fn send_push(&self, ctx: RoutingContext) { - let ctx = RoutingContext { - msg: NetworkMessage { - body: NetworkBody::Push(ctx.msg), - #[cfg(feature = "stats")] - size: None, - }, - inface: ctx.inface, - outface: ctx.outface, - prefix: ctx.prefix, - full_expr: ctx.full_expr, + fn send_push(&self, msg: Push) { + let msg = NetworkMessage { + body: NetworkBody::Push(msg), + #[cfg(feature = "stats")] + size: None, }; - if let Some(ctx) = self.intercept.intercept(ctx) { - let _ = self.handler.schedule(ctx.msg); + if self.interceptor.interceptors.is_empty() { + let _ = self.handler.schedule(msg); + } else if let Some(face) = self.face.get() { + let ctx = RoutingContext::new_out(msg, face.clone()); + if let Some(ctx) = self.interceptor.intercept(ctx) { + let _ = 
self.handler.schedule(ctx.msg); + } + } else { + log::error!("Uninitialized multiplexer!"); } } @@ -435,7 +387,7 @@ impl EPrimitives for McastMux { prefix: ctx.prefix, full_expr: ctx.full_expr, }; - if let Some(ctx) = self.intercept.intercept(ctx) { + if let Some(ctx) = self.interceptor.intercept(ctx) { let _ = self.handler.schedule(ctx.msg); } } @@ -452,7 +404,7 @@ impl EPrimitives for McastMux { prefix: ctx.prefix, full_expr: ctx.full_expr, }; - if let Some(ctx) = self.intercept.intercept(ctx) { + if let Some(ctx) = self.interceptor.intercept(ctx) { let _ = self.handler.schedule(ctx.msg); } } @@ -469,7 +421,7 @@ impl EPrimitives for McastMux { prefix: ctx.prefix, full_expr: ctx.full_expr, }; - if let Some(ctx) = self.intercept.intercept(ctx) { + if let Some(ctx) = self.interceptor.intercept(ctx) { let _ = self.handler.schedule(ctx.msg); } } diff --git a/zenoh/src/net/routing/dispatcher/pubsub.rs b/zenoh/src/net/routing/dispatcher/pubsub.rs index 2cdeb7d06f..54dfd0daea 100644 --- a/zenoh/src/net/routing/dispatcher/pubsub.rs +++ b/zenoh/src/net/routing/dispatcher/pubsub.rs @@ -14,9 +14,7 @@ use super::face::FaceState; use super::resource::{DataRoutes, Direction, PullCaches, Resource}; use super::tables::{NodeId, Route, RoutingExpr, Tables, TablesLock}; -use crate::net::routing::dispatcher::face::Face; use crate::net::routing::hat::HatTrait; -use crate::net::routing::RoutingContext; use std::borrow::Cow; use std::collections::HashMap; use std::sync::RwLock; @@ -478,19 +476,13 @@ pub fn full_reentrant_route_data( inc_stats!(face, tx, admin, payload) } - outface.primitives.send_push(RoutingContext::new_out( - Push { - wire_expr: key_expr.into(), - ext_qos, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType { node_id: *context }, - payload, - }, - Face { - tables: tables_ref.clone(), - state: outface.clone(), - }, - )) + outface.primitives.send_push(Push { + wire_expr: key_expr.into(), + ext_qos, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType { node_id: *context }, + payload, + }) } } else { if !matching_pulls.is_empty() { @@ -519,19 +511,13 @@ pub fn full_reentrant_route_data( inc_stats!(face, tx, admin, payload) } - outface.primitives.send_push(RoutingContext::new_out( - Push { - wire_expr: key_expr, - ext_qos, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType { node_id: context }, - payload: payload.clone(), - }, - Face { - tables: tables_ref.clone(), - state: outface.clone(), - }, - )) + outface.primitives.send_push(Push { + wire_expr: key_expr, + ext_qos, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType { node_id: context }, + payload: payload.clone(), + }) } } else { drop(tables); @@ -552,19 +538,13 @@ pub fn full_reentrant_route_data( inc_stats!(face, tx, admin, payload) } - outface.primitives.send_push(RoutingContext::new_out( - Push { - wire_expr: key_expr.into(), - ext_qos, - ext_tstamp: None, - ext_nodeid: ext::NodeIdType { node_id: *context }, - payload: payload.clone(), - }, - Face { - tables: tables_ref.clone(), - state: outface.clone(), - }, - )) + outface.primitives.send_push(Push { + wire_expr: key_expr.into(), + ext_qos, + ext_tstamp: None, + ext_nodeid: ext::NodeIdType { node_id: *context }, + payload: payload.clone(), + }) } } } @@ -603,16 +583,13 @@ pub fn pull_data(tables_ref: &RwLock, face: &Arc, expr: WireE drop(lock); drop(tables); for (key_expr, payload) in route { - face.primitives.send_push(RoutingContext::with_expr( - Push { - wire_expr: key_expr, - ext_qos: ext::QoSType::push_default(), - ext_tstamp: None, - ext_nodeid: ext::NodeIdType::default(), - payload, - 
}, - "".to_string(), - )); // TODO + face.primitives.send_push(Push { + wire_expr: key_expr, + ext_qos: ext::QoSType::push_default(), + ext_tstamp: None, + ext_nodeid: ext::NodeIdType::default(), + payload, + }); } } None => { diff --git a/zenoh/src/net/routing/router.rs b/zenoh/src/net/routing/router.rs index 79bfe70025..2309d67daf 100644 --- a/zenoh/src/net/routing/router.rs +++ b/zenoh/src/net/routing/router.rs @@ -123,6 +123,7 @@ impl Router { InterceptorsChain::from(ingress.into_iter().flatten().collect::>()), InterceptorsChain::from(egress.into_iter().flatten().collect::>()), ); + let mux = Arc::new(Mux::new(transport.clone(), egress)); let newface = tables .faces .entry(fid) @@ -134,12 +135,7 @@ impl Router { false, #[cfg(feature = "stats")] Some(stats), - Arc::new(Mux::new( - transport.clone(), - fid, - self.tables.clone(), - egress, - )), + mux.clone(), None, ctrl_lock.new_face(), ) @@ -152,6 +148,8 @@ impl Router { state: newface, }; + let _ = mux.face.set(face.clone()); + ctrl_lock.new_transport_unicast_face(&mut tables, &self.tables, &mut face, &transport)?; Ok(Arc::new(DeMux::new(face, Some(transport), ingress))) @@ -169,22 +167,23 @@ impl Router { .filter_map(|itor| itor.new_transport_multicast(&transport)) .collect::>(), ); - tables.mcast_groups.push(FaceState::new( + let mux = Arc::new(McastMux::new(transport.clone(), interceptor)); + let face = FaceState::new( fid, ZenohId::from_str("1").unwrap(), WhatAmI::Peer, false, #[cfg(feature = "stats")] None, - Arc::new(McastMux::new( - transport.clone(), - fid, - self.tables.clone(), - interceptor, - )), + mux.clone(), Some(transport), ctrl_lock.new_face(), - )); + ); + let _ = mux.face.set(Face { + state: face.clone(), + tables: self.tables.clone(), + }); + tables.mcast_groups.push(face); // recompute routes let mut root_res = tables.root_res.clone(); diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs index eab91ef5f6..4fa8e6c187 100644 --- a/zenoh/src/net/runtime/adminspace.rs +++ b/zenoh/src/net/runtime/adminspace.rs @@ -412,8 +412,8 @@ impl crate::net::primitives::EPrimitives for AdminSpace { } #[inline] - fn send_push(&self, ctx: crate::net::routing::RoutingContext) { - (self as &dyn Primitives).send_push(ctx.msg) + fn send_push(&self, msg: Push) { + (self as &dyn Primitives).send_push(msg) } #[inline] diff --git a/zenoh/src/session.rs b/zenoh/src/session.rs index 57f7279f91..d8da11231a 100644 --- a/zenoh/src/session.rs +++ b/zenoh/src/session.rs @@ -2676,8 +2676,8 @@ impl crate::net::primitives::EPrimitives for Session { } #[inline] - fn send_push(&self, ctx: crate::net::routing::RoutingContext) { - (self as &dyn Primitives).send_push(ctx.msg) + fn send_push(&self, msg: Push) { + (self as &dyn Primitives).send_push(msg) } #[inline] From d47b4fd08f572108bca204805d2df1247e08a700 Mon Sep 17 00:00:00 2001 From: snehilzs Date: Fri, 26 Jan 2024 10:50:02 +0100 Subject: [PATCH 040/122] WIP: added pep pdp points --- zenoh/src/net/routing/interceptor/authz.rs | 153 +++++++++++++++++++++ 1 file changed, 153 insertions(+) create mode 100644 zenoh/src/net/routing/interceptor/authz.rs diff --git a/zenoh/src/net/routing/interceptor/authz.rs b/zenoh/src/net/routing/interceptor/authz.rs new file mode 100644 index 0000000000..ae1188352c --- /dev/null +++ b/zenoh/src/net/routing/interceptor/authz.rs @@ -0,0 +1,153 @@ +use std::fmt; + +// use casbin::{CoreApi, Enforcer}; +use super::RoutingContext; +use casbin::prelude::*; +use zenoh_keyexpr::keyexpr_tree::box_tree::KeBoxTree; +use 
zenoh_protocol::network::NetworkMessage; +use zenoh_result::ZResult; + +pub enum Action { + NONE, + READ, + WRITE, + READWRITE, + SUBSCRIBEDECLARE, +} + +impl fmt::Debug for Action { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{:?}", self) + } +} +impl fmt::Display for Action { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{:?}", self) + } +} + +pub trait ZAuth { + fn authz_testing(&self, _: String, _: String, _: String) -> ZResult; +} + +impl ZAuth for Enforcer { + fn authz_testing(&self, zid: String, ke: String, act: String) -> ZResult { + /* + (zid, keyexpr, act): these values should be extraced from the authn code. + has to be atomic, to avoid another process sending the wrong info + */ + + if let Ok(authorized) = self.enforce((zid.clone(), ke.clone(), act.clone())) { + Ok(authorized) + } else { + println!("policy enforcement error"); + Ok(false) + } + } +} + +pub async fn start_authz() -> Result { + // get file value + let mut e = Enforcer::new("keymatch_model.conf", "keymatch_policy.csv").await?; + e.enable_log(true); + Ok(e) +} + +struct StaticPolicy { + //should have values needed for the policy to work + //config for local static policy +} + +//policy builder (add policy type and ruleset) + +//subject_builder (add ID, attributes, roles) + +//request_builder (add subject, resource, action) //do we need one? + +pub struct Subject { + //ID and list of possible policy attributes + id: Option, //can convert uuid to string and back + attributes: Option>, +} + +pub struct PolicyEnforcer { + policy_config: String, +} + +impl PolicyEnforcer { + pub fn init_policy(pol_string: String) -> ZResult { + let pe = Self { + policy_config: pol_string, + }; + Ok(pe) + } + pub fn policy_enforcement_point( + &self, + ctx: RoutingContext, + action: Action, + ) -> ZResult { + /* + input: msg body + output: allow deny + function: depending on the msg, calls build_permission_request(msg,action), passes its output to policy_decision_point() + uses allow/deny output to block or pass the msg to routing table + */ + let ke = ctx.full_expr().unwrap().to_owned(); + let zid = ctx.inface().unwrap().state.zid; + //build subject here + //for now just set it + let s = Subject { + id: Some(zid.to_string()), + attributes: None, + }; + + let decision = self.policy_decision_point(s, ke, action); + Ok(false) + } + pub fn permission_request_builder( + msg: zenoh_protocol::network::NetworkMessage, + action: Action, + ) { + + /* + input: msg body + output: (sub,ke,act) + function: extract relevant info from the incoming msg body + build the subject [ID, Attributes and Roles] + then use that to build the request [subject, key-expression, action ] + return request to PEP + */ + /* + PHASE1: just extract the ID (zid?) from the msg; can later add attributes to the list. have a struct with ID and attributes field (both Option) + */ + } + pub fn policy_decision_point(&self, subject: Subject, ke: String, action: u8) -> ZResult { + /* + input: (sub,ke,act) + output: true(allow)/false(deny) + function: process the request from PEP against policy list + policy list will(might) be a hashmap of subject:rules_vector (test and discuss) + */ + /* + PHASE1: policy decisions are hardcoded against the policy list; can later update them using a config file. + */ + //representaiton of policy list as a hashmap of trees? + // HashMap> + //use KeTrees for mapping R/W values? 
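// ---------------------------------------------------------------------------
// [Editorial sketch, not part of the original patch] The surrounding comments
// describe keeping the PHASE1 policy list as a map from subject to per-action
// allow/deny rules, with KeBoxTree-based key-expression matching mentioned as a
// possible later refinement. Below is a minimal illustration of that shape using
// only std collections and exact key matching; every name here (RuleSet,
// PolicyList, decide) and the u8 action encoding are assumptions made for the
// example, not items defined anywhere in this patch series.
use std::collections::{HashMap, HashSet};

/// Hypothetical per-action rule set: key expressions explicitly allowed or denied.
struct RuleSet {
    allow: HashSet<String>,
    deny: HashSet<String>,
}

/// Hypothetical policy list: subject id -> action code (e.g. 0 = read, 1 = write) -> rules.
type PolicyList = HashMap<String, HashMap<u8, RuleSet>>;

/// Minimal decision over the sketched structure: an explicit deny always wins,
/// and anything not explicitly allowed is denied (default-deny).
fn decide(policy: &PolicyList, subject: &str, ke: &str, action: u8) -> bool {
    policy
        .get(subject)
        .and_then(|actions| actions.get(&action))
        .map(|rules| !rules.deny.contains(ke) && rules.allow.contains(ke))
        .unwrap_or(false)
}
// A real implementation would replace the HashSet lookups with key-expression
// matching (for instance a KeBoxTree walk) so that wildcard expressions such as
// "demo/**" could grant or deny whole subtrees instead of exact keys only.
// ---------------------------------------------------------------------------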
//need to check this out + Ok(false) + } + + pub fn policy_resource_point() { + + /* + input: config file value along with &self + output: loads the appropriate policy into the memory and returns back self (now with added policy info); might also select AC type (ACL or ABAC) + */ + + /* + PHASE1: just have a vector of structs containing these values; later we can load them here from config + */ + } +} + +pub fn testing_ke() {} From c5579c9a15189d9964e23a0e9e98793e198eefce Mon Sep 17 00:00:00 2001 From: snehilzs Date: Fri, 26 Jan 2024 22:04:01 +0100 Subject: [PATCH 041/122] WIP:added interceptor code and PEP logic --- zenoh/src/net/routing/interceptor/authz.rs | 283 ++++++++++++++++----- zenoh/src/net/routing/interceptor/mod.rs | 115 ++++++++- 2 files changed, 328 insertions(+), 70 deletions(-) diff --git a/zenoh/src/net/routing/interceptor/authz.rs b/zenoh/src/net/routing/interceptor/authz.rs index ae1188352c..b387872cc2 100644 --- a/zenoh/src/net/routing/interceptor/authz.rs +++ b/zenoh/src/net/routing/interceptor/authz.rs @@ -2,106 +2,238 @@ use std::fmt; // use casbin::{CoreApi, Enforcer}; use super::RoutingContext; -use casbin::prelude::*; -use zenoh_keyexpr::keyexpr_tree::box_tree::KeBoxTree; +use serde::{Deserialize, Serialize}; +use serde_json::{Result, Value}; +use zenoh_config::ZenohId; +//use ZenohID; +//use zenoh_keyexpr::keyexpr_tree::box_tree::KeBoxTree; use zenoh_protocol::network::NetworkMessage; use zenoh_result::ZResult; +use std::{collections::HashMap, error::Error}; + +#[derive(Clone, Debug, Serialize, Deserialize)] pub enum Action { - NONE, - READ, - WRITE, - READWRITE, - SUBSCRIBEDECLARE, + Read, + Write, + Both, } -impl fmt::Debug for Action { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{:?}", self) - } +pub struct NewCtx { + pub(crate) ctx: RoutingContext, + pub(crate) zid: Option, } -impl fmt::Display for Action { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{:?}", self) - } + +#[derive(Debug, Serialize, Deserialize)] +pub struct Request { + sub: Subject, + obj: String, + action: Action, } -pub trait ZAuth { - fn authz_testing(&self, _: String, _: String, _: String) -> ZResult; +pub struct RequestBuilder { + sub: Option, + obj: Option, + action: Option, } -impl ZAuth for Enforcer { - fn authz_testing(&self, zid: String, ke: String, act: String) -> ZResult { - /* - (zid, keyexpr, act): these values should be extraced from the authn code. - has to be atomic, to avoid another process sending the wrong info - */ - - if let Ok(authorized) = self.enforce((zid.clone(), ke.clone(), act.clone())) { - Ok(authorized) - } else { - println!("policy enforcement error"); - Ok(false) +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct Subject { + id: ZenohId, + attributes: Option>, //might be mapped to u8 values eventually +} + +//subject_builder (add ID, attributes, roles) + +pub struct SubjectBuilder { + id: Option, + attributes: Option>, +} + +//request_builder (add subject, resource, action) //do we need one? 
+ +impl RequestBuilder { + pub fn default() -> Self { + RequestBuilder { + sub: None, + obj: None, + action: None, } } + pub fn new() -> Self { + RequestBuilder::default() + //ctreas the default request + } + + pub fn sub(&mut self, sub: impl Into) -> &mut Self { + //adds subject + self.sub.insert(sub.into()); + self + } + + pub fn obj(&mut self, obj: impl Into) -> &mut Self { + self.obj.insert(obj.into()); + self + } + + pub fn action(&mut self, action: impl Into) -> &mut Self { + self.action.insert(action.into()); + self + } + + pub fn build(&mut self) -> ZResult { + let Some(sub) = self.sub; + let Some(obj) = self.obj; + let Some(action) = self.action; + + Ok(Request { sub, obj, action }) + } } -pub async fn start_authz() -> Result { - // get file value - let mut e = Enforcer::new("keymatch_model.conf", "keymatch_policy.csv").await?; - e.enable_log(true); - Ok(e) +impl SubjectBuilder { + pub fn new() -> Self { + //creates the default request + SubjectBuilder { + id: None, + attributes: None, + } + } + + pub fn id(&mut self, id: impl Into) -> &mut Self { + //adds subject + self.id.insert(id.into()); + self + } + + pub fn attributes(&mut self, attributes: impl Into>) -> &mut Self { + self.attributes.insert(attributes.into()); + self + } + + pub fn build(&mut self) -> ZResult { + let Some(id) = self.id; + let attr = self.attributes; + Ok(Subject { + id, + attributes: attr, + }) + } } -struct StaticPolicy { - //should have values needed for the policy to work - //config for local static policy +// impl fmt::Debug for Action { +// fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { +// write!(f, "{:?}", self) +// } +// } +// impl fmt::Display for Action { +// fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { +// write!(f, "{:?}", self) +// } +// } + +pub trait ZAuth { + fn authz_testing(&self, _: String, _: String, _: String) -> ZResult; } -//policy builder (add policy type and ruleset) +// impl ZAuth for Enforcer { +// fn authz_testing(&self, zid: String, ke: String, act: String) -> ZResult { +// /* +// (zid, keyexpr, act): these values should be extraced from the authn code. +// has to be atomic, to avoid another process sending the wrong info +// */ +// if let Ok(authorized) = self.enforce((zid.clone(), ke.clone(), act.clone())) { +// Ok(authorized) +// } else { +// println!("policy enforcement error"); +// Ok(false) +// } +// } +// } -//subject_builder (add ID, attributes, roles) +/* replaced with PolicyEnforcer::init() function */ -//request_builder (add subject, resource, action) //do we need one? 
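As the comment above notes, the casbin Enforcer bootstrap is superseded by PolicyEnforcer::init(); a rough sketch of the replacement call site, mirroring the interceptor_factories() change in mod.rs further down (`new_ctx` is a placeholder for a NewCtx built from the intercepted message):

    let policy_enforcer = PolicyEnforcer::init().expect("error setting up access control");
    // then, per intercepted message:
    let decision = policy_enforcer.policy_enforcement_point(new_ctx, Action::Write)?;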
+// pub async fn start_authz() -> Result { +// // get file value +// let mut e = Enforcer::new("keymatch_model.conf", "keymatch_policy.csv").await?; +// e.enable_log(true); +// Ok(e) +// } -pub struct Subject { - //ID and list of possible policy attributes - id: Option, //can convert uuid to string and back - attributes: Option>, +//struct that defines each policy (add policy type and ruleset) +#[derive(Serialize, Deserialize, Debug)] +struct Policy { + // policy_type: u8, //for l,a,r [access-list, abac, rbac type policy] will be assuming acl for now + sub: Subject, + ke: String, + action: Action, + permission: bool, } +#[derive(Clone)] pub struct PolicyEnforcer { - policy_config: String, + policy_config: HashMap>, } impl PolicyEnforcer { - pub fn init_policy(pol_string: String) -> ZResult { + pub fn init() -> ZResult { + /* + Initializs the policy for the control logic + loads policy into memory from file/network path + creates the policy hashmap with the ke-tries for ke matching + should have polic-type in the mix here...need to verify + */ + //policy should be derived from config/file (hardcoding it for now) + //config for local static policy + let static_policy = r#"{ + ["subject":{"id": "muyid", "attributes": "location"},"ke":"my_ke","action":"Read","permission":true], + ["subject":{"id": "muyid", "attributes": "location"},"ke":"myke","action":"Read","permission":true], + ["subject":{"id": "muyid", "attributes": "location"},"ke":"myke","action":"Read","permission":true], + ["subject":{"id": "muyid", "attributes": "location"},"ke":"myke","action":"Read","permission":true] + }"#; + //desearlize to policy + let get_policy: Policy = serde_json::from_str(static_policy)?; + println!("print policy {:?}", get_policy); + let policy = Self::build_policy_map(get_policy); + let pe = Self { - policy_config: pol_string, + policy_config: policy, }; + + //also should start the logger here Ok(pe) } - pub fn policy_enforcement_point( - &self, - ctx: RoutingContext, - action: Action, - ) -> ZResult { + + pub fn build_policy_map(policy: Policy) { + + //convert policy to vector of hashmap (WIP) + /* + policy = subject : [ rule_1, + rule_2, + ... + rule_n + ] + where rule_i = action_i : (ke_tree_deny, ke_tree_allow) that deny/allow action_i + */ + } + pub fn policy_enforcement_point(&self, new_ctx: NewCtx, action: Action) -> ZResult { /* input: msg body - output: allow deny - function: depending on the msg, calls build_permission_request(msg,action), passes its output to policy_decision_point() - uses allow/deny output to block or pass the msg to routing table + output: allow/deny + function: depending on the msg, builds the subject, builds the request, passes the request to policy_decision_point() + collects result from PDP and then uses that allow/deny output to block or pass the msg to routing table */ - let ke = ctx.full_expr().unwrap().to_owned(); - let zid = ctx.inface().unwrap().state.zid; + + let Some(ke) = new_ctx.ctx.full_expr(); + let zid = new_ctx.zid.unwrap(); //build subject here - //for now just set it - let s = Subject { - id: Some(zid.to_string()), - attributes: None, - }; - let decision = self.policy_decision_point(s, ke, action); + let subject = SubjectBuilder::new().id(zid).build()?; //.attributes(None).build(); + let request = RequestBuilder::new() + .sub(subject) + .obj(ke) + .action(action) + .build()?; + let decision = self.policy_decision_point(request)?; Ok(false) } pub fn permission_request_builder( @@ -121,9 +253,9 @@ impl PolicyEnforcer { PHASE1: just extract the ID (zid?) 
from the msg; can later add attributes to the list. have a struct with ID and attributes field (both Option) */ } - pub fn policy_decision_point(&self, subject: Subject, ke: String, action: u8) -> ZResult { + pub fn policy_decision_point(&self, request: Request) -> ZResult { /* - input: (sub,ke,act) + input: (request) output: true(allow)/false(deny) function: process the request from PEP against policy list policy list will(might) be a hashmap of subject:rules_vector (test and discuss) @@ -133,7 +265,10 @@ impl PolicyEnforcer { */ //representaiton of policy list as a hashmap of trees? // HashMap> - //use KeTrees for mapping R/W values? //need to check this out + /* use KeTrees for mapping R/W values? //need to check this out + tried KeTrees, didn't work + need own algorithm for pattern matching via modified trie-search + */ Ok(false) } @@ -150,4 +285,18 @@ impl PolicyEnforcer { } } -pub fn testing_ke() {} +#[cfg(test)] +mod tests { + #[test] + fn testing_acl_rules() { + //sample test stub + let result = 1 + 1; + assert_eq!(result, 2); + } + #[test] + fn testing_abac_rules() { + //sample test stub + let result = 1 + 1; + assert_eq!(result, 2); + } +} diff --git a/zenoh/src/net/routing/interceptor/mod.rs b/zenoh/src/net/routing/interceptor/mod.rs index 22e0e4e549..29ac2b2841 100644 --- a/zenoh/src/net/routing/interceptor/mod.rs +++ b/zenoh/src/net/routing/interceptor/mod.rs @@ -17,9 +17,14 @@ //! This module is intended for Zenoh's internal use. //! //! [Click here for Zenoh's documentation](../zenoh/index.html) +//! +mod authz; +use self::authz::{Action, NewCtx}; + use super::RoutingContext; -use zenoh_config::Config; -use zenoh_protocol::network::NetworkMessage; +use crate::net::routing::interceptor::authz::{PolicyEnforcer, ZAuth}; +use zenoh_config::{Config, ZenohId}; +use zenoh_protocol::network::{NetworkBody, NetworkMessage}; use zenoh_transport::{multicast::TransportMulticast, unicast::TransportUnicast}; pub(crate) trait InterceptorTrait { @@ -48,7 +53,17 @@ pub(crate) fn interceptor_factories(_config: &Config) -> Vec // Add interceptors here // TODO build the list of intercetors with the correct order from the config // vec![Box::new(LoggerInterceptor {})] - vec![] + /* + this is the singleton for interceptors + all init code for AC should be called here + example, for casbin we are using the enforecer init here + for in-built AC, we will load the policy rules here and also set the parameters (type of policy etc) + */ + println!("the interceptor is initialized"); + + let policy_enforcer = PolicyEnforcer::init().expect("error setting up access control"); + //store the enforcer instance for use in rest of the sessions + vec![Box::new(AclEnforcer { e: policy_enforcer })] } pub(crate) struct InterceptorsChain { @@ -142,3 +157,97 @@ impl InterceptorFactoryTrait for LoggerInterceptor { Some(Box::new(IngressMsgLogger {})) } } + +pub(crate) struct AclEnforcer { + e: PolicyEnforcer, +} + +impl InterceptorFactoryTrait for AclEnforcer { + fn new_transport_unicast( + &self, + transport: &TransportUnicast, + ) -> (Option, Option) { + let e = self.e; + + let uid = transport.get_zid().unwrap(); + ( + Some(Box::new(IngressAclEnforcer { e })), + Some(Box::new(EgressAclEnforcer { zid: Some(uid), e })), + ) + } + + fn new_transport_multicast( + &self, + _transport: &TransportMulticast, + ) -> Option { + let e = self.e; + //let uid = _transport.get_zid().unwrap(); + + Some(Box::new(EgressAclEnforcer { e, zid: None })) + } + + fn new_peer_multicast(&self, _transport: &TransportMulticast) -> Option { + let 
e = self.e; + Some(Box::new(IngressAclEnforcer { e })) + } +} + +pub(crate) struct IngressAclEnforcer { + // e: Option, + e: PolicyEnforcer, +} + +impl InterceptorTrait for IngressAclEnforcer { + fn intercept( + &self, + ctx: RoutingContext, + ) -> Option> { + //intercept msg and send it to PEP + if let NetworkBody::Push(push) = ctx.msg.body { + if let zenoh_protocol::zenoh::PushBody::Put(_put) = push.payload { + let e = self.e; + let act = Action::Write; + let new_ctx = NewCtx { ctx, zid: None }; + + let decision = e.policy_enforcement_point(new_ctx, act).unwrap(); + if !decision { + println!("Not allowed to Write"); + return None; + } else { + println!("Allowed to Write"); + } + } + } + Some(ctx) + } +} + +pub(crate) struct EgressAclEnforcer { + e: PolicyEnforcer, + zid: Option, +} + +impl InterceptorTrait for EgressAclEnforcer { + fn intercept( + &self, + ctx: RoutingContext, + ) -> Option> { + //intercept msg and send it to PEP + if let NetworkBody::Push(push) = ctx.msg.body { + if let zenoh_protocol::zenoh::PushBody::Put(_put) = push.payload { + let e = self.e; + let act = Action::Read; + let new_ctx = NewCtx { ctx, zid: self.zid }; + let decision = e.policy_enforcement_point(new_ctx, act).unwrap(); + if !decision { + println!("Not allowed to Read"); + return None; + } else { + println!("Allowed to Read"); + } + } + } + + Some(ctx) + } +} From 99d12846ecb16fb64186ae856763e4b9ce09a655 Mon Sep 17 00:00:00 2001 From: snehilzs Date: Mon, 29 Jan 2024 11:26:27 +0100 Subject: [PATCH 042/122] WIP:added datastructures for policy and PDP logic --- zenoh/src/net/routing/interceptor/authz.rs | 95 ++++++++++++---------- zenoh/src/net/routing/interceptor/mod.rs | 2 +- 2 files changed, 55 insertions(+), 42 deletions(-) diff --git a/zenoh/src/net/routing/interceptor/authz.rs b/zenoh/src/net/routing/interceptor/authz.rs index b387872cc2..627b1b9b27 100644 --- a/zenoh/src/net/routing/interceptor/authz.rs +++ b/zenoh/src/net/routing/interceptor/authz.rs @@ -1,4 +1,4 @@ -use std::fmt; +use std::{fmt, hash::Hash}; // use casbin::{CoreApi, Enforcer}; use super::RoutingContext; @@ -12,7 +12,7 @@ use zenoh_result::ZResult; use std::{collections::HashMap, error::Error}; -#[derive(Clone, Debug, Serialize, Deserialize)] +#[derive(Clone, Debug, Serialize, Deserialize, Eq, Hash, PartialEq)] pub enum Action { Read, Write, @@ -37,7 +37,7 @@ pub struct RequestBuilder { action: Option, } -#[derive(Clone, Debug, Serialize, Deserialize)] +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq, Hash)] pub struct Subject { id: ZenohId, attributes: Option>, //might be mapped to u8 values eventually @@ -120,21 +120,10 @@ impl SubjectBuilder { } } -// impl fmt::Debug for Action { -// fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { -// write!(f, "{:?}", self) -// } -// } -// impl fmt::Display for Action { -// fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { -// write!(f, "{:?}", self) -// } +// pub trait ZAuth { +// fn authz_testing(&self, _: String, _: String, _: String) -> ZResult; // } -pub trait ZAuth { - fn authz_testing(&self, _: String, _: String, _: String) -> ZResult; -} - // impl ZAuth for Enforcer { // fn authz_testing(&self, zid: String, ke: String, act: String) -> ZResult { // /* @@ -160,8 +149,8 @@ pub trait ZAuth { // } //struct that defines each policy (add policy type and ruleset) -#[derive(Serialize, Deserialize, Debug)] -struct Policy { +#[derive(Serialize, Deserialize, Debug, Clone)] +struct Rule { // policy_type: u8, //for l,a,r [access-list, abac, rbac type policy] will be assuming 
acl for now sub: Subject, ke: String, @@ -169,10 +158,15 @@ struct Policy { permission: bool, } -#[derive(Clone)] -pub struct PolicyEnforcer { - policy_config: HashMap>, -} +#[derive(Clone, PartialEq, Eq, Hash)] +struct SubAct(Subject, Action); + +#[derive(Clone, PartialEq, Eq, Hash)] +pub struct PolicyEnforcer(HashMap); //need to add tries here + +#[derive(Clone, PartialEq, Eq, Hash)] + +pub struct KeTrie {} impl PolicyEnforcer { pub fn init() -> ZResult { @@ -184,28 +178,21 @@ impl PolicyEnforcer { */ //policy should be derived from config/file (hardcoding it for now) //config for local static policy - let static_policy = r#"{ - ["subject":{"id": "muyid", "attributes": "location"},"ke":"my_ke","action":"Read","permission":true], - ["subject":{"id": "muyid", "attributes": "location"},"ke":"myke","action":"Read","permission":true], - ["subject":{"id": "muyid", "attributes": "location"},"ke":"myke","action":"Read","permission":true], - ["subject":{"id": "muyid", "attributes": "location"},"ke":"myke","action":"Read","permission":true] - }"#; - //desearlize to policy - let get_policy: Policy = serde_json::from_str(static_policy)?; - println!("print policy {:?}", get_policy); - let policy = Self::build_policy_map(get_policy); - - let pe = Self { - policy_config: policy, - }; + let policy_info = Self::policy_resource_point().unwrap(); + + //desearlize to vector of rules + let rule_set: Vec = serde_json::from_str(policy_info)?; + println!("print policy {:?}", rule_set); + let pe = Self::build_policy_map(rule_set).expect("policy not established"); //also should start the logger here Ok(pe) } - pub fn build_policy_map(policy: Policy) { + pub fn build_policy_map(policy: Vec) -> ZResult { + let pe: PolicyEnforcer; - //convert policy to vector of hashmap (WIP) + //convert vector of rules to a hashmap mapping subact to ketree (WIP) /* policy = subject : [ rule_1, rule_2, @@ -214,6 +201,7 @@ impl PolicyEnforcer { ] where rule_i = action_i : (ke_tree_deny, ke_tree_allow) that deny/allow action_i */ + Ok(pe) } pub fn policy_enforcement_point(&self, new_ctx: NewCtx, action: Action) -> ZResult { /* @@ -253,11 +241,12 @@ impl PolicyEnforcer { PHASE1: just extract the ID (zid?) from the msg; can later add attributes to the list. 
have a struct with ID and attributes field (both Option) */ } + pub fn policy_decision_point(&self, request: Request) -> ZResult { /* input: (request) output: true(allow)/false(deny) - function: process the request from PEP against policy list + function: process the request from PEP against the policy (self) policy list will(might) be a hashmap of subject:rules_vector (test and discuss) */ /* @@ -269,11 +258,24 @@ impl PolicyEnforcer { tried KeTrees, didn't work need own algorithm for pattern matching via modified trie-search */ + + //extract subject and action from request and create subact [this is our key for hashmap] + let subact = SubAct(request.sub, request.action); + let ke = request.obj; + // type policymap = + match self.0.get(&subact) { + Some(ktrie) => { + + // check if request ke has a match in ke-trie + // if ke in ke-trie, then Ok(true) else Ok(false) + } + None => return Ok(false), + } + Ok(false) } - pub fn policy_resource_point() { - + pub fn policy_resource_point() -> ZResult<&'static str> { /* input: config file value along with &self output: loads the appropriate policy into the memory and returns back self (now with added policy info); might also select AC type (ACL or ABAC) @@ -282,9 +284,20 @@ impl PolicyEnforcer { /* PHASE1: just have a vector of structs containing these values; later we can load them here from config */ + let static_policy = r#"{ + ["subject":{"id": 001, "attributes": "location_1"},"ke":"demo/a/*","action":"Read","permission":true], + ["subject":{"id": 002, "attributes": "location_1"},"ke":"demo/a/*","action":"Read","permission":true], + ["subject":{"id": 002, "attributes": "location_1"},"ke":"demo/a/*","action":"Read","permission":true], + ["subject":{"id": 003, "attributes": "location_1"},"ke":"demo/*","action":"Both","permission":true] + }"#; + Ok(static_policy) } } +// fn ketrie_matcher(ke,ketrie){ + +// } + #[cfg(test)] mod tests { #[test] diff --git a/zenoh/src/net/routing/interceptor/mod.rs b/zenoh/src/net/routing/interceptor/mod.rs index 29ac2b2841..651d75f38c 100644 --- a/zenoh/src/net/routing/interceptor/mod.rs +++ b/zenoh/src/net/routing/interceptor/mod.rs @@ -22,7 +22,7 @@ mod authz; use self::authz::{Action, NewCtx}; use super::RoutingContext; -use crate::net::routing::interceptor::authz::{PolicyEnforcer, ZAuth}; +use crate::net::routing::interceptor::authz::PolicyEnforcer; use zenoh_config::{Config, ZenohId}; use zenoh_protocol::network::{NetworkBody, NetworkMessage}; use zenoh_transport::{multicast::TransportMulticast, unicast::TransportUnicast}; From e11313fc05f8d75b1e15a9fb4664e1706f823cb4 Mon Sep 17 00:00:00 2001 From: snehilzs Date: Tue, 30 Jan 2024 10:06:24 +0100 Subject: [PATCH 043/122] WIP:first basic acl prototype --- Cargo.lock | 33 ++++++ rules.csv | 6 ++ zenoh/Cargo.toml | 3 +- zenoh/src/net/routing/interceptor/authz.rs | 113 ++++++++++++++++----- zenoh/src/net/routing/interceptor/mod.rs | 36 ++++--- 5 files changed, 148 insertions(+), 43 deletions(-) create mode 100644 rules.csv diff --git a/Cargo.lock b/Cargo.lock index 7ff6cbd6ec..ce5f077779 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -984,6 +984,27 @@ dependencies = [ "subtle", ] +[[package]] +name = "csv" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac574ff4d437a7b5ad237ef331c17ccca63c46479e5b5453eb8e10bb99a759fe" +dependencies = [ + "csv-core", + "itoa", + "ryu", + "serde", +] + +[[package]] +name = "csv-core" +version = "0.1.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"5efa2b3d7902f4b634a20cae3c9c4e6209dc4779feb6863329607560143efa70" +dependencies = [ + "memchr", +] + [[package]] name = "ctr" version = "0.6.0" @@ -1264,6 +1285,16 @@ dependencies = [ "percent-encoding", ] +[[package]] +name = "fr-trie" +version = "0.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44e22abe50d63925651ab549a8ffdfe6cb6a7b058ea1cf99fd4c2b6dbc814d61" +dependencies = [ + "bitflags 1.3.2", + "serde", +] + [[package]] name = "fraction" version = "0.13.1" @@ -4568,10 +4599,12 @@ dependencies = [ "async-trait", "base64 0.21.4", "const_format", + "csv", "env_logger", "event-listener 4.0.0", "flume", "form_urlencoded", + "fr-trie", "futures", "git-version", "hex", diff --git a/rules.csv b/rules.csv new file mode 100644 index 0000000000..5fe93e83d6 --- /dev/null +++ b/rules.csv @@ -0,0 +1,6 @@ +subject,ke,action,permission +aaa3b411006ad57868988f9fec672a31,demo/example/*,Write,true +aaa3b411006ad57868988f9fec672a31,demo/example/mypub,Write,false +bbb3b411006ad57868988f9fec672a31,demo/example/mypub,Read,true +aaabbb11006ad57868988f9fec672a31,demo/*,Read,true +aaabbb11006ad57868988f9fec672a31,demo/*,Write,true \ No newline at end of file diff --git a/zenoh/Cargo.toml b/zenoh/Cargo.toml index 0177c2d454..f68cbe20ad 100644 --- a/zenoh/Cargo.toml +++ b/zenoh/Cargo.toml @@ -104,7 +104,8 @@ zenoh-shm = { workspace = true, optional = true } zenoh-sync = { workspace = true } zenoh-transport = { workspace = true } zenoh-util = { workspace = true } - +fr-trie = "*" +csv = "1.3.0" [build-dependencies] rustc_version = { workspace = true } diff --git a/zenoh/src/net/routing/interceptor/authz.rs b/zenoh/src/net/routing/interceptor/authz.rs index 627b1b9b27..add36f3f9c 100644 --- a/zenoh/src/net/routing/interceptor/authz.rs +++ b/zenoh/src/net/routing/interceptor/authz.rs @@ -2,6 +2,7 @@ use std::{fmt, hash::Hash}; // use casbin::{CoreApi, Enforcer}; use super::RoutingContext; +use csv::ReaderBuilder; use serde::{Deserialize, Serialize}; use serde_json::{Result, Value}; use zenoh_config::ZenohId; @@ -10,6 +11,9 @@ use zenoh_config::ZenohId; use zenoh_protocol::network::NetworkMessage; use zenoh_result::ZResult; +use fr_trie::glob::acl::{Acl, AclTrie, Permissions}; +use fr_trie::glob::GlobMatcher; + use std::{collections::HashMap, error::Error}; #[derive(Clone, Debug, Serialize, Deserialize, Eq, Hash, PartialEq)] @@ -19,8 +23,8 @@ pub enum Action { Both, } -pub struct NewCtx { - pub(crate) ctx: RoutingContext, +pub struct NewCtx<'a> { + pub(crate) ke: &'a str, pub(crate) zid: Option, } @@ -37,10 +41,12 @@ pub struct RequestBuilder { action: Option, } +type KeTree = AclTrie; + #[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq, Hash)] pub struct Subject { id: ZenohId, - attributes: Option>, //might be mapped to u8 values eventually + // attributes: Option>, //might be mapped to other types eventually } //subject_builder (add ID, attributes, roles) @@ -82,9 +88,9 @@ impl RequestBuilder { } pub fn build(&mut self) -> ZResult { - let Some(sub) = self.sub; - let Some(obj) = self.obj; - let Some(action) = self.action; + let sub = self.sub.clone().unwrap(); + let obj = self.obj.clone().unwrap(); + let action = self.action.clone().unwrap(); Ok(Request { sub, obj, action }) } @@ -111,11 +117,11 @@ impl SubjectBuilder { } pub fn build(&mut self) -> ZResult { - let Some(id) = self.id; - let attr = self.attributes; + let id = self.id.unwrap(); + let attr = self.attributes.clone(); Ok(Subject { id, - attributes: attr, + // attributes: attr, }) } } @@ -150,7 +156,7 @@ 
impl SubjectBuilder { //struct that defines each policy (add policy type and ruleset) #[derive(Serialize, Deserialize, Debug, Clone)] -struct Rule { +pub struct Rule { // policy_type: u8, //for l,a,r [access-list, abac, rbac type policy] will be assuming acl for now sub: Subject, ke: String, @@ -159,10 +165,10 @@ struct Rule { } #[derive(Clone, PartialEq, Eq, Hash)] -struct SubAct(Subject, Action); +pub struct SubAct(Subject, Action); -#[derive(Clone, PartialEq, Eq, Hash)] -pub struct PolicyEnforcer(HashMap); //need to add tries here +#[derive(Clone)] //, PartialEq, Eq, Hash)] +pub struct PolicyEnforcer(HashMap); //need to add tries here #[derive(Clone, PartialEq, Eq, Hash)] @@ -178,10 +184,12 @@ impl PolicyEnforcer { */ //policy should be derived from config/file (hardcoding it for now) //config for local static policy - let policy_info = Self::policy_resource_point().unwrap(); + // let policy_info = Self::policy_resource_point().unwrap(); //desearlize to vector of rules - let rule_set: Vec = serde_json::from_str(policy_info)?; + // let rule_set: Vec = serde_json::from_str(policy_info)?; + + let rule_set = Self::policy_resource_point().unwrap(); println!("print policy {:?}", rule_set); let pe = Self::build_policy_map(rule_set).expect("policy not established"); @@ -189,8 +197,9 @@ impl PolicyEnforcer { Ok(pe) } - pub fn build_policy_map(policy: Vec) -> ZResult { - let pe: PolicyEnforcer; + pub fn build_policy_map(rule_set: Vec) -> ZResult { + //let pe: PolicyEnforcer; + let mut policy = PolicyEnforcer(HashMap::new()); //convert vector of rules to a hashmap mapping subact to ketree (WIP) /* @@ -201,7 +210,39 @@ impl PolicyEnforcer { ] where rule_i = action_i : (ke_tree_deny, ke_tree_allow) that deny/allow action_i */ - Ok(pe) + + // let mut policy = Policy(HashMap::new()); + //now create a hashmap for ketrees ((sub->action)->ketree) + let rules: HashMap; //u8 is 0 for disallowed and 1 for allowed?? + //iterate through the map to get + for v in rule_set { + //for each rule + //extract subject and action + let sub = v.sub; + let action = v.action; + let ke = v.ke; + let perm = v.permission; + //create subact + let subact = SubAct(sub, action); + //match subact in the policy hashmap + if !policy.0.contains_key(&subact) { + //create new entry for subact + ketree + let mut ketree = KeTree::new(); + ketree.insert(Acl::new(&ke), Permissions::READ); + // ketree.insert(ke,1).unwrap(); //1 for allowed?? + policy.0.insert(subact, ketree); + } else { + let mut ketree = policy.0.get_mut(&subact).unwrap(); + // ketree.insert(ke,1).unwrap(); //1 for allowed?? 
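            // Note (sketch only, not implemented in this patch): `perm` is read above but
            // never consulted, and Permissions::READ is inserted for every rule; per the
            // earlier design comment, each SubAct is eventually meant to map to an
            // (allow, deny) pair of tries, with the rule's permission selecting which
            // trie the key expression is inserted into.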
+ // let mut ketree = KeTree::new(); + let x = Permissions::READ; + ketree.insert(Acl::new(&ke), Permissions::READ); + // policy.0.insert(subact,ketree); + } + } + //return policy; + + Ok(policy) } pub fn policy_enforcement_point(&self, new_ctx: NewCtx, action: Action) -> ZResult { /* @@ -211,7 +252,7 @@ impl PolicyEnforcer { collects result from PDP and then uses that allow/deny output to block or pass the msg to routing table */ - let Some(ke) = new_ctx.ctx.full_expr(); + let ke = new_ctx.ke; let zid = new_ctx.zid.unwrap(); //build subject here @@ -265,9 +306,13 @@ impl PolicyEnforcer { // type policymap = match self.0.get(&subact) { Some(ktrie) => { - // check if request ke has a match in ke-trie // if ke in ke-trie, then Ok(true) else Ok(false) + //let trie = self.0.get.(&subact).clone(); + let result = ktrie.get_merge::(&Acl::new(&ke)); + if let Some(value) = result { + return Ok(true); + } } None => return Ok(false), } @@ -275,7 +320,7 @@ impl PolicyEnforcer { Ok(false) } - pub fn policy_resource_point() -> ZResult<&'static str> { + pub fn policy_resource_point() -> ZResult> { /* input: config file value along with &self output: loads the appropriate policy into the memory and returns back self (now with added policy info); might also select AC type (ACL or ABAC) @@ -284,13 +329,25 @@ impl PolicyEnforcer { /* PHASE1: just have a vector of structs containing these values; later we can load them here from config */ - let static_policy = r#"{ - ["subject":{"id": 001, "attributes": "location_1"},"ke":"demo/a/*","action":"Read","permission":true], - ["subject":{"id": 002, "attributes": "location_1"},"ke":"demo/a/*","action":"Read","permission":true], - ["subject":{"id": 002, "attributes": "location_1"},"ke":"demo/a/*","action":"Read","permission":true], - ["subject":{"id": 003, "attributes": "location_1"},"ke":"demo/*","action":"Both","permission":true] - }"#; - Ok(static_policy) + let mut rule_set: Vec = Vec::new(); + let mut rdr = ReaderBuilder::new() + .has_headers(true) + .from_path("rules.csv") + .unwrap(); + + for result in rdr.deserialize() { + if let Ok(rec) = result { + let record: Rule = rec; + rule_set.push(record); + } + } + // let static_policy = r#"{ + // ["subject":{"id": 001, "attributes": "location_1"},"ke":"demo/a/*","action":"Read","permission":true], + // ["subject":{"id": 002, "attributes": "location_1"},"ke":"demo/a/*","action":"Read","permission":true], + // ["subject":{"id": 002, "attributes": "location_1"},"ke":"demo/a/*","action":"Read","permission":true], + // ["subject":{"id": 003, "attributes": "location_1"},"ke":"demo/*","action":"Both","permission":true] + // }"#; + Ok(rule_set) } } diff --git a/zenoh/src/net/routing/interceptor/mod.rs b/zenoh/src/net/routing/interceptor/mod.rs index 651d75f38c..bd5f51237b 100644 --- a/zenoh/src/net/routing/interceptor/mod.rs +++ b/zenoh/src/net/routing/interceptor/mod.rs @@ -167,12 +167,15 @@ impl InterceptorFactoryTrait for AclEnforcer { &self, transport: &TransportUnicast, ) -> (Option, Option) { - let e = self.e; + let e = &self.e; let uid = transport.get_zid().unwrap(); ( - Some(Box::new(IngressAclEnforcer { e })), - Some(Box::new(EgressAclEnforcer { zid: Some(uid), e })), + Some(Box::new(IngressAclEnforcer { e: e.clone() })), + Some(Box::new(EgressAclEnforcer { + zid: Some(uid), + e: e.clone(), + })), ) } @@ -180,15 +183,18 @@ impl InterceptorFactoryTrait for AclEnforcer { &self, _transport: &TransportMulticast, ) -> Option { - let e = self.e; + let e = &self.e; //let uid = _transport.get_zid().unwrap(); - 
Some(Box::new(EgressAclEnforcer { e, zid: None })) + Some(Box::new(EgressAclEnforcer { + e: e.clone(), + zid: None, + })) } fn new_peer_multicast(&self, _transport: &TransportMulticast) -> Option { - let e = self.e; - Some(Box::new(IngressAclEnforcer { e })) + let e = &self.e; + Some(Box::new(IngressAclEnforcer { e: e.clone() })) } } @@ -203,12 +209,13 @@ impl InterceptorTrait for IngressAclEnforcer { ctx: RoutingContext, ) -> Option> { //intercept msg and send it to PEP - if let NetworkBody::Push(push) = ctx.msg.body { + if let NetworkBody::Push(push) = ctx.msg.body.clone() { if let zenoh_protocol::zenoh::PushBody::Put(_put) = push.payload { - let e = self.e; + let e = &self.e; let act = Action::Write; - let new_ctx = NewCtx { ctx, zid: None }; - + let ke: &str = ctx.full_expr().unwrap(); + let zid = ctx.inface().unwrap().state.zid; + let new_ctx = NewCtx { ke, zid: Some(zid) }; //how to get the zid here let decision = e.policy_enforcement_point(new_ctx, act).unwrap(); if !decision { println!("Not allowed to Write"); @@ -233,11 +240,12 @@ impl InterceptorTrait for EgressAclEnforcer { ctx: RoutingContext, ) -> Option> { //intercept msg and send it to PEP - if let NetworkBody::Push(push) = ctx.msg.body { + if let NetworkBody::Push(push) = ctx.msg.body.clone() { if let zenoh_protocol::zenoh::PushBody::Put(_put) = push.payload { - let e = self.e; + let e = &self.e; let act = Action::Read; - let new_ctx = NewCtx { ctx, zid: self.zid }; + let ke: &str = ctx.full_expr().unwrap(); + let new_ctx = NewCtx { ke, zid: self.zid }; let decision = e.policy_enforcement_point(new_ctx, act).unwrap(); if !decision { println!("Not allowed to Read"); From 74e12fd81d999b80c2d1170551c9f4b4d40faff0 Mon Sep 17 00:00:00 2001 From: snehilzs Date: Tue, 30 Jan 2024 17:31:32 +0100 Subject: [PATCH 044/122] WIP:first acl prototype --- DEFAULT_CONFIG.json5 | 28 +- pub_config.json5 | 406 +++++++++++++++++++++ rules.csv | 6 - rules.json5 | 47 +++ sub_config.json5 | 406 +++++++++++++++++++++ zenoh/src/net/routing/interceptor/authz.rs | 119 +++--- zenoh/src/net/routing/interceptor/mod.rs | 8 +- 7 files changed, 937 insertions(+), 83 deletions(-) create mode 100644 pub_config.json5 delete mode 100644 rules.csv create mode 100644 rules.json5 create mode 100644 sub_config.json5 diff --git a/DEFAULT_CONFIG.json5 b/DEFAULT_CONFIG.json5 index bde1b8fd03..e76b57e552 100644 --- a/DEFAULT_CONFIG.json5 +++ b/DEFAULT_CONFIG.json5 @@ -6,17 +6,14 @@ /// that zenoh runtime will use. /// If not set, a random unsigned 128bit integer will be used. /// WARNING: this id must be unique in your zenoh network. - // id: "1234567890abcdef", - + id: "aaabbb11006ad57868988f9fec672a31", /// The node's mode (router, peer or client) - mode: "peer", - + mode: "router", /// The node's metadata (name, location, DNS name, etc.) Arbitrary JSON data not interpreted by zenohd and available in admin space @/router/ metadata: { name: "strawberry", location: "Penny Lane" }, - /// Which endpoints to connect to. E.g. tcp/localhost:7447. /// By configuring the endpoints, it is possible to tell zenoh which router/peer to connect to at startup. connect: { @@ -24,7 +21,6 @@ // "/

" ], }, - /// Which endpoints to listen on. E.g. tcp/localhost:7447. /// By configuring the endpoints, it is possible to tell zenoh which are the endpoints that other routers, /// peers, or client can use to establish a zenoh session. @@ -50,7 +46,8 @@ /// Which type of Zenoh instances to automatically establish sessions with upon discovery on UDP multicast. /// Accepts a single value or different values for router, peer and client. /// Each value is bit-or-like combinations of "peer", "router" and "client". - autoconnect: { router: "", peer: "router|peer" }, + autoconnect: { router: "", peer: "router|peer" + }, /// Whether or not to listen for scout messages on UDP multicast and reply to them. listen: true, }, @@ -67,24 +64,23 @@ /// Which type of Zenoh instances to automatically establish sessions with upon discovery on gossip. /// Accepts a single value or different values for router, peer and client. /// Each value is bit-or-like combinations of "peer", "router" and "client". - autoconnect: { router: "", peer: "router|peer" }, + autoconnect: { router: "", peer: "router|peer" + }, }, }, - /// Configuration of data messages timestamps management. timestamping: { /// Whether data messages should be timestamped if not already. /// Accepts a single boolean value or different values for router, peer and client. - enabled: { router: true, peer: false, client: false }, + enabled: { router: true, peer: false, client: false + }, /// Whether data messages with timestamps in the future should be dropped or not. /// If set to false (default), messages with timestamps in the future are retimestamped. /// Timestamps are ignored if timestamping is disabled. drop_future_timestamp: false, }, - /// The default timeout to apply to queries in milliseconds. queries_default_timeout: 10000, - /// The routing strategy to use and it's configuration. routing: { /// The routing strategy to use in routers and it's configuration. @@ -101,7 +97,6 @@ mode: "peer_to_peer", }, }, - // /// The declarations aggregation strategy. // aggregation: { // /// A list of key-expressions for which all included subscribers will be aggregated into. @@ -113,7 +108,6 @@ // // key_expression // ], // }, - /// Configure internal transport parameters transport: { unicast: { @@ -267,7 +261,6 @@ }, }, }, - /// Configure the Admin Space /// Unstable: this configuration part works as advertised, but may change in a future release adminspace: { @@ -277,7 +270,6 @@ write: false, }, }, - /// /// Plugins configurations /// @@ -405,7 +397,6 @@ // }, // }, // }, - // /// Plugin configuration example using `__config__` property // plugins: { // rest: { @@ -415,5 +406,4 @@ // __config__: "./plugins/zenoh-plugin-storage-manager/config.json5", // } // }, - -} +} \ No newline at end of file diff --git a/pub_config.json5 b/pub_config.json5 new file mode 100644 index 0000000000..ab4cbc4e21 --- /dev/null +++ b/pub_config.json5 @@ -0,0 +1,406 @@ +/// This file attempts to list and document available configuration elements. +/// For a more complete view of the configuration's structure, check out `zenoh/src/config.rs`'s `Config` structure. +/// Note that the values here are correctly typed, but may not be sensible, so copying this file to change only the parts that matter to you is not good practice. +{ + /// The identifier (as unsigned 128bit integer in hexadecimal lowercase - leading zeros are not accepted) + /// that zenoh runtime will use. + /// If not set, a random unsigned 128bit integer will be used. 
+ /// WARNING: this id must be unique in your zenoh network. + id: "aaa3b411006ad57868988f9fec672a31", + /// The node's mode (router, peer or client) + mode: "client", + /// The node's metadata (name, location, DNS name, etc.) Arbitrary JSON data not interpreted by zenohd and available in admin space @/router/ + metadata: { + name: "strawberry", + location: "Penny Lane" + }, + /// Which endpoints to connect to. E.g. tcp/localhost:7447. + /// By configuring the endpoints, it is possible to tell zenoh which router/peer to connect to at startup. + connect: { + endpoints: [ + // "/
" + ], + }, + /// Which endpoints to listen on. E.g. tcp/localhost:7447. + /// By configuring the endpoints, it is possible to tell zenoh which are the endpoints that other routers, + /// peers, or client can use to establish a zenoh session. + listen: { + endpoints: [ + // "/
" + ], + }, + // /// Configure the scouting mechanisms and their behaviours + // scouting: { + // /// In client mode, the period dedicated to scouting for a router before failing + // timeout: 3000, + // /// In peer mode, the period dedicated to scouting remote peers before attempting other operations + // delay: 200, + // /// The multicast scouting configuration. + // multicast: { + // /// Whether multicast scouting is enabled or not + // enabled: true, + // /// The socket which should be used for multicast scouting + // address: "224.0.0.224:7446", + // /// The network interface which should be used for multicast scouting + // interface: "auto", // If not set or set to "auto" the interface if picked automatically + // /// Which type of Zenoh instances to automatically establish sessions with upon discovery on UDP multicast. + // /// Accepts a single value or different values for router, peer and client. + // /// Each value is bit-or-like combinations of "peer", "router" and "client". + // autoconnect: { router: "", peer: "router|peer" }, + // /// Whether or not to listen for scout messages on UDP multicast and reply to them. + // listen: true, + // }, + // /// The gossip scouting configuration. + // gossip: { + // /// Whether gossip scouting is enabled or not + // enabled: true, + // /// When true, gossip scouting informations are propagated multiple hops to all nodes in the local network. + // /// When false, gossip scouting informations are only propagated to the next hop. + // /// Activating multihop gossip implies more scouting traffic and a lower scalability. + // /// It mostly makes sense when using "linkstate" routing mode where all nodes in the subsystem don't have + // /// direct connectivity with each other. + // multihop: false, + // /// Which type of Zenoh instances to automatically establish sessions with upon discovery on gossip. + // /// Accepts a single value or different values for router, peer and client. + // /// Each value is bit-or-like combinations of "peer", "router" and "client". + // autoconnect: { router: "", peer: "router|peer" }, + // }, + // }, + // /// Configuration of data messages timestamps management. + // timestamping: { + // /// Whether data messages should be timestamped if not already. + // /// Accepts a single boolean value or different values for router, peer and client. + // enabled: { router: true, peer: false, client: false }, + // /// Whether data messages with timestamps in the future should be dropped or not. + // /// If set to false (default), messages with timestamps in the future are retimestamped. + // /// Timestamps are ignored if timestamping is disabled. + // drop_future_timestamp: false, + // }, + // /// The default timeout to apply to queries in milliseconds. + // queries_default_timeout: 10000, + // /// The routing strategy to use and it's configuration. + // routing: { + // /// The routing strategy to use in routers and it's configuration. + // router: { + // /// When set to true a router will forward data between two peers + // /// directly connected to it if it detects that those peers are not + // /// connected to each other. + // /// The failover brokering only works if gossip discovery is enabled. + // peers_failover_brokering: true, + // }, + // /// The routing strategy to use in peers and it's configuration. + // peer: { + // /// The routing strategy to use in peers. ("peer_to_peer" or "linkstate"). + // mode: "peer_to_peer", + // }, + // }, + // // /// The declarations aggregation strategy. 
+ // // aggregation: { + // // /// A list of key-expressions for which all included subscribers will be aggregated into. + // // subscribers: [ + // // // key_expression + // // ], + // // /// A list of key-expressions for which all included publishers will be aggregated into. + // // publishers: [ + // // // key_expression + // // ], + // // }, + // /// Configure internal transport parameters + // transport: { + // unicast: { + // /// Timeout in milliseconds when opening a link + // accept_timeout: 10000, + // /// Maximum number of zenoh session in pending state while accepting + // accept_pending: 100, + // /// Maximum number of sessions that can be simultaneously alive + // max_sessions: 1000, + // /// Maximum number of incoming links that are admitted per session + // max_links: 1, + // /// Enables the LowLatency transport + // /// This option does not make LowLatency transport mandatory, the actual implementation of transport + // /// used will depend on Establish procedure and other party's settings + // /// + // /// NOTE: Currently, the LowLatency transport doesn't preserve QoS prioritization. + // /// NOTE: Due to the note above, 'lowlatency' is incompatible with 'qos' option, so in order to + // /// enable 'lowlatency' you need to explicitly disable 'qos'. + // lowlatency: false, + // /// Enables QoS on unicast communications. + // qos: { + // enabled: true, + // }, + // /// Enables compression on unicast communications. + // /// Compression capabilities are negotiated during session establishment. + // /// If both Zenoh nodes support compression, then compression is activated. + // compression: { + // enabled: false, + // }, + // }, + // multicast: { + // /// Enables QoS on multicast communication. + // /// Default to false for Zenoh-to-Zenoh-Pico out-of-the-box compatibility. + // qos: { + // enabled: false, + // }, + // /// Enables compression on multicast communication. + // /// Default to false for Zenoh-to-Zenoh-Pico out-of-the-box compatibility. + // compression: { + // enabled: false, + // }, + // }, + // link: { + // /// An optional whitelist of protocols to be used for accepting and opening sessions. + // /// If not configured, all the supported protocols are automatically whitelisted. + // /// The supported protocols are: ["tcp" , "udp", "tls", "quic", "ws", "unixsock-stream"] + // /// For example, to only enable "tls" and "quic": + // // protocols: ["tls", "quic"], + // /// Configure the zenoh TX parameters of a link + // tx: { + // /// The resolution in bits to be used for the message sequence numbers. + // /// When establishing a session with another Zenoh instance, the lowest value of the two instances will be used. + // /// Accepted values: 8bit, 16bit, 32bit, 64bit. + // sequence_number_resolution: "32bit", + // /// Link lease duration in milliseconds to announce to other zenoh nodes + // lease: 10000, + // /// Number of keep-alive messages in a link lease duration. If no data is sent, keep alive + // /// messages will be sent at the configured time interval. + // /// NOTE: In order to consider eventual packet loss and transmission latency and jitter, + // /// set the actual keep_alive timeout to one fourth of the lease time. + // /// This is in-line with the ITU-T G.8013/Y.1731 specification on continous connectivity + // /// check which considers a link as failed when no messages are received in 3.5 times the + // /// target interval. + // keep_alive: 4, + // /// Batch size in bytes is expressed as a 16bit unsigned integer. 
+ // /// Therefore, the maximum batch size is 2^16-1 (i.e. 65535). + // /// The default batch size value is the maximum batch size: 65535. + // batch_size: 65535, + // /// Each zenoh link has a transmission queue that can be configured + // queue: { + // /// The size of each priority queue indicates the number of batches a given queue can contain. + // /// The amount of memory being allocated for each queue is then SIZE_XXX * BATCH_SIZE. + // /// In the case of the transport link MTU being smaller than the ZN_BATCH_SIZE, + // /// then amount of memory being allocated for each queue is SIZE_XXX * LINK_MTU. + // /// If qos is false, then only the DATA priority will be allocated. + // size: { + // control: 1, + // real_time: 1, + // interactive_high: 1, + // interactive_low: 1, + // data_high: 2, + // data: 4, + // data_low: 4, + // background: 4, + // }, + // /// The initial exponential backoff time in nanoseconds to allow the batching to eventually progress. + // /// Higher values lead to a more aggressive batching but it will introduce additional latency. + // backoff: 100, + // // Number of threads dedicated to transmission + // // By default, the number of threads is calculated as follows: 1 + ((#cores - 1) / 4) + // // threads: 4, + // }, + // }, + // /// Configure the zenoh RX parameters of a link + // rx: { + // /// Receiving buffer size in bytes for each link + // /// The default the rx_buffer_size value is the same as the default batch size: 65335. + // /// For very high throughput scenarios, the rx_buffer_size can be increased to accomodate + // /// more in-flight data. This is particularly relevant when dealing with large messages. + // /// E.g. for 16MiB rx_buffer_size set the value to: 16777216. + // buffer_size: 65535, + // /// Maximum size of the defragmentation buffer at receiver end. + // /// Fragmented messages that are larger than the configured size will be dropped. + // /// The default value is 1GiB. This would work in most scenarios. + // /// NOTE: reduce the value if you are operating on a memory constrained device. + // max_message_size: 1073741824, + // }, + // /// Configure TLS specific parameters + // tls: { + // /// Path to the certificate of the certificate authority used to validate either the server + // /// or the client's keys and certificates, depending on the node's mode. If not specified + // /// on router mode then the default WebPKI certificates are used instead. + // root_ca_certificate: null, + // /// Path to the TLS server private key + // server_private_key: null, + // /// Path to the TLS server public certificate + // server_certificate: null, + // /// Client authentication, if true enables mTLS (mutual authentication) + // client_auth: false, + // /// Path to the TLS client private key + // client_private_key: null, + // /// Path to the TLS client public certificate + // client_certificate: null, + // // Whether or not to use server name verification, if set to false zenoh will disregard the common names of the certificates when verifying servers. + // // This could be dangerous because your CA can have signed a server cert for foo.com, that's later being used to host a server at baz.com. If you wan't your + // // ca to verify that the server at baz.com is actually baz.com, let this be true (default). + // server_name_verification: null, + // }, + // }, + // /// Shared memory configuration + // shared_memory: { + // enabled: false, + // }, + // /// Access control configuration + // auth: { + // /// The configuration of authentification. 
+ // /// A password implies a username is required. + // usrpwd: { + // user: null, + // password: null, + // /// The path to a file containing the user password dictionary + // dictionary_file: null, + // }, + // pubkey: { + // public_key_pem: null, + // private_key_pem: null, + // public_key_file: null, + // private_key_file: null, + // key_size: null, + // known_keys_file: null, + // }, + // }, + // }, + // /// Configure the Admin Space + // /// Unstable: this configuration part works as advertised, but may change in a future release + // adminspace: { + // // read and/or write permissions on the admin space + // permissions: { + // read: true, + // write: false, + // }, + // }, + /// + /// Plugins configurations + /// + // /// Directories where plugins configured by name should be looked for. Plugins configured by __path__ are not subject to lookup + // plugins_search_dirs: [], + // /// Plugins are only loaded if present in the configuration. When starting + // /// Once loaded, they may react to changes in the configuration made through the zenoh instance's adminspace. + // plugins: { + // /// If no `__path__` is given to a plugin, zenohd will automatically search for a shared library matching the plugin's name (here, `libzenoh_plugin_rest.so` would be searched for on linux) + // + // /// Plugin settings may contain field `__config__` + // /// - If `__config__` is specified, it's content is merged into plugin configuration + // /// - Properties loaded from `__config__` file overrides existing properties + // /// - If json objects in loaded file contains `__config__` properties, they are processed recursively + // /// This is used in the 'storcge_manager' which supports subplugins, each with it's own config + // /// + // /// See below exapmle of plugin configuration using `__config__` property + // + // /// Configure the REST API plugin + // rest: { + // /// Setting this option to true allows zenohd to panic should it detect issues with this plugin. Setting it to false politely asks the plugin not to panic. + // __required__: true, // defaults to false + // /// load configuration from the file + // __config__: "./plugins/zenoh-plugin-rest/config.json5", + // /// http port to answer to rest requests + // http_port: 8000, + // }, + // + // /// Configure the storage manager plugin + // storage_manager: { + // /// When a path is present, automatic search is disabled, and zenohd will instead select the first path which manages to load. + // __path__: [ + // "./target/release/libzenoh_plugin_storage_manager.so", + // "./target/release/libzenoh_plugin_storage_manager.dylib", + // ], + // /// Directories where plugins configured by name should be looked for. Plugins configured by __path__ are not subject to lookup + // backend_search_dirs: [], + // /// The "memory" volume is always available, but you may create other volumes here, with various backends to support the actual storing. + // volumes: { + // /// An influxdb backend is also available at https://github.com/eclipse-zenoh/zenoh-backend-influxdb + // influxdb: { + // url: "https://myinfluxdb.example", + // /// Some plugins may need passwords in their configuration. + // /// To avoid leaking them through the adminspace, they may be masked behind a privacy barrier. + // /// any value held at the key "private" will not be shown in the adminspace. 
+ // private: { + // username: "user1", + // password: "pw1", + // }, + // }, + // influxdb2: { + // /// A second backend of the same type can be spawned using `__path__`, for examples when different DBs are needed. + // backend: "influxdb", + // private: { + // username: "user2", + // password: "pw2", + // }, + // url: "https://localhost:8086", + // }, + // }, + // + // /// Configure the storages supported by the volumes + // storages: { + // demo: { + // /// Storages always need to know what set of keys they must work with. These sets are defined by a key expression. + // key_expr: "demo/memory/**", + // /// Storages also need to know which volume will be used to actually store their key-value pairs. + // /// The "memory" volume is always available, and doesn't require any per-storage options, so requesting "memory" by string is always sufficient. + // volume: "memory", + // }, + // demo2: { + // key_expr: "demo/memory2/**", + // volume: "memory", + // /// Storage manager plugin handles metadata in order to ensure convergence of distributed storages configured in Zenoh. + // /// Metadata includes the set of wild card updates and deletions (tombstones). + // /// Once the samples are guaranteed to be delivered, the metadata can be garbage collected. + // garbage_collection: { + // /// The garbage collection event will be periodic with this duration. + // /// The duration is specified in seconds. + // period: 30, + // /// Metadata older than this parameter will be garbage collected. + // /// The duration is specified in seconds. + // lifespan: 86400, + // }, + // /// If multiple storages subscribing to the same key_expr should be synchronized, declare them as replicas. + // /// In the absence of this configuration, a normal storage is initialized + // /// Note: all the samples to be stored in replicas should be timestamped + // replica_config: { + // /// Specifying the parameters is optional, by default the values provided will be used. + // /// Time interval between different synchronization attempts in seconds + // publication_interval: 5, + // /// Expected propagation delay of the network in milliseconds + // propagation_delay: 200, + // /// This is the chunk that you would like your data to be divide into in time, in milliseconds. + // /// Higher the frequency of updates, lower the delta should be chosen + // /// To be efficient, delta should be the time containing no more than 100,000 samples + // delta: 1000, + // } + // }, + // demo3: { + // key_expr: "demo/memory3/**", + // volume: "memory", + // /// A complete storage advertises itself as containing all the known keys matching the configured key expression. + // /// If not configured, complete defaults to false. + // complete: "true", + // }, + // influx_demo: { + // key_expr: "demo/influxdb/**", + // /// This prefix will be stripped of the received keys when storing. 
+ // strip_prefix: "demo/influxdb", + // /// influxdb-backed volumes need a bit more configuration, which is passed like-so: + // volume: { + // id: "influxdb", + // db: "example", + // }, + // }, + // influx_demo2: { + // key_expr: "demo/influxdb2/**", + // strip_prefix: "demo/influxdb2", + // volume: { + // id: "influxdb2", + // db: "example", + // }, + // }, + // }, + // }, + // }, + // /// Plugin configuration example using `__config__` property + // plugins: { + // rest: { + // __config__: "./plugins/zenoh-plugin-rest/config.json5", + // }, + // storage_manager: { + // __config__: "./plugins/zenoh-plugin-storage-manager/config.json5", + // } + // }, +} \ No newline at end of file diff --git a/rules.csv b/rules.csv deleted file mode 100644 index 5fe93e83d6..0000000000 --- a/rules.csv +++ /dev/null @@ -1,6 +0,0 @@ -subject,ke,action,permission -aaa3b411006ad57868988f9fec672a31,demo/example/*,Write,true -aaa3b411006ad57868988f9fec672a31,demo/example/mypub,Write,false -bbb3b411006ad57868988f9fec672a31,demo/example/mypub,Read,true -aaabbb11006ad57868988f9fec672a31,demo/*,Read,true -aaabbb11006ad57868988f9fec672a31,demo/*,Write,true \ No newline at end of file diff --git a/rules.json5 b/rules.json5 new file mode 100644 index 0000000000..1f1b4f7235 --- /dev/null +++ b/rules.json5 @@ -0,0 +1,47 @@ +[ + { + "sub": { + "id": "aaa3b411006ad57868988f9fec672a31", + "attributes": null + }, + "ke": "demo/example/**", + "action": "Write", + "permission": true + }, + { + "sub": { + "id": "aaa3b411006ad57868988f9fec672a31", + "attributes": null + }, + "ke": "demo/example/zenoh-rs-pub", + "action": "Write", + "permission": false + }, + { + "sub": { + "id": "bbb3b411006ad57868988f9fec672a31", + "attributes": null + }, + "ke": "demo/example/zenoh-rs-pub", + "action": "Read", + "permission": true + }, + { + "sub": { + "id": "aaabbb11006ad57868988f9fec672a31", + "attributes": null + }, + "ke": "demo/example/**", + "action": "Read", + "permission": true + }, + { + "sub": { + "id": "aaabbb11006ad57868988f9fec672a31", + "attributes": null + }, + "ke": "demo/example/**", + "action": "Write", + "permission": true + } +] \ No newline at end of file diff --git a/sub_config.json5 b/sub_config.json5 new file mode 100644 index 0000000000..b163e2b4f2 --- /dev/null +++ b/sub_config.json5 @@ -0,0 +1,406 @@ +/// This file attempts to list and document available configuration elements. +/// For a more complete view of the configuration's structure, check out `zenoh/src/config.rs`'s `Config` structure. +/// Note that the values here are correctly typed, but may not be sensible, so copying this file to change only the parts that matter to you is not good practice. +{ + /// The identifier (as unsigned 128bit integer in hexadecimal lowercase - leading zeros are not accepted) + /// that zenoh runtime will use. + /// If not set, a random unsigned 128bit integer will be used. + /// WARNING: this id must be unique in your zenoh network. + id: "bbb3b411006ad57868988f9fec672a31", + /// The node's mode (router, peer or client) + mode: "client", + /// The node's metadata (name, location, DNS name, etc.) Arbitrary JSON data not interpreted by zenohd and available in admin space @/router/ + metadata: { + name: "blueberry", + location: "Dollar Street" + }, + /// Which endpoints to connect to. E.g. tcp/localhost:7447. + /// By configuring the endpoints, it is possible to tell zenoh which router/peer to connect to at startup. + connect: { + endpoints: [ + // "/
" + ], + }, + /// Which endpoints to listen on. E.g. tcp/localhost:7447. + /// By configuring the endpoints, it is possible to tell zenoh which are the endpoints that other routers, + /// peers, or client can use to establish a zenoh session. + listen: { + endpoints: [ + // "/
" + ], + }, + // /// Configure the scouting mechanisms and their behaviours + // scouting: { + // /// In client mode, the period dedicated to scouting for a router before failing + // timeout: 3000, + // /// In peer mode, the period dedicated to scouting remote peers before attempting other operations + // delay: 200, + // /// The multicast scouting configuration. + // multicast: { + // /// Whether multicast scouting is enabled or not + // enabled: true, + // /// The socket which should be used for multicast scouting + // address: "224.0.0.224:7446", + // /// The network interface which should be used for multicast scouting + // interface: "auto", // If not set or set to "auto" the interface if picked automatically + // /// Which type of Zenoh instances to automatically establish sessions with upon discovery on UDP multicast. + // /// Accepts a single value or different values for router, peer and client. + // /// Each value is bit-or-like combinations of "peer", "router" and "client". + // autoconnect: { router: "", peer: "router|peer" }, + // /// Whether or not to listen for scout messages on UDP multicast and reply to them. + // listen: true, + // }, + // /// The gossip scouting configuration. + // gossip: { + // /// Whether gossip scouting is enabled or not + // enabled: true, + // /// When true, gossip scouting informations are propagated multiple hops to all nodes in the local network. + // /// When false, gossip scouting informations are only propagated to the next hop. + // /// Activating multihop gossip implies more scouting traffic and a lower scalability. + // /// It mostly makes sense when using "linkstate" routing mode where all nodes in the subsystem don't have + // /// direct connectivity with each other. + // multihop: false, + // /// Which type of Zenoh instances to automatically establish sessions with upon discovery on gossip. + // /// Accepts a single value or different values for router, peer and client. + // /// Each value is bit-or-like combinations of "peer", "router" and "client". + // autoconnect: { router: "", peer: "router|peer" }, + // }, + // }, + // /// Configuration of data messages timestamps management. + // timestamping: { + // /// Whether data messages should be timestamped if not already. + // /// Accepts a single boolean value or different values for router, peer and client. + // enabled: { router: true, peer: false, client: false }, + // /// Whether data messages with timestamps in the future should be dropped or not. + // /// If set to false (default), messages with timestamps in the future are retimestamped. + // /// Timestamps are ignored if timestamping is disabled. + // drop_future_timestamp: false, + // }, + // /// The default timeout to apply to queries in milliseconds. + // queries_default_timeout: 10000, + // /// The routing strategy to use and it's configuration. + // routing: { + // /// The routing strategy to use in routers and it's configuration. + // router: { + // /// When set to true a router will forward data between two peers + // /// directly connected to it if it detects that those peers are not + // /// connected to each other. + // /// The failover brokering only works if gossip discovery is enabled. + // peers_failover_brokering: true, + // }, + // /// The routing strategy to use in peers and it's configuration. + // peer: { + // /// The routing strategy to use in peers. ("peer_to_peer" or "linkstate"). + // mode: "peer_to_peer", + // }, + // }, + // // /// The declarations aggregation strategy. 
+ // // aggregation: { + // // /// A list of key-expressions for which all included subscribers will be aggregated into. + // // subscribers: [ + // // // key_expression + // // ], + // // /// A list of key-expressions for which all included publishers will be aggregated into. + // // publishers: [ + // // // key_expression + // // ], + // // }, + // /// Configure internal transport parameters + // transport: { + // unicast: { + // /// Timeout in milliseconds when opening a link + // accept_timeout: 10000, + // /// Maximum number of zenoh session in pending state while accepting + // accept_pending: 100, + // /// Maximum number of sessions that can be simultaneously alive + // max_sessions: 1000, + // /// Maximum number of incoming links that are admitted per session + // max_links: 1, + // /// Enables the LowLatency transport + // /// This option does not make LowLatency transport mandatory, the actual implementation of transport + // /// used will depend on Establish procedure and other party's settings + // /// + // /// NOTE: Currently, the LowLatency transport doesn't preserve QoS prioritization. + // /// NOTE: Due to the note above, 'lowlatency' is incompatible with 'qos' option, so in order to + // /// enable 'lowlatency' you need to explicitly disable 'qos'. + // lowlatency: false, + // /// Enables QoS on unicast communications. + // qos: { + // enabled: true, + // }, + // /// Enables compression on unicast communications. + // /// Compression capabilities are negotiated during session establishment. + // /// If both Zenoh nodes support compression, then compression is activated. + // compression: { + // enabled: false, + // }, + // }, + // multicast: { + // /// Enables QoS on multicast communication. + // /// Default to false for Zenoh-to-Zenoh-Pico out-of-the-box compatibility. + // qos: { + // enabled: false, + // }, + // /// Enables compression on multicast communication. + // /// Default to false for Zenoh-to-Zenoh-Pico out-of-the-box compatibility. + // compression: { + // enabled: false, + // }, + // }, + // link: { + // /// An optional whitelist of protocols to be used for accepting and opening sessions. + // /// If not configured, all the supported protocols are automatically whitelisted. + // /// The supported protocols are: ["tcp" , "udp", "tls", "quic", "ws", "unixsock-stream"] + // /// For example, to only enable "tls" and "quic": + // // protocols: ["tls", "quic"], + // /// Configure the zenoh TX parameters of a link + // tx: { + // /// The resolution in bits to be used for the message sequence numbers. + // /// When establishing a session with another Zenoh instance, the lowest value of the two instances will be used. + // /// Accepted values: 8bit, 16bit, 32bit, 64bit. + // sequence_number_resolution: "32bit", + // /// Link lease duration in milliseconds to announce to other zenoh nodes + // lease: 10000, + // /// Number of keep-alive messages in a link lease duration. If no data is sent, keep alive + // /// messages will be sent at the configured time interval. + // /// NOTE: In order to consider eventual packet loss and transmission latency and jitter, + // /// set the actual keep_alive timeout to one fourth of the lease time. + // /// This is in-line with the ITU-T G.8013/Y.1731 specification on continous connectivity + // /// check which considers a link as failed when no messages are received in 3.5 times the + // /// target interval. + // keep_alive: 4, + // /// Batch size in bytes is expressed as a 16bit unsigned integer. 
+ // /// Therefore, the maximum batch size is 2^16-1 (i.e. 65535). + // /// The default batch size value is the maximum batch size: 65535. + // batch_size: 65535, + // /// Each zenoh link has a transmission queue that can be configured + // queue: { + // /// The size of each priority queue indicates the number of batches a given queue can contain. + // /// The amount of memory being allocated for each queue is then SIZE_XXX * BATCH_SIZE. + // /// In the case of the transport link MTU being smaller than the ZN_BATCH_SIZE, + // /// then amount of memory being allocated for each queue is SIZE_XXX * LINK_MTU. + // /// If qos is false, then only the DATA priority will be allocated. + // size: { + // control: 1, + // real_time: 1, + // interactive_high: 1, + // interactive_low: 1, + // data_high: 2, + // data: 4, + // data_low: 4, + // background: 4, + // }, + // /// The initial exponential backoff time in nanoseconds to allow the batching to eventually progress. + // /// Higher values lead to a more aggressive batching but it will introduce additional latency. + // backoff: 100, + // // Number of threads dedicated to transmission + // // By default, the number of threads is calculated as follows: 1 + ((#cores - 1) / 4) + // // threads: 4, + // }, + // }, + // /// Configure the zenoh RX parameters of a link + // rx: { + // /// Receiving buffer size in bytes for each link + // /// The default the rx_buffer_size value is the same as the default batch size: 65335. + // /// For very high throughput scenarios, the rx_buffer_size can be increased to accomodate + // /// more in-flight data. This is particularly relevant when dealing with large messages. + // /// E.g. for 16MiB rx_buffer_size set the value to: 16777216. + // buffer_size: 65535, + // /// Maximum size of the defragmentation buffer at receiver end. + // /// Fragmented messages that are larger than the configured size will be dropped. + // /// The default value is 1GiB. This would work in most scenarios. + // /// NOTE: reduce the value if you are operating on a memory constrained device. + // max_message_size: 1073741824, + // }, + // /// Configure TLS specific parameters + // tls: { + // /// Path to the certificate of the certificate authority used to validate either the server + // /// or the client's keys and certificates, depending on the node's mode. If not specified + // /// on router mode then the default WebPKI certificates are used instead. + // root_ca_certificate: null, + // /// Path to the TLS server private key + // server_private_key: null, + // /// Path to the TLS server public certificate + // server_certificate: null, + // /// Client authentication, if true enables mTLS (mutual authentication) + // client_auth: false, + // /// Path to the TLS client private key + // client_private_key: null, + // /// Path to the TLS client public certificate + // client_certificate: null, + // // Whether or not to use server name verification, if set to false zenoh will disregard the common names of the certificates when verifying servers. + // // This could be dangerous because your CA can have signed a server cert for foo.com, that's later being used to host a server at baz.com. If you wan't your + // // ca to verify that the server at baz.com is actually baz.com, let this be true (default). + // server_name_verification: null, + // }, + // }, + // /// Shared memory configuration + // shared_memory: { + // enabled: false, + // }, + // /// Access control configuration + // auth: { + // /// The configuration of authentification. 
+ // /// A password implies a username is required. + // usrpwd: { + // user: null, + // password: null, + // /// The path to a file containing the user password dictionary + // dictionary_file: null, + // }, + // pubkey: { + // public_key_pem: null, + // private_key_pem: null, + // public_key_file: null, + // private_key_file: null, + // key_size: null, + // known_keys_file: null, + // }, + // }, + // }, + // /// Configure the Admin Space + // /// Unstable: this configuration part works as advertised, but may change in a future release + // adminspace: { + // // read and/or write permissions on the admin space + // permissions: { + // read: true, + // write: false, + // }, + // }, + /// + /// Plugins configurations + /// + // /// Directories where plugins configured by name should be looked for. Plugins configured by __path__ are not subject to lookup + // plugins_search_dirs: [], + // /// Plugins are only loaded if present in the configuration. When starting + // /// Once loaded, they may react to changes in the configuration made through the zenoh instance's adminspace. + // plugins: { + // /// If no `__path__` is given to a plugin, zenohd will automatically search for a shared library matching the plugin's name (here, `libzenoh_plugin_rest.so` would be searched for on linux) + // + // /// Plugin settings may contain field `__config__` + // /// - If `__config__` is specified, it's content is merged into plugin configuration + // /// - Properties loaded from `__config__` file overrides existing properties + // /// - If json objects in loaded file contains `__config__` properties, they are processed recursively + // /// This is used in the 'storcge_manager' which supports subplugins, each with it's own config + // /// + // /// See below exapmle of plugin configuration using `__config__` property + // + // /// Configure the REST API plugin + // rest: { + // /// Setting this option to true allows zenohd to panic should it detect issues with this plugin. Setting it to false politely asks the plugin not to panic. + // __required__: true, // defaults to false + // /// load configuration from the file + // __config__: "./plugins/zenoh-plugin-rest/config.json5", + // /// http port to answer to rest requests + // http_port: 8000, + // }, + // + // /// Configure the storage manager plugin + // storage_manager: { + // /// When a path is present, automatic search is disabled, and zenohd will instead select the first path which manages to load. + // __path__: [ + // "./target/release/libzenoh_plugin_storage_manager.so", + // "./target/release/libzenoh_plugin_storage_manager.dylib", + // ], + // /// Directories where plugins configured by name should be looked for. Plugins configured by __path__ are not subject to lookup + // backend_search_dirs: [], + // /// The "memory" volume is always available, but you may create other volumes here, with various backends to support the actual storing. + // volumes: { + // /// An influxdb backend is also available at https://github.com/eclipse-zenoh/zenoh-backend-influxdb + // influxdb: { + // url: "https://myinfluxdb.example", + // /// Some plugins may need passwords in their configuration. + // /// To avoid leaking them through the adminspace, they may be masked behind a privacy barrier. + // /// any value held at the key "private" will not be shown in the adminspace. 
+ // private: { + // username: "user1", + // password: "pw1", + // }, + // }, + // influxdb2: { + // /// A second backend of the same type can be spawned using `__path__`, for examples when different DBs are needed. + // backend: "influxdb", + // private: { + // username: "user2", + // password: "pw2", + // }, + // url: "https://localhost:8086", + // }, + // }, + // + // /// Configure the storages supported by the volumes + // storages: { + // demo: { + // /// Storages always need to know what set of keys they must work with. These sets are defined by a key expression. + // key_expr: "demo/memory/**", + // /// Storages also need to know which volume will be used to actually store their key-value pairs. + // /// The "memory" volume is always available, and doesn't require any per-storage options, so requesting "memory" by string is always sufficient. + // volume: "memory", + // }, + // demo2: { + // key_expr: "demo/memory2/**", + // volume: "memory", + // /// Storage manager plugin handles metadata in order to ensure convergence of distributed storages configured in Zenoh. + // /// Metadata includes the set of wild card updates and deletions (tombstones). + // /// Once the samples are guaranteed to be delivered, the metadata can be garbage collected. + // garbage_collection: { + // /// The garbage collection event will be periodic with this duration. + // /// The duration is specified in seconds. + // period: 30, + // /// Metadata older than this parameter will be garbage collected. + // /// The duration is specified in seconds. + // lifespan: 86400, + // }, + // /// If multiple storages subscribing to the same key_expr should be synchronized, declare them as replicas. + // /// In the absence of this configuration, a normal storage is initialized + // /// Note: all the samples to be stored in replicas should be timestamped + // replica_config: { + // /// Specifying the parameters is optional, by default the values provided will be used. + // /// Time interval between different synchronization attempts in seconds + // publication_interval: 5, + // /// Expected propagation delay of the network in milliseconds + // propagation_delay: 200, + // /// This is the chunk that you would like your data to be divide into in time, in milliseconds. + // /// Higher the frequency of updates, lower the delta should be chosen + // /// To be efficient, delta should be the time containing no more than 100,000 samples + // delta: 1000, + // } + // }, + // demo3: { + // key_expr: "demo/memory3/**", + // volume: "memory", + // /// A complete storage advertises itself as containing all the known keys matching the configured key expression. + // /// If not configured, complete defaults to false. + // complete: "true", + // }, + // influx_demo: { + // key_expr: "demo/influxdb/**", + // /// This prefix will be stripped of the received keys when storing. 
+ // strip_prefix: "demo/influxdb", + // /// influxdb-backed volumes need a bit more configuration, which is passed like-so: + // volume: { + // id: "influxdb", + // db: "example", + // }, + // }, + // influx_demo2: { + // key_expr: "demo/influxdb2/**", + // strip_prefix: "demo/influxdb2", + // volume: { + // id: "influxdb2", + // db: "example", + // }, + // }, + // }, + // }, + // }, + // /// Plugin configuration example using `__config__` property + // plugins: { + // rest: { + // __config__: "./plugins/zenoh-plugin-rest/config.json5", + // }, + // storage_manager: { + // __config__: "./plugins/zenoh-plugin-storage-manager/config.json5", + // } + // }, +} \ No newline at end of file diff --git a/zenoh/src/net/routing/interceptor/authz.rs b/zenoh/src/net/routing/interceptor/authz.rs index add36f3f9c..fa9664e511 100644 --- a/zenoh/src/net/routing/interceptor/authz.rs +++ b/zenoh/src/net/routing/interceptor/authz.rs @@ -1,13 +1,13 @@ +use std::fs::File; +use std::io::Read; use std::{fmt, hash::Hash}; -// use casbin::{CoreApi, Enforcer}; use super::RoutingContext; use csv::ReaderBuilder; use serde::{Deserialize, Serialize}; use serde_json::{Result, Value}; use zenoh_config::ZenohId; -//use ZenohID; -//use zenoh_keyexpr::keyexpr_tree::box_tree::KeBoxTree; + use zenoh_protocol::network::NetworkMessage; use zenoh_result::ZResult; @@ -18,6 +18,7 @@ use std::{collections::HashMap, error::Error}; #[derive(Clone, Debug, Serialize, Deserialize, Eq, Hash, PartialEq)] pub enum Action { + None, Read, Write, Both, @@ -46,14 +47,14 @@ type KeTree = AclTrie; #[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq, Hash)] pub struct Subject { id: ZenohId, - // attributes: Option>, //might be mapped to other types eventually + attributes: Option>, //might be mapped to other types eventually } //subject_builder (add ID, attributes, roles) pub struct SubjectBuilder { id: Option, - attributes: Option>, + attributes: Option>, } //request_builder (add subject, resource, action) //do we need one? @@ -111,7 +112,7 @@ impl SubjectBuilder { self } - pub fn attributes(&mut self, attributes: impl Into>) -> &mut Self { + pub fn attributes(&mut self, attributes: impl Into>) -> &mut Self { self.attributes.insert(attributes.into()); self } @@ -121,7 +122,7 @@ impl SubjectBuilder { let attr = self.attributes.clone(); Ok(Subject { id, - // attributes: attr, + attributes: attr, }) } } @@ -155,7 +156,7 @@ impl SubjectBuilder { // } //struct that defines each policy (add policy type and ruleset) -#[derive(Serialize, Deserialize, Debug, Clone)] +#[derive(Serialize, Deserialize, Clone)] pub struct Rule { // policy_type: u8, //for l,a,r [access-list, abac, rbac type policy] will be assuming acl for now sub: Subject, @@ -164,7 +165,7 @@ pub struct Rule { permission: bool, } -#[derive(Clone, PartialEq, Eq, Hash)] +#[derive(Clone, Debug, PartialEq, Eq, Hash)] pub struct SubAct(Subject, Action); #[derive(Clone)] //, PartialEq, Eq, Hash)] @@ -190,9 +191,7 @@ impl PolicyEnforcer { // let rule_set: Vec = serde_json::from_str(policy_info)?; let rule_set = Self::policy_resource_point().unwrap(); - println!("print policy {:?}", rule_set); let pe = Self::build_policy_map(rule_set).expect("policy not established"); - //also should start the logger here Ok(pe) } @@ -213,18 +212,27 @@ impl PolicyEnforcer { // let mut policy = Policy(HashMap::new()); //now create a hashmap for ketrees ((sub->action)->ketree) - let rules: HashMap; //u8 is 0 for disallowed and 1 for allowed?? 
- //iterate through the map to get + // let rules: HashMap; //u8 is 0 for disallowed and 1 for allowed?? + //iterate through the map to get for v in rule_set { //for each rule //extract subject and action + + /* + for now permission not allowed means it will not be added to the allow trie + */ + let perm = v.permission; + if !perm { + continue; + } let sub = v.sub; let action = v.action; let ke = v.ke; - let perm = v.permission; + //let perm = v.permission; //create subact let subact = SubAct(sub, action); //match subact in the policy hashmap + #[allow(clippy::map_entry)] if !policy.0.contains_key(&subact) { //create new entry for subact + ketree let mut ketree = KeTree::new(); @@ -232,10 +240,10 @@ impl PolicyEnforcer { // ketree.insert(ke,1).unwrap(); //1 for allowed?? policy.0.insert(subact, ketree); } else { - let mut ketree = policy.0.get_mut(&subact).unwrap(); + let ketree = policy.0.get_mut(&subact).unwrap(); // ketree.insert(ke,1).unwrap(); //1 for allowed?? // let mut ketree = KeTree::new(); - let x = Permissions::READ; + // let x = Permissions::READ; ketree.insert(Acl::new(&ke), Permissions::READ); // policy.0.insert(subact,ketree); } @@ -256,32 +264,32 @@ impl PolicyEnforcer { let zid = new_ctx.zid.unwrap(); //build subject here - let subject = SubjectBuilder::new().id(zid).build()?; //.attributes(None).build(); + let subject = SubjectBuilder::new().id(zid).build()?; let request = RequestBuilder::new() .sub(subject) .obj(ke) .action(action) .build()?; let decision = self.policy_decision_point(request)?; - Ok(false) - } - pub fn permission_request_builder( - msg: zenoh_protocol::network::NetworkMessage, - action: Action, - ) { - - /* - input: msg body - output: (sub,ke,act) - function: extract relevant info from the incoming msg body - build the subject [ID, Attributes and Roles] - then use that to build the request [subject, key-expression, action ] - return request to PEP - */ - /* - PHASE1: just extract the ID (zid?) from the msg; can later add attributes to the list. have a struct with ID and attributes field (both Option) - */ + Ok(decision) } + // pub fn permission_request_builder( + // msg: zenoh_protocol::network::NetworkMessage, + // action: Action, + // ) { + + // /* + // input: msg body + // output: (sub,ke,act) + // function: extract relevant info from the incoming msg body + // build the subject [ID, Attributes and Roles] + // then use that to build the request [subject, key-expression, action ] + // return request to PEP + // */ + // /* + // PHASE1: just extract the ID (zid?) from the msg; can later add attributes to the list. 
have a struct with ID and attributes field (both Option) + // */ + // } pub fn policy_decision_point(&self, request: Request) -> ZResult { /* @@ -329,25 +337,28 @@ impl PolicyEnforcer { /* PHASE1: just have a vector of structs containing these values; later we can load them here from config */ - let mut rule_set: Vec = Vec::new(); - let mut rdr = ReaderBuilder::new() - .has_headers(true) - .from_path("rules.csv") - .unwrap(); - - for result in rdr.deserialize() { - if let Ok(rec) = result { - let record: Rule = rec; - rule_set.push(record); - } - } - // let static_policy = r#"{ - // ["subject":{"id": 001, "attributes": "location_1"},"ke":"demo/a/*","action":"Read","permission":true], - // ["subject":{"id": 002, "attributes": "location_1"},"ke":"demo/a/*","action":"Read","permission":true], - // ["subject":{"id": 002, "attributes": "location_1"},"ke":"demo/a/*","action":"Read","permission":true], - // ["subject":{"id": 003, "attributes": "location_1"},"ke":"demo/*","action":"Both","permission":true] - // }"#; - Ok(rule_set) + #[derive(Serialize, Deserialize, Clone)] + + struct Rules(Vec); // = Vec::new(); + // let mut rdr = ReaderBuilder::new() + // .has_headers(true) + // .from_path("rules.csv") + // .unwrap(); + let mut file = File::open("rules.json5").unwrap(); + let mut buff = String::new(); + file.read_to_string(&mut buff).unwrap(); + + let rulevec: Rules = serde_json::from_str(&buff).unwrap(); + // for result in rdr.deserialize() { + // if let Ok(rec) = result { + // let record: Rule = rec; + // rule_set.push(record); + // } else { + // bail!("unable to parse json file"); + // } + // } + + Ok(rulevec.0) } } diff --git a/zenoh/src/net/routing/interceptor/mod.rs b/zenoh/src/net/routing/interceptor/mod.rs index bd5f51237b..42db621f74 100644 --- a/zenoh/src/net/routing/interceptor/mod.rs +++ b/zenoh/src/net/routing/interceptor/mod.rs @@ -218,10 +218,10 @@ impl InterceptorTrait for IngressAclEnforcer { let new_ctx = NewCtx { ke, zid: Some(zid) }; //how to get the zid here let decision = e.policy_enforcement_point(new_ctx, act).unwrap(); if !decision { - println!("Not allowed to Write"); + // println!("Not allowed to Write"); return None; } else { - println!("Allowed to Write"); + // println!("Allowed to Write"); } } } @@ -248,10 +248,10 @@ impl InterceptorTrait for EgressAclEnforcer { let new_ctx = NewCtx { ke, zid: self.zid }; let decision = e.policy_enforcement_point(new_ctx, act).unwrap(); if !decision { - println!("Not allowed to Read"); + // println!("Not allowed to Read"); return None; } else { - println!("Allowed to Read"); + // println!("Allowed to Read"); } } } From e2a59c7b8c8ccd7cbe84868e0e0233c4335d4ffa Mon Sep 17 00:00:00 2001 From: snehilzs Date: Tue, 30 Jan 2024 18:41:35 +0100 Subject: [PATCH 045/122] WIP:first acl prototype --- rules.json5 | 2 +- zenoh/src/net/routing/interceptor/authz.rs | 199 ++++++--------------- zenoh/src/net/routing/interceptor/mod.rs | 6 - 3 files changed, 57 insertions(+), 150 deletions(-) diff --git a/rules.json5 b/rules.json5 index 1f1b4f7235..5fc6a6effa 100644 --- a/rules.json5 +++ b/rules.json5 @@ -6,7 +6,7 @@ }, "ke": "demo/example/**", "action": "Write", - "permission": true + "permission": false }, { "sub": { diff --git a/zenoh/src/net/routing/interceptor/authz.rs b/zenoh/src/net/routing/interceptor/authz.rs index fa9664e511..8b0a32435c 100644 --- a/zenoh/src/net/routing/interceptor/authz.rs +++ b/zenoh/src/net/routing/interceptor/authz.rs @@ -1,20 +1,20 @@ use std::fs::File; +use std::hash::Hash; use std::io::Read; -use std::{fmt, 
hash::Hash}; -use super::RoutingContext; -use csv::ReaderBuilder; +//use super::RoutingContext; +use fr_trie::key::ValueMerge; use serde::{Deserialize, Serialize}; -use serde_json::{Result, Value}; use zenoh_config::ZenohId; +//use zenoh_protocol::network::NetworkMessage; -use zenoh_protocol::network::NetworkMessage; use zenoh_result::ZResult; -use fr_trie::glob::acl::{Acl, AclTrie, Permissions}; +use fr_trie::glob::acl::Acl; use fr_trie::glob::GlobMatcher; +use fr_trie::trie::Trie; -use std::{collections::HashMap, error::Error}; +use std::collections::HashMap; #[derive(Clone, Debug, Serialize, Deserialize, Eq, Hash, PartialEq)] pub enum Action { @@ -41,10 +41,25 @@ pub struct RequestBuilder { obj: Option, action: Option, } +#[derive(Clone, Debug)] -type KeTree = AclTrie; +pub enum Permissions { + Deny, + Allow, +} + +impl ValueMerge for Permissions { + fn merge(&self, _other: &Self) -> Self { + self.clone() + } + + fn merge_mut(&mut self, _other: &Self) {} +} -#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq, Hash)] +//type KeTree = AclTrie; +type KeTree = Trie; + +#[derive(Clone, Debug, Serialize, Deserialize, Eq, PartialEq, Hash)] pub struct Subject { id: ZenohId, attributes: Option>, //might be mapped to other types eventually @@ -127,38 +142,9 @@ impl SubjectBuilder { } } -// pub trait ZAuth { -// fn authz_testing(&self, _: String, _: String, _: String) -> ZResult; -// } - -// impl ZAuth for Enforcer { -// fn authz_testing(&self, zid: String, ke: String, act: String) -> ZResult { -// /* -// (zid, keyexpr, act): these values should be extraced from the authn code. -// has to be atomic, to avoid another process sending the wrong info -// */ -// if let Ok(authorized) = self.enforce((zid.clone(), ke.clone(), act.clone())) { -// Ok(authorized) -// } else { -// println!("policy enforcement error"); -// Ok(false) -// } -// } -// } - -/* replaced with PolicyEnforcer::init() function */ - -// pub async fn start_authz() -> Result { -// // get file value -// let mut e = Enforcer::new("keymatch_model.conf", "keymatch_policy.csv").await?; -// e.enable_log(true); -// Ok(e) -// } - -//struct that defines each policy (add policy type and ruleset) +//struct that defines a single rule in the access-control policy #[derive(Serialize, Deserialize, Clone)] pub struct Rule { - // policy_type: u8, //for l,a,r [access-list, abac, rbac type policy] will be assuming acl for now sub: Subject, ke: String, action: Action, @@ -168,9 +154,8 @@ pub struct Rule { #[derive(Clone, Debug, PartialEq, Eq, Hash)] pub struct SubAct(Subject, Action); -#[derive(Clone)] //, PartialEq, Eq, Hash)] -pub struct PolicyEnforcer(HashMap); //need to add tries here - +#[derive(Clone)] +pub struct PolicyEnforcer(HashMap); #[derive(Clone, PartialEq, Eq, Hash)] pub struct KeTrie {} @@ -183,44 +168,24 @@ impl PolicyEnforcer { creates the policy hashmap with the ke-tries for ke matching should have polic-type in the mix here...need to verify */ - //policy should be derived from config/file (hardcoding it for now) - //config for local static policy - // let policy_info = Self::policy_resource_point().unwrap(); - - //desearlize to vector of rules - // let rule_set: Vec = serde_json::from_str(policy_info)?; - - let rule_set = Self::policy_resource_point().unwrap(); + let rule_set = Self::policy_resource_point("rules.json5").unwrap(); let pe = Self::build_policy_map(rule_set).expect("policy not established"); //also should start the logger here Ok(pe) } pub fn build_policy_map(rule_set: Vec) -> ZResult { - //let pe: PolicyEnforcer; 
- let mut policy = PolicyEnforcer(HashMap::new()); - - //convert vector of rules to a hashmap mapping subact to ketree (WIP) + //convert vector of rules to a hashmap mapping subact to ketrie /* - policy = subject : [ rule_1, - rule_2, - ... - rule_n - ] - where rule_i = action_i : (ke_tree_deny, ke_tree_allow) that deny/allow action_i + representaiton of policy list as a hashmap of trees + tried KeTrees, didn't work + using fr-trie for now as a placeholder for key-matching */ - // let mut policy = Policy(HashMap::new()); - //now create a hashmap for ketrees ((sub->action)->ketree) - // let rules: HashMap; //u8 is 0 for disallowed and 1 for allowed?? - //iterate through the map to get + let mut policy = PolicyEnforcer(HashMap::new()); + //create a hashmap for ketries ((sub,action)->ketrie) from the vector of rules for v in rule_set { - //for each rule - //extract subject and action - - /* - for now permission not allowed means it will not be added to the allow trie - */ + // for now permission being false means this ke will not be inserted into the trie of allowed ke's let perm = v.permission; if !perm { continue; @@ -228,7 +193,7 @@ impl PolicyEnforcer { let sub = v.sub; let action = v.action; let ke = v.ke; - //let perm = v.permission; + //create subact let subact = SubAct(sub, action); //match subact in the policy hashmap @@ -236,25 +201,20 @@ impl PolicyEnforcer { if !policy.0.contains_key(&subact) { //create new entry for subact + ketree let mut ketree = KeTree::new(); - ketree.insert(Acl::new(&ke), Permissions::READ); - // ketree.insert(ke,1).unwrap(); //1 for allowed?? + //ketree.insert(Acl::new(&ke), Permissionssions::READ); + ketree.insert(Acl::new(&ke), Permissions::Allow); policy.0.insert(subact, ketree); } else { let ketree = policy.0.get_mut(&subact).unwrap(); - // ketree.insert(ke,1).unwrap(); //1 for allowed?? - // let mut ketree = KeTree::new(); - // let x = Permissions::READ; - ketree.insert(Acl::new(&ke), Permissions::READ); - // policy.0.insert(subact,ketree); + //ketree.insert(Acl::new(&ke), Permissionssions::READ); + ketree.insert(Acl::new(&ke), Permissions::Allow); } } - //return policy; - Ok(policy) } pub fn policy_enforcement_point(&self, new_ctx: NewCtx, action: Action) -> ZResult { /* - input: msg body + input: new_context and action (sub,act for now but will need attribute values later) output: allow/deny function: depending on the msg, builds the subject, builds the request, passes the request to policy_decision_point() collects result from PDP and then uses that allow/deny output to block or pass the msg to routing table @@ -262,63 +222,35 @@ impl PolicyEnforcer { let ke = new_ctx.ke; let zid = new_ctx.zid.unwrap(); - //build subject here - + //build subject let subject = SubjectBuilder::new().id(zid).build()?; + //build request let request = RequestBuilder::new() .sub(subject) .obj(ke) .action(action) .build()?; + + //call PDP let decision = self.policy_decision_point(request)?; Ok(decision) } - // pub fn permission_request_builder( - // msg: zenoh_protocol::network::NetworkMessage, - // action: Action, - // ) { - - // /* - // input: msg body - // output: (sub,ke,act) - // function: extract relevant info from the incoming msg body - // build the subject [ID, Attributes and Roles] - // then use that to build the request [subject, key-expression, action ] - // return request to PEP - // */ - // /* - // PHASE1: just extract the ID (zid?) from the msg; can later add attributes to the list. 
have a struct with ID and attributes field (both Option) - // */ - // } - pub fn policy_decision_point(&self, request: Request) -> ZResult { /* input: (request) output: true(allow)/false(deny) - function: process the request from PEP against the policy (self) - policy list will(might) be a hashmap of subject:rules_vector (test and discuss) - */ - /* - PHASE1: policy decisions are hardcoded against the policy list; can later update them using a config file. - */ - //representaiton of policy list as a hashmap of trees? - // HashMap> - /* use KeTrees for mapping R/W values? //need to check this out - tried KeTrees, didn't work - need own algorithm for pattern matching via modified trie-search + function: process the request received from PEP against the policy (self) + policy list is be a hashmap of (subject,action)->ketries (test and discuss) */ - //extract subject and action from request and create subact [this is our key for hashmap] + //extract subject and action from request and create subact [this will be our key for hashmap] let subact = SubAct(request.sub, request.action); let ke = request.obj; - // type policymap = match self.0.get(&subact) { Some(ktrie) => { - // check if request ke has a match in ke-trie - // if ke in ke-trie, then Ok(true) else Ok(false) - //let trie = self.0.get.(&subact).clone(); + // check if request ke has a match in ke-trie; if ke in ketrie, then Ok(true) else Ok(false) let result = ktrie.get_merge::(&Acl::new(&ke)); - if let Some(value) = result { + if let Some(_value) = result { return Ok(true); } } @@ -328,42 +260,23 @@ impl PolicyEnforcer { Ok(false) } - pub fn policy_resource_point() -> ZResult> { + pub fn policy_resource_point(file_path: &str) -> ZResult> { /* - input: config file value along with &self - output: loads the appropriate policy into the memory and returns back self (now with added policy info); might also select AC type (ACL or ABAC) - */ - - /* - PHASE1: just have a vector of structs containing these values; later we can load them here from config + input: path to rules.json file + output: loads the appropriate policy into the memory and returns back a vector of rules; + * might also be the point to select AC type (ACL, ABAC etc)?? 
*/ #[derive(Serialize, Deserialize, Clone)] + struct Rules(Vec); - struct Rules(Vec); // = Vec::new(); - // let mut rdr = ReaderBuilder::new() - // .has_headers(true) - // .from_path("rules.csv") - // .unwrap(); - let mut file = File::open("rules.json5").unwrap(); + let mut file = File::open(file_path).unwrap(); let mut buff = String::new(); file.read_to_string(&mut buff).unwrap(); - let rulevec: Rules = serde_json::from_str(&buff).unwrap(); - // for result in rdr.deserialize() { - // if let Ok(rec) = result { - // let record: Rule = rec; - // rule_set.push(record); - // } else { - // bail!("unable to parse json file"); - // } - // } - Ok(rulevec.0) } } -// fn ketrie_matcher(ke,ketrie){ - // } #[cfg(test)] diff --git a/zenoh/src/net/routing/interceptor/mod.rs b/zenoh/src/net/routing/interceptor/mod.rs index 42db621f74..c53c8339ca 100644 --- a/zenoh/src/net/routing/interceptor/mod.rs +++ b/zenoh/src/net/routing/interceptor/mod.rs @@ -218,10 +218,7 @@ impl InterceptorTrait for IngressAclEnforcer { let new_ctx = NewCtx { ke, zid: Some(zid) }; //how to get the zid here let decision = e.policy_enforcement_point(new_ctx, act).unwrap(); if !decision { - // println!("Not allowed to Write"); return None; - } else { - // println!("Allowed to Write"); } } } @@ -248,10 +245,7 @@ impl InterceptorTrait for EgressAclEnforcer { let new_ctx = NewCtx { ke, zid: self.zid }; let decision = e.policy_enforcement_point(new_ctx, act).unwrap(); if !decision { - // println!("Not allowed to Read"); return None; - } else { - // println!("Allowed to Read"); } } } From e4d10145d4a95851096832870f9c08cc78f4be10 Mon Sep 17 00:00:00 2001 From: snehilzs Date: Wed, 31 Jan 2024 12:16:28 +0100 Subject: [PATCH 046/122] WIP:ACL phase 1 --- examples/examples/z_pub_thr.rs | 2 +- rules.json5 | 2 +- rules_test_thr.json5 | 38 ++++++++++++++++++++++ zenoh/src/net/routing/interceptor/authz.rs | 8 ++--- zenoh/src/net/routing/interceptor/mod.rs | 13 +++++--- 5 files changed, 52 insertions(+), 11 deletions(-) create mode 100644 rules_test_thr.json5 diff --git a/examples/examples/z_pub_thr.rs b/examples/examples/z_pub_thr.rs index 3e130e0608..08d82d2852 100644 --- a/examples/examples/z_pub_thr.rs +++ b/examples/examples/z_pub_thr.rs @@ -28,7 +28,7 @@ fn main() { prio = p.try_into().unwrap(); } - let payload_size = args.payload_size; + let payload_size: usize = 1024; let data: Value = (0..payload_size) .map(|i| (i % 10) as u8) diff --git a/rules.json5 b/rules.json5 index 5fc6a6effa..0314ec8a38 100644 --- a/rules.json5 +++ b/rules.json5 @@ -15,7 +15,7 @@ }, "ke": "demo/example/zenoh-rs-pub", "action": "Write", - "permission": false + "permission": true }, { "sub": { diff --git a/rules_test_thr.json5 b/rules_test_thr.json5 new file mode 100644 index 0000000000..9161937bc7 --- /dev/null +++ b/rules_test_thr.json5 @@ -0,0 +1,38 @@ +[ + { + "sub": { + "id": "aaa3b411006ad57868988f9fec672a31", + "attributes": null + }, + "ke": "test/thr", + "action": "Write", + "permission": true + }, + { + "sub": { + "id": "bbb3b411006ad57868988f9fec672a31", + "attributes": null + }, + "ke": "test/thr", + "action": "Read", + "permission": true + }, + { + "sub": { + "id": "aaabbb11006ad57868988f9fec672a31", + "attributes": null + }, + "ke": "test/thr", + "action": "Read", + "permission": true + }, + { + "sub": { + "id": "aaabbb11006ad57868988f9fec672a31", + "attributes": null + }, + "ke": "test/thr", + "action": "Write", + "permission": true + } +] \ No newline at end of file diff --git a/zenoh/src/net/routing/interceptor/authz.rs 
b/zenoh/src/net/routing/interceptor/authz.rs index 8b0a32435c..cc3d10ba7b 100644 --- a/zenoh/src/net/routing/interceptor/authz.rs +++ b/zenoh/src/net/routing/interceptor/authz.rs @@ -56,7 +56,6 @@ impl ValueMerge for Permissions { fn merge_mut(&mut self, _other: &Self) {} } -//type KeTree = AclTrie; type KeTree = Trie; #[derive(Clone, Debug, Serialize, Deserialize, Eq, PartialEq, Hash)] @@ -168,7 +167,7 @@ impl PolicyEnforcer { creates the policy hashmap with the ke-tries for ke matching should have polic-type in the mix here...need to verify */ - let rule_set = Self::policy_resource_point("rules.json5").unwrap(); + let rule_set = Self::policy_resource_point("rules_test_thr.json5").unwrap(); let pe = Self::build_policy_map(rule_set).expect("policy not established"); //also should start the logger here Ok(pe) @@ -220,6 +219,7 @@ impl PolicyEnforcer { collects result from PDP and then uses that allow/deny output to block or pass the msg to routing table */ + //get keyexpression and zid for the request; attributes will be added at this point (phase 2) let ke = new_ctx.ke; let zid = new_ctx.zid.unwrap(); //build subject @@ -243,7 +243,7 @@ impl PolicyEnforcer { policy list is be a hashmap of (subject,action)->ketries (test and discuss) */ - //extract subject and action from request and create subact [this will be our key for hashmap] + //get subject and action from request and create subact [this will be our key for hashmap] let subact = SubAct(request.sub, request.action); let ke = request.obj; match self.0.get(&subact) { @@ -264,7 +264,7 @@ impl PolicyEnforcer { /* input: path to rules.json file output: loads the appropriate policy into the memory and returns back a vector of rules; - * might also be the point to select AC type (ACL, ABAC etc)?? + * might also be the point to select AC type (ACL, ABAC etc)?? * */ #[derive(Serialize, Deserialize, Clone)] struct Rules(Vec); diff --git a/zenoh/src/net/routing/interceptor/mod.rs b/zenoh/src/net/routing/interceptor/mod.rs index c53c8339ca..a5a4bc0806 100644 --- a/zenoh/src/net/routing/interceptor/mod.rs +++ b/zenoh/src/net/routing/interceptor/mod.rs @@ -19,6 +19,8 @@ //! [Click here for Zenoh's documentation](../zenoh/index.html) //! 
mod authz; +use std::sync::Arc; + use self::authz::{Action, NewCtx}; use super::RoutingContext; @@ -62,8 +64,9 @@ pub(crate) fn interceptor_factories(_config: &Config) -> Vec println!("the interceptor is initialized"); let policy_enforcer = PolicyEnforcer::init().expect("error setting up access control"); + let pe = Arc::new(policy_enforcer); //store the enforcer instance for use in rest of the sessions - vec![Box::new(AclEnforcer { e: policy_enforcer })] + vec![Box::new(AclEnforcer { e: pe })] } pub(crate) struct InterceptorsChain { @@ -159,7 +162,7 @@ impl InterceptorFactoryTrait for LoggerInterceptor { } pub(crate) struct AclEnforcer { - e: PolicyEnforcer, + e: Arc, } impl InterceptorFactoryTrait for AclEnforcer { @@ -200,7 +203,7 @@ impl InterceptorFactoryTrait for AclEnforcer { pub(crate) struct IngressAclEnforcer { // e: Option, - e: PolicyEnforcer, + e: Arc, } impl InterceptorTrait for IngressAclEnforcer { @@ -227,7 +230,7 @@ impl InterceptorTrait for IngressAclEnforcer { } pub(crate) struct EgressAclEnforcer { - e: PolicyEnforcer, + e: Arc, zid: Option, } @@ -236,7 +239,7 @@ impl InterceptorTrait for EgressAclEnforcer { &self, ctx: RoutingContext, ) -> Option> { - //intercept msg and send it to PEP + // intercept msg and send it to PEP if let NetworkBody::Push(push) = ctx.msg.body.clone() { if let zenoh_protocol::zenoh::PushBody::Put(_put) = push.payload { let e = &self.e; From b72d85e11539c74092444fef55a12dd377ab7e2a Mon Sep 17 00:00:00 2001 From: snehilzs Date: Thu, 1 Feb 2024 15:31:46 +0100 Subject: [PATCH 047/122] WIP:ACL phase 1 --- Cargo.lock | 31 +--- zenoh/Cargo.toml | 3 +- .../routing/interceptor/accessintercept.rs | 140 +++++++++++++++++ zenoh/src/net/routing/interceptor/authz.rs | 148 ++++++++++++++---- zenoh/src/net/routing/interceptor/mod.rs | 127 +++------------ 5 files changed, 280 insertions(+), 169 deletions(-) create mode 100644 zenoh/src/net/routing/interceptor/accessintercept.rs diff --git a/Cargo.lock b/Cargo.lock index ce5f077779..8bb241d50d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -537,9 +537,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.4.0" +version = "2.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4682ae6287fcf752ecaabbfcc7b6f9b72aa33933dc23a554d853aea8eea8635" +checksum = "ed570934406eb16438a4e976b1b4500774099c13b8cb96eec99f620f05090ddf" [[package]] name = "blake3" @@ -984,27 +984,6 @@ dependencies = [ "subtle", ] -[[package]] -name = "csv" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac574ff4d437a7b5ad237ef331c17ccca63c46479e5b5453eb8e10bb99a759fe" -dependencies = [ - "csv-core", - "itoa", - "ryu", - "serde", -] - -[[package]] -name = "csv-core" -version = "0.1.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5efa2b3d7902f4b634a20cae3c9c4e6209dc4779feb6863329607560143efa70" -dependencies = [ - "memchr", -] - [[package]] name = "ctr" version = "0.6.0" @@ -2138,7 +2117,7 @@ version = "0.27.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2eb04e9c688eff1c89d72b407f168cf79bb9e867a9d3323ed6c01519eb9cc053" dependencies = [ - "bitflags 2.4.0", + "bitflags 2.4.2", "cfg-if 1.0.0", "libc", ] @@ -3035,7 +3014,7 @@ version = "0.38.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d7db8590df6dfcd144d22afd1b83b36c21a18d7cbc1dc4bb5295a8712e9eb662" dependencies = [ - "bitflags 2.4.0", + "bitflags 
2.4.2", "errno 0.3.3", "libc", "linux-raw-sys 0.4.7", @@ -4598,8 +4577,8 @@ dependencies = [ "async-std", "async-trait", "base64 0.21.4", + "bitflags 2.4.2", "const_format", - "csv", "env_logger", "event-listener 4.0.0", "flume", diff --git a/zenoh/Cargo.toml b/zenoh/Cargo.toml index f68cbe20ad..d87cfb1e66 100644 --- a/zenoh/Cargo.toml +++ b/zenoh/Cargo.toml @@ -105,9 +105,10 @@ zenoh-sync = { workspace = true } zenoh-transport = { workspace = true } zenoh-util = { workspace = true } fr-trie = "*" -csv = "1.3.0" [build-dependencies] rustc_version = { workspace = true } +[dependencies.bitflags] +version = "2.4.2" [lib] name = "zenoh" diff --git a/zenoh/src/net/routing/interceptor/accessintercept.rs b/zenoh/src/net/routing/interceptor/accessintercept.rs new file mode 100644 index 0000000000..8942a07fd7 --- /dev/null +++ b/zenoh/src/net/routing/interceptor/accessintercept.rs @@ -0,0 +1,140 @@ +use std::sync::Arc; + +use zenoh_config::ZenohId; +use zenoh_protocol::{ + network::{NetworkBody, NetworkMessage, Push}, + zenoh::PushBody, +}; +use zenoh_transport::{multicast::TransportMulticast, unicast::TransportUnicast}; + +use crate::net::routing::{interceptor::authz::PolicyEnforcer, RoutingContext}; + +use super::{ + authz::{self, NewCtx}, + EgressInterceptor, IngressInterceptor, InterceptorFactoryTrait, InterceptorTrait, +}; +use authz::{Action, Request, Subject}; + +pub(crate) struct AclEnforcer { + pub(crate) e: Arc, +} + +impl InterceptorFactoryTrait for AclEnforcer { + fn new_transport_unicast( + &self, + transport: &TransportUnicast, + ) -> (Option, Option) { + let uid = transport.get_zid().unwrap(); + ( + Some(Box::new(IngressAclEnforcer { + e: self.e.clone(), + zid: Some(uid), + })), + Some(Box::new(EgressAclEnforcer { + zid: Some(uid), + e: self.e.clone(), + })), + ) + } + + fn new_transport_multicast( + &self, + _transport: &TransportMulticast, + ) -> Option { + let e = &self.e; + Some(Box::new(EgressAclEnforcer { + e: e.clone(), + zid: None, + })) + } + + fn new_peer_multicast(&self, _transport: &TransportMulticast) -> Option { + let e = &self.e; + Some(Box::new(IngressAclEnforcer { + e: e.clone(), + zid: None, + })) + } +} + +struct IngressAclEnforcer { + e: Arc, + zid: Option, +} + +impl InterceptorTrait for IngressAclEnforcer { + fn intercept<'a>( + &self, + ctx: RoutingContext, + ) -> Option> { + //intercept msg and send it to PEP + if let NetworkBody::Push(Push { + payload: PushBody::Put(_), + .. + }) = &ctx.msg.body + { + let e = &self.e; + let ke: &str = ctx.full_expr().unwrap(); + let new_ctx = NewCtx { ke, zid: self.zid }; //how to get the zid here + let decision = e.policy_enforcement_point(new_ctx, Action::Write).unwrap(); + + // let sub = Subject { + // id: self.zid.unwrap(), + // attributes: None, + // }; + // let request = Request { + // sub, + // obj: ke.to_owned(), + // action: Action::Write, + // }; + // let decision = e.policy_decision_point(request).unwrap(); + + if !decision { + return None; + } + } + + Some(ctx) + } +} + +struct EgressAclEnforcer { + e: Arc, + zid: Option, +} + +impl InterceptorTrait for EgressAclEnforcer { + fn intercept( + &self, + ctx: RoutingContext, + ) -> Option> { + // intercept msg and send it to PEP + if let NetworkBody::Push(Push { + payload: PushBody::Put(_), + .. 
+ }) = &ctx.msg.body + { + let e = &self.e; + let ke: &str = ctx.full_expr().unwrap(); + let new_ctx = NewCtx { ke, zid: self.zid }; + let decision = e.policy_enforcement_point(new_ctx, Action::Read).unwrap(); + + // let sub = Subject { + // id: self.zid.unwrap(), + // attributes: None, + // }; + // let request = Request { + // sub, + // obj: ke.to_owned(), + // action: Action::Read, + // }; + // let decision = e.policy_decision_point(request).unwrap(); + + if !decision { + return None; + } + } + + Some(ctx) + } +} diff --git a/zenoh/src/net/routing/interceptor/authz.rs b/zenoh/src/net/routing/interceptor/authz.rs index cc3d10ba7b..c408356384 100644 --- a/zenoh/src/net/routing/interceptor/authz.rs +++ b/zenoh/src/net/routing/interceptor/authz.rs @@ -16,12 +16,28 @@ use fr_trie::trie::Trie; use std::collections::HashMap; +use bitflags::bitflags; + +bitflags! { + #[derive(Clone,PartialEq)] + pub struct ActionFlag: u8 { + const None = 0b00000000; + const Read = 0b00000001; + const Write = 0b00000010; + const DeclareSub = 0b00000100; + const Delete = 0b00001000; + const DeclareQuery = 0b00010000; + } +} + #[derive(Clone, Debug, Serialize, Deserialize, Eq, Hash, PartialEq)] pub enum Action { None, Read, Write, - Both, + DeclareSub, + Delete, + DeclareQuery, } pub struct NewCtx<'a> { @@ -31,9 +47,9 @@ pub struct NewCtx<'a> { #[derive(Debug, Serialize, Deserialize)] pub struct Request { - sub: Subject, - obj: String, - action: Action, + pub(crate) sub: Subject, + pub(crate) obj: String, + pub(crate) action: Action, } pub struct RequestBuilder { @@ -56,12 +72,13 @@ impl ValueMerge for Permissions { fn merge_mut(&mut self, _other: &Self) {} } -type KeTree = Trie; +//type KeTree = Trie; +type KeTreeFast = Trie; #[derive(Clone, Debug, Serialize, Deserialize, Eq, PartialEq, Hash)] pub struct Subject { - id: ZenohId, - attributes: Option>, //might be mapped to other types eventually + pub(crate) id: ZenohId, + pub(crate) attributes: Option>, //might be mapped to other types eventually } //subject_builder (add ID, attributes, roles) @@ -113,7 +130,7 @@ impl RequestBuilder { impl SubjectBuilder { pub fn new() -> Self { - //creates the default request + //creates a new request SubjectBuilder { id: None, attributes: None, @@ -133,10 +150,10 @@ impl SubjectBuilder { pub fn build(&mut self) -> ZResult { let id = self.id.unwrap(); - let attr = self.attributes.clone(); + let attr = &self.attributes; Ok(Subject { id, - attributes: attr, + attributes: self.attributes.clone(), }) } } @@ -154,34 +171,40 @@ pub struct Rule { pub struct SubAct(Subject, Action); #[derive(Clone)] -pub struct PolicyEnforcer(HashMap); +//pub struct PolicyEnforcer(HashMap); +pub struct PolicyEnforcer(pub HashMap); #[derive(Clone, PartialEq, Eq, Hash)] pub struct KeTrie {} impl PolicyEnforcer { - pub fn init() -> ZResult { + pub fn init(&mut self) -> ZResult<()> { /* Initializs the policy for the control logic loads policy into memory from file/network path creates the policy hashmap with the ke-tries for ke matching should have polic-type in the mix here...need to verify */ - let rule_set = Self::policy_resource_point("rules_test_thr.json5").unwrap(); - let pe = Self::build_policy_map(rule_set).expect("policy not established"); + // let rule_set = Self::policy_resource_point("rules_test_thr.json5").unwrap(); + // let pe = Self::build_policy_map_with_sub(rule_set).expect("policy not established"); + + let rule_set = self.policy_resource_point("rules_test_thr.json5").unwrap(); + self.build_policy_map_with(rule_set) + .expect("policy not 
established"); + //also should start the logger here - Ok(pe) + Ok(()) } - pub fn build_policy_map(rule_set: Vec) -> ZResult { + pub fn build_policy_map_with(&mut self, rule_set: Vec) -> ZResult<()> { //convert vector of rules to a hashmap mapping subact to ketrie /* representaiton of policy list as a hashmap of trees tried KeTrees, didn't work using fr-trie for now as a placeholder for key-matching */ - - let mut policy = PolicyEnforcer(HashMap::new()); + let mut policy = self; + //let mut policy = PolicyEnforcer(HashMap::new()); //create a hashmap for ketries ((sub,action)->ketrie) from the vector of rules for v in rule_set { // for now permission being false means this ke will not be inserted into the trie of allowed ke's @@ -190,27 +213,74 @@ impl PolicyEnforcer { continue; } let sub = v.sub; - let action = v.action; let ke = v.ke; + let action_flag = match v.action { + Action::Read => ActionFlag::Read, + Action::Write => ActionFlag::Write, + Action::None => ActionFlag::None, + Action::DeclareSub => ActionFlag::DeclareSub, + Action::Delete => ActionFlag::Delete, + Action::DeclareQuery => ActionFlag::DeclareQuery, + }; //create subact - let subact = SubAct(sub, action); + //let subact = SubAct(sub, action); //match subact in the policy hashmap #[allow(clippy::map_entry)] - if !policy.0.contains_key(&subact) { + if !policy.0.contains_key(&sub) { //create new entry for subact + ketree - let mut ketree = KeTree::new(); + let mut ketree = KeTreeFast::new(); //ketree.insert(Acl::new(&ke), Permissionssions::READ); - ketree.insert(Acl::new(&ke), Permissions::Allow); - policy.0.insert(subact, ketree); + ketree.insert(Acl::new(&ke), action_flag); + policy.0.insert(sub, ketree); } else { - let ketree = policy.0.get_mut(&subact).unwrap(); + let ketree = policy.0.get_mut(&sub).unwrap(); //ketree.insert(Acl::new(&ke), Permissionssions::READ); - ketree.insert(Acl::new(&ke), Permissions::Allow); + ketree.insert(Acl::new(&ke), action_flag); } } - Ok(policy) + Ok(()) } + + // pub fn build_policy_map(rule_set: Vec) -> ZResult { + // //convert vector of rules to a hashmap mapping subact to ketrie + // /* + // representaiton of policy list as a hashmap of trees + // tried KeTrees, didn't work + // using fr-trie for now as a placeholder for key-matching + // */ + // let mut policy = PolicyEnforcer(HashMap::new()); + // //create a hashmap for ketries ((sub,action)->ketrie) from the vector of rules + // for v in rule_set { + // // for now permission being false means this ke will not be inserted into the trie of allowed ke's + // let perm = v.permission; + // if !perm { + // continue; + // } + // let sub = v.sub; + // // let action = v.action; + // let action_flag = v.action as isize; + // let ke = v.ke; + + // //create subact + // // let subact = SubAct(sub, action); + // //match subact in the policy hashmap + // #[allow(clippy::map_entry)] + // if !policy.0.contains_key(&sub) { + // //create new entry for subact + ketree + // let mut ketree = KeTree::new(); + // //ketree.insert(Acl::new(&ke), Permissionssions::READ); + // ketree.insert(Acl::new(&ke), ActionFlag::); + // policy.0.insert(subact, ketree); + // } else { + // let ketree = policy.0.get_mut(&sub).unwrap(); + // //ketree.insert(Acl::new(&ke), Permissionssions::READ); + // ketree.insert(Acl::new(&ke), Permissions::Allow); + // } + // } + // Ok(policy) + // } + pub fn policy_enforcement_point(&self, new_ctx: NewCtx, action: Action) -> ZResult { /* input: new_context and action (sub,act for now but will need attribute values later) @@ -244,23 +314,33 
@@ impl PolicyEnforcer { */ //get subject and action from request and create subact [this will be our key for hashmap] - let subact = SubAct(request.sub, request.action); + // let subact = SubAct(request.sub, request.action); + let action_flag = match request.action { + Action::Read => ActionFlag::Read, + Action::Write => ActionFlag::Write, + Action::None => ActionFlag::None, + Action::DeclareSub => ActionFlag::DeclareSub, + Action::Delete => ActionFlag::Delete, + Action::DeclareQuery => ActionFlag::DeclareQuery, + }; let ke = request.obj; - match self.0.get(&subact) { + match self.0.get(&request.sub) { Some(ktrie) => { // check if request ke has a match in ke-trie; if ke in ketrie, then Ok(true) else Ok(false) - let result = ktrie.get_merge::(&Acl::new(&ke)); - if let Some(_value) = result { - return Ok(true); + //let result = ktrie.get_merge::(&Acl::new(&ke)); + let result = ktrie.get::(&Acl::new(&ke)); + if let Some(value) = result { + if (value & action_flag) != ActionFlag::None { + return Ok(true); + } } } None => return Ok(false), } - Ok(false) } - pub fn policy_resource_point(file_path: &str) -> ZResult> { + pub fn policy_resource_point(&self, file_path: &str) -> ZResult> { /* input: path to rules.json file output: loads the appropriate policy into the memory and returns back a vector of rules; diff --git a/zenoh/src/net/routing/interceptor/mod.rs b/zenoh/src/net/routing/interceptor/mod.rs index a5a4bc0806..c9777ae0c5 100644 --- a/zenoh/src/net/routing/interceptor/mod.rs +++ b/zenoh/src/net/routing/interceptor/mod.rs @@ -18,17 +18,16 @@ //! //! [Click here for Zenoh's documentation](../zenoh/index.html) //! +mod accessintercept; mod authz; -use std::sync::Arc; - -use self::authz::{Action, NewCtx}; +use std::{collections::HashMap, sync::Arc}; use super::RoutingContext; -use crate::net::routing::interceptor::authz::PolicyEnforcer; -use zenoh_config::{Config, ZenohId}; -use zenoh_protocol::network::{NetworkBody, NetworkMessage}; +//use crate::net::routing::interceptor::authz; +use crate::net::routing::interceptor::{accessintercept::AclEnforcer, authz::PolicyEnforcer}; +use zenoh_config::Config; +use zenoh_protocol::network::NetworkMessage; use zenoh_transport::{multicast::TransportMulticast, unicast::TransportUnicast}; - pub(crate) trait InterceptorTrait { fn intercept( &self, @@ -54,19 +53,27 @@ pub(crate) type InterceptorFactory = Box Vec { // Add interceptors here // TODO build the list of intercetors with the correct order from the config - // vec![Box::new(LoggerInterceptor {})] + let mut res: Vec = vec![]; /* this is the singleton for interceptors all init code for AC should be called here example, for casbin we are using the enforecer init here for in-built AC, we will load the policy rules here and also set the parameters (type of policy etc) */ - println!("the interceptor is initialized"); - let policy_enforcer = PolicyEnforcer::init().expect("error setting up access control"); - let pe = Arc::new(policy_enforcer); + /* if config condition is selected this will be initialiased; putting true for now */ + if true { + println!("the interceptor is initialized"); + let mut policy_enforcer = PolicyEnforcer(HashMap::new()); + match policy_enforcer.init() { + Ok(_) => res.push(Box::new(AclEnforcer { + e: Arc::new(policy_enforcer), + })), + Err(e) => log::error!("access control not initialized with error {}!", e), + } + } //store the enforcer instance for use in rest of the sessions - vec![Box::new(AclEnforcer { e: pe })] + res } pub(crate) struct InterceptorsChain { @@ -160,99 +167,3 
@@ impl InterceptorFactoryTrait for LoggerInterceptor { Some(Box::new(IngressMsgLogger {})) } } - -pub(crate) struct AclEnforcer { - e: Arc, -} - -impl InterceptorFactoryTrait for AclEnforcer { - fn new_transport_unicast( - &self, - transport: &TransportUnicast, - ) -> (Option, Option) { - let e = &self.e; - - let uid = transport.get_zid().unwrap(); - ( - Some(Box::new(IngressAclEnforcer { e: e.clone() })), - Some(Box::new(EgressAclEnforcer { - zid: Some(uid), - e: e.clone(), - })), - ) - } - - fn new_transport_multicast( - &self, - _transport: &TransportMulticast, - ) -> Option { - let e = &self.e; - //let uid = _transport.get_zid().unwrap(); - - Some(Box::new(EgressAclEnforcer { - e: e.clone(), - zid: None, - })) - } - - fn new_peer_multicast(&self, _transport: &TransportMulticast) -> Option { - let e = &self.e; - Some(Box::new(IngressAclEnforcer { e: e.clone() })) - } -} - -pub(crate) struct IngressAclEnforcer { - // e: Option, - e: Arc, -} - -impl InterceptorTrait for IngressAclEnforcer { - fn intercept( - &self, - ctx: RoutingContext, - ) -> Option> { - //intercept msg and send it to PEP - if let NetworkBody::Push(push) = ctx.msg.body.clone() { - if let zenoh_protocol::zenoh::PushBody::Put(_put) = push.payload { - let e = &self.e; - let act = Action::Write; - let ke: &str = ctx.full_expr().unwrap(); - let zid = ctx.inface().unwrap().state.zid; - let new_ctx = NewCtx { ke, zid: Some(zid) }; //how to get the zid here - let decision = e.policy_enforcement_point(new_ctx, act).unwrap(); - if !decision { - return None; - } - } - } - Some(ctx) - } -} - -pub(crate) struct EgressAclEnforcer { - e: Arc, - zid: Option, -} - -impl InterceptorTrait for EgressAclEnforcer { - fn intercept( - &self, - ctx: RoutingContext, - ) -> Option> { - // intercept msg and send it to PEP - if let NetworkBody::Push(push) = ctx.msg.body.clone() { - if let zenoh_protocol::zenoh::PushBody::Put(_put) = push.payload { - let e = &self.e; - let act = Action::Read; - let ke: &str = ctx.full_expr().unwrap(); - let new_ctx = NewCtx { ke, zid: self.zid }; - let decision = e.policy_enforcement_point(new_ctx, act).unwrap(); - if !decision { - return None; - } - } - } - - Some(ctx) - } -} From f7a26e25d600e98f8719001230f2982f79110997 Mon Sep 17 00:00:00 2001 From: snehilzs Date: Thu, 8 Feb 2024 09:23:29 +0100 Subject: [PATCH 048/122] WIP:Modified ACL for attributes --- Cargo.lock | 1 + zenoh/Cargo.toml | 2 + zenoh/src/net/routing/dispatcher/resource.rs | 7 +- .../routing/interceptor/accessintercept.rs | 86 ++-- zenoh/src/net/routing/interceptor/authz.rs | 379 +++++++++++------- zenoh/src/net/routing/interceptor/mod.rs | 8 +- zenoh/src/net/routing/mod.rs | 9 +- 7 files changed, 285 insertions(+), 207 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8bb241d50d..9b4291712f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4594,6 +4594,7 @@ dependencies = [ "petgraph", "rand 0.8.5", "regex", + "rustc-hash", "rustc_version 0.4.0", "serde", "serde_json", diff --git a/zenoh/Cargo.toml b/zenoh/Cargo.toml index d87cfb1e66..1866ebffa4 100644 --- a/zenoh/Cargo.toml +++ b/zenoh/Cargo.toml @@ -105,11 +105,13 @@ zenoh-sync = { workspace = true } zenoh-transport = { workspace = true } zenoh-util = { workspace = true } fr-trie = "*" +rustc-hash = "1.1.0" [build-dependencies] rustc_version = { workspace = true } [dependencies.bitflags] version = "2.4.2" + [lib] name = "zenoh" diff --git a/zenoh/src/net/routing/dispatcher/resource.rs b/zenoh/src/net/routing/dispatcher/resource.rs index 8a183088d6..84b5679471 100644 --- 
a/zenoh/src/net/routing/dispatcher/resource.rs +++ b/zenoh/src/net/routing/dispatcher/resource.rs @@ -214,7 +214,12 @@ impl Resource { pub fn expr(&self) -> String { match &self.parent { - Some(parent) => parent.expr() + &self.suffix, + // Some(parent) => parent.expr() + &self.suffix, + Some(parent) => { + let mut string = parent.expr(); + string.push_str(&self.suffix); + string + } None => String::from(""), } } diff --git a/zenoh/src/net/routing/interceptor/accessintercept.rs b/zenoh/src/net/routing/interceptor/accessintercept.rs index 8942a07fd7..d9030424a4 100644 --- a/zenoh/src/net/routing/interceptor/accessintercept.rs +++ b/zenoh/src/net/routing/interceptor/accessintercept.rs @@ -7,16 +7,14 @@ use zenoh_protocol::{ }; use zenoh_transport::{multicast::TransportMulticast, unicast::TransportUnicast}; -use crate::net::routing::{interceptor::authz::PolicyEnforcer, RoutingContext}; +use crate::net::routing::RoutingContext; use super::{ - authz::{self, NewCtx}, + authz::{ActionFlag, NewCtx, NewPolicyEnforcer}, EgressInterceptor, IngressInterceptor, InterceptorFactoryTrait, InterceptorTrait, }; -use authz::{Action, Request, Subject}; - pub(crate) struct AclEnforcer { - pub(crate) e: Arc, + pub(crate) e: Arc, } impl InterceptorFactoryTrait for AclEnforcer { @@ -28,10 +26,10 @@ impl InterceptorFactoryTrait for AclEnforcer { ( Some(Box::new(IngressAclEnforcer { e: self.e.clone(), - zid: Some(uid), + zid: uid, })), Some(Box::new(EgressAclEnforcer { - zid: Some(uid), + zid: uid, e: self.e.clone(), })), ) @@ -44,7 +42,7 @@ impl InterceptorFactoryTrait for AclEnforcer { let e = &self.e; Some(Box::new(EgressAclEnforcer { e: e.clone(), - zid: None, + zid: ZenohId::default(), })) } @@ -52,14 +50,14 @@ impl InterceptorFactoryTrait for AclEnforcer { let e = &self.e; Some(Box::new(IngressAclEnforcer { e: e.clone(), - zid: None, + zid: ZenohId::default(), })) } } struct IngressAclEnforcer { - e: Arc, - zid: Option, + e: Arc, + zid: ZenohId, } impl InterceptorTrait for IngressAclEnforcer { @@ -74,23 +72,21 @@ impl InterceptorTrait for IngressAclEnforcer { }) = &ctx.msg.body { let e = &self.e; - let ke: &str = ctx.full_expr().unwrap(); - let new_ctx = NewCtx { ke, zid: self.zid }; //how to get the zid here - let decision = e.policy_enforcement_point(new_ctx, Action::Write).unwrap(); - - // let sub = Subject { - // id: self.zid.unwrap(), - // attributes: None, - // }; - // let request = Request { - // sub, - // obj: ke.to_owned(), - // action: Action::Write, - // }; - // let decision = e.policy_decision_point(request).unwrap(); - if !decision { - return None; + let ke: String = ctx.full_expr().unwrap().to_owned(); + // let ke: String = "test/thr".to_owned(); //for testing + let new_ctx = NewCtx { + ke: &ke, + zid: self.zid, + attributes: None, + }; + match e.policy_enforcement_point(new_ctx, ActionFlag::Write) { + Ok(decision) => { + if !decision { + return None; + } + } + Err(_) => return None, } } @@ -99,8 +95,8 @@ impl InterceptorTrait for IngressAclEnforcer { } struct EgressAclEnforcer { - e: Arc, - zid: Option, + e: Arc, + zid: ZenohId, } impl InterceptorTrait for EgressAclEnforcer { @@ -115,23 +111,21 @@ impl InterceptorTrait for EgressAclEnforcer { }) = &ctx.msg.body { let e = &self.e; - let ke: &str = ctx.full_expr().unwrap(); - let new_ctx = NewCtx { ke, zid: self.zid }; - let decision = e.policy_enforcement_point(new_ctx, Action::Read).unwrap(); - - // let sub = Subject { - // id: self.zid.unwrap(), - // attributes: None, - // }; - // let request = Request { - // sub, - // obj: ke.to_owned(), - // 
action: Action::Read, - // }; - // let decision = e.policy_decision_point(request).unwrap(); - - if !decision { - return None; + let ke: String = ctx.full_expr().unwrap().to_owned(); + + // let ke: String = "test/thr".to_owned(); //for testing + let new_ctx = NewCtx { + ke: &ke, + zid: self.zid, + attributes: None, + }; + match e.policy_enforcement_point(new_ctx, ActionFlag::Read) { + Ok(decision) => { + if !decision { + return None; + } + } + Err(_) => return None, } } diff --git a/zenoh/src/net/routing/interceptor/authz.rs b/zenoh/src/net/routing/interceptor/authz.rs index c408356384..de62cf77bf 100644 --- a/zenoh/src/net/routing/interceptor/authz.rs +++ b/zenoh/src/net/routing/interceptor/authz.rs @@ -1,26 +1,32 @@ +use std::collections::HashMap; use std::fs::File; -use std::hash::Hash; +use std::hash::{Hash, Hasher}; use std::io::Read; +use std::net::Ipv4Addr; +use std::str::FromStr; +use fr_trie::glob::GlobMatcher; +//use serde::ser::SerializeStruct; //use super::RoutingContext; -use fr_trie::key::ValueMerge; use serde::{Deserialize, Serialize}; use zenoh_config::ZenohId; //use zenoh_protocol::network::NetworkMessage; -use zenoh_result::ZResult; - use fr_trie::glob::acl::Acl; -use fr_trie::glob::GlobMatcher; +use rustc_hash::FxHashMap; +use zenoh_result::ZResult; +//use fr_trie::glob::GlobMatcher; use fr_trie::trie::Trie; -use std::collections::HashMap; +//use std::collections::HashMap; use bitflags::bitflags; +#[derive(Clone, PartialEq, Serialize, Deserialize, Debug)] +pub struct ActionFlag(u8); + bitflags! { - #[derive(Clone,PartialEq)] - pub struct ActionFlag: u8 { + impl ActionFlag: u8 { const None = 0b00000000; const Read = 0b00000001; const Write = 0b00000010; @@ -42,50 +48,76 @@ pub enum Action { pub struct NewCtx<'a> { pub(crate) ke: &'a str, - pub(crate) zid: Option, + pub(crate) zid: ZenohId, + pub(crate) attributes: Option, } #[derive(Debug, Serialize, Deserialize)] pub struct Request { pub(crate) sub: Subject, pub(crate) obj: String, - pub(crate) action: Action, + pub(crate) action: ActionFlag, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct NewRequest { + pub(crate) sub: PolicySubject, + pub(crate) obj: String, + pub(crate) action: ActionFlag, } pub struct RequestBuilder { sub: Option, obj: Option, - action: Option, + action: Option, } -#[derive(Clone, Debug)] +//#[derive(Clone, Debug)] -pub enum Permissions { - Deny, - Allow, -} +// pub enum Permissions { +// Deny, +// Allow, +// } + +type KeTreeFast = Trie; -impl ValueMerge for Permissions { - fn merge(&self, _other: &Self) -> Self { - self.clone() +#[derive(Clone, Debug, Serialize, Deserialize, Eq, PartialEq)] + +pub struct Attributes(HashMap); + +// impl Into for Option { +// fn into(self) -> Attributes { +// self::Attributes +// } +// } + +impl std::hash::Hash for Attributes { + fn hash(&self, state: &mut H) { + let mut pairs: Vec<_> = self.0.iter().collect(); + pairs.sort_by_key(|i| i.0); + Hash::hash(&pairs, state); } - fn merge_mut(&mut self, _other: &Self) {} + fn hash_slice(data: &[Self], state: &mut H) + where + Self: Sized, + { + for piece in data { + piece.hash(state) + } + } } -//type KeTree = Trie; -type KeTreeFast = Trie; - #[derive(Clone, Debug, Serialize, Deserialize, Eq, PartialEq, Hash)] pub struct Subject { pub(crate) id: ZenohId, - pub(crate) attributes: Option>, //might be mapped to other types eventually + pub(crate) attributes: Option, //might be mapped to other types eventually } //subject_builder (add ID, attributes, roles) pub struct SubjectBuilder { id: Option, - attributes: 
Option>, + attributes: Option, } //request_builder (add subject, resource, action) //do we need one? @@ -105,17 +137,17 @@ impl RequestBuilder { pub fn sub(&mut self, sub: impl Into) -> &mut Self { //adds subject - self.sub.insert(sub.into()); + let _ = self.sub.insert(sub.into()); self } pub fn obj(&mut self, obj: impl Into) -> &mut Self { - self.obj.insert(obj.into()); + let _ = self.obj.insert(obj.into()); self } - pub fn action(&mut self, action: impl Into) -> &mut Self { - self.action.insert(action.into()); + pub fn action(&mut self, action: impl Into) -> &mut Self { + let _ = self.action.insert(action.into()); self } @@ -139,73 +171,102 @@ impl SubjectBuilder { pub fn id(&mut self, id: impl Into) -> &mut Self { //adds subject - self.id.insert(id.into()); + let _ = self.id.insert(id.into()); self } - pub fn attributes(&mut self, attributes: impl Into>) -> &mut Self { - self.attributes.insert(attributes.into()); + pub fn attributes(&mut self, attributes: impl Into) -> &mut Self { + let _ = self.attributes.insert(attributes.into()); self } pub fn build(&mut self) -> ZResult { let id = self.id.unwrap(); - let attr = &self.attributes; + let attr = self.attributes.as_ref(); Ok(Subject { id, - attributes: self.attributes.clone(), + attributes: attr.cloned(), }) } } -//struct that defines a single rule in the access-control policy -#[derive(Serialize, Deserialize, Clone)] -pub struct Rule { - sub: Subject, - ke: String, - action: Action, - permission: bool, +#[derive(Debug, Clone, Serialize, Deserialize, Eq, Hash, PartialEq)] +pub enum PolicySubject { + Id(ZenohId), + Attribute(Attribute), } -#[derive(Clone, Debug, PartialEq, Eq, Hash)] -pub struct SubAct(Subject, Action); +//single attribute per policy check +#[derive(Eq, Clone, Hash, PartialEq, Serialize, Debug, Deserialize)] +pub enum Attribute { + IPRange(Ipv4Addr), + Networktype(u8), //1 for wifi,2 for lan etc +} +//#[derive(Eq, Hash, PartialEq)] +type SinglePolicy = FxHashMap; +pub struct NewPolicyEnforcer( + u8, //stores types of policies (for now just 1,2,3 for userid,attribute,both) + pub Vec, //stores +); -#[derive(Clone)] -//pub struct PolicyEnforcer(HashMap); -pub struct PolicyEnforcer(pub HashMap); #[derive(Clone, PartialEq, Eq, Hash)] - pub struct KeTrie {} -impl PolicyEnforcer { +#[derive(Clone, PartialEq, Eq, Hash, Debug)] +pub struct PolicyRule { + sub: PolicySubject, + ke: String, + action: Action, + permission: bool, +} +impl NewPolicyEnforcer { + pub fn new() -> ZResult { + // PolicyEnforcer + Ok(NewPolicyEnforcer(0, Vec::new())) + } + pub fn init(&mut self) -> ZResult<()> { /* - Initializs the policy for the control logic + Initializes the policy for the control logic loads policy into memory from file/network path creates the policy hashmap with the ke-tries for ke matching - should have polic-type in the mix here...need to verify + can have policy-type in the mix here...need to verify */ - // let rule_set = Self::policy_resource_point("rules_test_thr.json5").unwrap(); - // let pe = Self::build_policy_map_with_sub(rule_set).expect("policy not established"); + //set the policy type to 1,2,3 depending on the user input in the "rules" file - let rule_set = self.policy_resource_point("rules_test_thr.json5").unwrap(); - self.build_policy_map_with(rule_set) + let (rule_type, rule_set) = self.policy_resource_point("rules_test_thr.json5").unwrap(); + self.build_policy_map(rule_type, rule_set) .expect("policy not established"); + /* setup a temporary variable here to hold all the values */ //also should start the logger here 
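        // A rough sketch of what the map built above ends up holding and how it is
        // queried at decision time, using only the fr-trie and bitflags calls already
        // present in this file (illustrative key and flag values, not part of the patch):
        //
        //     let mut ketree = KeTreeFast::new();
        //     ketree.insert(Acl::new("test/thr"), ActionFlag::Read | ActionFlag::Write);
        //     let stored = ketree.get::<GlobMatcher>(&Acl::new("test/thr"));
        //     let allowed = matches!(stored, Some(f) if (f & ActionFlag::Write) != ActionFlag::None);
        //
        // i.e. each subject maps to a key-expression trie whose values are ActionFlag
        // bit sets, and a request is allowed when the requested bit survives the `&`.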
Ok(()) } - pub fn build_policy_map_with(&mut self, rule_set: Vec) -> ZResult<()> { + pub fn build_policy_map(&mut self, rule_type: u8, rule_set: Vec) -> ZResult<()> { //convert vector of rules to a hashmap mapping subact to ketrie /* - representaiton of policy list as a hashmap of trees - tried KeTrees, didn't work + representaiton of policy list as a vector of hashmap of trees + each hashmap maps a subject (ID/atttribute) to a trie of allowed values using fr-trie for now as a placeholder for key-matching */ - let mut policy = self; - //let mut policy = PolicyEnforcer(HashMap::new()); - //create a hashmap for ketries ((sub,action)->ketrie) from the vector of rules + self.0 = rule_type; + let map_subject_policy = &mut self.1; + match rule_type { + 1 => map_subject_policy.push(Self::build_id_map(rule_set)), + + 2 => map_subject_policy.push(Self::build_attribute_map(rule_set)), + 3 | 4 => { + map_subject_policy.push(Self::build_id_map(rule_set.clone())); + map_subject_policy.push(Self::build_attribute_map(rule_set)); + } + _ => bail!("bad entry for type"), + } + Ok(()) + } + + pub fn build_id_map(rule_set: Vec) -> SinglePolicy { + let mut policy: SinglePolicy = FxHashMap::default(); for v in rule_set { // for now permission being false means this ke will not be inserted into the trie of allowed ke's let perm = v.permission; @@ -223,137 +284,149 @@ impl PolicyEnforcer { Action::DeclareQuery => ActionFlag::DeclareQuery, }; - //create subact - //let subact = SubAct(sub, action); - //match subact in the policy hashmap + //match subject to the policy hashmap #[allow(clippy::map_entry)] - if !policy.0.contains_key(&sub) { - //create new entry for subact + ketree + if !policy.contains_key(&sub) { + //create new entry for subject + ke-tree let mut ketree = KeTreeFast::new(); - //ketree.insert(Acl::new(&ke), Permissionssions::READ); ketree.insert(Acl::new(&ke), action_flag); - policy.0.insert(sub, ketree); + policy.insert(sub, ketree); } else { - let ketree = policy.0.get_mut(&sub).unwrap(); - //ketree.insert(Acl::new(&ke), Permissionssions::READ); - ketree.insert(Acl::new(&ke), action_flag); + let ketree = policy.get_mut(&sub).unwrap(); + let old_flag = ketree.get::(&Acl::new(&ke)).unwrap(); + ketree.insert(Acl::new(&ke), action_flag | old_flag); //update old flag } } - Ok(()) + policy } - // pub fn build_policy_map(rule_set: Vec) -> ZResult { - // //convert vector of rules to a hashmap mapping subact to ketrie - // /* - // representaiton of policy list as a hashmap of trees - // tried KeTrees, didn't work - // using fr-trie for now as a placeholder for key-matching - // */ - // let mut policy = PolicyEnforcer(HashMap::new()); - // //create a hashmap for ketries ((sub,action)->ketrie) from the vector of rules - // for v in rule_set { - // // for now permission being false means this ke will not be inserted into the trie of allowed ke's - // let perm = v.permission; - // if !perm { - // continue; - // } - // let sub = v.sub; - // // let action = v.action; - // let action_flag = v.action as isize; - // let ke = v.ke; - - // //create subact - // // let subact = SubAct(sub, action); - // //match subact in the policy hashmap - // #[allow(clippy::map_entry)] - // if !policy.0.contains_key(&sub) { - // //create new entry for subact + ketree - // let mut ketree = KeTree::new(); - // //ketree.insert(Acl::new(&ke), Permissionssions::READ); - // ketree.insert(Acl::new(&ke), ActionFlag::); - // policy.0.insert(subact, ketree); - // } else { - // let ketree = policy.0.get_mut(&sub).unwrap(); - // 
//ketree.insert(Acl::new(&ke), Permissionssions::READ); - // ketree.insert(Acl::new(&ke), Permissions::Allow); - // } - // } - // Ok(policy) - // } - - pub fn policy_enforcement_point(&self, new_ctx: NewCtx, action: Action) -> ZResult { + fn build_attribute_map(rule_set: Vec) -> SinglePolicy { + let x: SinglePolicy = FxHashMap::default(); + return x; + } + + pub fn policy_enforcement_point(&self, new_ctx: NewCtx, action: ActionFlag) -> ZResult { /* input: new_context and action (sub,act for now but will need attribute values later) - output: allow/deny + output: allow/denyca function: depending on the msg, builds the subject, builds the request, passes the request to policy_decision_point() collects result from PDP and then uses that allow/deny output to block or pass the msg to routing table */ //get keyexpression and zid for the request; attributes will be added at this point (phase 2) + let ke = new_ctx.ke; - let zid = new_ctx.zid.unwrap(); - //build subject - let subject = SubjectBuilder::new().id(zid).build()?; + let zid = new_ctx.zid; + let attribute = new_ctx.attributes; + // if let Some(value) = new_ctx.attributes { + // attribute = value; + // } + // let subject = SubjectBuilder::new().id(zid).build()?; //build request - let request = RequestBuilder::new() - .sub(subject) - .obj(ke) - .action(action) - .build()?; - - //call PDP - let decision = self.policy_decision_point(request)?; + // let request = RequestBuilder::new() + // .sub(subject) + // .obj(ke) + // .action(action) + // .build()?; + + let subject = PolicySubject::Id(zid); + let request = NewRequest { + sub: subject, + obj: ke.to_owned(), + action, + }; + let decision = self.policy_decision_point(request); Ok(decision) } - pub fn policy_decision_point(&self, request: Request) -> ZResult { + + pub fn policy_decision_point(&self, request: NewRequest) -> bool { /* input: (request) output: true(allow)/false(deny) function: process the request received from PEP against the policy (self) - policy list is be a hashmap of (subject,action)->ketries (test and discuss) + the policy list is chosen based on the policy-type specified in the rules file + policy list is be a hashmap of subject->ketries (test and discuss) */ - //get subject and action from request and create subact [this will be our key for hashmap] - // let subact = SubAct(request.sub, request.action); - let action_flag = match request.action { - Action::Read => ActionFlag::Read, - Action::Write => ActionFlag::Write, - Action::None => ActionFlag::None, - Action::DeclareSub => ActionFlag::DeclareSub, - Action::Delete => ActionFlag::Delete, - Action::DeclareQuery => ActionFlag::DeclareQuery, + //compare the request to the vec of values...matching depends on the value of the policy type + match self.0 { + 1 => { + //check the id map (value 0) + return self.matching_algo(0, request); + } + 2 => { + //check the attribute map (value 1) + return self.matching_algo(1, request); + } + 3 => { + //check both the maps and do an OR (either ID or attribute should match for allow) + return self.matching_algo(0, request.clone()) || self.matching_algo(1, request); + } + 4 => { + //check both the maps and do AND (both ID and attribute should match for allow) + return self.matching_algo(0, request.clone()) && self.matching_algo(1, request); + } + _ => { + //wrong value; deny request + false + } }; + + false + } + + pub fn matching_algo(&self, matching_type: usize, request: NewRequest) -> bool { + // return true; let ke = request.obj; - match self.0.get(&request.sub) { + let sub = 
request.sub; + let action = request.action; + + match self.1[matching_type].get(&sub) { Some(ktrie) => { - // check if request ke has a match in ke-trie; if ke in ketrie, then Ok(true) else Ok(false) - //let result = ktrie.get_merge::(&Acl::new(&ke)); + // check if request ke has a match in ke-trie; if ke in ketrie, then true (allow) else false (deny) let result = ktrie.get::(&Acl::new(&ke)); - if let Some(value) = result { - if (value & action_flag) != ActionFlag::None { - return Ok(true); - } + match result { + Some(value) => (value & action) != ActionFlag::None, + None => false, } } - None => return Ok(false), + None => false, } - Ok(false) } - pub fn policy_resource_point(&self, file_path: &str) -> ZResult> { + pub fn policy_resource_point(&self, file_path: &str) -> ZResult<(u8, Vec)> { /* input: path to rules.json file - output: loads the appropriate policy into the memory and returns back a vector of rules; + output: loads the appropriate policy into the memory and returns back a vector of rules and the policy type as specified in the file; * might also be the point to select AC type (ACL, ABAC etc)?? * */ - #[derive(Serialize, Deserialize, Clone)] - struct Rules(Vec); - - let mut file = File::open(file_path).unwrap(); - let mut buff = String::new(); - file.read_to_string(&mut buff).unwrap(); - let rulevec: Rules = serde_json::from_str(&buff).unwrap(); - Ok(rulevec.0) + + let policytype: u8 = 1; + + let vec_ids = [ + "aaa3b411006ad57868988f9fec672a31", + "bbb3b411006ad57868988f9fec672a31", + "aaabbb11006ad57868988f9fec672a31", + "aaabbb11006ad57868988f9fec672a31", + ]; + let vec_actions: Vec = + vec![Action::Write, Action::Read, Action::Write, Action::Read]; + //let vec_perms = []; + let mut policyrules: Vec = Vec::new(); + + for i in 0..4 { + policyrules.push(PolicyRule { + sub: PolicySubject::Id(ZenohId::from_str(vec_ids[i]).unwrap()), + ke: "test/thr".to_string(), + action: vec_actions.get(i).unwrap().clone(), + permission: true, + }); + } + //let policyruleset: PolicyRuleSet; + + println!("policy rules : {:?}", policyrules); + + Ok((policytype, policyrules)) } } diff --git a/zenoh/src/net/routing/interceptor/mod.rs b/zenoh/src/net/routing/interceptor/mod.rs index c9777ae0c5..30f68cfb34 100644 --- a/zenoh/src/net/routing/interceptor/mod.rs +++ b/zenoh/src/net/routing/interceptor/mod.rs @@ -20,11 +20,11 @@ //! 
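The hunk below rewires the interceptor module onto the rewritten NewPolicyEnforcer, still behind a hard-coded gate. As a condensed sketch, this is roughly the shape the registration takes once the config-driven gate lands later in this series; all names and calls are the ones used in these patches, but the explicit match on acl_config.enabled is simplified to unwrap_or here, so treat it as an outline rather than the exact patched code:

    pub(crate) fn interceptor_factories(config: &Config) -> Vec<InterceptorFactory> {
        let mut res: Vec<InterceptorFactory> = vec![];
        // ACL enforcement is only registered when the transport/acl config enables it
        // (simplified: the later patch matches on acl_config.enabled and warns when unset).
        let acl_config = config.transport().acl().clone();
        if acl_config.enabled.unwrap_or(false) {
            let mut policy_enforcer = PolicyEnforcer::new();
            match policy_enforcer.init(acl_config) {
                // one Arc-wrapped enforcer is shared by the ingress/egress interceptors
                Ok(_) => res.push(Box::new(AclEnforcer {
                    e: Arc::new(policy_enforcer),
                })),
                Err(e) => log::error!("access control enabled but not initialized with error {}!", e),
            }
        }
        res
    }
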
mod accessintercept; mod authz; -use std::{collections::HashMap, sync::Arc}; +use std::sync::Arc; use super::RoutingContext; //use crate::net::routing::interceptor::authz; -use crate::net::routing::interceptor::{accessintercept::AclEnforcer, authz::PolicyEnforcer}; +use crate::net::routing::interceptor::{accessintercept::AclEnforcer, authz::NewPolicyEnforcer}; use zenoh_config::Config; use zenoh_protocol::network::NetworkMessage; use zenoh_transport::{multicast::TransportMulticast, unicast::TransportUnicast}; @@ -62,9 +62,9 @@ pub(crate) fn interceptor_factories(_config: &Config) -> Vec */ /* if config condition is selected this will be initialiased; putting true for now */ - if true { + if false { println!("the interceptor is initialized"); - let mut policy_enforcer = PolicyEnforcer(HashMap::new()); + let mut policy_enforcer = NewPolicyEnforcer::new().unwrap(); //(HashMap::new()); match policy_enforcer.init() { Ok(_) => res.push(Box::new(AclEnforcer { e: Arc::new(policy_enforcer), diff --git a/zenoh/src/net/routing/mod.rs b/zenoh/src/net/routing/mod.rs index 0b069c1337..06340f7e12 100644 --- a/zenoh/src/net/routing/mod.rs +++ b/zenoh/src/net/routing/mod.rs @@ -140,9 +140,12 @@ impl RoutingContext { } } if let Some(prefix) = self.prefix.get().cloned() { - let _ = self - .full_expr - .set(prefix.expr() + wire_expr.suffix.as_ref()); + // let _ = self + // .full_expr + // .set(prefix.expr() + wire_expr.suffix.as_ref()); + let mut full_expr = prefix.expr(); + full_expr.push_str(wire_expr.suffix.as_ref()); + let _ = self.full_expr.set(full_expr); return Some(self.full_expr.get().as_ref().unwrap()); } } From f52d3cdb9b33179638a23ed18abc0c352c34dd6e Mon Sep 17 00:00:00 2001 From: snehilzs Date: Thu, 8 Feb 2024 09:27:59 +0100 Subject: [PATCH 049/122] WIP:Modified ACL for attributes --- zenoh/src/net/routing/interceptor/mod.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/zenoh/src/net/routing/interceptor/mod.rs b/zenoh/src/net/routing/interceptor/mod.rs index 30f68cfb34..b8d23e5fc8 100644 --- a/zenoh/src/net/routing/interceptor/mod.rs +++ b/zenoh/src/net/routing/interceptor/mod.rs @@ -23,7 +23,6 @@ mod authz; use std::sync::Arc; use super::RoutingContext; -//use crate::net::routing::interceptor::authz; use crate::net::routing::interceptor::{accessintercept::AclEnforcer, authz::NewPolicyEnforcer}; use zenoh_config::Config; use zenoh_protocol::network::NetworkMessage; @@ -62,7 +61,7 @@ pub(crate) fn interceptor_factories(_config: &Config) -> Vec */ /* if config condition is selected this will be initialiased; putting true for now */ - if false { + if true { println!("the interceptor is initialized"); let mut policy_enforcer = NewPolicyEnforcer::new().unwrap(); //(HashMap::new()); match policy_enforcer.init() { From 39a4f7828f0466f1609eaf25b070d91aa6865149 Mon Sep 17 00:00:00 2001 From: snehilzs Date: Mon, 19 Feb 2024 17:56:56 +0100 Subject: [PATCH 050/122] WIP:Cleaned code --- zenoh/src/net/routing/interceptor/authz.rs | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/zenoh/src/net/routing/interceptor/authz.rs b/zenoh/src/net/routing/interceptor/authz.rs index de62cf77bf..282e170f23 100644 --- a/zenoh/src/net/routing/interceptor/authz.rs +++ b/zenoh/src/net/routing/interceptor/authz.rs @@ -6,19 +6,15 @@ use std::net::Ipv4Addr; use std::str::FromStr; use fr_trie::glob::GlobMatcher; -//use serde::ser::SerializeStruct; -//use super::RoutingContext; + use serde::{Deserialize, Serialize}; use zenoh_config::ZenohId; -//use 
zenoh_protocol::network::NetworkMessage; use fr_trie::glob::acl::Acl; use rustc_hash::FxHashMap; use zenoh_result::ZResult; -//use fr_trie::glob::GlobMatcher; -use fr_trie::trie::Trie; -//use std::collections::HashMap; +use fr_trie::trie::Trie; use bitflags::bitflags; From b7f9c717291c1cf55f7aef4517686cdf5fb2f47b Mon Sep 17 00:00:00 2001 From: snehilzs Date: Mon, 19 Feb 2024 18:18:10 +0100 Subject: [PATCH 051/122] WIP:merging code --- Cargo.lock | 12 +- DEFAULT_CONFIG.json5 | 29 +- commons/zenoh-config/src/defaults.rs | 10 + commons/zenoh-config/src/lib.rs | 43 +- pub_config.json5 | 7 + rules.json5 | 47 -- rules_test.json5 | 68 +++ rules_test_thr.json5 | 38 -- sub_config.json5 | 7 + zenoh/Cargo.toml | 5 +- .../src/net/routing/interceptor/old_authz.rs | 426 ------------------ 11 files changed, 115 insertions(+), 577 deletions(-) delete mode 100644 rules.json5 create mode 100644 rules_test.json5 delete mode 100644 rules_test_thr.json5 delete mode 100644 zenoh/src/net/routing/interceptor/old_authz.rs diff --git a/Cargo.lock b/Cargo.lock index b454f487c4..916574adcd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1265,16 +1265,6 @@ dependencies = [ "percent-encoding", ] -[[package]] -name = "fr-trie" -version = "0.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44e22abe50d63925651ab549a8ffdfe6cb6a7b058ea1cf99fd4c2b6dbc814d61" -dependencies = [ - "bitflags 1.3.2", - "serde", -] - [[package]] name = "fraction" version = "0.13.1" @@ -4584,10 +4574,10 @@ dependencies = [ "event-listener 4.0.0", "flume", "form_urlencoded", - "fr-trie", "futures", "git-version", "hex", + "ipnetwork", "lazy_static", "log", "ordered-float", diff --git a/DEFAULT_CONFIG.json5 b/DEFAULT_CONFIG.json5 index 21750a25de..e7ac6b74b7 100644 --- a/DEFAULT_CONFIG.json5 +++ b/DEFAULT_CONFIG.json5 @@ -108,24 +108,6 @@ // // key_expression // ], // }, -<<<<<<< HEAD -======= - - // /// The downsampling declaration. - // downsampling: [ - // { - // /// A list of network interfaces messages will be processed on, the rest will be passed as is. - // interfaces: [ "wlan0" ], - // /// Data flow messages will be processed on. ("egress" or "ingress") - // flow: "egress", - // /// A list of downsampling rules: key_expression and the rate (maximum frequency in Hertz) - // rules: [ - // { key_expr: "demo/example/zenoh-rs-pub", rate: 0.1 }, - // ], - // }, - // ], - ->>>>>>> origin/main /// Configure internal transport parameters transport: { unicast: { @@ -214,10 +196,10 @@ /// The initial exponential backoff time in nanoseconds to allow the batching to eventually progress. /// Higher values lead to a more aggressive batching but it will introduce additional latency. 
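            /// e.g. the default of 100 below corresponds to an initial backoff of 100 nanoseconds.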
backoff: 100, + // Number of threads dedicated to transmission + // By default, the number of threads is calculated as follows: 1 + ((#cores - 1) / 4) + // threads: 4, }, - // Number of threads dedicated to transmission - // By default, the number of threads is calculated as follows: 1 + ((#cores - 1) / 4) - // threads: 4, }, /// Configure the zenoh RX parameters of a link rx: { @@ -278,6 +260,11 @@ known_keys_file: null, }, }, + acl: { + enabled: true, + default_deny: true, + policy_file: "rules_test.json5", + }, }, /// Configure the Admin Space /// Unstable: this configuration part works as advertised, but may change in a future release diff --git a/commons/zenoh-config/src/defaults.rs b/commons/zenoh-config/src/defaults.rs index 8d1a5dbc0f..fce2fe1856 100644 --- a/commons/zenoh-config/src/defaults.rs +++ b/commons/zenoh-config/src/defaults.rs @@ -211,3 +211,13 @@ impl Default for SharedMemoryConf { Self { enabled: false } } } + +impl Default for AclConfig { + fn default() -> Self { + Self { + enabled: Some(true), + default_deny: Some(true), + policy_file: None, + } + } +} diff --git a/commons/zenoh-config/src/lib.rs b/commons/zenoh-config/src/lib.rs index eba3a4aa55..46422c5e25 100644 --- a/commons/zenoh-config/src/lib.rs +++ b/commons/zenoh-config/src/lib.rs @@ -70,33 +70,12 @@ impl Zeroize for SecretString { pub type SecretValue = Secret; -#[derive(Debug, Deserialize, Serialize, Clone)] -#[serde(rename_all = "lowercase")] -pub enum DownsamplingFlow { - Egress, - Ingress, -} - -#[derive(Debug, Deserialize, Serialize, Clone)] -pub struct DownsamplingRuleConf { - /// A list of key-expressions to which the downsampling will be applied. - /// Downsampling will be applied for all key extensions if the parameter is None - pub key_expr: OwnedKeyExpr, - /// The maximum frequency in Hertz; - pub rate: f64, -} - -#[derive(Debug, Deserialize, Serialize, Clone)] -pub struct DownsamplingItemConf { - /// A list of interfaces to which the downsampling will be applied - /// Downsampling will be applied for all interfaces if the parameter is None - pub interfaces: Option>, - /// A list of interfaces to which the downsampling will be applied. - pub rules: Vec, - /// Downsampling flow direction: egress, ingress - pub flow: DownsamplingFlow, -} - +// #[derive(Debug, Deserialize, Serialize, Clone)] +// pub struct AclConfig { +// pub enabled: Option, +// pub default_deny: Option, +// pub policy_file: Option, +// } pub trait ConfigValidator: Send + Sync { fn check_config( &self, @@ -411,6 +390,12 @@ validated_struct::validator! { known_keys_file: Option, }, }, + pub acl: AclConfig { + pub enabled: Option, + pub default_deny: Option, + pub policy_file: Option, + } + }, /// Configuration of the admin space. pub adminspace: #[derive(Default)] @@ -432,10 +417,6 @@ validated_struct::validator! { }, }, - - /// Configuration of the downsampling. - downsampling: Vec, - /// A list of directories where plugins may be searched for if no `__path__` was specified for them. /// The executable's current directory will be added to the search paths. plugins_search_dirs: Vec, // TODO (low-prio): Switch this String to a PathBuf? 
(applies to other paths in the config as well) diff --git a/pub_config.json5 b/pub_config.json5 index ab4cbc4e21..e8ab4cad62 100644 --- a/pub_config.json5 +++ b/pub_config.json5 @@ -106,6 +106,13 @@ // // ], // // }, // /// Configure internal transport parameters + transport: { + acl: { + enabled: false, + default_deny: false, + policy_file: null, + }, + } // transport: { // unicast: { // /// Timeout in milliseconds when opening a link diff --git a/rules.json5 b/rules.json5 deleted file mode 100644 index 0314ec8a38..0000000000 --- a/rules.json5 +++ /dev/null @@ -1,47 +0,0 @@ -[ - { - "sub": { - "id": "aaa3b411006ad57868988f9fec672a31", - "attributes": null - }, - "ke": "demo/example/**", - "action": "Write", - "permission": false - }, - { - "sub": { - "id": "aaa3b411006ad57868988f9fec672a31", - "attributes": null - }, - "ke": "demo/example/zenoh-rs-pub", - "action": "Write", - "permission": true - }, - { - "sub": { - "id": "bbb3b411006ad57868988f9fec672a31", - "attributes": null - }, - "ke": "demo/example/zenoh-rs-pub", - "action": "Read", - "permission": true - }, - { - "sub": { - "id": "aaabbb11006ad57868988f9fec672a31", - "attributes": null - }, - "ke": "demo/example/**", - "action": "Read", - "permission": true - }, - { - "sub": { - "id": "aaabbb11006ad57868988f9fec672a31", - "attributes": null - }, - "ke": "demo/example/**", - "action": "Write", - "permission": true - } -] \ No newline at end of file diff --git a/rules_test.json5 b/rules_test.json5 new file mode 100644 index 0000000000..8b133f6fcf --- /dev/null +++ b/rules_test.json5 @@ -0,0 +1,68 @@ +{ + "policy_definition": "UserID", + "rules": [ + { + "attribute_name": "UserID", + "attribute_rules": [ + { + "sub": "aaa3b411006ad57868988f9fec672a31", + "ke": "test/thr", + "action": "Write", + "permission": true + }, + { + "sub": "bbb3b411006ad57868988f9fec672a31", + "ke": "test/thr", + "action": "Read", + "permission": true + }, + { + "sub": "aaabbb11006ad57868988f9fec672a31", + "ke": "test/thr", + "action": "Read", + "permission": true + }, + { + "sub": "aaabbb11006ad57868988f9fec672a31", + "ke": "test/thr", + "action": "Write", + "permission": true + } + ] + }, + { + "attribute_name": "NetworkType", + "attribute_rules": [ + { + "sub": "wifi", + "ke": "test/thr", + "action": "Write", + "permission": true + }, + { + "sub": "wifi", + "ke": "test/thr", + "action": "Read", + "permission": true + } + ] + }, + { + "attribute_name": "location", + "attribute_rules": [ + { + "sub": "location_1", + "ke": "test/thr", + "action": "Write", + "permission": true + }, + { + "sub": "location_2", + "ke": "test/thr", + "action": "Read", + "permission": true + } + ] + } + ] +} \ No newline at end of file diff --git a/rules_test_thr.json5 b/rules_test_thr.json5 deleted file mode 100644 index 9161937bc7..0000000000 --- a/rules_test_thr.json5 +++ /dev/null @@ -1,38 +0,0 @@ -[ - { - "sub": { - "id": "aaa3b411006ad57868988f9fec672a31", - "attributes": null - }, - "ke": "test/thr", - "action": "Write", - "permission": true - }, - { - "sub": { - "id": "bbb3b411006ad57868988f9fec672a31", - "attributes": null - }, - "ke": "test/thr", - "action": "Read", - "permission": true - }, - { - "sub": { - "id": "aaabbb11006ad57868988f9fec672a31", - "attributes": null - }, - "ke": "test/thr", - "action": "Read", - "permission": true - }, - { - "sub": { - "id": "aaabbb11006ad57868988f9fec672a31", - "attributes": null - }, - "ke": "test/thr", - "action": "Write", - "permission": true - } -] \ No newline at end of file diff --git a/sub_config.json5 b/sub_config.json5 index 
b163e2b4f2..f50d34e460 100644 --- a/sub_config.json5 +++ b/sub_config.json5 @@ -29,6 +29,13 @@ // "/
" ], }, + transport: { + acl: { + enabled: false, + default_deny: false, + policy_file: null, + }, + } // /// Configure the scouting mechanisms and their behaviours // scouting: { // /// In client mode, the period dedicated to scouting for a router before failing diff --git a/zenoh/Cargo.toml b/zenoh/Cargo.toml index a2205f30c8..ba74117929 100644 --- a/zenoh/Cargo.toml +++ b/zenoh/Cargo.toml @@ -95,7 +95,6 @@ zenoh-collections = { workspace = true, features = ["std"] } zenoh-config = { workspace = true } zenoh-core = { workspace = true } zenoh-crypto = { workspace = true } -zenoh-keyexpr = { workspace = true } zenoh-link = { workspace = true } zenoh-macros = { workspace = true } zenoh-plugin-trait = { workspace = true } @@ -105,14 +104,14 @@ zenoh-shm = { workspace = true, optional = true } zenoh-sync = { workspace = true } zenoh-transport = { workspace = true } zenoh-util = { workspace = true } -fr-trie = "*" +zenoh-keyexpr = { workspace = true } +ipnetwork = "0.20.0" rustc-hash = "1.1.0" [build-dependencies] rustc_version = { workspace = true } [dependencies.bitflags] version = "2.4.2" - [lib] name = "zenoh" diff --git a/zenoh/src/net/routing/interceptor/old_authz.rs b/zenoh/src/net/routing/interceptor/old_authz.rs deleted file mode 100644 index 20d6f2a315..0000000000 --- a/zenoh/src/net/routing/interceptor/old_authz.rs +++ /dev/null @@ -1,426 +0,0 @@ -impl PolicyEnforcer { - pub fn new() -> ZResult { - // PolicyEnforcer - Ok(PolicyEnforcer(0, Vec::new())) - } - - pub fn init(&mut self) -> ZResult<()> { - /* - Initializes the policy for the control logic - loads policy into memory from file/network path - creates the policy hashmap with the ke-tries for ke matching - can have policy-type in the mix here...need to verify - */ - //set the policy type to 1,2,3 depending on the user input in the "rules" file - - let (rule_type, rule_set) = self.policy_resource_point("rules_test_thr.json5").unwrap(); - - self.build_policy_map(rule_type, rule_set) - .expect("policy not established"); - - /* setup a temporary variable here to hold all the values */ - //also should start the logger here - Ok(()) - } - - pub fn build_policy_map(&mut self, rule_type: u8, rule_set: Vec) -> ZResult<()> { - //convert vector of rules to a hashmap mapping sub-act to ketrie - /* - representaiton of policy list as a vector of hashmap of trees - each hashmap maps a subject (ID/atttribute) to a trie of allowed values - using fr-trie for now as a placeholder for key-matching - */ - self.0 = rule_type; - let policy_type = &mut self.1; - policy_type.push(Self::build_id_map(rule_set)); - - /* - match rule_type { - 1 => policy_type.push(Self::build_id_map(rule_set)), - - 2 => policy_type.push(Self::build_attribute_map(rule_set)), - 3 | 4 => { - policy_type.push(Self::build_id_map(rule_set.clone())); - policy_type.push(Self::build_attribute_map(rule_set)); - } - _ => bail!("bad entry for type"), - } - */ - Ok(()) - } - - pub fn build_id_map(rule_set: Vec) -> SubActPolicy { - let mut policy: SubActPolicy = FxHashMap::default(); - for v in rule_set { - // for now permission being false means this ke will not be inserted into the trie of allowed ke's - let perm = v.permission; - if !perm { - continue; - } - let sub = v.sub; - let ke = v.ke; - let action_flag = match v.action { - Action::Read => ActionFlag::Read, - Action::Write => ActionFlag::Write, - Action::None => ActionFlag::None, - Action::DeclareSub => ActionFlag::DeclareSub, - Action::Delete => ActionFlag::Delete, - Action::DeclareQuery => ActionFlag::DeclareQuery, - }; - 
let subact = SubAct(sub, action_flag.clone()); - //match subject to the policy hashmap - #[allow(clippy::map_entry)] - if !policy.contains_key(&subact) { - //create new entry for subject + ke-tree - let mut ketree = KeTreeFast::new(); - ketree.insert(keyexpr::new(&ke).unwrap(), action_flag); - policy.insert(subact, ketree); - } else { - let ketree = policy.get_mut(&subact).unwrap(); - ketree.insert(keyexpr::new(&ke).unwrap(), action_flag); - } - } - policy - } - - pub fn policy_enforcement_point( - &self, - reuqest_info: RequestInfo, - action: ActionFlag, - ) -> ZResult { - /* - input: new_context and action (sub,act for now but will need attribute values later) - output: allow/denyca - function: depending on the msg, builds the subject, builds the request, passes the request to policy_decision_point() - collects result from PDP and then uses that allow/deny output to block or pass the msg to routing table - */ - let ke = reuqest_info.ke; - let zid = reuqest_info.zid; - let _attribute = reuqest_info.attributes; - - let subject = PolicySubject::Id(zid); - let request = Request { - sub: subject, - obj: ke.to_string(), - action, - }; - let decision = self.policy_decision_point(request); - Ok(decision) - } - - pub fn policy_decision_point(&self, request: Request) -> bool { - /* - input: (request) - output: true(allow)/false(deny) - function: process the request received from PEP against the policy (self) - the policy list is chosen based on the policy-type specified in the rules file - policy list is be a hashmap of subject->ketries (test and discuss) - */ - //compare the request to the vec of values...matching depends on the value of the policy type - return self.matching_algo(0, request); - - // false - } - - pub fn policy_information_point<'a>(&self, file_path: &'a str) -> ZResult { - let policy_definition = "userid and nettype"; //this will be the value deciding how matcher functions are called - let mut policy_rules: Vec = Vec::new(); - let userid_attr = AttributeRules { - attribute_name: "userid".to_owned(), - attribute_rules: [ - SinglePolicyRule { - sub: "aaa3b411006ad57868988f9fec672a31".to_string(), - ke: "test/thr".to_string(), - action: Action::Write, - permission: true, - }, - SinglePolicyRule { - sub: "bbb3b411006ad57868988f9fec672a31".to_string(), - ke: "test/thr".to_string(), - action: Action::Read, - permission: true, - }, - SinglePolicyRule { - sub: "aaabbb11006ad57868988f9fec672a31".to_string(), - ke: "test/thr".to_string(), - action: Action::Read, - permission: true, - }, - SinglePolicyRule { - sub: "aaabbb11006ad57868988f9fec672a31".to_string(), - ke: "test/thr".to_string(), - action: Action::Write, - permission: true, - }, - ] - .to_vec(), - }; - let nettype_attr = AttributeRules { - attribute_name: "networktype".to_owned(), - attribute_rules: [ - SinglePolicyRule { - sub: "wifi".to_string(), - ke: "test/thr".to_string(), - action: Action::Write, - permission: true, - }, - SinglePolicyRule { - sub: "wifi".to_string(), - ke: "test/thr".to_string(), - action: Action::Read, - permission: true, - }, - ] - .to_vec(), - }; - let location_attr = AttributeRules { - attribute_name: "location".to_owned(), - attribute_rules: [ - SinglePolicyRule { - sub: "location_1".to_string(), - ke: "test/thr".to_string(), - action: Action::Write, - permission: true, - }, - SinglePolicyRule { - sub: "location_2".to_string(), - ke: "test/thr".to_string(), - action: Action::Read, - permission: true, - }, - ] - .to_vec(), - }; - - /* - for example, if policy_defintiion = "userid and nettype" - our 
request will be 2 different calls to pdp with different rewuest values - so we will get val1= matcher_function(sub=userid...), val2=matcher_function(sub=nettype...) - and our policy function will be val1 and val2 (from "userid and nettype" given in the policy_defintiion) - and matcher function be mathcer_function(request) - in our pdp, we will call matcher("subval") - */ - /* - get values from json str?? - */ - policy_rules.push(userid_attr); - policy_rules.push(nettype_attr); - policy_rules.push(location_attr); - - let list_of_attributes = vec![ - "userid".to_string(), - "networktype".to_string(), - "location".to_string(), - ]; - let userid_enabled = true; - let networktype_enabled = true; - - Ok(PolicyInformation { - policydefition: policy_definition.to_owned(), - userid_enabled: true, - networktype_enabled: true, - attribute_list: list_of_attributes, - policy_rules, - }) - } - - pub fn policy_resource_point(&self, file_path: &str) -> ZResult<(u8, Vec)> { - /* - input: path to rules.json file - output: loads the appropriate policy into the memory and returns back a vector of rules and the policy type as specified in the file; - * might also be the point to select AC type (ACL, ABAC etc)?? * - */ - let policytype: u8 = 1; - let _policy_string = [ - "userid", - "attribute", - "userid or attribute", - "userid and attribute", - ]; //if it is - //match config value against it - //if it doesn't match, abort. else use that index henceforth - - let vec_ids = [ - "aaa3b411006ad57868988f9fec672a31", - "bbb3b411006ad57868988f9fec672a31", - "aaabbb11006ad57868988f9fec672a31", - "aaabbb11006ad57868988f9fec672a31", - ]; - let vec_actions: Vec = - vec![Action::Write, Action::Read, Action::Write, Action::Read]; - let mut policyrules: Vec = Vec::new(); - - let wc_keys: [&str; 16] = [ - "test/demo/**/example/**", - "test/demo/example", - "test/demo/example", - "test/*/example", - "**/example", - "**/e", - "e/demo/example", - "**/demo/example", - "**/@example/**", - "**", - "**/@example", - "@example/**", - "@example/a", - "test/@example", - "demo/a$*a/demo/bb", - "**/b$*/ee", - ]; - - let no_wc_keys: [&str; 100] = [ - "test/example/a/b", - "test/b/example/a/bone/activity/basin", - "some/demo/test/a", - "some/birth/example/a/bone/bit/airplane", - "some/demo/test/a/blood", - "test/example/a", - "test/authority/example/a/acoustics/board", - "some/b/example/a", - "some/room/example/net", - "test/example/a/ants/humidity", - "some/account/example/a/argument/humidity", - "test/b/example/a/info/birthday", - "test/b/example/org/info/humidity", - "some/info/example/net", - "some/demo/test/a", - "test/amusement/example/a/angle", - "some/b/example/a/baseball", - "test/b/example/org", - "some/b/example/a", - "test/b/example/a/basket/humidity", - "test/example/a", - "test/b/example/net/army/aunt/d", - "some/appliance/example/net/box", - "some/b/example/org/number", - "some/example/net/beginner/d", - "some/birthday/example/net", - "test/believe/example/a/battle", - "test/b/example/org/baseball/speedb", - "some/basket/example/a", - "some/b/example/net/birds", - "some/demo/test/a", - "test/bear/example/a/blow", - "test/b/example/net", - "some/demo/test/a/achiever/action", - "test/b/example/net", - "test/b/example/a", - "test/b/example/a/believe/temp", - "test/example/a/basketball", - "test/example/a/afternoon/d/bells", - "test/example/a/bubble/brick", - "test/b/example/a", - "test/boot/example/org/boat/board", - "test/b/example/a", - "test/room/example/a/c/d", - "some/b/example/org", - "some/b/example/a/box/book/temp", - 
"some/b/example/a/adjustment/temp", - "some/example/net/belief/afternoon", - "test/b/example/a/activity/info", - "some/b/example/org/sensor/arm", - "some/zenoh/example/org/bead/bridge", - "test/brother/example/a/bath", - "test/example/a", - "test/example/a/sensor", - "some/back/example/a/balance/bird/humidity", - "test/zenoh/example/a/box/action/humidity", - "test/b/example/a", - "some/demo/test/a/bedroom/temp", - "some/b/example/a/ball/humidity", - "test/airplane/example/a/art/animal", - "some/example/net", - "test/b/example/a", - "some/demo/test/a/baseball/achiever", - "some/demo/test/a/berry/arch/temp", - "test/arithmetic/example/a/basket", - "some/example/net/art/bikes/humidity", - "some/demo/test/a/bedroom", - "some/demo/test/a", - "some/appliance/example/a", - "test/b/example/a", - "test/b/example/a/agreement", - "some/example/net/bird/sound", - "test/b/example/a/argument/info/basket", - "some/b/example/a/balance/boundary", - "some/arch/example/a/argument", - "some/demo/test/a/zenoh/brake", - "test/b/example/a/bath/brass", - "some/anger/example/net", - "test/b/example/a/boat/humidity", - "some/demo/test/a/b/c", - "test/b/example/a/brother/temp", - "test/b/example/a", - "some/b/example/a", - "test/b/example/org", - "some/b/example/a/amount/b", - "some/b/example/org/heat/humidity", - "some/demo/test/a", - "some/b/example/edu/activity", - "some/argument/example/a/suggest/humidity", - "test/example/a/believe/anger/humidity", - "test/b/example/a/sensor/b/c", - "test/example/edu/agreement", - "test/example/org", - "some/demo/test/a", - "test/b/example/a/airplane/wing", - "test/b/example/a", - "some/b/example/net/beef/bedroom/temp", - "test/b/example/a/blade/angle", - "some/b/example/c/d", - "test/b/example/a", - ]; - - //valid one that works - for i in 0..4 { - policyrules.push(PolicyRule { - sub: PolicySubject::Id(ZenohId::from_str(vec_ids[i]).unwrap()), - ke: "test/thr".to_string(), - action: vec_actions.get(i).unwrap().clone(), - permission: true, - }); - } - - for i in 0..4 { - for j in no_wc_keys { - policyrules.push(PolicyRule { - sub: PolicySubject::Id(ZenohId::from_str(vec_ids[i]).unwrap()), - ke: j.to_string(), - action: vec_actions.get(i).unwrap().clone(), - permission: true, - }); - } - } - - //list of attributes from the file - - println!("policy rules : {:?}", policyrules); - - //get value from the file - let file_input = { - let file_content = - fs::read_to_string("rules_test_thr.json").expect("error reading file"); - serde_json::from_str::(&file_content).expect("error serializing to JSON") - }; - println!("file input {}", file_input); - - Ok((policytype, policyrules)) - } - - fn matching_algo(&self, matching_type: usize, request: Request) -> bool { - //return true; - let ke = request.obj; - let sub = request.sub; - let action = request.action; - let subact = SubAct(sub, action); - - match self.1[matching_type].get(&subact) { - Some(ktrie) => { - let result = ktrie.nodes_including(keyexpr::new(&ke).unwrap()).count(); - result != 0 - } - None => false, - } - } -} From 45eac358b7f59388c57884d1b2593a22846f4f6c Mon Sep 17 00:00:00 2001 From: snehilzs Date: Tue, 20 Feb 2024 12:46:44 +0100 Subject: [PATCH 052/122] WIP:adding config conditions --- commons/zenoh-config/src/defaults.rs | 4 +- commons/zenoh-config/src/lib.rs | 6 -- zenoh/src/net/routing/interceptor/authz.rs | 88 ++++++++++------------ zenoh/src/net/routing/interceptor/mod.rs | 28 ++++--- 4 files changed, 58 insertions(+), 68 deletions(-) diff --git a/commons/zenoh-config/src/defaults.rs 
b/commons/zenoh-config/src/defaults.rs index fce2fe1856..38f794df57 100644 --- a/commons/zenoh-config/src/defaults.rs +++ b/commons/zenoh-config/src/defaults.rs @@ -215,8 +215,8 @@ impl Default for SharedMemoryConf { impl Default for AclConfig { fn default() -> Self { Self { - enabled: Some(true), - default_deny: Some(true), + enabled: Some(false), + default_deny: Some(false), policy_file: None, } } diff --git a/commons/zenoh-config/src/lib.rs b/commons/zenoh-config/src/lib.rs index 46422c5e25..9ccd3de7d6 100644 --- a/commons/zenoh-config/src/lib.rs +++ b/commons/zenoh-config/src/lib.rs @@ -70,12 +70,6 @@ impl Zeroize for SecretString { pub type SecretValue = Secret; -// #[derive(Debug, Deserialize, Serialize, Clone)] -// pub struct AclConfig { -// pub enabled: Option, -// pub default_deny: Option, -// pub policy_file: Option, -// } pub trait ConfigValidator: Send + Sync { fn check_config( &self, diff --git a/zenoh/src/net/routing/interceptor/authz.rs b/zenoh/src/net/routing/interceptor/authz.rs index 617237461c..20acfa719d 100644 --- a/zenoh/src/net/routing/interceptor/authz.rs +++ b/zenoh/src/net/routing/interceptor/authz.rs @@ -91,7 +91,7 @@ pub struct AttributeRules { #[derive(Clone, Debug, Deserialize)] pub struct AttributeRule { - sub: Attribute, //changed from string + sub: Attribute, ke: String, action: Action, permission: bool, @@ -99,7 +99,6 @@ pub struct AttributeRule { use zenoh_config::ZenohId; #[derive(Serialize, Debug, Deserialize, Eq, PartialEq, Hash, Clone)] -//#[serde(tag = "sub")] #[serde(untagged)] pub enum Attribute { UserID(ZenohId), @@ -107,7 +106,7 @@ pub enum Attribute { MetadataType(String), //clarify } #[derive(Serialize, Debug, Deserialize, Eq, PartialEq, Hash, Clone)] -pub struct SubAct(Attribute, Action); //changed from String to Attribute +pub struct SubAct(Attribute, Action); #[derive(Debug)] pub struct RequestInfo { @@ -117,13 +116,13 @@ pub struct RequestInfo { } impl PolicyEnforcer { - pub fn new() -> ZResult { - Ok(PolicyEnforcer { + pub fn new() -> PolicyEnforcer { + PolicyEnforcer { acl_enabled: true, default_deny: true, attribute_list: None, policy_list: None, - }) + } } pub fn init(&mut self, acl_config: AclConfig) -> ZResult<()> { /* @@ -132,21 +131,29 @@ impl PolicyEnforcer { creates the policy hashmap with the ke-tries for ke matching can have policy-type in the mix here...need to verify */ - self.acl_enabled = acl_config.enabled.unwrap(); - self.default_deny = acl_config.default_deny.unwrap(); - let file_path = acl_config.policy_file.unwrap(); - let policy_information = self.policy_resource_point(&file_path)?; - self.attribute_list = Some(policy_information.attribute_list); - let _policy_definition = policy_information.policy_definition; - - //create policy_list for sub|act:ke from the info we have - - self.build_policy_map( - self.attribute_list.clone().unwrap(), - policy_information.policy_rules, - ) - .expect("policy not established"); - //logger should start here + match acl_config.enabled { + Some(val) => self.acl_enabled = val, + None => log::error!("acl config not setup"), + } + match acl_config.default_deny { + Some(val) => self.default_deny = val, + None => log::error!("error default_deny not setup"), + } + if self.acl_enabled { + match acl_config.policy_file { + Some(file_path) => { + let policy_information = self.policy_resource_point(&file_path)?; + self.attribute_list = Some(policy_information.attribute_list); + let _policy_definition = policy_information.policy_definition; + self.build_policy_map( + 
self.attribute_list.clone().unwrap(), + policy_information.policy_rules, + )?; + log::info!("policy map was created successfully"); + } + None => log::error!("no policy file path was specified"), + } + } Ok(()) } pub fn build_policy_map( @@ -156,7 +163,7 @@ impl PolicyEnforcer { ) -> ZResult<()> { /* representaiton of policy list as a vector of hashmap of trees - each hashmap maps a subject (ID/atttribute) to a trie of allowed values + each hashmap maps a subact (ID/Atttribute + Action) to a trie of allowed values */ //for each attrribute in the list, get rules, create map and push into rules_vector let mut pm: Vec = Vec::new(); @@ -179,16 +186,15 @@ impl PolicyEnforcer { let sub = v.sub; let ke = v.ke; let subact = SubAct(sub, v.action); - //match subject to the policy hashmap - #[allow(clippy::map_entry)] - if !policy.contains_key(&subact) { + let subact_value_exists = policy.contains_key(&subact); + if subact_value_exists { + let ketree = policy.get_mut(&subact).unwrap(); + ketree.insert(keyexpr::new(&ke)?, true); + } else { //create new entry for subject + ke-tree let mut ketree = KeTreeRule::new(); ketree.insert(keyexpr::new(&ke)?, true); policy.insert(subact, ketree); - } else { - let ketree = policy.get_mut(&subact).unwrap(); - ketree.insert(keyexpr::new(&ke)?, true); } } Ok(policy) @@ -234,39 +240,25 @@ impl PolicyEnforcer { pub fn policy_enforcement_point(&self, request_info: RequestInfo) -> ZResult { /* - input: new_context and action (sub,act for now but will need attribute values later) - output: allow/denyca q - function: depending on the msg, builds the subject, builds the request, passes the request to policy_decision_point() - collects result from PDP and then uses that allow/deny output to block or pass the msg to routing table + input: request_info from interceptor + output: decision = allow/deny permission [true/false] + function: builds the request and passes it to policy_decision_point() + collects results (for each attribute in subject list) from PDP + and then uses that to drop or pass the msg to routing table */ - /* - for example, if policy_defintiion = "userid and nettype" - our request will be 2 different calls to pdp with different rewuest values - so we will get val1= matcher_function(sub=userid...), val2=matcher_function(sub=nettype...) 
- and our policy function will be val1 and val2 (from "userid and nettype" given in the policy_defintiion) - and matcher function be mathcer_function(request) - in our pdp, we will call matcher("subval") - */ - //return Ok(true); - // println!("request info: {:?}", request_info); let obj = request_info.ke; let mut decisions: Vec = Vec::new(); //to store all decisions for each subject in list - // let subject_list = request_info.sub.0; - - // loop through the attributes and store decision for each for (attribute_index, val) in request_info.sub.into_iter().enumerate() { - // val.0 is attribute name, val.1 is attribute value //build request let request = RequestBuilder::new() .sub(val) .obj(obj.clone()) .action(request_info.action.clone()) .build()?; - decisions.push(self.policy_decision_point(attribute_index, request)); } - let decision: bool = decisions[0]; //should run a function over the decisons vector + let decision: bool = decisions[0]; //only checks for single attribute right now Ok(decision) } diff --git a/zenoh/src/net/routing/interceptor/mod.rs b/zenoh/src/net/routing/interceptor/mod.rs index 2c7fafd020..8dece4f1c6 100644 --- a/zenoh/src/net/routing/interceptor/mod.rs +++ b/zenoh/src/net/routing/interceptor/mod.rs @@ -212,19 +212,23 @@ pub(crate) fn interceptor_factories(config: &Config) -> Vec //get acl config let acl_config = config.transport().acl().clone(); //get this gracefully - if acl_config.enabled.unwrap() { - let mut policy_enforcer = PolicyEnforcer::new().unwrap(); + let mut acl_enabled = false; + match acl_config.enabled { + Some(val) => acl_enabled = val, + None => { + log::warn!("acl config not setup"); + } + } + if acl_enabled { + let mut policy_enforcer = PolicyEnforcer::new(); match policy_enforcer.init(acl_config) { - Ok(_) => { - println!( - "setup acl intercept with {:?}", - policy_enforcer.get_attribute_list().unwrap() - ); - res.push(Box::new(AclEnforcer { - e: Arc::new(policy_enforcer), - })) - } - Err(e) => log::error!("access control not initialized with error {}!", e), + Ok(_) => res.push(Box::new(AclEnforcer { + e: Arc::new(policy_enforcer), + })), + Err(e) => log::error!( + "access control enabled but not initialized with error {}!", + e + ), } } res From 264f86121a8e6c4f0e4357e07684b471b3e6f536 Mon Sep 17 00:00:00 2001 From: snehilzs Date: Tue, 20 Feb 2024 14:18:04 +0100 Subject: [PATCH 053/122] WIP:merging with DS --- commons/zenoh-config/src/lib.rs | 32 +++- .../{accessintercept.rs => accesscontrol.rs} | 0 .../net/routing/interceptor/downsampling.rs | 166 ++++++++++++++++++ zenoh/src/net/routing/interceptor/mod.rs | 4 +- 4 files changed, 199 insertions(+), 3 deletions(-) rename zenoh/src/net/routing/interceptor/{accessintercept.rs => accesscontrol.rs} (100%) create mode 100644 zenoh/src/net/routing/interceptor/downsampling.rs diff --git a/commons/zenoh-config/src/lib.rs b/commons/zenoh-config/src/lib.rs index 9ccd3de7d6..57df392896 100644 --- a/commons/zenoh-config/src/lib.rs +++ b/commons/zenoh-config/src/lib.rs @@ -70,6 +70,33 @@ impl Zeroize for SecretString { pub type SecretValue = Secret; +#[derive(Debug, Deserialize, Serialize, Clone)] +#[serde(rename_all = "lowercase")] +pub enum DownsamplingFlow { + Egress, + Ingress, +} + +#[derive(Debug, Deserialize, Serialize, Clone)] +pub struct DownsamplingRuleConf { + /// A list of key-expressions to which the downsampling will be applied. 
+ /// Downsampling will be applied for all key extensions if the parameter is None + pub key_expr: OwnedKeyExpr, + /// The maximum frequency in Hertz; + pub rate: f64, +} + +#[derive(Debug, Deserialize, Serialize, Clone)] +pub struct DownsamplingItemConf { + /// A list of interfaces to which the downsampling will be applied + /// Downsampling will be applied for all interfaces if the parameter is None + pub interfaces: Option>, + /// A list of interfaces to which the downsampling will be applied. + pub rules: Vec, + /// Downsampling flow direction: egress, ingress + pub flow: DownsamplingFlow, +} + pub trait ConfigValidator: Send + Sync { fn check_config( &self, @@ -389,7 +416,6 @@ validated_struct::validator! { pub default_deny: Option, pub policy_file: Option, } - }, /// Configuration of the admin space. pub adminspace: #[derive(Default)] @@ -411,6 +437,10 @@ validated_struct::validator! { }, }, + + /// Configuration of the downsampling. + downsampling: Vec, + /// A list of directories where plugins may be searched for if no `__path__` was specified for them. /// The executable's current directory will be added to the search paths. plugins_search_dirs: Vec, // TODO (low-prio): Switch this String to a PathBuf? (applies to other paths in the config as well) diff --git a/zenoh/src/net/routing/interceptor/accessintercept.rs b/zenoh/src/net/routing/interceptor/accesscontrol.rs similarity index 100% rename from zenoh/src/net/routing/interceptor/accessintercept.rs rename to zenoh/src/net/routing/interceptor/accesscontrol.rs diff --git a/zenoh/src/net/routing/interceptor/downsampling.rs b/zenoh/src/net/routing/interceptor/downsampling.rs new file mode 100644 index 0000000000..765dab8925 --- /dev/null +++ b/zenoh/src/net/routing/interceptor/downsampling.rs @@ -0,0 +1,166 @@ +// +// Copyright (c) 2024 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +//! ⚠️ WARNING ⚠️ +//! +//! This module is intended for Zenoh's internal use. +//! +//! 
[Click here for Zenoh's documentation](../zenoh/index.html) + +use crate::net::routing::interceptor::*; +use std::sync::{Arc, Mutex}; +use zenoh_config::{DownsamplingFlow, DownsamplingItemConf, DownsamplingRuleConf}; +use zenoh_core::zlock; +use zenoh_keyexpr::keyexpr_tree::impls::KeyedSetProvider; +use zenoh_keyexpr::keyexpr_tree::IKeyExprTreeMut; +use zenoh_keyexpr::keyexpr_tree::{support::UnknownWildness, KeBoxTree}; +use zenoh_protocol::network::NetworkBody; +use zenoh_result::ZResult; + +pub(crate) fn downsampling_interceptor_factories( + config: &Vec, +) -> ZResult> { + let mut res: Vec = vec![]; + + for ds in config { + res.push(Box::new(DownsamplingInterceptorFactory::new(ds.clone()))); + } + + Ok(res) +} + +pub struct DownsamplingInterceptorFactory { + interfaces: Option>, + rules: Vec, + flow: DownsamplingFlow, +} + +impl DownsamplingInterceptorFactory { + pub fn new(conf: DownsamplingItemConf) -> Self { + Self { + interfaces: conf.interfaces, + rules: conf.rules, + flow: conf.flow, + } + } +} + +impl InterceptorFactoryTrait for DownsamplingInterceptorFactory { + fn new_transport_unicast( + &self, + transport: &TransportUnicast, + ) -> (Option, Option) { + log::debug!("New downsampler transport unicast {:?}", transport); + if let Some(interfaces) = &self.interfaces { + log::debug!( + "New downsampler transport unicast config interfaces: {:?}", + interfaces + ); + if let Ok(links) = transport.get_links() { + for link in links { + log::debug!( + "New downsampler transport unicast link interfaces: {:?}", + link.interfaces + ); + if !link.interfaces.iter().any(|x| interfaces.contains(x)) { + return (None, None); + } + } + } + }; + + match self.flow { + DownsamplingFlow::Ingress => ( + Some(Box::new(DownsamplingInterceptor::new(self.rules.clone()))), + None, + ), + DownsamplingFlow::Egress => ( + None, + Some(Box::new(DownsamplingInterceptor::new(self.rules.clone()))), + ), + } + } + + fn new_transport_multicast( + &self, + _transport: &TransportMulticast, + ) -> Option { + None + } + + fn new_peer_multicast(&self, _transport: &TransportMulticast) -> Option { + None + } +} + +struct Timestate { + pub threshold: std::time::Duration, + pub latest_message_timestamp: std::time::Instant, +} + +pub(crate) struct DownsamplingInterceptor { + ke_state: Arc>>, +} + +impl InterceptorTrait for DownsamplingInterceptor { + fn intercept( + &self, + ctx: RoutingContext, + ) -> Option> { + if matches!(ctx.msg.body, NetworkBody::Push(_)) { + if let Some(key_expr) = ctx.full_key_expr() { + let mut ke_state = zlock!(self.ke_state); + if let Some(state) = ke_state.weight_at_mut(&key_expr.clone()) { + let timestamp = std::time::Instant::now(); + + if timestamp - state.latest_message_timestamp >= state.threshold { + state.latest_message_timestamp = timestamp; + return Some(ctx); + } else { + return None; + } + } + } + } + + Some(ctx) + } +} + +const NANOS_PER_SEC: f64 = 1_000_000_000.0; + +impl DownsamplingInterceptor { + pub fn new(rules: Vec) -> Self { + let mut ke_state = KeBoxTree::default(); + for rule in rules { + let mut threshold = std::time::Duration::MAX; + let mut latest_message_timestamp = std::time::Instant::now(); + if rule.rate != 0.0 { + threshold = + std::time::Duration::from_nanos((1. 
/ rule.rate * NANOS_PER_SEC) as u64); + latest_message_timestamp -= threshold; + } + ke_state.insert( + &rule.key_expr, + Timestate { + threshold, + latest_message_timestamp, + }, + ); + } + Self { + ke_state: Arc::new(Mutex::new(ke_state)), + } + } +} diff --git a/zenoh/src/net/routing/interceptor/mod.rs b/zenoh/src/net/routing/interceptor/mod.rs index 8dece4f1c6..b2135b2b47 100644 --- a/zenoh/src/net/routing/interceptor/mod.rs +++ b/zenoh/src/net/routing/interceptor/mod.rs @@ -163,12 +163,12 @@ //! //! [Click here for Zenoh's documentation](../zenoh/index.html) //! -mod accessintercept; +mod accesscontrol; mod authz; use std::sync::Arc; use super::RoutingContext; -use crate::net::routing::interceptor::{accessintercept::AclEnforcer, authz::PolicyEnforcer}; +use crate::net::routing::interceptor::{accesscontrol::AclEnforcer, authz::PolicyEnforcer}; use zenoh_config::Config; use zenoh_protocol::network::NetworkMessage; use zenoh_transport::{multicast::TransportMulticast, unicast::TransportUnicast}; From 763609086db352d3033fb6918903f6ef2ebcfb78 Mon Sep 17 00:00:00 2001 From: snehilzs Date: Wed, 21 Feb 2024 15:47:09 +0100 Subject: [PATCH 054/122] WIP:moved rules into config file --- Cargo.lock | 2 - DEFAULT_CONFIG.json5 | 69 +++++++++++++++- commons/zenoh-config/src/defaults.rs | 3 +- commons/zenoh-config/src/lib.rs | 44 ++++++++++- pub_config.json5 | 2 +- sub_config.json5 | 2 +- zenoh/Cargo.toml | 5 +- zenoh/src/net/routing/interceptor/authz.rs | 92 ++++++++++++++++------ 8 files changed, 184 insertions(+), 35 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 916574adcd..ece93e040b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4568,7 +4568,6 @@ dependencies = [ "async-std", "async-trait", "base64 0.21.4", - "bitflags 2.4.2", "const_format", "env_logger", "event-listener 4.0.0", @@ -4577,7 +4576,6 @@ dependencies = [ "futures", "git-version", "hex", - "ipnetwork", "lazy_static", "log", "ordered-float", diff --git a/DEFAULT_CONFIG.json5 b/DEFAULT_CONFIG.json5 index e7ac6b74b7..1dc1c7d097 100644 --- a/DEFAULT_CONFIG.json5 +++ b/DEFAULT_CONFIG.json5 @@ -263,7 +263,74 @@ acl: { enabled: true, default_deny: true, - policy_file: "rules_test.json5", + policy_list: { + "policy_definition": "UserID", + "rules": [ + { + "attribute_name": "UserID", + "attribute_rules": [ + { + "sub": "aaa3b411006ad57868988f9fec672a31", + "ke": "test/thr", + "action": "Write", + "permission": true + }, + { + "sub": "bbb3b411006ad57868988f9fec672a31", + "ke": "test/thr", + "action": "Read", + "permission": true + }, + { + "sub": "aaabbb11006ad57868988f9fec672a31", + "ke": "test/thr", + "action": "Read", + "permission": true + }, + { + "sub": "aaabbb11006ad57868988f9fec672a31", + "ke": "test/thr", + "action": "Write", + "permission": true + } + ] + }, + { + "attribute_name": "NetworkType", + "attribute_rules": [ + { + "sub": "wifi", + "ke": "test/thr", + "action": "Write", + "permission": true + }, + { + "sub": "wifi", + "ke": "test/thr", + "action": "Read", + "permission": true + } + ] + }, + { + "attribute_name": "location", + "attribute_rules": [ + { + "sub": "location_1", + "ke": "test/thr", + "action": "Write", + "permission": true + }, + { + "sub": "location_2", + "ke": "test/thr", + "action": "Read", + "permission": true + } + ] + } + ] + }, }, }, /// Configure the Admin Space diff --git a/commons/zenoh-config/src/defaults.rs b/commons/zenoh-config/src/defaults.rs index 38f794df57..54c8a568a6 100644 --- a/commons/zenoh-config/src/defaults.rs +++ b/commons/zenoh-config/src/defaults.rs @@ -217,7 +217,8 @@ impl 
Default for AclConfig { Self { enabled: Some(false), default_deny: Some(false), - policy_file: None, + policy_list: None, } } } + diff --git a/commons/zenoh-config/src/lib.rs b/commons/zenoh-config/src/lib.rs index 57df392896..648d0e5ede 100644 --- a/commons/zenoh-config/src/lib.rs +++ b/commons/zenoh-config/src/lib.rs @@ -97,6 +97,48 @@ pub struct DownsamplingItemConf { pub flow: DownsamplingFlow, } +//adding for authz structs + +#[derive(Serialize, Deserialize,Clone,Debug)] +pub struct PolicyList { + policy_definition: String, + rules: Vec, +} + +#[derive(Debug, Serialize, Deserialize, Clone)] +pub struct AttributeRules { + attribute_name: String, + attribute_rules: Vec, +} + +#[derive(Clone, Serialize,Debug, Deserialize)] +pub struct AttributeRule { + sub: Attribute, + ke: String, + action: Action, + permission: bool, +} + +#[derive(Serialize, Debug, Deserialize, Eq, PartialEq, Hash, Clone)] +#[serde(untagged)] +pub enum Attribute { + UserID(ZenohId), + NetworkType(String), //clarify + MetadataType(String), //clarify +} + +#[derive(Clone, Debug, Serialize, Deserialize, Eq, Hash, PartialEq)] +pub enum Action { + None, + Read, + Write, + DeclareSub, + Delete, + DeclareQuery, +} + + + pub trait ConfigValidator: Send + Sync { fn check_config( &self, @@ -414,7 +456,7 @@ validated_struct::validator! { pub acl: AclConfig { pub enabled: Option, pub default_deny: Option, - pub policy_file: Option, + pub policy_list: Option } }, /// Configuration of the admin space. diff --git a/pub_config.json5 b/pub_config.json5 index e8ab4cad62..2b9d95703f 100644 --- a/pub_config.json5 +++ b/pub_config.json5 @@ -110,7 +110,7 @@ acl: { enabled: false, default_deny: false, - policy_file: null, + policy_list: null, }, } // transport: { diff --git a/sub_config.json5 b/sub_config.json5 index f50d34e460..145fa33444 100644 --- a/sub_config.json5 +++ b/sub_config.json5 @@ -33,7 +33,7 @@ acl: { enabled: false, default_deny: false, - policy_file: null, + policy_list: null, }, } // /// Configure the scouting mechanisms and their behaviours diff --git a/zenoh/Cargo.toml b/zenoh/Cargo.toml index ba74117929..ee448ae7aa 100644 --- a/zenoh/Cargo.toml +++ b/zenoh/Cargo.toml @@ -105,12 +105,11 @@ zenoh-sync = { workspace = true } zenoh-transport = { workspace = true } zenoh-util = { workspace = true } zenoh-keyexpr = { workspace = true } -ipnetwork = "0.20.0" + rustc-hash = "1.1.0" [build-dependencies] rustc_version = { workspace = true } -[dependencies.bitflags] -version = "2.4.2" + [lib] name = "zenoh" diff --git a/zenoh/src/net/routing/interceptor/authz.rs b/zenoh/src/net/routing/interceptor/authz.rs index 20acfa719d..5510fb5735 100644 --- a/zenoh/src/net/routing/interceptor/authz.rs +++ b/zenoh/src/net/routing/interceptor/authz.rs @@ -1,6 +1,6 @@ use rustc_hash::FxHashMap; use serde::{Deserialize, Serialize}; -use std::fs; +//use std::fs; use std::hash::Hash; use zenoh_config::AclConfig; use zenoh_keyexpr::keyexpr; @@ -8,7 +8,6 @@ use zenoh_keyexpr::keyexpr_tree::{IKeyExprTree, IKeyExprTreeMut, KeBoxTree}; use zenoh_result::ZResult; #[derive(Clone, Debug, Serialize, Deserialize, Eq, Hash, PartialEq)] pub enum Action { - None, Read, Write, DeclareSub, @@ -29,6 +28,18 @@ pub struct RequestBuilder { action: Option, } +#[derive(Deserialize,Debug)] +pub struct GetPolicy { + policy_definition: String, + rules: Vec, +} + +// #[derive(Deserialize)] +// pub struct PolicyList { +// policy_definition: String, +// rules: Vec, +// } + type KeTreeRule = KeBoxTree; impl RequestBuilder { @@ -140,18 +151,18 @@ impl PolicyEnforcer { None => 
log::error!("error default_deny not setup"), } if self.acl_enabled { - match acl_config.policy_file { - Some(file_path) => { - let policy_information = self.policy_resource_point(&file_path)?; + match acl_config.policy_list { + Some(policy_list) => { + let policy_information = self.policy_information_point(policy_list)?; self.attribute_list = Some(policy_information.attribute_list); let _policy_definition = policy_information.policy_definition; self.build_policy_map( self.attribute_list.clone().unwrap(), policy_information.policy_rules, )?; - log::info!("policy map was created successfully"); + log::info!("policy enforcer was initialised successfully"); } - None => log::error!("no policy file path was specified"), + None => log::error!("no policy list was specified"), } } Ok(()) @@ -175,6 +186,7 @@ impl PolicyEnforcer { Ok(()) } + pub fn get_rules_list(&self, rule_set: Vec) -> ZResult { let mut policy: SubActPolicy = FxHashMap::default(); for v in rule_set { @@ -200,26 +212,19 @@ impl PolicyEnforcer { Ok(policy) } - pub fn policy_resource_point(&self, file_path: &str) -> ZResult { - //read file - #[derive(Deserialize)] - struct GetPolicyFile { - policy_definition: String, - rules: Vec, - } + //if policy_list exists, get that value directly and use it for get policy - let policy_file_info: GetPolicyFile = { - let data = fs::read_to_string(file_path).expect("error reading file"); - serde_json::from_str(&data).expect("error parsing from json to struct") - }; + pub fn policy_information_point(&self, policy_list : zenoh_config::PolicyList)->ZResult{ - //get the rules mentioned in the policy definition - let enforced_attributes = policy_file_info + //let policy_list_info: GetPolicy;// = GetPolicy{ + let value = serde_json::to_value(&policy_list).unwrap(); + let policy_list_info: GetPolicy = serde_json::from_value(value)?; + let enforced_attributes = policy_list_info .policy_definition .split(' ') .collect::>(); - let complete_ruleset = policy_file_info.rules; + let complete_ruleset = policy_list_info.rules; let mut attribute_list: Vec = Vec::new(); let mut policy_rules: Vec = Vec::new(); for rule in complete_ruleset.iter() { @@ -229,14 +234,47 @@ impl PolicyEnforcer { } } - let policy_definition = policy_file_info.policy_definition; + let policy_definition = policy_list_info.policy_definition; Ok(PolicyInformation { policy_definition, attribute_list, policy_rules, - }) + }) + } + // pub fn policy_resource_point(&self, file_path: &str) -> ZResult { + // //read file + + // let policy_file_info: GetPolicy = { + // let data = fs::read_to_string(file_path).expect("error reading file"); + // serde_json::from_str(&data).expect("error parsing from json to struct") + // }; + + // //get the rules mentioned in the policy definition + // let enforced_attributes = policy_file_info + // .policy_definition + // .split(' ') + // .collect::>(); + + // let complete_ruleset = policy_file_info.rules; + // let mut attribute_list: Vec = Vec::new(); + // let mut policy_rules: Vec = Vec::new(); + // for rule in complete_ruleset.iter() { + // if enforced_attributes.contains(&rule.attribute_name.as_str()) { + // attribute_list.push(rule.attribute_name.clone()); + // policy_rules.push(rule.clone()) + // } + // } + + // let policy_definition = policy_file_info.policy_definition; + + // Ok(PolicyInformation { + // policy_definition, + // attribute_list, + // policy_rules, + // }) + // } pub fn policy_enforcement_point(&self, request_info: RequestInfo) -> ZResult { /* @@ -248,6 +286,7 @@ impl PolicyEnforcer { */ let obj = 
request_info.ke; + let mut decision = true; let mut decisions: Vec = Vec::new(); //to store all decisions for each subject in list for (attribute_index, val) in request_info.sub.into_iter().enumerate() { //build request @@ -256,9 +295,12 @@ impl PolicyEnforcer { .obj(obj.clone()) .action(request_info.action.clone()) .build()?; - decisions.push(self.policy_decision_point(attribute_index, request)); + let d = self.policy_decision_point(attribute_index, request); + decisions.push(d); + decision = decision & d; } - let decision: bool = decisions[0]; //only checks for single attribute right now + + //let decision: bool = decisions.iter().map(|d,x=true|x=d&x);// decisions[0]; //only checks for single attribute right now Ok(decision) } From 99c2efac53b0a01ad38619a4d4154ca67f05e3b8 Mon Sep 17 00:00:00 2001 From: snehilzs Date: Wed, 21 Feb 2024 17:04:10 +0100 Subject: [PATCH 055/122] WIP:moved rules into config file --- DEFAULT_CONFIG.json5 | 30 +++++++++++-------- commons/zenoh-config/src/lib.rs | 11 +++---- .../net/routing/interceptor/accesscontrol.rs | 27 ++++------------- zenoh/src/net/routing/interceptor/authz.rs | 25 +++++----------- 4 files changed, 35 insertions(+), 58 deletions(-) diff --git a/DEFAULT_CONFIG.json5 b/DEFAULT_CONFIG.json5 index 1dc1c7d097..28a50b586d 100644 --- a/DEFAULT_CONFIG.json5 +++ b/DEFAULT_CONFIG.json5 @@ -264,11 +264,11 @@ enabled: true, default_deny: true, policy_list: { - "policy_definition": "UserID", - "rules": [ + "policy_definition": "UserId", + "ruleset": [ { - "attribute_name": "UserID", - "attribute_rules": [ + "attribute": "UserId", + "rules": [ { "sub": "aaa3b411006ad57868988f9fec672a31", "ke": "test/thr", @@ -281,6 +281,12 @@ "action": "Read", "permission": true }, + { + "sub": "bbb3b411006ad57868988f9fec672a31", + "ke": "test/thr", + "action": "Read", + "permission": true + }, { "sub": "aaabbb11006ad57868988f9fec672a31", "ke": "test/thr", @@ -296,16 +302,16 @@ ] }, { - "attribute_name": "NetworkType", - "attribute_rules": [ + "attribute": "NetworkType", + "rules": [ { - "sub": "wifi", + "sub": "wlan0", "ke": "test/thr", "action": "Write", "permission": true }, { - "sub": "wifi", + "sub": "wlan0", "ke": "test/thr", "action": "Read", "permission": true @@ -313,16 +319,16 @@ ] }, { - "attribute_name": "location", - "attribute_rules": [ + "attribute": "user_defined_attribute", + "rules": [ { - "sub": "location_1", + "sub": "value_1", "ke": "test/thr", "action": "Write", "permission": true }, { - "sub": "location_2", + "sub": "value_2", "ke": "test/thr", "action": "Read", "permission": true diff --git a/commons/zenoh-config/src/lib.rs b/commons/zenoh-config/src/lib.rs index 648d0e5ede..f8986a6eb2 100644 --- a/commons/zenoh-config/src/lib.rs +++ b/commons/zenoh-config/src/lib.rs @@ -102,13 +102,13 @@ pub struct DownsamplingItemConf { #[derive(Serialize, Deserialize,Clone,Debug)] pub struct PolicyList { policy_definition: String, - rules: Vec, + ruleset: Vec, } #[derive(Debug, Serialize, Deserialize, Clone)] pub struct AttributeRules { - attribute_name: String, - attribute_rules: Vec, + attribute: String, + rules: Vec, } #[derive(Clone, Serialize,Debug, Deserialize)] @@ -122,14 +122,13 @@ pub struct AttributeRule { #[derive(Serialize, Debug, Deserialize, Eq, PartialEq, Hash, Clone)] #[serde(untagged)] pub enum Attribute { - UserID(ZenohId), + UserId(ZenohId), NetworkType(String), //clarify MetadataType(String), //clarify } #[derive(Clone, Debug, Serialize, Deserialize, Eq, Hash, PartialEq)] pub enum Action { - None, Read, Write, DeclareSub, @@ -137,8 +136,6 @@ 
pub enum Action { DeclareQuery, } - - pub trait ConfigValidator: Send + Sync { fn check_config( &self, diff --git a/zenoh/src/net/routing/interceptor/accesscontrol.rs b/zenoh/src/net/routing/interceptor/accesscontrol.rs index d39f255ef8..e4014e8cea 100644 --- a/zenoh/src/net/routing/interceptor/accesscontrol.rs +++ b/zenoh/src/net/routing/interceptor/accesscontrol.rs @@ -65,35 +65,24 @@ impl InterceptorTrait for IngressAclEnforcer { &self, ctx: RoutingContext, ) -> Option> { - //intercept msg and send it to PEP if let NetworkBody::Push(Push { payload: PushBody::Put(_), .. }) = &ctx.msg.body { let e = &self.e; - let ke = ctx.full_expr().unwrap(); - let network_type = "wifi"; - - // let ke = "test/thr"; //for testing - - //create the subject list from given values - //get attribute list - //iterate and get all values from the attribute list for the request - + let network_type = "wlan0"; //for testing let mut sub_info: Vec = Vec::new(); let attribute_list = e.get_attribute_list().unwrap(); for i in attribute_list { - // println!("list runs once"); match i.as_str() { - "UserID" => sub_info.push(Attribute::UserID(self.zid)), + "UserId" => sub_info.push(Attribute::UserId(self.zid)), "NetworkType" => sub_info.push(Attribute::NetworkType(network_type.to_owned())), - _ => { //other metadata values}, + _ => { //other metadata values } } } - let request_info = RequestInfo { sub: sub_info, ke: ke.to_string(), @@ -123,7 +112,6 @@ impl InterceptorTrait for EgressAclEnforcer { &self, ctx: RoutingContext, ) -> Option> { - // intercept msg and send it to PEP if let NetworkBody::Push(Push { payload: PushBody::Put(_), .. @@ -131,27 +119,22 @@ impl InterceptorTrait for EgressAclEnforcer { { let e = &self.e; let ke = ctx.full_expr().unwrap(); - let network_type = "wifi"; //for testing - - // let ke = "test/thr"; //for testing - + let network_type = "wlan0"; //for testing let mut sub_info: Vec = Vec::new(); let attribute_list = e.get_attribute_list().unwrap(); for i in attribute_list { match i.as_str() { - "UserID" => sub_info.push(Attribute::UserID(self.zid)), + "UserId" => sub_info.push(Attribute::UserId(self.zid)), "NetworkType" => sub_info.push(Attribute::NetworkType(network_type.to_owned())), _ => { //other metadata values, } } } - let request_info = RequestInfo { sub: sub_info, ke: ke.to_string(), action: Action::Read, }; - match e.policy_enforcement_point(request_info) { Ok(decision) => { if !decision { diff --git a/zenoh/src/net/routing/interceptor/authz.rs b/zenoh/src/net/routing/interceptor/authz.rs index 5510fb5735..4c56735dd6 100644 --- a/zenoh/src/net/routing/interceptor/authz.rs +++ b/zenoh/src/net/routing/interceptor/authz.rs @@ -1,6 +1,5 @@ use rustc_hash::FxHashMap; use serde::{Deserialize, Serialize}; -//use std::fs; use std::hash::Hash; use zenoh_config::AclConfig; use zenoh_keyexpr::keyexpr; @@ -31,15 +30,8 @@ pub struct RequestBuilder { #[derive(Deserialize,Debug)] pub struct GetPolicy { policy_definition: String, - rules: Vec, + ruleset: Vec, } - -// #[derive(Deserialize)] -// pub struct PolicyList { -// policy_definition: String, -// rules: Vec, -// } - type KeTreeRule = KeBoxTree; impl RequestBuilder { @@ -55,7 +47,6 @@ impl RequestBuilder { } pub fn sub(&mut self, sub: impl Into) -> &mut Self { - //adds subject let _ = self.sub.insert(sub.into()); self } @@ -96,8 +87,8 @@ pub struct PolicyInformation { #[derive(Debug, Deserialize, Clone)] pub struct AttributeRules { - attribute_name: String, - attribute_rules: Vec, + attribute: String, + rules: Vec, } #[derive(Clone, Debug, 
Deserialize)] @@ -112,7 +103,7 @@ use zenoh_config::ZenohId; #[derive(Serialize, Debug, Deserialize, Eq, PartialEq, Hash, Clone)] #[serde(untagged)] pub enum Attribute { - UserID(ZenohId), + UserId(ZenohId), NetworkType(String), //clarify MetadataType(String), //clarify } @@ -179,7 +170,7 @@ impl PolicyEnforcer { //for each attrribute in the list, get rules, create map and push into rules_vector let mut pm: Vec = Vec::new(); for (i, _) in attribute_list.iter().enumerate() { - let rm = self.get_rules_list(policy_rules_vector[i].attribute_rules.clone())?; + let rm = self.get_rules_list(policy_rules_vector[i].rules.clone())?; pm.push(rm); } self.policy_list = Some(pm); @@ -224,12 +215,12 @@ impl PolicyEnforcer { .split(' ') .collect::>(); - let complete_ruleset = policy_list_info.rules; + let complete_ruleset = policy_list_info.ruleset; let mut attribute_list: Vec = Vec::new(); let mut policy_rules: Vec = Vec::new(); for rule in complete_ruleset.iter() { - if enforced_attributes.contains(&rule.attribute_name.as_str()) { - attribute_list.push(rule.attribute_name.clone()); + if enforced_attributes.contains(&rule.attribute.as_str()) { + attribute_list.push(rule.attribute.clone()); policy_rules.push(rule.clone()) } } From 50c9f0eaabd77662f34f7f736e7947e1134e6307 Mon Sep 17 00:00:00 2001 From: snehilzs Date: Wed, 21 Feb 2024 19:52:37 +0100 Subject: [PATCH 056/122] WIP:merging changes --- DEFAULT_CONFIG.json5 | 17 +- commons/zenoh-config/Cargo.toml | 0 commons/zenoh-config/README.md | 0 commons/zenoh-config/src/defaults.rs | 0 commons/zenoh-config/src/include.rs | 0 commons/zenoh-config/src/lib.rs | 0 pub_config.json5 | 0 rules_test.json5 | 0 zenoh/src/net/codec/linkstate.rs | 0 zenoh/src/net/codec/mod.rs | 0 zenoh/src/net/mod.rs | 0 zenoh/src/net/primitives/demux.rs | 0 zenoh/src/net/primitives/mod.rs | 0 zenoh/src/net/primitives/mux.rs | 0 zenoh/src/net/protocol/linkstate.rs | 0 zenoh/src/net/protocol/mod.rs | 0 zenoh/src/net/routing/dispatcher/face.rs | 0 zenoh/src/net/routing/dispatcher/mod.rs | 0 zenoh/src/net/routing/dispatcher/pubsub.rs | 0 zenoh/src/net/routing/dispatcher/queries.rs | 0 zenoh/src/net/routing/dispatcher/resource.rs | 0 zenoh/src/net/routing/dispatcher/tables.rs | 14 +- zenoh/src/net/routing/hat/client/mod.rs | 0 zenoh/src/net/routing/hat/client/pubsub.rs | 0 zenoh/src/net/routing/hat/client/queries.rs | 0 .../src/net/routing/hat/linkstate_peer/mod.rs | 0 .../net/routing/hat/linkstate_peer/network.rs | 0 .../net/routing/hat/linkstate_peer/pubsub.rs | 0 .../net/routing/hat/linkstate_peer/queries.rs | 0 zenoh/src/net/routing/hat/mod.rs | 0 zenoh/src/net/routing/hat/p2p_peer/gossip.rs | 0 zenoh/src/net/routing/hat/p2p_peer/mod.rs | 0 zenoh/src/net/routing/hat/p2p_peer/pubsub.rs | 0 zenoh/src/net/routing/hat/p2p_peer/queries.rs | 0 zenoh/src/net/routing/hat/router/mod.rs | 0 zenoh/src/net/routing/hat/router/network.rs | 0 zenoh/src/net/routing/hat/router/pubsub.rs | 0 zenoh/src/net/routing/hat/router/queries.rs | 0 .../net/routing/interceptor/accesscontrol.rs | 0 zenoh/src/net/routing/interceptor/authz.rs | 0 .../net/routing/interceptor/downsampling.rs | 0 zenoh/src/net/routing/interceptor/mod.rs | 169 ++---------------- zenoh/src/net/routing/mod.rs | 7 + zenoh/src/net/routing/router.rs | 13 +- zenoh/src/net/runtime/adminspace.rs | 0 zenoh/src/net/runtime/mod.rs | 2 +- zenoh/src/net/runtime/orchestrator.rs | 0 zenoh/src/net/tests/mod.rs | 0 zenoh/src/net/tests/tables.rs | 12 +- 49 files changed, 62 insertions(+), 172 deletions(-) mode change 100644 => 100755 DEFAULT_CONFIG.json5 
mode change 100644 => 100755 commons/zenoh-config/Cargo.toml mode change 100644 => 100755 commons/zenoh-config/README.md mode change 100644 => 100755 commons/zenoh-config/src/defaults.rs mode change 100644 => 100755 commons/zenoh-config/src/include.rs mode change 100644 => 100755 commons/zenoh-config/src/lib.rs mode change 100644 => 100755 pub_config.json5 mode change 100644 => 100755 rules_test.json5 mode change 100644 => 100755 zenoh/src/net/codec/linkstate.rs mode change 100644 => 100755 zenoh/src/net/codec/mod.rs mode change 100644 => 100755 zenoh/src/net/mod.rs mode change 100644 => 100755 zenoh/src/net/primitives/demux.rs mode change 100644 => 100755 zenoh/src/net/primitives/mod.rs mode change 100644 => 100755 zenoh/src/net/primitives/mux.rs mode change 100644 => 100755 zenoh/src/net/protocol/linkstate.rs mode change 100644 => 100755 zenoh/src/net/protocol/mod.rs mode change 100644 => 100755 zenoh/src/net/routing/dispatcher/face.rs mode change 100644 => 100755 zenoh/src/net/routing/dispatcher/mod.rs mode change 100644 => 100755 zenoh/src/net/routing/dispatcher/pubsub.rs mode change 100644 => 100755 zenoh/src/net/routing/dispatcher/queries.rs mode change 100644 => 100755 zenoh/src/net/routing/dispatcher/resource.rs mode change 100644 => 100755 zenoh/src/net/routing/dispatcher/tables.rs mode change 100644 => 100755 zenoh/src/net/routing/hat/client/mod.rs mode change 100644 => 100755 zenoh/src/net/routing/hat/client/pubsub.rs mode change 100644 => 100755 zenoh/src/net/routing/hat/client/queries.rs mode change 100644 => 100755 zenoh/src/net/routing/hat/linkstate_peer/mod.rs mode change 100644 => 100755 zenoh/src/net/routing/hat/linkstate_peer/network.rs mode change 100644 => 100755 zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs mode change 100644 => 100755 zenoh/src/net/routing/hat/linkstate_peer/queries.rs mode change 100644 => 100755 zenoh/src/net/routing/hat/mod.rs mode change 100644 => 100755 zenoh/src/net/routing/hat/p2p_peer/gossip.rs mode change 100644 => 100755 zenoh/src/net/routing/hat/p2p_peer/mod.rs mode change 100644 => 100755 zenoh/src/net/routing/hat/p2p_peer/pubsub.rs mode change 100644 => 100755 zenoh/src/net/routing/hat/p2p_peer/queries.rs mode change 100644 => 100755 zenoh/src/net/routing/hat/router/mod.rs mode change 100644 => 100755 zenoh/src/net/routing/hat/router/network.rs mode change 100644 => 100755 zenoh/src/net/routing/hat/router/pubsub.rs mode change 100644 => 100755 zenoh/src/net/routing/hat/router/queries.rs mode change 100644 => 100755 zenoh/src/net/routing/interceptor/accesscontrol.rs mode change 100644 => 100755 zenoh/src/net/routing/interceptor/authz.rs mode change 100644 => 100755 zenoh/src/net/routing/interceptor/downsampling.rs mode change 100644 => 100755 zenoh/src/net/routing/interceptor/mod.rs mode change 100644 => 100755 zenoh/src/net/routing/mod.rs mode change 100644 => 100755 zenoh/src/net/routing/router.rs mode change 100644 => 100755 zenoh/src/net/runtime/adminspace.rs mode change 100644 => 100755 zenoh/src/net/runtime/mod.rs mode change 100644 => 100755 zenoh/src/net/runtime/orchestrator.rs mode change 100644 => 100755 zenoh/src/net/tests/mod.rs mode change 100644 => 100755 zenoh/src/net/tests/tables.rs diff --git a/DEFAULT_CONFIG.json5 b/DEFAULT_CONFIG.json5 old mode 100644 new mode 100755 index 28a50b586d..b68718ac05 --- a/DEFAULT_CONFIG.json5 +++ b/DEFAULT_CONFIG.json5 @@ -108,6 +108,21 @@ // // key_expression // ], // }, + + // /// The downsampling declaration. 
+ // downsampling: [ + // { + // /// A list of network interfaces messages will be processed on, the rest will be passed as is. + // interfaces: [ "wlan0" ], + // /// Data flow messages will be processed on. ("egress" or "ingress") + // flow: "egress", + // /// A list of downsampling rules: key_expression and the rate (maximum frequency in Hertz) + // rules: [ + // { key_expr: "demo/example/zenoh-rs-pub", rate: 0.1 }, + // ], + // }, + // ], + /// Configure internal transport parameters transport: { unicast: { @@ -484,4 +499,4 @@ // __config__: "./plugins/zenoh-plugin-storage-manager/config.json5", // } // }, -} \ No newline at end of file +} diff --git a/commons/zenoh-config/Cargo.toml b/commons/zenoh-config/Cargo.toml old mode 100644 new mode 100755 diff --git a/commons/zenoh-config/README.md b/commons/zenoh-config/README.md old mode 100644 new mode 100755 diff --git a/commons/zenoh-config/src/defaults.rs b/commons/zenoh-config/src/defaults.rs old mode 100644 new mode 100755 diff --git a/commons/zenoh-config/src/include.rs b/commons/zenoh-config/src/include.rs old mode 100644 new mode 100755 diff --git a/commons/zenoh-config/src/lib.rs b/commons/zenoh-config/src/lib.rs old mode 100644 new mode 100755 diff --git a/pub_config.json5 b/pub_config.json5 old mode 100644 new mode 100755 diff --git a/rules_test.json5 b/rules_test.json5 old mode 100644 new mode 100755 diff --git a/zenoh/src/net/codec/linkstate.rs b/zenoh/src/net/codec/linkstate.rs old mode 100644 new mode 100755 diff --git a/zenoh/src/net/codec/mod.rs b/zenoh/src/net/codec/mod.rs old mode 100644 new mode 100755 diff --git a/zenoh/src/net/mod.rs b/zenoh/src/net/mod.rs old mode 100644 new mode 100755 diff --git a/zenoh/src/net/primitives/demux.rs b/zenoh/src/net/primitives/demux.rs old mode 100644 new mode 100755 diff --git a/zenoh/src/net/primitives/mod.rs b/zenoh/src/net/primitives/mod.rs old mode 100644 new mode 100755 diff --git a/zenoh/src/net/primitives/mux.rs b/zenoh/src/net/primitives/mux.rs old mode 100644 new mode 100755 diff --git a/zenoh/src/net/protocol/linkstate.rs b/zenoh/src/net/protocol/linkstate.rs old mode 100644 new mode 100755 diff --git a/zenoh/src/net/protocol/mod.rs b/zenoh/src/net/protocol/mod.rs old mode 100644 new mode 100755 diff --git a/zenoh/src/net/routing/dispatcher/face.rs b/zenoh/src/net/routing/dispatcher/face.rs old mode 100644 new mode 100755 diff --git a/zenoh/src/net/routing/dispatcher/mod.rs b/zenoh/src/net/routing/dispatcher/mod.rs old mode 100644 new mode 100755 diff --git a/zenoh/src/net/routing/dispatcher/pubsub.rs b/zenoh/src/net/routing/dispatcher/pubsub.rs old mode 100644 new mode 100755 diff --git a/zenoh/src/net/routing/dispatcher/queries.rs b/zenoh/src/net/routing/dispatcher/queries.rs old mode 100644 new mode 100755 diff --git a/zenoh/src/net/routing/dispatcher/resource.rs b/zenoh/src/net/routing/dispatcher/resource.rs old mode 100644 new mode 100755 diff --git a/zenoh/src/net/routing/dispatcher/tables.rs b/zenoh/src/net/routing/dispatcher/tables.rs old mode 100644 new mode 100755 index 274b600024..e239a316a1 --- a/zenoh/src/net/routing/dispatcher/tables.rs +++ b/zenoh/src/net/routing/dispatcher/tables.rs @@ -28,6 +28,7 @@ use zenoh_config::unwrap_or_default; use zenoh_config::Config; use zenoh_protocol::core::{ExprId, WhatAmI, ZenohId}; use zenoh_protocol::network::Mapping; +use zenoh_result::ZResult; // use zenoh_collections::Timer; use zenoh_sync::get_mut_unchecked; @@ -76,7 +77,12 @@ pub struct Tables { } impl Tables { - pub fn new(zid: ZenohId, whatami: WhatAmI, hlc: Option>, 
config: &Config) -> Self { + pub fn new( + zid: ZenohId, + whatami: WhatAmI, + hlc: Option>, + config: &Config, + ) -> ZResult { let drop_future_timestamp = unwrap_or_default!(config.timestamping().drop_future_timestamp()); let router_peers_failover_brokering = @@ -84,7 +90,7 @@ impl Tables { // let queries_default_timeout = // Duration::from_millis(unwrap_or_default!(config.queries_default_timeout())); let hat_code = hat::new_hat(whatami, config); - Tables { + Ok(Tables { zid, whatami, face_counter: 0, @@ -96,11 +102,11 @@ impl Tables { faces: HashMap::new(), mcast_groups: vec![], mcast_faces: vec![], - interceptors: interceptor_factories(config), + interceptors: interceptor_factories(config)?, pull_caches_lock: Mutex::new(()), hat: hat_code.new_tables(router_peers_failover_brokering), hat_code: hat_code.into(), - } + }) } #[doc(hidden)] diff --git a/zenoh/src/net/routing/hat/client/mod.rs b/zenoh/src/net/routing/hat/client/mod.rs old mode 100644 new mode 100755 diff --git a/zenoh/src/net/routing/hat/client/pubsub.rs b/zenoh/src/net/routing/hat/client/pubsub.rs old mode 100644 new mode 100755 diff --git a/zenoh/src/net/routing/hat/client/queries.rs b/zenoh/src/net/routing/hat/client/queries.rs old mode 100644 new mode 100755 diff --git a/zenoh/src/net/routing/hat/linkstate_peer/mod.rs b/zenoh/src/net/routing/hat/linkstate_peer/mod.rs old mode 100644 new mode 100755 diff --git a/zenoh/src/net/routing/hat/linkstate_peer/network.rs b/zenoh/src/net/routing/hat/linkstate_peer/network.rs old mode 100644 new mode 100755 diff --git a/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs b/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs old mode 100644 new mode 100755 diff --git a/zenoh/src/net/routing/hat/linkstate_peer/queries.rs b/zenoh/src/net/routing/hat/linkstate_peer/queries.rs old mode 100644 new mode 100755 diff --git a/zenoh/src/net/routing/hat/mod.rs b/zenoh/src/net/routing/hat/mod.rs old mode 100644 new mode 100755 diff --git a/zenoh/src/net/routing/hat/p2p_peer/gossip.rs b/zenoh/src/net/routing/hat/p2p_peer/gossip.rs old mode 100644 new mode 100755 diff --git a/zenoh/src/net/routing/hat/p2p_peer/mod.rs b/zenoh/src/net/routing/hat/p2p_peer/mod.rs old mode 100644 new mode 100755 diff --git a/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs b/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs old mode 100644 new mode 100755 diff --git a/zenoh/src/net/routing/hat/p2p_peer/queries.rs b/zenoh/src/net/routing/hat/p2p_peer/queries.rs old mode 100644 new mode 100755 diff --git a/zenoh/src/net/routing/hat/router/mod.rs b/zenoh/src/net/routing/hat/router/mod.rs old mode 100644 new mode 100755 diff --git a/zenoh/src/net/routing/hat/router/network.rs b/zenoh/src/net/routing/hat/router/network.rs old mode 100644 new mode 100755 diff --git a/zenoh/src/net/routing/hat/router/pubsub.rs b/zenoh/src/net/routing/hat/router/pubsub.rs old mode 100644 new mode 100755 diff --git a/zenoh/src/net/routing/hat/router/queries.rs b/zenoh/src/net/routing/hat/router/queries.rs old mode 100644 new mode 100755 diff --git a/zenoh/src/net/routing/interceptor/accesscontrol.rs b/zenoh/src/net/routing/interceptor/accesscontrol.rs old mode 100644 new mode 100755 diff --git a/zenoh/src/net/routing/interceptor/authz.rs b/zenoh/src/net/routing/interceptor/authz.rs old mode 100644 new mode 100755 diff --git a/zenoh/src/net/routing/interceptor/downsampling.rs b/zenoh/src/net/routing/interceptor/downsampling.rs old mode 100644 new mode 100755 diff --git a/zenoh/src/net/routing/interceptor/mod.rs b/zenoh/src/net/routing/interceptor/mod.rs 
old mode 100644 new mode 100755 index b2135b2b47..f8b9f2d389 --- a/zenoh/src/net/routing/interceptor/mod.rs +++ b/zenoh/src/net/routing/interceptor/mod.rs @@ -1,148 +1,3 @@ -// // -// // Copyright (c) 2023 ZettaScale Technology -// // -// // This program and the accompanying materials are made available under the -// // terms of the Eclipse Public License 2.0 which is available at -// // http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 -// // which is available at https://www.apache.org/licenses/LICENSE-2.0. -// // -// // SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 -// // -// // Contributors: -// // ZettaScale Zenoh Team, -// // - -// //! ⚠️ WARNING ⚠️ -// //! -// //! This module is intended for Zenoh's internal use. -// //! -// //! [Click here for Zenoh's documentation](../zenoh/index.html) -// use super::RoutingContext; -// use zenoh_config::Config; -// use zenoh_protocol::network::NetworkMessage; -// use zenoh_transport::{multicast::TransportMulticast, unicast::TransportUnicast}; - -// pub(crate) trait InterceptorTrait { -// fn intercept( -// &self, -// ctx: RoutingContext, -// ) -> Option>; -// } - -// pub(crate) type Interceptor = Box; -// pub(crate) type IngressInterceptor = Interceptor; -// pub(crate) type EgressInterceptor = Interceptor; - -// pub(crate) trait InterceptorFactoryTrait { -// fn new_transport_unicast( -// &self, -// transport: &TransportUnicast, -// ) -> (Option, Option); -// fn new_transport_multicast(&self, transport: &TransportMulticast) -> Option; -// fn new_peer_multicast(&self, transport: &TransportMulticast) -> Option; -// } - -// pub(crate) type InterceptorFactory = Box; - -// pub(crate) fn interceptor_factories(_config: &Config) -> Vec { -// // Add interceptors here -// // @TODO build the list of intercetors with the correct order from the config -// vec![Box::new(LoggerInterceptor {})] -// //vec![] -// } - -// pub(crate) struct InterceptorsChain { -// pub(crate) interceptors: Vec, -// } - -// impl InterceptorsChain { -// #[allow(dead_code)] -// pub(crate) fn empty() -> Self { -// Self { -// interceptors: vec![], -// } -// } -// } - -// impl From> for InterceptorsChain { -// fn from(interceptors: Vec) -> Self { -// InterceptorsChain { interceptors } -// } -// } - -// impl InterceptorTrait for InterceptorsChain { -// fn intercept( -// &self, -// mut ctx: RoutingContext, -// ) -> Option> { -// for interceptor in &self.interceptors { -// match interceptor.intercept(ctx) { -// Some(newctx) => ctx = newctx, -// None => { -// log::trace!("Msg intercepted!"); -// return None; -// } -// } -// } -// Some(ctx) -// } -// } - -// pub(crate) struct IngressMsgLogger {} - -// impl InterceptorTrait for IngressMsgLogger { -// fn intercept( -// &self, -// ctx: RoutingContext, -// ) -> Option> { -// log::debug!( -// "Recv {} {} Expr:{:?}", -// ctx.inface() -// .map(|f| f.to_string()) -// .unwrap_or("None".to_string()), -// ctx.msg, -// ctx.full_expr(), -// ); -// Some(ctx) -// } -// } -// pub(crate) struct EgressMsgLogger {} - -// impl InterceptorTrait for EgressMsgLogger { -// fn intercept( -// &self, -// ctx: RoutingContext, -// ) -> Option> { -// log::debug!("Send {} Expr:{:?}", ctx.msg, ctx.full_expr()); -// Some(ctx) -// } -// } - -// pub(crate) struct LoggerInterceptor {} - -// impl InterceptorFactoryTrait for LoggerInterceptor { -// fn new_transport_unicast( -// &self, -// transport: &TransportUnicast, -// ) -> (Option, Option) { -// log::debug!("New transport unicast {:?}", transport); -// ( -// Some(Box::new(IngressMsgLogger {})), -// 
Some(Box::new(EgressMsgLogger {})), -// ) -// } - -// fn new_transport_multicast(&self, transport: &TransportMulticast) -> Option { -// log::debug!("New transport multicast {:?}", transport); -// Some(Box::new(EgressMsgLogger {})) -// } - -// fn new_peer_multicast(&self, transport: &TransportMulticast) -> Option { -// log::debug!("New peer multicast {:?}", transport); -// Some(Box::new(IngressMsgLogger {})) -// } -// } - // // Copyright (c) 2023 ZettaScale Technology // @@ -171,7 +26,12 @@ use super::RoutingContext; use crate::net::routing::interceptor::{accesscontrol::AclEnforcer, authz::PolicyEnforcer}; use zenoh_config::Config; use zenoh_protocol::network::NetworkMessage; +use zenoh_result::ZResult; use zenoh_transport::{multicast::TransportMulticast, unicast::TransportUnicast}; + +pub mod downsampling; +use crate::net::routing::interceptor::downsampling::downsampling_interceptor_factories; + pub(crate) trait InterceptorTrait { fn intercept( &self, @@ -194,21 +54,15 @@ pub(crate) trait InterceptorFactoryTrait { pub(crate) type InterceptorFactory = Box; -pub(crate) fn interceptor_factories(config: &Config) -> Vec { - // Add interceptors here - // TODO build the list of intercetors with the correct order from the config - //vec![Box::new(LoggerInterceptor {})] +pub(crate) fn interceptor_factories(config: &Config) -> ZResult> { + let mut res: Vec = vec![]; - let mut res: Vec = Vec::new(); - /* - this is the singleton for interceptors - all init code for AC should be called here - example, for casbin we are using the enforecer init here - for in-built AC, we will load the policy rules here and also set the parameters (type of policy etc) - */ + // Uncomment to log the interceptors initialisation + // res.push(Box::new(LoggerInterceptor {})); /* if config condition is selected this will be initialiased; putting true for now */ res.push(Box::new(LoggerInterceptor {})); + res.extend(downsampling_interceptor_factories(config.downsampling())?); //get acl config let acl_config = config.transport().acl().clone(); //get this gracefully @@ -231,8 +85,7 @@ pub(crate) fn interceptor_factories(config: &Config) -> Vec ), } } - res - //store the enforcer instance for use in rest of the sessions + Ok(res) } pub(crate) struct InterceptorsChain { diff --git a/zenoh/src/net/routing/mod.rs b/zenoh/src/net/routing/mod.rs old mode 100644 new mode 100755 index 0b069c1337..373f7d8273 --- a/zenoh/src/net/routing/mod.rs +++ b/zenoh/src/net/routing/mod.rs @@ -24,6 +24,7 @@ pub mod router; use std::{cell::OnceCell, sync::Arc}; +use zenoh_protocol::core::key_expr::OwnedKeyExpr; use zenoh_protocol::{core::WireExpr, network::NetworkMessage}; use self::{dispatcher::face::Face, router::Resource}; @@ -168,4 +169,10 @@ impl RoutingContext { } None } + + #[inline] + pub(crate) fn full_key_expr(&self) -> Option { + let full_expr = self.full_expr()?; + OwnedKeyExpr::new(full_expr).ok() + } } diff --git a/zenoh/src/net/routing/router.rs b/zenoh/src/net/routing/router.rs old mode 100644 new mode 100755 index 26c9d36185..ba0249af1b --- a/zenoh/src/net/routing/router.rs +++ b/zenoh/src/net/routing/router.rs @@ -45,15 +45,20 @@ pub struct Router { } impl Router { - pub fn new(zid: ZenohId, whatami: WhatAmI, hlc: Option>, config: &Config) -> Self { - Router { + pub fn new( + zid: ZenohId, + whatami: WhatAmI, + hlc: Option>, + config: &Config, + ) -> ZResult { + Ok(Router { // whatami, tables: Arc::new(TablesLock { - tables: RwLock::new(Tables::new(zid, whatami, hlc, config)), + tables: RwLock::new(Tables::new(zid, whatami, hlc, 
config)?), ctrl_lock: Mutex::new(hat::new_hat(whatami, config)), queries_lock: RwLock::new(()), }), - } + }) } #[allow(clippy::too_many_arguments)] diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs old mode 100644 new mode 100755 diff --git a/zenoh/src/net/runtime/mod.rs b/zenoh/src/net/runtime/mod.rs old mode 100644 new mode 100755 index ac125421f6..7061b38622 --- a/zenoh/src/net/runtime/mod.rs +++ b/zenoh/src/net/runtime/mod.rs @@ -95,7 +95,7 @@ impl Runtime { let hlc = (*unwrap_or_default!(config.timestamping().enabled().get(whatami))) .then(|| Arc::new(HLCBuilder::new().with_id(uhlc::ID::from(&zid)).build())); - let router = Arc::new(Router::new(zid, whatami, hlc.clone(), &config)); + let router = Arc::new(Router::new(zid, whatami, hlc.clone(), &config)?); let handler = Arc::new(RuntimeTransportEventHandler { runtime: std::sync::RwLock::new(None), diff --git a/zenoh/src/net/runtime/orchestrator.rs b/zenoh/src/net/runtime/orchestrator.rs old mode 100644 new mode 100755 diff --git a/zenoh/src/net/tests/mod.rs b/zenoh/src/net/tests/mod.rs old mode 100644 new mode 100755 diff --git a/zenoh/src/net/tests/tables.rs b/zenoh/src/net/tests/tables.rs old mode 100644 new mode 100755 index 363803f682..ddcdc0084e --- a/zenoh/src/net/tests/tables.rs +++ b/zenoh/src/net/tests/tables.rs @@ -38,7 +38,8 @@ fn base_test() { WhatAmI::Client, Some(Arc::new(HLC::default())), &config, - ); + ) + .unwrap(); let tables = router.tables.clone(); let primitives = Arc::new(DummyPrimitives {}); @@ -133,7 +134,8 @@ fn match_test() { WhatAmI::Client, Some(Arc::new(HLC::default())), &config, - ); + ) + .unwrap(); let tables = router.tables.clone(); let primitives = Arc::new(DummyPrimitives {}); @@ -172,7 +174,8 @@ fn clean_test() { WhatAmI::Client, Some(Arc::new(HLC::default())), &config, - ); + ) + .unwrap(); let tables = router.tables.clone(); let primitives = Arc::new(DummyPrimitives {}); @@ -478,7 +481,8 @@ fn client_test() { WhatAmI::Client, Some(Arc::new(HLC::default())), &config, - ); + ) + .unwrap(); let tables = router.tables.clone(); let sub_info = SubscriberInfo { From a8bb31e99a7da71678c589200e27bf14177debe0 Mon Sep 17 00:00:00 2001 From: snehilzs Date: Wed, 21 Feb 2024 19:54:26 +0100 Subject: [PATCH 057/122] Revert "WIP:merging changes" This reverts commit 50c9f0eaabd77662f34f7f736e7947e1134e6307. 
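Context for the revert that follows: patch 056 above made `Tables::new`, `Router::new` and `interceptor_factories` fallible, so that interceptor configuration problems are reported when the runtime starts rather than panicking inside interceptor setup. Below is a minimal, self-contained sketch of that error-propagation pattern only; the types are simplified stand-ins, `config_ok` is a hypothetical flag, and a plain `Result<T, String>` replaces zenoh's `ZResult`.

    // Illustrative only: simplified stand-ins for the real Tables / Router types.
    type Result<T> = std::result::Result<T, String>; // stands in for zenoh_result::ZResult

    struct Tables;                      // routing state elided
    struct Router { _tables: Tables }

    // Hypothetical factory: the point is only that it can fail.
    fn interceptor_factories(config_ok: bool) -> Result<Vec<&'static str>> {
        if !config_ok {
            return Err("invalid interceptor configuration".into());
        }
        Ok(vec!["logger", "downsampling"])
    }

    impl Tables {
        fn new(config_ok: bool) -> Result<Self> {
            let _factories = interceptor_factories(config_ok)?; // error bubbles up
            Ok(Tables)
        }
    }

    impl Router {
        fn new(config_ok: bool) -> Result<Self> {
            Ok(Router { _tables: Tables::new(config_ok)? })
        }
    }

    fn main() {
        // A bad config now surfaces as an Err at startup instead of a panic
        // inside the interceptor setup.
        match Router::new(false) {
            Ok(_) => println!("router started"),
            Err(e) => eprintln!("failed to start router: {e}"),
        }
    }

The revert below rolls this wiring back; the fallible-constructor shape returns in later patches in this series.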
--- DEFAULT_CONFIG.json5 | 17 +- commons/zenoh-config/Cargo.toml | 0 commons/zenoh-config/README.md | 0 commons/zenoh-config/src/defaults.rs | 0 commons/zenoh-config/src/include.rs | 0 commons/zenoh-config/src/lib.rs | 0 pub_config.json5 | 0 rules_test.json5 | 0 zenoh/src/net/codec/linkstate.rs | 0 zenoh/src/net/codec/mod.rs | 0 zenoh/src/net/mod.rs | 0 zenoh/src/net/primitives/demux.rs | 0 zenoh/src/net/primitives/mod.rs | 0 zenoh/src/net/primitives/mux.rs | 0 zenoh/src/net/protocol/linkstate.rs | 0 zenoh/src/net/protocol/mod.rs | 0 zenoh/src/net/routing/dispatcher/face.rs | 0 zenoh/src/net/routing/dispatcher/mod.rs | 0 zenoh/src/net/routing/dispatcher/pubsub.rs | 0 zenoh/src/net/routing/dispatcher/queries.rs | 0 zenoh/src/net/routing/dispatcher/resource.rs | 0 zenoh/src/net/routing/dispatcher/tables.rs | 14 +- zenoh/src/net/routing/hat/client/mod.rs | 0 zenoh/src/net/routing/hat/client/pubsub.rs | 0 zenoh/src/net/routing/hat/client/queries.rs | 0 .../src/net/routing/hat/linkstate_peer/mod.rs | 0 .../net/routing/hat/linkstate_peer/network.rs | 0 .../net/routing/hat/linkstate_peer/pubsub.rs | 0 .../net/routing/hat/linkstate_peer/queries.rs | 0 zenoh/src/net/routing/hat/mod.rs | 0 zenoh/src/net/routing/hat/p2p_peer/gossip.rs | 0 zenoh/src/net/routing/hat/p2p_peer/mod.rs | 0 zenoh/src/net/routing/hat/p2p_peer/pubsub.rs | 0 zenoh/src/net/routing/hat/p2p_peer/queries.rs | 0 zenoh/src/net/routing/hat/router/mod.rs | 0 zenoh/src/net/routing/hat/router/network.rs | 0 zenoh/src/net/routing/hat/router/pubsub.rs | 0 zenoh/src/net/routing/hat/router/queries.rs | 0 .../net/routing/interceptor/accesscontrol.rs | 0 zenoh/src/net/routing/interceptor/authz.rs | 0 .../net/routing/interceptor/downsampling.rs | 0 zenoh/src/net/routing/interceptor/mod.rs | 169 ++++++++++++++++-- zenoh/src/net/routing/mod.rs | 7 - zenoh/src/net/routing/router.rs | 13 +- zenoh/src/net/runtime/adminspace.rs | 0 zenoh/src/net/runtime/mod.rs | 2 +- zenoh/src/net/runtime/orchestrator.rs | 0 zenoh/src/net/tests/mod.rs | 0 zenoh/src/net/tests/tables.rs | 12 +- 49 files changed, 172 insertions(+), 62 deletions(-) mode change 100755 => 100644 DEFAULT_CONFIG.json5 mode change 100755 => 100644 commons/zenoh-config/Cargo.toml mode change 100755 => 100644 commons/zenoh-config/README.md mode change 100755 => 100644 commons/zenoh-config/src/defaults.rs mode change 100755 => 100644 commons/zenoh-config/src/include.rs mode change 100755 => 100644 commons/zenoh-config/src/lib.rs mode change 100755 => 100644 pub_config.json5 mode change 100755 => 100644 rules_test.json5 mode change 100755 => 100644 zenoh/src/net/codec/linkstate.rs mode change 100755 => 100644 zenoh/src/net/codec/mod.rs mode change 100755 => 100644 zenoh/src/net/mod.rs mode change 100755 => 100644 zenoh/src/net/primitives/demux.rs mode change 100755 => 100644 zenoh/src/net/primitives/mod.rs mode change 100755 => 100644 zenoh/src/net/primitives/mux.rs mode change 100755 => 100644 zenoh/src/net/protocol/linkstate.rs mode change 100755 => 100644 zenoh/src/net/protocol/mod.rs mode change 100755 => 100644 zenoh/src/net/routing/dispatcher/face.rs mode change 100755 => 100644 zenoh/src/net/routing/dispatcher/mod.rs mode change 100755 => 100644 zenoh/src/net/routing/dispatcher/pubsub.rs mode change 100755 => 100644 zenoh/src/net/routing/dispatcher/queries.rs mode change 100755 => 100644 zenoh/src/net/routing/dispatcher/resource.rs mode change 100755 => 100644 zenoh/src/net/routing/dispatcher/tables.rs mode change 100755 => 100644 zenoh/src/net/routing/hat/client/mod.rs mode change 100755 
=> 100644 zenoh/src/net/routing/hat/client/pubsub.rs mode change 100755 => 100644 zenoh/src/net/routing/hat/client/queries.rs mode change 100755 => 100644 zenoh/src/net/routing/hat/linkstate_peer/mod.rs mode change 100755 => 100644 zenoh/src/net/routing/hat/linkstate_peer/network.rs mode change 100755 => 100644 zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs mode change 100755 => 100644 zenoh/src/net/routing/hat/linkstate_peer/queries.rs mode change 100755 => 100644 zenoh/src/net/routing/hat/mod.rs mode change 100755 => 100644 zenoh/src/net/routing/hat/p2p_peer/gossip.rs mode change 100755 => 100644 zenoh/src/net/routing/hat/p2p_peer/mod.rs mode change 100755 => 100644 zenoh/src/net/routing/hat/p2p_peer/pubsub.rs mode change 100755 => 100644 zenoh/src/net/routing/hat/p2p_peer/queries.rs mode change 100755 => 100644 zenoh/src/net/routing/hat/router/mod.rs mode change 100755 => 100644 zenoh/src/net/routing/hat/router/network.rs mode change 100755 => 100644 zenoh/src/net/routing/hat/router/pubsub.rs mode change 100755 => 100644 zenoh/src/net/routing/hat/router/queries.rs mode change 100755 => 100644 zenoh/src/net/routing/interceptor/accesscontrol.rs mode change 100755 => 100644 zenoh/src/net/routing/interceptor/authz.rs mode change 100755 => 100644 zenoh/src/net/routing/interceptor/downsampling.rs mode change 100755 => 100644 zenoh/src/net/routing/interceptor/mod.rs mode change 100755 => 100644 zenoh/src/net/routing/mod.rs mode change 100755 => 100644 zenoh/src/net/routing/router.rs mode change 100755 => 100644 zenoh/src/net/runtime/adminspace.rs mode change 100755 => 100644 zenoh/src/net/runtime/mod.rs mode change 100755 => 100644 zenoh/src/net/runtime/orchestrator.rs mode change 100755 => 100644 zenoh/src/net/tests/mod.rs mode change 100755 => 100644 zenoh/src/net/tests/tables.rs diff --git a/DEFAULT_CONFIG.json5 b/DEFAULT_CONFIG.json5 old mode 100755 new mode 100644 index b68718ac05..28a50b586d --- a/DEFAULT_CONFIG.json5 +++ b/DEFAULT_CONFIG.json5 @@ -108,21 +108,6 @@ // // key_expression // ], // }, - - // /// The downsampling declaration. - // downsampling: [ - // { - // /// A list of network interfaces messages will be processed on, the rest will be passed as is. - // interfaces: [ "wlan0" ], - // /// Data flow messages will be processed on. 
("egress" or "ingress") - // flow: "egress", - // /// A list of downsampling rules: key_expression and the rate (maximum frequency in Hertz) - // rules: [ - // { key_expr: "demo/example/zenoh-rs-pub", rate: 0.1 }, - // ], - // }, - // ], - /// Configure internal transport parameters transport: { unicast: { @@ -499,4 +484,4 @@ // __config__: "./plugins/zenoh-plugin-storage-manager/config.json5", // } // }, -} +} \ No newline at end of file diff --git a/commons/zenoh-config/Cargo.toml b/commons/zenoh-config/Cargo.toml old mode 100755 new mode 100644 diff --git a/commons/zenoh-config/README.md b/commons/zenoh-config/README.md old mode 100755 new mode 100644 diff --git a/commons/zenoh-config/src/defaults.rs b/commons/zenoh-config/src/defaults.rs old mode 100755 new mode 100644 diff --git a/commons/zenoh-config/src/include.rs b/commons/zenoh-config/src/include.rs old mode 100755 new mode 100644 diff --git a/commons/zenoh-config/src/lib.rs b/commons/zenoh-config/src/lib.rs old mode 100755 new mode 100644 diff --git a/pub_config.json5 b/pub_config.json5 old mode 100755 new mode 100644 diff --git a/rules_test.json5 b/rules_test.json5 old mode 100755 new mode 100644 diff --git a/zenoh/src/net/codec/linkstate.rs b/zenoh/src/net/codec/linkstate.rs old mode 100755 new mode 100644 diff --git a/zenoh/src/net/codec/mod.rs b/zenoh/src/net/codec/mod.rs old mode 100755 new mode 100644 diff --git a/zenoh/src/net/mod.rs b/zenoh/src/net/mod.rs old mode 100755 new mode 100644 diff --git a/zenoh/src/net/primitives/demux.rs b/zenoh/src/net/primitives/demux.rs old mode 100755 new mode 100644 diff --git a/zenoh/src/net/primitives/mod.rs b/zenoh/src/net/primitives/mod.rs old mode 100755 new mode 100644 diff --git a/zenoh/src/net/primitives/mux.rs b/zenoh/src/net/primitives/mux.rs old mode 100755 new mode 100644 diff --git a/zenoh/src/net/protocol/linkstate.rs b/zenoh/src/net/protocol/linkstate.rs old mode 100755 new mode 100644 diff --git a/zenoh/src/net/protocol/mod.rs b/zenoh/src/net/protocol/mod.rs old mode 100755 new mode 100644 diff --git a/zenoh/src/net/routing/dispatcher/face.rs b/zenoh/src/net/routing/dispatcher/face.rs old mode 100755 new mode 100644 diff --git a/zenoh/src/net/routing/dispatcher/mod.rs b/zenoh/src/net/routing/dispatcher/mod.rs old mode 100755 new mode 100644 diff --git a/zenoh/src/net/routing/dispatcher/pubsub.rs b/zenoh/src/net/routing/dispatcher/pubsub.rs old mode 100755 new mode 100644 diff --git a/zenoh/src/net/routing/dispatcher/queries.rs b/zenoh/src/net/routing/dispatcher/queries.rs old mode 100755 new mode 100644 diff --git a/zenoh/src/net/routing/dispatcher/resource.rs b/zenoh/src/net/routing/dispatcher/resource.rs old mode 100755 new mode 100644 diff --git a/zenoh/src/net/routing/dispatcher/tables.rs b/zenoh/src/net/routing/dispatcher/tables.rs old mode 100755 new mode 100644 index e239a316a1..274b600024 --- a/zenoh/src/net/routing/dispatcher/tables.rs +++ b/zenoh/src/net/routing/dispatcher/tables.rs @@ -28,7 +28,6 @@ use zenoh_config::unwrap_or_default; use zenoh_config::Config; use zenoh_protocol::core::{ExprId, WhatAmI, ZenohId}; use zenoh_protocol::network::Mapping; -use zenoh_result::ZResult; // use zenoh_collections::Timer; use zenoh_sync::get_mut_unchecked; @@ -77,12 +76,7 @@ pub struct Tables { } impl Tables { - pub fn new( - zid: ZenohId, - whatami: WhatAmI, - hlc: Option>, - config: &Config, - ) -> ZResult { + pub fn new(zid: ZenohId, whatami: WhatAmI, hlc: Option>, config: &Config) -> Self { let drop_future_timestamp = 
unwrap_or_default!(config.timestamping().drop_future_timestamp()); let router_peers_failover_brokering = @@ -90,7 +84,7 @@ impl Tables { // let queries_default_timeout = // Duration::from_millis(unwrap_or_default!(config.queries_default_timeout())); let hat_code = hat::new_hat(whatami, config); - Ok(Tables { + Tables { zid, whatami, face_counter: 0, @@ -102,11 +96,11 @@ impl Tables { faces: HashMap::new(), mcast_groups: vec![], mcast_faces: vec![], - interceptors: interceptor_factories(config)?, + interceptors: interceptor_factories(config), pull_caches_lock: Mutex::new(()), hat: hat_code.new_tables(router_peers_failover_brokering), hat_code: hat_code.into(), - }) + } } #[doc(hidden)] diff --git a/zenoh/src/net/routing/hat/client/mod.rs b/zenoh/src/net/routing/hat/client/mod.rs old mode 100755 new mode 100644 diff --git a/zenoh/src/net/routing/hat/client/pubsub.rs b/zenoh/src/net/routing/hat/client/pubsub.rs old mode 100755 new mode 100644 diff --git a/zenoh/src/net/routing/hat/client/queries.rs b/zenoh/src/net/routing/hat/client/queries.rs old mode 100755 new mode 100644 diff --git a/zenoh/src/net/routing/hat/linkstate_peer/mod.rs b/zenoh/src/net/routing/hat/linkstate_peer/mod.rs old mode 100755 new mode 100644 diff --git a/zenoh/src/net/routing/hat/linkstate_peer/network.rs b/zenoh/src/net/routing/hat/linkstate_peer/network.rs old mode 100755 new mode 100644 diff --git a/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs b/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs old mode 100755 new mode 100644 diff --git a/zenoh/src/net/routing/hat/linkstate_peer/queries.rs b/zenoh/src/net/routing/hat/linkstate_peer/queries.rs old mode 100755 new mode 100644 diff --git a/zenoh/src/net/routing/hat/mod.rs b/zenoh/src/net/routing/hat/mod.rs old mode 100755 new mode 100644 diff --git a/zenoh/src/net/routing/hat/p2p_peer/gossip.rs b/zenoh/src/net/routing/hat/p2p_peer/gossip.rs old mode 100755 new mode 100644 diff --git a/zenoh/src/net/routing/hat/p2p_peer/mod.rs b/zenoh/src/net/routing/hat/p2p_peer/mod.rs old mode 100755 new mode 100644 diff --git a/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs b/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs old mode 100755 new mode 100644 diff --git a/zenoh/src/net/routing/hat/p2p_peer/queries.rs b/zenoh/src/net/routing/hat/p2p_peer/queries.rs old mode 100755 new mode 100644 diff --git a/zenoh/src/net/routing/hat/router/mod.rs b/zenoh/src/net/routing/hat/router/mod.rs old mode 100755 new mode 100644 diff --git a/zenoh/src/net/routing/hat/router/network.rs b/zenoh/src/net/routing/hat/router/network.rs old mode 100755 new mode 100644 diff --git a/zenoh/src/net/routing/hat/router/pubsub.rs b/zenoh/src/net/routing/hat/router/pubsub.rs old mode 100755 new mode 100644 diff --git a/zenoh/src/net/routing/hat/router/queries.rs b/zenoh/src/net/routing/hat/router/queries.rs old mode 100755 new mode 100644 diff --git a/zenoh/src/net/routing/interceptor/accesscontrol.rs b/zenoh/src/net/routing/interceptor/accesscontrol.rs old mode 100755 new mode 100644 diff --git a/zenoh/src/net/routing/interceptor/authz.rs b/zenoh/src/net/routing/interceptor/authz.rs old mode 100755 new mode 100644 diff --git a/zenoh/src/net/routing/interceptor/downsampling.rs b/zenoh/src/net/routing/interceptor/downsampling.rs old mode 100755 new mode 100644 diff --git a/zenoh/src/net/routing/interceptor/mod.rs b/zenoh/src/net/routing/interceptor/mod.rs old mode 100755 new mode 100644 index f8b9f2d389..b2135b2b47 --- a/zenoh/src/net/routing/interceptor/mod.rs +++ b/zenoh/src/net/routing/interceptor/mod.rs @@ 
-1,3 +1,148 @@ +// // +// // Copyright (c) 2023 ZettaScale Technology +// // +// // This program and the accompanying materials are made available under the +// // terms of the Eclipse Public License 2.0 which is available at +// // http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// // which is available at https://www.apache.org/licenses/LICENSE-2.0. +// // +// // SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// // +// // Contributors: +// // ZettaScale Zenoh Team, +// // + +// //! ⚠️ WARNING ⚠️ +// //! +// //! This module is intended for Zenoh's internal use. +// //! +// //! [Click here for Zenoh's documentation](../zenoh/index.html) +// use super::RoutingContext; +// use zenoh_config::Config; +// use zenoh_protocol::network::NetworkMessage; +// use zenoh_transport::{multicast::TransportMulticast, unicast::TransportUnicast}; + +// pub(crate) trait InterceptorTrait { +// fn intercept( +// &self, +// ctx: RoutingContext, +// ) -> Option>; +// } + +// pub(crate) type Interceptor = Box; +// pub(crate) type IngressInterceptor = Interceptor; +// pub(crate) type EgressInterceptor = Interceptor; + +// pub(crate) trait InterceptorFactoryTrait { +// fn new_transport_unicast( +// &self, +// transport: &TransportUnicast, +// ) -> (Option, Option); +// fn new_transport_multicast(&self, transport: &TransportMulticast) -> Option; +// fn new_peer_multicast(&self, transport: &TransportMulticast) -> Option; +// } + +// pub(crate) type InterceptorFactory = Box; + +// pub(crate) fn interceptor_factories(_config: &Config) -> Vec { +// // Add interceptors here +// // @TODO build the list of intercetors with the correct order from the config +// vec![Box::new(LoggerInterceptor {})] +// //vec![] +// } + +// pub(crate) struct InterceptorsChain { +// pub(crate) interceptors: Vec, +// } + +// impl InterceptorsChain { +// #[allow(dead_code)] +// pub(crate) fn empty() -> Self { +// Self { +// interceptors: vec![], +// } +// } +// } + +// impl From> for InterceptorsChain { +// fn from(interceptors: Vec) -> Self { +// InterceptorsChain { interceptors } +// } +// } + +// impl InterceptorTrait for InterceptorsChain { +// fn intercept( +// &self, +// mut ctx: RoutingContext, +// ) -> Option> { +// for interceptor in &self.interceptors { +// match interceptor.intercept(ctx) { +// Some(newctx) => ctx = newctx, +// None => { +// log::trace!("Msg intercepted!"); +// return None; +// } +// } +// } +// Some(ctx) +// } +// } + +// pub(crate) struct IngressMsgLogger {} + +// impl InterceptorTrait for IngressMsgLogger { +// fn intercept( +// &self, +// ctx: RoutingContext, +// ) -> Option> { +// log::debug!( +// "Recv {} {} Expr:{:?}", +// ctx.inface() +// .map(|f| f.to_string()) +// .unwrap_or("None".to_string()), +// ctx.msg, +// ctx.full_expr(), +// ); +// Some(ctx) +// } +// } +// pub(crate) struct EgressMsgLogger {} + +// impl InterceptorTrait for EgressMsgLogger { +// fn intercept( +// &self, +// ctx: RoutingContext, +// ) -> Option> { +// log::debug!("Send {} Expr:{:?}", ctx.msg, ctx.full_expr()); +// Some(ctx) +// } +// } + +// pub(crate) struct LoggerInterceptor {} + +// impl InterceptorFactoryTrait for LoggerInterceptor { +// fn new_transport_unicast( +// &self, +// transport: &TransportUnicast, +// ) -> (Option, Option) { +// log::debug!("New transport unicast {:?}", transport); +// ( +// Some(Box::new(IngressMsgLogger {})), +// Some(Box::new(EgressMsgLogger {})), +// ) +// } + +// fn new_transport_multicast(&self, transport: &TransportMulticast) -> Option { +// log::debug!("New transport 
multicast {:?}", transport); +// Some(Box::new(EgressMsgLogger {})) +// } + +// fn new_peer_multicast(&self, transport: &TransportMulticast) -> Option { +// log::debug!("New peer multicast {:?}", transport); +// Some(Box::new(IngressMsgLogger {})) +// } +// } + // // Copyright (c) 2023 ZettaScale Technology // @@ -26,12 +171,7 @@ use super::RoutingContext; use crate::net::routing::interceptor::{accesscontrol::AclEnforcer, authz::PolicyEnforcer}; use zenoh_config::Config; use zenoh_protocol::network::NetworkMessage; -use zenoh_result::ZResult; use zenoh_transport::{multicast::TransportMulticast, unicast::TransportUnicast}; - -pub mod downsampling; -use crate::net::routing::interceptor::downsampling::downsampling_interceptor_factories; - pub(crate) trait InterceptorTrait { fn intercept( &self, @@ -54,15 +194,21 @@ pub(crate) trait InterceptorFactoryTrait { pub(crate) type InterceptorFactory = Box; -pub(crate) fn interceptor_factories(config: &Config) -> ZResult> { - let mut res: Vec = vec![]; +pub(crate) fn interceptor_factories(config: &Config) -> Vec { + // Add interceptors here + // TODO build the list of intercetors with the correct order from the config + //vec![Box::new(LoggerInterceptor {})] - // Uncomment to log the interceptors initialisation - // res.push(Box::new(LoggerInterceptor {})); + let mut res: Vec = Vec::new(); + /* + this is the singleton for interceptors + all init code for AC should be called here + example, for casbin we are using the enforecer init here + for in-built AC, we will load the policy rules here and also set the parameters (type of policy etc) + */ /* if config condition is selected this will be initialiased; putting true for now */ res.push(Box::new(LoggerInterceptor {})); - res.extend(downsampling_interceptor_factories(config.downsampling())?); //get acl config let acl_config = config.transport().acl().clone(); //get this gracefully @@ -85,7 +231,8 @@ pub(crate) fn interceptor_factories(config: &Config) -> ZResult { } None } - - #[inline] - pub(crate) fn full_key_expr(&self) -> Option { - let full_expr = self.full_expr()?; - OwnedKeyExpr::new(full_expr).ok() - } } diff --git a/zenoh/src/net/routing/router.rs b/zenoh/src/net/routing/router.rs old mode 100755 new mode 100644 index ba0249af1b..26c9d36185 --- a/zenoh/src/net/routing/router.rs +++ b/zenoh/src/net/routing/router.rs @@ -45,20 +45,15 @@ pub struct Router { } impl Router { - pub fn new( - zid: ZenohId, - whatami: WhatAmI, - hlc: Option>, - config: &Config, - ) -> ZResult { - Ok(Router { + pub fn new(zid: ZenohId, whatami: WhatAmI, hlc: Option>, config: &Config) -> Self { + Router { // whatami, tables: Arc::new(TablesLock { - tables: RwLock::new(Tables::new(zid, whatami, hlc, config)?), + tables: RwLock::new(Tables::new(zid, whatami, hlc, config)), ctrl_lock: Mutex::new(hat::new_hat(whatami, config)), queries_lock: RwLock::new(()), }), - }) + } } #[allow(clippy::too_many_arguments)] diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs old mode 100755 new mode 100644 diff --git a/zenoh/src/net/runtime/mod.rs b/zenoh/src/net/runtime/mod.rs old mode 100755 new mode 100644 index 7061b38622..ac125421f6 --- a/zenoh/src/net/runtime/mod.rs +++ b/zenoh/src/net/runtime/mod.rs @@ -95,7 +95,7 @@ impl Runtime { let hlc = (*unwrap_or_default!(config.timestamping().enabled().get(whatami))) .then(|| Arc::new(HLCBuilder::new().with_id(uhlc::ID::from(&zid)).build())); - let router = Arc::new(Router::new(zid, whatami, hlc.clone(), &config)?); + let router = Arc::new(Router::new(zid, 
whatami, hlc.clone(), &config)); let handler = Arc::new(RuntimeTransportEventHandler { runtime: std::sync::RwLock::new(None), diff --git a/zenoh/src/net/runtime/orchestrator.rs b/zenoh/src/net/runtime/orchestrator.rs old mode 100755 new mode 100644 diff --git a/zenoh/src/net/tests/mod.rs b/zenoh/src/net/tests/mod.rs old mode 100755 new mode 100644 diff --git a/zenoh/src/net/tests/tables.rs b/zenoh/src/net/tests/tables.rs old mode 100755 new mode 100644 index ddcdc0084e..363803f682 --- a/zenoh/src/net/tests/tables.rs +++ b/zenoh/src/net/tests/tables.rs @@ -38,8 +38,7 @@ fn base_test() { WhatAmI::Client, Some(Arc::new(HLC::default())), &config, - ) - .unwrap(); + ); let tables = router.tables.clone(); let primitives = Arc::new(DummyPrimitives {}); @@ -134,8 +133,7 @@ fn match_test() { WhatAmI::Client, Some(Arc::new(HLC::default())), &config, - ) - .unwrap(); + ); let tables = router.tables.clone(); let primitives = Arc::new(DummyPrimitives {}); @@ -174,8 +172,7 @@ fn clean_test() { WhatAmI::Client, Some(Arc::new(HLC::default())), &config, - ) - .unwrap(); + ); let tables = router.tables.clone(); let primitives = Arc::new(DummyPrimitives {}); @@ -481,8 +478,7 @@ fn client_test() { WhatAmI::Client, Some(Arc::new(HLC::default())), &config, - ) - .unwrap(); + ); let tables = router.tables.clone(); let sub_info = SubscriberInfo { From 187c4cb435e934fa61d069ea7e9c4d32b093ab48 Mon Sep 17 00:00:00 2001 From: snehilzs Date: Wed, 21 Feb 2024 20:10:56 +0100 Subject: [PATCH 058/122] WIP:merging new changes --- DEFAULT_CONFIG.json5 | 17 +- commons/zenoh-config/Cargo.toml | 0 commons/zenoh-config/README.md | 0 commons/zenoh-config/src/defaults.rs | 0 commons/zenoh-config/src/include.rs | 0 commons/zenoh-config/src/lib.rs | 0 pub_config.json5 | 0 sub_config.json5 | 0 zenoh/src/net/codec/linkstate.rs | 0 zenoh/src/net/codec/mod.rs | 0 zenoh/src/net/mod.rs | 0 zenoh/src/net/primitives/demux.rs | 0 zenoh/src/net/primitives/mod.rs | 0 zenoh/src/net/primitives/mux.rs | 0 zenoh/src/net/protocol/linkstate.rs | 0 zenoh/src/net/protocol/mod.rs | 0 zenoh/src/net/routing/dispatcher/face.rs | 0 zenoh/src/net/routing/dispatcher/mod.rs | 0 zenoh/src/net/routing/dispatcher/pubsub.rs | 0 zenoh/src/net/routing/dispatcher/queries.rs | 0 zenoh/src/net/routing/dispatcher/resource.rs | 0 zenoh/src/net/routing/dispatcher/tables.rs | 14 +- zenoh/src/net/routing/hat/client/mod.rs | 0 zenoh/src/net/routing/hat/client/pubsub.rs | 0 zenoh/src/net/routing/hat/client/queries.rs | 0 .../src/net/routing/hat/linkstate_peer/mod.rs | 0 .../net/routing/hat/linkstate_peer/network.rs | 0 .../net/routing/hat/linkstate_peer/pubsub.rs | 0 .../net/routing/hat/linkstate_peer/queries.rs | 0 zenoh/src/net/routing/hat/mod.rs | 0 zenoh/src/net/routing/hat/p2p_peer/gossip.rs | 0 zenoh/src/net/routing/hat/p2p_peer/mod.rs | 0 zenoh/src/net/routing/hat/p2p_peer/pubsub.rs | 0 zenoh/src/net/routing/hat/p2p_peer/queries.rs | 0 zenoh/src/net/routing/hat/router/mod.rs | 0 zenoh/src/net/routing/hat/router/network.rs | 0 zenoh/src/net/routing/hat/router/pubsub.rs | 0 zenoh/src/net/routing/hat/router/queries.rs | 0 .../net/routing/interceptor/accesscontrol.rs | 0 zenoh/src/net/routing/interceptor/authz.rs | 0 .../net/routing/interceptor/downsampling.rs | 0 zenoh/src/net/routing/interceptor/mod.rs | 169 ++---------------- zenoh/src/net/routing/mod.rs | 7 + zenoh/src/net/routing/router.rs | 13 +- zenoh/src/net/runtime/adminspace.rs | 0 zenoh/src/net/runtime/mod.rs | 2 +- zenoh/src/net/runtime/orchestrator.rs | 0 zenoh/src/net/tests/mod.rs | 0 
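
A minimal sketch of how a custom interceptor could plug into the `InterceptorTrait` / `InterceptorFactoryTrait` pair shown in the interceptor/mod.rs hunks above (not part of this patch series; the `<NetworkMessage>` generics are assumed from the surrounding code, where they are elided in the hunk text, and the `DenyPrefix*` names and behaviour are purely illustrative):

// Hypothetical example, not part of the patch: a prefix-based ingress filter
// implemented against the interceptor traits above.
use super::RoutingContext;
use zenoh_protocol::network::NetworkMessage;
use zenoh_transport::{multicast::TransportMulticast, unicast::TransportUnicast};

pub(crate) struct DenyPrefixInterceptor {
    prefix: String,
}

impl InterceptorTrait for DenyPrefixInterceptor {
    fn intercept(
        &self,
        ctx: RoutingContext<NetworkMessage>,
    ) -> Option<RoutingContext<NetworkMessage>> {
        // Returning None drops the message; Some(ctx) lets it continue down the chain.
        match ctx.full_expr() {
            Some(expr) if expr.starts_with(self.prefix.as_str()) => None,
            _ => Some(ctx),
        }
    }
}

pub(crate) struct DenyPrefixFactory {
    prefix: String,
}

impl InterceptorFactoryTrait for DenyPrefixFactory {
    fn new_transport_unicast(
        &self,
        _transport: &TransportUnicast,
    ) -> (Option<IngressInterceptor>, Option<EgressInterceptor>) {
        // Filter incoming traffic only; leave egress untouched.
        (
            Some(Box::new(DenyPrefixInterceptor {
                prefix: self.prefix.clone(),
            })),
            None,
        )
    }

    fn new_transport_multicast(&self, _transport: &TransportMulticast) -> Option<EgressInterceptor> {
        None
    }

    fn new_peer_multicast(&self, _transport: &TransportMulticast) -> Option<IngressInterceptor> {
        None
    }
}

Such a factory would be appended to the `res` vector assembled in `interceptor_factories`, alongside the `LoggerInterceptor` and the ACL enforcer already registered there.
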
zenoh/src/net/tests/tables.rs | 12 +- 49 files changed, 62 insertions(+), 172 deletions(-) mode change 100644 => 100755 DEFAULT_CONFIG.json5 mode change 100644 => 100755 commons/zenoh-config/Cargo.toml mode change 100644 => 100755 commons/zenoh-config/README.md mode change 100644 => 100755 commons/zenoh-config/src/defaults.rs mode change 100644 => 100755 commons/zenoh-config/src/include.rs mode change 100644 => 100755 commons/zenoh-config/src/lib.rs mode change 100644 => 100755 pub_config.json5 mode change 100644 => 100755 sub_config.json5 mode change 100644 => 100755 zenoh/src/net/codec/linkstate.rs mode change 100644 => 100755 zenoh/src/net/codec/mod.rs mode change 100644 => 100755 zenoh/src/net/mod.rs mode change 100644 => 100755 zenoh/src/net/primitives/demux.rs mode change 100644 => 100755 zenoh/src/net/primitives/mod.rs mode change 100644 => 100755 zenoh/src/net/primitives/mux.rs mode change 100644 => 100755 zenoh/src/net/protocol/linkstate.rs mode change 100644 => 100755 zenoh/src/net/protocol/mod.rs mode change 100644 => 100755 zenoh/src/net/routing/dispatcher/face.rs mode change 100644 => 100755 zenoh/src/net/routing/dispatcher/mod.rs mode change 100644 => 100755 zenoh/src/net/routing/dispatcher/pubsub.rs mode change 100644 => 100755 zenoh/src/net/routing/dispatcher/queries.rs mode change 100644 => 100755 zenoh/src/net/routing/dispatcher/resource.rs mode change 100644 => 100755 zenoh/src/net/routing/dispatcher/tables.rs mode change 100644 => 100755 zenoh/src/net/routing/hat/client/mod.rs mode change 100644 => 100755 zenoh/src/net/routing/hat/client/pubsub.rs mode change 100644 => 100755 zenoh/src/net/routing/hat/client/queries.rs mode change 100644 => 100755 zenoh/src/net/routing/hat/linkstate_peer/mod.rs mode change 100644 => 100755 zenoh/src/net/routing/hat/linkstate_peer/network.rs mode change 100644 => 100755 zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs mode change 100644 => 100755 zenoh/src/net/routing/hat/linkstate_peer/queries.rs mode change 100644 => 100755 zenoh/src/net/routing/hat/mod.rs mode change 100644 => 100755 zenoh/src/net/routing/hat/p2p_peer/gossip.rs mode change 100644 => 100755 zenoh/src/net/routing/hat/p2p_peer/mod.rs mode change 100644 => 100755 zenoh/src/net/routing/hat/p2p_peer/pubsub.rs mode change 100644 => 100755 zenoh/src/net/routing/hat/p2p_peer/queries.rs mode change 100644 => 100755 zenoh/src/net/routing/hat/router/mod.rs mode change 100644 => 100755 zenoh/src/net/routing/hat/router/network.rs mode change 100644 => 100755 zenoh/src/net/routing/hat/router/pubsub.rs mode change 100644 => 100755 zenoh/src/net/routing/hat/router/queries.rs mode change 100644 => 100755 zenoh/src/net/routing/interceptor/accesscontrol.rs mode change 100644 => 100755 zenoh/src/net/routing/interceptor/authz.rs mode change 100644 => 100755 zenoh/src/net/routing/interceptor/downsampling.rs mode change 100644 => 100755 zenoh/src/net/routing/interceptor/mod.rs mode change 100644 => 100755 zenoh/src/net/routing/mod.rs mode change 100644 => 100755 zenoh/src/net/routing/router.rs mode change 100644 => 100755 zenoh/src/net/runtime/adminspace.rs mode change 100644 => 100755 zenoh/src/net/runtime/mod.rs mode change 100644 => 100755 zenoh/src/net/runtime/orchestrator.rs mode change 100644 => 100755 zenoh/src/net/tests/mod.rs mode change 100644 => 100755 zenoh/src/net/tests/tables.rs diff --git a/DEFAULT_CONFIG.json5 b/DEFAULT_CONFIG.json5 old mode 100644 new mode 100755 index 28a50b586d..b68718ac05 --- a/DEFAULT_CONFIG.json5 +++ b/DEFAULT_CONFIG.json5 @@ -108,6 +108,21 @@ // // 
key_expression // ], // }, + + // /// The downsampling declaration. + // downsampling: [ + // { + // /// A list of network interfaces messages will be processed on, the rest will be passed as is. + // interfaces: [ "wlan0" ], + // /// Data flow messages will be processed on. ("egress" or "ingress") + // flow: "egress", + // /// A list of downsampling rules: key_expression and the rate (maximum frequency in Hertz) + // rules: [ + // { key_expr: "demo/example/zenoh-rs-pub", rate: 0.1 }, + // ], + // }, + // ], + /// Configure internal transport parameters transport: { unicast: { @@ -484,4 +499,4 @@ // __config__: "./plugins/zenoh-plugin-storage-manager/config.json5", // } // }, -} \ No newline at end of file +} diff --git a/commons/zenoh-config/Cargo.toml b/commons/zenoh-config/Cargo.toml old mode 100644 new mode 100755 diff --git a/commons/zenoh-config/README.md b/commons/zenoh-config/README.md old mode 100644 new mode 100755 diff --git a/commons/zenoh-config/src/defaults.rs b/commons/zenoh-config/src/defaults.rs old mode 100644 new mode 100755 diff --git a/commons/zenoh-config/src/include.rs b/commons/zenoh-config/src/include.rs old mode 100644 new mode 100755 diff --git a/commons/zenoh-config/src/lib.rs b/commons/zenoh-config/src/lib.rs old mode 100644 new mode 100755 diff --git a/pub_config.json5 b/pub_config.json5 old mode 100644 new mode 100755 diff --git a/sub_config.json5 b/sub_config.json5 old mode 100644 new mode 100755 diff --git a/zenoh/src/net/codec/linkstate.rs b/zenoh/src/net/codec/linkstate.rs old mode 100644 new mode 100755 diff --git a/zenoh/src/net/codec/mod.rs b/zenoh/src/net/codec/mod.rs old mode 100644 new mode 100755 diff --git a/zenoh/src/net/mod.rs b/zenoh/src/net/mod.rs old mode 100644 new mode 100755 diff --git a/zenoh/src/net/primitives/demux.rs b/zenoh/src/net/primitives/demux.rs old mode 100644 new mode 100755 diff --git a/zenoh/src/net/primitives/mod.rs b/zenoh/src/net/primitives/mod.rs old mode 100644 new mode 100755 diff --git a/zenoh/src/net/primitives/mux.rs b/zenoh/src/net/primitives/mux.rs old mode 100644 new mode 100755 diff --git a/zenoh/src/net/protocol/linkstate.rs b/zenoh/src/net/protocol/linkstate.rs old mode 100644 new mode 100755 diff --git a/zenoh/src/net/protocol/mod.rs b/zenoh/src/net/protocol/mod.rs old mode 100644 new mode 100755 diff --git a/zenoh/src/net/routing/dispatcher/face.rs b/zenoh/src/net/routing/dispatcher/face.rs old mode 100644 new mode 100755 diff --git a/zenoh/src/net/routing/dispatcher/mod.rs b/zenoh/src/net/routing/dispatcher/mod.rs old mode 100644 new mode 100755 diff --git a/zenoh/src/net/routing/dispatcher/pubsub.rs b/zenoh/src/net/routing/dispatcher/pubsub.rs old mode 100644 new mode 100755 diff --git a/zenoh/src/net/routing/dispatcher/queries.rs b/zenoh/src/net/routing/dispatcher/queries.rs old mode 100644 new mode 100755 diff --git a/zenoh/src/net/routing/dispatcher/resource.rs b/zenoh/src/net/routing/dispatcher/resource.rs old mode 100644 new mode 100755 diff --git a/zenoh/src/net/routing/dispatcher/tables.rs b/zenoh/src/net/routing/dispatcher/tables.rs old mode 100644 new mode 100755 index 274b600024..e239a316a1 --- a/zenoh/src/net/routing/dispatcher/tables.rs +++ b/zenoh/src/net/routing/dispatcher/tables.rs @@ -28,6 +28,7 @@ use zenoh_config::unwrap_or_default; use zenoh_config::Config; use zenoh_protocol::core::{ExprId, WhatAmI, ZenohId}; use zenoh_protocol::network::Mapping; +use zenoh_result::ZResult; // use zenoh_collections::Timer; use zenoh_sync::get_mut_unchecked; @@ -76,7 +77,12 @@ pub struct Tables { } impl 
Tables { - pub fn new(zid: ZenohId, whatami: WhatAmI, hlc: Option>, config: &Config) -> Self { + pub fn new( + zid: ZenohId, + whatami: WhatAmI, + hlc: Option>, + config: &Config, + ) -> ZResult { let drop_future_timestamp = unwrap_or_default!(config.timestamping().drop_future_timestamp()); let router_peers_failover_brokering = @@ -84,7 +90,7 @@ impl Tables { // let queries_default_timeout = // Duration::from_millis(unwrap_or_default!(config.queries_default_timeout())); let hat_code = hat::new_hat(whatami, config); - Tables { + Ok(Tables { zid, whatami, face_counter: 0, @@ -96,11 +102,11 @@ impl Tables { faces: HashMap::new(), mcast_groups: vec![], mcast_faces: vec![], - interceptors: interceptor_factories(config), + interceptors: interceptor_factories(config)?, pull_caches_lock: Mutex::new(()), hat: hat_code.new_tables(router_peers_failover_brokering), hat_code: hat_code.into(), - } + }) } #[doc(hidden)] diff --git a/zenoh/src/net/routing/hat/client/mod.rs b/zenoh/src/net/routing/hat/client/mod.rs old mode 100644 new mode 100755 diff --git a/zenoh/src/net/routing/hat/client/pubsub.rs b/zenoh/src/net/routing/hat/client/pubsub.rs old mode 100644 new mode 100755 diff --git a/zenoh/src/net/routing/hat/client/queries.rs b/zenoh/src/net/routing/hat/client/queries.rs old mode 100644 new mode 100755 diff --git a/zenoh/src/net/routing/hat/linkstate_peer/mod.rs b/zenoh/src/net/routing/hat/linkstate_peer/mod.rs old mode 100644 new mode 100755 diff --git a/zenoh/src/net/routing/hat/linkstate_peer/network.rs b/zenoh/src/net/routing/hat/linkstate_peer/network.rs old mode 100644 new mode 100755 diff --git a/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs b/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs old mode 100644 new mode 100755 diff --git a/zenoh/src/net/routing/hat/linkstate_peer/queries.rs b/zenoh/src/net/routing/hat/linkstate_peer/queries.rs old mode 100644 new mode 100755 diff --git a/zenoh/src/net/routing/hat/mod.rs b/zenoh/src/net/routing/hat/mod.rs old mode 100644 new mode 100755 diff --git a/zenoh/src/net/routing/hat/p2p_peer/gossip.rs b/zenoh/src/net/routing/hat/p2p_peer/gossip.rs old mode 100644 new mode 100755 diff --git a/zenoh/src/net/routing/hat/p2p_peer/mod.rs b/zenoh/src/net/routing/hat/p2p_peer/mod.rs old mode 100644 new mode 100755 diff --git a/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs b/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs old mode 100644 new mode 100755 diff --git a/zenoh/src/net/routing/hat/p2p_peer/queries.rs b/zenoh/src/net/routing/hat/p2p_peer/queries.rs old mode 100644 new mode 100755 diff --git a/zenoh/src/net/routing/hat/router/mod.rs b/zenoh/src/net/routing/hat/router/mod.rs old mode 100644 new mode 100755 diff --git a/zenoh/src/net/routing/hat/router/network.rs b/zenoh/src/net/routing/hat/router/network.rs old mode 100644 new mode 100755 diff --git a/zenoh/src/net/routing/hat/router/pubsub.rs b/zenoh/src/net/routing/hat/router/pubsub.rs old mode 100644 new mode 100755 diff --git a/zenoh/src/net/routing/hat/router/queries.rs b/zenoh/src/net/routing/hat/router/queries.rs old mode 100644 new mode 100755 diff --git a/zenoh/src/net/routing/interceptor/accesscontrol.rs b/zenoh/src/net/routing/interceptor/accesscontrol.rs old mode 100644 new mode 100755 diff --git a/zenoh/src/net/routing/interceptor/authz.rs b/zenoh/src/net/routing/interceptor/authz.rs old mode 100644 new mode 100755 diff --git a/zenoh/src/net/routing/interceptor/downsampling.rs b/zenoh/src/net/routing/interceptor/downsampling.rs old mode 100644 new mode 100755 diff --git 
a/zenoh/src/net/routing/interceptor/mod.rs b/zenoh/src/net/routing/interceptor/mod.rs old mode 100644 new mode 100755 index b2135b2b47..f8b9f2d389 --- a/zenoh/src/net/routing/interceptor/mod.rs +++ b/zenoh/src/net/routing/interceptor/mod.rs @@ -1,148 +1,3 @@ -// // -// // Copyright (c) 2023 ZettaScale Technology -// // -// // This program and the accompanying materials are made available under the -// // terms of the Eclipse Public License 2.0 which is available at -// // http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 -// // which is available at https://www.apache.org/licenses/LICENSE-2.0. -// // -// // SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 -// // -// // Contributors: -// // ZettaScale Zenoh Team, -// // - -// //! ⚠️ WARNING ⚠️ -// //! -// //! This module is intended for Zenoh's internal use. -// //! -// //! [Click here for Zenoh's documentation](../zenoh/index.html) -// use super::RoutingContext; -// use zenoh_config::Config; -// use zenoh_protocol::network::NetworkMessage; -// use zenoh_transport::{multicast::TransportMulticast, unicast::TransportUnicast}; - -// pub(crate) trait InterceptorTrait { -// fn intercept( -// &self, -// ctx: RoutingContext, -// ) -> Option>; -// } - -// pub(crate) type Interceptor = Box; -// pub(crate) type IngressInterceptor = Interceptor; -// pub(crate) type EgressInterceptor = Interceptor; - -// pub(crate) trait InterceptorFactoryTrait { -// fn new_transport_unicast( -// &self, -// transport: &TransportUnicast, -// ) -> (Option, Option); -// fn new_transport_multicast(&self, transport: &TransportMulticast) -> Option; -// fn new_peer_multicast(&self, transport: &TransportMulticast) -> Option; -// } - -// pub(crate) type InterceptorFactory = Box; - -// pub(crate) fn interceptor_factories(_config: &Config) -> Vec { -// // Add interceptors here -// // @TODO build the list of intercetors with the correct order from the config -// vec![Box::new(LoggerInterceptor {})] -// //vec![] -// } - -// pub(crate) struct InterceptorsChain { -// pub(crate) interceptors: Vec, -// } - -// impl InterceptorsChain { -// #[allow(dead_code)] -// pub(crate) fn empty() -> Self { -// Self { -// interceptors: vec![], -// } -// } -// } - -// impl From> for InterceptorsChain { -// fn from(interceptors: Vec) -> Self { -// InterceptorsChain { interceptors } -// } -// } - -// impl InterceptorTrait for InterceptorsChain { -// fn intercept( -// &self, -// mut ctx: RoutingContext, -// ) -> Option> { -// for interceptor in &self.interceptors { -// match interceptor.intercept(ctx) { -// Some(newctx) => ctx = newctx, -// None => { -// log::trace!("Msg intercepted!"); -// return None; -// } -// } -// } -// Some(ctx) -// } -// } - -// pub(crate) struct IngressMsgLogger {} - -// impl InterceptorTrait for IngressMsgLogger { -// fn intercept( -// &self, -// ctx: RoutingContext, -// ) -> Option> { -// log::debug!( -// "Recv {} {} Expr:{:?}", -// ctx.inface() -// .map(|f| f.to_string()) -// .unwrap_or("None".to_string()), -// ctx.msg, -// ctx.full_expr(), -// ); -// Some(ctx) -// } -// } -// pub(crate) struct EgressMsgLogger {} - -// impl InterceptorTrait for EgressMsgLogger { -// fn intercept( -// &self, -// ctx: RoutingContext, -// ) -> Option> { -// log::debug!("Send {} Expr:{:?}", ctx.msg, ctx.full_expr()); -// Some(ctx) -// } -// } - -// pub(crate) struct LoggerInterceptor {} - -// impl InterceptorFactoryTrait for LoggerInterceptor { -// fn new_transport_unicast( -// &self, -// transport: &TransportUnicast, -// ) -> (Option, Option) { -// log::debug!("New transport 
unicast {:?}", transport); -// ( -// Some(Box::new(IngressMsgLogger {})), -// Some(Box::new(EgressMsgLogger {})), -// ) -// } - -// fn new_transport_multicast(&self, transport: &TransportMulticast) -> Option { -// log::debug!("New transport multicast {:?}", transport); -// Some(Box::new(EgressMsgLogger {})) -// } - -// fn new_peer_multicast(&self, transport: &TransportMulticast) -> Option { -// log::debug!("New peer multicast {:?}", transport); -// Some(Box::new(IngressMsgLogger {})) -// } -// } - // // Copyright (c) 2023 ZettaScale Technology // @@ -171,7 +26,12 @@ use super::RoutingContext; use crate::net::routing::interceptor::{accesscontrol::AclEnforcer, authz::PolicyEnforcer}; use zenoh_config::Config; use zenoh_protocol::network::NetworkMessage; +use zenoh_result::ZResult; use zenoh_transport::{multicast::TransportMulticast, unicast::TransportUnicast}; + +pub mod downsampling; +use crate::net::routing::interceptor::downsampling::downsampling_interceptor_factories; + pub(crate) trait InterceptorTrait { fn intercept( &self, @@ -194,21 +54,15 @@ pub(crate) trait InterceptorFactoryTrait { pub(crate) type InterceptorFactory = Box; -pub(crate) fn interceptor_factories(config: &Config) -> Vec { - // Add interceptors here - // TODO build the list of intercetors with the correct order from the config - //vec![Box::new(LoggerInterceptor {})] +pub(crate) fn interceptor_factories(config: &Config) -> ZResult> { + let mut res: Vec = vec![]; - let mut res: Vec = Vec::new(); - /* - this is the singleton for interceptors - all init code for AC should be called here - example, for casbin we are using the enforecer init here - for in-built AC, we will load the policy rules here and also set the parameters (type of policy etc) - */ + // Uncomment to log the interceptors initialisation + // res.push(Box::new(LoggerInterceptor {})); /* if config condition is selected this will be initialiased; putting true for now */ res.push(Box::new(LoggerInterceptor {})); + res.extend(downsampling_interceptor_factories(config.downsampling())?); //get acl config let acl_config = config.transport().acl().clone(); //get this gracefully @@ -231,8 +85,7 @@ pub(crate) fn interceptor_factories(config: &Config) -> Vec ), } } - res - //store the enforcer instance for use in rest of the sessions + Ok(res) } pub(crate) struct InterceptorsChain { diff --git a/zenoh/src/net/routing/mod.rs b/zenoh/src/net/routing/mod.rs old mode 100644 new mode 100755 index 0b069c1337..373f7d8273 --- a/zenoh/src/net/routing/mod.rs +++ b/zenoh/src/net/routing/mod.rs @@ -24,6 +24,7 @@ pub mod router; use std::{cell::OnceCell, sync::Arc}; +use zenoh_protocol::core::key_expr::OwnedKeyExpr; use zenoh_protocol::{core::WireExpr, network::NetworkMessage}; use self::{dispatcher::face::Face, router::Resource}; @@ -168,4 +169,10 @@ impl RoutingContext { } None } + + #[inline] + pub(crate) fn full_key_expr(&self) -> Option { + let full_expr = self.full_expr()?; + OwnedKeyExpr::new(full_expr).ok() + } } diff --git a/zenoh/src/net/routing/router.rs b/zenoh/src/net/routing/router.rs old mode 100644 new mode 100755 index 26c9d36185..ba0249af1b --- a/zenoh/src/net/routing/router.rs +++ b/zenoh/src/net/routing/router.rs @@ -45,15 +45,20 @@ pub struct Router { } impl Router { - pub fn new(zid: ZenohId, whatami: WhatAmI, hlc: Option>, config: &Config) -> Self { - Router { + pub fn new( + zid: ZenohId, + whatami: WhatAmI, + hlc: Option>, + config: &Config, + ) -> ZResult { + Ok(Router { // whatami, tables: Arc::new(TablesLock { - tables: RwLock::new(Tables::new(zid, 
whatami, hlc, config)), + tables: RwLock::new(Tables::new(zid, whatami, hlc, config)?), ctrl_lock: Mutex::new(hat::new_hat(whatami, config)), queries_lock: RwLock::new(()), }), - } + }) } #[allow(clippy::too_many_arguments)] diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs old mode 100644 new mode 100755 diff --git a/zenoh/src/net/runtime/mod.rs b/zenoh/src/net/runtime/mod.rs old mode 100644 new mode 100755 index ac125421f6..7061b38622 --- a/zenoh/src/net/runtime/mod.rs +++ b/zenoh/src/net/runtime/mod.rs @@ -95,7 +95,7 @@ impl Runtime { let hlc = (*unwrap_or_default!(config.timestamping().enabled().get(whatami))) .then(|| Arc::new(HLCBuilder::new().with_id(uhlc::ID::from(&zid)).build())); - let router = Arc::new(Router::new(zid, whatami, hlc.clone(), &config)); + let router = Arc::new(Router::new(zid, whatami, hlc.clone(), &config)?); let handler = Arc::new(RuntimeTransportEventHandler { runtime: std::sync::RwLock::new(None), diff --git a/zenoh/src/net/runtime/orchestrator.rs b/zenoh/src/net/runtime/orchestrator.rs old mode 100644 new mode 100755 diff --git a/zenoh/src/net/tests/mod.rs b/zenoh/src/net/tests/mod.rs old mode 100644 new mode 100755 diff --git a/zenoh/src/net/tests/tables.rs b/zenoh/src/net/tests/tables.rs old mode 100644 new mode 100755 index 363803f682..ddcdc0084e --- a/zenoh/src/net/tests/tables.rs +++ b/zenoh/src/net/tests/tables.rs @@ -38,7 +38,8 @@ fn base_test() { WhatAmI::Client, Some(Arc::new(HLC::default())), &config, - ); + ) + .unwrap(); let tables = router.tables.clone(); let primitives = Arc::new(DummyPrimitives {}); @@ -133,7 +134,8 @@ fn match_test() { WhatAmI::Client, Some(Arc::new(HLC::default())), &config, - ); + ) + .unwrap(); let tables = router.tables.clone(); let primitives = Arc::new(DummyPrimitives {}); @@ -172,7 +174,8 @@ fn clean_test() { WhatAmI::Client, Some(Arc::new(HLC::default())), &config, - ); + ) + .unwrap(); let tables = router.tables.clone(); let primitives = Arc::new(DummyPrimitives {}); @@ -478,7 +481,8 @@ fn client_test() { WhatAmI::Client, Some(Arc::new(HLC::default())), &config, - ); + ) + .unwrap(); let tables = router.tables.clone(); let sub_info = SubscriberInfo { From a1fda634abd599727b5a6d7bd994f75bcb878a18 Mon Sep 17 00:00:00 2001 From: snehilzs Date: Wed, 21 Feb 2024 20:16:01 +0100 Subject: [PATCH 059/122] WIP:merging new changes --- .github/workflows/crates_check.sh | 0 .github/workflows/crates_publish.sh | 0 DEFAULT_CONFIG.json5 | 0 commons/zenoh-config/Cargo.toml | 0 commons/zenoh-config/README.md | 0 commons/zenoh-config/src/defaults.rs | 0 commons/zenoh-config/src/include.rs | 0 commons/zenoh-config/src/lib.rs | 0 pub_config.json5 | 0 sub_config.json5 | 0 zenoh/src/net/codec/linkstate.rs | 0 zenoh/src/net/codec/mod.rs | 0 zenoh/src/net/mod.rs | 0 zenoh/src/net/primitives/demux.rs | 0 zenoh/src/net/primitives/mod.rs | 0 zenoh/src/net/primitives/mux.rs | 0 zenoh/src/net/protocol/linkstate.rs | 0 zenoh/src/net/protocol/mod.rs | 0 zenoh/src/net/routing/dispatcher/face.rs | 0 zenoh/src/net/routing/dispatcher/mod.rs | 0 zenoh/src/net/routing/dispatcher/pubsub.rs | 0 zenoh/src/net/routing/dispatcher/queries.rs | 0 zenoh/src/net/routing/dispatcher/resource.rs | 0 zenoh/src/net/routing/dispatcher/tables.rs | 0 zenoh/src/net/routing/hat/client/mod.rs | 0 zenoh/src/net/routing/hat/client/pubsub.rs | 0 zenoh/src/net/routing/hat/client/queries.rs | 0 zenoh/src/net/routing/hat/linkstate_peer/mod.rs | 0 zenoh/src/net/routing/hat/linkstate_peer/network.rs | 0 
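
For context on the `ZResult` plumbing restored above: with `interceptor_factories` fallible again, a configuration error propagates through `Tables::new` and `Router::new` up to the runtime, which is why the tests regain their `.unwrap()` calls. A minimal sketch of a caller, assuming the same constructor arguments as the tests in zenoh/src/net/tests/tables.rs (the `build_router` helper itself is hypothetical):

// Sketch only: hypothetical helper, not part of the patch.
use std::sync::Arc;
use uhlc::HLC;
use zenoh_config::Config;
use zenoh_protocol::core::{WhatAmI, ZenohId};
use zenoh_result::ZResult;

use crate::net::routing::router::Router;

fn build_router() -> ZResult<Arc<Router>> {
    let config = Config::default();
    let router = Router::new(
        ZenohId::try_from([1]).unwrap(),
        WhatAmI::Client,
        Some(Arc::new(HLC::default())),
        &config,
    )?; // a failure in interceptor_factories() surfaces here instead of panicking later
    Ok(Arc::new(router))
}
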
zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs | 0 zenoh/src/net/routing/hat/linkstate_peer/queries.rs | 0 zenoh/src/net/routing/hat/mod.rs | 0 zenoh/src/net/routing/hat/p2p_peer/gossip.rs | 0 zenoh/src/net/routing/hat/p2p_peer/mod.rs | 0 zenoh/src/net/routing/hat/p2p_peer/pubsub.rs | 0 zenoh/src/net/routing/hat/p2p_peer/queries.rs | 0 zenoh/src/net/routing/hat/router/mod.rs | 0 zenoh/src/net/routing/hat/router/network.rs | 0 zenoh/src/net/routing/hat/router/pubsub.rs | 0 zenoh/src/net/routing/hat/router/queries.rs | 0 zenoh/src/net/routing/interceptor/accesscontrol.rs | 0 zenoh/src/net/routing/interceptor/authz.rs | 0 zenoh/src/net/routing/interceptor/downsampling.rs | 0 zenoh/src/net/routing/interceptor/mod.rs | 0 zenoh/src/net/routing/mod.rs | 0 zenoh/src/net/routing/router.rs | 0 zenoh/src/net/runtime/adminspace.rs | 0 zenoh/src/net/runtime/mod.rs | 0 zenoh/src/net/runtime/orchestrator.rs | 0 zenoh/src/net/tests/mod.rs | 0 zenoh/src/net/tests/tables.rs | 0 51 files changed, 0 insertions(+), 0 deletions(-) mode change 100755 => 100644 .github/workflows/crates_check.sh mode change 100755 => 100644 .github/workflows/crates_publish.sh mode change 100755 => 100644 DEFAULT_CONFIG.json5 mode change 100755 => 100644 commons/zenoh-config/Cargo.toml mode change 100755 => 100644 commons/zenoh-config/README.md mode change 100755 => 100644 commons/zenoh-config/src/defaults.rs mode change 100755 => 100644 commons/zenoh-config/src/include.rs mode change 100755 => 100644 commons/zenoh-config/src/lib.rs mode change 100755 => 100644 pub_config.json5 mode change 100755 => 100644 sub_config.json5 mode change 100755 => 100644 zenoh/src/net/codec/linkstate.rs mode change 100755 => 100644 zenoh/src/net/codec/mod.rs mode change 100755 => 100644 zenoh/src/net/mod.rs mode change 100755 => 100644 zenoh/src/net/primitives/demux.rs mode change 100755 => 100644 zenoh/src/net/primitives/mod.rs mode change 100755 => 100644 zenoh/src/net/primitives/mux.rs mode change 100755 => 100644 zenoh/src/net/protocol/linkstate.rs mode change 100755 => 100644 zenoh/src/net/protocol/mod.rs mode change 100755 => 100644 zenoh/src/net/routing/dispatcher/face.rs mode change 100755 => 100644 zenoh/src/net/routing/dispatcher/mod.rs mode change 100755 => 100644 zenoh/src/net/routing/dispatcher/pubsub.rs mode change 100755 => 100644 zenoh/src/net/routing/dispatcher/queries.rs mode change 100755 => 100644 zenoh/src/net/routing/dispatcher/resource.rs mode change 100755 => 100644 zenoh/src/net/routing/dispatcher/tables.rs mode change 100755 => 100644 zenoh/src/net/routing/hat/client/mod.rs mode change 100755 => 100644 zenoh/src/net/routing/hat/client/pubsub.rs mode change 100755 => 100644 zenoh/src/net/routing/hat/client/queries.rs mode change 100755 => 100644 zenoh/src/net/routing/hat/linkstate_peer/mod.rs mode change 100755 => 100644 zenoh/src/net/routing/hat/linkstate_peer/network.rs mode change 100755 => 100644 zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs mode change 100755 => 100644 zenoh/src/net/routing/hat/linkstate_peer/queries.rs mode change 100755 => 100644 zenoh/src/net/routing/hat/mod.rs mode change 100755 => 100644 zenoh/src/net/routing/hat/p2p_peer/gossip.rs mode change 100755 => 100644 zenoh/src/net/routing/hat/p2p_peer/mod.rs mode change 100755 => 100644 zenoh/src/net/routing/hat/p2p_peer/pubsub.rs mode change 100755 => 100644 zenoh/src/net/routing/hat/p2p_peer/queries.rs mode change 100755 => 100644 zenoh/src/net/routing/hat/router/mod.rs mode change 100755 => 100644 zenoh/src/net/routing/hat/router/network.rs mode 
change 100755 => 100644 zenoh/src/net/routing/hat/router/pubsub.rs mode change 100755 => 100644 zenoh/src/net/routing/hat/router/queries.rs mode change 100755 => 100644 zenoh/src/net/routing/interceptor/accesscontrol.rs mode change 100755 => 100644 zenoh/src/net/routing/interceptor/authz.rs mode change 100755 => 100644 zenoh/src/net/routing/interceptor/downsampling.rs mode change 100755 => 100644 zenoh/src/net/routing/interceptor/mod.rs mode change 100755 => 100644 zenoh/src/net/routing/mod.rs mode change 100755 => 100644 zenoh/src/net/routing/router.rs mode change 100755 => 100644 zenoh/src/net/runtime/adminspace.rs mode change 100755 => 100644 zenoh/src/net/runtime/mod.rs mode change 100755 => 100644 zenoh/src/net/runtime/orchestrator.rs mode change 100755 => 100644 zenoh/src/net/tests/mod.rs mode change 100755 => 100644 zenoh/src/net/tests/tables.rs diff --git a/.github/workflows/crates_check.sh b/.github/workflows/crates_check.sh old mode 100755 new mode 100644 diff --git a/.github/workflows/crates_publish.sh b/.github/workflows/crates_publish.sh old mode 100755 new mode 100644 diff --git a/DEFAULT_CONFIG.json5 b/DEFAULT_CONFIG.json5 old mode 100755 new mode 100644 diff --git a/commons/zenoh-config/Cargo.toml b/commons/zenoh-config/Cargo.toml old mode 100755 new mode 100644 diff --git a/commons/zenoh-config/README.md b/commons/zenoh-config/README.md old mode 100755 new mode 100644 diff --git a/commons/zenoh-config/src/defaults.rs b/commons/zenoh-config/src/defaults.rs old mode 100755 new mode 100644 diff --git a/commons/zenoh-config/src/include.rs b/commons/zenoh-config/src/include.rs old mode 100755 new mode 100644 diff --git a/commons/zenoh-config/src/lib.rs b/commons/zenoh-config/src/lib.rs old mode 100755 new mode 100644 diff --git a/pub_config.json5 b/pub_config.json5 old mode 100755 new mode 100644 diff --git a/sub_config.json5 b/sub_config.json5 old mode 100755 new mode 100644 diff --git a/zenoh/src/net/codec/linkstate.rs b/zenoh/src/net/codec/linkstate.rs old mode 100755 new mode 100644 diff --git a/zenoh/src/net/codec/mod.rs b/zenoh/src/net/codec/mod.rs old mode 100755 new mode 100644 diff --git a/zenoh/src/net/mod.rs b/zenoh/src/net/mod.rs old mode 100755 new mode 100644 diff --git a/zenoh/src/net/primitives/demux.rs b/zenoh/src/net/primitives/demux.rs old mode 100755 new mode 100644 diff --git a/zenoh/src/net/primitives/mod.rs b/zenoh/src/net/primitives/mod.rs old mode 100755 new mode 100644 diff --git a/zenoh/src/net/primitives/mux.rs b/zenoh/src/net/primitives/mux.rs old mode 100755 new mode 100644 diff --git a/zenoh/src/net/protocol/linkstate.rs b/zenoh/src/net/protocol/linkstate.rs old mode 100755 new mode 100644 diff --git a/zenoh/src/net/protocol/mod.rs b/zenoh/src/net/protocol/mod.rs old mode 100755 new mode 100644 diff --git a/zenoh/src/net/routing/dispatcher/face.rs b/zenoh/src/net/routing/dispatcher/face.rs old mode 100755 new mode 100644 diff --git a/zenoh/src/net/routing/dispatcher/mod.rs b/zenoh/src/net/routing/dispatcher/mod.rs old mode 100755 new mode 100644 diff --git a/zenoh/src/net/routing/dispatcher/pubsub.rs b/zenoh/src/net/routing/dispatcher/pubsub.rs old mode 100755 new mode 100644 diff --git a/zenoh/src/net/routing/dispatcher/queries.rs b/zenoh/src/net/routing/dispatcher/queries.rs old mode 100755 new mode 100644 diff --git a/zenoh/src/net/routing/dispatcher/resource.rs b/zenoh/src/net/routing/dispatcher/resource.rs old mode 100755 new mode 100644 diff --git a/zenoh/src/net/routing/dispatcher/tables.rs b/zenoh/src/net/routing/dispatcher/tables.rs old 
mode 100755 new mode 100644 diff --git a/zenoh/src/net/routing/hat/client/mod.rs b/zenoh/src/net/routing/hat/client/mod.rs old mode 100755 new mode 100644 diff --git a/zenoh/src/net/routing/hat/client/pubsub.rs b/zenoh/src/net/routing/hat/client/pubsub.rs old mode 100755 new mode 100644 diff --git a/zenoh/src/net/routing/hat/client/queries.rs b/zenoh/src/net/routing/hat/client/queries.rs old mode 100755 new mode 100644 diff --git a/zenoh/src/net/routing/hat/linkstate_peer/mod.rs b/zenoh/src/net/routing/hat/linkstate_peer/mod.rs old mode 100755 new mode 100644 diff --git a/zenoh/src/net/routing/hat/linkstate_peer/network.rs b/zenoh/src/net/routing/hat/linkstate_peer/network.rs old mode 100755 new mode 100644 diff --git a/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs b/zenoh/src/net/routing/hat/linkstate_peer/pubsub.rs old mode 100755 new mode 100644 diff --git a/zenoh/src/net/routing/hat/linkstate_peer/queries.rs b/zenoh/src/net/routing/hat/linkstate_peer/queries.rs old mode 100755 new mode 100644 diff --git a/zenoh/src/net/routing/hat/mod.rs b/zenoh/src/net/routing/hat/mod.rs old mode 100755 new mode 100644 diff --git a/zenoh/src/net/routing/hat/p2p_peer/gossip.rs b/zenoh/src/net/routing/hat/p2p_peer/gossip.rs old mode 100755 new mode 100644 diff --git a/zenoh/src/net/routing/hat/p2p_peer/mod.rs b/zenoh/src/net/routing/hat/p2p_peer/mod.rs old mode 100755 new mode 100644 diff --git a/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs b/zenoh/src/net/routing/hat/p2p_peer/pubsub.rs old mode 100755 new mode 100644 diff --git a/zenoh/src/net/routing/hat/p2p_peer/queries.rs b/zenoh/src/net/routing/hat/p2p_peer/queries.rs old mode 100755 new mode 100644 diff --git a/zenoh/src/net/routing/hat/router/mod.rs b/zenoh/src/net/routing/hat/router/mod.rs old mode 100755 new mode 100644 diff --git a/zenoh/src/net/routing/hat/router/network.rs b/zenoh/src/net/routing/hat/router/network.rs old mode 100755 new mode 100644 diff --git a/zenoh/src/net/routing/hat/router/pubsub.rs b/zenoh/src/net/routing/hat/router/pubsub.rs old mode 100755 new mode 100644 diff --git a/zenoh/src/net/routing/hat/router/queries.rs b/zenoh/src/net/routing/hat/router/queries.rs old mode 100755 new mode 100644 diff --git a/zenoh/src/net/routing/interceptor/accesscontrol.rs b/zenoh/src/net/routing/interceptor/accesscontrol.rs old mode 100755 new mode 100644 diff --git a/zenoh/src/net/routing/interceptor/authz.rs b/zenoh/src/net/routing/interceptor/authz.rs old mode 100755 new mode 100644 diff --git a/zenoh/src/net/routing/interceptor/downsampling.rs b/zenoh/src/net/routing/interceptor/downsampling.rs old mode 100755 new mode 100644 diff --git a/zenoh/src/net/routing/interceptor/mod.rs b/zenoh/src/net/routing/interceptor/mod.rs old mode 100755 new mode 100644 diff --git a/zenoh/src/net/routing/mod.rs b/zenoh/src/net/routing/mod.rs old mode 100755 new mode 100644 diff --git a/zenoh/src/net/routing/router.rs b/zenoh/src/net/routing/router.rs old mode 100755 new mode 100644 diff --git a/zenoh/src/net/runtime/adminspace.rs b/zenoh/src/net/runtime/adminspace.rs old mode 100755 new mode 100644 diff --git a/zenoh/src/net/runtime/mod.rs b/zenoh/src/net/runtime/mod.rs old mode 100755 new mode 100644 diff --git a/zenoh/src/net/runtime/orchestrator.rs b/zenoh/src/net/runtime/orchestrator.rs old mode 100755 new mode 100644 diff --git a/zenoh/src/net/tests/mod.rs b/zenoh/src/net/tests/mod.rs old mode 100755 new mode 100644 diff --git a/zenoh/src/net/tests/tables.rs b/zenoh/src/net/tests/tables.rs old mode 100755 new mode 100644 From 
fd674e29873d9e96327811f34543c5f2df3f3425 Mon Sep 17 00:00:00 2001 From: snehilzs Date: Wed, 28 Feb 2024 17:45:44 +0100 Subject: [PATCH 060/122] WIP:ACL with networkinterface --- DEFAULT_CONFIG.json5 | 72 +-- DEFAULT_CONFIG.json5_old.json5 | 502 ++++++++++++++++++ commons/zenoh-config/src/defaults.rs | 1 - commons/zenoh-config/src/lib.rs | 26 +- .../net/routing/interceptor/accesscontrol.rs | 126 +++-- zenoh/src/net/routing/interceptor/authz.rs | 318 +++++------ zenoh/src/net/routing/interceptor/mod.rs | 3 +- 7 files changed, 711 insertions(+), 337 deletions(-) create mode 100644 DEFAULT_CONFIG.json5_old.json5 diff --git a/DEFAULT_CONFIG.json5 b/DEFAULT_CONFIG.json5 index b68718ac05..12d98591a2 100644 --- a/DEFAULT_CONFIG.json5 +++ b/DEFAULT_CONFIG.json5 @@ -108,7 +108,6 @@ // // key_expression // ], // }, - // /// The downsampling declaration. // downsampling: [ // { @@ -122,7 +121,6 @@ // ], // }, // ], - /// Configure internal transport parameters transport: { unicast: { @@ -279,74 +277,22 @@ enabled: true, default_deny: true, policy_list: { - "policy_definition": "UserId", + "policy_definition": "NetworkInterface", "ruleset": [ { - "attribute": "UserId", + "attribute": "NetworkInterface", "rules": [ { - "sub": "aaa3b411006ad57868988f9fec672a31", + "subject": "wifi0", "ke": "test/thr", - "action": "Write", - "permission": true + "action": "Pub", + "permission": "Allow" }, { - "sub": "bbb3b411006ad57868988f9fec672a31", + "subject": "wifi0", "ke": "test/thr", - "action": "Read", - "permission": true - }, - { - "sub": "bbb3b411006ad57868988f9fec672a31", - "ke": "test/thr", - "action": "Read", - "permission": true - }, - { - "sub": "aaabbb11006ad57868988f9fec672a31", - "ke": "test/thr", - "action": "Read", - "permission": true - }, - { - "sub": "aaabbb11006ad57868988f9fec672a31", - "ke": "test/thr", - "action": "Write", - "permission": true - } - ] - }, - { - "attribute": "NetworkType", - "rules": [ - { - "sub": "wlan0", - "ke": "test/thr", - "action": "Write", - "permission": true - }, - { - "sub": "wlan0", - "ke": "test/thr", - "action": "Read", - "permission": true - } - ] - }, - { - "attribute": "user_defined_attribute", - "rules": [ - { - "sub": "value_1", - "ke": "test/thr", - "action": "Write", - "permission": true - }, - { - "sub": "value_2", - "ke": "test/thr", - "action": "Read", - "permission": true + "action": "Sub", + "permission": "Allow" } ] } @@ -499,4 +445,4 @@ // __config__: "./plugins/zenoh-plugin-storage-manager/config.json5", // } // }, -} +} \ No newline at end of file diff --git a/DEFAULT_CONFIG.json5_old.json5 b/DEFAULT_CONFIG.json5_old.json5 new file mode 100644 index 0000000000..b68718ac05 --- /dev/null +++ b/DEFAULT_CONFIG.json5_old.json5 @@ -0,0 +1,502 @@ +/// This file attempts to list and document available configuration elements. +/// For a more complete view of the configuration's structure, check out `zenoh/src/config.rs`'s `Config` structure. +/// Note that the values here are correctly typed, but may not be sensible, so copying this file to change only the parts that matter to you is not good practice. +{ + /// The identifier (as unsigned 128bit integer in hexadecimal lowercase - leading zeros are not accepted) + /// that zenoh runtime will use. + /// If not set, a random unsigned 128bit integer will be used. + /// WARNING: this id must be unique in your zenoh network. + id: "aaabbb11006ad57868988f9fec672a31", + /// The node's mode (router, peer or client) + mode: "router", + /// The node's metadata (name, location, DNS name, etc.) 
Arbitrary JSON data not interpreted by zenohd and available in admin space @/router/ + metadata: { + name: "strawberry", + location: "Penny Lane" + }, + /// Which endpoints to connect to. E.g. tcp/localhost:7447. + /// By configuring the endpoints, it is possible to tell zenoh which router/peer to connect to at startup. + connect: { + endpoints: [ + // "/
" + ], + }, + /// Which endpoints to listen on. E.g. tcp/localhost:7447. + /// By configuring the endpoints, it is possible to tell zenoh which are the endpoints that other routers, + /// peers, or client can use to establish a zenoh session. + listen: { + endpoints: [ + // "/
" + ], + }, + /// Configure the scouting mechanisms and their behaviours + scouting: { + /// In client mode, the period dedicated to scouting for a router before failing + timeout: 3000, + /// In peer mode, the period dedicated to scouting remote peers before attempting other operations + delay: 200, + /// The multicast scouting configuration. + multicast: { + /// Whether multicast scouting is enabled or not + enabled: true, + /// The socket which should be used for multicast scouting + address: "224.0.0.224:7446", + /// The network interface which should be used for multicast scouting + interface: "auto", // If not set or set to "auto" the interface if picked automatically + /// Which type of Zenoh instances to automatically establish sessions with upon discovery on UDP multicast. + /// Accepts a single value or different values for router, peer and client. + /// Each value is bit-or-like combinations of "peer", "router" and "client". + autoconnect: { router: "", peer: "router|peer" + }, + /// Whether or not to listen for scout messages on UDP multicast and reply to them. + listen: true, + }, + /// The gossip scouting configuration. + gossip: { + /// Whether gossip scouting is enabled or not + enabled: true, + /// When true, gossip scouting informations are propagated multiple hops to all nodes in the local network. + /// When false, gossip scouting informations are only propagated to the next hop. + /// Activating multihop gossip implies more scouting traffic and a lower scalability. + /// It mostly makes sense when using "linkstate" routing mode where all nodes in the subsystem don't have + /// direct connectivity with each other. + multihop: false, + /// Which type of Zenoh instances to automatically establish sessions with upon discovery on gossip. + /// Accepts a single value or different values for router, peer and client. + /// Each value is bit-or-like combinations of "peer", "router" and "client". + autoconnect: { router: "", peer: "router|peer" + }, + }, + }, + /// Configuration of data messages timestamps management. + timestamping: { + /// Whether data messages should be timestamped if not already. + /// Accepts a single boolean value or different values for router, peer and client. + enabled: { router: true, peer: false, client: false + }, + /// Whether data messages with timestamps in the future should be dropped or not. + /// If set to false (default), messages with timestamps in the future are retimestamped. + /// Timestamps are ignored if timestamping is disabled. + drop_future_timestamp: false, + }, + /// The default timeout to apply to queries in milliseconds. + queries_default_timeout: 10000, + /// The routing strategy to use and it's configuration. + routing: { + /// The routing strategy to use in routers and it's configuration. + router: { + /// When set to true a router will forward data between two peers + /// directly connected to it if it detects that those peers are not + /// connected to each other. + /// The failover brokering only works if gossip discovery is enabled. + peers_failover_brokering: true, + }, + /// The routing strategy to use in peers and it's configuration. + peer: { + /// The routing strategy to use in peers. ("peer_to_peer" or "linkstate"). + mode: "peer_to_peer", + }, + }, + // /// The declarations aggregation strategy. + // aggregation: { + // /// A list of key-expressions for which all included subscribers will be aggregated into. 
+ // subscribers: [ + // // key_expression + // ], + // /// A list of key-expressions for which all included publishers will be aggregated into. + // publishers: [ + // // key_expression + // ], + // }, + + // /// The downsampling declaration. + // downsampling: [ + // { + // /// A list of network interfaces messages will be processed on, the rest will be passed as is. + // interfaces: [ "wlan0" ], + // /// Data flow messages will be processed on. ("egress" or "ingress") + // flow: "egress", + // /// A list of downsampling rules: key_expression and the rate (maximum frequency in Hertz) + // rules: [ + // { key_expr: "demo/example/zenoh-rs-pub", rate: 0.1 }, + // ], + // }, + // ], + + /// Configure internal transport parameters + transport: { + unicast: { + /// Timeout in milliseconds when opening a link + accept_timeout: 10000, + /// Maximum number of zenoh session in pending state while accepting + accept_pending: 100, + /// Maximum number of sessions that can be simultaneously alive + max_sessions: 1000, + /// Maximum number of incoming links that are admitted per session + max_links: 1, + /// Enables the LowLatency transport + /// This option does not make LowLatency transport mandatory, the actual implementation of transport + /// used will depend on Establish procedure and other party's settings + /// + /// NOTE: Currently, the LowLatency transport doesn't preserve QoS prioritization. + /// NOTE: Due to the note above, 'lowlatency' is incompatible with 'qos' option, so in order to + /// enable 'lowlatency' you need to explicitly disable 'qos'. + lowlatency: false, + /// Enables QoS on unicast communications. + qos: { + enabled: true, + }, + /// Enables compression on unicast communications. + /// Compression capabilities are negotiated during session establishment. + /// If both Zenoh nodes support compression, then compression is activated. + compression: { + enabled: false, + }, + }, + multicast: { + /// Enables QoS on multicast communication. + /// Default to false for Zenoh-to-Zenoh-Pico out-of-the-box compatibility. + qos: { + enabled: false, + }, + /// Enables compression on multicast communication. + /// Default to false for Zenoh-to-Zenoh-Pico out-of-the-box compatibility. + compression: { + enabled: false, + }, + }, + link: { + /// An optional whitelist of protocols to be used for accepting and opening sessions. + /// If not configured, all the supported protocols are automatically whitelisted. + /// The supported protocols are: ["tcp" , "udp", "tls", "quic", "ws", "unixsock-stream"] + /// For example, to only enable "tls" and "quic": + // protocols: ["tls", "quic"], + /// Configure the zenoh TX parameters of a link + tx: { + /// The resolution in bits to be used for the message sequence numbers. + /// When establishing a session with another Zenoh instance, the lowest value of the two instances will be used. + /// Accepted values: 8bit, 16bit, 32bit, 64bit. + sequence_number_resolution: "32bit", + /// Link lease duration in milliseconds to announce to other zenoh nodes + lease: 10000, + /// Number of keep-alive messages in a link lease duration. If no data is sent, keep alive + /// messages will be sent at the configured time interval. + /// NOTE: In order to consider eventual packet loss and transmission latency and jitter, + /// set the actual keep_alive timeout to one fourth of the lease time. 
+ /// This is in-line with the ITU-T G.8013/Y.1731 specification on continous connectivity + /// check which considers a link as failed when no messages are received in 3.5 times the + /// target interval. + keep_alive: 4, + /// Batch size in bytes is expressed as a 16bit unsigned integer. + /// Therefore, the maximum batch size is 2^16-1 (i.e. 65535). + /// The default batch size value is the maximum batch size: 65535. + batch_size: 65535, + /// Each zenoh link has a transmission queue that can be configured + queue: { + /// The size of each priority queue indicates the number of batches a given queue can contain. + /// The amount of memory being allocated for each queue is then SIZE_XXX * BATCH_SIZE. + /// In the case of the transport link MTU being smaller than the ZN_BATCH_SIZE, + /// then amount of memory being allocated for each queue is SIZE_XXX * LINK_MTU. + /// If qos is false, then only the DATA priority will be allocated. + size: { + control: 1, + real_time: 1, + interactive_high: 1, + interactive_low: 1, + data_high: 2, + data: 4, + data_low: 4, + background: 4, + }, + /// The initial exponential backoff time in nanoseconds to allow the batching to eventually progress. + /// Higher values lead to a more aggressive batching but it will introduce additional latency. + backoff: 100, + // Number of threads dedicated to transmission + // By default, the number of threads is calculated as follows: 1 + ((#cores - 1) / 4) + // threads: 4, + }, + }, + /// Configure the zenoh RX parameters of a link + rx: { + /// Receiving buffer size in bytes for each link + /// The default the rx_buffer_size value is the same as the default batch size: 65335. + /// For very high throughput scenarios, the rx_buffer_size can be increased to accomodate + /// more in-flight data. This is particularly relevant when dealing with large messages. + /// E.g. for 16MiB rx_buffer_size set the value to: 16777216. + buffer_size: 65535, + /// Maximum size of the defragmentation buffer at receiver end. + /// Fragmented messages that are larger than the configured size will be dropped. + /// The default value is 1GiB. This would work in most scenarios. + /// NOTE: reduce the value if you are operating on a memory constrained device. + max_message_size: 1073741824, + }, + /// Configure TLS specific parameters + tls: { + /// Path to the certificate of the certificate authority used to validate either the server + /// or the client's keys and certificates, depending on the node's mode. If not specified + /// on router mode then the default WebPKI certificates are used instead. + root_ca_certificate: null, + /// Path to the TLS server private key + server_private_key: null, + /// Path to the TLS server public certificate + server_certificate: null, + /// Client authentication, if true enables mTLS (mutual authentication) + client_auth: false, + /// Path to the TLS client private key + client_private_key: null, + /// Path to the TLS client public certificate + client_certificate: null, + // Whether or not to use server name verification, if set to false zenoh will disregard the common names of the certificates when verifying servers. + // This could be dangerous because your CA can have signed a server cert for foo.com, that's later being used to host a server at baz.com. If you wan't your + // ca to verify that the server at baz.com is actually baz.com, let this be true (default). 
+ server_name_verification: null, + }, + }, + /// Shared memory configuration + shared_memory: { + enabled: false, + }, + /// Access control configuration + auth: { + /// The configuration of authentification. + /// A password implies a username is required. + usrpwd: { + user: null, + password: null, + /// The path to a file containing the user password dictionary + dictionary_file: null, + }, + pubkey: { + public_key_pem: null, + private_key_pem: null, + public_key_file: null, + private_key_file: null, + key_size: null, + known_keys_file: null, + }, + }, + acl: { + enabled: true, + default_deny: true, + policy_list: { + "policy_definition": "UserId", + "ruleset": [ + { + "attribute": "UserId", + "rules": [ + { + "sub": "aaa3b411006ad57868988f9fec672a31", + "ke": "test/thr", + "action": "Write", + "permission": true + }, + { + "sub": "bbb3b411006ad57868988f9fec672a31", + "ke": "test/thr", + "action": "Read", + "permission": true + }, + { + "sub": "bbb3b411006ad57868988f9fec672a31", + "ke": "test/thr", + "action": "Read", + "permission": true + }, + { + "sub": "aaabbb11006ad57868988f9fec672a31", + "ke": "test/thr", + "action": "Read", + "permission": true + }, + { + "sub": "aaabbb11006ad57868988f9fec672a31", + "ke": "test/thr", + "action": "Write", + "permission": true + } + ] + }, + { + "attribute": "NetworkType", + "rules": [ + { + "sub": "wlan0", + "ke": "test/thr", + "action": "Write", + "permission": true + }, + { + "sub": "wlan0", + "ke": "test/thr", + "action": "Read", + "permission": true + } + ] + }, + { + "attribute": "user_defined_attribute", + "rules": [ + { + "sub": "value_1", + "ke": "test/thr", + "action": "Write", + "permission": true + }, + { + "sub": "value_2", + "ke": "test/thr", + "action": "Read", + "permission": true + } + ] + } + ] + }, + }, + }, + /// Configure the Admin Space + /// Unstable: this configuration part works as advertised, but may change in a future release + adminspace: { + // read and/or write permissions on the admin space + permissions: { + read: true, + write: false, + }, + }, + /// + /// Plugins configurations + /// + // /// Directories where plugins configured by name should be looked for. Plugins configured by __path__ are not subject to lookup + // plugins_search_dirs: [], + // /// Plugins are only loaded if present in the configuration. When starting + // /// Once loaded, they may react to changes in the configuration made through the zenoh instance's adminspace. + // plugins: { + // /// If no `__path__` is given to a plugin, zenohd will automatically search for a shared library matching the plugin's name (here, `libzenoh_plugin_rest.so` would be searched for on linux) + // + // /// Plugin settings may contain field `__config__` + // /// - If `__config__` is specified, it's content is merged into plugin configuration + // /// - Properties loaded from `__config__` file overrides existing properties + // /// - If json objects in loaded file contains `__config__` properties, they are processed recursively + // /// This is used in the 'storcge_manager' which supports subplugins, each with it's own config + // /// + // /// See below exapmle of plugin configuration using `__config__` property + // + // /// Configure the REST API plugin + // rest: { + // /// Setting this option to true allows zenohd to panic should it detect issues with this plugin. Setting it to false politely asks the plugin not to panic. 
+ // __required__: true, // defaults to false + // /// load configuration from the file + // __config__: "./plugins/zenoh-plugin-rest/config.json5", + // /// http port to answer to rest requests + // http_port: 8000, + // }, + // + // /// Configure the storage manager plugin + // storage_manager: { + // /// When a path is present, automatic search is disabled, and zenohd will instead select the first path which manages to load. + // __path__: [ + // "./target/release/libzenoh_plugin_storage_manager.so", + // "./target/release/libzenoh_plugin_storage_manager.dylib", + // ], + // /// Directories where plugins configured by name should be looked for. Plugins configured by __path__ are not subject to lookup + // backend_search_dirs: [], + // /// The "memory" volume is always available, but you may create other volumes here, with various backends to support the actual storing. + // volumes: { + // /// An influxdb backend is also available at https://github.com/eclipse-zenoh/zenoh-backend-influxdb + // influxdb: { + // url: "https://myinfluxdb.example", + // /// Some plugins may need passwords in their configuration. + // /// To avoid leaking them through the adminspace, they may be masked behind a privacy barrier. + // /// any value held at the key "private" will not be shown in the adminspace. + // private: { + // username: "user1", + // password: "pw1", + // }, + // }, + // influxdb2: { + // /// A second backend of the same type can be spawned using `__path__`, for examples when different DBs are needed. + // backend: "influxdb", + // private: { + // username: "user2", + // password: "pw2", + // }, + // url: "https://localhost:8086", + // }, + // }, + // + // /// Configure the storages supported by the volumes + // storages: { + // demo: { + // /// Storages always need to know what set of keys they must work with. These sets are defined by a key expression. + // key_expr: "demo/memory/**", + // /// Storages also need to know which volume will be used to actually store their key-value pairs. + // /// The "memory" volume is always available, and doesn't require any per-storage options, so requesting "memory" by string is always sufficient. + // volume: "memory", + // }, + // demo2: { + // key_expr: "demo/memory2/**", + // volume: "memory", + // /// Storage manager plugin handles metadata in order to ensure convergence of distributed storages configured in Zenoh. + // /// Metadata includes the set of wild card updates and deletions (tombstones). + // /// Once the samples are guaranteed to be delivered, the metadata can be garbage collected. + // garbage_collection: { + // /// The garbage collection event will be periodic with this duration. + // /// The duration is specified in seconds. + // period: 30, + // /// Metadata older than this parameter will be garbage collected. + // /// The duration is specified in seconds. + // lifespan: 86400, + // }, + // /// If multiple storages subscribing to the same key_expr should be synchronized, declare them as replicas. + // /// In the absence of this configuration, a normal storage is initialized + // /// Note: all the samples to be stored in replicas should be timestamped + // replica_config: { + // /// Specifying the parameters is optional, by default the values provided will be used. 
+ // /// Time interval between different synchronization attempts in seconds + // publication_interval: 5, + // /// Expected propagation delay of the network in milliseconds + // propagation_delay: 200, + // /// This is the chunk that you would like your data to be divide into in time, in milliseconds. + // /// Higher the frequency of updates, lower the delta should be chosen + // /// To be efficient, delta should be the time containing no more than 100,000 samples + // delta: 1000, + // } + // }, + // demo3: { + // key_expr: "demo/memory3/**", + // volume: "memory", + // /// A complete storage advertises itself as containing all the known keys matching the configured key expression. + // /// If not configured, complete defaults to false. + // complete: "true", + // }, + // influx_demo: { + // key_expr: "demo/influxdb/**", + // /// This prefix will be stripped of the received keys when storing. + // strip_prefix: "demo/influxdb", + // /// influxdb-backed volumes need a bit more configuration, which is passed like-so: + // volume: { + // id: "influxdb", + // db: "example", + // }, + // }, + // influx_demo2: { + // key_expr: "demo/influxdb2/**", + // strip_prefix: "demo/influxdb2", + // volume: { + // id: "influxdb2", + // db: "example", + // }, + // }, + // }, + // }, + // }, + // /// Plugin configuration example using `__config__` property + // plugins: { + // rest: { + // __config__: "./plugins/zenoh-plugin-rest/config.json5", + // }, + // storage_manager: { + // __config__: "./plugins/zenoh-plugin-storage-manager/config.json5", + // } + // }, +} diff --git a/commons/zenoh-config/src/defaults.rs b/commons/zenoh-config/src/defaults.rs index 54c8a568a6..ab1bad15fd 100644 --- a/commons/zenoh-config/src/defaults.rs +++ b/commons/zenoh-config/src/defaults.rs @@ -221,4 +221,3 @@ impl Default for AclConfig { } } } - diff --git a/commons/zenoh-config/src/lib.rs b/commons/zenoh-config/src/lib.rs index f8986a6eb2..067e165b86 100644 --- a/commons/zenoh-config/src/lib.rs +++ b/commons/zenoh-config/src/lib.rs @@ -99,7 +99,7 @@ pub struct DownsamplingItemConf { //adding for authz structs -#[derive(Serialize, Deserialize,Clone,Debug)] +#[derive(Serialize, Deserialize, Clone, Debug)] pub struct PolicyList { policy_definition: String, ruleset: Vec, @@ -111,29 +111,31 @@ pub struct AttributeRules { rules: Vec, } -#[derive(Clone, Serialize,Debug, Deserialize)] +#[derive(Clone, Serialize, Debug, Deserialize)] pub struct AttributeRule { - sub: Attribute, + subject: Subject, ke: String, action: Action, - permission: bool, + permission: Permission, +} +#[derive(Clone, Debug, Serialize, Deserialize, Eq, Hash, PartialEq)] +pub enum Permission { + Allow, + Deny, } #[derive(Serialize, Debug, Deserialize, Eq, PartialEq, Hash, Clone)] #[serde(untagged)] -pub enum Attribute { +pub enum Subject { UserId(ZenohId), - NetworkType(String), //clarify - MetadataType(String), //clarify + NetworkInterface(String), //clarify + MetadataType(String), //clarify } #[derive(Clone, Debug, Serialize, Deserialize, Eq, Hash, PartialEq)] pub enum Action { - Read, - Write, - DeclareSub, - Delete, - DeclareQuery, + Pub, + Sub, } pub trait ConfigValidator: Send + Sync { diff --git a/zenoh/src/net/routing/interceptor/accesscontrol.rs b/zenoh/src/net/routing/interceptor/accesscontrol.rs index e4014e8cea..44315ad65e 100644 --- a/zenoh/src/net/routing/interceptor/accesscontrol.rs +++ b/zenoh/src/net/routing/interceptor/accesscontrol.rs @@ -1,6 +1,5 @@ use std::sync::Arc; -use zenoh_config::ZenohId; use zenoh_protocol::{ network::{NetworkBody, 
NetworkMessage, Push}, zenoh::PushBody, @@ -10,11 +9,20 @@ use zenoh_transport::{multicast::TransportMulticast, unicast::TransportUnicast}; use crate::net::routing::RoutingContext; use super::{ - authz::{Action, Attribute, PolicyEnforcer, RequestInfo}, + authz::{Action, PolicyEnforcer, Subject}, EgressInterceptor, IngressInterceptor, InterceptorFactoryTrait, InterceptorTrait, }; pub(crate) struct AclEnforcer { pub(crate) e: Arc, + pub(crate) interfaces: Option>, //to keep the interfaces +} +struct EgressAclEnforcer { + pe: Arc, + subject: i32, +} +struct IngressAclEnforcer { + pe: Arc, + subject: i32, } impl InterceptorFactoryTrait for AclEnforcer { @@ -22,15 +30,42 @@ impl InterceptorFactoryTrait for AclEnforcer { &self, transport: &TransportUnicast, ) -> (Option, Option) { - let uid = transport.get_zid().unwrap(); + // let uid = transport.get_zid().unwrap(); + if let Some(interfaces) = &self.interfaces { + log::debug!( + "New downsampler transport unicast config interfaces: {:?}", + interfaces + ); + if let Ok(links) = transport.get_links() { + for link in links { + log::debug!( + "New downsampler transport unicast link interfaces: {:?}", + link.interfaces + ); + if !link.interfaces.iter().any(|x| interfaces.contains(x)) { + return (None, None); + } + } + } + }; + let interface_key = Subject::NetworkInterface("wifi0".to_string()); + + //get value from the subject_map + let e = self.e.clone(); + let mut interface_value = 0; + if let Some(sm) = &e.subject_map { + interface_value = *sm.get(&interface_key).unwrap(); + } + + let pe = self.e.clone(); ( Some(Box::new(IngressAclEnforcer { - e: self.e.clone(), - zid: uid, + pe: pe.clone(), + subject: interface_value, })), Some(Box::new(EgressAclEnforcer { - zid: uid, - e: self.e.clone(), + pe: pe.clone(), + subject: interface_value, })), ) } @@ -39,56 +74,33 @@ impl InterceptorFactoryTrait for AclEnforcer { &self, _transport: &TransportMulticast, ) -> Option { - let e = &self.e; - Some(Box::new(EgressAclEnforcer { - e: e.clone(), - zid: ZenohId::default(), - })) + None } fn new_peer_multicast(&self, _transport: &TransportMulticast) -> Option { - let e = &self.e; - Some(Box::new(IngressAclEnforcer { - e: e.clone(), - zid: ZenohId::default(), - })) + None } } -struct IngressAclEnforcer { - e: Arc, - zid: ZenohId, -} - impl InterceptorTrait for IngressAclEnforcer { fn intercept<'a>( &self, ctx: RoutingContext, ) -> Option> { + //intercept msg and send it to PEP if let NetworkBody::Push(Push { payload: PushBody::Put(_), .. 
}) = &ctx.msg.body { - let e = &self.e; - let ke = ctx.full_expr().unwrap(); - let network_type = "wlan0"; //for testing - let mut sub_info: Vec = Vec::new(); - let attribute_list = e.get_attribute_list().unwrap(); - for i in attribute_list { - match i.as_str() { - "UserId" => sub_info.push(Attribute::UserId(self.zid)), - "NetworkType" => sub_info.push(Attribute::NetworkType(network_type.to_owned())), - _ => { //other metadata values - } - } - } - let request_info = RequestInfo { - sub: sub_info, - ke: ke.to_string(), - action: Action::Write, - }; - match e.policy_enforcement_point(request_info) { + let kexpr = ctx.full_expr().unwrap(); //add the cache here + + let subject = self.subject; + + match self + .pe + .policy_decision_point(subject, Action::Pub, kexpr.to_string()) + { Ok(decision) => { if !decision { return None; @@ -102,40 +114,24 @@ impl InterceptorTrait for IngressAclEnforcer { } } -struct EgressAclEnforcer { - e: Arc, - zid: ZenohId, -} - impl InterceptorTrait for EgressAclEnforcer { fn intercept( &self, ctx: RoutingContext, ) -> Option> { + // intercept msg and send it to PEP if let NetworkBody::Push(Push { payload: PushBody::Put(_), .. }) = &ctx.msg.body { - let e = &self.e; - let ke = ctx.full_expr().unwrap(); - let network_type = "wlan0"; //for testing - let mut sub_info: Vec = Vec::new(); - let attribute_list = e.get_attribute_list().unwrap(); - for i in attribute_list { - match i.as_str() { - "UserId" => sub_info.push(Attribute::UserId(self.zid)), - "NetworkType" => sub_info.push(Attribute::NetworkType(network_type.to_owned())), - _ => { //other metadata values, - } - } - } - let request_info = RequestInfo { - sub: sub_info, - ke: ke.to_string(), - action: Action::Read, - }; - match e.policy_enforcement_point(request_info) { + let kexpr = ctx.full_expr().unwrap(); //add the cache here + + let subject = self.subject; + match self + .pe + .policy_decision_point(subject, Action::Sub, kexpr.to_string()) + { Ok(decision) => { if !decision { return None; diff --git a/zenoh/src/net/routing/interceptor/authz.rs b/zenoh/src/net/routing/interceptor/authz.rs index 4c56735dd6..dd587cf3ae 100644 --- a/zenoh/src/net/routing/interceptor/authz.rs +++ b/zenoh/src/net/routing/interceptor/authz.rs @@ -5,35 +5,44 @@ use zenoh_config::AclConfig; use zenoh_keyexpr::keyexpr; use zenoh_keyexpr::keyexpr_tree::{IKeyExprTree, IKeyExprTreeMut, KeBoxTree}; use zenoh_result::ZResult; -#[derive(Clone, Debug, Serialize, Deserialize, Eq, Hash, PartialEq)] + +#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq, Hash)] + pub enum Action { - Read, - Write, - DeclareSub, - Delete, - DeclareQuery, + Pub, + Sub, } -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct Request { - pub(crate) sub: Attribute, //removed String - pub(crate) obj: String, - pub(crate) action: Action, -} +#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq, Hash)] -pub struct RequestBuilder { - sub: Option, //removed Attribute - obj: Option, - action: Option, +pub enum Permission { + Allow, + Deny, } -#[derive(Deserialize,Debug)] +pub struct PolicyForSubject(Vec>); //vec of actions over vec of permission for tree of ke for this +pub struct PolicyMap(pub FxHashMap); //index of subject_map instead of subject + +// #[derive(Debug, Clone, Serialize, Deserialize)] +// pub struct Request { +// pub(crate) sub: Subject, +// pub(crate) obj: String, +// pub(crate) action: Action, +// } + +// pub struct RequestBuilder { +// sub: Option, //removed Attribute +// obj: Option, +// action: Option, +// } + 
+#[derive(Deserialize, Debug)] pub struct GetPolicy { policy_definition: String, ruleset: Vec, } type KeTreeRule = KeBoxTree; - +/* impl RequestBuilder { pub fn default() -> Self { RequestBuilder { @@ -46,7 +55,7 @@ impl RequestBuilder { RequestBuilder::default() } - pub fn sub(&mut self, sub: impl Into) -> &mut Self { + pub fn sub(&mut self, sub: impl Into) -> &mut Self { let _ = self.sub.insert(sub.into()); self } @@ -69,19 +78,20 @@ impl RequestBuilder { Ok(Request { sub, obj, action }) } } - -type SubActPolicy = FxHashMap; //replaces SinglePolic - +*/ pub struct PolicyEnforcer { - acl_enabled: bool, - default_deny: bool, - attribute_list: Option>, //should have all attribute names - policy_list: Option>, //stores policy-map for ID and each attribute + pub(crate) acl_enabled: bool, + pub(crate) default_deny: bool, + pub(crate) subject_map: Option>, //should have all attribute names + pub(crate) policy_list: Option, } +#[derive(Debug, Clone)] + pub struct PolicyInformation { policy_definition: String, attribute_list: Vec, //list of attribute names in string + subject_map: FxHashMap, policy_rules: Vec, } @@ -93,46 +103,42 @@ pub struct AttributeRules { #[derive(Clone, Debug, Deserialize)] pub struct AttributeRule { - sub: Attribute, + subject: Subject, ke: String, action: Action, - permission: bool, + permission: Permission, } use zenoh_config::ZenohId; #[derive(Serialize, Debug, Deserialize, Eq, PartialEq, Hash, Clone)] #[serde(untagged)] -pub enum Attribute { +pub enum Subject { UserId(ZenohId), - NetworkType(String), //clarify - MetadataType(String), //clarify + NetworkInterface(String), } -#[derive(Serialize, Debug, Deserialize, Eq, PartialEq, Hash, Clone)] -pub struct SubAct(Attribute, Action); - #[derive(Debug)] pub struct RequestInfo { - pub sub: Vec, + pub sub: Vec, pub ke: String, pub action: Action, } +const ACTION_LENGTH: usize = 2; +const PERMISSION_LENGTH: usize = 2; + impl PolicyEnforcer { pub fn new() -> PolicyEnforcer { PolicyEnforcer { acl_enabled: true, default_deny: true, - attribute_list: None, + subject_map: None, policy_list: None, } } + pub fn init(&mut self, acl_config: AclConfig) -> ZResult<()> { - /* - Initializes the policy for the control logic - loads policy into memory from file/network path - creates the policy hashmap with the ke-tries for ke matching - can have policy-type in the mix here...need to verify - */ + //returns Ok() for all good else returns Error + //insert values into the enforcer from the config file match acl_config.enabled { Some(val) => self.acl_enabled = val, None => log::error!("acl config not setup"), @@ -145,72 +151,51 @@ impl PolicyEnforcer { match acl_config.policy_list { Some(policy_list) => { let policy_information = self.policy_information_point(policy_list)?; - self.attribute_list = Some(policy_information.attribute_list); - let _policy_definition = policy_information.policy_definition; - self.build_policy_map( - self.attribute_list.clone().unwrap(), - policy_information.policy_rules, - )?; - log::info!("policy enforcer was initialised successfully"); + + self.subject_map = Some(policy_information.subject_map.clone()); + let mut main_policy: PolicyMap = PolicyMap(FxHashMap::default()); + + //first initialize the vector of vectors (needed to maintain the indices) + let subject_map = policy_information.subject_map.clone(); + for (_, index) in subject_map { + let mut rule: PolicyForSubject = PolicyForSubject(Vec::new()); + for _i in 0..ACTION_LENGTH { + let mut action_rule: Vec = Vec::new(); + for _j in 0..PERMISSION_LENGTH { + let 
permission_rule = KeTreeRule::new(); + // + action_rule.push(permission_rule); + } + rule.0.push(action_rule); + } + main_policy.0.insert(index, rule); + } + + for rules in policy_information.policy_rules { + for rule in rules.rules { + //add values to the ketree as per the rules + //get the subject index + let index = policy_information.subject_map.get(&rule.subject).unwrap(); + main_policy.0.get_mut(index).unwrap().0[rule.action as usize] + [rule.permission as usize] + .insert(keyexpr::new(&rule.ke)?, true); + } + } + //add to the policy_enforcer + self.policy_list = Some(main_policy); } None => log::error!("no policy list was specified"), } } Ok(()) } - pub fn build_policy_map( - &mut self, - attribute_list: Vec, - policy_rules_vector: Vec, - ) -> ZResult<()> { - /* - representaiton of policy list as a vector of hashmap of trees - each hashmap maps a subact (ID/Atttribute + Action) to a trie of allowed values - */ - //for each attrribute in the list, get rules, create map and push into rules_vector - let mut pm: Vec = Vec::new(); - for (i, _) in attribute_list.iter().enumerate() { - let rm = self.get_rules_list(policy_rules_vector[i].rules.clone())?; - pm.push(rm); - } - self.policy_list = Some(pm); - - Ok(()) - } - - pub fn get_rules_list(&self, rule_set: Vec) -> ZResult { - let mut policy: SubActPolicy = FxHashMap::default(); - for v in rule_set { - // for now permission being false means this KE will not be inserted into the trie of allowed KEs - let perm = v.permission; - if !perm { - continue; - } - let sub = v.sub; - let ke = v.ke; - let subact = SubAct(sub, v.action); - let subact_value_exists = policy.contains_key(&subact); - if subact_value_exists { - let ketree = policy.get_mut(&subact).unwrap(); - ketree.insert(keyexpr::new(&ke)?, true); - } else { - //create new entry for subject + ke-tree - let mut ketree = KeTreeRule::new(); - ketree.insert(keyexpr::new(&ke)?, true); - policy.insert(subact, ketree); - } - } - Ok(policy) - } - - //if policy_list exists, get that value directly and use it for get policy - - pub fn policy_information_point(&self, policy_list : zenoh_config::PolicyList)->ZResult{ - - //let policy_list_info: GetPolicy;// = GetPolicy{ - let value = serde_json::to_value(&policy_list).unwrap(); - let policy_list_info: GetPolicy = serde_json::from_value(value)?; - let enforced_attributes = policy_list_info + pub fn policy_information_point( + &self, + policy_list: zenoh_config::PolicyList, + ) -> ZResult { + let value = serde_json::to_value(&policy_list).unwrap(); + let policy_list_info: GetPolicy = serde_json::from_value(value)?; + let enforced_attributes = policy_list_info .policy_definition .split(' ') .collect::>(); @@ -218,10 +203,16 @@ impl PolicyEnforcer { let complete_ruleset = policy_list_info.ruleset; let mut attribute_list: Vec = Vec::new(); let mut policy_rules: Vec = Vec::new(); - for rule in complete_ruleset.iter() { - if enforced_attributes.contains(&rule.attribute.as_str()) { - attribute_list.push(rule.attribute.clone()); - policy_rules.push(rule.clone()) + let mut subject_map = FxHashMap::default(); + let mut counter = 1; //starting at 1 since 0 is default value in policy_check and should not match anything + for attr_rule in complete_ruleset.iter() { + if enforced_attributes.contains(&attr_rule.attribute.as_str()) { + attribute_list.push(attr_rule.attribute.clone()); + policy_rules.push(attr_rule.clone()); + for rule in attr_rule.rules.clone() { + subject_map.insert(rule.subject, counter); + counter += 1; + } } } @@ -230,102 +221,39 @@ impl 
PolicyEnforcer { Ok(PolicyInformation { policy_definition, attribute_list, + subject_map, policy_rules, - }) - - } - // pub fn policy_resource_point(&self, file_path: &str) -> ZResult { - // //read file - - // let policy_file_info: GetPolicy = { - // let data = fs::read_to_string(file_path).expect("error reading file"); - // serde_json::from_str(&data).expect("error parsing from json to struct") - // }; - - // //get the rules mentioned in the policy definition - // let enforced_attributes = policy_file_info - // .policy_definition - // .split(' ') - // .collect::>(); - - // let complete_ruleset = policy_file_info.rules; - // let mut attribute_list: Vec = Vec::new(); - // let mut policy_rules: Vec = Vec::new(); - // for rule in complete_ruleset.iter() { - // if enforced_attributes.contains(&rule.attribute_name.as_str()) { - // attribute_list.push(rule.attribute_name.clone()); - // policy_rules.push(rule.clone()) - // } - // } - - // let policy_definition = policy_file_info.policy_definition; - - // Ok(PolicyInformation { - // policy_definition, - // attribute_list, - // policy_rules, - // }) - // } - - pub fn policy_enforcement_point(&self, request_info: RequestInfo) -> ZResult { - /* - input: request_info from interceptor - output: decision = allow/deny permission [true/false] - function: builds the request and passes it to policy_decision_point() - collects results (for each attribute in subject list) from PDP - and then uses that to drop or pass the msg to routing table - */ - - let obj = request_info.ke; - let mut decision = true; - let mut decisions: Vec = Vec::new(); //to store all decisions for each subject in list - for (attribute_index, val) in request_info.sub.into_iter().enumerate() { - //build request - let request = RequestBuilder::new() - .sub(val) - .obj(obj.clone()) - .action(request_info.action.clone()) - .build()?; - let d = self.policy_decision_point(attribute_index, request); - decisions.push(d); - decision = decision & d; - } - - //let decision: bool = decisions.iter().map(|d,x=true|x=d&x);// decisions[0]; //only checks for single attribute right now - Ok(decision) + }) } - pub fn policy_decision_point(&self, index: usize, request: Request) -> bool { + pub fn policy_decision_point(&self, subject: i32, act: Action, kexpr: String) -> ZResult { /* - input: (request) - output: true(allow)/false(deny) - function: process the request received from PEP against the policy (self) - the policy list is chosen based on the policy-type specified in the rules file - policy list is be a hashmap of subject->ketries (test and discuss) + need to decide policy for proper handling of the edge cases + what about default_deny vlaue from the policy file?? 
+ if it is allow, the should be allowed if everything is NONE */ - - //compare the request to the vec of values...matching depends on the value of the policy type - - //return true; - let ke = request.obj; - let sub = request.sub; - let action = request.action; - let subact = SubAct(sub, action); - //find index of attribute name in attribute list - //then use attribute_rules from same index - if let Some(policy_list) = &self.policy_list { - match policy_list[index].get(&subact) { - Some(ktrie) => { - let result = ktrie.nodes_including(keyexpr::new(&ke).unwrap()).count(); - return result != 0; + match &self.policy_list { + Some(policy_map) => { + let ps = policy_map.0.get(&subject).unwrap(); + let perm_vec = &ps.0[act as usize]; + + //check for deny + + let deny_result = perm_vec[Permission::Deny as usize] + .nodes_including(keyexpr::new(&kexpr).unwrap()) + .count(); + if deny_result != 0 { + return Ok(false); } - None => return false, + + //check for allow + + let allow_result = perm_vec[Permission::Allow as usize] + .nodes_including(keyexpr::new(&kexpr).unwrap()) + .count(); + Ok(allow_result != 0) } + None => Ok(false), } - false - } - - pub fn get_attribute_list(&self) -> Option> { - self.attribute_list.clone() } } diff --git a/zenoh/src/net/routing/interceptor/mod.rs b/zenoh/src/net/routing/interceptor/mod.rs index f8b9f2d389..0c2b3f0c35 100644 --- a/zenoh/src/net/routing/interceptor/mod.rs +++ b/zenoh/src/net/routing/interceptor/mod.rs @@ -65,7 +65,7 @@ pub(crate) fn interceptor_factories(config: &Config) -> ZResult acl_enabled = val, @@ -78,6 +78,7 @@ pub(crate) fn interceptor_factories(config: &Config) -> ZResult res.push(Box::new(AclEnforcer { e: Arc::new(policy_enforcer), + interfaces: None, })), Err(e) => log::error!( "access control enabled but not initialized with error {}!", From 1b4a949e13f0121b24dbfe15c6b5ddcebe8b9654 Mon Sep 17 00:00:00 2001 From: snehilzs Date: Wed, 28 Feb 2024 17:46:57 +0100 Subject: [PATCH 061/122] WIP:ACL with networkinterface --- DEFAULT_CONFIG.json5 | 0 DEFAULT_CONFIG.json5_old.json5 | 502 --------------------------------- 2 files changed, 502 deletions(-) mode change 100644 => 100755 DEFAULT_CONFIG.json5 delete mode 100644 DEFAULT_CONFIG.json5_old.json5 diff --git a/DEFAULT_CONFIG.json5 b/DEFAULT_CONFIG.json5 old mode 100644 new mode 100755 diff --git a/DEFAULT_CONFIG.json5_old.json5 b/DEFAULT_CONFIG.json5_old.json5 deleted file mode 100644 index b68718ac05..0000000000 --- a/DEFAULT_CONFIG.json5_old.json5 +++ /dev/null @@ -1,502 +0,0 @@ -/// This file attempts to list and document available configuration elements. -/// For a more complete view of the configuration's structure, check out `zenoh/src/config.rs`'s `Config` structure. -/// Note that the values here are correctly typed, but may not be sensible, so copying this file to change only the parts that matter to you is not good practice. -{ - /// The identifier (as unsigned 128bit integer in hexadecimal lowercase - leading zeros are not accepted) - /// that zenoh runtime will use. - /// If not set, a random unsigned 128bit integer will be used. - /// WARNING: this id must be unique in your zenoh network. - id: "aaabbb11006ad57868988f9fec672a31", - /// The node's mode (router, peer or client) - mode: "router", - /// The node's metadata (name, location, DNS name, etc.) Arbitrary JSON data not interpreted by zenohd and available in admin space @/router/ - metadata: { - name: "strawberry", - location: "Penny Lane" - }, - /// Which endpoints to connect to. E.g. tcp/localhost:7447. 
- /// By configuring the endpoints, it is possible to tell zenoh which router/peer to connect to at startup. - connect: { - endpoints: [ - // "/
" - ], - }, - /// Which endpoints to listen on. E.g. tcp/localhost:7447. - /// By configuring the endpoints, it is possible to tell zenoh which are the endpoints that other routers, - /// peers, or client can use to establish a zenoh session. - listen: { - endpoints: [ - // "/
" - ], - }, - /// Configure the scouting mechanisms and their behaviours - scouting: { - /// In client mode, the period dedicated to scouting for a router before failing - timeout: 3000, - /// In peer mode, the period dedicated to scouting remote peers before attempting other operations - delay: 200, - /// The multicast scouting configuration. - multicast: { - /// Whether multicast scouting is enabled or not - enabled: true, - /// The socket which should be used for multicast scouting - address: "224.0.0.224:7446", - /// The network interface which should be used for multicast scouting - interface: "auto", // If not set or set to "auto" the interface if picked automatically - /// Which type of Zenoh instances to automatically establish sessions with upon discovery on UDP multicast. - /// Accepts a single value or different values for router, peer and client. - /// Each value is bit-or-like combinations of "peer", "router" and "client". - autoconnect: { router: "", peer: "router|peer" - }, - /// Whether or not to listen for scout messages on UDP multicast and reply to them. - listen: true, - }, - /// The gossip scouting configuration. - gossip: { - /// Whether gossip scouting is enabled or not - enabled: true, - /// When true, gossip scouting informations are propagated multiple hops to all nodes in the local network. - /// When false, gossip scouting informations are only propagated to the next hop. - /// Activating multihop gossip implies more scouting traffic and a lower scalability. - /// It mostly makes sense when using "linkstate" routing mode where all nodes in the subsystem don't have - /// direct connectivity with each other. - multihop: false, - /// Which type of Zenoh instances to automatically establish sessions with upon discovery on gossip. - /// Accepts a single value or different values for router, peer and client. - /// Each value is bit-or-like combinations of "peer", "router" and "client". - autoconnect: { router: "", peer: "router|peer" - }, - }, - }, - /// Configuration of data messages timestamps management. - timestamping: { - /// Whether data messages should be timestamped if not already. - /// Accepts a single boolean value or different values for router, peer and client. - enabled: { router: true, peer: false, client: false - }, - /// Whether data messages with timestamps in the future should be dropped or not. - /// If set to false (default), messages with timestamps in the future are retimestamped. - /// Timestamps are ignored if timestamping is disabled. - drop_future_timestamp: false, - }, - /// The default timeout to apply to queries in milliseconds. - queries_default_timeout: 10000, - /// The routing strategy to use and it's configuration. - routing: { - /// The routing strategy to use in routers and it's configuration. - router: { - /// When set to true a router will forward data between two peers - /// directly connected to it if it detects that those peers are not - /// connected to each other. - /// The failover brokering only works if gossip discovery is enabled. - peers_failover_brokering: true, - }, - /// The routing strategy to use in peers and it's configuration. - peer: { - /// The routing strategy to use in peers. ("peer_to_peer" or "linkstate"). - mode: "peer_to_peer", - }, - }, - // /// The declarations aggregation strategy. - // aggregation: { - // /// A list of key-expressions for which all included subscribers will be aggregated into. 
- // subscribers: [ - // // key_expression - // ], - // /// A list of key-expressions for which all included publishers will be aggregated into. - // publishers: [ - // // key_expression - // ], - // }, - - // /// The downsampling declaration. - // downsampling: [ - // { - // /// A list of network interfaces messages will be processed on, the rest will be passed as is. - // interfaces: [ "wlan0" ], - // /// Data flow messages will be processed on. ("egress" or "ingress") - // flow: "egress", - // /// A list of downsampling rules: key_expression and the rate (maximum frequency in Hertz) - // rules: [ - // { key_expr: "demo/example/zenoh-rs-pub", rate: 0.1 }, - // ], - // }, - // ], - - /// Configure internal transport parameters - transport: { - unicast: { - /// Timeout in milliseconds when opening a link - accept_timeout: 10000, - /// Maximum number of zenoh session in pending state while accepting - accept_pending: 100, - /// Maximum number of sessions that can be simultaneously alive - max_sessions: 1000, - /// Maximum number of incoming links that are admitted per session - max_links: 1, - /// Enables the LowLatency transport - /// This option does not make LowLatency transport mandatory, the actual implementation of transport - /// used will depend on Establish procedure and other party's settings - /// - /// NOTE: Currently, the LowLatency transport doesn't preserve QoS prioritization. - /// NOTE: Due to the note above, 'lowlatency' is incompatible with 'qos' option, so in order to - /// enable 'lowlatency' you need to explicitly disable 'qos'. - lowlatency: false, - /// Enables QoS on unicast communications. - qos: { - enabled: true, - }, - /// Enables compression on unicast communications. - /// Compression capabilities are negotiated during session establishment. - /// If both Zenoh nodes support compression, then compression is activated. - compression: { - enabled: false, - }, - }, - multicast: { - /// Enables QoS on multicast communication. - /// Default to false for Zenoh-to-Zenoh-Pico out-of-the-box compatibility. - qos: { - enabled: false, - }, - /// Enables compression on multicast communication. - /// Default to false for Zenoh-to-Zenoh-Pico out-of-the-box compatibility. - compression: { - enabled: false, - }, - }, - link: { - /// An optional whitelist of protocols to be used for accepting and opening sessions. - /// If not configured, all the supported protocols are automatically whitelisted. - /// The supported protocols are: ["tcp" , "udp", "tls", "quic", "ws", "unixsock-stream"] - /// For example, to only enable "tls" and "quic": - // protocols: ["tls", "quic"], - /// Configure the zenoh TX parameters of a link - tx: { - /// The resolution in bits to be used for the message sequence numbers. - /// When establishing a session with another Zenoh instance, the lowest value of the two instances will be used. - /// Accepted values: 8bit, 16bit, 32bit, 64bit. - sequence_number_resolution: "32bit", - /// Link lease duration in milliseconds to announce to other zenoh nodes - lease: 10000, - /// Number of keep-alive messages in a link lease duration. If no data is sent, keep alive - /// messages will be sent at the configured time interval. - /// NOTE: In order to consider eventual packet loss and transmission latency and jitter, - /// set the actual keep_alive timeout to one fourth of the lease time. 
- /// This is in-line with the ITU-T G.8013/Y.1731 specification on continous connectivity - /// check which considers a link as failed when no messages are received in 3.5 times the - /// target interval. - keep_alive: 4, - /// Batch size in bytes is expressed as a 16bit unsigned integer. - /// Therefore, the maximum batch size is 2^16-1 (i.e. 65535). - /// The default batch size value is the maximum batch size: 65535. - batch_size: 65535, - /// Each zenoh link has a transmission queue that can be configured - queue: { - /// The size of each priority queue indicates the number of batches a given queue can contain. - /// The amount of memory being allocated for each queue is then SIZE_XXX * BATCH_SIZE. - /// In the case of the transport link MTU being smaller than the ZN_BATCH_SIZE, - /// then amount of memory being allocated for each queue is SIZE_XXX * LINK_MTU. - /// If qos is false, then only the DATA priority will be allocated. - size: { - control: 1, - real_time: 1, - interactive_high: 1, - interactive_low: 1, - data_high: 2, - data: 4, - data_low: 4, - background: 4, - }, - /// The initial exponential backoff time in nanoseconds to allow the batching to eventually progress. - /// Higher values lead to a more aggressive batching but it will introduce additional latency. - backoff: 100, - // Number of threads dedicated to transmission - // By default, the number of threads is calculated as follows: 1 + ((#cores - 1) / 4) - // threads: 4, - }, - }, - /// Configure the zenoh RX parameters of a link - rx: { - /// Receiving buffer size in bytes for each link - /// The default the rx_buffer_size value is the same as the default batch size: 65335. - /// For very high throughput scenarios, the rx_buffer_size can be increased to accomodate - /// more in-flight data. This is particularly relevant when dealing with large messages. - /// E.g. for 16MiB rx_buffer_size set the value to: 16777216. - buffer_size: 65535, - /// Maximum size of the defragmentation buffer at receiver end. - /// Fragmented messages that are larger than the configured size will be dropped. - /// The default value is 1GiB. This would work in most scenarios. - /// NOTE: reduce the value if you are operating on a memory constrained device. - max_message_size: 1073741824, - }, - /// Configure TLS specific parameters - tls: { - /// Path to the certificate of the certificate authority used to validate either the server - /// or the client's keys and certificates, depending on the node's mode. If not specified - /// on router mode then the default WebPKI certificates are used instead. - root_ca_certificate: null, - /// Path to the TLS server private key - server_private_key: null, - /// Path to the TLS server public certificate - server_certificate: null, - /// Client authentication, if true enables mTLS (mutual authentication) - client_auth: false, - /// Path to the TLS client private key - client_private_key: null, - /// Path to the TLS client public certificate - client_certificate: null, - // Whether or not to use server name verification, if set to false zenoh will disregard the common names of the certificates when verifying servers. - // This could be dangerous because your CA can have signed a server cert for foo.com, that's later being used to host a server at baz.com. If you wan't your - // ca to verify that the server at baz.com is actually baz.com, let this be true (default). 
- server_name_verification: null, - }, - }, - /// Shared memory configuration - shared_memory: { - enabled: false, - }, - /// Access control configuration - auth: { - /// The configuration of authentification. - /// A password implies a username is required. - usrpwd: { - user: null, - password: null, - /// The path to a file containing the user password dictionary - dictionary_file: null, - }, - pubkey: { - public_key_pem: null, - private_key_pem: null, - public_key_file: null, - private_key_file: null, - key_size: null, - known_keys_file: null, - }, - }, - acl: { - enabled: true, - default_deny: true, - policy_list: { - "policy_definition": "UserId", - "ruleset": [ - { - "attribute": "UserId", - "rules": [ - { - "sub": "aaa3b411006ad57868988f9fec672a31", - "ke": "test/thr", - "action": "Write", - "permission": true - }, - { - "sub": "bbb3b411006ad57868988f9fec672a31", - "ke": "test/thr", - "action": "Read", - "permission": true - }, - { - "sub": "bbb3b411006ad57868988f9fec672a31", - "ke": "test/thr", - "action": "Read", - "permission": true - }, - { - "sub": "aaabbb11006ad57868988f9fec672a31", - "ke": "test/thr", - "action": "Read", - "permission": true - }, - { - "sub": "aaabbb11006ad57868988f9fec672a31", - "ke": "test/thr", - "action": "Write", - "permission": true - } - ] - }, - { - "attribute": "NetworkType", - "rules": [ - { - "sub": "wlan0", - "ke": "test/thr", - "action": "Write", - "permission": true - }, - { - "sub": "wlan0", - "ke": "test/thr", - "action": "Read", - "permission": true - } - ] - }, - { - "attribute": "user_defined_attribute", - "rules": [ - { - "sub": "value_1", - "ke": "test/thr", - "action": "Write", - "permission": true - }, - { - "sub": "value_2", - "ke": "test/thr", - "action": "Read", - "permission": true - } - ] - } - ] - }, - }, - }, - /// Configure the Admin Space - /// Unstable: this configuration part works as advertised, but may change in a future release - adminspace: { - // read and/or write permissions on the admin space - permissions: { - read: true, - write: false, - }, - }, - /// - /// Plugins configurations - /// - // /// Directories where plugins configured by name should be looked for. Plugins configured by __path__ are not subject to lookup - // plugins_search_dirs: [], - // /// Plugins are only loaded if present in the configuration. When starting - // /// Once loaded, they may react to changes in the configuration made through the zenoh instance's adminspace. - // plugins: { - // /// If no `__path__` is given to a plugin, zenohd will automatically search for a shared library matching the plugin's name (here, `libzenoh_plugin_rest.so` would be searched for on linux) - // - // /// Plugin settings may contain field `__config__` - // /// - If `__config__` is specified, it's content is merged into plugin configuration - // /// - Properties loaded from `__config__` file overrides existing properties - // /// - If json objects in loaded file contains `__config__` properties, they are processed recursively - // /// This is used in the 'storcge_manager' which supports subplugins, each with it's own config - // /// - // /// See below exapmle of plugin configuration using `__config__` property - // - // /// Configure the REST API plugin - // rest: { - // /// Setting this option to true allows zenohd to panic should it detect issues with this plugin. Setting it to false politely asks the plugin not to panic. 
- // __required__: true, // defaults to false - // /// load configuration from the file - // __config__: "./plugins/zenoh-plugin-rest/config.json5", - // /// http port to answer to rest requests - // http_port: 8000, - // }, - // - // /// Configure the storage manager plugin - // storage_manager: { - // /// When a path is present, automatic search is disabled, and zenohd will instead select the first path which manages to load. - // __path__: [ - // "./target/release/libzenoh_plugin_storage_manager.so", - // "./target/release/libzenoh_plugin_storage_manager.dylib", - // ], - // /// Directories where plugins configured by name should be looked for. Plugins configured by __path__ are not subject to lookup - // backend_search_dirs: [], - // /// The "memory" volume is always available, but you may create other volumes here, with various backends to support the actual storing. - // volumes: { - // /// An influxdb backend is also available at https://github.com/eclipse-zenoh/zenoh-backend-influxdb - // influxdb: { - // url: "https://myinfluxdb.example", - // /// Some plugins may need passwords in their configuration. - // /// To avoid leaking them through the adminspace, they may be masked behind a privacy barrier. - // /// any value held at the key "private" will not be shown in the adminspace. - // private: { - // username: "user1", - // password: "pw1", - // }, - // }, - // influxdb2: { - // /// A second backend of the same type can be spawned using `__path__`, for examples when different DBs are needed. - // backend: "influxdb", - // private: { - // username: "user2", - // password: "pw2", - // }, - // url: "https://localhost:8086", - // }, - // }, - // - // /// Configure the storages supported by the volumes - // storages: { - // demo: { - // /// Storages always need to know what set of keys they must work with. These sets are defined by a key expression. - // key_expr: "demo/memory/**", - // /// Storages also need to know which volume will be used to actually store their key-value pairs. - // /// The "memory" volume is always available, and doesn't require any per-storage options, so requesting "memory" by string is always sufficient. - // volume: "memory", - // }, - // demo2: { - // key_expr: "demo/memory2/**", - // volume: "memory", - // /// Storage manager plugin handles metadata in order to ensure convergence of distributed storages configured in Zenoh. - // /// Metadata includes the set of wild card updates and deletions (tombstones). - // /// Once the samples are guaranteed to be delivered, the metadata can be garbage collected. - // garbage_collection: { - // /// The garbage collection event will be periodic with this duration. - // /// The duration is specified in seconds. - // period: 30, - // /// Metadata older than this parameter will be garbage collected. - // /// The duration is specified in seconds. - // lifespan: 86400, - // }, - // /// If multiple storages subscribing to the same key_expr should be synchronized, declare them as replicas. - // /// In the absence of this configuration, a normal storage is initialized - // /// Note: all the samples to be stored in replicas should be timestamped - // replica_config: { - // /// Specifying the parameters is optional, by default the values provided will be used. 
- // /// Time interval between different synchronization attempts in seconds - // publication_interval: 5, - // /// Expected propagation delay of the network in milliseconds - // propagation_delay: 200, - // /// This is the chunk that you would like your data to be divide into in time, in milliseconds. - // /// Higher the frequency of updates, lower the delta should be chosen - // /// To be efficient, delta should be the time containing no more than 100,000 samples - // delta: 1000, - // } - // }, - // demo3: { - // key_expr: "demo/memory3/**", - // volume: "memory", - // /// A complete storage advertises itself as containing all the known keys matching the configured key expression. - // /// If not configured, complete defaults to false. - // complete: "true", - // }, - // influx_demo: { - // key_expr: "demo/influxdb/**", - // /// This prefix will be stripped of the received keys when storing. - // strip_prefix: "demo/influxdb", - // /// influxdb-backed volumes need a bit more configuration, which is passed like-so: - // volume: { - // id: "influxdb", - // db: "example", - // }, - // }, - // influx_demo2: { - // key_expr: "demo/influxdb2/**", - // strip_prefix: "demo/influxdb2", - // volume: { - // id: "influxdb2", - // db: "example", - // }, - // }, - // }, - // }, - // }, - // /// Plugin configuration example using `__config__` property - // plugins: { - // rest: { - // __config__: "./plugins/zenoh-plugin-rest/config.json5", - // }, - // storage_manager: { - // __config__: "./plugins/zenoh-plugin-storage-manager/config.json5", - // } - // }, -} From f9338437a946619471f7464971bd0211b9f47697 Mon Sep 17 00:00:00 2001 From: snehilzs Date: Wed, 28 Feb 2024 17:47:17 +0100 Subject: [PATCH 062/122] WIP:ACL with networkinterface --- DEFAULT_CONFIG.json5 | 0 1 file changed, 0 insertions(+), 0 deletions(-) mode change 100755 => 100644 DEFAULT_CONFIG.json5 diff --git a/DEFAULT_CONFIG.json5 b/DEFAULT_CONFIG.json5 old mode 100755 new mode 100644 From fb83d00d4970ecbe44f3912f00456d7efe387351 Mon Sep 17 00:00:00 2001 From: snehilzs Date: Wed, 28 Feb 2024 17:48:29 +0100 Subject: [PATCH 063/122] WIP:ACL with networkinterface --- examples/examples/z_pub_thr.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/examples/z_pub_thr.rs b/examples/examples/z_pub_thr.rs index 08d82d2852..df91a74cbc 100644 --- a/examples/examples/z_pub_thr.rs +++ b/examples/examples/z_pub_thr.rs @@ -28,7 +28,7 @@ fn main() { prio = p.try_into().unwrap(); } - let payload_size: usize = 1024; + let payload_size: usize = args.payload_size; let data: Value = (0..payload_size) .map(|i| (i % 10) as u8) From 9257ac709bcc53070364aa53ff27cc669e31c903 Mon Sep 17 00:00:00 2001 From: snehilzs Date: Thu, 29 Feb 2024 09:05:27 +0100 Subject: [PATCH 064/122] WIP:Added multi-interface funcionality --- DEFAULT_CONFIG.json5 | 4 +- .../net/routing/interceptor/accesscontrol.rs | 158 ++++++++++++------ zenoh/src/net/routing/interceptor/authz.rs | 49 ------ zenoh/src/net/routing/interceptor/mod.rs | 3 +- 4 files changed, 114 insertions(+), 100 deletions(-) diff --git a/DEFAULT_CONFIG.json5 b/DEFAULT_CONFIG.json5 index 12d98591a2..c29a733d10 100644 --- a/DEFAULT_CONFIG.json5 +++ b/DEFAULT_CONFIG.json5 @@ -283,13 +283,13 @@ "attribute": "NetworkInterface", "rules": [ { - "subject": "wifi0", + "subject": "lo0", "ke": "test/thr", "action": "Pub", "permission": "Allow" }, { - "subject": "wifi0", + "subject": "lo0", "ke": "test/thr", "action": "Sub", "permission": "Allow" diff --git a/zenoh/src/net/routing/interceptor/accesscontrol.rs 
b/zenoh/src/net/routing/interceptor/accesscontrol.rs index 44315ad65e..7dd1e63d4a 100644 --- a/zenoh/src/net/routing/interceptor/accesscontrol.rs +++ b/zenoh/src/net/routing/interceptor/accesscontrol.rs @@ -1,5 +1,4 @@ use std::sync::Arc; - use zenoh_protocol::{ network::{NetworkBody, NetworkMessage, Push}, zenoh::PushBody, @@ -14,15 +13,55 @@ use super::{ }; pub(crate) struct AclEnforcer { pub(crate) e: Arc, - pub(crate) interfaces: Option>, //to keep the interfaces + // pub(crate) interfaces: Option>, //to keep the interfaces } + +// pub(crate) fn acl_enforcer_init( +// config: &Config, +// ) -> ZResult> { + +// let res :InterceptorFactory; + +// let acl_config = config.transport().acl().clone(); + +// let mut acl_enabled = false; +// match acl_config.enabled { +// Some(val) => acl_enabled = val, +// None => { +// log::warn!("acl config not setup"); +// } +// } +// if acl_enabled { +// let mut interface_value = +// // let mut policy_enforcer = PolicyEnforcer::new(); +// match policy_enforcer.init(acl_config) { +// Ok(_) => res.push(Box::new(AclEnforcer { +// e: Arc::new(policy_enforcer), +// interfaces: None, +// })), +// Err(e) => log::error!( +// "access control enabled but not initialized with error {}!", +// e +// ), +// } +// } + +// let mut res: Vec = vec![]; + +// for ds in config { +// res.push(Box::new(DownsamplingInterceptorFactory::new(ds.clone()))); +// } + +// Ok(res) +// } + struct EgressAclEnforcer { pe: Arc, - subject: i32, + interface_list: Vec, } struct IngressAclEnforcer { pe: Arc, - subject: i32, + interface_list: Vec, } impl InterceptorFactoryTrait for AclEnforcer { @@ -30,42 +69,40 @@ impl InterceptorFactoryTrait for AclEnforcer { &self, transport: &TransportUnicast, ) -> (Option, Option) { - // let uid = transport.get_zid().unwrap(); - if let Some(interfaces) = &self.interfaces { - log::debug!( - "New downsampler transport unicast config interfaces: {:?}", - interfaces - ); - if let Ok(links) = transport.get_links() { - for link in links { - log::debug!( - "New downsampler transport unicast link interfaces: {:?}", - link.interfaces - ); - if !link.interfaces.iter().any(|x| interfaces.contains(x)) { - return (None, None); + let mut interface_list: Vec = Vec::new(); + if let Ok(links) = transport.get_links() { + println!("links are {:?}", links); + for link in links { + let e = self.e.clone(); + if let Some(sm) = &e.subject_map { + for i in link.interfaces { + let x = &Subject::NetworkInterface(i.to_string()); + if sm.contains_key(x) { + interface_list.push(*sm.get(x).unwrap()); + } } } } - }; - let interface_key = Subject::NetworkInterface("wifi0".to_string()); - - //get value from the subject_map - let e = self.e.clone(); - let mut interface_value = 0; - if let Some(sm) = &e.subject_map { - interface_value = *sm.get(&interface_key).unwrap(); } + // let interface_key = Subject::NetworkInterface("wifi0".to_string()); + + // //get value from the subject_map + // let e = self.e.clone(); + // let mut interface_value = 0; + // if let Some(sm) = &e.subject_map { + // interface_value = *sm.get(&interface_key).unwrap(); + // } + let pe = self.e.clone(); ( Some(Box::new(IngressAclEnforcer { pe: pe.clone(), - subject: interface_value, + interface_list: interface_list.clone(), })), Some(Box::new(EgressAclEnforcer { pe: pe.clone(), - subject: interface_value, + interface_list, })), ) } @@ -95,18 +132,24 @@ impl InterceptorTrait for IngressAclEnforcer { { let kexpr = ctx.full_expr().unwrap(); //add the cache here - let subject = self.subject; - - match self - .pe - 
.policy_decision_point(subject, Action::Pub, kexpr.to_string()) - { - Ok(decision) => { - if !decision { - return None; + let subject_list = &self.interface_list; + let mut decision = false; + for subject in subject_list { + match self + .pe + .policy_decision_point(*subject, Action::Pub, kexpr.to_string()) + { + Ok(val) => { + if val { + decision = val; + break; + } } + Err(_) => return None, } - Err(_) => return None, + } + if !decision { + return None; } } @@ -127,18 +170,39 @@ impl InterceptorTrait for EgressAclEnforcer { { let kexpr = ctx.full_expr().unwrap(); //add the cache here - let subject = self.subject; - match self - .pe - .policy_decision_point(subject, Action::Sub, kexpr.to_string()) - { - Ok(decision) => { - if !decision { - return None; + let subject_list = &self.interface_list; + let mut decision = false; + for subject in subject_list { + match self + .pe + .policy_decision_point(*subject, Action::Sub, kexpr.to_string()) + { + Ok(val) => { + if val { + decision = val; + break; + } } + Err(_) => return None, } - Err(_) => return None, } + if !decision { + return None; + } + // let kexpr = ctx.full_expr().unwrap(); //add the cache here + + // let subject = self.subject; + // match self + // .pe + // .policy_decision_point(subject, Action::Sub, kexpr.to_string()) + // { + // Ok(decision) => { + // if !decision { + // return None; + // } + // } + // Err(_) => return None, + // } } Some(ctx) diff --git a/zenoh/src/net/routing/interceptor/authz.rs b/zenoh/src/net/routing/interceptor/authz.rs index dd587cf3ae..78091b3277 100644 --- a/zenoh/src/net/routing/interceptor/authz.rs +++ b/zenoh/src/net/routing/interceptor/authz.rs @@ -23,62 +23,13 @@ pub enum Permission { pub struct PolicyForSubject(Vec>); //vec of actions over vec of permission for tree of ke for this pub struct PolicyMap(pub FxHashMap); //index of subject_map instead of subject -// #[derive(Debug, Clone, Serialize, Deserialize)] -// pub struct Request { -// pub(crate) sub: Subject, -// pub(crate) obj: String, -// pub(crate) action: Action, -// } - -// pub struct RequestBuilder { -// sub: Option, //removed Attribute -// obj: Option, -// action: Option, -// } - #[derive(Deserialize, Debug)] pub struct GetPolicy { policy_definition: String, ruleset: Vec, } type KeTreeRule = KeBoxTree; -/* -impl RequestBuilder { - pub fn default() -> Self { - RequestBuilder { - sub: None, - obj: None, - action: None, - } - } - pub fn new() -> Self { - RequestBuilder::default() - } - - pub fn sub(&mut self, sub: impl Into) -> &mut Self { - let _ = self.sub.insert(sub.into()); - self - } - pub fn obj(&mut self, obj: impl Into) -> &mut Self { - let _ = self.obj.insert(obj.into()); - self - } - - pub fn action(&mut self, action: impl Into) -> &mut Self { - let _ = self.action.insert(action.into()); - self - } - - pub fn build(&mut self) -> ZResult { - let sub = self.sub.clone().unwrap(); - let obj = self.obj.clone().unwrap(); - let action = self.action.clone().unwrap(); - - Ok(Request { sub, obj, action }) - } -} -*/ pub struct PolicyEnforcer { pub(crate) acl_enabled: bool, pub(crate) default_deny: bool, diff --git a/zenoh/src/net/routing/interceptor/mod.rs b/zenoh/src/net/routing/interceptor/mod.rs index 0c2b3f0c35..c43e3d0792 100644 --- a/zenoh/src/net/routing/interceptor/mod.rs +++ b/zenoh/src/net/routing/interceptor/mod.rs @@ -62,7 +62,7 @@ pub(crate) fn interceptor_factories(config: &Config) -> ZResult ZResult res.push(Box::new(AclEnforcer { e: Arc::new(policy_enforcer), - interfaces: None, })), Err(e) => log::error!( "access control 
enabled but not initialized with error {}!", From 3cb97d2045390a77e5ffe3e36ce8a75cb558fd31 Mon Sep 17 00:00:00 2001 From: snehilzs Date: Thu, 29 Feb 2024 16:13:59 +0100 Subject: [PATCH 065/122] WIP: Improved code design for ACL --- DEFAULT_CONFIG.json5 | 14 +- commons/zenoh-config/src/lib.rs | 21 +-- .../net/routing/interceptor/accesscontrol.rs | 134 +++++++----------- zenoh/src/net/routing/interceptor/authz.rs | 107 +++++++------- zenoh/src/net/routing/interceptor/mod.rs | 33 +---- 5 files changed, 128 insertions(+), 181 deletions(-) diff --git a/DEFAULT_CONFIG.json5 b/DEFAULT_CONFIG.json5 index c29a733d10..bacfb9987b 100644 --- a/DEFAULT_CONFIG.json5 +++ b/DEFAULT_CONFIG.json5 @@ -275,17 +275,23 @@ }, acl: { enabled: true, - default_deny: true, + default_deny: false, policy_list: { "policy_definition": "NetworkInterface", "ruleset": [ { "attribute": "NetworkInterface", "rules": [ + { + "subject": "lo0", + "ke": "test/thr/a", + "action": "Put", + "permission": "Allow" + }, { "subject": "lo0", "ke": "test/thr", - "action": "Pub", + "action": "Put", "permission": "Allow" }, { @@ -297,8 +303,8 @@ ] } ] - }, - }, + } + } }, /// Configure the Admin Space /// Unstable: this configuration part works as advertised, but may change in a future release diff --git a/commons/zenoh-config/src/lib.rs b/commons/zenoh-config/src/lib.rs index 067e165b86..689d8d93c2 100644 --- a/commons/zenoh-config/src/lib.rs +++ b/commons/zenoh-config/src/lib.rs @@ -101,22 +101,22 @@ pub struct DownsamplingItemConf { #[derive(Serialize, Deserialize, Clone, Debug)] pub struct PolicyList { - policy_definition: String, - ruleset: Vec, + pub policy_definition: String, + pub ruleset: Vec, } #[derive(Debug, Serialize, Deserialize, Clone)] pub struct AttributeRules { - attribute: String, - rules: Vec, + pub attribute: String, + pub rules: Vec, } #[derive(Clone, Serialize, Debug, Deserialize)] pub struct AttributeRule { - subject: Subject, - ke: String, - action: Action, - permission: Permission, + pub subject: Subject, + pub ke: String, + pub action: Action, + pub permission: Permission, } #[derive(Clone, Debug, Serialize, Deserialize, Eq, Hash, PartialEq)] pub enum Permission { @@ -134,10 +134,11 @@ pub enum Subject { #[derive(Clone, Debug, Serialize, Deserialize, Eq, Hash, PartialEq)] pub enum Action { - Pub, + Put, Sub, + Get, + Reply, } - pub trait ConfigValidator: Send + Sync { fn check_config( &self, diff --git a/zenoh/src/net/routing/interceptor/accesscontrol.rs b/zenoh/src/net/routing/interceptor/accesscontrol.rs index 7dd1e63d4a..97ce8a8865 100644 --- a/zenoh/src/net/routing/interceptor/accesscontrol.rs +++ b/zenoh/src/net/routing/interceptor/accesscontrol.rs @@ -1,60 +1,22 @@ use std::sync::Arc; +use zenoh_config::{AclConfig, Action, Subject}; use zenoh_protocol::{ - network::{NetworkBody, NetworkMessage, Push}, - zenoh::PushBody, + network::{NetworkBody, NetworkMessage, Push, Request, Response}, + zenoh::{PushBody, RequestBody, ResponseBody}, }; +use zenoh_result::ZResult; use zenoh_transport::{multicast::TransportMulticast, unicast::TransportUnicast}; use crate::net::routing::RoutingContext; use super::{ - authz::{Action, PolicyEnforcer, Subject}, - EgressInterceptor, IngressInterceptor, InterceptorFactoryTrait, InterceptorTrait, + authz::PolicyEnforcer, EgressInterceptor, IngressInterceptor, InterceptorFactory, + InterceptorFactoryTrait, InterceptorTrait, }; pub(crate) struct AclEnforcer { pub(crate) e: Arc, - // pub(crate) interfaces: Option>, //to keep the interfaces } -// pub(crate) fn acl_enforcer_init( -// 
config: &Config, -// ) -> ZResult> { - -// let res :InterceptorFactory; - -// let acl_config = config.transport().acl().clone(); - -// let mut acl_enabled = false; -// match acl_config.enabled { -// Some(val) => acl_enabled = val, -// None => { -// log::warn!("acl config not setup"); -// } -// } -// if acl_enabled { -// let mut interface_value = -// // let mut policy_enforcer = PolicyEnforcer::new(); -// match policy_enforcer.init(acl_config) { -// Ok(_) => res.push(Box::new(AclEnforcer { -// e: Arc::new(policy_enforcer), -// interfaces: None, -// })), -// Err(e) => log::error!( -// "access control enabled but not initialized with error {}!", -// e -// ), -// } -// } - -// let mut res: Vec = vec![]; - -// for ds in config { -// res.push(Box::new(DownsamplingInterceptorFactory::new(ds.clone()))); -// } - -// Ok(res) -// } - struct EgressAclEnforcer { pe: Arc, interface_list: Vec, @@ -64,6 +26,34 @@ struct IngressAclEnforcer { interface_list: Vec, } +pub(crate) fn acl_interceptor_factories(acl_config: AclConfig) -> ZResult> { + let mut res: Vec = vec![]; + let mut acl_enabled = false; + match acl_config.enabled { + Some(val) => acl_enabled = val, + None => { + log::warn!("acl config is not setup"); + //return Ok(res); + } + } + if acl_enabled { + let mut policy_enforcer = PolicyEnforcer::new(); + match policy_enforcer.init(acl_config) { + Ok(_) => { + log::debug!("access control is enabled and initialized"); + res.push(Box::new(AclEnforcer { + e: Arc::new(policy_enforcer), + })) + } + Err(e) => log::error!( + "access control enabled but not initialized with error {}!", + e + ), + } + } + Ok(res) +} + impl InterceptorFactoryTrait for AclEnforcer { fn new_transport_unicast( &self, @@ -71,7 +61,7 @@ impl InterceptorFactoryTrait for AclEnforcer { ) -> (Option, Option) { let mut interface_list: Vec = Vec::new(); if let Ok(links) = transport.get_links() { - println!("links are {:?}", links); + log::debug!("acl interceptor links details {:?}", links); for link in links { let e = self.e.clone(); if let Some(sm) = &e.subject_map { @@ -84,16 +74,7 @@ impl InterceptorFactoryTrait for AclEnforcer { } } } - - // let interface_key = Subject::NetworkInterface("wifi0".to_string()); - - // //get value from the subject_map - // let e = self.e.clone(); - // let mut interface_value = 0; - // if let Some(sm) = &e.subject_map { - // interface_value = *sm.get(&interface_key).unwrap(); - // } - + log::debug!("log info"); let pe = self.e.clone(); ( Some(Box::new(IngressAclEnforcer { @@ -124,20 +105,28 @@ impl InterceptorTrait for IngressAclEnforcer { &self, ctx: RoutingContext, ) -> Option> { - //intercept msg and send it to PEP + let kexpr = ctx.full_expr().unwrap(); //add the cache here + let interface_list = &self.interface_list; + if let NetworkBody::Push(Push { payload: PushBody::Put(_), .. + }) + | NetworkBody::Request(Request { + payload: RequestBody::Put(_), + .. + }) + | NetworkBody::Response(Response { + payload: ResponseBody::Put(_), + .. 
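// NOTE: any message carrying a Put payload, whether it arrives as a Push, a
// Request or a Response, is checked below against the Action::Put rules for each
// of the link's interfaces before it is let through.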
}) = &ctx.msg.body { - let kexpr = ctx.full_expr().unwrap(); //add the cache here - - let subject_list = &self.interface_list; + // let action = Action::Put; let mut decision = false; - for subject in subject_list { + for subject in interface_list { match self .pe - .policy_decision_point(*subject, Action::Pub, kexpr.to_string()) + .policy_decision_point(*subject, Action::Put, kexpr.to_string()) { Ok(val) => { if val { @@ -162,17 +151,16 @@ impl InterceptorTrait for EgressAclEnforcer { &self, ctx: RoutingContext, ) -> Option> { - // intercept msg and send it to PEP + let kexpr = ctx.full_expr().unwrap(); //add the cache here + let interface_list = &self.interface_list; if let NetworkBody::Push(Push { payload: PushBody::Put(_), .. }) = &ctx.msg.body { - let kexpr = ctx.full_expr().unwrap(); //add the cache here - - let subject_list = &self.interface_list; + // let action = ; let mut decision = false; - for subject in subject_list { + for subject in interface_list { match self .pe .policy_decision_point(*subject, Action::Sub, kexpr.to_string()) @@ -189,20 +177,6 @@ impl InterceptorTrait for EgressAclEnforcer { if !decision { return None; } - // let kexpr = ctx.full_expr().unwrap(); //add the cache here - - // let subject = self.subject; - // match self - // .pe - // .policy_decision_point(subject, Action::Sub, kexpr.to_string()) - // { - // Ok(decision) => { - // if !decision { - // return None; - // } - // } - // Err(_) => return None, - // } } Some(ctx) diff --git a/zenoh/src/net/routing/interceptor/authz.rs b/zenoh/src/net/routing/interceptor/authz.rs index 78091b3277..10b8a8a46f 100644 --- a/zenoh/src/net/routing/interceptor/authz.rs +++ b/zenoh/src/net/routing/interceptor/authz.rs @@ -1,24 +1,12 @@ use rustc_hash::FxHashMap; -use serde::{Deserialize, Serialize}; -use std::hash::Hash; -use zenoh_config::AclConfig; +use serde::Deserialize; +use zenoh_config::{AclConfig, Action, AttributeRules, Permission, Subject}; use zenoh_keyexpr::keyexpr; use zenoh_keyexpr::keyexpr_tree::{IKeyExprTree, IKeyExprTreeMut, KeBoxTree}; use zenoh_result::ZResult; -#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq, Hash)] - -pub enum Action { - Pub, - Sub, -} - -#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq, Hash)] - -pub enum Permission { - Allow, - Deny, -} +const NUMBER_OF_ACTIONS: usize = 4; //size of Action enum (small but might change) +const NUMBER_OF_PERMISSIONS: usize = 2; //size of permission enum (fixed) pub struct PolicyForSubject(Vec>); //vec of actions over vec of permission for tree of ke for this pub struct PolicyMap(pub FxHashMap); //index of subject_map instead of subject @@ -40,33 +28,33 @@ pub struct PolicyEnforcer { #[derive(Debug, Clone)] pub struct PolicyInformation { - policy_definition: String, - attribute_list: Vec, //list of attribute names in string + _policy_definition: String, + _attribute_list: Vec, //list of attribute names in string subject_map: FxHashMap, policy_rules: Vec, } -#[derive(Debug, Deserialize, Clone)] -pub struct AttributeRules { - attribute: String, - rules: Vec, -} - -#[derive(Clone, Debug, Deserialize)] -pub struct AttributeRule { - subject: Subject, - ke: String, - action: Action, - permission: Permission, -} -use zenoh_config::ZenohId; - -#[derive(Serialize, Debug, Deserialize, Eq, PartialEq, Hash, Clone)] -#[serde(untagged)] -pub enum Subject { - UserId(ZenohId), - NetworkInterface(String), -} +// #[derive(Debug, Deserialize, Clone)] +// pub struct AttributeRules { +// attribute: String, +// rules: Vec, +// } + +// #[derive(Clone, 
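// NOTE: these commented-out definitions are superseded by the Subject, Action,
// Permission and rule types added to zenoh-config in this same patch, so the acl
// section of the configuration file now deserializes directly into the types the
// enforcer consumes.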
Debug, Deserialize)] +// pub struct AttributeRule { +// subject: Subject, +// ke: String, +// action: Action, +// permission: Permission, +// } +// use zenoh_config::ZenohId; + +// #[derive(Serialize, Debug, Deserialize, Eq, PartialEq, Hash, Clone)] +// #[serde(untagged)] +// pub enum Subject { +// UserId(ZenohId), +// NetworkInterface(String), +// } #[derive(Debug)] pub struct RequestInfo { pub sub: Vec, @@ -74,9 +62,6 @@ pub struct RequestInfo { pub action: Action, } -const ACTION_LENGTH: usize = 2; -const PERMISSION_LENGTH: usize = 2; - impl PolicyEnforcer { pub fn new() -> PolicyEnforcer { PolicyEnforcer { @@ -92,7 +77,7 @@ impl PolicyEnforcer { //insert values into the enforcer from the config file match acl_config.enabled { Some(val) => self.acl_enabled = val, - None => log::error!("acl config not setup"), + None => log::error!("acl config was not setup properly"), } match acl_config.default_deny { Some(val) => self.default_deny = val, @@ -110,9 +95,9 @@ impl PolicyEnforcer { let subject_map = policy_information.subject_map.clone(); for (_, index) in subject_map { let mut rule: PolicyForSubject = PolicyForSubject(Vec::new()); - for _i in 0..ACTION_LENGTH { + for _i in 0..NUMBER_OF_ACTIONS { let mut action_rule: Vec = Vec::new(); - for _j in 0..PERMISSION_LENGTH { + for _j in 0..NUMBER_OF_PERMISSIONS { let permission_rule = KeTreeRule::new(); // action_rule.push(permission_rule); @@ -152,13 +137,13 @@ impl PolicyEnforcer { .collect::>(); let complete_ruleset = policy_list_info.ruleset; - let mut attribute_list: Vec = Vec::new(); + let mut _attribute_list: Vec = Vec::new(); let mut policy_rules: Vec = Vec::new(); let mut subject_map = FxHashMap::default(); let mut counter = 1; //starting at 1 since 0 is default value in policy_check and should not match anything for attr_rule in complete_ruleset.iter() { if enforced_attributes.contains(&attr_rule.attribute.as_str()) { - attribute_list.push(attr_rule.attribute.clone()); + _attribute_list.push(attr_rule.attribute.clone()); policy_rules.push(attr_rule.clone()); for rule in attr_rule.rules.clone() { subject_map.insert(rule.subject, counter); @@ -167,31 +152,35 @@ impl PolicyEnforcer { } } - let policy_definition = policy_list_info.policy_definition; + let _policy_definition = policy_list_info.policy_definition; Ok(PolicyInformation { - policy_definition, - attribute_list, + _policy_definition, + _attribute_list, subject_map, policy_rules, }) } - pub fn policy_decision_point(&self, subject: i32, act: Action, kexpr: String) -> ZResult { - /* - need to decide policy for proper handling of the edge cases - what about default_deny vlaue from the policy file?? 
- if it is allow, the should be allowed if everything is NONE - */ + /* + checks each msg against the ACL ruleset for allow/deny + */ + + pub fn policy_decision_point( + &self, + subject: i32, + action: Action, + key_expr: String, + ) -> ZResult { match &self.policy_list { Some(policy_map) => { let ps = policy_map.0.get(&subject).unwrap(); - let perm_vec = &ps.0[act as usize]; + let perm_vec = &ps.0[action as usize]; //check for deny let deny_result = perm_vec[Permission::Deny as usize] - .nodes_including(keyexpr::new(&kexpr).unwrap()) + .nodes_including(keyexpr::new(&key_expr).unwrap()) .count(); if deny_result != 0 { return Ok(false); @@ -200,7 +189,7 @@ impl PolicyEnforcer { //check for allow let allow_result = perm_vec[Permission::Allow as usize] - .nodes_including(keyexpr::new(&kexpr).unwrap()) + .nodes_including(keyexpr::new(&key_expr).unwrap()) .count(); Ok(allow_result != 0) } diff --git a/zenoh/src/net/routing/interceptor/mod.rs b/zenoh/src/net/routing/interceptor/mod.rs index c43e3d0792..fbf1d1e844 100644 --- a/zenoh/src/net/routing/interceptor/mod.rs +++ b/zenoh/src/net/routing/interceptor/mod.rs @@ -19,11 +19,11 @@ //! [Click here for Zenoh's documentation](../zenoh/index.html) //! mod accesscontrol; -mod authz; -use std::sync::Arc; +use accesscontrol::acl_interceptor_factories; +mod authz; use super::RoutingContext; -use crate::net::routing::interceptor::{accesscontrol::AclEnforcer, authz::PolicyEnforcer}; + use zenoh_config::Config; use zenoh_protocol::network::NetworkMessage; use zenoh_result::ZResult; @@ -59,32 +59,9 @@ pub(crate) fn interceptor_factories(config: &Config) -> ZResult acl_enabled = val, - None => { - log::warn!("acl config not setup"); - } - } - if acl_enabled { - let mut policy_enforcer = PolicyEnforcer::new(); - match policy_enforcer.init(acl_config) { - Ok(_) => res.push(Box::new(AclEnforcer { - e: Arc::new(policy_enforcer), - })), - Err(e) => log::error!( - "access control enabled but not initialized with error {}!", - e - ), - } - } Ok(res) } From 9af653e7383f6755d0295586638abadce23a3f2c Mon Sep 17 00:00:00 2001 From: snehilzs Date: Fri, 1 Mar 2024 12:54:18 +0100 Subject: [PATCH 066/122] WIP: Modified for new config style --- DEFAULT_CONFIG.json5 | 150 ++++++++++++++---- commons/zenoh-config/src/defaults.rs | 4 +- commons/zenoh-config/src/lib.rs | 45 +++--- pub_config.json5 | 6 +- sub_config.json5 | 6 +- .../net/routing/interceptor/accesscontrol.rs | 22 +-- zenoh/src/net/routing/interceptor/authz.rs | 123 +++++--------- 7 files changed, 201 insertions(+), 155 deletions(-) diff --git a/DEFAULT_CONFIG.json5 b/DEFAULT_CONFIG.json5 index bacfb9987b..b547dc0786 100644 --- a/DEFAULT_CONFIG.json5 +++ b/DEFAULT_CONFIG.json5 @@ -274,36 +274,126 @@ }, }, acl: { - enabled: true, - default_deny: false, - policy_list: { - "policy_definition": "NetworkInterface", - "ruleset": [ - { - "attribute": "NetworkInterface", - "rules": [ - { - "subject": "lo0", - "ke": "test/thr/a", - "action": "Put", - "permission": "Allow" - }, - { - "subject": "lo0", - "ke": "test/thr", - "action": "Put", - "permission": "Allow" - }, - { - "subject": "lo0", - "ke": "test/thr", - "action": "Sub", - "permission": "Allow" - } - ] - } - ] - } + "enabled": true, + "blacklist": true, + "rules": [ + { + "interface": [ + "lo0", + "interface0", + "int1", + "int2" + ], + "key_expr": [ + "test/thr", + "test/demo", + "test/example/a/b", + "test/b/example/a/bone/activity/basin", + "some/demo/test/a", + "some/birth/example/a/bone/bit/airplane", + "some/demo/test/a/blood", + "test/example/a", + 
"test/authority/example/a/acoustics/board", + "some/b/example/a", + "some/room/example/net", + "test/example/a/ants/humidity", + "some/account/example/a/argument/humidity", + "test/b/example/a/info/birthday", + "test/b/example/org/info/humidity", + "some/info/example/net", + "some/demo/test/a", + "test/amusement/example/a/angle", + "some/b/example/a/baseball", + "test/b/example/org", + "some/b/example/a", + "test/b/example/a/basket/humidity", + "test/example/a", + "test/b/example/net/army/aunt/d", + "some/appliance/example/net/box", + "some/b/example/org/number", + "some/example/net/beginner/d", + "some/birthday/example/net", + "test/believe/example/a/battle", + "test/b/example/org/baseball/speedb", + "some/basket/example/a", + "some/b/example/net/birds", + "some/demo/test/a", + "test/bear/example/a/blow", + "test/b/example/net", + "some/demo/test/a/achiever/action", + "test/b/example/net", + "test/b/example/a", + "test/b/example/a/believe/temp", + "test/example/a/basketball", + "test/example/a/afternoon/d/bells", + "test/example/a/bubble/brick", + "test/b/example/a", + "test/boot/example/org/boat/board", + "test/b/example/a", + "test/room/example/a/c/d", + "some/b/example/org", + "some/b/example/a/box/book/temp", + "some/b/example/a/adjustment/temp", + "some/example/net/belief/afternoon", + "test/b/example/a/activity/info", + "some/b/example/org/sensor/arm", + "some/zenoh/example/org/bead/bridge", + "test/brother/example/a/bath", + "test/example/a", + "test/example/a/sensor", + "some/back/example/a/balance/bird/humidity", + "test/zenoh/example/a/box/action/humidity", + "test/b/example/a", + "some/demo/test/a/bedroom/temp", + "some/b/example/a/ball/humidity", + "test/airplane/example/a/art/animal", + "some/example/net", + "test/b/example/a", + "some/demo/test/a/baseball/achiever", + "some/demo/test/a/berry/arch/temp", + "test/arithmetic/example/a/basket", + "some/example/net/art/bikes/humidity", + "some/demo/test/a/bedroom", + "some/demo/test/a", + "some/appliance/example/a", + "test/b/example/a", + "test/b/example/a/agreement", + "some/example/net/bird/sound", + "test/b/example/a/argument/info/basket", + "some/b/example/a/balance/boundary", + "some/arch/example/a/argument", + "some/demo/test/a/zenoh/brake", + "test/b/example/a/bath/brass", + "some/anger/example/net", + "test/b/example/a/boat/humidity", + "some/demo/test/a/b/c", + "test/b/example/a/brother/temp", + "test/b/example/a", + "some/b/example/a", + "test/b/example/org", + "some/b/example/a/amount/b", + "some/b/example/org/heat/humidity", + "some/demo/test/a", + "some/b/example/edu/activity", + "some/argument/example/a/suggest/humidity", + "test/example/a/believe/anger/humidity", + "test/b/example/a/sensor/b/c", + "test/example/edu/agreement", + "test/example/org", + "some/demo/test/a", + "test/b/example/a/airplane/wing", + "test/b/example/a", + "some/b/example/net/beef/bedroom/temp", + "test/b/example/a/blade/angle", + ], + "action": [ + "Put", + "Sub", + "Get" + ], + "permission": "Allow" + } + ] } }, /// Configure the Admin Space diff --git a/commons/zenoh-config/src/defaults.rs b/commons/zenoh-config/src/defaults.rs index ab1bad15fd..c0fcd4b9f5 100644 --- a/commons/zenoh-config/src/defaults.rs +++ b/commons/zenoh-config/src/defaults.rs @@ -216,8 +216,8 @@ impl Default for AclConfig { fn default() -> Self { Self { enabled: Some(false), - default_deny: Some(false), - policy_list: None, + blacklist: Some(false), + rules: None, } } } diff --git a/commons/zenoh-config/src/lib.rs b/commons/zenoh-config/src/lib.rs index 
689d8d93c2..30ecce6fef 100644 --- a/commons/zenoh-config/src/lib.rs +++ b/commons/zenoh-config/src/lib.rs @@ -97,39 +97,32 @@ pub struct DownsamplingItemConf { pub flow: DownsamplingFlow, } -//adding for authz structs - -#[derive(Serialize, Deserialize, Clone, Debug)] -pub struct PolicyList { - pub policy_definition: String, - pub ruleset: Vec, -} - -#[derive(Debug, Serialize, Deserialize, Clone)] -pub struct AttributeRules { - pub attribute: String, - pub rules: Vec, -} - -#[derive(Clone, Serialize, Debug, Deserialize)] -pub struct AttributeRule { - pub subject: Subject, - pub ke: String, - pub action: Action, - pub permission: Permission, -} +//adding datatypes needed for ACL Config #[derive(Clone, Debug, Serialize, Deserialize, Eq, Hash, PartialEq)] pub enum Permission { Allow, Deny, } +#[derive(Serialize, Debug, Deserialize, Clone)] + +pub struct ConfigRule { + pub interface: Vec, + pub key_expr: Vec, + pub action: Vec, + pub permission: Permission, +} +#[derive(Clone, Serialize, Debug, Deserialize)] +pub struct PolicyRule { + pub subject: Subject, //Subject + pub key_expr: String, + pub action: Action, //Action + pub permission: Permission, //Permission +} #[derive(Serialize, Debug, Deserialize, Eq, PartialEq, Hash, Clone)] #[serde(untagged)] pub enum Subject { - UserId(ZenohId), - NetworkInterface(String), //clarify - MetadataType(String), //clarify + Interface(String), } #[derive(Clone, Debug, Serialize, Deserialize, Eq, Hash, PartialEq)] @@ -455,8 +448,8 @@ validated_struct::validator! { }, pub acl: AclConfig { pub enabled: Option, - pub default_deny: Option, - pub policy_list: Option + pub blacklist: Option, + pub rules: Option> } }, /// Configuration of the admin space. diff --git a/pub_config.json5 b/pub_config.json5 index 2b9d95703f..8c0a07b077 100644 --- a/pub_config.json5 +++ b/pub_config.json5 @@ -108,9 +108,9 @@ // /// Configure internal transport parameters transport: { acl: { - enabled: false, - default_deny: false, - policy_list: null, + "enabled": false, + "blacklist": false, + "rules": null, }, } // transport: { diff --git a/sub_config.json5 b/sub_config.json5 index 145fa33444..9cb7be07bc 100644 --- a/sub_config.json5 +++ b/sub_config.json5 @@ -31,9 +31,9 @@ }, transport: { acl: { - enabled: false, - default_deny: false, - policy_list: null, + "enabled": false, + "blacklist": false, + "rules": null, }, } // /// Configure the scouting mechanisms and their behaviours diff --git a/zenoh/src/net/routing/interceptor/accesscontrol.rs b/zenoh/src/net/routing/interceptor/accesscontrol.rs index 97ce8a8865..21f54033ba 100644 --- a/zenoh/src/net/routing/interceptor/accesscontrol.rs +++ b/zenoh/src/net/routing/interceptor/accesscontrol.rs @@ -66,7 +66,7 @@ impl InterceptorFactoryTrait for AclEnforcer { let e = self.e.clone(); if let Some(sm) = &e.subject_map { for i in link.interfaces { - let x = &Subject::NetworkInterface(i.to_string()); + let x = &Subject::Interface(i); if sm.contains_key(x) { interface_list.push(*sm.get(x).unwrap()); } @@ -105,7 +105,10 @@ impl InterceptorTrait for IngressAclEnforcer { &self, ctx: RoutingContext, ) -> Option> { - let kexpr = ctx.full_expr().unwrap(); //add the cache here + let kexpr = match ctx.full_expr() { + Some(val) => val, + None => return None, + }; //add the cache here let interface_list = &self.interface_list; if let NetworkBody::Push(Push { @@ -124,10 +127,7 @@ impl InterceptorTrait for IngressAclEnforcer { // let action = Action::Put; let mut decision = false; for subject in interface_list { - match self - .pe - 
.policy_decision_point(*subject, Action::Put, kexpr.to_string()) - { + match self.pe.policy_decision_point(*subject, Action::Put, kexpr) { Ok(val) => { if val { decision = val; @@ -151,7 +151,10 @@ impl InterceptorTrait for EgressAclEnforcer { &self, ctx: RoutingContext, ) -> Option> { - let kexpr = ctx.full_expr().unwrap(); //add the cache here + let kexpr = match ctx.full_expr() { + Some(val) => val, + None => return None, + }; //add the cache here let interface_list = &self.interface_list; if let NetworkBody::Push(Push { payload: PushBody::Put(_), @@ -161,10 +164,7 @@ impl InterceptorTrait for EgressAclEnforcer { // let action = ; let mut decision = false; for subject in interface_list { - match self - .pe - .policy_decision_point(*subject, Action::Sub, kexpr.to_string()) - { + match self.pe.policy_decision_point(*subject, Action::Sub, kexpr) { Ok(val) => { if val { decision = val; diff --git a/zenoh/src/net/routing/interceptor/authz.rs b/zenoh/src/net/routing/interceptor/authz.rs index 10b8a8a46f..e943586394 100644 --- a/zenoh/src/net/routing/interceptor/authz.rs +++ b/zenoh/src/net/routing/interceptor/authz.rs @@ -1,6 +1,5 @@ use rustc_hash::FxHashMap; -use serde::Deserialize; -use zenoh_config::{AclConfig, Action, AttributeRules, Permission, Subject}; +use zenoh_config::{AclConfig, Action, ConfigRule, Permission, PolicyRule, Subject}; use zenoh_keyexpr::keyexpr; use zenoh_keyexpr::keyexpr_tree::{IKeyExprTree, IKeyExprTreeMut, KeBoxTree}; use zenoh_result::ZResult; @@ -11,11 +10,6 @@ const NUMBER_OF_PERMISSIONS: usize = 2; //size of permission enum (fixed) pub struct PolicyForSubject(Vec>); //vec of actions over vec of permission for tree of ke for this pub struct PolicyMap(pub FxHashMap); //index of subject_map instead of subject -#[derive(Deserialize, Debug)] -pub struct GetPolicy { - policy_definition: String, - ruleset: Vec, -} type KeTreeRule = KeBoxTree; pub struct PolicyEnforcer { @@ -28,38 +22,8 @@ pub struct PolicyEnforcer { #[derive(Debug, Clone)] pub struct PolicyInformation { - _policy_definition: String, - _attribute_list: Vec, //list of attribute names in string subject_map: FxHashMap, - policy_rules: Vec, -} - -// #[derive(Debug, Deserialize, Clone)] -// pub struct AttributeRules { -// attribute: String, -// rules: Vec, -// } - -// #[derive(Clone, Debug, Deserialize)] -// pub struct AttributeRule { -// subject: Subject, -// ke: String, -// action: Action, -// permission: Permission, -// } -// use zenoh_config::ZenohId; - -// #[derive(Serialize, Debug, Deserialize, Eq, PartialEq, Hash, Clone)] -// #[serde(untagged)] -// pub enum Subject { -// UserId(ZenohId), -// NetworkInterface(String), -// } -#[derive(Debug)] -pub struct RequestInfo { - pub sub: Vec, - pub ke: String, - pub action: Action, + policy_rules: Vec, } impl PolicyEnforcer { @@ -79,21 +43,19 @@ impl PolicyEnforcer { Some(val) => self.acl_enabled = val, None => log::error!("acl config was not setup properly"), } - match acl_config.default_deny { + match acl_config.blacklist { Some(val) => self.default_deny = val, None => log::error!("error default_deny not setup"), } if self.acl_enabled { - match acl_config.policy_list { + match acl_config.rules { Some(policy_list) => { let policy_information = self.policy_information_point(policy_list)?; - self.subject_map = Some(policy_information.subject_map.clone()); + let subject_map = policy_information.subject_map; let mut main_policy: PolicyMap = PolicyMap(FxHashMap::default()); - //first initialize the vector of vectors (needed to maintain the indices) - let 
subject_map = policy_information.subject_map.clone(); - for (_, index) in subject_map { + for (_, index) in &subject_map { let mut rule: PolicyForSubject = PolicyForSubject(Vec::new()); for _i in 0..NUMBER_OF_ACTIONS { let mut action_rule: Vec = Vec::new(); @@ -104,21 +66,20 @@ impl PolicyEnforcer { } rule.0.push(action_rule); } - main_policy.0.insert(index, rule); + main_policy.0.insert(*index, rule); } - for rules in policy_information.policy_rules { - for rule in rules.rules { - //add values to the ketree as per the rules - //get the subject index - let index = policy_information.subject_map.get(&rule.subject).unwrap(); - main_policy.0.get_mut(index).unwrap().0[rule.action as usize] - [rule.permission as usize] - .insert(keyexpr::new(&rule.ke)?, true); - } + for rule in policy_information.policy_rules { + //add key-expression values to the ketree as per the policy rules + + let index = subject_map.get(&rule.subject).unwrap(); + main_policy.0.get_mut(index).unwrap().0[rule.action as usize] + [rule.permission as usize] + .insert(keyexpr::new(&rule.key_expr)?, true); } //add to the policy_enforcer self.policy_list = Some(main_policy); + self.subject_map = Some(subject_map); } None => log::error!("no policy list was specified"), } @@ -127,36 +88,38 @@ impl PolicyEnforcer { } pub fn policy_information_point( &self, - policy_list: zenoh_config::PolicyList, + config_rule_set: Vec, ) -> ZResult { - let value = serde_json::to_value(&policy_list).unwrap(); - let policy_list_info: GetPolicy = serde_json::from_value(value)?; - let enforced_attributes = policy_list_info - .policy_definition - .split(' ') - .collect::>(); - - let complete_ruleset = policy_list_info.ruleset; - let mut _attribute_list: Vec = Vec::new(); - let mut policy_rules: Vec = Vec::new(); - let mut subject_map = FxHashMap::default(); - let mut counter = 1; //starting at 1 since 0 is default value in policy_check and should not match anything - for attr_rule in complete_ruleset.iter() { - if enforced_attributes.contains(&attr_rule.attribute.as_str()) { - _attribute_list.push(attr_rule.attribute.clone()); - policy_rules.push(attr_rule.clone()); - for rule in attr_rule.rules.clone() { - subject_map.insert(rule.subject, counter); - counter += 1; + /* + get the list of policies from the config policymap + convert them into the subject format for the vec of rules + send the vec as part of policy information + also take the subject values to create the subject_map and pass that as part of poliy infomration + */ + //we need to convert the vector sets of rules into individual rules for each subject, key-expr, action, permission + let mut policy_rules: Vec = Vec::new(); + for config_rule in config_rule_set { + for subject in &config_rule.interface { + for action in &config_rule.action { + for key_expr in &config_rule.key_expr { + policy_rules.push(PolicyRule { + subject: Subject::Interface(subject.clone()), + key_expr: key_expr.clone(), + action: action.clone(), + permission: config_rule.permission.clone(), + }) + } } } } - - let _policy_definition = policy_list_info.policy_definition; - + //create subject map + let mut subject_map = FxHashMap::default(); + let mut counter = 1; //starting at 1 since 0 is initialized value in policy_check and should not match anything + for rule in policy_rules.iter() { + subject_map.insert(rule.subject.clone(), counter); + counter += 1; + } Ok(PolicyInformation { - _policy_definition, - _attribute_list, subject_map, policy_rules, }) @@ -170,7 +133,7 @@ impl PolicyEnforcer { &self, subject: i32, action: 
Action, - key_expr: String, + key_expr: &str, //String, ) -> ZResult { match &self.policy_list { Some(policy_map) => { From c2e41a14378da79bb4723843123c745f6061df01 Mon Sep 17 00:00:00 2001 From: snehilzs Date: Fri, 1 Mar 2024 13:26:15 +0100 Subject: [PATCH 067/122] WIP: Modified for new config style --- DEFAULT_CONFIG.json5 | 12 ++++++++++ commons/zenoh-config/src/lib.rs | 2 ++ examples/examples/z_sub_thr.rs | 2 +- .../net/routing/interceptor/accesscontrol.rs | 24 +++++++++++++++++-- zenoh/src/net/routing/interceptor/authz.rs | 11 ++++----- 5 files changed, 42 insertions(+), 9 deletions(-) diff --git a/DEFAULT_CONFIG.json5 b/DEFAULT_CONFIG.json5 index b547dc0786..b505cc0889 100644 --- a/DEFAULT_CONFIG.json5 +++ b/DEFAULT_CONFIG.json5 @@ -392,6 +392,18 @@ "Get" ], "permission": "Allow" + }, + { + "interface": [ + "lo0" + ], + "key_expr": [ + "test/thr" + ], + "action": [ + "Sub" + ], + "permission": "Allow" } ] } diff --git a/commons/zenoh-config/src/lib.rs b/commons/zenoh-config/src/lib.rs index 30ecce6fef..6a231e81a1 100644 --- a/commons/zenoh-config/src/lib.rs +++ b/commons/zenoh-config/src/lib.rs @@ -132,6 +132,8 @@ pub enum Action { Get, Reply, } +pub const NUMBER_OF_ACTIONS: usize = 4; //size of Action enum +pub const NUMBER_OF_PERMISSIONS: usize = 2; //size of permission enum pub trait ConfigValidator: Send + Sync { fn check_config( &self, diff --git a/examples/examples/z_sub_thr.rs b/examples/examples/z_sub_thr.rs index afdd07ed23..968dd6c618 100644 --- a/examples/examples/z_sub_thr.rs +++ b/examples/examples/z_sub_thr.rs @@ -81,7 +81,7 @@ fn main() { let session = zenoh::open(config).res().unwrap(); - let key_expr = "test/thr"; + let key_expr = "test/**"; let mut stats = Stats::new(n); let _sub = session diff --git a/zenoh/src/net/routing/interceptor/accesscontrol.rs b/zenoh/src/net/routing/interceptor/accesscontrol.rs index 21f54033ba..9458bae59d 100644 --- a/zenoh/src/net/routing/interceptor/accesscontrol.rs +++ b/zenoh/src/net/routing/interceptor/accesscontrol.rs @@ -124,7 +124,6 @@ impl InterceptorTrait for IngressAclEnforcer { .. }) = &ctx.msg.body { - // let action = Action::Put; let mut decision = false; for subject in interface_list { match self.pe.policy_decision_point(*subject, Action::Put, kexpr) { @@ -142,6 +141,28 @@ impl InterceptorTrait for IngressAclEnforcer { } } + if let NetworkBody::Request(Request { + payload: RequestBody::Query(_), + .. 
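// NOTE: Query requests are mapped to Action::Get, mirroring the Put check above;
// unless one of the link's interfaces has a matching Allow rule for Get, the
// query is dropped at ingress.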
+ }) = &ctx.msg.body + { + let mut decision = false; + for subject in interface_list { + match self.pe.policy_decision_point(*subject, Action::Get, kexpr) { + Ok(val) => { + if val { + decision = val; + break; + } + } + Err(_) => return None, + } + } + if !decision { + return None; + } + } + Some(ctx) } } @@ -178,7 +199,6 @@ impl InterceptorTrait for EgressAclEnforcer { return None; } } - Some(ctx) } } diff --git a/zenoh/src/net/routing/interceptor/authz.rs b/zenoh/src/net/routing/interceptor/authz.rs index e943586394..c9b7c6e9d2 100644 --- a/zenoh/src/net/routing/interceptor/authz.rs +++ b/zenoh/src/net/routing/interceptor/authz.rs @@ -1,12 +1,12 @@ use rustc_hash::FxHashMap; -use zenoh_config::{AclConfig, Action, ConfigRule, Permission, PolicyRule, Subject}; +use zenoh_config::{ + AclConfig, Action, ConfigRule, Permission, PolicyRule, Subject, NUMBER_OF_ACTIONS, + NUMBER_OF_PERMISSIONS, +}; use zenoh_keyexpr::keyexpr; use zenoh_keyexpr::keyexpr_tree::{IKeyExprTree, IKeyExprTreeMut, KeBoxTree}; use zenoh_result::ZResult; -const NUMBER_OF_ACTIONS: usize = 4; //size of Action enum (small but might change) -const NUMBER_OF_PERMISSIONS: usize = 2; //size of permission enum (fixed) - pub struct PolicyForSubject(Vec>); //vec of actions over vec of permission for tree of ke for this pub struct PolicyMap(pub FxHashMap); //index of subject_map instead of subject @@ -55,7 +55,7 @@ impl PolicyEnforcer { let subject_map = policy_information.subject_map; let mut main_policy: PolicyMap = PolicyMap(FxHashMap::default()); //first initialize the vector of vectors (needed to maintain the indices) - for (_, index) in &subject_map { + for index in subject_map.values() { let mut rule: PolicyForSubject = PolicyForSubject(Vec::new()); for _i in 0..NUMBER_OF_ACTIONS { let mut action_rule: Vec = Vec::new(); @@ -71,7 +71,6 @@ impl PolicyEnforcer { for rule in policy_information.policy_rules { //add key-expression values to the ketree as per the policy rules - let index = subject_map.get(&rule.subject).unwrap(); main_policy.0.get_mut(index).unwrap().0[rule.action as usize] [rule.permission as usize] From 47b2b9fbe01d464d8eb10139345d13e71d141676 Mon Sep 17 00:00:00 2001 From: snehilzs Date: Fri, 1 Mar 2024 19:17:21 +0100 Subject: [PATCH 068/122] WIP: Added changes for default behaviour --- Cargo.lock | 2 +- DEFAULT_CONFIG.json5 | 109 +------------ commons/zenoh-config/src/defaults.rs | 4 +- commons/zenoh-config/src/lib.rs | 24 +-- examples/examples/z_sub_thr.rs | 2 +- pub_config.json5 | 10 +- sub_config.json5 | 10 +- zenoh/Cargo.toml | 3 +- .../net/routing/interceptor/accesscontrol.rs | 123 ++++++++------- zenoh/src/net/routing/interceptor/authz.rs | 146 ++++++++++-------- 10 files changed, 176 insertions(+), 257 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ece93e040b..840a3e0876 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4564,6 +4564,7 @@ dependencies = [ name = "zenoh" version = "0.11.0-dev" dependencies = [ + "ahash", "async-global-executor", "async-std", "async-trait", @@ -4583,7 +4584,6 @@ dependencies = [ "petgraph", "rand 0.8.5", "regex", - "rustc-hash", "rustc_version 0.4.0", "serde", "serde_json", diff --git a/DEFAULT_CONFIG.json5 b/DEFAULT_CONFIG.json5 index b505cc0889..02679690c6 100644 --- a/DEFAULT_CONFIG.json5 +++ b/DEFAULT_CONFIG.json5 @@ -275,120 +275,17 @@ }, acl: { "enabled": true, - "blacklist": true, + "default_permission": "Deny", "rules": [ { "interface": [ - "lo0", - "interface0", - "int1", - "int2" + "lo0" ], "key_expr": [ - "test/thr", - "test/demo", - "test/example/a/b", - 
"test/b/example/a/bone/activity/basin", - "some/demo/test/a", - "some/birth/example/a/bone/bit/airplane", - "some/demo/test/a/blood", - "test/example/a", - "test/authority/example/a/acoustics/board", - "some/b/example/a", - "some/room/example/net", - "test/example/a/ants/humidity", - "some/account/example/a/argument/humidity", - "test/b/example/a/info/birthday", - "test/b/example/org/info/humidity", - "some/info/example/net", - "some/demo/test/a", - "test/amusement/example/a/angle", - "some/b/example/a/baseball", - "test/b/example/org", - "some/b/example/a", - "test/b/example/a/basket/humidity", - "test/example/a", - "test/b/example/net/army/aunt/d", - "some/appliance/example/net/box", - "some/b/example/org/number", - "some/example/net/beginner/d", - "some/birthday/example/net", - "test/believe/example/a/battle", - "test/b/example/org/baseball/speedb", - "some/basket/example/a", - "some/b/example/net/birds", - "some/demo/test/a", - "test/bear/example/a/blow", - "test/b/example/net", - "some/demo/test/a/achiever/action", - "test/b/example/net", - "test/b/example/a", - "test/b/example/a/believe/temp", - "test/example/a/basketball", - "test/example/a/afternoon/d/bells", - "test/example/a/bubble/brick", - "test/b/example/a", - "test/boot/example/org/boat/board", - "test/b/example/a", - "test/room/example/a/c/d", - "some/b/example/org", - "some/b/example/a/box/book/temp", - "some/b/example/a/adjustment/temp", - "some/example/net/belief/afternoon", - "test/b/example/a/activity/info", - "some/b/example/org/sensor/arm", - "some/zenoh/example/org/bead/bridge", - "test/brother/example/a/bath", - "test/example/a", - "test/example/a/sensor", - "some/back/example/a/balance/bird/humidity", - "test/zenoh/example/a/box/action/humidity", - "test/b/example/a", - "some/demo/test/a/bedroom/temp", - "some/b/example/a/ball/humidity", - "test/airplane/example/a/art/animal", - "some/example/net", - "test/b/example/a", - "some/demo/test/a/baseball/achiever", - "some/demo/test/a/berry/arch/temp", - "test/arithmetic/example/a/basket", - "some/example/net/art/bikes/humidity", - "some/demo/test/a/bedroom", - "some/demo/test/a", - "some/appliance/example/a", - "test/b/example/a", - "test/b/example/a/agreement", - "some/example/net/bird/sound", - "test/b/example/a/argument/info/basket", - "some/b/example/a/balance/boundary", - "some/arch/example/a/argument", - "some/demo/test/a/zenoh/brake", - "test/b/example/a/bath/brass", - "some/anger/example/net", - "test/b/example/a/boat/humidity", - "some/demo/test/a/b/c", - "test/b/example/a/brother/temp", - "test/b/example/a", - "some/b/example/a", - "test/b/example/org", - "some/b/example/a/amount/b", - "some/b/example/org/heat/humidity", - "some/demo/test/a", - "some/b/example/edu/activity", - "some/argument/example/a/suggest/humidity", - "test/example/a/believe/anger/humidity", - "test/b/example/a/sensor/b/c", - "test/example/edu/agreement", - "test/example/org", - "some/demo/test/a", - "test/b/example/a/airplane/wing", - "test/b/example/a", - "some/b/example/net/beef/bedroom/temp", - "test/b/example/a/blade/angle", + "test/thr" ], "action": [ "Put", - "Sub", "Get" ], "permission": "Allow" diff --git a/commons/zenoh-config/src/defaults.rs b/commons/zenoh-config/src/defaults.rs index c0fcd4b9f5..825bc9adb7 100644 --- a/commons/zenoh-config/src/defaults.rs +++ b/commons/zenoh-config/src/defaults.rs @@ -215,8 +215,8 @@ impl Default for SharedMemoryConf { impl Default for AclConfig { fn default() -> Self { Self { - enabled: Some(false), - blacklist: Some(false), + enabled: false, 
+ default_permission: Permission::Deny, rules: None, } } diff --git a/commons/zenoh-config/src/lib.rs b/commons/zenoh-config/src/lib.rs index 6a231e81a1..b2c5ca9cae 100644 --- a/commons/zenoh-config/src/lib.rs +++ b/commons/zenoh-config/src/lib.rs @@ -98,11 +98,7 @@ pub struct DownsamplingItemConf { } //adding datatypes needed for ACL Config -#[derive(Clone, Debug, Serialize, Deserialize, Eq, Hash, PartialEq)] -pub enum Permission { - Allow, - Deny, -} + #[derive(Serialize, Debug, Deserialize, Clone)] pub struct ConfigRule { @@ -123,6 +119,7 @@ pub struct PolicyRule { #[serde(untagged)] pub enum Subject { Interface(String), + //Username(String) } #[derive(Clone, Debug, Serialize, Deserialize, Eq, Hash, PartialEq)] @@ -130,10 +127,17 @@ pub enum Action { Put, Sub, Get, - Reply, + Queryable, } -pub const NUMBER_OF_ACTIONS: usize = 4; //size of Action enum -pub const NUMBER_OF_PERMISSIONS: usize = 2; //size of permission enum +pub const NUMBER_OF_ACTIONS: usize = 4; //size of Action enum (change according to Action size) + +#[derive(Clone, Debug, Serialize, Deserialize, Eq, Hash, PartialEq)] +pub enum Permission { + Allow, + Deny, +} +pub const NUMBER_OF_PERMISSIONS: usize = 2; //size of permission enum (permanently fixed to 2) + pub trait ConfigValidator: Send + Sync { fn check_config( &self, @@ -449,8 +453,8 @@ validated_struct::validator! { }, }, pub acl: AclConfig { - pub enabled: Option, - pub blacklist: Option, + pub enabled: bool, + pub default_permission: Permission, pub rules: Option> } }, diff --git a/examples/examples/z_sub_thr.rs b/examples/examples/z_sub_thr.rs index 968dd6c618..afdd07ed23 100644 --- a/examples/examples/z_sub_thr.rs +++ b/examples/examples/z_sub_thr.rs @@ -81,7 +81,7 @@ fn main() { let session = zenoh::open(config).res().unwrap(); - let key_expr = "test/**"; + let key_expr = "test/thr"; let mut stats = Stats::new(n); let _sub = session diff --git a/pub_config.json5 b/pub_config.json5 index 8c0a07b077..5059e01267 100644 --- a/pub_config.json5 +++ b/pub_config.json5 @@ -107,11 +107,11 @@ // // }, // /// Configure internal transport parameters transport: { - acl: { - "enabled": false, - "blacklist": false, - "rules": null, - }, + // acl: { + // "enabled": false, + // "blacklist": false, + // "rules": null, + // }, } // transport: { // unicast: { diff --git a/sub_config.json5 b/sub_config.json5 index 9cb7be07bc..8828cdbf55 100644 --- a/sub_config.json5 +++ b/sub_config.json5 @@ -30,11 +30,11 @@ ], }, transport: { - acl: { - "enabled": false, - "blacklist": false, - "rules": null, - }, + // acl: { + // "enabled": false, + // "blacklist": false, + // "rules": null, + // }, } // /// Configure the scouting mechanisms and their behaviours // scouting: { diff --git a/zenoh/Cargo.toml b/zenoh/Cargo.toml index ee448ae7aa..b8b51d2907 100644 --- a/zenoh/Cargo.toml +++ b/zenoh/Cargo.toml @@ -63,6 +63,7 @@ default = [ ] [dependencies] +ahash = { workspace = true } async-global-executor = { workspace = true } async-std = { workspace = true, features = ["attributes"] } async-trait = { workspace = true } @@ -105,8 +106,6 @@ zenoh-sync = { workspace = true } zenoh-transport = { workspace = true } zenoh-util = { workspace = true } zenoh-keyexpr = { workspace = true } - -rustc-hash = "1.1.0" [build-dependencies] rustc_version = { workspace = true } diff --git a/zenoh/src/net/routing/interceptor/accesscontrol.rs b/zenoh/src/net/routing/interceptor/accesscontrol.rs index 9458bae59d..e4f4d25cff 100644 --- a/zenoh/src/net/routing/interceptor/accesscontrol.rs +++ 
b/zenoh/src/net/routing/interceptor/accesscontrol.rs @@ -1,5 +1,5 @@ use std::sync::Arc; -use zenoh_config::{AclConfig, Action, Subject}; +use zenoh_config::{AclConfig, Action, Permission, Subject}; use zenoh_protocol::{ network::{NetworkBody, NetworkMessage, Push, Request, Response}, zenoh::{PushBody, RequestBody, ResponseBody}, @@ -20,37 +20,35 @@ pub(crate) struct AclEnforcer { struct EgressAclEnforcer { pe: Arc, interface_list: Vec, + default_decision: bool, } struct IngressAclEnforcer { pe: Arc, interface_list: Vec, + default_decision: bool, } pub(crate) fn acl_interceptor_factories(acl_config: AclConfig) -> ZResult> { let mut res: Vec = vec![]; - let mut acl_enabled = false; - match acl_config.enabled { - Some(val) => acl_enabled = val, - None => { - log::warn!("acl config is not setup"); - //return Ok(res); - } - } - if acl_enabled { + + if acl_config.enabled { let mut policy_enforcer = PolicyEnforcer::new(); match policy_enforcer.init(acl_config) { Ok(_) => { - log::debug!("access control is enabled and initialized"); + log::debug!("Access control is enabled and initialized"); res.push(Box::new(AclEnforcer { e: Arc::new(policy_enforcer), })) } Err(e) => log::error!( - "access control enabled but not initialized with error {}!", + "Access control enabled but not initialized with error {}!", e ), } + } else { + log::warn!("Access Control is disabled in config!"); } + Ok(res) } @@ -64,26 +62,34 @@ impl InterceptorFactoryTrait for AclEnforcer { log::debug!("acl interceptor links details {:?}", links); for link in links { let e = self.e.clone(); - if let Some(sm) = &e.subject_map { - for i in link.interfaces { - let x = &Subject::Interface(i); - if sm.contains_key(x) { - interface_list.push(*sm.get(x).unwrap()); + if let Some(subject_map) = &e.subject_map { + for face in link.interfaces { + let subject = &Subject::Interface(face); + match subject_map.get(subject) { + Some(val) => interface_list.push(*val), + None => continue, } } } } } - log::debug!("log info"); let pe = self.e.clone(); ( Some(Box::new(IngressAclEnforcer { pe: pe.clone(), interface_list: interface_list.clone(), + default_decision: match pe.default_permission { + Permission::Allow => true, + Permission::Deny => false, + }, })), Some(Box::new(EgressAclEnforcer { pe: pe.clone(), interface_list, + default_decision: match pe.default_permission { + Permission::Allow => true, + Permission::Deny => false, + }, })), ) } @@ -105,12 +111,7 @@ impl InterceptorTrait for IngressAclEnforcer { &self, ctx: RoutingContext, ) -> Option> { - let kexpr = match ctx.full_expr() { - Some(val) => val, - None => return None, - }; //add the cache here - let interface_list = &self.interface_list; - + let key_expr = ctx.full_expr()?; //TODO add caching if let NetworkBody::Push(Push { payload: PushBody::Put(_), .. @@ -124,37 +125,45 @@ impl InterceptorTrait for IngressAclEnforcer { .. }) = &ctx.msg.body { - let mut decision = false; - for subject in interface_list { - match self.pe.policy_decision_point(*subject, Action::Put, kexpr) { - Ok(val) => { - if val { - decision = val; - break; - } + let mut decision = self.default_decision; + + for subject in &self.interface_list { + match self.pe.policy_decision_point( + *subject, + Action::Put, + key_expr, + self.default_decision, + ) { + Ok(true) => { + decision = true; + break; } + Ok(false) => continue, Err(_) => return None, } } + if !decision { return None; } - } - - if let NetworkBody::Request(Request { + } else if let NetworkBody::Request(Request { payload: RequestBody::Query(_), .. 
}) = &ctx.msg.body { - let mut decision = false; - for subject in interface_list { - match self.pe.policy_decision_point(*subject, Action::Get, kexpr) { - Ok(val) => { - if val { - decision = val; - break; - } + let mut decision = self.default_decision; + for subject in &self.interface_list { + match self.pe.policy_decision_point( + *subject, + Action::Get, + key_expr, + self.default_decision, + ) { + Ok(true) => { + decision = true; + break; } + Ok(false) => continue, Err(_) => return None, } } @@ -162,7 +171,6 @@ impl InterceptorTrait for IngressAclEnforcer { return None; } } - Some(ctx) } } @@ -172,26 +180,25 @@ impl InterceptorTrait for EgressAclEnforcer { &self, ctx: RoutingContext, ) -> Option> { - let kexpr = match ctx.full_expr() { - Some(val) => val, - None => return None, - }; //add the cache here - let interface_list = &self.interface_list; + let key_expr = ctx.full_expr()?; //TODO add caching if let NetworkBody::Push(Push { payload: PushBody::Put(_), .. }) = &ctx.msg.body { - // let action = ; - let mut decision = false; - for subject in interface_list { - match self.pe.policy_decision_point(*subject, Action::Sub, kexpr) { - Ok(val) => { - if val { - decision = val; - break; - } + let mut decision = self.default_decision; + for subject in &self.interface_list { + match self.pe.policy_decision_point( + *subject, + Action::Sub, + key_expr, + self.default_decision, + ) { + Ok(true) => { + decision = true; + break; } + Ok(false) => continue, Err(_) => return None, } } diff --git a/zenoh/src/net/routing/interceptor/authz.rs b/zenoh/src/net/routing/interceptor/authz.rs index c9b7c6e9d2..edad87c95a 100644 --- a/zenoh/src/net/routing/interceptor/authz.rs +++ b/zenoh/src/net/routing/interceptor/authz.rs @@ -1,4 +1,5 @@ -use rustc_hash::FxHashMap; +use ahash::RandomState; +use std::collections::HashMap; use zenoh_config::{ AclConfig, Action, ConfigRule, Permission, PolicyRule, Subject, NUMBER_OF_ACTIONS, NUMBER_OF_PERMISSIONS, @@ -8,21 +9,21 @@ use zenoh_keyexpr::keyexpr_tree::{IKeyExprTree, IKeyExprTreeMut, KeBoxTree}; use zenoh_result::ZResult; pub struct PolicyForSubject(Vec>); //vec of actions over vec of permission for tree of ke for this -pub struct PolicyMap(pub FxHashMap); //index of subject_map instead of subject +pub struct PolicyList(pub HashMap); //index of subject_map instead of subject type KeTreeRule = KeBoxTree; pub struct PolicyEnforcer { pub(crate) acl_enabled: bool, - pub(crate) default_deny: bool, - pub(crate) subject_map: Option>, //should have all attribute names - pub(crate) policy_list: Option, + pub(crate) default_permission: Permission, + pub(crate) subject_map: Option>, + pub(crate) policy_list: Option, } #[derive(Debug, Clone)] pub struct PolicyInformation { - subject_map: FxHashMap, + subject_map: HashMap, policy_rules: Vec, } @@ -30,57 +31,55 @@ impl PolicyEnforcer { pub fn new() -> PolicyEnforcer { PolicyEnforcer { acl_enabled: true, - default_deny: true, + default_permission: Permission::Deny, subject_map: None, policy_list: None, } } pub fn init(&mut self, acl_config: AclConfig) -> ZResult<()> { - //returns Ok() for all good else returns Error - //insert values into the enforcer from the config file - match acl_config.enabled { - Some(val) => self.acl_enabled = val, - None => log::error!("acl config was not setup properly"), - } - match acl_config.blacklist { - Some(val) => self.default_deny = val, - None => log::error!("error default_deny not setup"), - } + self.acl_enabled = acl_config.enabled; + self.default_permission = acl_config.default_permission; 
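// The decision logic that `default_permission` introduces can be summarised as:
// an explicit Deny rule always wins, a permissive default or an explicit Allow
// rule grants access, and everything else is rejected. A minimal sketch of that
// precedence (illustrative only, not part of this patch; names are placeholders):
//
//     fn decide(explicit_deny: bool, explicit_allow: bool, default_is_allow: bool) -> bool {
//         if explicit_deny {
//             return false; // Deny rules are always given preference
//         }
//         if default_is_allow {
//             return true; // permissive default: the Allow lookup is skipped
//         }
//         explicit_allow // restrictive default: an explicit Allow rule is required
//     }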
if self.acl_enabled { - match acl_config.rules { - Some(policy_list) => { - let policy_information = self.policy_information_point(policy_list)?; + if let Some(rules) = acl_config.rules { + if rules.is_empty() { + log::warn!("ACL ruleset in config file is empty!!!"); + self.policy_list = None; + self.subject_map = None; + } + let policy_information = self.policy_information_point(rules)?; - let subject_map = policy_information.subject_map; - let mut main_policy: PolicyMap = PolicyMap(FxHashMap::default()); - //first initialize the vector of vectors (needed to maintain the indices) - for index in subject_map.values() { - let mut rule: PolicyForSubject = PolicyForSubject(Vec::new()); - for _i in 0..NUMBER_OF_ACTIONS { - let mut action_rule: Vec = Vec::new(); - for _j in 0..NUMBER_OF_PERMISSIONS { - let permission_rule = KeTreeRule::new(); - // - action_rule.push(permission_rule); - } - rule.0.push(action_rule); + let subject_map = policy_information.subject_map; + let mut main_policy: PolicyList = PolicyList(HashMap::default()); + //first initialize the vector of vectors (required to maintain the indices) + for index in subject_map.values() { + let mut rule: PolicyForSubject = PolicyForSubject(Vec::new()); + for _i in 0..NUMBER_OF_ACTIONS { + let mut action_rule: Vec = Vec::new(); + for _j in 0..NUMBER_OF_PERMISSIONS { + let permission_rule = KeTreeRule::new(); + // + action_rule.push(permission_rule); } - main_policy.0.insert(*index, rule); + rule.0.push(action_rule); } + main_policy.0.insert(*index, rule); + } - for rule in policy_information.policy_rules { - //add key-expression values to the ketree as per the policy rules - let index = subject_map.get(&rule.subject).unwrap(); - main_policy.0.get_mut(index).unwrap().0[rule.action as usize] - [rule.permission as usize] - .insert(keyexpr::new(&rule.key_expr)?, true); - } - //add to the policy_enforcer - self.policy_list = Some(main_policy); - self.subject_map = Some(subject_map); + for rule in policy_information.policy_rules { + //add key-expression values to the ketree as per the policy rules + if let Some(index) = subject_map.get(&rule.subject) { + if let Some(single_policy) = main_policy.0.get_mut(index) { + single_policy.0[rule.action as usize][rule.permission as usize] + .insert(keyexpr::new(&rule.key_expr)?, true); + } + }; } - None => log::error!("no policy list was specified"), + //add to the policy_enforcer + self.policy_list = Some(main_policy); + self.subject_map = Some(subject_map); + } else { + log::warn!("No ACL rules have been specified!!!"); } } Ok(()) @@ -90,12 +89,12 @@ impl PolicyEnforcer { config_rule_set: Vec, ) -> ZResult { /* - get the list of policies from the config policymap + get the list of policies from the config PolicyList convert them into the subject format for the vec of rules send the vec as part of policy information also take the subject values to create the subject_map and pass that as part of poliy infomration */ - //we need to convert the vector sets of rules into individual rules for each subject, key-expr, action, permission + //we need to convert the sets of rules into individual rules for each subject, key-expr, action, permission let mut policy_rules: Vec = Vec::new(); for config_rule in config_rule_set { for subject in &config_rule.interface { @@ -111,8 +110,7 @@ impl PolicyEnforcer { } } } - //create subject map - let mut subject_map = FxHashMap::default(); + let mut subject_map = HashMap::default(); let mut counter = 1; //starting at 1 since 0 is initialized value in policy_check and should 
not match anything for rule in policy_rules.iter() { subject_map.insert(rule.subject.clone(), counter); @@ -132,30 +130,44 @@ impl PolicyEnforcer { &self, subject: i32, action: Action, - key_expr: &str, //String, + key_expr: &str, + default_decision: bool, ) -> ZResult { match &self.policy_list { Some(policy_map) => { - let ps = policy_map.0.get(&subject).unwrap(); - let perm_vec = &ps.0[action as usize]; - - //check for deny + //let single_policy = policy_map.0.get(&subject).unwrap(); + match policy_map.0.get(&subject) { + Some(single_policy) => { + let perm_vec = &single_policy.0[action as usize]; - let deny_result = perm_vec[Permission::Deny as usize] - .nodes_including(keyexpr::new(&key_expr).unwrap()) - .count(); - if deny_result != 0 { - return Ok(false); + //explicit Deny rules are ALWAYS given preference + let deny_result = perm_vec[Permission::Deny as usize] + .nodes_including(keyexpr::new(&key_expr)?) + .count(); + if deny_result != 0 { + return Ok(false); + } + //if default_permission is Allow, ignore checks for Allow + if self.default_permission == Permission::Allow { + Ok(true) + } else { + let allow_result = perm_vec[Permission::Allow as usize] + .nodes_including(keyexpr::new(&key_expr)?) + .count(); + Ok(allow_result != 0) + } + } + None => Ok(default_decision), + } + } + None => { + //when list is empty + if self.default_permission == Permission::Allow { + Ok(true) + } else { + Ok(false) } - - //check for allow - - let allow_result = perm_vec[Permission::Allow as usize] - .nodes_including(keyexpr::new(&key_expr).unwrap()) - .count(); - Ok(allow_result != 0) } - None => Ok(false), } } } From 7cda03606efec9be5cb02e2bf4d35c928042a7b4 Mon Sep 17 00:00:00 2001 From: snehilzs Date: Sun, 3 Mar 2024 19:03:35 +0100 Subject: [PATCH 069/122] WIP: Added changes for default behaviour --- zenoh/src/net/routing/interceptor/authz.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/zenoh/src/net/routing/interceptor/authz.rs b/zenoh/src/net/routing/interceptor/authz.rs index edad87c95a..f2a7821755 100644 --- a/zenoh/src/net/routing/interceptor/authz.rs +++ b/zenoh/src/net/routing/interceptor/authz.rs @@ -135,7 +135,6 @@ impl PolicyEnforcer { ) -> ZResult { match &self.policy_list { Some(policy_map) => { - //let single_policy = policy_map.0.get(&subject).unwrap(); match policy_map.0.get(&subject) { Some(single_policy) => { let perm_vec = &single_policy.0[action as usize]; From 2430355928e4e964982094599dfaac8eb19bd73c Mon Sep 17 00:00:00 2001 From: snehilzs Date: Sun, 3 Mar 2024 19:40:14 +0100 Subject: [PATCH 070/122] WIP: Added changes for default behaviour --- .../{accesscontrol.rs => access_control.rs} | 28 +++++++++++++++---- .../{authz.rs => authorization.rs} | 26 +++++++---------- zenoh/src/net/routing/interceptor/mod.rs | 6 ++-- 3 files changed, 35 insertions(+), 25 deletions(-) rename zenoh/src/net/routing/interceptor/{accesscontrol.rs => access_control.rs} (85%) rename zenoh/src/net/routing/interceptor/{authz.rs => authorization.rs} (85%) diff --git a/zenoh/src/net/routing/interceptor/accesscontrol.rs b/zenoh/src/net/routing/interceptor/access_control.rs similarity index 85% rename from zenoh/src/net/routing/interceptor/accesscontrol.rs rename to zenoh/src/net/routing/interceptor/access_control.rs index e4f4d25cff..ef614455c8 100644 --- a/zenoh/src/net/routing/interceptor/accesscontrol.rs +++ b/zenoh/src/net/routing/interceptor/access_control.rs @@ -10,7 +10,7 @@ use zenoh_transport::{multicast::TransportMulticast, unicast::TransportUnicast}; use crate::net::routing::RoutingContext; 
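// For reference, a minimal configuration enforced by this pair of interceptors,
// following the example shipped in DEFAULT_CONFIG.json5 in this series (values are
// illustrative, not a recommendation):
//
//     acl: {
//         "enabled": true,
//         "default_permission": "Deny",
//         "rules": [
//             {
//                 "interface": ["lo0"],
//                 "key_expr": ["test/thr"],
//                 "action": ["Put", "Get"],
//                 "permission": "Allow"
//             }
//         ]
//     }
//
// Ingress checks Put and Get against the interfaces of the link a message arrives
// on, egress checks Sub before a Put payload is forwarded; with a Deny default,
// anything not covered by an Allow rule is dropped.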
use super::{ - authz::PolicyEnforcer, EgressInterceptor, IngressInterceptor, InterceptorFactory, + authorization::PolicyEnforcer, EgressInterceptor, IngressInterceptor, InterceptorFactory, InterceptorFactoryTrait, InterceptorTrait, }; pub(crate) struct AclEnforcer { @@ -35,7 +35,7 @@ pub(crate) fn acl_interceptor_factories(acl_config: AclConfig) -> ZResult { - log::debug!("Access control is enabled and initialized"); + log::info!("Access control is enabled and initialized"); res.push(Box::new(AclEnforcer { e: Arc::new(policy_enforcer), })) @@ -59,7 +59,6 @@ impl InterceptorFactoryTrait for AclEnforcer { ) -> (Option, Option) { let mut interface_list: Vec = Vec::new(); if let Ok(links) = transport.get_links() { - log::debug!("acl interceptor links details {:?}", links); for link in links { let e = self.e.clone(); if let Some(subject_map) = &e.subject_map { @@ -139,13 +138,18 @@ impl InterceptorTrait for IngressAclEnforcer { break; } Ok(false) => continue, - Err(_) => return None, + Err(e) => { + log::error!("Authorization incomplete due to error {}", e); + return None; + } } } if !decision { + log::warn!("Unauthorized to Put"); return None; } + log::info!("Authorized access to Put"); } else if let NetworkBody::Request(Request { payload: RequestBody::Query(_), .. @@ -164,12 +168,18 @@ impl InterceptorTrait for IngressAclEnforcer { break; } Ok(false) => continue, - Err(_) => return None, + Err(e) => { + log::error!("Authorization incomplete due to error {}", e); + return None; + } } } if !decision { + log::warn!("Unauthorized to Query/Get"); return None; } + + log::info!("Authorized access to Query"); } Some(ctx) } @@ -199,12 +209,18 @@ impl InterceptorTrait for EgressAclEnforcer { break; } Ok(false) => continue, - Err(_) => return None, + Err(e) => { + log::error!("Authorization incomplete due to error {}", e); + return None; + } } } if !decision { + log::warn!("Unauthorized to Sub"); return None; } + + log::info!("Authorized access to Sub"); } Some(ctx) } diff --git a/zenoh/src/net/routing/interceptor/authz.rs b/zenoh/src/net/routing/interceptor/authorization.rs similarity index 85% rename from zenoh/src/net/routing/interceptor/authz.rs rename to zenoh/src/net/routing/interceptor/authorization.rs index f2a7821755..afb8bf76e0 100644 --- a/zenoh/src/net/routing/interceptor/authz.rs +++ b/zenoh/src/net/routing/interceptor/authorization.rs @@ -9,7 +9,7 @@ use zenoh_keyexpr::keyexpr_tree::{IKeyExprTree, IKeyExprTreeMut, KeBoxTree}; use zenoh_result::ZResult; pub struct PolicyForSubject(Vec>); //vec of actions over vec of permission for tree of ke for this -pub struct PolicyList(pub HashMap); //index of subject_map instead of subject +pub struct PolicyMap(pub HashMap); //index of subject (i32) instead of subject (String) type KeTreeRule = KeBoxTree; @@ -17,7 +17,7 @@ pub struct PolicyEnforcer { pub(crate) acl_enabled: bool, pub(crate) default_permission: Permission, pub(crate) subject_map: Option>, - pub(crate) policy_list: Option, + pub(crate) policy_map: Option, } #[derive(Debug, Clone)] @@ -33,7 +33,7 @@ impl PolicyEnforcer { acl_enabled: true, default_permission: Permission::Deny, subject_map: None, - policy_list: None, + policy_map: None, } } @@ -44,13 +44,13 @@ impl PolicyEnforcer { if let Some(rules) = acl_config.rules { if rules.is_empty() { log::warn!("ACL ruleset in config file is empty!!!"); - self.policy_list = None; + self.policy_map = None; self.subject_map = None; } let policy_information = self.policy_information_point(rules)?; let subject_map = policy_information.subject_map; - 
let mut main_policy: PolicyList = PolicyList(HashMap::default()); + let mut main_policy: PolicyMap = PolicyMap(HashMap::default()); //first initialize the vector of vectors (required to maintain the indices) for index in subject_map.values() { let mut rule: PolicyForSubject = PolicyForSubject(Vec::new()); @@ -76,7 +76,7 @@ impl PolicyEnforcer { }; } //add to the policy_enforcer - self.policy_list = Some(main_policy); + self.policy_map = Some(main_policy); self.subject_map = Some(subject_map); } else { log::warn!("No ACL rules have been specified!!!"); @@ -88,13 +88,7 @@ impl PolicyEnforcer { &self, config_rule_set: Vec, ) -> ZResult { - /* - get the list of policies from the config PolicyList - convert them into the subject format for the vec of rules - send the vec as part of policy information - also take the subject values to create the subject_map and pass that as part of poliy infomration - */ - //we need to convert the sets of rules into individual rules for each subject, key-expr, action, permission + //convert the sets of rules from coifig format into individual rules for each subject, key-expr, action, permission let mut policy_rules: Vec = Vec::new(); for config_rule in config_rule_set { for subject in &config_rule.interface { @@ -111,7 +105,7 @@ impl PolicyEnforcer { } } let mut subject_map = HashMap::default(); - let mut counter = 1; //starting at 1 since 0 is initialized value in policy_check and should not match anything + let mut counter = 1; //starting at 1 since 0 is the init value and should not match anything for rule in policy_rules.iter() { subject_map.insert(rule.subject.clone(), counter); counter += 1; @@ -133,7 +127,7 @@ impl PolicyEnforcer { key_expr: &str, default_decision: bool, ) -> ZResult { - match &self.policy_list { + match &self.policy_map { Some(policy_map) => { match policy_map.0.get(&subject) { Some(single_policy) => { @@ -160,7 +154,7 @@ impl PolicyEnforcer { } } None => { - //when list is empty + //when list is present (not null) but empty if self.default_permission == Permission::Allow { Ok(true) } else { diff --git a/zenoh/src/net/routing/interceptor/mod.rs b/zenoh/src/net/routing/interceptor/mod.rs index fbf1d1e844..ddb191e6c9 100644 --- a/zenoh/src/net/routing/interceptor/mod.rs +++ b/zenoh/src/net/routing/interceptor/mod.rs @@ -18,10 +18,10 @@ //! //! [Click here for Zenoh's documentation](../zenoh/index.html) //! 
-mod accesscontrol; -use accesscontrol::acl_interceptor_factories; +mod access_control; +use access_control::acl_interceptor_factories; -mod authz; +mod authorization; use super::RoutingContext; use zenoh_config::Config; From 79858264e65ca3a7301757c89c228f03313a0e7c Mon Sep 17 00:00:00 2001 From: snehilzs Date: Mon, 4 Mar 2024 09:58:44 +0100 Subject: [PATCH 071/122] WIP: Cleaning code --- .../net/routing/interceptor/access_control.rs | 44 +++++++++---------- .../net/routing/interceptor/authorization.rs | 14 ++++-- 2 files changed, 32 insertions(+), 26 deletions(-) diff --git a/zenoh/src/net/routing/interceptor/access_control.rs b/zenoh/src/net/routing/interceptor/access_control.rs index ef614455c8..58915d90b3 100644 --- a/zenoh/src/net/routing/interceptor/access_control.rs +++ b/zenoh/src/net/routing/interceptor/access_control.rs @@ -14,16 +14,16 @@ use super::{ InterceptorFactoryTrait, InterceptorTrait, }; pub(crate) struct AclEnforcer { - pub(crate) e: Arc, + pub(crate) enforcer: Arc, } struct EgressAclEnforcer { - pe: Arc, + policy_enforcer: Arc, interface_list: Vec, default_decision: bool, } struct IngressAclEnforcer { - pe: Arc, + policy_enforcer: Arc, interface_list: Vec, default_decision: bool, } @@ -37,12 +37,12 @@ pub(crate) fn acl_interceptor_factories(acl_config: AclConfig) -> ZResult { log::info!("Access control is enabled and initialized"); res.push(Box::new(AclEnforcer { - e: Arc::new(policy_enforcer), + enforcer: Arc::new(policy_enforcer), })) } - Err(e) => log::error!( + Err(enforcer) => log::error!( "Access control enabled but not initialized with error {}!", - e + enforcer ), } } else { @@ -60,8 +60,8 @@ impl InterceptorFactoryTrait for AclEnforcer { let mut interface_list: Vec = Vec::new(); if let Ok(links) = transport.get_links() { for link in links { - let e = self.e.clone(); - if let Some(subject_map) = &e.subject_map { + let enforcer = self.enforcer.clone(); + if let Some(subject_map) = &enforcer.subject_map { for face in link.interfaces { let subject = &Subject::Interface(face); match subject_map.get(subject) { @@ -72,20 +72,20 @@ impl InterceptorFactoryTrait for AclEnforcer { } } } - let pe = self.e.clone(); + let policy_enforcer = self.enforcer.clone(); ( Some(Box::new(IngressAclEnforcer { - pe: pe.clone(), + policy_enforcer: policy_enforcer.clone(), interface_list: interface_list.clone(), - default_decision: match pe.default_permission { + default_decision: match policy_enforcer.default_permission { Permission::Allow => true, Permission::Deny => false, }, })), Some(Box::new(EgressAclEnforcer { - pe: pe.clone(), + policy_enforcer: policy_enforcer.clone(), interface_list, - default_decision: match pe.default_permission { + default_decision: match policy_enforcer.default_permission { Permission::Allow => true, Permission::Deny => false, }, @@ -127,7 +127,7 @@ impl InterceptorTrait for IngressAclEnforcer { let mut decision = self.default_decision; for subject in &self.interface_list { - match self.pe.policy_decision_point( + match self.policy_enforcer.policy_decision_point( *subject, Action::Put, key_expr, @@ -138,8 +138,8 @@ impl InterceptorTrait for IngressAclEnforcer { break; } Ok(false) => continue, - Err(e) => { - log::error!("Authorization incomplete due to error {}", e); + Err(enforcer) => { + log::error!("Authorization incomplete due to error {}", enforcer); return None; } } @@ -157,7 +157,7 @@ impl InterceptorTrait for IngressAclEnforcer { { let mut decision = self.default_decision; for subject in &self.interface_list { - match self.pe.policy_decision_point( 
+ match self.policy_enforcer.policy_decision_point( *subject, Action::Get, key_expr, @@ -168,8 +168,8 @@ impl InterceptorTrait for IngressAclEnforcer { break; } Ok(false) => continue, - Err(e) => { - log::error!("Authorization incomplete due to error {}", e); + Err(enforcer) => { + log::error!("Authorization incomplete due to error {}", enforcer); return None; } } @@ -198,7 +198,7 @@ impl InterceptorTrait for EgressAclEnforcer { { let mut decision = self.default_decision; for subject in &self.interface_list { - match self.pe.policy_decision_point( + match self.policy_enforcer.policy_decision_point( *subject, Action::Sub, key_expr, @@ -209,8 +209,8 @@ impl InterceptorTrait for EgressAclEnforcer { break; } Ok(false) => continue, - Err(e) => { - log::error!("Authorization incomplete due to error {}", e); + Err(enforcer) => { + log::error!("Authorization incomplete due to error {}", enforcer); return None; } } diff --git a/zenoh/src/net/routing/interceptor/authorization.rs b/zenoh/src/net/routing/interceptor/authorization.rs index afb8bf76e0..daa58fd4c2 100644 --- a/zenoh/src/net/routing/interceptor/authorization.rs +++ b/zenoh/src/net/routing/interceptor/authorization.rs @@ -37,6 +37,9 @@ impl PolicyEnforcer { } } + /* + initializes the policy_enforcer + */ pub fn init(&mut self, acl_config: AclConfig) -> ZResult<()> { self.acl_enabled = acl_config.enabled; self.default_permission = acl_config.default_permission; @@ -84,11 +87,14 @@ impl PolicyEnforcer { } Ok(()) } + + /* + converts the sets of rules from config format into individual rules for each subject, key-expr, action, permission + */ pub fn policy_information_point( &self, config_rule_set: Vec, ) -> ZResult { - //convert the sets of rules from coifig format into individual rules for each subject, key-expr, action, permission let mut policy_rules: Vec = Vec::new(); for config_rule in config_rule_set { for subject in &config_rule.interface { @@ -131,10 +137,10 @@ impl PolicyEnforcer { Some(policy_map) => { match policy_map.0.get(&subject) { Some(single_policy) => { - let perm_vec = &single_policy.0[action as usize]; + let permission_vec = &single_policy.0[action as usize]; //explicit Deny rules are ALWAYS given preference - let deny_result = perm_vec[Permission::Deny as usize] + let deny_result = permission_vec[Permission::Deny as usize] .nodes_including(keyexpr::new(&key_expr)?) .count(); if deny_result != 0 { @@ -144,7 +150,7 @@ impl PolicyEnforcer { if self.default_permission == Permission::Allow { Ok(true) } else { - let allow_result = perm_vec[Permission::Allow as usize] + let allow_result = permission_vec[Permission::Allow as usize] .nodes_including(keyexpr::new(&key_expr)?) .count(); Ok(allow_result != 0) From c6378d13a7b5d59a1002dc8051bac22bc09260b7 Mon Sep 17 00:00:00 2001 From: snehilzs Date: Tue, 5 Mar 2024 15:41:09 +0100 Subject: [PATCH 072/122] WIP: Config changes after discussion --- DEFAULT_CONFIG.json5 | 78 ++-- commons/zenoh-config/src/lib.rs | 9 +- examples/examples/z_pub_thr.rs | 2 +- pub_config.json5 | 413 ------------------ rules_test.json5 | 68 --- sub_config.json5 | 413 ------------------ zenoh/Cargo.toml | 3 +- .../net/routing/interceptor/access_control.rs | 42 +- 8 files changed, 77 insertions(+), 951 deletions(-) delete mode 100644 pub_config.json5 delete mode 100644 rules_test.json5 delete mode 100644 sub_config.json5 diff --git a/DEFAULT_CONFIG.json5 b/DEFAULT_CONFIG.json5 index 02679690c6..723d114b46 100644 --- a/DEFAULT_CONFIG.json5 +++ b/DEFAULT_CONFIG.json5 @@ -6,9 +6,9 @@ /// that zenoh runtime will use. 
/// If not set, a random unsigned 128bit integer will be used. /// WARNING: this id must be unique in your zenoh network. - id: "aaabbb11006ad57868988f9fec672a31", + // id: "1234567890abcdef", /// The node's mode (router, peer or client) - mode: "router", + mode: "peer", /// The node's metadata (name, location, DNS name, etc.) Arbitrary JSON data not interpreted by zenohd and available in admin space @/router/ metadata: { name: "strawberry", location: "Penny Lane" }, /// Which endpoints to connect to. E.g. tcp/localhost:7447. /// By configuring the endpoints, it is possible to tell zenoh which router/peer to connect to at startup. + /// For TCP/UDP on Linux, it is possible to additionally specify the interface to connect through: + /// E.g. tcp/192.168.0.1:7447#iface=eth0, to connect only if the IP address is reachable via the interface eth0 connect: { endpoints: [ // "/
" @@ -24,6 +26,8 @@ /// Which endpoints to listen on. E.g. tcp/localhost:7447. /// By configuring the endpoints, it is possible to tell zenoh which are the endpoints that other routers, /// peers, or client can use to establish a zenoh session. + /// For TCP/UDP on Linux, it is possible additionally specify the interface to be listened to: + /// E.g. tcp/0.0.0.0:7447#iface=eth0, for listen connection only on eth0 listen: { endpoints: [ // "/
" @@ -209,10 +213,10 @@ /// The initial exponential backoff time in nanoseconds to allow the batching to eventually progress. /// Higher values lead to a more aggressive batching but it will introduce additional latency. backoff: 100, - // Number of threads dedicated to transmission - // By default, the number of threads is calculated as follows: 1 + ((#cores - 1) / 4) - // threads: 4, }, + // Number of threads dedicated to transmission + // By default, the number of threads is calculated as follows: 1 + ((#cores - 1) / 4) + // threads: 4, }, /// Configure the zenoh RX parameters of a link rx: { @@ -273,37 +277,39 @@ known_keys_file: null, }, }, - acl: { - "enabled": true, - "default_permission": "Deny", - "rules": [ - { - "interface": [ - "lo0" - ], - "key_expr": [ - "test/thr" - ], - "action": [ - "Put", - "Get" - ], - "permission": "Allow" - }, - { - "interface": [ - "lo0" - ], - "key_expr": [ - "test/thr" - ], - "action": [ - "Sub" - ], - "permission": "Allow" - } - ] - } + // acl: { + // "enabled": true, //[true/false] acl will be activated only if this is set to true + // "default_permission": "deny", //[deny/allow] default permission is deny (even if this is left empty or not specified) + // "rules": //[action,permission,key_expression,subject] + // [ + // { + // "action": [ + // "put", + // "get" + // ], + // "permission": "allow", + // "key_expr": [ + // "test/thr", + // "test/other" + // ], + // "interface": [ + // "lo0" + // ] + // }, + // { + // "action": [ + // "declare_sub" + // ], + // "permission": "allow", + // "interface": [ + // "lo0" + // ], + // "key_expr": [ + // "test/thr" + // ] + // } + // ] + // } }, /// Configure the Admin Space /// Unstable: this configuration part works as advertised, but may change in a future release diff --git a/commons/zenoh-config/src/lib.rs b/commons/zenoh-config/src/lib.rs index b2c5ca9cae..0b7e2cc404 100644 --- a/commons/zenoh-config/src/lib.rs +++ b/commons/zenoh-config/src/lib.rs @@ -117,21 +117,26 @@ pub struct PolicyRule { #[derive(Serialize, Debug, Deserialize, Eq, PartialEq, Hash, Clone)] #[serde(untagged)] +#[serde(rename_all = "snake_case")] + pub enum Subject { Interface(String), //Username(String) } #[derive(Clone, Debug, Serialize, Deserialize, Eq, Hash, PartialEq)] +#[serde(rename_all = "snake_case")] pub enum Action { Put, - Sub, + DeclareSub, Get, - Queryable, + DeclareQueryable, } pub const NUMBER_OF_ACTIONS: usize = 4; //size of Action enum (change according to Action size) #[derive(Clone, Debug, Serialize, Deserialize, Eq, Hash, PartialEq)] +#[serde(rename_all = "lowercase")] + pub enum Permission { Allow, Deny, diff --git a/examples/examples/z_pub_thr.rs b/examples/examples/z_pub_thr.rs index df91a74cbc..3e130e0608 100644 --- a/examples/examples/z_pub_thr.rs +++ b/examples/examples/z_pub_thr.rs @@ -28,7 +28,7 @@ fn main() { prio = p.try_into().unwrap(); } - let payload_size: usize = args.payload_size; + let payload_size = args.payload_size; let data: Value = (0..payload_size) .map(|i| (i % 10) as u8) diff --git a/pub_config.json5 b/pub_config.json5 deleted file mode 100644 index 5059e01267..0000000000 --- a/pub_config.json5 +++ /dev/null @@ -1,413 +0,0 @@ -/// This file attempts to list and document available configuration elements. -/// For a more complete view of the configuration's structure, check out `zenoh/src/config.rs`'s `Config` structure. -/// Note that the values here are correctly typed, but may not be sensible, so copying this file to change only the parts that matter to you is not good practice. 
-{ - /// The identifier (as unsigned 128bit integer in hexadecimal lowercase - leading zeros are not accepted) - /// that zenoh runtime will use. - /// If not set, a random unsigned 128bit integer will be used. - /// WARNING: this id must be unique in your zenoh network. - id: "aaa3b411006ad57868988f9fec672a31", - /// The node's mode (router, peer or client) - mode: "client", - /// The node's metadata (name, location, DNS name, etc.) Arbitrary JSON data not interpreted by zenohd and available in admin space @/router/ - metadata: { - name: "strawberry", - location: "Penny Lane" - }, - /// Which endpoints to connect to. E.g. tcp/localhost:7447. - /// By configuring the endpoints, it is possible to tell zenoh which router/peer to connect to at startup. - connect: { - endpoints: [ - // "/
" - ], - }, - /// Which endpoints to listen on. E.g. tcp/localhost:7447. - /// By configuring the endpoints, it is possible to tell zenoh which are the endpoints that other routers, - /// peers, or client can use to establish a zenoh session. - listen: { - endpoints: [ - // "/
" - ], - }, - // /// Configure the scouting mechanisms and their behaviours - // scouting: { - // /// In client mode, the period dedicated to scouting for a router before failing - // timeout: 3000, - // /// In peer mode, the period dedicated to scouting remote peers before attempting other operations - // delay: 200, - // /// The multicast scouting configuration. - // multicast: { - // /// Whether multicast scouting is enabled or not - // enabled: true, - // /// The socket which should be used for multicast scouting - // address: "224.0.0.224:7446", - // /// The network interface which should be used for multicast scouting - // interface: "auto", // If not set or set to "auto" the interface if picked automatically - // /// Which type of Zenoh instances to automatically establish sessions with upon discovery on UDP multicast. - // /// Accepts a single value or different values for router, peer and client. - // /// Each value is bit-or-like combinations of "peer", "router" and "client". - // autoconnect: { router: "", peer: "router|peer" }, - // /// Whether or not to listen for scout messages on UDP multicast and reply to them. - // listen: true, - // }, - // /// The gossip scouting configuration. - // gossip: { - // /// Whether gossip scouting is enabled or not - // enabled: true, - // /// When true, gossip scouting informations are propagated multiple hops to all nodes in the local network. - // /// When false, gossip scouting informations are only propagated to the next hop. - // /// Activating multihop gossip implies more scouting traffic and a lower scalability. - // /// It mostly makes sense when using "linkstate" routing mode where all nodes in the subsystem don't have - // /// direct connectivity with each other. - // multihop: false, - // /// Which type of Zenoh instances to automatically establish sessions with upon discovery on gossip. - // /// Accepts a single value or different values for router, peer and client. - // /// Each value is bit-or-like combinations of "peer", "router" and "client". - // autoconnect: { router: "", peer: "router|peer" }, - // }, - // }, - // /// Configuration of data messages timestamps management. - // timestamping: { - // /// Whether data messages should be timestamped if not already. - // /// Accepts a single boolean value or different values for router, peer and client. - // enabled: { router: true, peer: false, client: false }, - // /// Whether data messages with timestamps in the future should be dropped or not. - // /// If set to false (default), messages with timestamps in the future are retimestamped. - // /// Timestamps are ignored if timestamping is disabled. - // drop_future_timestamp: false, - // }, - // /// The default timeout to apply to queries in milliseconds. - // queries_default_timeout: 10000, - // /// The routing strategy to use and it's configuration. - // routing: { - // /// The routing strategy to use in routers and it's configuration. - // router: { - // /// When set to true a router will forward data between two peers - // /// directly connected to it if it detects that those peers are not - // /// connected to each other. - // /// The failover brokering only works if gossip discovery is enabled. - // peers_failover_brokering: true, - // }, - // /// The routing strategy to use in peers and it's configuration. - // peer: { - // /// The routing strategy to use in peers. ("peer_to_peer" or "linkstate"). - // mode: "peer_to_peer", - // }, - // }, - // // /// The declarations aggregation strategy. 
- // // aggregation: { - // // /// A list of key-expressions for which all included subscribers will be aggregated into. - // // subscribers: [ - // // // key_expression - // // ], - // // /// A list of key-expressions for which all included publishers will be aggregated into. - // // publishers: [ - // // // key_expression - // // ], - // // }, - // /// Configure internal transport parameters - transport: { - // acl: { - // "enabled": false, - // "blacklist": false, - // "rules": null, - // }, - } - // transport: { - // unicast: { - // /// Timeout in milliseconds when opening a link - // accept_timeout: 10000, - // /// Maximum number of zenoh session in pending state while accepting - // accept_pending: 100, - // /// Maximum number of sessions that can be simultaneously alive - // max_sessions: 1000, - // /// Maximum number of incoming links that are admitted per session - // max_links: 1, - // /// Enables the LowLatency transport - // /// This option does not make LowLatency transport mandatory, the actual implementation of transport - // /// used will depend on Establish procedure and other party's settings - // /// - // /// NOTE: Currently, the LowLatency transport doesn't preserve QoS prioritization. - // /// NOTE: Due to the note above, 'lowlatency' is incompatible with 'qos' option, so in order to - // /// enable 'lowlatency' you need to explicitly disable 'qos'. - // lowlatency: false, - // /// Enables QoS on unicast communications. - // qos: { - // enabled: true, - // }, - // /// Enables compression on unicast communications. - // /// Compression capabilities are negotiated during session establishment. - // /// If both Zenoh nodes support compression, then compression is activated. - // compression: { - // enabled: false, - // }, - // }, - // multicast: { - // /// Enables QoS on multicast communication. - // /// Default to false for Zenoh-to-Zenoh-Pico out-of-the-box compatibility. - // qos: { - // enabled: false, - // }, - // /// Enables compression on multicast communication. - // /// Default to false for Zenoh-to-Zenoh-Pico out-of-the-box compatibility. - // compression: { - // enabled: false, - // }, - // }, - // link: { - // /// An optional whitelist of protocols to be used for accepting and opening sessions. - // /// If not configured, all the supported protocols are automatically whitelisted. - // /// The supported protocols are: ["tcp" , "udp", "tls", "quic", "ws", "unixsock-stream"] - // /// For example, to only enable "tls" and "quic": - // // protocols: ["tls", "quic"], - // /// Configure the zenoh TX parameters of a link - // tx: { - // /// The resolution in bits to be used for the message sequence numbers. - // /// When establishing a session with another Zenoh instance, the lowest value of the two instances will be used. - // /// Accepted values: 8bit, 16bit, 32bit, 64bit. - // sequence_number_resolution: "32bit", - // /// Link lease duration in milliseconds to announce to other zenoh nodes - // lease: 10000, - // /// Number of keep-alive messages in a link lease duration. If no data is sent, keep alive - // /// messages will be sent at the configured time interval. - // /// NOTE: In order to consider eventual packet loss and transmission latency and jitter, - // /// set the actual keep_alive timeout to one fourth of the lease time. - // /// This is in-line with the ITU-T G.8013/Y.1731 specification on continous connectivity - // /// check which considers a link as failed when no messages are received in 3.5 times the - // /// target interval. 
- // keep_alive: 4, - // /// Batch size in bytes is expressed as a 16bit unsigned integer. - // /// Therefore, the maximum batch size is 2^16-1 (i.e. 65535). - // /// The default batch size value is the maximum batch size: 65535. - // batch_size: 65535, - // /// Each zenoh link has a transmission queue that can be configured - // queue: { - // /// The size of each priority queue indicates the number of batches a given queue can contain. - // /// The amount of memory being allocated for each queue is then SIZE_XXX * BATCH_SIZE. - // /// In the case of the transport link MTU being smaller than the ZN_BATCH_SIZE, - // /// then amount of memory being allocated for each queue is SIZE_XXX * LINK_MTU. - // /// If qos is false, then only the DATA priority will be allocated. - // size: { - // control: 1, - // real_time: 1, - // interactive_high: 1, - // interactive_low: 1, - // data_high: 2, - // data: 4, - // data_low: 4, - // background: 4, - // }, - // /// The initial exponential backoff time in nanoseconds to allow the batching to eventually progress. - // /// Higher values lead to a more aggressive batching but it will introduce additional latency. - // backoff: 100, - // // Number of threads dedicated to transmission - // // By default, the number of threads is calculated as follows: 1 + ((#cores - 1) / 4) - // // threads: 4, - // }, - // }, - // /// Configure the zenoh RX parameters of a link - // rx: { - // /// Receiving buffer size in bytes for each link - // /// The default the rx_buffer_size value is the same as the default batch size: 65335. - // /// For very high throughput scenarios, the rx_buffer_size can be increased to accomodate - // /// more in-flight data. This is particularly relevant when dealing with large messages. - // /// E.g. for 16MiB rx_buffer_size set the value to: 16777216. - // buffer_size: 65535, - // /// Maximum size of the defragmentation buffer at receiver end. - // /// Fragmented messages that are larger than the configured size will be dropped. - // /// The default value is 1GiB. This would work in most scenarios. - // /// NOTE: reduce the value if you are operating on a memory constrained device. - // max_message_size: 1073741824, - // }, - // /// Configure TLS specific parameters - // tls: { - // /// Path to the certificate of the certificate authority used to validate either the server - // /// or the client's keys and certificates, depending on the node's mode. If not specified - // /// on router mode then the default WebPKI certificates are used instead. - // root_ca_certificate: null, - // /// Path to the TLS server private key - // server_private_key: null, - // /// Path to the TLS server public certificate - // server_certificate: null, - // /// Client authentication, if true enables mTLS (mutual authentication) - // client_auth: false, - // /// Path to the TLS client private key - // client_private_key: null, - // /// Path to the TLS client public certificate - // client_certificate: null, - // // Whether or not to use server name verification, if set to false zenoh will disregard the common names of the certificates when verifying servers. - // // This could be dangerous because your CA can have signed a server cert for foo.com, that's later being used to host a server at baz.com. If you wan't your - // // ca to verify that the server at baz.com is actually baz.com, let this be true (default). 
- // server_name_verification: null, - // }, - // }, - // /// Shared memory configuration - // shared_memory: { - // enabled: false, - // }, - // /// Access control configuration - // auth: { - // /// The configuration of authentification. - // /// A password implies a username is required. - // usrpwd: { - // user: null, - // password: null, - // /// The path to a file containing the user password dictionary - // dictionary_file: null, - // }, - // pubkey: { - // public_key_pem: null, - // private_key_pem: null, - // public_key_file: null, - // private_key_file: null, - // key_size: null, - // known_keys_file: null, - // }, - // }, - // }, - // /// Configure the Admin Space - // /// Unstable: this configuration part works as advertised, but may change in a future release - // adminspace: { - // // read and/or write permissions on the admin space - // permissions: { - // read: true, - // write: false, - // }, - // }, - /// - /// Plugins configurations - /// - // /// Directories where plugins configured by name should be looked for. Plugins configured by __path__ are not subject to lookup - // plugins_search_dirs: [], - // /// Plugins are only loaded if present in the configuration. When starting - // /// Once loaded, they may react to changes in the configuration made through the zenoh instance's adminspace. - // plugins: { - // /// If no `__path__` is given to a plugin, zenohd will automatically search for a shared library matching the plugin's name (here, `libzenoh_plugin_rest.so` would be searched for on linux) - // - // /// Plugin settings may contain field `__config__` - // /// - If `__config__` is specified, it's content is merged into plugin configuration - // /// - Properties loaded from `__config__` file overrides existing properties - // /// - If json objects in loaded file contains `__config__` properties, they are processed recursively - // /// This is used in the 'storcge_manager' which supports subplugins, each with it's own config - // /// - // /// See below exapmle of plugin configuration using `__config__` property - // - // /// Configure the REST API plugin - // rest: { - // /// Setting this option to true allows zenohd to panic should it detect issues with this plugin. Setting it to false politely asks the plugin not to panic. - // __required__: true, // defaults to false - // /// load configuration from the file - // __config__: "./plugins/zenoh-plugin-rest/config.json5", - // /// http port to answer to rest requests - // http_port: 8000, - // }, - // - // /// Configure the storage manager plugin - // storage_manager: { - // /// When a path is present, automatic search is disabled, and zenohd will instead select the first path which manages to load. - // __path__: [ - // "./target/release/libzenoh_plugin_storage_manager.so", - // "./target/release/libzenoh_plugin_storage_manager.dylib", - // ], - // /// Directories where plugins configured by name should be looked for. Plugins configured by __path__ are not subject to lookup - // backend_search_dirs: [], - // /// The "memory" volume is always available, but you may create other volumes here, with various backends to support the actual storing. - // volumes: { - // /// An influxdb backend is also available at https://github.com/eclipse-zenoh/zenoh-backend-influxdb - // influxdb: { - // url: "https://myinfluxdb.example", - // /// Some plugins may need passwords in their configuration. - // /// To avoid leaking them through the adminspace, they may be masked behind a privacy barrier. 
- // /// any value held at the key "private" will not be shown in the adminspace. - // private: { - // username: "user1", - // password: "pw1", - // }, - // }, - // influxdb2: { - // /// A second backend of the same type can be spawned using `__path__`, for examples when different DBs are needed. - // backend: "influxdb", - // private: { - // username: "user2", - // password: "pw2", - // }, - // url: "https://localhost:8086", - // }, - // }, - // - // /// Configure the storages supported by the volumes - // storages: { - // demo: { - // /// Storages always need to know what set of keys they must work with. These sets are defined by a key expression. - // key_expr: "demo/memory/**", - // /// Storages also need to know which volume will be used to actually store their key-value pairs. - // /// The "memory" volume is always available, and doesn't require any per-storage options, so requesting "memory" by string is always sufficient. - // volume: "memory", - // }, - // demo2: { - // key_expr: "demo/memory2/**", - // volume: "memory", - // /// Storage manager plugin handles metadata in order to ensure convergence of distributed storages configured in Zenoh. - // /// Metadata includes the set of wild card updates and deletions (tombstones). - // /// Once the samples are guaranteed to be delivered, the metadata can be garbage collected. - // garbage_collection: { - // /// The garbage collection event will be periodic with this duration. - // /// The duration is specified in seconds. - // period: 30, - // /// Metadata older than this parameter will be garbage collected. - // /// The duration is specified in seconds. - // lifespan: 86400, - // }, - // /// If multiple storages subscribing to the same key_expr should be synchronized, declare them as replicas. - // /// In the absence of this configuration, a normal storage is initialized - // /// Note: all the samples to be stored in replicas should be timestamped - // replica_config: { - // /// Specifying the parameters is optional, by default the values provided will be used. - // /// Time interval between different synchronization attempts in seconds - // publication_interval: 5, - // /// Expected propagation delay of the network in milliseconds - // propagation_delay: 200, - // /// This is the chunk that you would like your data to be divide into in time, in milliseconds. - // /// Higher the frequency of updates, lower the delta should be chosen - // /// To be efficient, delta should be the time containing no more than 100,000 samples - // delta: 1000, - // } - // }, - // demo3: { - // key_expr: "demo/memory3/**", - // volume: "memory", - // /// A complete storage advertises itself as containing all the known keys matching the configured key expression. - // /// If not configured, complete defaults to false. - // complete: "true", - // }, - // influx_demo: { - // key_expr: "demo/influxdb/**", - // /// This prefix will be stripped of the received keys when storing. 
- // strip_prefix: "demo/influxdb", - // /// influxdb-backed volumes need a bit more configuration, which is passed like-so: - // volume: { - // id: "influxdb", - // db: "example", - // }, - // }, - // influx_demo2: { - // key_expr: "demo/influxdb2/**", - // strip_prefix: "demo/influxdb2", - // volume: { - // id: "influxdb2", - // db: "example", - // }, - // }, - // }, - // }, - // }, - // /// Plugin configuration example using `__config__` property - // plugins: { - // rest: { - // __config__: "./plugins/zenoh-plugin-rest/config.json5", - // }, - // storage_manager: { - // __config__: "./plugins/zenoh-plugin-storage-manager/config.json5", - // } - // }, -} \ No newline at end of file diff --git a/rules_test.json5 b/rules_test.json5 deleted file mode 100644 index 8b133f6fcf..0000000000 --- a/rules_test.json5 +++ /dev/null @@ -1,68 +0,0 @@ -{ - "policy_definition": "UserID", - "rules": [ - { - "attribute_name": "UserID", - "attribute_rules": [ - { - "sub": "aaa3b411006ad57868988f9fec672a31", - "ke": "test/thr", - "action": "Write", - "permission": true - }, - { - "sub": "bbb3b411006ad57868988f9fec672a31", - "ke": "test/thr", - "action": "Read", - "permission": true - }, - { - "sub": "aaabbb11006ad57868988f9fec672a31", - "ke": "test/thr", - "action": "Read", - "permission": true - }, - { - "sub": "aaabbb11006ad57868988f9fec672a31", - "ke": "test/thr", - "action": "Write", - "permission": true - } - ] - }, - { - "attribute_name": "NetworkType", - "attribute_rules": [ - { - "sub": "wifi", - "ke": "test/thr", - "action": "Write", - "permission": true - }, - { - "sub": "wifi", - "ke": "test/thr", - "action": "Read", - "permission": true - } - ] - }, - { - "attribute_name": "location", - "attribute_rules": [ - { - "sub": "location_1", - "ke": "test/thr", - "action": "Write", - "permission": true - }, - { - "sub": "location_2", - "ke": "test/thr", - "action": "Read", - "permission": true - } - ] - } - ] -} \ No newline at end of file diff --git a/sub_config.json5 b/sub_config.json5 deleted file mode 100644 index 8828cdbf55..0000000000 --- a/sub_config.json5 +++ /dev/null @@ -1,413 +0,0 @@ -/// This file attempts to list and document available configuration elements. -/// For a more complete view of the configuration's structure, check out `zenoh/src/config.rs`'s `Config` structure. -/// Note that the values here are correctly typed, but may not be sensible, so copying this file to change only the parts that matter to you is not good practice. -{ - /// The identifier (as unsigned 128bit integer in hexadecimal lowercase - leading zeros are not accepted) - /// that zenoh runtime will use. - /// If not set, a random unsigned 128bit integer will be used. - /// WARNING: this id must be unique in your zenoh network. - id: "bbb3b411006ad57868988f9fec672a31", - /// The node's mode (router, peer or client) - mode: "client", - /// The node's metadata (name, location, DNS name, etc.) Arbitrary JSON data not interpreted by zenohd and available in admin space @/router/ - metadata: { - name: "blueberry", - location: "Dollar Street" - }, - /// Which endpoints to connect to. E.g. tcp/localhost:7447. - /// By configuring the endpoints, it is possible to tell zenoh which router/peer to connect to at startup. - connect: { - endpoints: [ - // "/
" - ], - }, - /// Which endpoints to listen on. E.g. tcp/localhost:7447. - /// By configuring the endpoints, it is possible to tell zenoh which are the endpoints that other routers, - /// peers, or client can use to establish a zenoh session. - listen: { - endpoints: [ - // "/
" - ], - }, - transport: { - // acl: { - // "enabled": false, - // "blacklist": false, - // "rules": null, - // }, - } - // /// Configure the scouting mechanisms and their behaviours - // scouting: { - // /// In client mode, the period dedicated to scouting for a router before failing - // timeout: 3000, - // /// In peer mode, the period dedicated to scouting remote peers before attempting other operations - // delay: 200, - // /// The multicast scouting configuration. - // multicast: { - // /// Whether multicast scouting is enabled or not - // enabled: true, - // /// The socket which should be used for multicast scouting - // address: "224.0.0.224:7446", - // /// The network interface which should be used for multicast scouting - // interface: "auto", // If not set or set to "auto" the interface if picked automatically - // /// Which type of Zenoh instances to automatically establish sessions with upon discovery on UDP multicast. - // /// Accepts a single value or different values for router, peer and client. - // /// Each value is bit-or-like combinations of "peer", "router" and "client". - // autoconnect: { router: "", peer: "router|peer" }, - // /// Whether or not to listen for scout messages on UDP multicast and reply to them. - // listen: true, - // }, - // /// The gossip scouting configuration. - // gossip: { - // /// Whether gossip scouting is enabled or not - // enabled: true, - // /// When true, gossip scouting informations are propagated multiple hops to all nodes in the local network. - // /// When false, gossip scouting informations are only propagated to the next hop. - // /// Activating multihop gossip implies more scouting traffic and a lower scalability. - // /// It mostly makes sense when using "linkstate" routing mode where all nodes in the subsystem don't have - // /// direct connectivity with each other. - // multihop: false, - // /// Which type of Zenoh instances to automatically establish sessions with upon discovery on gossip. - // /// Accepts a single value or different values for router, peer and client. - // /// Each value is bit-or-like combinations of "peer", "router" and "client". - // autoconnect: { router: "", peer: "router|peer" }, - // }, - // }, - // /// Configuration of data messages timestamps management. - // timestamping: { - // /// Whether data messages should be timestamped if not already. - // /// Accepts a single boolean value or different values for router, peer and client. - // enabled: { router: true, peer: false, client: false }, - // /// Whether data messages with timestamps in the future should be dropped or not. - // /// If set to false (default), messages with timestamps in the future are retimestamped. - // /// Timestamps are ignored if timestamping is disabled. - // drop_future_timestamp: false, - // }, - // /// The default timeout to apply to queries in milliseconds. - // queries_default_timeout: 10000, - // /// The routing strategy to use and it's configuration. - // routing: { - // /// The routing strategy to use in routers and it's configuration. - // router: { - // /// When set to true a router will forward data between two peers - // /// directly connected to it if it detects that those peers are not - // /// connected to each other. - // /// The failover brokering only works if gossip discovery is enabled. - // peers_failover_brokering: true, - // }, - // /// The routing strategy to use in peers and it's configuration. - // peer: { - // /// The routing strategy to use in peers. ("peer_to_peer" or "linkstate"). 
- // mode: "peer_to_peer", - // }, - // }, - // // /// The declarations aggregation strategy. - // // aggregation: { - // // /// A list of key-expressions for which all included subscribers will be aggregated into. - // // subscribers: [ - // // // key_expression - // // ], - // // /// A list of key-expressions for which all included publishers will be aggregated into. - // // publishers: [ - // // // key_expression - // // ], - // // }, - // /// Configure internal transport parameters - // transport: { - // unicast: { - // /// Timeout in milliseconds when opening a link - // accept_timeout: 10000, - // /// Maximum number of zenoh session in pending state while accepting - // accept_pending: 100, - // /// Maximum number of sessions that can be simultaneously alive - // max_sessions: 1000, - // /// Maximum number of incoming links that are admitted per session - // max_links: 1, - // /// Enables the LowLatency transport - // /// This option does not make LowLatency transport mandatory, the actual implementation of transport - // /// used will depend on Establish procedure and other party's settings - // /// - // /// NOTE: Currently, the LowLatency transport doesn't preserve QoS prioritization. - // /// NOTE: Due to the note above, 'lowlatency' is incompatible with 'qos' option, so in order to - // /// enable 'lowlatency' you need to explicitly disable 'qos'. - // lowlatency: false, - // /// Enables QoS on unicast communications. - // qos: { - // enabled: true, - // }, - // /// Enables compression on unicast communications. - // /// Compression capabilities are negotiated during session establishment. - // /// If both Zenoh nodes support compression, then compression is activated. - // compression: { - // enabled: false, - // }, - // }, - // multicast: { - // /// Enables QoS on multicast communication. - // /// Default to false for Zenoh-to-Zenoh-Pico out-of-the-box compatibility. - // qos: { - // enabled: false, - // }, - // /// Enables compression on multicast communication. - // /// Default to false for Zenoh-to-Zenoh-Pico out-of-the-box compatibility. - // compression: { - // enabled: false, - // }, - // }, - // link: { - // /// An optional whitelist of protocols to be used for accepting and opening sessions. - // /// If not configured, all the supported protocols are automatically whitelisted. - // /// The supported protocols are: ["tcp" , "udp", "tls", "quic", "ws", "unixsock-stream"] - // /// For example, to only enable "tls" and "quic": - // // protocols: ["tls", "quic"], - // /// Configure the zenoh TX parameters of a link - // tx: { - // /// The resolution in bits to be used for the message sequence numbers. - // /// When establishing a session with another Zenoh instance, the lowest value of the two instances will be used. - // /// Accepted values: 8bit, 16bit, 32bit, 64bit. - // sequence_number_resolution: "32bit", - // /// Link lease duration in milliseconds to announce to other zenoh nodes - // lease: 10000, - // /// Number of keep-alive messages in a link lease duration. If no data is sent, keep alive - // /// messages will be sent at the configured time interval. - // /// NOTE: In order to consider eventual packet loss and transmission latency and jitter, - // /// set the actual keep_alive timeout to one fourth of the lease time. - // /// This is in-line with the ITU-T G.8013/Y.1731 specification on continous connectivity - // /// check which considers a link as failed when no messages are received in 3.5 times the - // /// target interval. 
- // keep_alive: 4, - // /// Batch size in bytes is expressed as a 16bit unsigned integer. - // /// Therefore, the maximum batch size is 2^16-1 (i.e. 65535). - // /// The default batch size value is the maximum batch size: 65535. - // batch_size: 65535, - // /// Each zenoh link has a transmission queue that can be configured - // queue: { - // /// The size of each priority queue indicates the number of batches a given queue can contain. - // /// The amount of memory being allocated for each queue is then SIZE_XXX * BATCH_SIZE. - // /// In the case of the transport link MTU being smaller than the ZN_BATCH_SIZE, - // /// then amount of memory being allocated for each queue is SIZE_XXX * LINK_MTU. - // /// If qos is false, then only the DATA priority will be allocated. - // size: { - // control: 1, - // real_time: 1, - // interactive_high: 1, - // interactive_low: 1, - // data_high: 2, - // data: 4, - // data_low: 4, - // background: 4, - // }, - // /// The initial exponential backoff time in nanoseconds to allow the batching to eventually progress. - // /// Higher values lead to a more aggressive batching but it will introduce additional latency. - // backoff: 100, - // // Number of threads dedicated to transmission - // // By default, the number of threads is calculated as follows: 1 + ((#cores - 1) / 4) - // // threads: 4, - // }, - // }, - // /// Configure the zenoh RX parameters of a link - // rx: { - // /// Receiving buffer size in bytes for each link - // /// The default the rx_buffer_size value is the same as the default batch size: 65335. - // /// For very high throughput scenarios, the rx_buffer_size can be increased to accomodate - // /// more in-flight data. This is particularly relevant when dealing with large messages. - // /// E.g. for 16MiB rx_buffer_size set the value to: 16777216. - // buffer_size: 65535, - // /// Maximum size of the defragmentation buffer at receiver end. - // /// Fragmented messages that are larger than the configured size will be dropped. - // /// The default value is 1GiB. This would work in most scenarios. - // /// NOTE: reduce the value if you are operating on a memory constrained device. - // max_message_size: 1073741824, - // }, - // /// Configure TLS specific parameters - // tls: { - // /// Path to the certificate of the certificate authority used to validate either the server - // /// or the client's keys and certificates, depending on the node's mode. If not specified - // /// on router mode then the default WebPKI certificates are used instead. - // root_ca_certificate: null, - // /// Path to the TLS server private key - // server_private_key: null, - // /// Path to the TLS server public certificate - // server_certificate: null, - // /// Client authentication, if true enables mTLS (mutual authentication) - // client_auth: false, - // /// Path to the TLS client private key - // client_private_key: null, - // /// Path to the TLS client public certificate - // client_certificate: null, - // // Whether or not to use server name verification, if set to false zenoh will disregard the common names of the certificates when verifying servers. - // // This could be dangerous because your CA can have signed a server cert for foo.com, that's later being used to host a server at baz.com. If you wan't your - // // ca to verify that the server at baz.com is actually baz.com, let this be true (default). 
- // server_name_verification: null, - // }, - // }, - // /// Shared memory configuration - // shared_memory: { - // enabled: false, - // }, - // /// Access control configuration - // auth: { - // /// The configuration of authentification. - // /// A password implies a username is required. - // usrpwd: { - // user: null, - // password: null, - // /// The path to a file containing the user password dictionary - // dictionary_file: null, - // }, - // pubkey: { - // public_key_pem: null, - // private_key_pem: null, - // public_key_file: null, - // private_key_file: null, - // key_size: null, - // known_keys_file: null, - // }, - // }, - // }, - // /// Configure the Admin Space - // /// Unstable: this configuration part works as advertised, but may change in a future release - // adminspace: { - // // read and/or write permissions on the admin space - // permissions: { - // read: true, - // write: false, - // }, - // }, - /// - /// Plugins configurations - /// - // /// Directories where plugins configured by name should be looked for. Plugins configured by __path__ are not subject to lookup - // plugins_search_dirs: [], - // /// Plugins are only loaded if present in the configuration. When starting - // /// Once loaded, they may react to changes in the configuration made through the zenoh instance's adminspace. - // plugins: { - // /// If no `__path__` is given to a plugin, zenohd will automatically search for a shared library matching the plugin's name (here, `libzenoh_plugin_rest.so` would be searched for on linux) - // - // /// Plugin settings may contain field `__config__` - // /// - If `__config__` is specified, it's content is merged into plugin configuration - // /// - Properties loaded from `__config__` file overrides existing properties - // /// - If json objects in loaded file contains `__config__` properties, they are processed recursively - // /// This is used in the 'storcge_manager' which supports subplugins, each with it's own config - // /// - // /// See below exapmle of plugin configuration using `__config__` property - // - // /// Configure the REST API plugin - // rest: { - // /// Setting this option to true allows zenohd to panic should it detect issues with this plugin. Setting it to false politely asks the plugin not to panic. - // __required__: true, // defaults to false - // /// load configuration from the file - // __config__: "./plugins/zenoh-plugin-rest/config.json5", - // /// http port to answer to rest requests - // http_port: 8000, - // }, - // - // /// Configure the storage manager plugin - // storage_manager: { - // /// When a path is present, automatic search is disabled, and zenohd will instead select the first path which manages to load. - // __path__: [ - // "./target/release/libzenoh_plugin_storage_manager.so", - // "./target/release/libzenoh_plugin_storage_manager.dylib", - // ], - // /// Directories where plugins configured by name should be looked for. Plugins configured by __path__ are not subject to lookup - // backend_search_dirs: [], - // /// The "memory" volume is always available, but you may create other volumes here, with various backends to support the actual storing. - // volumes: { - // /// An influxdb backend is also available at https://github.com/eclipse-zenoh/zenoh-backend-influxdb - // influxdb: { - // url: "https://myinfluxdb.example", - // /// Some plugins may need passwords in their configuration. - // /// To avoid leaking them through the adminspace, they may be masked behind a privacy barrier. 
- // /// any value held at the key "private" will not be shown in the adminspace. - // private: { - // username: "user1", - // password: "pw1", - // }, - // }, - // influxdb2: { - // /// A second backend of the same type can be spawned using `__path__`, for examples when different DBs are needed. - // backend: "influxdb", - // private: { - // username: "user2", - // password: "pw2", - // }, - // url: "https://localhost:8086", - // }, - // }, - // - // /// Configure the storages supported by the volumes - // storages: { - // demo: { - // /// Storages always need to know what set of keys they must work with. These sets are defined by a key expression. - // key_expr: "demo/memory/**", - // /// Storages also need to know which volume will be used to actually store their key-value pairs. - // /// The "memory" volume is always available, and doesn't require any per-storage options, so requesting "memory" by string is always sufficient. - // volume: "memory", - // }, - // demo2: { - // key_expr: "demo/memory2/**", - // volume: "memory", - // /// Storage manager plugin handles metadata in order to ensure convergence of distributed storages configured in Zenoh. - // /// Metadata includes the set of wild card updates and deletions (tombstones). - // /// Once the samples are guaranteed to be delivered, the metadata can be garbage collected. - // garbage_collection: { - // /// The garbage collection event will be periodic with this duration. - // /// The duration is specified in seconds. - // period: 30, - // /// Metadata older than this parameter will be garbage collected. - // /// The duration is specified in seconds. - // lifespan: 86400, - // }, - // /// If multiple storages subscribing to the same key_expr should be synchronized, declare them as replicas. - // /// In the absence of this configuration, a normal storage is initialized - // /// Note: all the samples to be stored in replicas should be timestamped - // replica_config: { - // /// Specifying the parameters is optional, by default the values provided will be used. - // /// Time interval between different synchronization attempts in seconds - // publication_interval: 5, - // /// Expected propagation delay of the network in milliseconds - // propagation_delay: 200, - // /// This is the chunk that you would like your data to be divide into in time, in milliseconds. - // /// Higher the frequency of updates, lower the delta should be chosen - // /// To be efficient, delta should be the time containing no more than 100,000 samples - // delta: 1000, - // } - // }, - // demo3: { - // key_expr: "demo/memory3/**", - // volume: "memory", - // /// A complete storage advertises itself as containing all the known keys matching the configured key expression. - // /// If not configured, complete defaults to false. - // complete: "true", - // }, - // influx_demo: { - // key_expr: "demo/influxdb/**", - // /// This prefix will be stripped of the received keys when storing. 
- // strip_prefix: "demo/influxdb", - // /// influxdb-backed volumes need a bit more configuration, which is passed like-so: - // volume: { - // id: "influxdb", - // db: "example", - // }, - // }, - // influx_demo2: { - // key_expr: "demo/influxdb2/**", - // strip_prefix: "demo/influxdb2", - // volume: { - // id: "influxdb2", - // db: "example", - // }, - // }, - // }, - // }, - // }, - // /// Plugin configuration example using `__config__` property - // plugins: { - // rest: { - // __config__: "./plugins/zenoh-plugin-rest/config.json5", - // }, - // storage_manager: { - // __config__: "./plugins/zenoh-plugin-storage-manager/config.json5", - // } - // }, -} \ No newline at end of file diff --git a/zenoh/Cargo.toml b/zenoh/Cargo.toml index b8b51d2907..3686722ea3 100644 --- a/zenoh/Cargo.toml +++ b/zenoh/Cargo.toml @@ -96,6 +96,7 @@ zenoh-collections = { workspace = true, features = ["std"] } zenoh-config = { workspace = true } zenoh-core = { workspace = true } zenoh-crypto = { workspace = true } +zenoh-keyexpr = { workspace = true } zenoh-link = { workspace = true } zenoh-macros = { workspace = true } zenoh-plugin-trait = { workspace = true } @@ -105,11 +106,9 @@ zenoh-shm = { workspace = true, optional = true } zenoh-sync = { workspace = true } zenoh-transport = { workspace = true } zenoh-util = { workspace = true } -zenoh-keyexpr = { workspace = true } [build-dependencies] rustc_version = { workspace = true } - [lib] name = "zenoh" diff --git a/zenoh/src/net/routing/interceptor/access_control.rs b/zenoh/src/net/routing/interceptor/access_control.rs index 58915d90b3..a45a702469 100644 --- a/zenoh/src/net/routing/interceptor/access_control.rs +++ b/zenoh/src/net/routing/interceptor/access_control.rs @@ -1,5 +1,5 @@ use std::sync::Arc; -use zenoh_config::{AclConfig, Action, Permission, Subject}; +use zenoh_config::{AclConfig, Action, Permission, Subject, ZenohId}; use zenoh_protocol::{ network::{NetworkBody, NetworkMessage, Push, Request, Response}, zenoh::{PushBody, RequestBody, ResponseBody}, @@ -21,11 +21,13 @@ struct EgressAclEnforcer { policy_enforcer: Arc, interface_list: Vec, default_decision: bool, + zid: ZenohId, } struct IngressAclEnforcer { policy_enforcer: Arc, interface_list: Vec, default_decision: bool, + zid: ZenohId, } pub(crate) fn acl_interceptor_factories(acl_config: AclConfig) -> ZResult> { @@ -40,9 +42,9 @@ pub(crate) fn acl_interceptor_factories(acl_config: AclConfig) -> ZResult log::error!( + Err(e) => log::error!( "Access control enabled but not initialized with error {}!", - enforcer + e ), } } else { @@ -57,6 +59,12 @@ impl InterceptorFactoryTrait for AclEnforcer { &self, transport: &TransportUnicast, ) -> (Option, Option) { + let mut zid: ZenohId = ZenohId::default(); + if let Ok(id) = transport.get_zid() { + zid = id; + } else { + log::error!("Error in trying to get zid"); + } let mut interface_list: Vec = Vec::new(); if let Ok(links) = transport.get_links() { for link in links { @@ -81,6 +89,7 @@ impl InterceptorFactoryTrait for AclEnforcer { Permission::Allow => true, Permission::Deny => false, }, + zid, })), Some(Box::new(EgressAclEnforcer { policy_enforcer: policy_enforcer.clone(), @@ -89,6 +98,7 @@ impl InterceptorFactoryTrait for AclEnforcer { Permission::Allow => true, Permission::Deny => false, }, + zid, })), ) } @@ -138,18 +148,18 @@ impl InterceptorTrait for IngressAclEnforcer { break; } Ok(false) => continue, - Err(enforcer) => { - log::error!("Authorization incomplete due to error {}", enforcer); + Err(e) => { + log::error!("Authorization incomplete 
due to error {}", e); return None; } } } if !decision { - log::warn!("Unauthorized to Put"); + log::warn!("{} is unauthorized to Put", self.zid); return None; } - log::info!("Authorized access to Put"); + log::info!("{} is authorized access to Put", self.zid); } else if let NetworkBody::Request(Request { payload: RequestBody::Query(_), .. @@ -168,18 +178,18 @@ impl InterceptorTrait for IngressAclEnforcer { break; } Ok(false) => continue, - Err(enforcer) => { - log::error!("Authorization incomplete due to error {}", enforcer); + Err(e) => { + log::error!("Authorization incomplete due to error {}", e); return None; } } } if !decision { - log::warn!("Unauthorized to Query/Get"); + log::warn!("{} is unauthorized to Query/Get", self.zid); return None; } - log::info!("Authorized access to Query"); + log::info!("{} is authorized access to Query", self.zid); } Some(ctx) } @@ -200,7 +210,7 @@ impl InterceptorTrait for EgressAclEnforcer { for subject in &self.interface_list { match self.policy_enforcer.policy_decision_point( *subject, - Action::Sub, + Action::DeclareSub, key_expr, self.default_decision, ) { @@ -209,18 +219,18 @@ impl InterceptorTrait for EgressAclEnforcer { break; } Ok(false) => continue, - Err(enforcer) => { - log::error!("Authorization incomplete due to error {}", enforcer); + Err(e) => { + log::error!("Authorization incomplete due to error {}", e); return None; } } } if !decision { - log::warn!("Unauthorized to Sub"); + log::warn!("{} is unauthorized to Sub", self.zid); return None; } - log::info!("Authorized access to Sub"); + log::info!("{} is authorized access to Sub", self.zid); } Some(ctx) } From a19608c82294de249267fdb33f65e94d1d50a319 Mon Sep 17 00:00:00 2001 From: snehilzs Date: Tue, 5 Mar 2024 15:46:00 +0100 Subject: [PATCH 073/122] WIP: Config changes after discussion --- .github/workflows/crates_check.sh | 0 .github/workflows/crates_publish.sh | 0 2 files changed, 0 insertions(+), 0 deletions(-) mode change 100644 => 100755 .github/workflows/crates_check.sh mode change 100644 => 100755 .github/workflows/crates_publish.sh diff --git a/.github/workflows/crates_check.sh b/.github/workflows/crates_check.sh old mode 100644 new mode 100755 diff --git a/.github/workflows/crates_publish.sh b/.github/workflows/crates_publish.sh old mode 100644 new mode 100755 From 23128e26404f3a54231e4d83abf6992b21e447f2 Mon Sep 17 00:00:00 2001 From: snehilzs Date: Wed, 6 Mar 2024 18:27:40 +0100 Subject: [PATCH 074/122] WIP: Adding Queryable --- DEFAULT_CONFIG.json5 | 70 +++++++++--------- commons/zenoh-config/src/lib.rs | 2 +- .../net/routing/interceptor/access_control.rs | 71 ++++++++++++++++++- 3 files changed, 105 insertions(+), 38 deletions(-) diff --git a/DEFAULT_CONFIG.json5 b/DEFAULT_CONFIG.json5 index 3f0ab4d007..2e005d0e37 100644 --- a/DEFAULT_CONFIG.json5 +++ b/DEFAULT_CONFIG.json5 @@ -8,7 +8,7 @@ /// WARNING: this id must be unique in your zenoh network. // id: "1234567890abcdef", /// The node's mode (router, peer or client) - mode: "peer", + mode: "router", /// The node's metadata (name, location, DNS name, etc.) 
Arbitrary JSON data not interpreted by zenohd and available in admin space @/router/ metadata: { name: "strawberry", @@ -277,39 +277,41 @@ known_keys_file: null, }, }, - // acl: { - // "enabled": true, //[true/false] acl will be activated only if this is set to true - // "default_permission": "deny", //[deny/allow] default permission is deny (even if this is left empty or not specified) - // "rules": //[action,permission,key_expression,subject] - // [ - // { - // "action": [ - // "put", - // "get" - // ], - // "permission": "allow", - // "key_expr": [ - // "test/thr", - // "test/other" - // ], - // "interface": [ - // "lo0" - // ] - // }, - // { - // "action": [ - // "declare_sub" - // ], - // "permission": "allow", - // "interface": [ - // "lo0" - // ], - // "key_expr": [ - // "test/thr" - // ] - // } - // ] - // } + acl: { + "enabled": true, //[true/false] acl will be activated only if this is set to true + "default_permission": "deny", //[deny/allow] default permission is deny (even if this is left empty or not specified) + "rules": //[action,permission,key_expression,subject] + [ + { + "action": [ + "put", + "get", + "declare_subscriber" + ], + "permission": "allow", + "key_expr": [ + "test/thr", + "test/other", + "demo/example/**" + ], + "interface": [ + "lo0" + ] + }, + { + "action": [ + "declare_subscriber" + ], + "permission": "allow", + "interface": [ + "lo0" + ], + "key_expr": [ + "test/thr" + ] + } + ] + } }, /// Configure the Admin Space /// Unstable: this configuration part works as advertised, but may change in a future release diff --git a/commons/zenoh-config/src/lib.rs b/commons/zenoh-config/src/lib.rs index e2b41ea850..c16ecf9807 100644 --- a/commons/zenoh-config/src/lib.rs +++ b/commons/zenoh-config/src/lib.rs @@ -128,7 +128,7 @@ pub enum Subject { #[serde(rename_all = "snake_case")] pub enum Action { Put, - DeclareSub, + DeclareSubscriber, Get, DeclareQueryable, } diff --git a/zenoh/src/net/routing/interceptor/access_control.rs b/zenoh/src/net/routing/interceptor/access_control.rs index a45a702469..5aadeff7c6 100644 --- a/zenoh/src/net/routing/interceptor/access_control.rs +++ b/zenoh/src/net/routing/interceptor/access_control.rs @@ -1,3 +1,5 @@ +use crate::KeyExpr; +use std::any::Any; use std::sync::Arc; use zenoh_config::{AclConfig, Action, Permission, Subject, ZenohId}; use zenoh_protocol::{ @@ -17,6 +19,18 @@ pub(crate) struct AclEnforcer { pub(crate) enforcer: Arc, } +/* effective blocking of multicast bypass */ +// struct DropAllMsg; + +// impl InterceptorTrait for DropAllMsg { +// fn intercept( +// &self, +// ctx: RoutingContext, +// ) -> Option> { +// None +// } +// } + struct EgressAclEnforcer { policy_enforcer: Arc, interface_list: Vec, @@ -116,9 +130,19 @@ impl InterceptorFactoryTrait for AclEnforcer { } impl InterceptorTrait for IngressAclEnforcer { + fn compute_keyexpr_cache(&self, key_expr: &KeyExpr<'_>) -> Option> { + // let ke_id = zlock!(self.ke_id); + // if let Some(id) = ke_id.weight_at(&key_expr.clone()) { + // Some(Box::new(Some(*id))) + // } else { + // Some(Box::new(None::)) + // } + None + } fn intercept<'a>( &self, ctx: RoutingContext, + cache: Option<&Box>, ) -> Option> { let key_expr = ctx.full_expr()?; //TODO add caching if let NetworkBody::Push(Push { @@ -196,9 +220,19 @@ impl InterceptorTrait for IngressAclEnforcer { } impl InterceptorTrait for EgressAclEnforcer { + fn compute_keyexpr_cache(&self, key_expr: &KeyExpr<'_>) -> Option> { + // let ke_id = zlock!(self.ke_id); + // if let Some(id) = ke_id.weight_at(&key_expr.clone()) { + // 
Some(Box::new(Some(*id))) + // } else { + // Some(Box::new(None::)) + // } + None + } fn intercept( &self, ctx: RoutingContext, + cache: Option<&Box>, ) -> Option> { let key_expr = ctx.full_expr()?; //TODO add caching if let NetworkBody::Push(Push { @@ -210,7 +244,37 @@ impl InterceptorTrait for EgressAclEnforcer { for subject in &self.interface_list { match self.policy_enforcer.policy_decision_point( *subject, - Action::DeclareSub, + Action::DeclareSubscriber, + key_expr, + self.default_decision, + ) { + Ok(true) => { + decision = true; + break; + } + Ok(false) => continue, + Err(e) => { + log::error!("Authorization incomplete due to error {}", e); + return None; + } + } + } + if !decision { + log::warn!("{} is unauthorized to be Subscriber", self.zid); + return None; + } + + log::info!("{} is authorized access to be Subscriber", self.zid); + } else if let NetworkBody::Request(Request { + payload: RequestBody::Query(_), + .. + }) = &ctx.msg.body + { + let mut decision = self.default_decision; + for subject in &self.interface_list { + match self.policy_enforcer.policy_decision_point( + *subject, + Action::DeclareQueryable, key_expr, self.default_decision, ) { @@ -226,12 +290,13 @@ impl InterceptorTrait for EgressAclEnforcer { } } if !decision { - log::warn!("{} is unauthorized to Sub", self.zid); + log::warn!("{} is unauthorized to be Queryable", self.zid); return None; } - log::info!("{} is authorized access to Sub", self.zid); + log::info!("{} is authorized access to be Queryable", self.zid); } + Some(ctx) } } From f066a76d016f6ce8f8a9ec55bd6cbd910dd9007b Mon Sep 17 00:00:00 2001 From: snehilzs Date: Fri, 8 Mar 2024 10:21:25 +0100 Subject: [PATCH 075/122] WIP: Adding key-expr caching --- .gitignore | 5 +- DEFAULT_CONFIG.json5 | 62 ++++++------------- .../net/routing/interceptor/access_control.rs | 25 +++----- 3 files changed, 32 insertions(+), 60 deletions(-) diff --git a/.gitignore b/.gitignore index 83421ea1ae..017bf7c4ca 100644 --- a/.gitignore +++ b/.gitignore @@ -19,4 +19,7 @@ .vscode -cargo-timing*.html \ No newline at end of file +cargo-timing*.html + +# #remove json changes +# *.json5 \ No newline at end of file diff --git a/DEFAULT_CONFIG.json5 b/DEFAULT_CONFIG.json5 index 2e005d0e37..66352fe141 100644 --- a/DEFAULT_CONFIG.json5 +++ b/DEFAULT_CONFIG.json5 @@ -7,13 +7,16 @@ /// If not set, a random unsigned 128bit integer will be used. /// WARNING: this id must be unique in your zenoh network. // id: "1234567890abcdef", + /// The node's mode (router, peer or client) - mode: "router", + mode: "peer", + /// The node's metadata (name, location, DNS name, etc.) Arbitrary JSON data not interpreted by zenohd and available in admin space @/router/ metadata: { name: "strawberry", location: "Penny Lane" }, + /// Which endpoints to connect to. E.g. tcp/localhost:7447. /// By configuring the endpoints, it is possible to tell zenoh which router/peer to connect to at startup. /// For TCP/UDP on Linux, it is possible additionally specify the interface to be connected to: @@ -23,6 +26,7 @@ // "/
" ], }, + /// Which endpoints to listen on. E.g. tcp/localhost:7447. /// By configuring the endpoints, it is possible to tell zenoh which are the endpoints that other routers, /// peers, or client can use to establish a zenoh session. @@ -50,8 +54,7 @@ /// Which type of Zenoh instances to automatically establish sessions with upon discovery on UDP multicast. /// Accepts a single value or different values for router, peer and client. /// Each value is bit-or-like combinations of "peer", "router" and "client". - autoconnect: { router: "", peer: "router|peer" - }, + autoconnect: { router: "", peer: "router|peer" }, /// Whether or not to listen for scout messages on UDP multicast and reply to them. listen: true, }, @@ -68,23 +71,24 @@ /// Which type of Zenoh instances to automatically establish sessions with upon discovery on gossip. /// Accepts a single value or different values for router, peer and client. /// Each value is bit-or-like combinations of "peer", "router" and "client". - autoconnect: { router: "", peer: "router|peer" - }, + autoconnect: { router: "", peer: "router|peer" }, }, }, + /// Configuration of data messages timestamps management. timestamping: { /// Whether data messages should be timestamped if not already. /// Accepts a single boolean value or different values for router, peer and client. - enabled: { router: true, peer: false, client: false - }, + enabled: { router: true, peer: false, client: false }, /// Whether data messages with timestamps in the future should be dropped or not. /// If set to false (default), messages with timestamps in the future are retimestamped. /// Timestamps are ignored if timestamping is disabled. drop_future_timestamp: false, }, + /// The default timeout to apply to queries in milliseconds. queries_default_timeout: 10000, + /// The routing strategy to use and it's configuration. routing: { /// The routing strategy to use in routers and it's configuration. @@ -101,6 +105,7 @@ mode: "peer_to_peer", }, }, + // /// The declarations aggregation strategy. // aggregation: { // /// A list of key-expressions for which all included subscribers will be aggregated into. @@ -112,6 +117,7 @@ // // key_expression // ], // }, + // /// The downsampling declaration. 
// downsampling: [ // { @@ -125,6 +131,7 @@ // ], // }, // ], + /// Configure internal transport parameters transport: { unicast: { @@ -277,42 +284,8 @@ known_keys_file: null, }, }, - acl: { - "enabled": true, //[true/false] acl will be activated only if this is set to true - "default_permission": "deny", //[deny/allow] default permission is deny (even if this is left empty or not specified) - "rules": //[action,permission,key_expression,subject] - [ - { - "action": [ - "put", - "get", - "declare_subscriber" - ], - "permission": "allow", - "key_expr": [ - "test/thr", - "test/other", - "demo/example/**" - ], - "interface": [ - "lo0" - ] - }, - { - "action": [ - "declare_subscriber" - ], - "permission": "allow", - "interface": [ - "lo0" - ], - "key_expr": [ - "test/thr" - ] - } - ] - } }, + /// Configure the Admin Space /// Unstable: this configuration part works as advertised, but may change in a future release adminspace: { @@ -322,6 +295,7 @@ write: false, }, }, + /// /// Plugins configurations /// @@ -449,6 +423,7 @@ // }, // }, // }, + // /// Plugin configuration example using `__config__` property // plugins: { // rest: { @@ -458,4 +433,5 @@ // __config__: "./plugins/zenoh-plugin-storage-manager/config.json5", // } // }, -} \ No newline at end of file + +} diff --git a/zenoh/src/net/routing/interceptor/access_control.rs b/zenoh/src/net/routing/interceptor/access_control.rs index 5aadeff7c6..46a49429c9 100644 --- a/zenoh/src/net/routing/interceptor/access_control.rs +++ b/zenoh/src/net/routing/interceptor/access_control.rs @@ -131,20 +131,16 @@ impl InterceptorFactoryTrait for AclEnforcer { impl InterceptorTrait for IngressAclEnforcer { fn compute_keyexpr_cache(&self, key_expr: &KeyExpr<'_>) -> Option> { - // let ke_id = zlock!(self.ke_id); - // if let Some(id) = ke_id.weight_at(&key_expr.clone()) { - // Some(Box::new(Some(*id))) - // } else { - // Some(Box::new(None::)) - // } - None + Some(Box::new(key_expr.to_string())) } fn intercept<'a>( &self, ctx: RoutingContext, cache: Option<&Box>, ) -> Option> { - let key_expr = ctx.full_expr()?; //TODO add caching + let key_expr = cache + .and_then(|i| i.downcast_ref::().map(|e| e.as_str())) + .or_else(|| ctx.full_expr())?; if let NetworkBody::Push(Push { payload: PushBody::Put(_), .. @@ -221,20 +217,17 @@ impl InterceptorTrait for IngressAclEnforcer { impl InterceptorTrait for EgressAclEnforcer { fn compute_keyexpr_cache(&self, key_expr: &KeyExpr<'_>) -> Option> { - // let ke_id = zlock!(self.ke_id); - // if let Some(id) = ke_id.weight_at(&key_expr.clone()) { - // Some(Box::new(Some(*id))) - // } else { - // Some(Box::new(None::)) - // } - None + Some(Box::new(key_expr.to_string())) + // None } fn intercept( &self, ctx: RoutingContext, cache: Option<&Box>, ) -> Option> { - let key_expr = ctx.full_expr()?; //TODO add caching + let key_expr = cache + .and_then(|i| i.downcast_ref::().map(|e| e.as_str())) + .or_else(|| ctx.full_expr())?; if let NetworkBody::Push(Push { payload: PushBody::Put(_), .. 
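
Aside (illustration only, not part of the patch series): the hunk above introduces per-key-expression caching for the ACL interceptors. compute_keyexpr_cache() stores the stringified key expression as a type-erased boxed value, and intercept() recovers it with a downcast back to String, falling back to resolving the full expression from the routing context when no cache entry is available. Below is a minimal, self-contained sketch of that round-trip using only the standard library; the function and type signatures are illustrative stand-ins, not zenoh APIs.

    use std::any::Any;

    // Stand-in for compute_keyexpr_cache(): store the key expression as a boxed String.
    fn compute_cache(key_expr: &str) -> Option<Box<dyn Any>> {
        Some(Box::new(key_expr.to_string()))
    }

    // Stand-in for the lookup done in intercept(): prefer the cached String,
    // fall back to re-deriving the expression (here just a caller-provided &str).
    fn resolve<'a>(cache: Option<&'a Box<dyn Any>>, fallback: Option<&'a str>) -> Option<&'a str> {
        cache
            .and_then(|c| c.downcast_ref::<String>().map(String::as_str))
            .or(fallback)
    }

    fn main() {
        let cache = compute_cache("demo/example/zenoh-rs-pub");
        assert_eq!(resolve(cache.as_ref(), None), Some("demo/example/zenoh-rs-pub"));
        // Without a cache entry the fallback path is used,
        // mirroring the or_else(|| ctx.full_expr()) fallback in the patch.
        assert_eq!(resolve(None, Some("test/thr")), Some("test/thr"));
    }
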
From 152938d9648707318e833b4e934d996690381faa Mon Sep 17 00:00:00 2001 From: snehilzs Date: Fri, 8 Mar 2024 10:37:24 +0100 Subject: [PATCH 076/122] Cleaning config file --- .gitignore | 3 - DEFAULT_CONFIG.json5 | 57 +++++++++++++------ .../net/routing/interceptor/access_control.rs | 13 ----- 3 files changed, 39 insertions(+), 34 deletions(-) diff --git a/.gitignore b/.gitignore index 017bf7c4ca..695d0464b1 100644 --- a/.gitignore +++ b/.gitignore @@ -20,6 +20,3 @@ .vscode cargo-timing*.html - -# #remove json changes -# *.json5 \ No newline at end of file diff --git a/DEFAULT_CONFIG.json5 b/DEFAULT_CONFIG.json5 index 66352fe141..2f712aecd5 100644 --- a/DEFAULT_CONFIG.json5 +++ b/DEFAULT_CONFIG.json5 @@ -7,16 +7,13 @@ /// If not set, a random unsigned 128bit integer will be used. /// WARNING: this id must be unique in your zenoh network. // id: "1234567890abcdef", - /// The node's mode (router, peer or client) mode: "peer", - /// The node's metadata (name, location, DNS name, etc.) Arbitrary JSON data not interpreted by zenohd and available in admin space @/router/ metadata: { name: "strawberry", location: "Penny Lane" }, - /// Which endpoints to connect to. E.g. tcp/localhost:7447. /// By configuring the endpoints, it is possible to tell zenoh which router/peer to connect to at startup. /// For TCP/UDP on Linux, it is possible additionally specify the interface to be connected to: @@ -26,7 +23,6 @@ // "/
" ], }, - /// Which endpoints to listen on. E.g. tcp/localhost:7447. /// By configuring the endpoints, it is possible to tell zenoh which are the endpoints that other routers, /// peers, or client can use to establish a zenoh session. @@ -54,7 +50,8 @@ /// Which type of Zenoh instances to automatically establish sessions with upon discovery on UDP multicast. /// Accepts a single value or different values for router, peer and client. /// Each value is bit-or-like combinations of "peer", "router" and "client". - autoconnect: { router: "", peer: "router|peer" }, + autoconnect: { router: "", peer: "router|peer" + }, /// Whether or not to listen for scout messages on UDP multicast and reply to them. listen: true, }, @@ -71,24 +68,23 @@ /// Which type of Zenoh instances to automatically establish sessions with upon discovery on gossip. /// Accepts a single value or different values for router, peer and client. /// Each value is bit-or-like combinations of "peer", "router" and "client". - autoconnect: { router: "", peer: "router|peer" }, + autoconnect: { router: "", peer: "router|peer" + }, }, }, - /// Configuration of data messages timestamps management. timestamping: { /// Whether data messages should be timestamped if not already. /// Accepts a single boolean value or different values for router, peer and client. - enabled: { router: true, peer: false, client: false }, + enabled: { router: true, peer: false, client: false + }, /// Whether data messages with timestamps in the future should be dropped or not. /// If set to false (default), messages with timestamps in the future are retimestamped. /// Timestamps are ignored if timestamping is disabled. drop_future_timestamp: false, }, - /// The default timeout to apply to queries in milliseconds. queries_default_timeout: 10000, - /// The routing strategy to use and it's configuration. routing: { /// The routing strategy to use in routers and it's configuration. @@ -105,7 +101,6 @@ mode: "peer_to_peer", }, }, - // /// The declarations aggregation strategy. // aggregation: { // /// A list of key-expressions for which all included subscribers will be aggregated into. @@ -117,7 +112,6 @@ // // key_expression // ], // }, - // /// The downsampling declaration. 
// downsampling: [ // { @@ -131,7 +125,6 @@ // ], // }, // ], - /// Configure internal transport parameters transport: { unicast: { @@ -284,8 +277,39 @@ known_keys_file: null, }, }, + // acl: { + // "enabled": false, //[true/false] acl will be activated only if this is set to true + // "default_permission": "deny", //[deny/allow] default permission is deny (even if this is left empty or not specified) + // "rules": //[action,permission,key_expression,subject] + // [ + // { + // "action": [ + // "put", + // "get" + // ], + // "permission": "allow", + // "key_expr": [ + // "test/thr" + // ], + // "interface": [ + // "lo0" + // ] + // }, + // { + // "action": [ + // "declare_subscriber" + // ], + // "permission": "allow", + // "interface": [ + // "lo0" + // ], + // "key_expr": [ + // "test/thr" + // ] + // } + // ] + // } }, - /// Configure the Admin Space /// Unstable: this configuration part works as advertised, but may change in a future release adminspace: { @@ -295,7 +319,6 @@ write: false, }, }, - /// /// Plugins configurations /// @@ -423,7 +446,6 @@ // }, // }, // }, - // /// Plugin configuration example using `__config__` property // plugins: { // rest: { @@ -433,5 +455,4 @@ // __config__: "./plugins/zenoh-plugin-storage-manager/config.json5", // } // }, - -} +} \ No newline at end of file diff --git a/zenoh/src/net/routing/interceptor/access_control.rs b/zenoh/src/net/routing/interceptor/access_control.rs index 46a49429c9..0336f3a454 100644 --- a/zenoh/src/net/routing/interceptor/access_control.rs +++ b/zenoh/src/net/routing/interceptor/access_control.rs @@ -18,19 +18,6 @@ use super::{ pub(crate) struct AclEnforcer { pub(crate) enforcer: Arc, } - -/* effective blocking of multicast bypass */ -// struct DropAllMsg; - -// impl InterceptorTrait for DropAllMsg { -// fn intercept( -// &self, -// ctx: RoutingContext, -// ) -> Option> { -// None -// } -// } - struct EgressAclEnforcer { policy_enforcer: Arc, interface_list: Vec, From 7439dba93e6883ad594d85ccc539ab793bf4d4ed Mon Sep 17 00:00:00 2001 From: snehilzs Date: Thu, 28 Mar 2024 11:20:34 +0100 Subject: [PATCH 077/122] made review changes --- DEFAULT_CONFIG-pub.json5 | 18 +++ DEFAULT_CONFIG-sub.json5 | 16 +++ DEFAULT_CONFIG-test.json5 | 49 +++++++ DEFAULT_CONFIG.json5 | 97 +++++++------ commons/zenoh-config/src/lib.rs | 16 +-- .../net/routing/interceptor/access_control.rs | 129 ++++++++++++------ .../net/routing/interceptor/authorization.rs | 27 +++- 7 files changed, 248 insertions(+), 104 deletions(-) create mode 100644 DEFAULT_CONFIG-pub.json5 create mode 100644 DEFAULT_CONFIG-sub.json5 create mode 100644 DEFAULT_CONFIG-test.json5 diff --git a/DEFAULT_CONFIG-pub.json5 b/DEFAULT_CONFIG-pub.json5 new file mode 100644 index 0000000000..6a039c3ce5 --- /dev/null +++ b/DEFAULT_CONFIG-pub.json5 @@ -0,0 +1,18 @@ +{ + mode: "client", + connect: { + endpoints: [ + /// "tcp/192.168.11.1:7447", + "tcp/127.0.0.1:7447", + + ], + }, + scouting: { + multicast: { + enabled: false, + }, + gossip: { + enabled: false, + }, + }, +} \ No newline at end of file diff --git a/DEFAULT_CONFIG-sub.json5 b/DEFAULT_CONFIG-sub.json5 new file mode 100644 index 0000000000..7ad3c5d952 --- /dev/null +++ b/DEFAULT_CONFIG-sub.json5 @@ -0,0 +1,16 @@ +{ + mode: "client", + connect: { + endpoints: [ + "tcp/127.0.0.1:7447", + ], + }, + scouting: { + multicast: { + enabled: false, + }, + gossip: { + enabled: false, + }, + }, +} \ No newline at end of file diff --git a/DEFAULT_CONFIG-test.json5 b/DEFAULT_CONFIG-test.json5 new file mode 100644 index 0000000000..73a6a8e6dc --- 
/dev/null +++ b/DEFAULT_CONFIG-test.json5 @@ -0,0 +1,49 @@ +{ + mode: "router", + listen: { + endpoints: [ + "tcp/127.0.0.1:7447", + ], + }, + scouting: { + multicast: { + enabled: false, + }, + gossip: { + enabled: false, + }, + }, + transport: { + acl: { + "enabled": true, + "default_permission": "deny", + "rules": + [ + { + "permission": "allow", + "action": [ + "put", + ], + "key_expr": [ + "demo/example/**" + ], + "interface": [ + "lo0" + ] + }, + { + "permission": "deny", + "action": [ + "declare_subscriber" + ], + "key_expr": [ + "demo/example/**" + ], + "interface": [ + "lo0" + ] + }, + ] + } + }, +} \ No newline at end of file diff --git a/DEFAULT_CONFIG.json5 b/DEFAULT_CONFIG.json5 index f81f6a3f4a..e12b29bec6 100644 --- a/DEFAULT_CONFIG.json5 +++ b/DEFAULT_CONFIG.json5 @@ -8,7 +8,7 @@ /// WARNING: this id must be unique in your zenoh network. // id: "1234567890abcdef", /// The node's mode (router, peer or client) - mode: "peer", + mode: "router", /// The node's metadata (name, location, DNS name, etc.) Arbitrary JSON data not interpreted by zenohd and available in admin space @/router/ metadata: { name: "strawberry", @@ -21,22 +21,22 @@ connect: { /// timeout waiting for all endpoints connected (0: no retry, -1: infinite timeout) /// Accepts a single value or different values for router, peer and client. - timeout_ms: { router: -1, peer: -1, client: 0 }, + timeout_ms: { router: -1, peer: -1, client: 0 + }, endpoints: [ // "/
" ], - /// Global connect configuration, /// Accepts a single value or different values for router, peer and client. /// The configuration can also be specified for the separate endpoint /// it will override the global one /// E.g. tcp/192.168.0.1:7447#retry_period_init_ms=20000;retry_period_max_ms=10000" - /// exit from application, if timeout exceed - exit_on_failure: { router: false, peer: false, client: true }, + exit_on_failure: { router: false, peer: false, client: true + }, /// connect establishing retry configuration - retry: { + retry: { /// intial wait timeout until next connect try period_init_ms: 1000, /// maximum wait timeout until next connect try @@ -58,17 +58,15 @@ endpoints: [ // "/
" ], - /// Global listen configuration, /// Accepts a single value or different values for router, peer and client. /// The configuration can also be specified for the separate endpoint /// it will override the global one /// E.g. tcp/192.168.0.1:7447#exit_on_failure=false;retry_period_max_ms=1000" - /// exit from application, if timeout exceed exit_on_failure: true, /// listen retry configuration - retry: { + retry: { /// intial wait timeout until next try period_init_ms: 1000, /// maximum wait timeout until next try @@ -86,7 +84,7 @@ /// The multicast scouting configuration. multicast: { /// Whether multicast scouting is enabled or not - enabled: true, + enabled: false, /// The socket which should be used for multicast scouting address: "224.0.0.224:7446", /// The network interface which should be used for multicast scouting @@ -97,12 +95,12 @@ autoconnect: { router: "", peer: "router|peer" }, /// Whether or not to listen for scout messages on UDP multicast and reply to them. - listen: true, + listen: false, }, /// The gossip scouting configuration. gossip: { /// Whether gossip scouting is enabled or not - enabled: true, + enabled: false, /// When true, gossip scouting informations are propagated multiple hops to all nodes in the local network. /// When false, gossip scouting informations are only propagated to the next hop. /// Activating multihop gossip implies more scouting traffic and a lower scalability. @@ -330,38 +328,49 @@ known_keys_file: null, }, }, - // acl: { - // "enabled": false, //[true/false] acl will be activated only if this is set to true - // "default_permission": "deny", //[deny/allow] default permission is deny (even if this is left empty or not specified) - // "rules": //[action,permission,key_expression,subject] - // [ - // { - // "action": [ - // "put", - // "get" - // ], - // "permission": "allow", - // "key_expr": [ - // "test/thr" - // ], - // "interface": [ - // "lo0" - // ] - // }, - // { - // "action": [ - // "declare_subscriber" - // ], - // "permission": "allow", - // "interface": [ - // "lo0" - // ], - // "key_expr": [ - // "test/thr" - // ] - // } - // ] - // } + acl: { + "enabled": true, //[true/false] acl will be activated only if this is set to true + "default_permission": "deny", //[deny/allow] default permission is deny (even if this is left empty or not specified) + "rules": + [ + { + "action": [ + "put" + ], + "permission": "allow", + "key_expr": [ + "test/thr" + ], + "interface": [ + "lo0" + ] + }, + { + "action": [ + "declare_subscriber" + ], + "permission": "allow", + "interface": [ + "lo0" + ], + "key_expr": [ + "test/thr" + ] + }, + // { + // "action": [ + // "get" + // ], + // "permission": "allow", + // "key_expr": [ + // "demo/example/**" + // ], + // "interface": [ + // "lo0" + // ] + // } + ] + } }, /// Configure the Admin Space /// Unstable: this configuration part works as advertised, but may change in a future release diff --git a/commons/zenoh-config/src/lib.rs b/commons/zenoh-config/src/lib.rs index 6b149371e7..94ed2b146e 100644 --- a/commons/zenoh-config/src/lib.rs +++ b/commons/zenoh-config/src/lib.rs @@ -100,11 +100,8 @@ pub struct DownsamplingItemConf { pub flow: DownsamplingFlow, } -//adding datatypes needed for ACL Config - #[derive(Serialize, Debug, Deserialize, Clone)] - -pub struct ConfigRule { +pub struct AclConfigRules { pub interface: Vec, pub key_expr: Vec, pub action: Vec, @@ -112,19 +109,17 @@ pub struct ConfigRule { } #[derive(Clone, Serialize, Debug, Deserialize)] pub struct PolicyRule { - pub subject: Subject, 
//Subject + pub subject: Subject, pub key_expr: String, - pub action: Action, //Action - pub permission: Permission, //Permission + pub action: Action, + pub permission: Permission, } #[derive(Serialize, Debug, Deserialize, Eq, PartialEq, Hash, Clone)] #[serde(untagged)] #[serde(rename_all = "snake_case")] - pub enum Subject { Interface(String), - //Username(String) } #[derive(Clone, Debug, Serialize, Deserialize, Eq, Hash, PartialEq)] @@ -139,7 +134,6 @@ pub const NUMBER_OF_ACTIONS: usize = 4; //size of Action enum (change according #[derive(Clone, Debug, Serialize, Deserialize, Eq, Hash, PartialEq)] #[serde(rename_all = "lowercase")] - pub enum Permission { Allow, Deny, @@ -480,7 +474,7 @@ validated_struct::validator! { pub acl: AclConfig { pub enabled: bool, pub default_permission: Permission, - pub rules: Option> + pub rules: Option> } }, /// Configuration of the admin space. diff --git a/zenoh/src/net/routing/interceptor/access_control.rs b/zenoh/src/net/routing/interceptor/access_control.rs index 0336f3a454..ca38d27ea2 100644 --- a/zenoh/src/net/routing/interceptor/access_control.rs +++ b/zenoh/src/net/routing/interceptor/access_control.rs @@ -1,10 +1,30 @@ +// +// Copyright (c) 2024 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +//! ⚠️ WARNING ⚠️ +//! +//! This module is intended for Zenoh's internal use. +//! +//! [Click here for Zenoh's documentation](../zenoh/index.html) + use crate::KeyExpr; use std::any::Any; use std::sync::Arc; use zenoh_config::{AclConfig, Action, Permission, Subject, ZenohId}; use zenoh_protocol::{ - network::{NetworkBody, NetworkMessage, Push, Request, Response}, - zenoh::{PushBody, RequestBody, ResponseBody}, + network::{NetworkBody, NetworkMessage, Push, Request}, + zenoh::{PushBody, RequestBody}, }; use zenoh_result::ZResult; use zenoh_transport::{multicast::TransportMulticast, unicast::TransportUnicast}; @@ -61,41 +81,49 @@ impl InterceptorFactoryTrait for AclEnforcer { transport: &TransportUnicast, ) -> (Option, Option) { let mut zid: ZenohId = ZenohId::default(); - if let Ok(id) = transport.get_zid() { - zid = id; - } else { - log::error!("Error in trying to get zid"); + match transport.get_zid() { + Ok(id) => { + zid = id; + } + Err(e) => { + log::error!("Failed to get zid with error :{}", e); + return (None, None); + } } let mut interface_list: Vec = Vec::new(); - if let Ok(links) = transport.get_links() { - for link in links { - let enforcer = self.enforcer.clone(); - if let Some(subject_map) = &enforcer.subject_map { - for face in link.interfaces { - let subject = &Subject::Interface(face); - match subject_map.get(subject) { - Some(val) => interface_list.push(*val), - None => continue, + match transport.get_links() { + Ok(links) => { + for link in links { + let enforcer = self.enforcer.clone(); + if let Some(subject_map) = &enforcer.subject_map { + for face in link.interfaces { + let subject = &Subject::Interface(face); + if let Some(val) = subject_map.get(subject) { + interface_list.push(*val); + } } } } } + Err(e) => { + log::error!("Couldn't get interface list with error: {}", e); + return (None, None); + } } - let policy_enforcer = self.enforcer.clone(); ( 
Some(Box::new(IngressAclEnforcer { - policy_enforcer: policy_enforcer.clone(), + policy_enforcer: self.enforcer.clone(), interface_list: interface_list.clone(), - default_decision: match policy_enforcer.default_permission { + default_decision: match self.enforcer.default_permission { Permission::Allow => true, Permission::Deny => false, }, zid, })), Some(Box::new(EgressAclEnforcer { - policy_enforcer: policy_enforcer.clone(), + policy_enforcer: self.enforcer.clone(), interface_list, - default_decision: match policy_enforcer.default_permission { + default_decision: match self.enforcer.default_permission { Permission::Allow => true, Permission::Deny => false, }, @@ -108,10 +136,13 @@ impl InterceptorFactoryTrait for AclEnforcer { &self, _transport: &TransportMulticast, ) -> Option { + log::debug!("multicast is not enabled in interceptor"); None } fn new_peer_multicast(&self, _transport: &TransportMulticast) -> Option { + log::debug!("multicast is not enabled in interceptor"); + None } } @@ -131,18 +162,9 @@ impl InterceptorTrait for IngressAclEnforcer { if let NetworkBody::Push(Push { payload: PushBody::Put(_), .. - }) - | NetworkBody::Request(Request { - payload: RequestBody::Put(_), - .. - }) - | NetworkBody::Response(Response { - payload: ResponseBody::Put(_), - .. }) = &ctx.msg.body { let mut decision = self.default_decision; - for subject in &self.interface_list { match self.policy_enforcer.policy_decision_point( *subject, @@ -154,19 +176,22 @@ impl InterceptorTrait for IngressAclEnforcer { decision = true; break; } - Ok(false) => continue, + Ok(false) => { + decision = false; + continue; + } Err(e) => { - log::error!("Authorization incomplete due to error {}", e); + log::debug!("Authorization incomplete due to error {}", e); return None; } } } if !decision { - log::warn!("{} is unauthorized to Put", self.zid); + log::debug!("{} is unauthorized to Put", self.zid); return None; } - log::info!("{} is authorized access to Put", self.zid); + log::trace!("{} is authorized access to Put", self.zid); } else if let NetworkBody::Request(Request { payload: RequestBody::Query(_), .. 
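
Aside (illustration only, not part of the patch): the changes above to new_transport_unicast reduce each transport to the list of numeric subject ids that its link interfaces map to, so the later policy checks iterate over integers rather than interface names. A rough, self-contained sketch of that reduction follows, assuming a HashMap keyed by subject like the one the policy enforcer builds; the types and names below are simplified stand-ins, not the actual zenoh definitions.

    use std::collections::HashMap;

    // Simplified stand-in for zenoh_config::Subject.
    #[derive(PartialEq, Eq, Hash)]
    enum Subject {
        Interface(String),
    }

    // Keep only the ids of interfaces that actually appear in the ACL subject map;
    // interfaces with no configured rules are skipped, as in the patch.
    fn interface_ids(interfaces: &[String], subject_map: &HashMap<Subject, i32>) -> Vec<i32> {
        interfaces
            .iter()
            .filter_map(|name| subject_map.get(&Subject::Interface(name.clone())).copied())
            .collect()
    }

    fn main() {
        let mut subject_map = HashMap::new();
        subject_map.insert(Subject::Interface("lo0".to_string()), 0);

        let links = vec!["lo0".to_string(), "en0".to_string()];
        // Only "lo0" has rules configured, so only its id feeds the policy decision point.
        assert_eq!(interface_ids(&links, &subject_map), vec![0]);
    }
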
@@ -184,9 +209,12 @@ impl InterceptorTrait for IngressAclEnforcer { decision = true; break; } - Ok(false) => continue, + Ok(false) => { + decision = false; + continue; + } Err(e) => { - log::error!("Authorization incomplete due to error {}", e); + log::debug!("Authorization incomplete due to error {}", e); return None; } } @@ -196,7 +224,7 @@ impl InterceptorTrait for IngressAclEnforcer { return None; } - log::info!("{} is authorized access to Query", self.zid); + log::trace!("{} is authorized access to Query", self.zid); } Some(ctx) } @@ -205,7 +233,6 @@ impl InterceptorTrait for IngressAclEnforcer { impl InterceptorTrait for EgressAclEnforcer { fn compute_keyexpr_cache(&self, key_expr: &KeyExpr<'_>) -> Option> { Some(Box::new(key_expr.to_string())) - // None } fn intercept( &self, @@ -232,19 +259,22 @@ impl InterceptorTrait for EgressAclEnforcer { decision = true; break; } - Ok(false) => continue, + Ok(false) => { + decision = false; + continue; + } Err(e) => { - log::error!("Authorization incomplete due to error {}", e); + log::debug!("Authorization incomplete due to error {}", e); return None; } } } if !decision { - log::warn!("{} is unauthorized to be Subscriber", self.zid); + log::debug!("{} is unauthorized to be Subscriber", self.zid); return None; } - log::info!("{} is authorized access to be Subscriber", self.zid); + log::trace!("{} is authorized access to be Subscriber", self.zid); } else if let NetworkBody::Request(Request { payload: RequestBody::Query(_), .. @@ -262,21 +292,30 @@ impl InterceptorTrait for EgressAclEnforcer { decision = true; break; } - Ok(false) => continue, + Ok(false) => { + decision = false; + continue; + } Err(e) => { - log::error!("Authorization incomplete due to error {}", e); + log::debug!("Authorization incomplete due to error {}", e); return None; } } } if !decision { - log::warn!("{} is unauthorized to be Queryable", self.zid); + log::debug!("{} is unauthorized to be Queryable", self.zid); return None; } - log::info!("{} is authorized access to be Queryable", self.zid); + log::trace!("{} is authorized access to be Queryable", self.zid); } Some(ctx) } } + +#[cfg(tests)] +mod tests { + + pub fn allow_then_deny() {} +} diff --git a/zenoh/src/net/routing/interceptor/authorization.rs b/zenoh/src/net/routing/interceptor/authorization.rs index daa58fd4c2..972a4d1378 100644 --- a/zenoh/src/net/routing/interceptor/authorization.rs +++ b/zenoh/src/net/routing/interceptor/authorization.rs @@ -1,7 +1,27 @@ +// +// Copyright (c) 2024 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// + +//! ⚠️ WARNING ⚠️ +//! +//! This module is intended for Zenoh's internal use. +//! +//! 
[Click here for Zenoh's documentation](../zenoh/index.html) + use ahash::RandomState; use std::collections::HashMap; use zenoh_config::{ - AclConfig, Action, ConfigRule, Permission, PolicyRule, Subject, NUMBER_OF_ACTIONS, + AclConfig, AclConfigRules, Action, Permission, PolicyRule, Subject, NUMBER_OF_ACTIONS, NUMBER_OF_PERMISSIONS, }; use zenoh_keyexpr::keyexpr; @@ -9,7 +29,7 @@ use zenoh_keyexpr::keyexpr_tree::{IKeyExprTree, IKeyExprTreeMut, KeBoxTree}; use zenoh_result::ZResult; pub struct PolicyForSubject(Vec>); //vec of actions over vec of permission for tree of ke for this -pub struct PolicyMap(pub HashMap); //index of subject (i32) instead of subject (String) +pub struct PolicyMap(HashMap); //index of subject (i32) instead of subject (String) type KeTreeRule = KeBoxTree; @@ -21,7 +41,6 @@ pub struct PolicyEnforcer { } #[derive(Debug, Clone)] - pub struct PolicyInformation { subject_map: HashMap, policy_rules: Vec, @@ -93,7 +112,7 @@ impl PolicyEnforcer { */ pub fn policy_information_point( &self, - config_rule_set: Vec, + config_rule_set: Vec, ) -> ZResult { let mut policy_rules: Vec = Vec::new(); for config_rule in config_rule_set { From 0a8bf04b9da3163ccc5181d6ebee5bfe172c1637 Mon Sep 17 00:00:00 2001 From: snehilzs Date: Thu, 28 Mar 2024 11:21:45 +0100 Subject: [PATCH 078/122] made review changes --- DEFAULT_CONFIG-pub.json5 | 18 -------------- DEFAULT_CONFIG-sub.json5 | 16 ------------- DEFAULT_CONFIG-test.json5 | 49 --------------------------------------- 3 files changed, 83 deletions(-) delete mode 100644 DEFAULT_CONFIG-pub.json5 delete mode 100644 DEFAULT_CONFIG-sub.json5 delete mode 100644 DEFAULT_CONFIG-test.json5 diff --git a/DEFAULT_CONFIG-pub.json5 b/DEFAULT_CONFIG-pub.json5 deleted file mode 100644 index 6a039c3ce5..0000000000 --- a/DEFAULT_CONFIG-pub.json5 +++ /dev/null @@ -1,18 +0,0 @@ -{ - mode: "client", - connect: { - endpoints: [ - /// "tcp/192.168.11.1:7447", - "tcp/127.0.0.1:7447", - - ], - }, - scouting: { - multicast: { - enabled: false, - }, - gossip: { - enabled: false, - }, - }, -} \ No newline at end of file diff --git a/DEFAULT_CONFIG-sub.json5 b/DEFAULT_CONFIG-sub.json5 deleted file mode 100644 index 7ad3c5d952..0000000000 --- a/DEFAULT_CONFIG-sub.json5 +++ /dev/null @@ -1,16 +0,0 @@ -{ - mode: "client", - connect: { - endpoints: [ - "tcp/127.0.0.1:7447", - ], - }, - scouting: { - multicast: { - enabled: false, - }, - gossip: { - enabled: false, - }, - }, -} \ No newline at end of file diff --git a/DEFAULT_CONFIG-test.json5 b/DEFAULT_CONFIG-test.json5 deleted file mode 100644 index 73a6a8e6dc..0000000000 --- a/DEFAULT_CONFIG-test.json5 +++ /dev/null @@ -1,49 +0,0 @@ -{ - mode: "router", - listen: { - endpoints: [ - "tcp/127.0.0.1:7447", - ], - }, - scouting: { - multicast: { - enabled: false, - }, - gossip: { - enabled: false, - }, - }, - transport: { - acl: { - "enabled": true, - "default_permission": "deny", - "rules": - [ - { - "permission": "allow", - "action": [ - "put", - ], - "key_expr": [ - "demo/example/**" - ], - "interface": [ - "lo0" - ] - }, - { - "permission": "deny", - "action": [ - "declare_subscriber" - ], - "key_expr": [ - "demo/example/**" - ], - "interface": [ - "lo0" - ] - }, - ] - } - }, -} \ No newline at end of file From 87333fef318647e54bf3adad14752a958891f83e Mon Sep 17 00:00:00 2001 From: snehilzs Date: Thu, 28 Mar 2024 15:09:44 +0100 Subject: [PATCH 079/122] adding review changes for logs and removing bool values --- .gitignore | 3 + .../net/routing/interceptor/access_control.rs | 137 +++++++++--------- 
.../net/routing/interceptor/authorization.rs | 24 +-- 3 files changed, 87 insertions(+), 77 deletions(-) diff --git a/.gitignore b/.gitignore index 695d0464b1..6a16b1d1d2 100644 --- a/.gitignore +++ b/.gitignore @@ -20,3 +20,6 @@ .vscode cargo-timing*.html + +# Remove test data +tests \ No newline at end of file diff --git a/zenoh/src/net/routing/interceptor/access_control.rs b/zenoh/src/net/routing/interceptor/access_control.rs index ca38d27ea2..8cc2f10a38 100644 --- a/zenoh/src/net/routing/interceptor/access_control.rs +++ b/zenoh/src/net/routing/interceptor/access_control.rs @@ -41,13 +41,13 @@ pub(crate) struct AclEnforcer { struct EgressAclEnforcer { policy_enforcer: Arc, interface_list: Vec, - default_decision: bool, + //default_permission: bool, zid: ZenohId, } struct IngressAclEnforcer { policy_enforcer: Arc, interface_list: Vec, - default_decision: bool, + //default_permission: bool, zid: ZenohId, } @@ -58,18 +58,18 @@ pub(crate) fn acl_interceptor_factories(acl_config: AclConfig) -> ZResult { - log::info!("Access control is enabled and initialized"); + log::info!("[ACCESS LOG]: Access control is enabled and initialized"); res.push(Box::new(AclEnforcer { enforcer: Arc::new(policy_enforcer), })) } Err(e) => log::error!( - "Access control enabled but not initialized with error {}!", + "[ACCESS LOG]: Access control enabled but not initialized with error {}!", e ), } } else { - log::warn!("Access Control is disabled in config!"); + log::warn!("[ACCESS LOG]: Access Control is disabled in config!"); } Ok(res) @@ -86,7 +86,7 @@ impl InterceptorFactoryTrait for AclEnforcer { zid = id; } Err(e) => { - log::error!("Failed to get zid with error :{}", e); + log::error!("[ACCESS LOG]: Failed to get zid with error :{}", e); return (None, None); } } @@ -106,7 +106,10 @@ impl InterceptorFactoryTrait for AclEnforcer { } } Err(e) => { - log::error!("Couldn't get interface list with error: {}", e); + log::error!( + "[ACCESS LOG]: Couldn't get interface list with error: {}", + e + ); return (None, None); } } @@ -114,19 +117,12 @@ impl InterceptorFactoryTrait for AclEnforcer { Some(Box::new(IngressAclEnforcer { policy_enforcer: self.enforcer.clone(), interface_list: interface_list.clone(), - default_decision: match self.enforcer.default_permission { - Permission::Allow => true, - Permission::Deny => false, - }, + zid, })), Some(Box::new(EgressAclEnforcer { policy_enforcer: self.enforcer.clone(), interface_list, - default_decision: match self.enforcer.default_permission { - Permission::Allow => true, - Permission::Deny => false, - }, zid, })), ) @@ -136,12 +132,12 @@ impl InterceptorFactoryTrait for AclEnforcer { &self, _transport: &TransportMulticast, ) -> Option { - log::debug!("multicast is not enabled in interceptor"); + log::debug!("[ACCESS LOG]: Transport Multicast is not enabled in interceptor"); None } fn new_peer_multicast(&self, _transport: &TransportMulticast) -> Option { - log::debug!("multicast is not enabled in interceptor"); + log::debug!("[ACCESS LOG]: Peer Multicast is not enabled in interceptor"); None } @@ -164,67 +160,63 @@ impl InterceptorTrait for IngressAclEnforcer { .. 
}) = &ctx.msg.body { - let mut decision = self.default_decision; + let mut decision = self.policy_enforcer.default_permission.clone(); for subject in &self.interface_list { - match self.policy_enforcer.policy_decision_point( - *subject, - Action::Put, - key_expr, - self.default_decision, - ) { - Ok(true) => { - decision = true; + match self + .policy_enforcer + .policy_decision_point(*subject, Action::Put, key_expr) + { + Ok(Permission::Allow) => { + decision = Permission::Allow; break; } - Ok(false) => { - decision = false; + Ok(Permission::Deny) => { + decision = Permission::Deny; continue; } Err(e) => { - log::debug!("Authorization incomplete due to error {}", e); + log::debug!("[ACCESS LOG]: Authorization incomplete due to error {}", e); return None; } } } - if !decision { - log::debug!("{} is unauthorized to Put", self.zid); + if decision == Permission::Deny { + log::debug!("[ACCESS LOG]: {} is unauthorized to Put", self.zid); return None; } - log::trace!("{} is authorized access to Put", self.zid); + log::trace!("[ACCESS LOG]: {} is authorized access to Put", self.zid); } else if let NetworkBody::Request(Request { payload: RequestBody::Query(_), .. }) = &ctx.msg.body { - let mut decision = self.default_decision; + let mut decision = self.policy_enforcer.default_permission.clone(); for subject in &self.interface_list { - match self.policy_enforcer.policy_decision_point( - *subject, - Action::Get, - key_expr, - self.default_decision, - ) { - Ok(true) => { - decision = true; + match self + .policy_enforcer + .policy_decision_point(*subject, Action::Get, key_expr) + { + Ok(Permission::Allow) => { + decision = Permission::Allow; break; } - Ok(false) => { - decision = false; + Ok(Permission::Deny) => { + decision = Permission::Deny; continue; } Err(e) => { - log::debug!("Authorization incomplete due to error {}", e); + log::debug!("[ACCESS LOG]: Authorization incomplete due to error {}", e); return None; } } } - if !decision { - log::warn!("{} is unauthorized to Query/Get", self.zid); + if decision == Permission::Deny { + log::warn!("[ACCESS LOG]: {} is unauthorized to Query/Get", self.zid); return None; } - log::trace!("{} is authorized access to Query", self.zid); + log::trace!("[ACCESS LOG]: {} is authorized access to Query", self.zid); } Some(ctx) } @@ -247,73 +239,84 @@ impl InterceptorTrait for EgressAclEnforcer { .. }) = &ctx.msg.body { - let mut decision = self.default_decision; + let mut decision = self.policy_enforcer.default_permission.clone(); for subject in &self.interface_list { match self.policy_enforcer.policy_decision_point( *subject, Action::DeclareSubscriber, key_expr, - self.default_decision, ) { - Ok(true) => { - decision = true; + Ok(Permission::Allow) => { + decision = Permission::Allow; break; } - Ok(false) => { - decision = false; + Ok(Permission::Deny) => { + decision = Permission::Deny; continue; } Err(e) => { - log::debug!("Authorization incomplete due to error {}", e); + log::debug!("[ACCESS LOG]: Authorization incomplete due to error {}", e); return None; } } } - if !decision { - log::debug!("{} is unauthorized to be Subscriber", self.zid); + if decision == Permission::Deny { + log::debug!( + "[ACCESS LOG]: {} is unauthorized to be Subscriber", + self.zid + ); return None; } - log::trace!("{} is authorized access to be Subscriber", self.zid); + log::trace!( + "[ACCESS LOG]: {} is authorized access to be Subscriber", + self.zid + ); } else if let NetworkBody::Request(Request { payload: RequestBody::Query(_), .. 
}) = &ctx.msg.body { - let mut decision = self.default_decision; + let mut decision = self.policy_enforcer.default_permission.clone(); for subject in &self.interface_list { match self.policy_enforcer.policy_decision_point( *subject, Action::DeclareQueryable, key_expr, - self.default_decision, ) { - Ok(true) => { - decision = true; + Ok(Permission::Allow) => { + decision = Permission::Allow; break; } - Ok(false) => { - decision = false; + Ok(Permission::Deny) => { + decision = Permission::Deny; continue; } Err(e) => { - log::debug!("Authorization incomplete due to error {}", e); + log::debug!("[ACCESS LOG]: Authorization incomplete due to error {}", e); return None; } } } - if !decision { - log::debug!("{} is unauthorized to be Queryable", self.zid); + if decision == Permission::Deny { + log::debug!("[ACCESS LOG]: {} is unauthorized to be Queryable", self.zid); return None; } - log::trace!("{} is authorized access to be Queryable", self.zid); + log::trace!( + "[ACCESS LOG]: {} is authorized access to be Queryable", + self.zid + ); } Some(ctx) } } +// pub fn decide_permission() -> ZResult { +// Ok(Permission::Deny) +// } + #[cfg(tests)] mod tests { diff --git a/zenoh/src/net/routing/interceptor/authorization.rs b/zenoh/src/net/routing/interceptor/authorization.rs index 972a4d1378..60cb42be09 100644 --- a/zenoh/src/net/routing/interceptor/authorization.rs +++ b/zenoh/src/net/routing/interceptor/authorization.rs @@ -65,7 +65,7 @@ impl PolicyEnforcer { if self.acl_enabled { if let Some(rules) = acl_config.rules { if rules.is_empty() { - log::warn!("ACL ruleset in config file is empty!!!"); + log::warn!("[ACCESS LOG]: ACL ruleset in config file is empty!!!"); self.policy_map = None; self.subject_map = None; } @@ -101,7 +101,7 @@ impl PolicyEnforcer { self.policy_map = Some(main_policy); self.subject_map = Some(subject_map); } else { - log::warn!("No ACL rules have been specified!!!"); + log::warn!("[ACCESS LOG]: No ACL rules have been specified!!!"); } } Ok(()) @@ -150,8 +150,7 @@ impl PolicyEnforcer { subject: i32, action: Action, key_expr: &str, - default_decision: bool, - ) -> ZResult { + ) -> ZResult { match &self.policy_map { Some(policy_map) => { match policy_map.0.get(&subject) { @@ -163,27 +162,32 @@ impl PolicyEnforcer { .nodes_including(keyexpr::new(&key_expr)?) .count(); if deny_result != 0 { - return Ok(false); + return Ok(Permission::Deny); } //if default_permission is Allow, ignore checks for Allow if self.default_permission == Permission::Allow { - Ok(true) + Ok(Permission::Allow) } else { let allow_result = permission_vec[Permission::Allow as usize] .nodes_including(keyexpr::new(&key_expr)?) 
.count(); - Ok(allow_result != 0) + + if allow_result != 0 { + Ok(Permission::Allow) + } else { + Ok(Permission::Deny) + } } } - None => Ok(default_decision), + None => Ok(self.default_permission.clone()), } } None => { //when list is present (not null) but empty if self.default_permission == Permission::Allow { - Ok(true) + Ok(Permission::Allow) } else { - Ok(false) + Ok(Permission::Deny) } } } From 3b4091e1d43c5949e25db2f42bc9b8427f134320 Mon Sep 17 00:00:00 2001 From: snehilzs Date: Thu, 28 Mar 2024 21:26:20 +0100 Subject: [PATCH 080/122] adding review changes --- .../net/routing/interceptor/access_control.rs | 87 ++++++++----------- .../net/routing/interceptor/authorization.rs | 1 - 2 files changed, 36 insertions(+), 52 deletions(-) diff --git a/zenoh/src/net/routing/interceptor/access_control.rs b/zenoh/src/net/routing/interceptor/access_control.rs index 8cc2f10a38..af8b43d8d2 100644 --- a/zenoh/src/net/routing/interceptor/access_control.rs +++ b/zenoh/src/net/routing/interceptor/access_control.rs @@ -80,65 +80,61 @@ impl InterceptorFactoryTrait for AclEnforcer { &self, transport: &TransportUnicast, ) -> (Option, Option) { - let mut zid: ZenohId = ZenohId::default(); match transport.get_zid() { - Ok(id) => { - zid = id; - } - Err(e) => { - log::error!("[ACCESS LOG]: Failed to get zid with error :{}", e); - return (None, None); - } - } - let mut interface_list: Vec = Vec::new(); - match transport.get_links() { - Ok(links) => { - for link in links { - let enforcer = self.enforcer.clone(); - if let Some(subject_map) = &enforcer.subject_map { - for face in link.interfaces { - let subject = &Subject::Interface(face); - if let Some(val) = subject_map.get(subject) { - interface_list.push(*val); + Ok(zid) => { + let mut interface_list: Vec = Vec::new(); + match transport.get_links() { + Ok(links) => { + for link in links { + let enforcer = self.enforcer.clone(); + if let Some(subject_map) = &enforcer.subject_map { + for face in link.interfaces { + let subject = &Subject::Interface(face); + if let Some(val) = subject_map.get(subject) { + interface_list.push(*val); + } + } } } } + Err(e) => { + log::error!( + "[ACCESS LOG]: Couldn't get interface list with error: {}", + e + ); + return (None, None); + } } + ( + Some(Box::new(IngressAclEnforcer { + policy_enforcer: self.enforcer.clone(), + interface_list: interface_list.clone(), + zid, + })), + Some(Box::new(EgressAclEnforcer { + policy_enforcer: self.enforcer.clone(), + interface_list, + zid, + })), + ) } Err(e) => { - log::error!( - "[ACCESS LOG]: Couldn't get interface list with error: {}", - e - ); - return (None, None); + log::error!("[ACCESS LOG]: Failed to get zid with error :{}", e); + (None, None) } } - ( - Some(Box::new(IngressAclEnforcer { - policy_enforcer: self.enforcer.clone(), - interface_list: interface_list.clone(), - - zid, - })), - Some(Box::new(EgressAclEnforcer { - policy_enforcer: self.enforcer.clone(), - interface_list, - zid, - })), - ) } fn new_transport_multicast( &self, _transport: &TransportMulticast, ) -> Option { - log::debug!("[ACCESS LOG]: Transport Multicast is not enabled in interceptor"); + log::debug!("[ACCESS LOG]: Transport Multicast is disabled in interceptor"); None } fn new_peer_multicast(&self, _transport: &TransportMulticast) -> Option { - log::debug!("[ACCESS LOG]: Peer Multicast is not enabled in interceptor"); - + log::debug!("[ACCESS LOG]: Peer Multicast is disabled in interceptor"); None } } @@ -308,17 +304,6 @@ impl InterceptorTrait for EgressAclEnforcer { self.zid ); } - Some(ctx) } } - -// pub 
fn decide_permission() -> ZResult { -// Ok(Permission::Deny) -// } - -#[cfg(tests)] -mod tests { - - pub fn allow_then_deny() {} -} diff --git a/zenoh/src/net/routing/interceptor/authorization.rs b/zenoh/src/net/routing/interceptor/authorization.rs index 60cb42be09..9a1e01281c 100644 --- a/zenoh/src/net/routing/interceptor/authorization.rs +++ b/zenoh/src/net/routing/interceptor/authorization.rs @@ -80,7 +80,6 @@ impl PolicyEnforcer { let mut action_rule: Vec = Vec::new(); for _j in 0..NUMBER_OF_PERMISSIONS { let permission_rule = KeTreeRule::new(); - // action_rule.push(permission_rule); } rule.0.push(action_rule); From c7d0475f40f708e906d417e313670b2fbde0468f Mon Sep 17 00:00:00 2001 From: snehilzs Date: Fri, 29 Mar 2024 17:18:39 +0100 Subject: [PATCH 081/122] adding actions for both ingress and egress --- .../net/routing/interceptor/access_control.rs | 313 ++++++++++++------ 1 file changed, 205 insertions(+), 108 deletions(-) diff --git a/zenoh/src/net/routing/interceptor/access_control.rs b/zenoh/src/net/routing/interceptor/access_control.rs index af8b43d8d2..66e81dd98c 100644 --- a/zenoh/src/net/routing/interceptor/access_control.rs +++ b/zenoh/src/net/routing/interceptor/access_control.rs @@ -22,8 +22,9 @@ use crate::KeyExpr; use std::any::Any; use std::sync::Arc; use zenoh_config::{AclConfig, Action, Permission, Subject, ZenohId}; +//use zenoh_keyexpr::key_expr; use zenoh_protocol::{ - network::{NetworkBody, NetworkMessage, Push, Request}, + network::{Declare, DeclareBody, NetworkBody, NetworkMessage, Push, Request}, zenoh::{PushBody, RequestBody}, }; use zenoh_result::ZResult; @@ -41,13 +42,11 @@ pub(crate) struct AclEnforcer { struct EgressAclEnforcer { policy_enforcer: Arc, interface_list: Vec, - //default_permission: bool, zid: ZenohId, } struct IngressAclEnforcer { policy_enforcer: Arc, interface_list: Vec, - //default_permission: bool, zid: ZenohId, } @@ -151,68 +150,39 @@ impl InterceptorTrait for IngressAclEnforcer { let key_expr = cache .and_then(|i| i.downcast_ref::().map(|e| e.as_str())) .or_else(|| ctx.full_expr())?; + if let NetworkBody::Push(Push { payload: PushBody::Put(_), .. }) = &ctx.msg.body { - let mut decision = self.policy_enforcer.default_permission.clone(); - for subject in &self.interface_list { - match self - .policy_enforcer - .policy_decision_point(*subject, Action::Put, key_expr) - { - Ok(Permission::Allow) => { - decision = Permission::Allow; - break; - } - Ok(Permission::Deny) => { - decision = Permission::Deny; - continue; - } - Err(e) => { - log::debug!("[ACCESS LOG]: Authorization incomplete due to error {}", e); - return None; - } - } - } - - if decision == Permission::Deny { - log::debug!("[ACCESS LOG]: {} is unauthorized to Put", self.zid); + if self.put(key_expr) == Permission::Deny { return None; } - log::trace!("[ACCESS LOG]: {} is authorized access to Put", self.zid); } else if let NetworkBody::Request(Request { payload: RequestBody::Query(_), .. 
}) = &ctx.msg.body { - let mut decision = self.policy_enforcer.default_permission.clone(); - for subject in &self.interface_list { - match self - .policy_enforcer - .policy_decision_point(*subject, Action::Get, key_expr) - { - Ok(Permission::Allow) => { - decision = Permission::Allow; - break; - } - Ok(Permission::Deny) => { - decision = Permission::Deny; - continue; - } - Err(e) => { - log::debug!("[ACCESS LOG]: Authorization incomplete due to error {}", e); - return None; - } - } + if self.get(key_expr) == Permission::Deny { + return None; } - if decision == Permission::Deny { - log::warn!("[ACCESS LOG]: {} is unauthorized to Query/Get", self.zid); + } else if let NetworkBody::Declare(Declare { + body: DeclareBody::DeclareSubscriber(_), + .. + }) = &ctx.msg.body + { + if self.declare_subscriber(key_expr) == Permission::Deny { + return None; + } + } else if let NetworkBody::Declare(Declare { + body: DeclareBody::DeclareQueryable(_), + .. + }) = &ctx.msg.body + { + if self.declare_queryable(key_expr) == Permission::Deny { return None; } - - log::trace!("[ACCESS LOG]: {} is authorized access to Query", self.zid); } Some(ctx) } @@ -230,80 +200,207 @@ impl InterceptorTrait for EgressAclEnforcer { let key_expr = cache .and_then(|i| i.downcast_ref::().map(|e| e.as_str())) .or_else(|| ctx.full_expr())?; + if let NetworkBody::Push(Push { payload: PushBody::Put(_), .. }) = &ctx.msg.body { - let mut decision = self.policy_enforcer.default_permission.clone(); - for subject in &self.interface_list { - match self.policy_enforcer.policy_decision_point( - *subject, - Action::DeclareSubscriber, - key_expr, - ) { - Ok(Permission::Allow) => { - decision = Permission::Allow; - break; - } - Ok(Permission::Deny) => { - decision = Permission::Deny; - continue; - } - Err(e) => { - log::debug!("[ACCESS LOG]: Authorization incomplete due to error {}", e); - return None; - } - } - } - if decision == Permission::Deny { - log::debug!( - "[ACCESS LOG]: {} is unauthorized to be Subscriber", - self.zid - ); + if self.put(key_expr) == Permission::Deny { return None; } - - log::trace!( - "[ACCESS LOG]: {} is authorized access to be Subscriber", - self.zid - ); } else if let NetworkBody::Request(Request { payload: RequestBody::Query(_), .. }) = &ctx.msg.body { - let mut decision = self.policy_enforcer.default_permission.clone(); - for subject in &self.interface_list { - match self.policy_enforcer.policy_decision_point( - *subject, - Action::DeclareQueryable, - key_expr, - ) { - Ok(Permission::Allow) => { - decision = Permission::Allow; - break; - } - Ok(Permission::Deny) => { - decision = Permission::Deny; - continue; - } - Err(e) => { - log::debug!("[ACCESS LOG]: Authorization incomplete due to error {}", e); - return None; - } - } + if self.get(key_expr) == Permission::Deny { + return None; } - if decision == Permission::Deny { - log::debug!("[ACCESS LOG]: {} is unauthorized to be Queryable", self.zid); + } else if let NetworkBody::Declare(Declare { + body: DeclareBody::DeclareSubscriber(_), + .. + }) = &ctx.msg.body + { + if self.declare_subscriber(key_expr) == Permission::Deny { + return None; + } + } else if let NetworkBody::Declare(Declare { + body: DeclareBody::DeclareQueryable(_), + .. 
+ }) = &ctx.msg.body + { + if self.declare_queryable(key_expr) == Permission::Deny { return None; } - - log::trace!( - "[ACCESS LOG]: {} is authorized access to be Queryable", - self.zid - ); } Some(ctx) } } + +pub trait AclActionMethods { + fn policy_enforcer(&self) -> Arc; + fn interface_list(&self) -> Vec; + fn zid(&self) -> ZenohId; + + fn put(&self, key_expr: &str) -> Permission { + let policy_enforcer = self.policy_enforcer(); + let interface_list = self.interface_list(); + let zid = self.zid(); + let mut decision = policy_enforcer.default_permission.clone(); + for subject in &interface_list { + match policy_enforcer.policy_decision_point(*subject, Action::Put, key_expr) { + Ok(Permission::Allow) => { + decision = Permission::Allow; + break; + } + Ok(Permission::Deny) => { + decision = Permission::Deny; + continue; + } + Err(e) => { + log::debug!("[ACCESS LOG]: Authorization incomplete due to error {}", e); + return Permission::Deny; + } + } + } + + if decision == Permission::Deny { + log::debug!("[ACCESS LOG]: {} is unauthorized to Put", zid); + return Permission::Deny; + } + log::trace!("[ACCESS LOG]: {} is authorized access to Put", zid); + Permission::Allow + } + + fn get(&self, key_expr: &str) -> Permission { + let policy_enforcer = self.policy_enforcer(); + let interface_list = self.interface_list(); + let zid = self.zid(); + let mut decision = policy_enforcer.default_permission.clone(); + for subject in &interface_list { + match policy_enforcer.policy_decision_point(*subject, Action::Get, key_expr) { + Ok(Permission::Allow) => { + decision = Permission::Allow; + break; + } + Ok(Permission::Deny) => { + decision = Permission::Deny; + continue; + } + Err(e) => { + log::debug!("[ACCESS LOG]: Authorization incomplete due to error {}", e); + return Permission::Deny; + } + } + } + + if decision == Permission::Deny { + log::debug!("[ACCESS LOG]: {} is unauthorized to Query/Get", zid); + return Permission::Deny; + } + log::trace!("[ACCESS LOG]: {} is authorized access to Query/Get", zid); + Permission::Allow + } + fn declare_subscriber(&self, key_expr: &str) -> Permission { + let policy_enforcer = self.policy_enforcer(); + let interface_list = self.interface_list(); + let zid = self.zid(); + let mut decision = policy_enforcer.default_permission.clone(); + for subject in &interface_list { + match policy_enforcer.policy_decision_point( + *subject, + Action::DeclareSubscriber, + key_expr, + ) { + Ok(Permission::Allow) => { + decision = Permission::Allow; + break; + } + Ok(Permission::Deny) => { + decision = Permission::Deny; + continue; + } + Err(e) => { + log::debug!("[ACCESS LOG]: Authorization incomplete due to error {}", e); + return Permission::Deny; + } + } + } + + if decision == Permission::Deny { + log::debug!("[ACCESS LOG]: {} is unauthorized to be a Subscriber", zid); + return Permission::Deny; + } + log::trace!( + "[ACCESS LOG]: {} is authorized access to be a Subscriber", + zid + ); + Permission::Allow + } + + fn declare_queryable(&self, key_expr: &str) -> Permission { + let policy_enforcer = self.policy_enforcer(); + let interface_list = self.interface_list(); + let zid = self.zid(); + let mut decision = policy_enforcer.default_permission.clone(); + for subject in &interface_list { + match policy_enforcer.policy_decision_point( + *subject, + Action::DeclareQueryable, + key_expr, + ) { + Ok(Permission::Allow) => { + decision = Permission::Allow; + break; + } + Ok(Permission::Deny) => { + decision = Permission::Deny; + continue; + } + Err(e) => { + log::debug!("[ACCESS LOG]: 
Authorization incomplete due to error {}", e); + return Permission::Deny; + } + } + } + + if decision == Permission::Deny { + log::debug!("[ACCESS LOG]: {} is unauthorized to be a Queryable", zid); + return Permission::Deny; + } + log::trace!( + "[ACCESS LOG]: {} is authorized access to be a Queryable", + zid + ); + Permission::Allow + } +} + +impl AclActionMethods for EgressAclEnforcer { + fn policy_enforcer(&self) -> Arc { + self.policy_enforcer.clone() + } + + fn interface_list(&self) -> Vec { + self.interface_list.clone() + } + + fn zid(&self) -> ZenohId { + self.zid + } +} + +impl AclActionMethods for IngressAclEnforcer { + fn policy_enforcer(&self) -> Arc { + self.policy_enforcer.clone() + } + + fn interface_list(&self) -> Vec { + self.interface_list.clone() + } + + fn zid(&self) -> ZenohId { + self.zid + } +} From e8ac3e6c4fbe6c0aabe993c47f4c4f3463154d73 Mon Sep 17 00:00:00 2001 From: snehilzs Date: Tue, 2 Apr 2024 12:34:51 +0200 Subject: [PATCH 082/122] adding ingress and egress flow --- commons/zenoh-config/src/lib.rs | 3 + .../net/routing/interceptor/access_control.rs | 27 ++++++-- .../net/routing/interceptor/authorization.rs | 68 ++++++++++++------- 3 files changed, 70 insertions(+), 28 deletions(-) diff --git a/commons/zenoh-config/src/lib.rs b/commons/zenoh-config/src/lib.rs index 94ed2b146e..9d547f6de3 100644 --- a/commons/zenoh-config/src/lib.rs +++ b/commons/zenoh-config/src/lib.rs @@ -105,14 +105,17 @@ pub struct AclConfigRules { pub interface: Vec, pub key_expr: Vec, pub action: Vec, + pub flow: Vec, pub permission: Permission, } + #[derive(Clone, Serialize, Debug, Deserialize)] pub struct PolicyRule { pub subject: Subject, pub key_expr: String, pub action: Action, pub permission: Permission, + pub flow: DownsamplingFlow, } #[derive(Serialize, Debug, Deserialize, Eq, PartialEq, Hash, Clone)] diff --git a/zenoh/src/net/routing/interceptor/access_control.rs b/zenoh/src/net/routing/interceptor/access_control.rs index 66e81dd98c..7d8e0b1b0d 100644 --- a/zenoh/src/net/routing/interceptor/access_control.rs +++ b/zenoh/src/net/routing/interceptor/access_control.rs @@ -33,8 +33,8 @@ use zenoh_transport::{multicast::TransportMulticast, unicast::TransportUnicast}; use crate::net::routing::RoutingContext; use super::{ - authorization::PolicyEnforcer, EgressInterceptor, IngressInterceptor, InterceptorFactory, - InterceptorFactoryTrait, InterceptorTrait, + authorization::Flow, authorization::PolicyEnforcer, EgressInterceptor, IngressInterceptor, + InterceptorFactory, InterceptorFactoryTrait, InterceptorTrait, }; pub(crate) struct AclEnforcer { pub(crate) enforcer: Arc, @@ -242,6 +242,7 @@ pub trait AclActionMethods { fn policy_enforcer(&self) -> Arc; fn interface_list(&self) -> Vec; fn zid(&self) -> ZenohId; + fn flow(&self) -> Flow; fn put(&self, key_expr: &str) -> Permission { let policy_enforcer = self.policy_enforcer(); @@ -249,7 +250,12 @@ pub trait AclActionMethods { let zid = self.zid(); let mut decision = policy_enforcer.default_permission.clone(); for subject in &interface_list { - match policy_enforcer.policy_decision_point(*subject, Action::Put, key_expr) { + match policy_enforcer.policy_decision_point( + *subject, + self.flow(), + Action::Put, + key_expr, + ) { Ok(Permission::Allow) => { decision = Permission::Allow; break; @@ -279,7 +285,12 @@ pub trait AclActionMethods { let zid = self.zid(); let mut decision = policy_enforcer.default_permission.clone(); for subject in &interface_list { - match policy_enforcer.policy_decision_point(*subject, Action::Get, key_expr) { + 
match policy_enforcer.policy_decision_point( + *subject, + self.flow(), + Action::Get, + key_expr, + ) { Ok(Permission::Allow) => { decision = Permission::Allow; break; @@ -310,6 +321,7 @@ pub trait AclActionMethods { for subject in &interface_list { match policy_enforcer.policy_decision_point( *subject, + self.flow(), Action::DeclareSubscriber, key_expr, ) { @@ -347,6 +359,7 @@ pub trait AclActionMethods { for subject in &interface_list { match policy_enforcer.policy_decision_point( *subject, + self.flow(), Action::DeclareQueryable, key_expr, ) { @@ -389,6 +402,9 @@ impl AclActionMethods for EgressAclEnforcer { fn zid(&self) -> ZenohId { self.zid } + fn flow(&self) -> Flow { + Flow::Egress + } } impl AclActionMethods for IngressAclEnforcer { @@ -403,4 +419,7 @@ impl AclActionMethods for IngressAclEnforcer { fn zid(&self) -> ZenohId { self.zid } + fn flow(&self) -> Flow { + Flow::Ingress + } } diff --git a/zenoh/src/net/routing/interceptor/authorization.rs b/zenoh/src/net/routing/interceptor/authorization.rs index 9a1e01281c..604e0ea9e2 100644 --- a/zenoh/src/net/routing/interceptor/authorization.rs +++ b/zenoh/src/net/routing/interceptor/authorization.rs @@ -21,18 +21,17 @@ use ahash::RandomState; use std::collections::HashMap; use zenoh_config::{ - AclConfig, AclConfigRules, Action, Permission, PolicyRule, Subject, NUMBER_OF_ACTIONS, - NUMBER_OF_PERMISSIONS, + AclConfig, AclConfigRules, Action, DownsamplingFlow, Permission, PolicyRule, Subject, + NUMBER_OF_ACTIONS, NUMBER_OF_PERMISSIONS, }; use zenoh_keyexpr::keyexpr; use zenoh_keyexpr::keyexpr_tree::{IKeyExprTree, IKeyExprTreeMut, KeBoxTree}; use zenoh_result::ZResult; - -pub struct PolicyForSubject(Vec>); //vec of actions over vec of permission for tree of ke for this +pub type Flow = DownsamplingFlow; +pub struct PolicyForSubject(Vec>>); //vec of actions over vec of permission for tree of ke for this pub struct PolicyMap(HashMap); //index of subject (i32) instead of subject (String) type KeTreeRule = KeBoxTree; - pub struct PolicyEnforcer { pub(crate) acl_enabled: bool, pub(crate) default_permission: Permission, @@ -76,13 +75,17 @@ impl PolicyEnforcer { //first initialize the vector of vectors (required to maintain the indices) for index in subject_map.values() { let mut rule: PolicyForSubject = PolicyForSubject(Vec::new()); - for _i in 0..NUMBER_OF_ACTIONS { - let mut action_rule: Vec = Vec::new(); - for _j in 0..NUMBER_OF_PERMISSIONS { - let permission_rule = KeTreeRule::new(); - action_rule.push(permission_rule); + for _k in 0..2 { + let mut flow_rule = Vec::new(); + for _i in 0..NUMBER_OF_ACTIONS { + let mut action_rule: Vec = Vec::new(); + for _j in 0..NUMBER_OF_PERMISSIONS { + let permission_rule = KeTreeRule::new(); + action_rule.push(permission_rule); + } + flow_rule.push(action_rule); } - rule.0.push(action_rule); + rule.0.push(flow_rule); } main_policy.0.insert(*index, rule); } @@ -91,7 +94,8 @@ impl PolicyEnforcer { //add key-expression values to the ketree as per the policy rules if let Some(index) = subject_map.get(&rule.subject) { if let Some(single_policy) = main_policy.0.get_mut(index) { - single_policy.0[rule.action as usize][rule.permission as usize] + single_policy.0[rule.flow as usize][rule.action as usize] + [rule.permission as usize] .insert(keyexpr::new(&rule.key_expr)?, true); } }; @@ -116,14 +120,17 @@ impl PolicyEnforcer { let mut policy_rules: Vec = Vec::new(); for config_rule in config_rule_set { for subject in &config_rule.interface { - for action in &config_rule.action { - for key_expr in 
&config_rule.key_expr { - policy_rules.push(PolicyRule { - subject: Subject::Interface(subject.clone()), - key_expr: key_expr.clone(), - action: action.clone(), - permission: config_rule.permission.clone(), - }) + for flow in &config_rule.flow { + for action in &config_rule.action { + for key_expr in &config_rule.key_expr { + policy_rules.push(PolicyRule { + subject: Subject::Interface(subject.clone()), + key_expr: key_expr.clone(), + action: action.clone(), + permission: config_rule.permission.clone(), + flow: flow.clone(), + }) + } } } } @@ -147,6 +154,7 @@ impl PolicyEnforcer { pub fn policy_decision_point( &self, subject: i32, + flow: Flow, action: Action, key_expr: &str, ) -> ZResult { @@ -154,10 +162,9 @@ impl PolicyEnforcer { Some(policy_map) => { match policy_map.0.get(&subject) { Some(single_policy) => { - let permission_vec = &single_policy.0[action as usize]; - //explicit Deny rules are ALWAYS given preference - let deny_result = permission_vec[Permission::Deny as usize] + let deny_result = single_policy.0[flow.clone() as usize] + [action.clone() as usize][Permission::Deny as usize] .nodes_including(keyexpr::new(&key_expr)?) .count(); if deny_result != 0 { @@ -167,7 +174,8 @@ impl PolicyEnforcer { if self.default_permission == Permission::Allow { Ok(Permission::Allow) } else { - let allow_result = permission_vec[Permission::Allow as usize] + let allow_result = single_policy.0[flow as usize][action as usize] + [Permission::Allow as usize] .nodes_including(keyexpr::new(&key_expr)?) .count(); @@ -191,4 +199,16 @@ impl PolicyEnforcer { } } } + + // pub fn testing_policy_improv(&self, key_expr: &str) -> ZResult> { + // let mut new_rule = KeTreeRule::new(); + // new_rule.insert(keyexpr::new(key_expr)?, true); + // return Ok(RefCell::new(KeTreeRule::new())); + // } + + // pub fn test_breaking(&self) -> ZResult<()> { + // let x = self.testing_policy_improv("test/expression")?; + // let y = x.clone(); + // Ok(()) + // } } From af4141e5bfdd7684251fd7399c8ca79086ca7456 Mon Sep 17 00:00:00 2001 From: snehilzs Date: Tue, 2 Apr 2024 13:17:56 +0200 Subject: [PATCH 083/122] cleaning code --- .../net/routing/interceptor/access_control.rs | 11 +- .../net/routing/interceptor/authorization.rs | 142 ++++++++---------- 2 files changed, 64 insertions(+), 89 deletions(-) diff --git a/zenoh/src/net/routing/interceptor/access_control.rs b/zenoh/src/net/routing/interceptor/access_control.rs index 7d8e0b1b0d..bc45a99a8c 100644 --- a/zenoh/src/net/routing/interceptor/access_control.rs +++ b/zenoh/src/net/routing/interceptor/access_control.rs @@ -22,7 +22,6 @@ use crate::KeyExpr; use std::any::Any; use std::sync::Arc; use zenoh_config::{AclConfig, Action, Permission, Subject, ZenohId}; -//use zenoh_keyexpr::key_expr; use zenoh_protocol::{ network::{Declare, DeclareBody, NetworkBody, NetworkMessage, Push, Request}, zenoh::{PushBody, RequestBody}, @@ -86,12 +85,10 @@ impl InterceptorFactoryTrait for AclEnforcer { Ok(links) => { for link in links { let enforcer = self.enforcer.clone(); - if let Some(subject_map) = &enforcer.subject_map { - for face in link.interfaces { - let subject = &Subject::Interface(face); - if let Some(val) = subject_map.get(subject) { - interface_list.push(*val); - } + for face in link.interfaces { + let subject = &Subject::Interface(face); + if let Some(val) = enforcer.subject_map.get(subject) { + interface_list.push(*val); } } } diff --git a/zenoh/src/net/routing/interceptor/authorization.rs b/zenoh/src/net/routing/interceptor/authorization.rs index 604e0ea9e2..793cf74216 100644 --- 
a/zenoh/src/net/routing/interceptor/authorization.rs +++ b/zenoh/src/net/routing/interceptor/authorization.rs @@ -28,15 +28,15 @@ use zenoh_keyexpr::keyexpr; use zenoh_keyexpr::keyexpr_tree::{IKeyExprTree, IKeyExprTreeMut, KeBoxTree}; use zenoh_result::ZResult; pub type Flow = DownsamplingFlow; -pub struct PolicyForSubject(Vec>>); //vec of actions over vec of permission for tree of ke for this -pub struct PolicyMap(HashMap); //index of subject (i32) instead of subject (String) +pub struct PolicyForSubject(Vec>>); //vec of flows over vec of actions over vec of permissions of tree of kexpr +pub struct PolicyMap(HashMap); //mapping index of subject (i32) instead of actual subject value (String) type KeTreeRule = KeBoxTree; pub struct PolicyEnforcer { pub(crate) acl_enabled: bool, pub(crate) default_permission: Permission, - pub(crate) subject_map: Option>, - pub(crate) policy_map: Option, + pub(crate) subject_map: HashMap, + pub(crate) policy_map: PolicyMap, } #[derive(Debug, Clone)] @@ -50,8 +50,8 @@ impl PolicyEnforcer { PolicyEnforcer { acl_enabled: true, default_permission: Permission::Deny, - subject_map: None, - policy_map: None, + subject_map: HashMap::default(), + policy_map: PolicyMap(HashMap::default()), } } @@ -65,44 +65,44 @@ impl PolicyEnforcer { if let Some(rules) = acl_config.rules { if rules.is_empty() { log::warn!("[ACCESS LOG]: ACL ruleset in config file is empty!!!"); - self.policy_map = None; - self.subject_map = None; - } - let policy_information = self.policy_information_point(rules)?; + self.policy_map = PolicyMap(HashMap::default()); + self.subject_map = HashMap::default(); + } else { + let policy_information = self.policy_information_point(rules)?; - let subject_map = policy_information.subject_map; - let mut main_policy: PolicyMap = PolicyMap(HashMap::default()); - //first initialize the vector of vectors (required to maintain the indices) - for index in subject_map.values() { - let mut rule: PolicyForSubject = PolicyForSubject(Vec::new()); - for _k in 0..2 { - let mut flow_rule = Vec::new(); - for _i in 0..NUMBER_OF_ACTIONS { - let mut action_rule: Vec = Vec::new(); - for _j in 0..NUMBER_OF_PERMISSIONS { - let permission_rule = KeTreeRule::new(); - action_rule.push(permission_rule); + let subject_map = policy_information.subject_map; + let mut main_policy: PolicyMap = PolicyMap(HashMap::default()); + //first initialize the vector of vectors (required to maintain the indices) + for index in subject_map.values() { + let mut rule: PolicyForSubject = PolicyForSubject(Vec::new()); + for _k in 0..2 { + let mut flow_rule = Vec::new(); + for _i in 0..NUMBER_OF_ACTIONS { + let mut action_rule: Vec = Vec::new(); + for _j in 0..NUMBER_OF_PERMISSIONS { + let permission_rule = KeTreeRule::new(); + action_rule.push(permission_rule); + } + flow_rule.push(action_rule); } - flow_rule.push(action_rule); + rule.0.push(flow_rule); } - rule.0.push(flow_rule); + main_policy.0.insert(*index, rule); } - main_policy.0.insert(*index, rule); - } - for rule in policy_information.policy_rules { - //add key-expression values to the ketree as per the policy rules - if let Some(index) = subject_map.get(&rule.subject) { - if let Some(single_policy) = main_policy.0.get_mut(index) { - single_policy.0[rule.flow as usize][rule.action as usize] - [rule.permission as usize] - .insert(keyexpr::new(&rule.key_expr)?, true); - } - }; + for rule in policy_information.policy_rules { + //add key-expression values to the ketree as per the policy rules + if let Some(index) = subject_map.get(&rule.subject) { + if 
let Some(single_policy) = main_policy.0.get_mut(index) { + single_policy.0[rule.flow as usize][rule.action as usize] + [rule.permission as usize] + .insert(keyexpr::new(&rule.key_expr)?, true); + } + }; + } + self.policy_map = main_policy; + self.subject_map = subject_map; } - //add to the policy_enforcer - self.policy_map = Some(main_policy); - self.subject_map = Some(subject_map); } else { log::warn!("[ACCESS LOG]: No ACL rules have been specified!!!"); } @@ -158,57 +158,35 @@ impl PolicyEnforcer { action: Action, key_expr: &str, ) -> ZResult { - match &self.policy_map { - Some(policy_map) => { - match policy_map.0.get(&subject) { - Some(single_policy) => { - //explicit Deny rules are ALWAYS given preference - let deny_result = single_policy.0[flow.clone() as usize] - [action.clone() as usize][Permission::Deny as usize] - .nodes_including(keyexpr::new(&key_expr)?) - .count(); - if deny_result != 0 { - return Ok(Permission::Deny); - } - //if default_permission is Allow, ignore checks for Allow - if self.default_permission == Permission::Allow { - Ok(Permission::Allow) - } else { - let allow_result = single_policy.0[flow as usize][action as usize] - [Permission::Allow as usize] - .nodes_including(keyexpr::new(&key_expr)?) - .count(); + let policy_map = &self.policy_map; - if allow_result != 0 { - Ok(Permission::Allow) - } else { - Ok(Permission::Deny) - } - } - } - None => Ok(self.default_permission.clone()), + match policy_map.0.get(&subject) { + Some(single_policy) => { + //explicit Deny rules are ALWAYS given preference + let deny_result = single_policy.0[flow.clone() as usize][action.clone() as usize] + [Permission::Deny as usize] + .nodes_including(keyexpr::new(&key_expr)?) + .count(); + if deny_result != 0 { + return Ok(Permission::Deny); } - } - None => { - //when list is present (not null) but empty + //if default_permission is Allow, ignore checks for Allow if self.default_permission == Permission::Allow { Ok(Permission::Allow) } else { - Ok(Permission::Deny) + let allow_result = single_policy.0[flow as usize][action as usize] + [Permission::Allow as usize] + .nodes_including(keyexpr::new(&key_expr)?) 
+ .count(); + + if allow_result != 0 { + Ok(Permission::Allow) + } else { + Ok(Permission::Deny) + } } } + None => Ok(self.default_permission.clone()), } } - - // pub fn testing_policy_improv(&self, key_expr: &str) -> ZResult> { - // let mut new_rule = KeTreeRule::new(); - // new_rule.insert(keyexpr::new(key_expr)?, true); - // return Ok(RefCell::new(KeTreeRule::new())); - // } - - // pub fn test_breaking(&self) -> ZResult<()> { - // let x = self.testing_policy_improv("test/expression")?; - // let y = x.clone(); - // Ok(()) - // } } From 74c58e50625dbd77296f295cce5d9efaa04d8a4c Mon Sep 17 00:00:00 2001 From: snehilzs Date: Wed, 3 Apr 2024 17:02:32 +0200 Subject: [PATCH 084/122] adding tests --- .gitignore | 2 +- .../net/routing/interceptor/authorization.rs | 4 +- zenoh/tests/acl.rs | 375 ++++++++++++++++++ 3 files changed, 379 insertions(+), 2 deletions(-) create mode 100644 zenoh/tests/acl.rs diff --git a/.gitignore b/.gitignore index 6a16b1d1d2..82055d6af7 100644 --- a/.gitignore +++ b/.gitignore @@ -22,4 +22,4 @@ cargo-timing*.html # Remove test data -tests \ No newline at end of file +testsfiles \ No newline at end of file diff --git a/zenoh/src/net/routing/interceptor/authorization.rs b/zenoh/src/net/routing/interceptor/authorization.rs index 793cf74216..92b61221c6 100644 --- a/zenoh/src/net/routing/interceptor/authorization.rs +++ b/zenoh/src/net/routing/interceptor/authorization.rs @@ -28,7 +28,9 @@ use zenoh_keyexpr::keyexpr; use zenoh_keyexpr::keyexpr_tree::{IKeyExprTree, IKeyExprTreeMut, KeBoxTree}; use zenoh_result::ZResult; pub type Flow = DownsamplingFlow; -pub struct PolicyForSubject(Vec>>); //vec of flows over vec of actions over vec of permissions of tree of kexpr +type PermissionVec = Vec; +type ActionVec = Vec; +pub struct PolicyForSubject(Vec); //vec of flows over vec of actions over vec of permissions of tree of kexpr pub struct PolicyMap(HashMap); //mapping index of subject (i32) instead of actual subject value (String) type KeTreeRule = KeBoxTree; diff --git a/zenoh/tests/acl.rs b/zenoh/tests/acl.rs new file mode 100644 index 0000000000..3e88d449c5 --- /dev/null +++ b/zenoh/tests/acl.rs @@ -0,0 +1,375 @@ +use std::sync::{Arc, Mutex}; +use zenoh::prelude::sync::*; +use zenoh_config::Config; +use zenoh_core::zlock; + +#[test] +fn test_acl() { + env_logger::init(); + test_pub_sub_allow(); + test_pub_sub_deny(); + test_pub_sub_allow_then_deny(); + test_pub_sub_deny_then_allow(); +} + +fn test_pub_sub_deny() { + let mut config_router = Config::default(); + config_router.set_mode(Some(WhatAmI::Router)).unwrap(); + config_router + .listen + .set_endpoints(vec![ + "tcp/localhost:7447".parse().unwrap(), + "tcp/localhost:7448".parse().unwrap(), + ]) + .unwrap(); + + config_router + .scouting + .multicast + .set_enabled(Some(false)) + .unwrap(); + config_router + .insert_json5( + "transport", + r#"{ + acl: { + "enabled": true, + "default_permission": "deny", + "rules": + [ + ] + } + }"#, + ) + .unwrap(); + + let mut config_sub = Config::default(); + config_sub.set_mode(Some(WhatAmI::Client)).unwrap(); + config_sub + .connect + .set_endpoints(vec!["tcp/localhost:7447".parse().unwrap()]) + .unwrap(); + config_sub + .scouting + .multicast + .set_enabled(Some(false)) + .unwrap(); + + let mut config_pub = Config::default(); + config_pub.set_mode(Some(WhatAmI::Client)).unwrap(); + config_pub + .connect + .set_endpoints(vec!["tcp/localhost:7448".parse().unwrap()]) + .unwrap(); + + config_pub + .scouting + .multicast + .set_enabled(Some(false)) + .unwrap(); + + const KEY_EXPR: &str = 
"test/demo"; + const VALUE: &str = "zenoh"; + + let _session = zenoh::open(config_router).res().unwrap(); + + let sub_session = zenoh::open(config_sub).res().unwrap(); + let pub_session = zenoh::open(config_pub).res().unwrap(); + + let publisher = pub_session.declare_publisher(KEY_EXPR).res().unwrap(); + + let received_value = Arc::new(Mutex::new(String::new())); + let temp_recv_value = received_value.clone(); + + let _subscriber = &sub_session + .declare_subscriber(KEY_EXPR) + .callback(move |sample| { + let mut temp_value = zlock!(temp_recv_value); + *temp_value = sample.value.to_string(); + }) + .res() + .unwrap(); + + std::thread::sleep(std::time::Duration::from_millis(10)); + publisher.put(VALUE).res().unwrap(); + std::thread::sleep(std::time::Duration::from_millis(10)); + assert_ne!(*zlock!(received_value), VALUE); +} + +fn test_pub_sub_allow() { + let mut config_router = Config::default(); + config_router.set_mode(Some(WhatAmI::Router)).unwrap(); + config_router + .listen + .set_endpoints(vec![ + "tcp/localhost:7447".parse().unwrap(), + "tcp/localhost:7448".parse().unwrap(), + ]) + .unwrap(); + + config_router + .scouting + .multicast + .set_enabled(Some(false)) + .unwrap(); + config_router + .insert_json5( + "transport", + r#"{ + acl: { + "enabled": true, + "default_permission": "allow", + "rules": + [ + ] + } + }"#, + ) + .unwrap(); + + let mut config_sub = Config::default(); + config_sub.set_mode(Some(WhatAmI::Client)).unwrap(); + config_sub + .connect + .set_endpoints(vec!["tcp/localhost:7447".parse().unwrap()]) + .unwrap(); + config_sub + .scouting + .multicast + .set_enabled(Some(false)) + .unwrap(); + + let mut config_pub = Config::default(); + config_pub.set_mode(Some(WhatAmI::Client)).unwrap(); + config_pub + .connect + .set_endpoints(vec!["tcp/localhost:7448".parse().unwrap()]) + .unwrap(); + + config_pub + .scouting + .multicast + .set_enabled(Some(false)) + .unwrap(); + + const KEY_EXPR: &str = "test/demo"; + const VALUE: &str = "zenoh"; + + let _session = zenoh::open(config_router).res().unwrap(); + + let sub_session = zenoh::open(config_sub).res().unwrap(); + let pub_session = zenoh::open(config_pub).res().unwrap(); + + let publisher = pub_session.declare_publisher(KEY_EXPR).res().unwrap(); + + let received_value = Arc::new(Mutex::new(String::new())); + let temp_recv_value = received_value.clone(); + + let _subscriber = sub_session + .declare_subscriber(KEY_EXPR) + .callback(move |sample| { + let mut temp_value = zlock!(temp_recv_value); + *temp_value = sample.value.to_string(); + }) + .res() + .unwrap(); + + std::thread::sleep(std::time::Duration::from_millis(10)); + publisher.put(VALUE).res().unwrap(); + std::thread::sleep(std::time::Duration::from_millis(10)); + assert_eq!(*zlock!(received_value), VALUE); +} + +fn test_pub_sub_allow_then_deny() { + let mut config_router = Config::default(); + config_router.set_mode(Some(WhatAmI::Router)).unwrap(); + config_router + .listen + .set_endpoints(vec![ + "tcp/localhost:7447".parse().unwrap(), + "tcp/localhost:7448".parse().unwrap(), + ]) + .unwrap(); + + config_router + .scouting + .multicast + .set_enabled(Some(false)) + .unwrap(); + config_router + .insert_json5( + "transport", + r#"{ + acl: { + "enabled": true, + "default_permission": "allow", + "rules": + [ + { + "permission": "deny", + "flow": ["egress"], + "action": [ + "put", + ], + "key_expr": [ + "test/demo" + ], + "interface": [ + "lo0" + ] + }, + ] + } + }"#, + ) + .unwrap(); + + let mut config_sub = Config::default(); + 
config_sub.set_mode(Some(WhatAmI::Client)).unwrap(); + config_sub + .connect + .set_endpoints(vec!["tcp/localhost:7447".parse().unwrap()]) + .unwrap(); + config_sub + .scouting + .multicast + .set_enabled(Some(false)) + .unwrap(); + + let mut config_pub = Config::default(); + config_pub.set_mode(Some(WhatAmI::Client)).unwrap(); + config_pub + .connect + .set_endpoints(vec!["tcp/localhost:7448".parse().unwrap()]) + .unwrap(); + + config_pub + .scouting + .multicast + .set_enabled(Some(false)) + .unwrap(); + + const KEY_EXPR: &str = "test/demo"; + const VALUE: &str = "zenoh"; + + let _session = zenoh::open(config_router).res().unwrap(); + + let sub_session = zenoh::open(config_sub).res().unwrap(); + let pub_session = zenoh::open(config_pub).res().unwrap(); + + let publisher = pub_session.declare_publisher(KEY_EXPR).res().unwrap(); + + let received_value = Arc::new(Mutex::new(String::new())); + let temp_recv_value = received_value.clone(); + + let _subscriber = sub_session + .declare_subscriber(KEY_EXPR) + .callback(move |sample| { + let mut temp_value = zlock!(temp_recv_value); + *temp_value = sample.value.to_string(); + }) + .res() + .unwrap(); + + std::thread::sleep(std::time::Duration::from_millis(10)); + publisher.put(VALUE).res().unwrap(); + std::thread::sleep(std::time::Duration::from_millis(10)); + assert_ne!(*zlock!(received_value), VALUE); +} + +fn test_pub_sub_deny_then_allow() { + let mut config_router = Config::default(); + config_router.set_mode(Some(WhatAmI::Router)).unwrap(); + config_router + .listen + .set_endpoints(vec![ + "tcp/localhost:7447".parse().unwrap(), + "tcp/localhost:7448".parse().unwrap(), + ]) + .unwrap(); + + config_router + .scouting + .multicast + .set_enabled(Some(false)) + .unwrap(); + config_router + .insert_json5( + "transport", + r#"{ + acl: { + "enabled": true, + "default_permission": "deny", + "rules": + [ + { + "permission": "allow", + "flow": ["egress","ingress"], + "action": [ + "put", + "declare_subscriber" + ], + "key_expr": [ + "test/demo" + ], + "interface": [ + "lo0" + ] + }, + ] + } + }"#, + ) + .unwrap(); + + let mut config_sub = Config::default(); + config_sub.set_mode(Some(WhatAmI::Client)).unwrap(); + config_sub + .connect + .set_endpoints(vec!["tcp/localhost:7447".parse().unwrap()]) + .unwrap(); + config_sub + .scouting + .multicast + .set_enabled(Some(false)) + .unwrap(); + + let mut config_pub = Config::default(); + config_pub.set_mode(Some(WhatAmI::Client)).unwrap(); + config_pub + .connect + .set_endpoints(vec!["tcp/localhost:7448".parse().unwrap()]) + .unwrap(); + config_pub + .scouting + .multicast + .set_enabled(Some(false)) + .unwrap(); + + const KEY_EXPR: &str = "test/demo"; + const VALUE: &str = "zenoh"; + + let _session = zenoh::open(config_router).res().unwrap(); + + let sub_session = zenoh::open(config_sub).res().unwrap(); + let pub_session = zenoh::open(config_pub).res().unwrap(); + + let publisher = pub_session.declare_publisher(KEY_EXPR).res().unwrap(); + + let received_value = Arc::new(Mutex::new(String::new())); + let temp_recv_value = received_value.clone(); + + let _subscriber = sub_session + .declare_subscriber(KEY_EXPR) + .callback(move |sample| { + let mut temp_value = zlock!(temp_recv_value); + *temp_value = sample.value.to_string(); + }) + .res() + .unwrap(); + + std::thread::sleep(std::time::Duration::from_millis(10)); + publisher.put(VALUE).res().unwrap(); + std::thread::sleep(std::time::Duration::from_millis(10)); + assert_eq!(*zlock!(received_value), VALUE); +} From eae9df72471be8285fd4558f40697223e0ef082a 
Mon Sep 17 00:00:00 2001 From: snehilzs Date: Thu, 4 Apr 2024 00:03:34 +0200 Subject: [PATCH 085/122] cleaning policy code --- Cargo.lock | 22 +++++ Cargo.toml | 1 + DEFAULT_CONFIG.json5 | 79 ++++++++-------- commons/zenoh-config/Cargo.toml | 1 + commons/zenoh-config/src/lib.rs | 10 +-- zenoh/Cargo.toml | 1 + .../net/routing/interceptor/access_control.rs | 20 ++--- .../net/routing/interceptor/authorization.rs | 89 +++++++++++-------- 8 files changed, 128 insertions(+), 95 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 850ba28d37..942bdcb941 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1091,6 +1091,26 @@ version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a26ae43d7bcc3b814de94796a5e736d4029efb0ee900c12e2d54c993ad1a1e07" +[[package]] +name = "enum-map" +version = "2.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6866f3bfdf8207509a033af1a75a7b08abda06bbaaeae6669323fd5a097df2e9" +dependencies = [ + "enum-map-derive", +] + +[[package]] +name = "enum-map-derive" +version = "0.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f282cfdfe92516eb26c2af8589c274c7c17681f5ecc03c18255fe741c6aa64eb" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.33", +] + [[package]] name = "env_filter" version = "0.1.0" @@ -4378,6 +4398,7 @@ dependencies = [ "async-trait", "base64 0.21.4", "const_format", + "enum-map", "env_logger", "event-listener 4.0.0", "flume", @@ -4470,6 +4491,7 @@ version = "0.11.0-dev" name = "zenoh-config" version = "0.11.0-dev" dependencies = [ + "enum-map", "flume", "json5", "log", diff --git a/Cargo.toml b/Cargo.toml index 33dd067d63..78d6c5fd2c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -197,6 +197,7 @@ zenoh-link = { version = "0.11.0-dev", path = "io/zenoh-link" } zenoh-link-commons = { version = "0.11.0-dev", path = "io/zenoh-link-commons" } zenoh = { version = "0.11.0-dev", path = "zenoh", default-features = false } zenoh-runtime = { version = "0.11.0-dev", path = "commons/zenoh-runtime" } +enum-map = "2.7.3" [profile.dev] debug = true diff --git a/DEFAULT_CONFIG.json5 b/DEFAULT_CONFIG.json5 index e12b29bec6..eab6e0323b 100644 --- a/DEFAULT_CONFIG.json5 +++ b/DEFAULT_CONFIG.json5 @@ -328,49 +328,42 @@ known_keys_file: null, }, }, - acl: { - "enabled": true, //[true/false] acl will be activated only if this is set to true - "default_permission": "deny", //[deny/allow] default permission is deny (even if this is left empty or not specified) - "rules": - [ - { - "action": [ - "put" - ], - "permission": "allow", - "key_expr": [ - "test/thr" - ], - "interface": [ - "lo0" - ] - }, - { - "action": [ - "declare_subscriber" - ], - "permission": "allow", - "interface": [ - "lo0" - ], - "key_expr": [ - "test/thr" - ] - }, - // { - // "action": [ - // "get" - // ], - // "permission": "allow", - // "key_expr": [ - // "demo/example/**" - // ], - // "interface": [ - // "lo0" - // ] - // } - ] - } + // acl: { + // ///[true/false] acl will be activated only if this is set to true + // "enabled": true, + // ///[deny/allow] default permission is deny (even if this is left empty or not specified) + // "default_permission": "allow", + // ///rule set for permissions allowing or denying access to key-expressions + // "rules": + // [ + // { + // "action": [ + // "put" + // ], + // "flow":["egress","ingress"], + // "permission": "allow", + // "key_expr": [ + // "test/thr" + // ], + // "interface": [ + // "lo0" + // ] + // }, + // { + // "action": [ + // "declare_subscriber" + // 
], + // "flow":["egress","ingress"], + // "permission": "allow", + // "interface": [ + // "lo0" + // ], + // "key_expr": [ + // "test/thr" + // ] + // }, + // ] + //} }, /// Configure the Admin Space /// Unstable: this configuration part works as advertised, but may change in a future release diff --git a/commons/zenoh-config/Cargo.toml b/commons/zenoh-config/Cargo.toml index feade8cc10..995e62768b 100644 --- a/commons/zenoh-config/Cargo.toml +++ b/commons/zenoh-config/Cargo.toml @@ -37,3 +37,4 @@ zenoh-protocol = { workspace = true } zenoh-result = { workspace = true } zenoh-util = { workspace = true } secrecy = { workspace = true } +enum-map = { workspace = true } diff --git a/commons/zenoh-config/src/lib.rs b/commons/zenoh-config/src/lib.rs index 9d547f6de3..6a2d2d0f02 100644 --- a/commons/zenoh-config/src/lib.rs +++ b/commons/zenoh-config/src/lib.rs @@ -16,6 +16,7 @@ pub mod defaults; mod include; +use enum_map::Enum; use include::recursive_include; use secrecy::{CloneableSecret, DebugSecret, Secret, SerializableSecret, Zeroize}; use serde::{Deserialize, Serialize}; @@ -73,7 +74,7 @@ impl Zeroize for SecretString { pub type SecretValue = Secret; -#[derive(Debug, Deserialize, Serialize, Clone)] +#[derive(Debug, Deserialize, Serialize, Clone, Enum)] #[serde(rename_all = "lowercase")] pub enum DownsamplingFlow { Egress, @@ -125,7 +126,7 @@ pub enum Subject { Interface(String), } -#[derive(Clone, Debug, Serialize, Deserialize, Eq, Hash, PartialEq)] +#[derive(Clone, Debug, Serialize, Deserialize, Eq, Hash, PartialEq, Enum)] #[serde(rename_all = "snake_case")] pub enum Action { Put, @@ -133,16 +134,15 @@ pub enum Action { Get, DeclareQueryable, } -pub const NUMBER_OF_ACTIONS: usize = 4; //size of Action enum (change according to Action size) -#[derive(Clone, Debug, Serialize, Deserialize, Eq, Hash, PartialEq)] +#[derive(Clone, Debug, Serialize, Deserialize, Eq, Hash, PartialEq, Enum)] #[serde(rename_all = "lowercase")] pub enum Permission { Allow, Deny, } -pub const NUMBER_OF_PERMISSIONS: usize = 2; //size of permission enum (permanently fixed to 2) +pub type Flow = DownsamplingFlow; pub trait ConfigValidator: Send + Sync { fn check_config( &self, diff --git a/zenoh/Cargo.toml b/zenoh/Cargo.toml index 2d85a604c5..13ab21ced1 100644 --- a/zenoh/Cargo.toml +++ b/zenoh/Cargo.toml @@ -107,6 +107,7 @@ zenoh-sync = { workspace = true } zenoh-transport = { workspace = true } zenoh-util = { workspace = true } zenoh-runtime = { workspace = true } +enum-map = { workspace = true } [build-dependencies] rustc_version = { workspace = true } diff --git a/zenoh/src/net/routing/interceptor/access_control.rs b/zenoh/src/net/routing/interceptor/access_control.rs index bc45a99a8c..088380cdbe 100644 --- a/zenoh/src/net/routing/interceptor/access_control.rs +++ b/zenoh/src/net/routing/interceptor/access_control.rs @@ -18,25 +18,23 @@ //! //! 
[Click here for Zenoh's documentation](../zenoh/index.html) +use super::{ + authorization::PolicyEnforcer, EgressInterceptor, IngressInterceptor, InterceptorFactory, + InterceptorFactoryTrait, InterceptorTrait, +}; +use crate::net::routing::RoutingContext; use crate::KeyExpr; use std::any::Any; use std::sync::Arc; -use zenoh_config::{AclConfig, Action, Permission, Subject, ZenohId}; +use zenoh_config::{AclConfig, Action, Flow, Permission, Subject, ZenohId}; use zenoh_protocol::{ network::{Declare, DeclareBody, NetworkBody, NetworkMessage, Push, Request}, zenoh::{PushBody, RequestBody}, }; use zenoh_result::ZResult; use zenoh_transport::{multicast::TransportMulticast, unicast::TransportUnicast}; - -use crate::net::routing::RoutingContext; - -use super::{ - authorization::Flow, authorization::PolicyEnforcer, EgressInterceptor, IngressInterceptor, - InterceptorFactory, InterceptorFactoryTrait, InterceptorTrait, -}; -pub(crate) struct AclEnforcer { - pub(crate) enforcer: Arc, +pub struct AclEnforcer { + enforcer: Arc, } struct EgressAclEnforcer { policy_enforcer: Arc, @@ -147,7 +145,6 @@ impl InterceptorTrait for IngressAclEnforcer { let key_expr = cache .and_then(|i| i.downcast_ref::().map(|e| e.as_str())) .or_else(|| ctx.full_expr())?; - if let NetworkBody::Push(Push { payload: PushBody::Put(_), .. @@ -234,7 +231,6 @@ impl InterceptorTrait for EgressAclEnforcer { Some(ctx) } } - pub trait AclActionMethods { fn policy_enforcer(&self) -> Arc; fn interface_list(&self) -> Vec; diff --git a/zenoh/src/net/routing/interceptor/authorization.rs b/zenoh/src/net/routing/interceptor/authorization.rs index 92b61221c6..7ab8fb54ae 100644 --- a/zenoh/src/net/routing/interceptor/authorization.rs +++ b/zenoh/src/net/routing/interceptor/authorization.rs @@ -17,33 +17,29 @@ //! This module is intended for Zenoh's internal use. //! //! 
[Click here for Zenoh's documentation](../zenoh/index.html) - use ahash::RandomState; +use enum_map::{enum_map, EnumMap}; use std::collections::HashMap; -use zenoh_config::{ - AclConfig, AclConfigRules, Action, DownsamplingFlow, Permission, PolicyRule, Subject, - NUMBER_OF_ACTIONS, NUMBER_OF_PERMISSIONS, -}; +use zenoh_config::{AclConfig, AclConfigRules, Action, Flow, Permission, PolicyRule, Subject}; use zenoh_keyexpr::keyexpr; use zenoh_keyexpr::keyexpr_tree::{IKeyExprTree, IKeyExprTreeMut, KeBoxTree}; use zenoh_result::ZResult; -pub type Flow = DownsamplingFlow; type PermissionVec = Vec; type ActionVec = Vec; -pub struct PolicyForSubject(Vec); //vec of flows over vec of actions over vec of permissions of tree of kexpr -pub struct PolicyMap(HashMap); //mapping index of subject (i32) instead of actual subject value (String) - +type PolicyForSubject = Vec; +type PolicyMap = HashMap; +type SubjectMap = HashMap; type KeTreeRule = KeBoxTree; pub struct PolicyEnforcer { pub(crate) acl_enabled: bool, pub(crate) default_permission: Permission, - pub(crate) subject_map: HashMap, + pub(crate) subject_map: SubjectMap, pub(crate) policy_map: PolicyMap, } #[derive(Debug, Clone)] pub struct PolicyInformation { - subject_map: HashMap, + subject_map: SubjectMap, policy_rules: Vec, } @@ -52,8 +48,8 @@ impl PolicyEnforcer { PolicyEnforcer { acl_enabled: true, default_permission: Permission::Deny, - subject_map: HashMap::default(), - policy_map: PolicyMap(HashMap::default()), + subject_map: SubjectMap::default(), + policy_map: PolicyMap::default(), } } @@ -61,43 +57,45 @@ impl PolicyEnforcer { initializes the policy_enforcer */ pub fn init(&mut self, acl_config: AclConfig) -> ZResult<()> { + let (action_index, permission_index, flow_index) = get_enum_map(); + let (number_of_actions, number_of_permissions, number_of_flows) = + (action_index.len(), permission_index.len(), flow_index.len()); + self.acl_enabled = acl_config.enabled; self.default_permission = acl_config.default_permission; if self.acl_enabled { if let Some(rules) = acl_config.rules { if rules.is_empty() { log::warn!("[ACCESS LOG]: ACL ruleset in config file is empty!!!"); - self.policy_map = PolicyMap(HashMap::default()); - self.subject_map = HashMap::default(); + self.policy_map = PolicyMap::default(); + self.subject_map = SubjectMap::default(); } else { let policy_information = self.policy_information_point(rules)?; - let subject_map = policy_information.subject_map; - let mut main_policy: PolicyMap = PolicyMap(HashMap::default()); - //first initialize the vector of vectors (required to maintain the indices) + let mut main_policy: PolicyMap = PolicyMap::default(); + for index in subject_map.values() { - let mut rule: PolicyForSubject = PolicyForSubject(Vec::new()); - for _k in 0..2 { + let mut rule = PolicyForSubject::new(); + for _k in 0..number_of_flows { let mut flow_rule = Vec::new(); - for _i in 0..NUMBER_OF_ACTIONS { + for _i in 0..number_of_actions { let mut action_rule: Vec = Vec::new(); - for _j in 0..NUMBER_OF_PERMISSIONS { + for _j in 0..number_of_permissions { let permission_rule = KeTreeRule::new(); action_rule.push(permission_rule); } flow_rule.push(action_rule); } - rule.0.push(flow_rule); + rule.push(flow_rule); } - main_policy.0.insert(*index, rule); + main_policy.insert(*index, rule); } for rule in policy_information.policy_rules { - //add key-expression values to the ketree as per the policy rules if let Some(index) = subject_map.get(&rule.subject) { - if let Some(single_policy) = main_policy.0.get_mut(index) { - 
single_policy.0[rule.flow as usize][rule.action as usize] - [rule.permission as usize] + if let Some(single_policy) = main_policy.get_mut(index) { + single_policy[flow_index[rule.flow]][action_index[rule.action]] + [permission_index[rule.permission]] .insert(keyexpr::new(&rule.key_expr)?, true); } }; @@ -137,7 +135,7 @@ impl PolicyEnforcer { } } } - let mut subject_map = HashMap::default(); + let mut subject_map = SubjectMap::default(); let mut counter = 1; //starting at 1 since 0 is the init value and should not match anything for rule in policy_rules.iter() { subject_map.insert(rule.subject.clone(), counter); @@ -162,22 +160,21 @@ impl PolicyEnforcer { ) -> ZResult { let policy_map = &self.policy_map; - match policy_map.0.get(&subject) { + let (action_index, permission_index, flow_index) = get_enum_map(); + match policy_map.get(&subject) { Some(single_policy) => { - //explicit Deny rules are ALWAYS given preference - let deny_result = single_policy.0[flow.clone() as usize][action.clone() as usize] + let deny_result = single_policy[flow.clone() as usize][action.clone() as usize] [Permission::Deny as usize] .nodes_including(keyexpr::new(&key_expr)?) .count(); if deny_result != 0 { return Ok(Permission::Deny); } - //if default_permission is Allow, ignore checks for Allow if self.default_permission == Permission::Allow { Ok(Permission::Allow) } else { - let allow_result = single_policy.0[flow as usize][action as usize] - [Permission::Allow as usize] + let allow_result = single_policy[flow_index[flow]][action_index[action]] + [permission_index[Permission::Allow]] .nodes_including(keyexpr::new(&key_expr)?) .count(); @@ -192,3 +189,25 @@ impl PolicyEnforcer { } } } + +fn get_enum_map() -> ( + EnumMap, + EnumMap, + EnumMap, +) { + let action_index = enum_map! { + Action::Put => 0_usize, + Action::DeclareSubscriber => 1, + Action::Get => 2, + Action::DeclareQueryable => 3, + }; + let permission_index = enum_map! { + Permission::Allow => 0_usize, + Permission::Deny => 1 + }; + let flow_index = enum_map! 
{ + Flow::Egress => 0_usize, + Flow::Ingress => 1 + }; + (action_index, permission_index, flow_index) +} From 8e7ebf608d33f0cc69d60f7d66db17d1349b46b3 Mon Sep 17 00:00:00 2001 From: snehilzs Date: Thu, 4 Apr 2024 11:20:37 +0200 Subject: [PATCH 086/122] acl tests for queryable --- zenoh/tests/acl.rs | 96 +++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 95 insertions(+), 1 deletion(-) diff --git a/zenoh/tests/acl.rs b/zenoh/tests/acl.rs index 3e88d449c5..07433cf3da 100644 --- a/zenoh/tests/acl.rs +++ b/zenoh/tests/acl.rs @@ -10,6 +10,7 @@ fn test_acl() { test_pub_sub_deny(); test_pub_sub_allow_then_deny(); test_pub_sub_deny_then_allow(); + test_get_queryable_deny_then_allow(); } fn test_pub_sub_deny() { @@ -276,7 +277,7 @@ fn test_pub_sub_allow_then_deny() { std::thread::sleep(std::time::Duration::from_millis(10)); assert_ne!(*zlock!(received_value), VALUE); } - +#[test] fn test_pub_sub_deny_then_allow() { let mut config_router = Config::default(); config_router.set_mode(Some(WhatAmI::Router)).unwrap(); @@ -373,3 +374,96 @@ fn test_pub_sub_deny_then_allow() { std::thread::sleep(std::time::Duration::from_millis(10)); assert_eq!(*zlock!(received_value), VALUE); } + +#[test] +fn test_get_queryable_deny_then_allow() { + let mut config_router = Config::default(); + config_router.set_mode(Some(WhatAmI::Router)).unwrap(); + config_router + .listen + .set_endpoints(vec![ + "tcp/localhost:7447".parse().unwrap(), + "tcp/localhost:7448".parse().unwrap(), + ]) + .unwrap(); + + config_router + .scouting + .multicast + .set_enabled(Some(false)) + .unwrap(); + config_router + .insert_json5( + "transport", + r#"{ + acl: { + "enabled": true, + "default_permission": "deny", + "rules": + [ + { + "permission": "allow", + "flow": ["egress","ingress"], + "action": [ + "get", + "declare_queryable" + ], + "key_expr": [ + "test/demo" + ], + "interface": [ + "lo0" + ] + }, + ] + } + }"#, + ) + .unwrap(); + let mut config_qbl = Config::default(); + config_qbl.set_mode(Some(WhatAmI::Client)).unwrap(); + config_qbl + .connect + .set_endpoints(vec!["tcp/localhost:7447".parse().unwrap()]) + .unwrap(); + config_qbl + .scouting + .multicast + .set_enabled(Some(false)) + .unwrap(); + let mut config_get = Config::default(); + config_get.set_mode(Some(WhatAmI::Client)).unwrap(); + config_get + .connect + .set_endpoints(vec!["tcp/localhost:7448".parse().unwrap()]) + .unwrap(); + config_get + .scouting + .multicast + .set_enabled(Some(false)) + .unwrap(); + const KEY_EXPR: &str = "test/demo"; + const VALUE: &str = "zenoh"; + let _session = zenoh::open(config_router).res().unwrap(); + let qbl_session = zenoh::open(config_qbl).res().unwrap(); + let get_session = zenoh::open(config_get).res().unwrap(); + let mut received_value = String::new(); + let _qbl = qbl_session + .declare_queryable(KEY_EXPR) + .callback(move |sample| { + let rep = Sample::try_from(KEY_EXPR, VALUE).unwrap(); + sample.reply(Ok(rep)).res().unwrap(); + }) + .res() + .unwrap(); + std::thread::sleep(std::time::Duration::from_secs(1)); + let recv_reply = get_session.get(KEY_EXPR).res().unwrap(); + while let Ok(reply) = recv_reply.recv() { + match reply.sample { + Ok(sample) => received_value = sample.value.to_string(), + Err(e) => println!("Error : {}", e), + } + } + std::thread::sleep(std::time::Duration::from_secs(1)); + assert_eq!(received_value, VALUE); +} From 4a341e29db5e59e7712ea677a4f14668123dc538 Mon Sep 17 00:00:00 2001 From: snehilzs Date: Thu, 4 Apr 2024 15:41:39 +0200 Subject: [PATCH 087/122] replaced nested Vec with structs --- 
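Illustrative sketch, not part of this change: with the struct-based layout introduced
below, a permission lookup that previously indexed nested Vecs as
policy[flow as usize][action as usize][permission as usize] walks typed accessors
instead. The sketch assumes the FlowPolicy/ActionPolicy/PermissionPolicy types, the
KeTreeRule alias and the imports already present in authorization.rs; the helper name
is_explicitly_denied is invented for the example and is not part of the diff.

    // FlowPolicy -> ActionPolicy -> PermissionPolicy -> KeTreeRule,
    // replacing the three positional Vec indexings of the old layout.
    fn is_explicitly_denied(
        policy: &FlowPolicy,
        flow: Flow,
        action: Action,
        key_expr: &str,
    ) -> ZResult<bool> {
        let deny_tree = policy.flow(flow).action(action).permission(Permission::Deny);
        // Any deny-tree node that includes this key expression means an explicit
        // deny rule applies; explicit deny always takes precedence over allow.
        Ok(deny_tree.nodes_including(keyexpr::new(&key_expr)?).count() != 0)
    }
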
Cargo.lock | 22 --- Cargo.toml | 1 - commons/zenoh-config/Cargo.toml | 1 - commons/zenoh-config/src/lib.rs | 8 +- zenoh/Cargo.toml | 1 - .../net/routing/interceptor/authorization.rs | 149 +++++++++++------- 6 files changed, 96 insertions(+), 86 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 942bdcb941..850ba28d37 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1091,26 +1091,6 @@ version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a26ae43d7bcc3b814de94796a5e736d4029efb0ee900c12e2d54c993ad1a1e07" -[[package]] -name = "enum-map" -version = "2.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6866f3bfdf8207509a033af1a75a7b08abda06bbaaeae6669323fd5a097df2e9" -dependencies = [ - "enum-map-derive", -] - -[[package]] -name = "enum-map-derive" -version = "0.17.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f282cfdfe92516eb26c2af8589c274c7c17681f5ecc03c18255fe741c6aa64eb" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.33", -] - [[package]] name = "env_filter" version = "0.1.0" @@ -4398,7 +4378,6 @@ dependencies = [ "async-trait", "base64 0.21.4", "const_format", - "enum-map", "env_logger", "event-listener 4.0.0", "flume", @@ -4491,7 +4470,6 @@ version = "0.11.0-dev" name = "zenoh-config" version = "0.11.0-dev" dependencies = [ - "enum-map", "flume", "json5", "log", diff --git a/Cargo.toml b/Cargo.toml index 78d6c5fd2c..33dd067d63 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -197,7 +197,6 @@ zenoh-link = { version = "0.11.0-dev", path = "io/zenoh-link" } zenoh-link-commons = { version = "0.11.0-dev", path = "io/zenoh-link-commons" } zenoh = { version = "0.11.0-dev", path = "zenoh", default-features = false } zenoh-runtime = { version = "0.11.0-dev", path = "commons/zenoh-runtime" } -enum-map = "2.7.3" [profile.dev] debug = true diff --git a/commons/zenoh-config/Cargo.toml b/commons/zenoh-config/Cargo.toml index 995e62768b..feade8cc10 100644 --- a/commons/zenoh-config/Cargo.toml +++ b/commons/zenoh-config/Cargo.toml @@ -37,4 +37,3 @@ zenoh-protocol = { workspace = true } zenoh-result = { workspace = true } zenoh-util = { workspace = true } secrecy = { workspace = true } -enum-map = { workspace = true } diff --git a/commons/zenoh-config/src/lib.rs b/commons/zenoh-config/src/lib.rs index 6a2d2d0f02..fe84c74d27 100644 --- a/commons/zenoh-config/src/lib.rs +++ b/commons/zenoh-config/src/lib.rs @@ -15,8 +15,6 @@ //! Configuration to pass to `zenoh::open()` and `zenoh::scout()` functions and associated constants. 
pub mod defaults; mod include; - -use enum_map::Enum; use include::recursive_include; use secrecy::{CloneableSecret, DebugSecret, Secret, SerializableSecret, Zeroize}; use serde::{Deserialize, Serialize}; @@ -74,7 +72,7 @@ impl Zeroize for SecretString { pub type SecretValue = Secret; -#[derive(Debug, Deserialize, Serialize, Clone, Enum)] +#[derive(Debug, Deserialize, Serialize, Clone)] #[serde(rename_all = "lowercase")] pub enum DownsamplingFlow { Egress, @@ -126,7 +124,7 @@ pub enum Subject { Interface(String), } -#[derive(Clone, Debug, Serialize, Deserialize, Eq, Hash, PartialEq, Enum)] +#[derive(Clone, Debug, Serialize, Deserialize, Eq, Hash, PartialEq)] #[serde(rename_all = "snake_case")] pub enum Action { Put, @@ -135,7 +133,7 @@ pub enum Action { DeclareQueryable, } -#[derive(Clone, Debug, Serialize, Deserialize, Eq, Hash, PartialEq, Enum)] +#[derive(Clone, Debug, Serialize, Deserialize, Eq, Hash, PartialEq)] #[serde(rename_all = "lowercase")] pub enum Permission { Allow, diff --git a/zenoh/Cargo.toml b/zenoh/Cargo.toml index 13ab21ced1..2d85a604c5 100644 --- a/zenoh/Cargo.toml +++ b/zenoh/Cargo.toml @@ -107,7 +107,6 @@ zenoh-sync = { workspace = true } zenoh-transport = { workspace = true } zenoh-util = { workspace = true } zenoh-runtime = { workspace = true } -enum-map = { workspace = true } [build-dependencies] rustc_version = { workspace = true } diff --git a/zenoh/src/net/routing/interceptor/authorization.rs b/zenoh/src/net/routing/interceptor/authorization.rs index 7ab8fb54ae..f0c91c67d0 100644 --- a/zenoh/src/net/routing/interceptor/authorization.rs +++ b/zenoh/src/net/routing/interceptor/authorization.rs @@ -18,18 +18,86 @@ //! //! [Click here for Zenoh's documentation](../zenoh/index.html) use ahash::RandomState; -use enum_map::{enum_map, EnumMap}; use std::collections::HashMap; use zenoh_config::{AclConfig, AclConfigRules, Action, Flow, Permission, PolicyRule, Subject}; use zenoh_keyexpr::keyexpr; use zenoh_keyexpr::keyexpr_tree::{IKeyExprTree, IKeyExprTreeMut, KeBoxTree}; use zenoh_result::ZResult; -type PermissionVec = Vec; -type ActionVec = Vec; -type PolicyForSubject = Vec; +type PolicyForSubject = FlowPolicy; + type PolicyMap = HashMap; type SubjectMap = HashMap; type KeTreeRule = KeBoxTree; + +#[derive(Default)] +struct PermissionPolicy { + allow: KeTreeRule, + deny: KeTreeRule, +} + +impl PermissionPolicy { + #[allow(dead_code)] + fn permission(&self, permission: Permission) -> &KeTreeRule { + match permission { + Permission::Allow => &self.allow, + Permission::Deny => &self.deny, + } + } + fn permission_mut(&mut self, permission: Permission) -> &mut KeTreeRule { + match permission { + Permission::Allow => &mut self.allow, + Permission::Deny => &mut self.deny, + } + } +} +#[derive(Default)] +struct ActionPolicy { + get: PermissionPolicy, + put: PermissionPolicy, + declare_subscriber: PermissionPolicy, + declare_queryable: PermissionPolicy, +} + +impl ActionPolicy { + fn action(&self, action: Action) -> &PermissionPolicy { + match action { + Action::DeclareQueryable => &self.declare_queryable, + Action::Get => &self.get, + Action::Put => &self.put, + Action::DeclareSubscriber => &self.declare_subscriber, + } + } + fn action_mut(&mut self, action: Action) -> &mut PermissionPolicy { + match action { + Action::DeclareQueryable => &mut self.declare_queryable, + Action::Get => &mut self.get, + Action::Put => &mut self.put, + Action::DeclareSubscriber => &mut self.declare_subscriber, + } + } +} + +#[derive(Default)] +pub struct FlowPolicy { + ingress: ActionPolicy, + 
egress: ActionPolicy, +} + +impl FlowPolicy { + fn flow(&self, flow: Flow) -> &ActionPolicy { + match flow { + Flow::Ingress => &self.ingress, + Flow::Egress => &self.egress, + } + } + fn flow_mut(&mut self, flow: Flow) -> &mut ActionPolicy { + match flow { + Flow::Ingress => &mut self.ingress, + Flow::Egress => &mut self.egress, + } + } +} + pub struct PolicyEnforcer { pub(crate) acl_enabled: bool, pub(crate) default_permission: Permission, @@ -57,10 +125,6 @@ impl PolicyEnforcer { initializes the policy_enforcer */ pub fn init(&mut self, acl_config: AclConfig) -> ZResult<()> { - let (action_index, permission_index, flow_index) = get_enum_map(); - let (number_of_actions, number_of_permissions, number_of_flows) = - (action_index.len(), permission_index.len(), flow_index.len()); - self.acl_enabled = acl_config.enabled; self.default_permission = acl_config.default_permission; if self.acl_enabled { @@ -74,29 +138,21 @@ impl PolicyEnforcer { let subject_map = policy_information.subject_map; let mut main_policy: PolicyMap = PolicyMap::default(); - for index in subject_map.values() { - let mut rule = PolicyForSubject::new(); - for _k in 0..number_of_flows { - let mut flow_rule = Vec::new(); - for _i in 0..number_of_actions { - let mut action_rule: Vec = Vec::new(); - for _j in 0..number_of_permissions { - let permission_rule = KeTreeRule::new(); - action_rule.push(permission_rule); - } - flow_rule.push(action_rule); - } - rule.push(flow_rule); - } - main_policy.insert(*index, rule); - } - for rule in policy_information.policy_rules { if let Some(index) = subject_map.get(&rule.subject) { if let Some(single_policy) = main_policy.get_mut(index) { - single_policy[flow_index[rule.flow]][action_index[rule.action]] - [permission_index[rule.permission]] + single_policy + .flow_mut(rule.flow) + .action_mut(rule.action) + .permission_mut(rule.permission) .insert(keyexpr::new(&rule.key_expr)?, true); + } else { + let mut pfs = PolicyForSubject::default(); + pfs.flow_mut(rule.flow) + .action_mut(rule.action) + .permission_mut(rule.permission) + .insert(keyexpr::new(&rule.key_expr)?, true); + main_policy.insert(*index, pfs); } }; } @@ -136,7 +192,8 @@ impl PolicyEnforcer { } } let mut subject_map = SubjectMap::default(); - let mut counter = 1; //starting at 1 since 0 is the init value and should not match anything + let mut counter = 1; + //starting at 1 since 0 is the init value and should not match anything for rule in policy_rules.iter() { subject_map.insert(rule.subject.clone(), counter); counter += 1; @@ -159,12 +216,12 @@ impl PolicyEnforcer { key_expr: &str, ) -> ZResult { let policy_map = &self.policy_map; - - let (action_index, permission_index, flow_index) = get_enum_map(); match policy_map.get(&subject) { Some(single_policy) => { - let deny_result = single_policy[flow.clone() as usize][action.clone() as usize] - [Permission::Deny as usize] + let deny_result = single_policy + .flow(flow.clone()) + .action(action.clone()) + .deny .nodes_including(keyexpr::new(&key_expr)?) .count(); if deny_result != 0 { @@ -173,8 +230,10 @@ impl PolicyEnforcer { if self.default_permission == Permission::Allow { Ok(Permission::Allow) } else { - let allow_result = single_policy[flow_index[flow]][action_index[action]] - [permission_index[Permission::Allow]] + let allow_result = single_policy + .flow(flow) + .action(action) + .allow .nodes_including(keyexpr::new(&key_expr)?) 
.count(); @@ -189,25 +248,3 @@ impl PolicyEnforcer { } } } - -fn get_enum_map() -> ( - EnumMap, - EnumMap, - EnumMap, -) { - let action_index = enum_map! { - Action::Put => 0_usize, - Action::DeclareSubscriber => 1, - Action::Get => 2, - Action::DeclareQueryable => 3, - }; - let permission_index = enum_map! { - Permission::Allow => 0_usize, - Permission::Deny => 1 - }; - let flow_index = enum_map! { - Flow::Egress => 0_usize, - Flow::Ingress => 1 - }; - (action_index, permission_index, flow_index) -} From fd9d0d98a2f5ee18797a3c85d844a05eb328bd5f Mon Sep 17 00:00:00 2001 From: snehilzs Date: Thu, 4 Apr 2024 16:54:07 +0200 Subject: [PATCH 088/122] fixed queryable test issue --- zenoh/tests/acl.rs | 269 ++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 266 insertions(+), 3 deletions(-) diff --git a/zenoh/tests/acl.rs b/zenoh/tests/acl.rs index 07433cf3da..13a3ef5905 100644 --- a/zenoh/tests/acl.rs +++ b/zenoh/tests/acl.rs @@ -10,6 +10,9 @@ fn test_acl() { test_pub_sub_deny(); test_pub_sub_allow_then_deny(); test_pub_sub_deny_then_allow(); + test_get_queryable_allow(); + test_get_queryable_deny(); + test_get_queryable_allow_then_deny(); test_get_queryable_deny_then_allow(); } @@ -277,7 +280,6 @@ fn test_pub_sub_allow_then_deny() { std::thread::sleep(std::time::Duration::from_millis(10)); assert_ne!(*zlock!(received_value), VALUE); } -#[test] fn test_pub_sub_deny_then_allow() { let mut config_router = Config::default(); config_router.set_mode(Some(WhatAmI::Router)).unwrap(); @@ -375,7 +377,265 @@ fn test_pub_sub_deny_then_allow() { assert_eq!(*zlock!(received_value), VALUE); } -#[test] +fn test_get_queryable_deny() { + let mut config_router = Config::default(); + config_router.set_mode(Some(WhatAmI::Router)).unwrap(); + config_router + .listen + .set_endpoints(vec![ + "tcp/localhost:7447".parse().unwrap(), + "tcp/localhost:7448".parse().unwrap(), + ]) + .unwrap(); + + config_router + .scouting + .multicast + .set_enabled(Some(false)) + .unwrap(); + config_router + .insert_json5( + "transport", + r#"{ + acl: { + "enabled": true, + "default_permission": "deny", + "rules": + [ + + ] + } + }"#, + ) + .unwrap(); + let mut config_qbl = Config::default(); + config_qbl.set_mode(Some(WhatAmI::Client)).unwrap(); + config_qbl + .connect + .set_endpoints(vec!["tcp/localhost:7447".parse().unwrap()]) + .unwrap(); + config_qbl + .scouting + .multicast + .set_enabled(Some(false)) + .unwrap(); + let mut config_get = Config::default(); + config_get.set_mode(Some(WhatAmI::Client)).unwrap(); + config_get + .connect + .set_endpoints(vec!["tcp/localhost:7448".parse().unwrap()]) + .unwrap(); + config_get + .scouting + .multicast + .set_enabled(Some(false)) + .unwrap(); + const KEY_EXPR: &str = "test/demo"; + const VALUE: &str = "zenoh"; + let _session = zenoh::open(config_router).res().unwrap(); + let qbl_session = zenoh::open(config_qbl).res().unwrap(); + let get_session = zenoh::open(config_get).res().unwrap(); + let mut received_value = String::new(); + let _qbl = qbl_session + .declare_queryable(KEY_EXPR) + .callback(move |sample| { + let rep = Sample::try_from(KEY_EXPR, VALUE).unwrap(); + sample.reply(Ok(rep)).res().unwrap(); + }) + .res() + .unwrap(); + std::thread::sleep(std::time::Duration::from_secs(1)); + let recv_reply = get_session.get(KEY_EXPR).res().unwrap(); + while let Ok(reply) = recv_reply.recv() { + match reply.sample { + Ok(sample) => { + received_value = sample.value.to_string(); + break; + } + Err(e) => println!("Error : {}", e), + } + } + 
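All of these queryable tests drain the reply receiver the same way: keep the first successful sample and break out of the loop so a later reply cannot overwrite it (the hunk for test_get_queryable_deny_then_allow at the end of this patch adds exactly that break). The pattern can be sketched without zenoh, using a std channel as a stand-in for the reply receiver:

use std::sync::mpsc;

fn main() {
    // Stand-in for the zenoh reply receiver: a channel of Result samples.
    let (tx, rx) = mpsc::channel::<Result<String, String>>();
    tx.send(Err("first reply failed".to_string())).unwrap();
    tx.send(Ok("zenoh".to_string())).unwrap();
    tx.send(Ok("late duplicate".to_string())).unwrap();
    // Close the sender so the loop still terminates if no Ok reply ever arrives.
    drop(tx);

    let mut received_value = String::new();
    while let Ok(reply) = rx.recv() {
        match reply {
            Ok(sample) => {
                received_value = sample;
                // Keep the first successful reply; without this break a later
                // reply could overwrite the value before the assertion runs.
                break;
            }
            Err(e) => println!("Error : {}", e),
        }
    }
    assert_eq!(received_value, "zenoh");
}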
std::thread::sleep(std::time::Duration::from_secs(1)); + assert_ne!(received_value, VALUE); +} + +fn test_get_queryable_allow() { + let mut config_router = Config::default(); + config_router.set_mode(Some(WhatAmI::Router)).unwrap(); + config_router + .listen + .set_endpoints(vec![ + "tcp/localhost:7447".parse().unwrap(), + "tcp/localhost:7448".parse().unwrap(), + ]) + .unwrap(); + + config_router + .scouting + .multicast + .set_enabled(Some(false)) + .unwrap(); + config_router + .insert_json5( + "transport", + r#"{ + acl: { + "enabled": true, + "default_permission": "allow", + "rules": + [ + + ] + } + }"#, + ) + .unwrap(); + let mut config_qbl = Config::default(); + config_qbl.set_mode(Some(WhatAmI::Client)).unwrap(); + config_qbl + .connect + .set_endpoints(vec!["tcp/localhost:7447".parse().unwrap()]) + .unwrap(); + config_qbl + .scouting + .multicast + .set_enabled(Some(false)) + .unwrap(); + let mut config_get = Config::default(); + config_get.set_mode(Some(WhatAmI::Client)).unwrap(); + config_get + .connect + .set_endpoints(vec!["tcp/localhost:7448".parse().unwrap()]) + .unwrap(); + config_get + .scouting + .multicast + .set_enabled(Some(false)) + .unwrap(); + const KEY_EXPR: &str = "test/demo"; + const VALUE: &str = "zenoh"; + let _session = zenoh::open(config_router).res().unwrap(); + let qbl_session = zenoh::open(config_qbl).res().unwrap(); + let get_session = zenoh::open(config_get).res().unwrap(); + let mut received_value = String::new(); + let _qbl = qbl_session + .declare_queryable(KEY_EXPR) + .callback(move |sample| { + let rep = Sample::try_from(KEY_EXPR, VALUE).unwrap(); + sample.reply(Ok(rep)).res().unwrap(); + }) + .res() + .unwrap(); + std::thread::sleep(std::time::Duration::from_secs(1)); + let recv_reply = get_session.get(KEY_EXPR).res().unwrap(); + while let Ok(reply) = recv_reply.recv() { + match reply.sample { + Ok(sample) => { + received_value = sample.value.to_string(); + break; + } + Err(e) => println!("Error : {}", e), + } + } + std::thread::sleep(std::time::Duration::from_secs(1)); + assert_eq!(received_value, VALUE); +} + +fn test_get_queryable_allow_then_deny() { + let mut config_router = Config::default(); + config_router.set_mode(Some(WhatAmI::Router)).unwrap(); + config_router + .listen + .set_endpoints(vec![ + "tcp/localhost:7447".parse().unwrap(), + "tcp/localhost:7448".parse().unwrap(), + ]) + .unwrap(); + + config_router + .scouting + .multicast + .set_enabled(Some(false)) + .unwrap(); + config_router + .insert_json5( + "transport", + r#"{ + acl: { + "enabled": true, + "default_permission": "allow", + "rules": + [ + { + "permission": "deny", + "flow": ["egress","ingress"], + "action": [ + "get", + "declare_queryable" + ], + "key_expr": [ + "test/demo" + ], + "interface": [ + "lo0" + ] + }, + ] + } + }"#, + ) + .unwrap(); + let mut config_qbl = Config::default(); + config_qbl.set_mode(Some(WhatAmI::Client)).unwrap(); + config_qbl + .connect + .set_endpoints(vec!["tcp/localhost:7447".parse().unwrap()]) + .unwrap(); + config_qbl + .scouting + .multicast + .set_enabled(Some(false)) + .unwrap(); + let mut config_get = Config::default(); + config_get.set_mode(Some(WhatAmI::Client)).unwrap(); + config_get + .connect + .set_endpoints(vec!["tcp/localhost:7448".parse().unwrap()]) + .unwrap(); + config_get + .scouting + .multicast + .set_enabled(Some(false)) + .unwrap(); + const KEY_EXPR: &str = "test/demo"; + const VALUE: &str = "zenoh"; + let _session = zenoh::open(config_router).res().unwrap(); + let qbl_session = zenoh::open(config_qbl).res().unwrap(); + let 
get_session = zenoh::open(config_get).res().unwrap(); + let mut received_value = String::new(); + let _qbl = qbl_session + .declare_queryable(KEY_EXPR) + .callback(move |sample| { + let rep = Sample::try_from(KEY_EXPR, VALUE).unwrap(); + sample.reply(Ok(rep)).res().unwrap(); + }) + .res() + .unwrap(); + std::thread::sleep(std::time::Duration::from_secs(1)); + let recv_reply = get_session.get(KEY_EXPR).res().unwrap(); + while let Ok(reply) = recv_reply.recv() { + match reply.sample { + Ok(sample) => { + received_value = sample.value.to_string(); + break; + } + Err(e) => println!("Error : {}", e), + } + } + std::thread::sleep(std::time::Duration::from_secs(1)); + assert_ne!(received_value, VALUE); +} + fn test_get_queryable_deny_then_allow() { let mut config_router = Config::default(); config_router.set_mode(Some(WhatAmI::Router)).unwrap(); @@ -460,7 +720,10 @@ fn test_get_queryable_deny_then_allow() { let recv_reply = get_session.get(KEY_EXPR).res().unwrap(); while let Ok(reply) = recv_reply.recv() { match reply.sample { - Ok(sample) => received_value = sample.value.to_string(), + Ok(sample) => { + received_value = sample.value.to_string(); + break; + } Err(e) => println!("Error : {}", e), } } From 34db54eb9b87a692aa5a8e5c854098af447b1992 Mon Sep 17 00:00:00 2001 From: snehilzs Date: Fri, 5 Apr 2024 16:34:58 +0200 Subject: [PATCH 089/122] fixed cache downcast error --- .../net/routing/interceptor/access_control.rs | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/zenoh/src/net/routing/interceptor/access_control.rs b/zenoh/src/net/routing/interceptor/access_control.rs index 088380cdbe..e955a63b40 100644 --- a/zenoh/src/net/routing/interceptor/access_control.rs +++ b/zenoh/src/net/routing/interceptor/access_control.rs @@ -143,8 +143,15 @@ impl InterceptorTrait for IngressAclEnforcer { cache: Option<&Box>, ) -> Option> { let key_expr = cache - .and_then(|i| i.downcast_ref::().map(|e| e.as_str())) + .and_then(|i| match i.downcast_ref::() { + Some(e) => Some(e.as_str()), + None => { + log::debug!("[ACCESS LOG]: Cache content was not of type String"); + None + } + }) .or_else(|| ctx.full_expr())?; + if let NetworkBody::Push(Push { payload: PushBody::Put(_), .. 
@@ -192,7 +199,13 @@ impl InterceptorTrait for EgressAclEnforcer { cache: Option<&Box>, ) -> Option> { let key_expr = cache - .and_then(|i| i.downcast_ref::().map(|e| e.as_str())) + .and_then(|i| match i.downcast_ref::() { + Some(e) => Some(e.as_str()), + None => { + log::debug!("[ACCESS LOG]: Cache content was not of type String"); + None + } + }) .or_else(|| ctx.full_expr())?; if let NetworkBody::Push(Push { From a02ffad319a3503abe6c8bb88bf09995a3a2aae5 Mon Sep 17 00:00:00 2001 From: snehilzs Date: Fri, 5 Apr 2024 17:47:21 +0200 Subject: [PATCH 090/122] fixed interface issue in tests --- Cargo.lock | 37 +++++++ Cargo.toml | 2 +- zenoh/Cargo.toml | 2 +- zenoh/tests/acl.rs | 250 ++++++++++++++++++++++++--------------------- 4 files changed, 170 insertions(+), 121 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 850ba28d37..f6d5a80b2a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1460,6 +1460,20 @@ version = "1.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eabb4a44450da02c90444cf74558da904edde8fb4e9035a9a6a4e15445af0bd7" +[[package]] +name = "handlebars" +version = "3.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4498fc115fa7d34de968184e473529abb40eeb6be8bc5f7faba3d08c316cb3e3" +dependencies = [ + "log", + "pest", + "pest_derive", + "quick-error", + "serde", + "serde_json", +] + [[package]] name = "hashbrown" version = "0.13.2" @@ -1663,6 +1677,22 @@ dependencies = [ "cfg-if 1.0.0", ] +[[package]] +name = "interfaces" +version = "0.0.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb6250a98af259a26fd5a4a6081fccea9ac116e4c3178acf4aeb86d32d2b7715" +dependencies = [ + "bitflags 2.4.2", + "cc", + "handlebars", + "lazy_static", + "libc", + "nix 0.26.4", + "serde", + "serde_derive", +] + [[package]] name = "io-lifetimes" version = "1.0.11" @@ -2459,6 +2489,12 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "quick-error" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a993555f31e5a609f617c12db6250dedcac1b0a85076912c436e6fc9b2c8e6a3" + [[package]] name = "quinn" version = "0.10.2" @@ -4384,6 +4420,7 @@ dependencies = [ "form_urlencoded", "futures", "git-version", + "interfaces", "lazy_static", "log", "ordered-float", diff --git a/Cargo.toml b/Cargo.toml index 33dd067d63..fd97fcfabd 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -197,7 +197,7 @@ zenoh-link = { version = "0.11.0-dev", path = "io/zenoh-link" } zenoh-link-commons = { version = "0.11.0-dev", path = "io/zenoh-link-commons" } zenoh = { version = "0.11.0-dev", path = "zenoh", default-features = false } zenoh-runtime = { version = "0.11.0-dev", path = "commons/zenoh-runtime" } - +interfaces = "0.0.9" [profile.dev] debug = true opt-level = 0 diff --git a/zenoh/Cargo.toml b/zenoh/Cargo.toml index 2d85a604c5..45586e3be7 100644 --- a/zenoh/Cargo.toml +++ b/zenoh/Cargo.toml @@ -107,7 +107,7 @@ zenoh-sync = { workspace = true } zenoh-transport = { workspace = true } zenoh-util = { workspace = true } zenoh-runtime = { workspace = true } - +interfaces = { workspace = true } [build-dependencies] rustc_version = { workspace = true } diff --git a/zenoh/tests/acl.rs b/zenoh/tests/acl.rs index 13a3ef5905..f8fafc94f8 100644 --- a/zenoh/tests/acl.rs +++ b/zenoh/tests/acl.rs @@ -1,3 +1,4 @@ +use interfaces::Interface; use std::sync::{Arc, Mutex}; use zenoh::prelude::sync::*; use zenoh_config::Config; @@ -8,12 +9,20 @@ fn test_acl() { env_logger::init(); test_pub_sub_allow(); 
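This commit stops hard-coding the loopback interface name ("lo0") in the ACL rules: the interfaces crate is pulled in, the loopback device is discovered at runtime, and its name is spliced into the rule set with format!. A condensed sketch that combines the two pieces, relying only on the Interface::get_all, is_loopback and name calls already used in this diff:

use interfaces::Interface;

fn main() {
    // Discover the loopback interface, sorted by name for a deterministic pick,
    // as get_loopback_interface does in the tests.
    let mut ifs = Interface::get_all().expect("could not get interfaces");
    ifs.sort_by(|a, b| a.name.cmp(&b.name));
    let loopback = ifs
        .into_iter()
        .find(|i| i.is_loopback())
        .expect("no loopback interface found");

    // Splice the discovered name into an ACL rule set, as the *_then_* tests do.
    let acl_js = format!(
        r#"{{
            "enabled": true,
            "default_permission": "deny",
            "rules": [
                {{
                    "permission": "allow",
                    "flow": ["egress", "ingress"],
                    "action": ["put", "declare_subscriber"],
                    "key_expr": ["test/demo"],
                    "interface": ["{}"]
                }},
            ]
        }}"#,
        loopback.name
    );
    println!("{acl_js}");
}

On Linux this would typically bind the rule to "lo", whereas the previously hard-coded "lo0" only matches the loopback device on macOS/BSD systems.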
test_pub_sub_deny(); - test_pub_sub_allow_then_deny(); - test_pub_sub_deny_then_allow(); test_get_queryable_allow(); test_get_queryable_deny(); - test_get_queryable_allow_then_deny(); - test_get_queryable_deny_then_allow(); + if let Some(loopback_face) = get_loopback_interface() { + test_pub_sub_allow_then_deny(&loopback_face.name); + test_pub_sub_deny_then_allow(&loopback_face.name); + test_get_queryable_allow_then_deny(&loopback_face.name); + test_get_queryable_deny_then_allow(&loopback_face.name); + } +} + +fn get_loopback_interface() -> Option { + let mut ifs = Interface::get_all().expect("could not get interfaces"); + ifs.sort_by(|a, b| a.name.cmp(&b.name)); + ifs.into_iter().find(|i| i.is_loopback()) } fn test_pub_sub_deny() { @@ -184,7 +193,7 @@ fn test_pub_sub_allow() { assert_eq!(*zlock!(received_value), VALUE); } -fn test_pub_sub_allow_then_deny() { +fn test_pub_sub_allow_then_deny(interface_name: &str) { let mut config_router = Config::default(); config_router.set_mode(Some(WhatAmI::Router)).unwrap(); config_router @@ -200,34 +209,34 @@ fn test_pub_sub_allow_then_deny() { .multicast .set_enabled(Some(false)) .unwrap(); - config_router - .insert_json5( - "transport", - r#"{ - acl: { - "enabled": true, - "default_permission": "allow", - "rules": - [ - { - "permission": "deny", - "flow": ["egress"], - "action": [ - "put", - ], - "key_expr": [ - "test/demo" - ], - "interface": [ - "lo0" - ] - }, + let acl_js = format!( + r#" + {{ + acl: {{ + "enabled": true, + "default_permission": "allow", + "rules": + [ + {{ + "permission": "deny", + "flow": ["egress"], + "action": [ + "put", + ], + "key_expr": [ + "test/demo" + ], + "interface": [ + "{}" ] - } - }"#, - ) - .unwrap(); - + }}, + ] + }} + }} + "#, + interface_name + ); + config_router.insert_json5("transport", &acl_js).unwrap(); let mut config_sub = Config::default(); config_sub.set_mode(Some(WhatAmI::Client)).unwrap(); config_sub @@ -280,7 +289,7 @@ fn test_pub_sub_allow_then_deny() { std::thread::sleep(std::time::Duration::from_millis(10)); assert_ne!(*zlock!(received_value), VALUE); } -fn test_pub_sub_deny_then_allow() { +fn test_pub_sub_deny_then_allow(interface_name: &str) { let mut config_router = Config::default(); config_router.set_mode(Some(WhatAmI::Router)).unwrap(); config_router @@ -296,34 +305,35 @@ fn test_pub_sub_deny_then_allow() { .multicast .set_enabled(Some(false)) .unwrap(); - config_router - .insert_json5( - "transport", - r#"{ - acl: { - "enabled": true, - "default_permission": "deny", - "rules": - [ - { - "permission": "allow", - "flow": ["egress","ingress"], - "action": [ - "put", - "declare_subscriber" - ], - "key_expr": [ - "test/demo" - ], - "interface": [ - "lo0" - ] - }, - ] - } - }"#, - ) - .unwrap(); + let acl_js = format!( + r#" + {{ + acl: {{ + "enabled": true, + "default_permission": "deny", + "rules": + [ + {{ + "permission": "allow", + "flow": ["egress","ingress"], + "action": [ + "put", + "declare_subscriber" + ], + "key_expr": [ + "test/demo" + ], + "interface": [ + "{}" + ] + }}, + ] + }} + }} + "#, + interface_name + ); + config_router.insert_json5("transport", &acl_js).unwrap(); let mut config_sub = Config::default(); config_sub.set_mode(Some(WhatAmI::Client)).unwrap(); @@ -541,7 +551,7 @@ fn test_get_queryable_allow() { assert_eq!(received_value, VALUE); } -fn test_get_queryable_allow_then_deny() { +fn test_get_queryable_allow_then_deny(interface_name: &str) { let mut config_router = Config::default(); config_router.set_mode(Some(WhatAmI::Router)).unwrap(); config_router @@ -557,34 +567,35 
@@ fn test_get_queryable_allow_then_deny() { .multicast .set_enabled(Some(false)) .unwrap(); - config_router - .insert_json5( - "transport", - r#"{ - acl: { - "enabled": true, - "default_permission": "allow", - "rules": - [ - { - "permission": "deny", - "flow": ["egress","ingress"], - "action": [ - "get", - "declare_queryable" - ], - "key_expr": [ - "test/demo" - ], - "interface": [ - "lo0" - ] - }, - ] - } - }"#, - ) - .unwrap(); + let acl_js = format!( + r#" + {{ + acl: {{ + "enabled": true, + "default_permission": "allow", + "rules": + [ + {{ + "permission": "deny", + "flow": ["egress"], + "action": [ + "get", + "declare_queryable" + ], + "key_expr": [ + "test/demo" + ], + "interface": [ + "{}" + ] + }}, + ] + }} + }} + "#, + interface_name + ); + config_router.insert_json5("transport", &acl_js).unwrap(); let mut config_qbl = Config::default(); config_qbl.set_mode(Some(WhatAmI::Client)).unwrap(); config_qbl @@ -636,7 +647,7 @@ fn test_get_queryable_allow_then_deny() { assert_ne!(received_value, VALUE); } -fn test_get_queryable_deny_then_allow() { +fn test_get_queryable_deny_then_allow(interface_name: &str) { let mut config_router = Config::default(); config_router.set_mode(Some(WhatAmI::Router)).unwrap(); config_router @@ -652,34 +663,35 @@ fn test_get_queryable_deny_then_allow() { .multicast .set_enabled(Some(false)) .unwrap(); - config_router - .insert_json5( - "transport", - r#"{ - acl: { - "enabled": true, - "default_permission": "deny", - "rules": - [ - { - "permission": "allow", - "flow": ["egress","ingress"], - "action": [ - "get", - "declare_queryable" - ], - "key_expr": [ - "test/demo" - ], - "interface": [ - "lo0" - ] - }, - ] - } - }"#, - ) - .unwrap(); + let acl_js = format!( + r#" + {{ + acl: {{ + "enabled": true, + "default_permission": "deny", + "rules": + [ + {{ + "permission": "allow", + "flow": ["egress","ingress"], + "action": [ + "get", + "declare_queryable" + ], + "key_expr": [ + "test/demo" + ], + "interface": [ + "{}" + ] + }}, + ] + }} + }} + "#, + interface_name + ); + config_router.insert_json5("transport", &acl_js).unwrap(); let mut config_qbl = Config::default(); config_qbl.set_mode(Some(WhatAmI::Client)).unwrap(); config_qbl From 266d92684d038d41c4b14110e3c7ad2631d4f320 Mon Sep 17 00:00:00 2001 From: snehilzs Date: Sun, 7 Apr 2024 20:11:49 +0200 Subject: [PATCH 091/122] move acl config out of transport --- commons/zenoh-config/src/lib.rs | 17 ++++---- .../net/routing/interceptor/access_control.rs | 12 +++--- .../net/routing/interceptor/authorization.rs | 18 ++++---- zenoh/src/net/routing/interceptor/mod.rs | 2 +- zenoh/tests/acl.rs | 41 +++++++------------ 5 files changed, 41 insertions(+), 49 deletions(-) diff --git a/commons/zenoh-config/src/lib.rs b/commons/zenoh-config/src/lib.rs index fe84c74d27..c25efecbbe 100644 --- a/commons/zenoh-config/src/lib.rs +++ b/commons/zenoh-config/src/lib.rs @@ -72,7 +72,7 @@ impl Zeroize for SecretString { pub type SecretValue = Secret; -#[derive(Debug, Deserialize, Serialize, Clone)] +#[derive(Debug, Deserialize, Serialize, Clone, Copy)] #[serde(rename_all = "lowercase")] pub enum DownsamplingFlow { Egress, @@ -124,7 +124,7 @@ pub enum Subject { Interface(String), } -#[derive(Clone, Debug, Serialize, Deserialize, Eq, Hash, PartialEq)] +#[derive(Clone, Copy, Debug, Serialize, Deserialize, Eq, Hash, PartialEq)] #[serde(rename_all = "snake_case")] pub enum Action { Put, @@ -133,7 +133,7 @@ pub enum Action { DeclareQueryable, } -#[derive(Clone, Debug, Serialize, Deserialize, Eq, Hash, PartialEq)] +#[derive(Clone, Copy, 
Debug, Serialize, Deserialize, Eq, Hash, PartialEq)] #[serde(rename_all = "lowercase")] pub enum Permission { Allow, @@ -472,11 +472,7 @@ validated_struct::validator! { known_keys_file: Option, }, }, - pub acl: AclConfig { - pub enabled: bool, - pub default_permission: Permission, - pub rules: Option> - } + }, /// Configuration of the admin space. pub adminspace: #[derive(Default)] @@ -501,6 +497,11 @@ validated_struct::validator! { /// Configuration of the downsampling. downsampling: Vec, + pub acl: AclConfig { + pub enabled: bool, + pub default_permission: Permission, + pub rules: Option> + }, /// A list of directories where plugins may be searched for if no `__path__` was specified for them. /// The executable's current directory will be added to the search paths. diff --git a/zenoh/src/net/routing/interceptor/access_control.rs b/zenoh/src/net/routing/interceptor/access_control.rs index e955a63b40..356784b598 100644 --- a/zenoh/src/net/routing/interceptor/access_control.rs +++ b/zenoh/src/net/routing/interceptor/access_control.rs @@ -47,7 +47,9 @@ struct IngressAclEnforcer { zid: ZenohId, } -pub(crate) fn acl_interceptor_factories(acl_config: AclConfig) -> ZResult> { +pub(crate) fn acl_interceptor_factories( + acl_config: &AclConfig, +) -> ZResult> { let mut res: Vec = vec![]; if acl_config.enabled { @@ -254,7 +256,7 @@ pub trait AclActionMethods { let policy_enforcer = self.policy_enforcer(); let interface_list = self.interface_list(); let zid = self.zid(); - let mut decision = policy_enforcer.default_permission.clone(); + let mut decision = policy_enforcer.default_permission; for subject in &interface_list { match policy_enforcer.policy_decision_point( *subject, @@ -289,7 +291,7 @@ pub trait AclActionMethods { let policy_enforcer = self.policy_enforcer(); let interface_list = self.interface_list(); let zid = self.zid(); - let mut decision = policy_enforcer.default_permission.clone(); + let mut decision = policy_enforcer.default_permission; for subject in &interface_list { match policy_enforcer.policy_decision_point( *subject, @@ -323,7 +325,7 @@ pub trait AclActionMethods { let policy_enforcer = self.policy_enforcer(); let interface_list = self.interface_list(); let zid = self.zid(); - let mut decision = policy_enforcer.default_permission.clone(); + let mut decision = policy_enforcer.default_permission; for subject in &interface_list { match policy_enforcer.policy_decision_point( *subject, @@ -361,7 +363,7 @@ pub trait AclActionMethods { let policy_enforcer = self.policy_enforcer(); let interface_list = self.interface_list(); let zid = self.zid(); - let mut decision = policy_enforcer.default_permission.clone(); + let mut decision = policy_enforcer.default_permission; for subject in &interface_list { match policy_enforcer.policy_decision_point( *subject, diff --git a/zenoh/src/net/routing/interceptor/authorization.rs b/zenoh/src/net/routing/interceptor/authorization.rs index f0c91c67d0..3154f58751 100644 --- a/zenoh/src/net/routing/interceptor/authorization.rs +++ b/zenoh/src/net/routing/interceptor/authorization.rs @@ -124,11 +124,11 @@ impl PolicyEnforcer { /* initializes the policy_enforcer */ - pub fn init(&mut self, acl_config: AclConfig) -> ZResult<()> { + pub fn init(&mut self, acl_config: &AclConfig) -> ZResult<()> { self.acl_enabled = acl_config.enabled; self.default_permission = acl_config.default_permission; if self.acl_enabled { - if let Some(rules) = acl_config.rules { + if let Some(rules) = &acl_config.rules { if rules.is_empty() { log::warn!("[ACCESS LOG]: ACL ruleset in 
config file is empty!!!"); self.policy_map = PolicyMap::default(); @@ -171,7 +171,7 @@ impl PolicyEnforcer { */ pub fn policy_information_point( &self, - config_rule_set: Vec, + config_rule_set: &Vec, ) -> ZResult { let mut policy_rules: Vec = Vec::new(); for config_rule in config_rule_set { @@ -182,9 +182,9 @@ impl PolicyEnforcer { policy_rules.push(PolicyRule { subject: Subject::Interface(subject.clone()), key_expr: key_expr.clone(), - action: action.clone(), - permission: config_rule.permission.clone(), - flow: flow.clone(), + action: *action, + permission: config_rule.permission, + flow: *flow, }) } } @@ -219,8 +219,8 @@ impl PolicyEnforcer { match policy_map.get(&subject) { Some(single_policy) => { let deny_result = single_policy - .flow(flow.clone()) - .action(action.clone()) + .flow(flow) + .action(action) .deny .nodes_including(keyexpr::new(&key_expr)?) .count(); @@ -244,7 +244,7 @@ impl PolicyEnforcer { } } } - None => Ok(self.default_permission.clone()), + None => Ok(self.default_permission), } } } diff --git a/zenoh/src/net/routing/interceptor/mod.rs b/zenoh/src/net/routing/interceptor/mod.rs index b566ec19e7..b8009860ec 100644 --- a/zenoh/src/net/routing/interceptor/mod.rs +++ b/zenoh/src/net/routing/interceptor/mod.rs @@ -65,7 +65,7 @@ pub(crate) fn interceptor_factories(config: &Config) -> ZResult Option { ifs.sort_by(|a, b| a.name.cmp(&b.name)); ifs.into_iter().find(|i| i.is_loopback()) } - fn test_pub_sub_deny() { let mut config_router = Config::default(); config_router.set_mode(Some(WhatAmI::Router)).unwrap(); @@ -43,15 +42,15 @@ fn test_pub_sub_deny() { .unwrap(); config_router .insert_json5( - "transport", + "acl", r#"{ - acl: { + "enabled": true, "default_permission": "deny", "rules": [ ] - } + }"#, ) .unwrap(); @@ -127,15 +126,15 @@ fn test_pub_sub_allow() { .unwrap(); config_router .insert_json5( - "transport", + "acl", r#"{ - acl: { + "enabled": true, "default_permission": "allow", "rules": [ ] - } + }"#, ) .unwrap(); @@ -192,7 +191,6 @@ fn test_pub_sub_allow() { std::thread::sleep(std::time::Duration::from_millis(10)); assert_eq!(*zlock!(received_value), VALUE); } - fn test_pub_sub_allow_then_deny(interface_name: &str) { let mut config_router = Config::default(); config_router.set_mode(Some(WhatAmI::Router)).unwrap(); @@ -212,7 +210,7 @@ fn test_pub_sub_allow_then_deny(interface_name: &str) { let acl_js = format!( r#" {{ - acl: {{ + "enabled": true, "default_permission": "allow", "rules": @@ -231,12 +229,12 @@ fn test_pub_sub_allow_then_deny(interface_name: &str) { ] }}, ] - }} + }} "#, interface_name ); - config_router.insert_json5("transport", &acl_js).unwrap(); + config_router.insert_json5("acl", &acl_js).unwrap(); let mut config_sub = Config::default(); config_sub.set_mode(Some(WhatAmI::Client)).unwrap(); config_sub @@ -308,7 +306,6 @@ fn test_pub_sub_deny_then_allow(interface_name: &str) { let acl_js = format!( r#" {{ - acl: {{ "enabled": true, "default_permission": "deny", "rules": @@ -328,12 +325,11 @@ fn test_pub_sub_deny_then_allow(interface_name: &str) { ] }}, ] - }} }} "#, interface_name ); - config_router.insert_json5("transport", &acl_js).unwrap(); + config_router.insert_json5("acl", &acl_js).unwrap(); let mut config_sub = Config::default(); config_sub.set_mode(Some(WhatAmI::Client)).unwrap(); @@ -405,16 +401,14 @@ fn test_get_queryable_deny() { .unwrap(); config_router .insert_json5( - "transport", + "acl", r#"{ - acl: { "enabled": true, "default_permission": "deny", "rules": [ ] - } }"#, ) .unwrap(); @@ -487,16 +481,15 @@ fn test_get_queryable_allow() 
{ .unwrap(); config_router .insert_json5( - "transport", + "acl", r#"{ - acl: { "enabled": true, "default_permission": "allow", "rules": [ ] - } + }"#, ) .unwrap(); @@ -570,7 +563,6 @@ fn test_get_queryable_allow_then_deny(interface_name: &str) { let acl_js = format!( r#" {{ - acl: {{ "enabled": true, "default_permission": "allow", "rules": @@ -590,12 +582,11 @@ fn test_get_queryable_allow_then_deny(interface_name: &str) { ] }}, ] - }} }} "#, interface_name ); - config_router.insert_json5("transport", &acl_js).unwrap(); + config_router.insert_json5("acl", &acl_js).unwrap(); let mut config_qbl = Config::default(); config_qbl.set_mode(Some(WhatAmI::Client)).unwrap(); config_qbl @@ -666,7 +657,6 @@ fn test_get_queryable_deny_then_allow(interface_name: &str) { let acl_js = format!( r#" {{ - acl: {{ "enabled": true, "default_permission": "deny", "rules": @@ -686,12 +676,11 @@ fn test_get_queryable_deny_then_allow(interface_name: &str) { ] }}, ] - }} }} "#, interface_name ); - config_router.insert_json5("transport", &acl_js).unwrap(); + config_router.insert_json5("acl", &acl_js).unwrap(); let mut config_qbl = Config::default(); config_qbl.set_mode(Some(WhatAmI::Client)).unwrap(); config_qbl From 7eb121ab30c72ecc7c1f6e9a7037abc96a5c64af Mon Sep 17 00:00:00 2001 From: snehilzs Date: Sun, 7 Apr 2024 20:14:01 +0200 Subject: [PATCH 092/122] move acl config out of transport --- DEFAULT_CONFIG.json5 | 74 +++++++++++++++++++++++--------------------- 1 file changed, 38 insertions(+), 36 deletions(-) diff --git a/DEFAULT_CONFIG.json5 b/DEFAULT_CONFIG.json5 index eab6e0323b..c85392225d 100644 --- a/DEFAULT_CONFIG.json5 +++ b/DEFAULT_CONFIG.json5 @@ -167,6 +167,43 @@ // ], // }, // ], + /// configure acl rules + // acl: { + // ///[true/false] acl will be activated only if this is set to true + // "enabled": true, + // ///[deny/allow] default permission is deny (even if this is left empty or not specified) + // "default_permission": "allow", + // ///rule set for permissions allowing or denying access to key-expressions + // "rules": + // [ + // { + // "action": [ + // "put" + // ], + // "flow":["egress","ingress"], + // "permission": "allow", + // "key_expr": [ + // "test/thr" + // ], + // "interface": [ + // "lo0" + // ] + // }, + // { + // "action": [ + // "declare_subscriber" + // ], + // "flow":["egress","ingress"], + // "permission": "allow", + // "interface": [ + // "lo0" + // ], + // "key_expr": [ + // "test/thr" + // ] + // }, + // ] + //} /// Configure internal transport parameters transport: { unicast: { @@ -328,42 +365,7 @@ known_keys_file: null, }, }, - // acl: { - // ///[true/false] acl will be activated only if this is set to true - // "enabled": true, - // ///[deny/allow] default permission is deny (even if this is left empty or not specified) - // "default_permission": "allow", - // ///rule set for permissions allowing or denying access to key-expressions - // "rules": - // [ - // { - // "action": [ - // "put" - // ], - // "flow":["egress","ingress"], - // "permission": "allow", - // "key_expr": [ - // "test/thr" - // ], - // "interface": [ - // "lo0" - // ] - // }, - // { - // "action": [ - // "declare_subscriber" - // ], - // "flow":["egress","ingress"], - // "permission": "allow", - // "interface": [ - // "lo0" - // ], - // "key_expr": [ - // "test/thr" - // ] - // }, - // ] - //} + }, /// Configure the Admin Space /// Unstable: this configuration part works as advertised, but may change in a future release From 98a3704d7dde57936819fe986f425b5c1c13578b Mon Sep 17 00:00:00 2001 From: 
snehilzs Date: Sun, 7 Apr 2024 22:53:25 +0200 Subject: [PATCH 093/122] clean tests code --- zenoh/tests/acl.rs | 446 +++++++++++---------------------------------- 1 file changed, 102 insertions(+), 344 deletions(-) diff --git a/zenoh/tests/acl.rs b/zenoh/tests/acl.rs index 7132e5f2d6..d006788ea4 100644 --- a/zenoh/tests/acl.rs +++ b/zenoh/tests/acl.rs @@ -7,24 +7,6 @@ use zenoh_core::zlock; #[test] fn test_acl() { env_logger::init(); - test_pub_sub_allow(); - test_pub_sub_deny(); - test_get_queryable_allow(); - test_get_queryable_deny(); - if let Some(loopback_face) = get_loopback_interface() { - test_pub_sub_allow_then_deny(&loopback_face.name); - test_pub_sub_deny_then_allow(&loopback_face.name); - test_get_queryable_allow_then_deny(&loopback_face.name); - test_get_queryable_deny_then_allow(&loopback_face.name); - } -} - -fn get_loopback_interface() -> Option { - let mut ifs = Interface::get_all().expect("could not get interfaces"); - ifs.sort_by(|a, b| a.name.cmp(&b.name)); - ifs.into_iter().find(|i| i.is_loopback()) -} -fn test_pub_sub_deny() { let mut config_router = Config::default(); config_router.set_mode(Some(WhatAmI::Router)).unwrap(); config_router @@ -40,20 +22,6 @@ fn test_pub_sub_deny() { .multicast .set_enabled(Some(false)) .unwrap(); - config_router - .insert_json5( - "acl", - r#"{ - - "enabled": true, - "default_permission": "deny", - "rules": - [ - ] - - }"#, - ) - .unwrap(); let mut config_sub = Config::default(); config_sub.set_mode(Some(WhatAmI::Client)).unwrap(); @@ -80,19 +48,69 @@ fn test_pub_sub_deny() { .set_enabled(Some(false)) .unwrap(); - const KEY_EXPR: &str = "test/demo"; - const VALUE: &str = "zenoh"; + let config_qbl = &config_pub; + let config_get = &config_sub; - let _session = zenoh::open(config_router).res().unwrap(); + test_pub_sub_allow(config_router.clone(), &config_sub, &config_pub); + test_pub_sub_deny(config_router.clone(), &config_sub, &config_pub); + test_get_queryable_allow(config_router.clone(), config_qbl, config_get); + test_get_queryable_deny(config_router.clone(), config_qbl, config_get); + if let Some(loopback_face) = get_loopback_interface() { + test_pub_sub_allow_then_deny( + config_router.clone(), + &config_sub, + &config_pub, + &loopback_face.name, + ); + test_pub_sub_deny_then_allow( + config_router.clone(), + &config_sub, + &config_pub, + &loopback_face.name, + ); + test_get_queryable_allow_then_deny( + config_router.clone(), + config_qbl, + config_get, + &loopback_face.name, + ); + test_get_queryable_deny_then_allow( + config_router.clone(), + config_qbl, + config_get, + &loopback_face.name, + ); + } +} - let sub_session = zenoh::open(config_sub).res().unwrap(); - let pub_session = zenoh::open(config_pub).res().unwrap(); +fn get_loopback_interface() -> Option { + let mut ifs = Interface::get_all().expect("could not get interfaces"); + ifs.sort_by(|a, b| a.name.cmp(&b.name)); + ifs.into_iter().find(|i| i.is_loopback()) +} - let publisher = pub_session.declare_publisher(KEY_EXPR).res().unwrap(); +fn test_pub_sub_deny(mut config_router: Config, config_pub: &Config, config_sub: &Config) { + config_router + .insert_json5( + "acl", + r#"{ + "enabled": true, + "default_permission": "deny", + "rules": + [ + ] + }"#, + ) + .unwrap(); + const KEY_EXPR: &str = "test/demo"; + const VALUE: &str = "zenoh"; + let _session = zenoh::open(config_router).res().unwrap(); + let sub_session = zenoh::open(config_sub.clone()).res().unwrap(); + let pub_session = zenoh::open(config_pub.clone()).res().unwrap(); + let publisher = 
pub_session.declare_publisher(KEY_EXPR).res().unwrap(); let received_value = Arc::new(Mutex::new(String::new())); let temp_recv_value = received_value.clone(); - let _subscriber = &sub_session .declare_subscriber(KEY_EXPR) .callback(move |sample| { @@ -108,22 +126,7 @@ fn test_pub_sub_deny() { assert_ne!(*zlock!(received_value), VALUE); } -fn test_pub_sub_allow() { - let mut config_router = Config::default(); - config_router.set_mode(Some(WhatAmI::Router)).unwrap(); - config_router - .listen - .set_endpoints(vec![ - "tcp/localhost:7447".parse().unwrap(), - "tcp/localhost:7448".parse().unwrap(), - ]) - .unwrap(); - - config_router - .scouting - .multicast - .set_enabled(Some(false)) - .unwrap(); +fn test_pub_sub_allow(mut config_router: Config, config_pub: &Config, config_sub: &Config) { config_router .insert_json5( "acl", @@ -139,44 +142,14 @@ fn test_pub_sub_allow() { ) .unwrap(); - let mut config_sub = Config::default(); - config_sub.set_mode(Some(WhatAmI::Client)).unwrap(); - config_sub - .connect - .set_endpoints(vec!["tcp/localhost:7447".parse().unwrap()]) - .unwrap(); - config_sub - .scouting - .multicast - .set_enabled(Some(false)) - .unwrap(); - - let mut config_pub = Config::default(); - config_pub.set_mode(Some(WhatAmI::Client)).unwrap(); - config_pub - .connect - .set_endpoints(vec!["tcp/localhost:7448".parse().unwrap()]) - .unwrap(); - - config_pub - .scouting - .multicast - .set_enabled(Some(false)) - .unwrap(); - const KEY_EXPR: &str = "test/demo"; const VALUE: &str = "zenoh"; - let _session = zenoh::open(config_router).res().unwrap(); - - let sub_session = zenoh::open(config_sub).res().unwrap(); - let pub_session = zenoh::open(config_pub).res().unwrap(); - + let sub_session = zenoh::open(config_sub.clone()).res().unwrap(); + let pub_session = zenoh::open(config_pub.clone()).res().unwrap(); let publisher = pub_session.declare_publisher(KEY_EXPR).res().unwrap(); - let received_value = Arc::new(Mutex::new(String::new())); let temp_recv_value = received_value.clone(); - let _subscriber = sub_session .declare_subscriber(KEY_EXPR) .callback(move |sample| { @@ -191,22 +164,12 @@ fn test_pub_sub_allow() { std::thread::sleep(std::time::Duration::from_millis(10)); assert_eq!(*zlock!(received_value), VALUE); } -fn test_pub_sub_allow_then_deny(interface_name: &str) { - let mut config_router = Config::default(); - config_router.set_mode(Some(WhatAmI::Router)).unwrap(); - config_router - .listen - .set_endpoints(vec![ - "tcp/localhost:7447".parse().unwrap(), - "tcp/localhost:7448".parse().unwrap(), - ]) - .unwrap(); - - config_router - .scouting - .multicast - .set_enabled(Some(false)) - .unwrap(); +fn test_pub_sub_allow_then_deny( + mut config_router: Config, + config_pub: &Config, + config_sub: &Config, + interface_name: &str, +) { let acl_js = format!( r#" {{ @@ -235,44 +198,14 @@ fn test_pub_sub_allow_then_deny(interface_name: &str) { interface_name ); config_router.insert_json5("acl", &acl_js).unwrap(); - let mut config_sub = Config::default(); - config_sub.set_mode(Some(WhatAmI::Client)).unwrap(); - config_sub - .connect - .set_endpoints(vec!["tcp/localhost:7447".parse().unwrap()]) - .unwrap(); - config_sub - .scouting - .multicast - .set_enabled(Some(false)) - .unwrap(); - - let mut config_pub = Config::default(); - config_pub.set_mode(Some(WhatAmI::Client)).unwrap(); - config_pub - .connect - .set_endpoints(vec!["tcp/localhost:7448".parse().unwrap()]) - .unwrap(); - - config_pub - .scouting - .multicast - .set_enabled(Some(false)) - .unwrap(); - const KEY_EXPR: &str = "test/demo"; 
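The allow_then_deny and deny_then_allow variants exercise the decision order implemented by policy_decision_point: an explicit deny rule is checked first, then the default permission short-circuits, and an explicit allow rule is only consulted when the default is deny (the tail of that allow branch is elided in the earlier hunk, but the tests behave as if a matching allow rule yields Allow and anything else falls back to Deny). A zenoh-free sketch of that order, with plain string sets standing in for the key-expression trees:

use std::collections::HashSet;

#[derive(Debug, PartialEq, Clone, Copy)]
enum Permission { Allow, Deny }

// Decision order for one subject: explicit deny first, then the default
// permission, then explicit allow.
fn decide(
    deny: &HashSet<String>,
    allow: &HashSet<String>,
    default_permission: Permission,
    key_expr: &str,
) -> Permission {
    if deny.contains(key_expr) {
        return Permission::Deny;
    }
    if default_permission == Permission::Allow {
        return Permission::Allow;
    }
    if allow.contains(key_expr) {
        Permission::Allow
    } else {
        Permission::Deny
    }
}

fn main() {
    let mut deny = HashSet::new();
    let mut allow = HashSet::new();
    allow.insert("test/demo".to_string());

    // default deny + explicit allow on test/demo -> allowed (deny_then_allow case)
    assert_eq!(decide(&deny, &allow, Permission::Deny, "test/demo"), Permission::Allow);
    // default deny, no rule for other keys -> denied
    assert_eq!(decide(&deny, &allow, Permission::Deny, "other/key"), Permission::Deny);

    // default allow + explicit deny on test/demo -> denied (allow_then_deny case)
    deny.insert("test/demo".to_string());
    assert_eq!(decide(&deny, &allow, Permission::Allow, "test/demo"), Permission::Deny);

    println!("decision order checks passed");
}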
const VALUE: &str = "zenoh"; - let _session = zenoh::open(config_router).res().unwrap(); - - let sub_session = zenoh::open(config_sub).res().unwrap(); - let pub_session = zenoh::open(config_pub).res().unwrap(); - + let sub_session = zenoh::open(config_sub.clone()).res().unwrap(); + let pub_session = zenoh::open(config_pub.clone()).res().unwrap(); let publisher = pub_session.declare_publisher(KEY_EXPR).res().unwrap(); - let received_value = Arc::new(Mutex::new(String::new())); let temp_recv_value = received_value.clone(); - let _subscriber = sub_session .declare_subscriber(KEY_EXPR) .callback(move |sample| { @@ -287,22 +220,12 @@ fn test_pub_sub_allow_then_deny(interface_name: &str) { std::thread::sleep(std::time::Duration::from_millis(10)); assert_ne!(*zlock!(received_value), VALUE); } -fn test_pub_sub_deny_then_allow(interface_name: &str) { - let mut config_router = Config::default(); - config_router.set_mode(Some(WhatAmI::Router)).unwrap(); - config_router - .listen - .set_endpoints(vec![ - "tcp/localhost:7447".parse().unwrap(), - "tcp/localhost:7448".parse().unwrap(), - ]) - .unwrap(); - - config_router - .scouting - .multicast - .set_enabled(Some(false)) - .unwrap(); +fn test_pub_sub_deny_then_allow( + mut config_router: Config, + config_pub: &Config, + config_sub: &Config, + interface_name: &str, +) { let acl_js = format!( r#" {{ @@ -331,43 +254,14 @@ fn test_pub_sub_deny_then_allow(interface_name: &str) { ); config_router.insert_json5("acl", &acl_js).unwrap(); - let mut config_sub = Config::default(); - config_sub.set_mode(Some(WhatAmI::Client)).unwrap(); - config_sub - .connect - .set_endpoints(vec!["tcp/localhost:7447".parse().unwrap()]) - .unwrap(); - config_sub - .scouting - .multicast - .set_enabled(Some(false)) - .unwrap(); - - let mut config_pub = Config::default(); - config_pub.set_mode(Some(WhatAmI::Client)).unwrap(); - config_pub - .connect - .set_endpoints(vec!["tcp/localhost:7448".parse().unwrap()]) - .unwrap(); - config_pub - .scouting - .multicast - .set_enabled(Some(false)) - .unwrap(); - const KEY_EXPR: &str = "test/demo"; const VALUE: &str = "zenoh"; - let _session = zenoh::open(config_router).res().unwrap(); - - let sub_session = zenoh::open(config_sub).res().unwrap(); - let pub_session = zenoh::open(config_pub).res().unwrap(); - + let sub_session = zenoh::open(config_sub.clone()).res().unwrap(); + let pub_session = zenoh::open(config_pub.clone()).res().unwrap(); let publisher = pub_session.declare_publisher(KEY_EXPR).res().unwrap(); - let received_value = Arc::new(Mutex::new(String::new())); let temp_recv_value = received_value.clone(); - let _subscriber = sub_session .declare_subscriber(KEY_EXPR) .callback(move |sample| { @@ -383,22 +277,7 @@ fn test_pub_sub_deny_then_allow(interface_name: &str) { assert_eq!(*zlock!(received_value), VALUE); } -fn test_get_queryable_deny() { - let mut config_router = Config::default(); - config_router.set_mode(Some(WhatAmI::Router)).unwrap(); - config_router - .listen - .set_endpoints(vec![ - "tcp/localhost:7447".parse().unwrap(), - "tcp/localhost:7448".parse().unwrap(), - ]) - .unwrap(); - - config_router - .scouting - .multicast - .set_enabled(Some(false)) - .unwrap(); +fn test_get_queryable_deny(mut config_router: Config, config_qbl: &Config, config_get: &Config) { config_router .insert_json5( "acl", @@ -412,33 +291,12 @@ fn test_get_queryable_deny() { }"#, ) .unwrap(); - let mut config_qbl = Config::default(); - config_qbl.set_mode(Some(WhatAmI::Client)).unwrap(); - config_qbl - .connect - 
.set_endpoints(vec!["tcp/localhost:7447".parse().unwrap()]) - .unwrap(); - config_qbl - .scouting - .multicast - .set_enabled(Some(false)) - .unwrap(); - let mut config_get = Config::default(); - config_get.set_mode(Some(WhatAmI::Client)).unwrap(); - config_get - .connect - .set_endpoints(vec!["tcp/localhost:7448".parse().unwrap()]) - .unwrap(); - config_get - .scouting - .multicast - .set_enabled(Some(false)) - .unwrap(); + const KEY_EXPR: &str = "test/demo"; const VALUE: &str = "zenoh"; let _session = zenoh::open(config_router).res().unwrap(); - let qbl_session = zenoh::open(config_qbl).res().unwrap(); - let get_session = zenoh::open(config_get).res().unwrap(); + let qbl_session = zenoh::open(config_qbl.clone()).res().unwrap(); + let get_session = zenoh::open(config_get.clone()).res().unwrap(); let mut received_value = String::new(); let _qbl = qbl_session .declare_queryable(KEY_EXPR) @@ -463,22 +321,7 @@ fn test_get_queryable_deny() { assert_ne!(received_value, VALUE); } -fn test_get_queryable_allow() { - let mut config_router = Config::default(); - config_router.set_mode(Some(WhatAmI::Router)).unwrap(); - config_router - .listen - .set_endpoints(vec![ - "tcp/localhost:7447".parse().unwrap(), - "tcp/localhost:7448".parse().unwrap(), - ]) - .unwrap(); - - config_router - .scouting - .multicast - .set_enabled(Some(false)) - .unwrap(); +fn test_get_queryable_allow(mut config_router: Config, config_qbl: &Config, config_get: &Config) { config_router .insert_json5( "acl", @@ -487,39 +330,16 @@ fn test_get_queryable_allow() { "default_permission": "allow", "rules": [ - ] - }"#, ) .unwrap(); - let mut config_qbl = Config::default(); - config_qbl.set_mode(Some(WhatAmI::Client)).unwrap(); - config_qbl - .connect - .set_endpoints(vec!["tcp/localhost:7447".parse().unwrap()]) - .unwrap(); - config_qbl - .scouting - .multicast - .set_enabled(Some(false)) - .unwrap(); - let mut config_get = Config::default(); - config_get.set_mode(Some(WhatAmI::Client)).unwrap(); - config_get - .connect - .set_endpoints(vec!["tcp/localhost:7448".parse().unwrap()]) - .unwrap(); - config_get - .scouting - .multicast - .set_enabled(Some(false)) - .unwrap(); + const KEY_EXPR: &str = "test/demo"; const VALUE: &str = "zenoh"; let _session = zenoh::open(config_router).res().unwrap(); - let qbl_session = zenoh::open(config_qbl).res().unwrap(); - let get_session = zenoh::open(config_get).res().unwrap(); + let qbl_session = zenoh::open(config_qbl.clone()).res().unwrap(); + let get_session = zenoh::open(config_get.clone()).res().unwrap(); let mut received_value = String::new(); let _qbl = qbl_session .declare_queryable(KEY_EXPR) @@ -544,22 +364,12 @@ fn test_get_queryable_allow() { assert_eq!(received_value, VALUE); } -fn test_get_queryable_allow_then_deny(interface_name: &str) { - let mut config_router = Config::default(); - config_router.set_mode(Some(WhatAmI::Router)).unwrap(); - config_router - .listen - .set_endpoints(vec![ - "tcp/localhost:7447".parse().unwrap(), - "tcp/localhost:7448".parse().unwrap(), - ]) - .unwrap(); - - config_router - .scouting - .multicast - .set_enabled(Some(false)) - .unwrap(); +fn test_get_queryable_allow_then_deny( + mut config_router: Config, + config_qbl: &Config, + config_get: &Config, + interface_name: &str, +) { let acl_js = format!( r#" {{ @@ -587,33 +397,12 @@ fn test_get_queryable_allow_then_deny(interface_name: &str) { interface_name ); config_router.insert_json5("acl", &acl_js).unwrap(); - let mut config_qbl = Config::default(); - config_qbl.set_mode(Some(WhatAmI::Client)).unwrap(); - 
config_qbl - .connect - .set_endpoints(vec!["tcp/localhost:7447".parse().unwrap()]) - .unwrap(); - config_qbl - .scouting - .multicast - .set_enabled(Some(false)) - .unwrap(); - let mut config_get = Config::default(); - config_get.set_mode(Some(WhatAmI::Client)).unwrap(); - config_get - .connect - .set_endpoints(vec!["tcp/localhost:7448".parse().unwrap()]) - .unwrap(); - config_get - .scouting - .multicast - .set_enabled(Some(false)) - .unwrap(); + const KEY_EXPR: &str = "test/demo"; const VALUE: &str = "zenoh"; let _session = zenoh::open(config_router).res().unwrap(); - let qbl_session = zenoh::open(config_qbl).res().unwrap(); - let get_session = zenoh::open(config_get).res().unwrap(); + let qbl_session = zenoh::open(config_qbl.clone()).res().unwrap(); + let get_session = zenoh::open(config_get.clone()).res().unwrap(); let mut received_value = String::new(); let _qbl = qbl_session .declare_queryable(KEY_EXPR) @@ -638,22 +427,12 @@ fn test_get_queryable_allow_then_deny(interface_name: &str) { assert_ne!(received_value, VALUE); } -fn test_get_queryable_deny_then_allow(interface_name: &str) { - let mut config_router = Config::default(); - config_router.set_mode(Some(WhatAmI::Router)).unwrap(); - config_router - .listen - .set_endpoints(vec![ - "tcp/localhost:7447".parse().unwrap(), - "tcp/localhost:7448".parse().unwrap(), - ]) - .unwrap(); - - config_router - .scouting - .multicast - .set_enabled(Some(false)) - .unwrap(); +fn test_get_queryable_deny_then_allow( + mut config_router: Config, + config_qbl: &Config, + config_get: &Config, + interface_name: &str, +) { let acl_js = format!( r#" {{ @@ -681,33 +460,12 @@ fn test_get_queryable_deny_then_allow(interface_name: &str) { interface_name ); config_router.insert_json5("acl", &acl_js).unwrap(); - let mut config_qbl = Config::default(); - config_qbl.set_mode(Some(WhatAmI::Client)).unwrap(); - config_qbl - .connect - .set_endpoints(vec!["tcp/localhost:7447".parse().unwrap()]) - .unwrap(); - config_qbl - .scouting - .multicast - .set_enabled(Some(false)) - .unwrap(); - let mut config_get = Config::default(); - config_get.set_mode(Some(WhatAmI::Client)).unwrap(); - config_get - .connect - .set_endpoints(vec!["tcp/localhost:7448".parse().unwrap()]) - .unwrap(); - config_get - .scouting - .multicast - .set_enabled(Some(false)) - .unwrap(); + const KEY_EXPR: &str = "test/demo"; const VALUE: &str = "zenoh"; let _session = zenoh::open(config_router).res().unwrap(); - let qbl_session = zenoh::open(config_qbl).res().unwrap(); - let get_session = zenoh::open(config_get).res().unwrap(); + let qbl_session = zenoh::open(config_qbl.clone()).res().unwrap(); + let get_session = zenoh::open(config_get.clone()).res().unwrap(); let mut received_value = String::new(); let _qbl = qbl_session .declare_queryable(KEY_EXPR) From 6881e6d95deab2f5beab399f53d2e98b6456c643 Mon Sep 17 00:00:00 2001 From: snehilzs Date: Sun, 7 Apr 2024 23:00:30 +0200 Subject: [PATCH 094/122] clean tests code --- zenoh/tests/acl.rs | 40 ++++++++++++++++++++++++---------------- 1 file changed, 24 insertions(+), 16 deletions(-) diff --git a/zenoh/tests/acl.rs b/zenoh/tests/acl.rs index d006788ea4..ad085804da 100644 --- a/zenoh/tests/acl.rs +++ b/zenoh/tests/acl.rs @@ -51,31 +51,31 @@ fn test_acl() { let config_qbl = &config_pub; let config_get = &config_sub; - test_pub_sub_allow(config_router.clone(), &config_sub, &config_pub); - test_pub_sub_deny(config_router.clone(), &config_sub, &config_pub); - test_get_queryable_allow(config_router.clone(), config_qbl, config_get); - 
test_get_queryable_deny(config_router.clone(), config_qbl, config_get); + test_pub_sub_allow(&config_router, &config_sub, &config_pub); + test_pub_sub_deny(&config_router, &config_sub, &config_pub); + test_get_queryable_allow(&config_router, config_qbl, config_get); + test_get_queryable_deny(&config_router, config_qbl, config_get); if let Some(loopback_face) = get_loopback_interface() { test_pub_sub_allow_then_deny( - config_router.clone(), + &config_router, &config_sub, &config_pub, &loopback_face.name, ); test_pub_sub_deny_then_allow( - config_router.clone(), + &config_router, &config_sub, &config_pub, &loopback_face.name, ); test_get_queryable_allow_then_deny( - config_router.clone(), + &config_router, config_qbl, config_get, &loopback_face.name, ); test_get_queryable_deny_then_allow( - config_router.clone(), + &config_router, config_qbl, config_get, &loopback_face.name, @@ -89,7 +89,8 @@ fn get_loopback_interface() -> Option { ifs.into_iter().find(|i| i.is_loopback()) } -fn test_pub_sub_deny(mut config_router: Config, config_pub: &Config, config_sub: &Config) { +fn test_pub_sub_deny(config_router: &Config, config_pub: &Config, config_sub: &Config) { + let mut config_router = config_router.clone(); config_router .insert_json5( "acl", @@ -126,7 +127,8 @@ fn test_pub_sub_deny(mut config_router: Config, config_pub: &Config, config_sub: assert_ne!(*zlock!(received_value), VALUE); } -fn test_pub_sub_allow(mut config_router: Config, config_pub: &Config, config_sub: &Config) { +fn test_pub_sub_allow(config_router: &Config, config_pub: &Config, config_sub: &Config) { + let mut config_router = config_router.clone(); config_router .insert_json5( "acl", @@ -165,11 +167,12 @@ fn test_pub_sub_allow(mut config_router: Config, config_pub: &Config, config_sub assert_eq!(*zlock!(received_value), VALUE); } fn test_pub_sub_allow_then_deny( - mut config_router: Config, + config_router: &Config, config_pub: &Config, config_sub: &Config, interface_name: &str, ) { + let mut config_router = config_router.clone(); let acl_js = format!( r#" {{ @@ -221,11 +224,12 @@ fn test_pub_sub_allow_then_deny( assert_ne!(*zlock!(received_value), VALUE); } fn test_pub_sub_deny_then_allow( - mut config_router: Config, + config_router: &Config, config_pub: &Config, config_sub: &Config, interface_name: &str, ) { + let mut config_router = config_router.clone(); let acl_js = format!( r#" {{ @@ -277,7 +281,8 @@ fn test_pub_sub_deny_then_allow( assert_eq!(*zlock!(received_value), VALUE); } -fn test_get_queryable_deny(mut config_router: Config, config_qbl: &Config, config_get: &Config) { +fn test_get_queryable_deny(config_router: &Config, config_qbl: &Config, config_get: &Config) { + let mut config_router = config_router.clone(); config_router .insert_json5( "acl", @@ -321,7 +326,8 @@ fn test_get_queryable_deny(mut config_router: Config, config_qbl: &Config, confi assert_ne!(received_value, VALUE); } -fn test_get_queryable_allow(mut config_router: Config, config_qbl: &Config, config_get: &Config) { +fn test_get_queryable_allow(config_router: &Config, config_qbl: &Config, config_get: &Config) { + let mut config_router = config_router.clone(); config_router .insert_json5( "acl", @@ -365,11 +371,12 @@ fn test_get_queryable_allow(mut config_router: Config, config_qbl: &Config, conf } fn test_get_queryable_allow_then_deny( - mut config_router: Config, + config_router: &Config, config_qbl: &Config, config_get: &Config, interface_name: &str, ) { + let mut config_router = config_router.clone(); let acl_js = format!( r#" {{ @@ -428,11 +435,12 
@@ fn test_get_queryable_allow_then_deny( } fn test_get_queryable_deny_then_allow( - mut config_router: Config, + config_router: &Config, config_qbl: &Config, config_get: &Config, interface_name: &str, ) { + let mut config_router = config_router.clone(); let acl_js = format!( r#" {{ From f239a4d040f66e7eb9a71e7ed7fb197dc4454b41 Mon Sep 17 00:00:00 2001 From: snehilzs Date: Sun, 7 Apr 2024 23:07:53 +0200 Subject: [PATCH 095/122] clean config file --- DEFAULT_CONFIG.json5 | 66 +++++++++++++++++++++----------------------- 1 file changed, 32 insertions(+), 34 deletions(-) diff --git a/DEFAULT_CONFIG.json5 b/DEFAULT_CONFIG.json5 index c85392225d..c28a00b0ac 100644 --- a/DEFAULT_CONFIG.json5 +++ b/DEFAULT_CONFIG.json5 @@ -7,13 +7,16 @@ /// If not set, a random unsigned 128bit integer will be used. /// WARNING: this id must be unique in your zenoh network. // id: "1234567890abcdef", + /// The node's mode (router, peer or client) - mode: "router", + mode: "peer", + /// The node's metadata (name, location, DNS name, etc.) Arbitrary JSON data not interpreted by zenohd and available in admin space @/router/ metadata: { name: "strawberry", location: "Penny Lane" }, + /// Which endpoints to connect to. E.g. tcp/localhost:7447. /// By configuring the endpoints, it is possible to tell zenoh which router/peer to connect to at startup. /// For TCP/UDP on Linux, it is possible additionally specify the interface to be connected to: @@ -21,22 +24,22 @@ connect: { /// timeout waiting for all endpoints connected (0: no retry, -1: infinite timeout) /// Accepts a single value or different values for router, peer and client. - timeout_ms: { router: -1, peer: -1, client: 0 - }, + timeout_ms: { router: -1, peer: -1, client: 0 }, endpoints: [ // "/
" ], + /// Global connect configuration, /// Accepts a single value or different values for router, peer and client. /// The configuration can also be specified for the separate endpoint /// it will override the global one /// E.g. tcp/192.168.0.1:7447#retry_period_init_ms=20000;retry_period_max_ms=10000" + /// exit from application, if timeout exceed - exit_on_failure: { router: false, peer: false, client: true - }, + exit_on_failure: { router: false, peer: false, client: true }, /// connect establishing retry configuration - retry: { + retry: { /// intial wait timeout until next connect try period_init_ms: 1000, /// maximum wait timeout until next connect try @@ -45,6 +48,7 @@ period_increase_factor: 2, }, }, + /// Which endpoints to listen on. E.g. tcp/localhost:7447. /// By configuring the endpoints, it is possible to tell zenoh which are the endpoints that other routers, /// peers, or client can use to establish a zenoh session. @@ -58,15 +62,17 @@ endpoints: [ // "/
" ], + /// Global listen configuration, /// Accepts a single value or different values for router, peer and client. /// The configuration can also be specified for the separate endpoint /// it will override the global one /// E.g. tcp/192.168.0.1:7447#exit_on_failure=false;retry_period_max_ms=1000" + /// exit from application, if timeout exceed exit_on_failure: true, /// listen retry configuration - retry: { + retry: { /// intial wait timeout until next try period_init_ms: 1000, /// maximum wait timeout until next try @@ -84,7 +90,7 @@ /// The multicast scouting configuration. multicast: { /// Whether multicast scouting is enabled or not - enabled: false, + enabled: true, /// The socket which should be used for multicast scouting address: "224.0.0.224:7446", /// The network interface which should be used for multicast scouting @@ -92,15 +98,14 @@ /// Which type of Zenoh instances to automatically establish sessions with upon discovery on UDP multicast. /// Accepts a single value or different values for router, peer and client. /// Each value is bit-or-like combinations of "peer", "router" and "client". - autoconnect: { router: "", peer: "router|peer" - }, + autoconnect: { router: "", peer: "router|peer" }, /// Whether or not to listen for scout messages on UDP multicast and reply to them. - listen: false, + listen: true, }, /// The gossip scouting configuration. gossip: { /// Whether gossip scouting is enabled or not - enabled: false, + enabled: true, /// When true, gossip scouting informations are propagated multiple hops to all nodes in the local network. /// When false, gossip scouting informations are only propagated to the next hop. /// Activating multihop gossip implies more scouting traffic and a lower scalability. @@ -110,23 +115,24 @@ /// Which type of Zenoh instances to automatically establish sessions with upon discovery on gossip. /// Accepts a single value or different values for router, peer and client. /// Each value is bit-or-like combinations of "peer", "router" and "client". - autoconnect: { router: "", peer: "router|peer" - }, + autoconnect: { router: "", peer: "router|peer" }, }, }, + /// Configuration of data messages timestamps management. timestamping: { /// Whether data messages should be timestamped if not already. /// Accepts a single boolean value or different values for router, peer and client. - enabled: { router: true, peer: false, client: false - }, + enabled: { router: true, peer: false, client: false }, /// Whether data messages with timestamps in the future should be dropped or not. /// If set to false (default), messages with timestamps in the future are retimestamped. /// Timestamps are ignored if timestamping is disabled. drop_future_timestamp: false, }, + /// The default timeout to apply to queries in milliseconds. queries_default_timeout: 10000, + /// The routing strategy to use and it's configuration. routing: { /// The routing strategy to use in routers and it's configuration. @@ -143,6 +149,7 @@ mode: "peer_to_peer", }, }, + // /// The declarations aggregation strategy. // aggregation: { // /// A list of key-expressions for which all included subscribers will be aggregated into. @@ -154,6 +161,7 @@ // // key_expression // ], // }, + // /// The downsampling declaration. 
// downsampling: [ // { @@ -167,10 +175,10 @@ // ], // }, // ], - /// configure acl rules + /// configure acl rules // acl: { // ///[true/false] acl will be activated only if this is set to true - // "enabled": true, + // "enabled": false, // ///[deny/allow] default permission is deny (even if this is left empty or not specified) // "default_permission": "allow", // ///rule set for permissions allowing or denying access to key-expressions @@ -181,7 +189,7 @@ // "put" // ], // "flow":["egress","ingress"], - // "permission": "allow", + // "permission": "deny", // "key_expr": [ // "test/thr" // ], @@ -189,19 +197,6 @@ // "lo0" // ] // }, - // { - // "action": [ - // "declare_subscriber" - // ], - // "flow":["egress","ingress"], - // "permission": "allow", - // "interface": [ - // "lo0" - // ], - // "key_expr": [ - // "test/thr" - // ] - // }, // ] //} /// Configure internal transport parameters @@ -365,8 +360,8 @@ known_keys_file: null, }, }, - }, + /// Configure the Admin Space /// Unstable: this configuration part works as advertised, but may change in a future release adminspace: { @@ -376,6 +371,7 @@ write: false, }, }, + /// /// Plugins configurations /// @@ -503,6 +499,7 @@ // }, // }, // }, + // /// Plugin configuration example using `__config__` property // plugins: { // rest: { @@ -512,4 +509,5 @@ // __config__: "./plugins/zenoh-plugin-storage-manager/config.json5", // } // }, -} \ No newline at end of file + +} From fbfed488547b47cbb72364f6331ed872bedfb64e Mon Sep 17 00:00:00 2001 From: snehilzs Date: Sun, 7 Apr 2024 23:21:44 +0200 Subject: [PATCH 096/122] clean config file --- DEFAULT_CONFIG.json5 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/DEFAULT_CONFIG.json5 b/DEFAULT_CONFIG.json5 index c28a00b0ac..782fe3e0b2 100644 --- a/DEFAULT_CONFIG.json5 +++ b/DEFAULT_CONFIG.json5 @@ -198,7 +198,7 @@ // ] // }, // ] - //} + //}, /// Configure internal transport parameters transport: { unicast: { From 4be148d0f2ddd3f806855e186948983579a054c4 Mon Sep 17 00:00:00 2001 From: snehilzs Date: Mon, 8 Apr 2024 10:17:01 +0200 Subject: [PATCH 097/122] clean code --- .gitignore | 3 --- 1 file changed, 3 deletions(-) diff --git a/.gitignore b/.gitignore index 82055d6af7..695d0464b1 100644 --- a/.gitignore +++ b/.gitignore @@ -20,6 +20,3 @@ .vscode cargo-timing*.html - -# Remove test data -testsfiles \ No newline at end of file From fc4a66cab6c32a70e0e5b31543ed1f7d5b56ff32 Mon Sep 17 00:00:00 2001 From: snehilzs Date: Mon, 8 Apr 2024 10:24:39 +0200 Subject: [PATCH 098/122] resolve conflicts --- Cargo.toml | 4 +++- zenoh/Cargo.toml | 4 +++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index fd97fcfabd..73abb078d5 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -102,6 +102,7 @@ hmac = { version = "0.12.1", features = ["std"] } home = "0.5.4" http-types = "2.12.0" humantime = "2.1.0" +interfaces = "0.0.9" json5 = "0.4.1" jsonschema = { version = "0.17.1", default-features = false } keyed-set = "0.4.4" @@ -197,7 +198,8 @@ zenoh-link = { version = "0.11.0-dev", path = "io/zenoh-link" } zenoh-link-commons = { version = "0.11.0-dev", path = "io/zenoh-link-commons" } zenoh = { version = "0.11.0-dev", path = "zenoh", default-features = false } zenoh-runtime = { version = "0.11.0-dev", path = "commons/zenoh-runtime" } -interfaces = "0.0.9" +zenoh-task = { version = "0.11.0-dev", path = "commons/zenoh-task" } + [profile.dev] debug = true opt-level = 0 diff --git a/zenoh/Cargo.toml b/zenoh/Cargo.toml index 45586e3be7..8e58eebb6f 100644 --- a/zenoh/Cargo.toml 
+++ b/zenoh/Cargo.toml @@ -76,6 +76,7 @@ flume = { workspace = true } form_urlencoded = { workspace = true } futures = { workspace = true } git-version = { workspace = true } +interfaces = { workspace = true } lazy_static = { workspace = true } log = { workspace = true } ordered-float = { workspace = true } @@ -107,7 +108,8 @@ zenoh-sync = { workspace = true } zenoh-transport = { workspace = true } zenoh-util = { workspace = true } zenoh-runtime = { workspace = true } -interfaces = { workspace = true } +zenoh-task = { workspace = true } + [build-dependencies] rustc_version = { workspace = true } From d847de1abf34052d828cee5066da089a4cc9fa42 Mon Sep 17 00:00:00 2001 From: snehilzs Date: Mon, 8 Apr 2024 11:56:27 +0200 Subject: [PATCH 099/122] resolve conflicts issues --- zenoh/tests/acl.rs | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/zenoh/tests/acl.rs b/zenoh/tests/acl.rs index ad085804da..eff62ac960 100644 --- a/zenoh/tests/acl.rs +++ b/zenoh/tests/acl.rs @@ -121,9 +121,9 @@ fn test_pub_sub_deny(config_router: &Config, config_pub: &Config, config_sub: &C .res() .unwrap(); - std::thread::sleep(std::time::Duration::from_millis(10)); + std::thread::sleep(std::time::Duration::from_secs(1)); publisher.put(VALUE).res().unwrap(); - std::thread::sleep(std::time::Duration::from_millis(10)); + std::thread::sleep(std::time::Duration::from_secs(1)); assert_ne!(*zlock!(received_value), VALUE); } @@ -161,9 +161,9 @@ fn test_pub_sub_allow(config_router: &Config, config_pub: &Config, config_sub: & .res() .unwrap(); - std::thread::sleep(std::time::Duration::from_millis(10)); + std::thread::sleep(std::time::Duration::from_secs(1)); publisher.put(VALUE).res().unwrap(); - std::thread::sleep(std::time::Duration::from_millis(10)); + std::thread::sleep(std::time::Duration::from_secs(1)); assert_eq!(*zlock!(received_value), VALUE); } fn test_pub_sub_allow_then_deny( @@ -218,9 +218,9 @@ fn test_pub_sub_allow_then_deny( .res() .unwrap(); - std::thread::sleep(std::time::Duration::from_millis(10)); + std::thread::sleep(std::time::Duration::from_secs(1)); publisher.put(VALUE).res().unwrap(); - std::thread::sleep(std::time::Duration::from_millis(10)); + std::thread::sleep(std::time::Duration::from_secs(1)); assert_ne!(*zlock!(received_value), VALUE); } fn test_pub_sub_deny_then_allow( @@ -275,9 +275,9 @@ fn test_pub_sub_deny_then_allow( .res() .unwrap(); - std::thread::sleep(std::time::Duration::from_millis(10)); + std::thread::sleep(std::time::Duration::from_secs(1)); publisher.put(VALUE).res().unwrap(); - std::thread::sleep(std::time::Duration::from_millis(10)); + std::thread::sleep(std::time::Duration::from_secs(1)); assert_eq!(*zlock!(received_value), VALUE); } From 73a5eaf0fd2fc18c6f3db70c528d4b915eba2ae8 Mon Sep 17 00:00:00 2001 From: snehilzs Date: Tue, 9 Apr 2024 14:02:13 +0200 Subject: [PATCH 100/122] refactor code --- DEFAULT_CONFIG.json5 | 48 +- commons/zenoh-config/src/lib.rs | 12 +- testsfiles/DEFAULT_CONFIG-pub.json5 | 18 + testsfiles/DEFAULT_CONFIG-sub.json5 | 16 + testsfiles/DEFAULT_CONFIG-test.json5 | 475 +++++++++++++++ .../net/routing/interceptor/access_control.rs | 259 +++----- .../net/routing/interceptor/authorization.rs | 32 +- zenoh/tests/acl.rs | 551 +++++++++++++----- 8 files changed, 1026 insertions(+), 385 deletions(-) create mode 100644 testsfiles/DEFAULT_CONFIG-pub.json5 create mode 100644 testsfiles/DEFAULT_CONFIG-sub.json5 create mode 100644 testsfiles/DEFAULT_CONFIG-test.json5 diff --git a/DEFAULT_CONFIG.json5 b/DEFAULT_CONFIG.json5 index 
782fe3e0b2..bf0956b1bf 100644 --- a/DEFAULT_CONFIG.json5 +++ b/DEFAULT_CONFIG.json5 @@ -175,30 +175,30 @@ // ], // }, // ], - /// configure acl rules - // acl: { - // ///[true/false] acl will be activated only if this is set to true - // "enabled": false, - // ///[deny/allow] default permission is deny (even if this is left empty or not specified) - // "default_permission": "allow", - // ///rule set for permissions allowing or denying access to key-expressions - // "rules": - // [ - // { - // "action": [ - // "put" - // ], - // "flow":["egress","ingress"], - // "permission": "deny", - // "key_expr": [ - // "test/thr" - // ], - // "interface": [ - // "lo0" - // ] - // }, - // ] - //}, + // /// configure acl rules + // acl: { + // ///[true/false] acl will be activated only if this is set to true + // "enabled": false, + // ///[deny/allow] default permission is deny (even if this is left empty or not specified) + // "default_permission": "deny", + // ///rule set for permissions allowing or denying access to key-expressions + // "rules": + // [ + // { + // "actions": [ + // "put" + // ], + // "flows":["egress","ingress"], + // "permission": "allow", + // "key_exprs": [ + // "test/thr" + // ], + // "interfaces": [ + // "lo0" + // ] + // }, + // ] + //}, /// Configure internal transport parameters transport: { unicast: { diff --git a/commons/zenoh-config/src/lib.rs b/commons/zenoh-config/src/lib.rs index c25efecbbe..21156958bb 100644 --- a/commons/zenoh-config/src/lib.rs +++ b/commons/zenoh-config/src/lib.rs @@ -101,10 +101,10 @@ pub struct DownsamplingItemConf { #[derive(Serialize, Debug, Deserialize, Clone)] pub struct AclConfigRules { - pub interface: Vec, - pub key_expr: Vec, - pub action: Vec, - pub flow: Vec, + pub interfaces: Vec, + pub key_exprs: Vec, + pub actions: Vec, + pub flows: Vec, pub permission: Permission, } @@ -114,7 +114,7 @@ pub struct PolicyRule { pub key_expr: String, pub action: Action, pub permission: Permission, - pub flow: DownsamplingFlow, + pub flow: InterceptorFlow, } #[derive(Serialize, Debug, Deserialize, Eq, PartialEq, Hash, Clone)] @@ -140,7 +140,7 @@ pub enum Permission { Deny, } -pub type Flow = DownsamplingFlow; +pub type InterceptorFlow = DownsamplingFlow; pub trait ConfigValidator: Send + Sync { fn check_config( &self, diff --git a/testsfiles/DEFAULT_CONFIG-pub.json5 b/testsfiles/DEFAULT_CONFIG-pub.json5 new file mode 100644 index 0000000000..6a039c3ce5 --- /dev/null +++ b/testsfiles/DEFAULT_CONFIG-pub.json5 @@ -0,0 +1,18 @@ +{ + mode: "client", + connect: { + endpoints: [ + /// "tcp/192.168.11.1:7447", + "tcp/127.0.0.1:7447", + + ], + }, + scouting: { + multicast: { + enabled: false, + }, + gossip: { + enabled: false, + }, + }, +} \ No newline at end of file diff --git a/testsfiles/DEFAULT_CONFIG-sub.json5 b/testsfiles/DEFAULT_CONFIG-sub.json5 new file mode 100644 index 0000000000..7aad9f7818 --- /dev/null +++ b/testsfiles/DEFAULT_CONFIG-sub.json5 @@ -0,0 +1,16 @@ +{ + mode: "client", + connect: { + endpoints: [ + "tcp/127.0.0.1:7448", + ], + }, + scouting: { + multicast: { + enabled: false, + }, + gossip: { + enabled: false, + }, + }, +} \ No newline at end of file diff --git a/testsfiles/DEFAULT_CONFIG-test.json5 b/testsfiles/DEFAULT_CONFIG-test.json5 new file mode 100644 index 0000000000..0969054e1f --- /dev/null +++ b/testsfiles/DEFAULT_CONFIG-test.json5 @@ -0,0 +1,475 @@ +// { +// mode: "router", +// listen: { +// endpoints: [ +// "tcp/127.0.0.1:7447", +// "tcp/127.0.0.1:7448", +// ], +// }, +// scouting: { +// multicast: { +// enabled: false, +// }, 
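For context on the ACL schema change above (the rule fields become plural: actions, flows, key_exprs, interfaces, matching the renamed AclConfigRules fields): the reworked tests later in this patch configure the router by passing exactly this JSON5 shape through Config::insert_json5. Below is a minimal sketch of that pattern; the helper name acl_router_config is hypothetical, and "lo"/"lo0" are the loopback interface names the tests rely on.

// Sketch based on the pattern used by zenoh/tests/acl.rs in this patch;
// it builds a router-side Config whose ACL denies by default and allows
// put/declare_subscriber on "test/demo" over the loopback interface.
use zenoh_config::Config;

fn acl_router_config() -> Config {
    let mut config = Config::default();
    config
        .insert_json5(
            "acl",
            r#"{
              "enabled": true,
              "default_permission": "deny",
              "rules": [
                {
                  "permission": "allow",
                  "actions": ["put", "declare_subscriber"],
                  "flows": ["egress", "ingress"],
                  "key_exprs": ["test/demo"],
                  "interfaces": ["lo", "lo0"]
                }
              ]
            }"#,
        )
        .unwrap();
    config
}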
+// gossip: { +// enabled: false, +// }, +// }, +// acl: { +// ///[true/false] acl will be activated only if this is set to true +// "enabled": true, +// ///[deny/allow] default permission is deny (even if this is left empty or not specified) +// "default_permission": "allow", +// ///rule set for permissions allowing or denying access to key-expressions +// "rules": +// [ +// { +// "action": [ +// "put", +// "declare_subscriber", +// ], +// "flow":["egress","ingress"], +// "permission": "allow", +// "key_expr": [ +// "test/thr" +// ], +// "interface": [ +// "lo0" +// ] +// }, +// ] +// }, + +// } + + +/// This file attempts to list and document available configuration elements. +/// For a more complete view of the configuration's structure, check out `zenoh/src/config.rs`'s `Config` structure. +/// Note that the values here are correctly typed, but may not be sensible, so copying this file to change only the parts that matter to you is not good practice. +{ + /// The identifier (as unsigned 128bit integer in hexadecimal lowercase - leading zeros are not accepted) + /// that zenoh runtime will use. + /// If not set, a random unsigned 128bit integer will be used. + /// WARNING: this id must be unique in your zenoh network. + // id: "1234567890abcdef", + + /// The node's mode (router, peer or client) + mode: "router", + + /// The node's metadata (name, location, DNS name, etc.) Arbitrary JSON data not interpreted by zenohd and available in admin space @/router/ + + + /// Which endpoints to connect to. E.g. tcp/localhost:7447. + /// By configuring the endpoints, it is possible to tell zenoh which router/peer to connect to at startup. + /// For TCP/UDP on Linux, it is possible additionally specify the interface to be connected to: + /// E.g. tcp/192.168.0.1:7447#iface=eth0, for connect only if the IP address is reachable via the interface eth0 + + + /// Which endpoints to listen on. E.g. tcp/localhost:7447. + /// By configuring the endpoints, it is possible to tell zenoh which are the endpoints that other routers, + /// peers, or client can use to establish a zenoh session. + /// For TCP/UDP on Linux, it is possible additionally specify the interface to be listened to: + /// E.g. tcp/0.0.0.0:7447#iface=eth0, for listen connection only on eth0 + listen: { + /// timeout waiting for all listen endpoints (0: no retry, -1: infinite timeout) + /// Accepts a single value or different values for router, peer and client. + timeout_ms: 0, + + endpoints: [ + // "/
" + "tcp/127.0.0.1:7447", + "tcp/127.0.0.1:7448", + ], + + /// Global listen configuration, + /// Accepts a single value or different values for router, peer and client. + /// The configuration can also be specified for the separate endpoint + /// it will override the global one + /// E.g. tcp/192.168.0.1:7447#exit_on_failure=false;retry_period_max_ms=1000" + + /// exit from application, if timeout exceed + exit_on_failure: true, + /// listen retry configuration + retry: { + /// intial wait timeout until next try + period_init_ms: 1000, + /// maximum wait timeout until next try + period_max_ms: 4000, + /// increase factor for the next timeout until next try + period_increase_factor: 2, + }, + }, + /// Configure the scouting mechanisms and their behaviours + scouting: { + multicast: { + enabled: false, + }, + gossip: { + enabled: false, + }, + }, + + + + // /// The declarations aggregation strategy. + // aggregation: { + // /// A list of key-expressions for which all included subscribers will be aggregated into. + // subscribers: [ + // // key_expression + // ], + // /// A list of key-expressions for which all included publishers will be aggregated into. + // publishers: [ + // // key_expression + // ], + // }, + + // /// The downsampling declaration. + // downsampling: [ + // { + // /// A list of network interfaces messages will be processed on, the rest will be passed as is. + // interfaces: [ "wlan0" ], + // /// Data flow messages will be processed on. ("egress" or "ingress") + // flow: "egress", + // /// A list of downsampling rules: key_expression and the maximum frequency in Hertz + // rules: [ + // { key_expr: "demo/example/zenoh-rs-pub", freq: 0.1 }, + // ], + // }, + // ], + /// configure acl rules + acl: { + + ///[true/false] acl will be activated only if this is set to true + "enabled": true, + ///[deny/allow] default permission is deny (even if this is left empty or not specified) + "default_permission": "deny", + ///rule set for permissions allowing or denying access to key-expressions + "rules": + [ + { + "actions": [ + "put", + "declare_subscriber" + ], + "flows":["egress","ingress"], + "permission": "allow", + "key_exprs": [ + "test/thr" + ], + "interfaces": [ + "lo0" + ] + }, + ] + }, + /// Configure internal transport parameters + transport: { + unicast: { + /// Timeout in milliseconds when opening a link + accept_timeout: 10000, + /// Maximum number of zenoh session in pending state while accepting + accept_pending: 100, + /// Maximum number of sessions that can be simultaneously alive + max_sessions: 1000, + /// Maximum number of incoming links that are admitted per session + max_links: 1, + /// Enables the LowLatency transport + /// This option does not make LowLatency transport mandatory, the actual implementation of transport + /// used will depend on Establish procedure and other party's settings + /// + /// NOTE: Currently, the LowLatency transport doesn't preserve QoS prioritization. + /// NOTE: Due to the note above, 'lowlatency' is incompatible with 'qos' option, so in order to + /// enable 'lowlatency' you need to explicitly disable 'qos'. + lowlatency: false, + /// Enables QoS on unicast communications. + qos: { + enabled: true, + }, + /// Enables compression on unicast communications. + /// Compression capabilities are negotiated during session establishment. + /// If both Zenoh nodes support compression, then compression is activated. + compression: { + enabled: false, + }, + }, + multicast: { + /// Enables QoS on multicast communication. 
+ /// Default to false for Zenoh-to-Zenoh-Pico out-of-the-box compatibility. + qos: { + enabled: false, + }, + /// Enables compression on multicast communication. + /// Default to false for Zenoh-to-Zenoh-Pico out-of-the-box compatibility. + compression: { + enabled: false, + }, + }, + link: { + /// An optional whitelist of protocols to be used for accepting and opening sessions. + /// If not configured, all the supported protocols are automatically whitelisted. + /// The supported protocols are: ["tcp" , "udp", "tls", "quic", "ws", "unixsock-stream", "vsock"] + /// For example, to only enable "tls" and "quic": + // protocols: ["tls", "quic"], + /// Configure the zenoh TX parameters of a link + tx: { + /// The resolution in bits to be used for the message sequence numbers. + /// When establishing a session with another Zenoh instance, the lowest value of the two instances will be used. + /// Accepted values: 8bit, 16bit, 32bit, 64bit. + sequence_number_resolution: "32bit", + /// Link lease duration in milliseconds to announce to other zenoh nodes + lease: 10000, + /// Number of keep-alive messages in a link lease duration. If no data is sent, keep alive + /// messages will be sent at the configured time interval. + /// NOTE: In order to consider eventual packet loss and transmission latency and jitter, + /// set the actual keep_alive interval to one fourth of the lease time: i.e. send + /// 4 keep_alive messages in a lease period. Changing the lease time will have the + /// keep_alive messages sent more or less often. + /// This is in-line with the ITU-T G.8013/Y.1731 specification on continous connectivity + /// check which considers a link as failed when no messages are received in 3.5 times the + /// target interval. + keep_alive: 4, + /// Batch size in bytes is expressed as a 16bit unsigned integer. + /// Therefore, the maximum batch size is 2^16-1 (i.e. 65535). + /// The default batch size value is the maximum batch size: 65535. + batch_size: 65535, + /// Each zenoh link has a transmission queue that can be configured + queue: { + /// The size of each priority queue indicates the number of batches a given queue can contain. + /// The amount of memory being allocated for each queue is then SIZE_XXX * BATCH_SIZE. + /// In the case of the transport link MTU being smaller than the ZN_BATCH_SIZE, + /// then amount of memory being allocated for each queue is SIZE_XXX * LINK_MTU. + /// If qos is false, then only the DATA priority will be allocated. + size: { + control: 1, + real_time: 1, + interactive_high: 1, + interactive_low: 1, + data_high: 2, + data: 4, + data_low: 4, + background: 4, + }, + /// Congestion occurs when the queue is empty (no available batch). + /// Using CongestionControl::Block the caller is blocked until a batch is available and re-insterted into the queue. + /// Using CongestionControl::Drop the message might be dropped, depending on conditions configured here. + congestion_control: { + /// The maximum time in microseconds to wait for an available batch before dropping the message if still no batch is available. + wait_before_drop: 1000 + }, + /// The initial exponential backoff time in nanoseconds to allow the batching to eventually progress. + /// Higher values lead to a more aggressive batching but it will introduce additional latency. 
+ backoff: 100, + }, + // Number of threads dedicated to transmission + // By default, the number of threads is calculated as follows: 1 + ((#cores - 1) / 4) + // threads: 4, + }, + /// Configure the zenoh RX parameters of a link + rx: { + /// Receiving buffer size in bytes for each link + /// The default the rx_buffer_size value is the same as the default batch size: 65335. + /// For very high throughput scenarios, the rx_buffer_size can be increased to accomodate + /// more in-flight data. This is particularly relevant when dealing with large messages. + /// E.g. for 16MiB rx_buffer_size set the value to: 16777216. + buffer_size: 65535, + /// Maximum size of the defragmentation buffer at receiver end. + /// Fragmented messages that are larger than the configured size will be dropped. + /// The default value is 1GiB. This would work in most scenarios. + /// NOTE: reduce the value if you are operating on a memory constrained device. + max_message_size: 1073741824, + }, + /// Configure TLS specific parameters + tls: { + /// Path to the certificate of the certificate authority used to validate either the server + /// or the client's keys and certificates, depending on the node's mode. If not specified + /// on router mode then the default WebPKI certificates are used instead. + root_ca_certificate: null, + /// Path to the TLS server private key + server_private_key: null, + /// Path to the TLS server public certificate + server_certificate: null, + /// Client authentication, if true enables mTLS (mutual authentication) + client_auth: false, + /// Path to the TLS client private key + client_private_key: null, + /// Path to the TLS client public certificate + client_certificate: null, + // Whether or not to use server name verification, if set to false zenoh will disregard the common names of the certificates when verifying servers. + // This could be dangerous because your CA can have signed a server cert for foo.com, that's later being used to host a server at baz.com. If you wan't your + // ca to verify that the server at baz.com is actually baz.com, let this be true (default). + server_name_verification: null, + }, + }, + /// Shared memory configuration + shared_memory: { + enabled: false, + }, + /// Access control configuration + auth: { + /// The configuration of authentification. + /// A password implies a username is required. + usrpwd: { + user: null, + password: null, + /// The path to a file containing the user password dictionary + dictionary_file: null, + }, + pubkey: { + public_key_pem: null, + private_key_pem: null, + public_key_file: null, + private_key_file: null, + key_size: null, + known_keys_file: null, + }, + }, + }, + + /// Configure the Admin Space + /// Unstable: this configuration part works as advertised, but may change in a future release + adminspace: { + // read and/or write permissions on the admin space + permissions: { + read: true, + write: false, + }, + }, + + /// + /// Plugins configurations + /// + // /// Directories where plugins configured by name should be looked for. Plugins configured by __path__ are not subject to lookup + // plugins_search_dirs: [], + // /// Plugins are only loaded if present in the configuration. When starting + // /// Once loaded, they may react to changes in the configuration made through the zenoh instance's adminspace. 
+ // plugins: { + // /// If no `__path__` is given to a plugin, zenohd will automatically search for a shared library matching the plugin's name (here, `libzenoh_plugin_rest.so` would be searched for on linux) + // + // /// Plugin settings may contain field `__config__` + // /// - If `__config__` is specified, it's content is merged into plugin configuration + // /// - Properties loaded from `__config__` file overrides existing properties + // /// - If json objects in loaded file contains `__config__` properties, they are processed recursively + // /// This is used in the 'storcge_manager' which supports subplugins, each with it's own config + // /// + // /// See below exapmle of plugin configuration using `__config__` property + // + // /// Configure the REST API plugin + // rest: { + // /// Setting this option to true allows zenohd to panic should it detect issues with this plugin. Setting it to false politely asks the plugin not to panic. + // __required__: true, // defaults to false + // /// load configuration from the file + // __config__: "./plugins/zenoh-plugin-rest/config.json5", + // /// http port to answer to rest requests + // http_port: 8000, + // }, + // + // /// Configure the storage manager plugin + // storage_manager: { + // /// When a path is present, automatic search is disabled, and zenohd will instead select the first path which manages to load. + // __path__: [ + // "./target/release/libzenoh_plugin_storage_manager.so", + // "./target/release/libzenoh_plugin_storage_manager.dylib", + // ], + // /// Directories where plugins configured by name should be looked for. Plugins configured by __path__ are not subject to lookup + // backend_search_dirs: [], + // /// The "memory" volume is always available, but you may create other volumes here, with various backends to support the actual storing. + // volumes: { + // /// An influxdb backend is also available at https://github.com/eclipse-zenoh/zenoh-backend-influxdb + // influxdb: { + // url: "https://myinfluxdb.example", + // /// Some plugins may need passwords in their configuration. + // /// To avoid leaking them through the adminspace, they may be masked behind a privacy barrier. + // /// any value held at the key "private" will not be shown in the adminspace. + // private: { + // username: "user1", + // password: "pw1", + // }, + // }, + // influxdb2: { + // /// A second backend of the same type can be spawned using `__path__`, for examples when different DBs are needed. + // backend: "influxdb", + // private: { + // username: "user2", + // password: "pw2", + // }, + // url: "https://localhost:8086", + // }, + // }, + // + // /// Configure the storages supported by the volumes + // storages: { + // demo: { + // /// Storages always need to know what set of keys they must work with. These sets are defined by a key expression. + // key_expr: "demo/memory/**", + // /// Storages also need to know which volume will be used to actually store their key-value pairs. + // /// The "memory" volume is always available, and doesn't require any per-storage options, so requesting "memory" by string is always sufficient. + // volume: "memory", + // }, + // demo2: { + // key_expr: "demo/memory2/**", + // volume: "memory", + // /// Storage manager plugin handles metadata in order to ensure convergence of distributed storages configured in Zenoh. + // /// Metadata includes the set of wild card updates and deletions (tombstones). + // /// Once the samples are guaranteed to be delivered, the metadata can be garbage collected. 
+ // garbage_collection: { + // /// The garbage collection event will be periodic with this duration. + // /// The duration is specified in seconds. + // period: 30, + // /// Metadata older than this parameter will be garbage collected. + // /// The duration is specified in seconds. + // lifespan: 86400, + // }, + // /// If multiple storages subscribing to the same key_expr should be synchronized, declare them as replicas. + // /// In the absence of this configuration, a normal storage is initialized + // /// Note: all the samples to be stored in replicas should be timestamped + // replica_config: { + // /// Specifying the parameters is optional, by default the values provided will be used. + // /// Time interval between different synchronization attempts in seconds + // publication_interval: 5, + // /// Expected propagation delay of the network in milliseconds + // propagation_delay: 200, + // /// This is the chunk that you would like your data to be divide into in time, in milliseconds. + // /// Higher the frequency of updates, lower the delta should be chosen + // /// To be efficient, delta should be the time containing no more than 100,000 samples + // delta: 1000, + // } + // }, + // demo3: { + // key_expr: "demo/memory3/**", + // volume: "memory", + // /// A complete storage advertises itself as containing all the known keys matching the configured key expression. + // /// If not configured, complete defaults to false. + // complete: "true", + // }, + // influx_demo: { + // key_expr: "demo/influxdb/**", + // /// This prefix will be stripped of the received keys when storing. + // strip_prefix: "demo/influxdb", + // /// influxdb-backed volumes need a bit more configuration, which is passed like-so: + // volume: { + // id: "influxdb", + // db: "example", + // }, + // }, + // influx_demo2: { + // key_expr: "demo/influxdb2/**", + // strip_prefix: "demo/influxdb2", + // volume: { + // id: "influxdb2", + // db: "example", + // }, + // }, + // }, + // }, + // }, + + // /// Plugin configuration example using `__config__` property + // plugins: { + // rest: { + // __config__: "./plugins/zenoh-plugin-rest/config.json5", + // }, + // storage_manager: { + // __config__: "./plugins/zenoh-plugin-storage-manager/config.json5", + // } + // }, + +} diff --git a/zenoh/src/net/routing/interceptor/access_control.rs b/zenoh/src/net/routing/interceptor/access_control.rs index 356784b598..1673ed6089 100644 --- a/zenoh/src/net/routing/interceptor/access_control.rs +++ b/zenoh/src/net/routing/interceptor/access_control.rs @@ -26,7 +26,7 @@ use crate::net::routing::RoutingContext; use crate::KeyExpr; use std::any::Any; use std::sync::Arc; -use zenoh_config::{AclConfig, Action, Flow, Permission, Subject, ZenohId}; +use zenoh_config::{AclConfig, Action, InterceptorFlow, Permission, Subject, ZenohId}; use zenoh_protocol::{ network::{Declare, DeclareBody, NetworkBody, NetworkMessage, Push, Request}, zenoh::{PushBody, RequestBody}, @@ -154,38 +154,44 @@ impl InterceptorTrait for IngressAclEnforcer { }) .or_else(|| ctx.full_expr())?; - if let NetworkBody::Push(Push { - payload: PushBody::Put(_), - .. - }) = &ctx.msg.body - { - if self.put(key_expr) == Permission::Deny { - return None; + match &ctx.msg.body { + NetworkBody::Push(Push { + payload: PushBody::Put(_), + .. + }) => { + if self.action(Action::Put, "Put", key_expr) == Permission::Deny { + return None; + } } - } else if let NetworkBody::Request(Request { - payload: RequestBody::Query(_), - .. 
- }) = &ctx.msg.body - { - if self.get(key_expr) == Permission::Deny { - return None; + NetworkBody::Request(Request { + payload: RequestBody::Query(_), + .. + }) => { + if self.action(Action::Get, "Get", key_expr) == Permission::Deny { + return None; + } } - } else if let NetworkBody::Declare(Declare { - body: DeclareBody::DeclareSubscriber(_), - .. - }) = &ctx.msg.body - { - if self.declare_subscriber(key_expr) == Permission::Deny { - return None; + NetworkBody::Declare(Declare { + body: DeclareBody::DeclareSubscriber(_), + .. + }) => { + if self.action(Action::DeclareSubscriber, "Declare Subscriber", key_expr) + == Permission::Deny + { + return None; + } } - } else if let NetworkBody::Declare(Declare { - body: DeclareBody::DeclareQueryable(_), - .. - }) = &ctx.msg.body - { - if self.declare_queryable(key_expr) == Permission::Deny { - return None; + NetworkBody::Declare(Declare { + body: DeclareBody::DeclareQueryable(_), + .. + }) => { + if self.action(Action::DeclareQueryable, "Declare Queryable", key_expr) + == Permission::Deny + { + return None; + } } + _ => {} } Some(ctx) } @@ -210,38 +216,44 @@ impl InterceptorTrait for EgressAclEnforcer { }) .or_else(|| ctx.full_expr())?; - if let NetworkBody::Push(Push { - payload: PushBody::Put(_), - .. - }) = &ctx.msg.body - { - if self.put(key_expr) == Permission::Deny { - return None; + match &ctx.msg.body { + NetworkBody::Push(Push { + payload: PushBody::Put(_), + .. + }) => { + if self.action(Action::Put, "Put", key_expr) == Permission::Deny { + return None; + } } - } else if let NetworkBody::Request(Request { - payload: RequestBody::Query(_), - .. - }) = &ctx.msg.body - { - if self.get(key_expr) == Permission::Deny { - return None; + NetworkBody::Request(Request { + payload: RequestBody::Query(_), + .. + }) => { + if self.action(Action::Get, "Get", key_expr) == Permission::Deny { + return None; + } } - } else if let NetworkBody::Declare(Declare { - body: DeclareBody::DeclareSubscriber(_), - .. - }) = &ctx.msg.body - { - if self.declare_subscriber(key_expr) == Permission::Deny { - return None; + NetworkBody::Declare(Declare { + body: DeclareBody::DeclareSubscriber(_), + .. + }) => { + if self.action(Action::DeclareSubscriber, "Declare Subscriber", key_expr) + == Permission::Deny + { + return None; + } } - } else if let NetworkBody::Declare(Declare { - body: DeclareBody::DeclareQueryable(_), - .. - }) = &ctx.msg.body - { - if self.declare_queryable(key_expr) == Permission::Deny { - return None; + NetworkBody::Declare(Declare { + body: DeclareBody::DeclareQueryable(_), + .. 
+ }) => { + if self.action(Action::DeclareQueryable, "Declare Queryable", key_expr) + == Permission::Deny + { + return None; + } } + _ => {} } Some(ctx) } @@ -250,127 +262,15 @@ pub trait AclActionMethods { fn policy_enforcer(&self) -> Arc; fn interface_list(&self) -> Vec; fn zid(&self) -> ZenohId; - fn flow(&self) -> Flow; - - fn put(&self, key_expr: &str) -> Permission { - let policy_enforcer = self.policy_enforcer(); - let interface_list = self.interface_list(); - let zid = self.zid(); - let mut decision = policy_enforcer.default_permission; - for subject in &interface_list { - match policy_enforcer.policy_decision_point( - *subject, - self.flow(), - Action::Put, - key_expr, - ) { - Ok(Permission::Allow) => { - decision = Permission::Allow; - break; - } - Ok(Permission::Deny) => { - decision = Permission::Deny; - continue; - } - Err(e) => { - log::debug!("[ACCESS LOG]: Authorization incomplete due to error {}", e); - return Permission::Deny; - } - } - } - - if decision == Permission::Deny { - log::debug!("[ACCESS LOG]: {} is unauthorized to Put", zid); - return Permission::Deny; - } - log::trace!("[ACCESS LOG]: {} is authorized access to Put", zid); - Permission::Allow - } - - fn get(&self, key_expr: &str) -> Permission { - let policy_enforcer = self.policy_enforcer(); - let interface_list = self.interface_list(); - let zid = self.zid(); - let mut decision = policy_enforcer.default_permission; - for subject in &interface_list { - match policy_enforcer.policy_decision_point( - *subject, - self.flow(), - Action::Get, - key_expr, - ) { - Ok(Permission::Allow) => { - decision = Permission::Allow; - break; - } - Ok(Permission::Deny) => { - decision = Permission::Deny; - continue; - } - Err(e) => { - log::debug!("[ACCESS LOG]: Authorization incomplete due to error {}", e); - return Permission::Deny; - } - } - } - - if decision == Permission::Deny { - log::debug!("[ACCESS LOG]: {} is unauthorized to Query/Get", zid); - return Permission::Deny; - } - log::trace!("[ACCESS LOG]: {} is authorized access to Query/Get", zid); - Permission::Allow - } - fn declare_subscriber(&self, key_expr: &str) -> Permission { - let policy_enforcer = self.policy_enforcer(); - let interface_list = self.interface_list(); - let zid = self.zid(); - let mut decision = policy_enforcer.default_permission; - for subject in &interface_list { - match policy_enforcer.policy_decision_point( - *subject, - self.flow(), - Action::DeclareSubscriber, - key_expr, - ) { - Ok(Permission::Allow) => { - decision = Permission::Allow; - break; - } - Ok(Permission::Deny) => { - decision = Permission::Deny; - continue; - } - Err(e) => { - log::debug!("[ACCESS LOG]: Authorization incomplete due to error {}", e); - return Permission::Deny; - } - } - } - - if decision == Permission::Deny { - log::debug!("[ACCESS LOG]: {} is unauthorized to be a Subscriber", zid); - return Permission::Deny; - } - log::trace!( - "[ACCESS LOG]: {} is authorized access to be a Subscriber", - zid - ); - Permission::Allow - } + fn flow(&self) -> InterceptorFlow; - fn declare_queryable(&self, key_expr: &str) -> Permission { + fn action(&self, action: Action, log_msg: &str, key_expr: &str) -> Permission { let policy_enforcer = self.policy_enforcer(); let interface_list = self.interface_list(); let zid = self.zid(); let mut decision = policy_enforcer.default_permission; for subject in &interface_list { - match policy_enforcer.policy_decision_point( - *subject, - self.flow(), - Action::DeclareQueryable, - key_expr, - ) { + match 
policy_enforcer.policy_decision_point(*subject, self.flow(), action, key_expr) { Ok(Permission::Allow) => { decision = Permission::Allow; break; @@ -387,13 +287,10 @@ pub trait AclActionMethods { } if decision == Permission::Deny { - log::debug!("[ACCESS LOG]: {} is unauthorized to be a Queryable", zid); + log::debug!("[ACCESS LOG]: {} is unauthorized to {}", zid, log_msg); return Permission::Deny; } - log::trace!( - "[ACCESS LOG]: {} is authorized access to be a Queryable", - zid - ); + log::trace!("[ACCESS LOG]: {} is authorized to {}", zid, log_msg); Permission::Allow } } @@ -410,8 +307,8 @@ impl AclActionMethods for EgressAclEnforcer { fn zid(&self) -> ZenohId { self.zid } - fn flow(&self) -> Flow { - Flow::Egress + fn flow(&self) -> InterceptorFlow { + InterceptorFlow::Egress } } @@ -427,7 +324,7 @@ impl AclActionMethods for IngressAclEnforcer { fn zid(&self) -> ZenohId { self.zid } - fn flow(&self) -> Flow { - Flow::Ingress + fn flow(&self) -> InterceptorFlow { + InterceptorFlow::Ingress } } diff --git a/zenoh/src/net/routing/interceptor/authorization.rs b/zenoh/src/net/routing/interceptor/authorization.rs index 3154f58751..100626376a 100644 --- a/zenoh/src/net/routing/interceptor/authorization.rs +++ b/zenoh/src/net/routing/interceptor/authorization.rs @@ -19,7 +19,9 @@ //! [Click here for Zenoh's documentation](../zenoh/index.html) use ahash::RandomState; use std::collections::HashMap; -use zenoh_config::{AclConfig, AclConfigRules, Action, Flow, Permission, PolicyRule, Subject}; +use zenoh_config::{ + AclConfig, AclConfigRules, Action, InterceptorFlow, Permission, PolicyRule, Subject, +}; use zenoh_keyexpr::keyexpr; use zenoh_keyexpr::keyexpr_tree::{IKeyExprTree, IKeyExprTreeMut, KeBoxTree}; use zenoh_result::ZResult; @@ -84,16 +86,16 @@ pub struct FlowPolicy { } impl FlowPolicy { - fn flow(&self, flow: Flow) -> &ActionPolicy { + fn flow(&self, flow: InterceptorFlow) -> &ActionPolicy { match flow { - Flow::Ingress => &self.ingress, - Flow::Egress => &self.egress, + InterceptorFlow::Ingress => &self.ingress, + InterceptorFlow::Egress => &self.egress, } } - fn flow_mut(&mut self, flow: Flow) -> &mut ActionPolicy { + fn flow_mut(&mut self, flow: InterceptorFlow) -> &mut ActionPolicy { match flow { - Flow::Ingress => &mut self.ingress, - Flow::Egress => &mut self.egress, + InterceptorFlow::Ingress => &mut self.ingress, + InterceptorFlow::Egress => &mut self.egress, } } } @@ -175,10 +177,10 @@ impl PolicyEnforcer { ) -> ZResult { let mut policy_rules: Vec = Vec::new(); for config_rule in config_rule_set { - for subject in &config_rule.interface { - for flow in &config_rule.flow { - for action in &config_rule.action { - for key_expr in &config_rule.key_expr { + for subject in &config_rule.interfaces { + for flow in &config_rule.flows { + for action in &config_rule.actions { + for key_expr in &config_rule.key_exprs { policy_rules.push(PolicyRule { subject: Subject::Interface(subject.clone()), key_expr: key_expr.clone(), @@ -195,8 +197,10 @@ impl PolicyEnforcer { let mut counter = 1; //starting at 1 since 0 is the init value and should not match anything for rule in policy_rules.iter() { - subject_map.insert(rule.subject.clone(), counter); - counter += 1; + if !subject_map.contains_key(&rule.subject) { + subject_map.insert(rule.subject.clone(), counter); + counter += 1; + } } Ok(PolicyInformation { subject_map, @@ -211,7 +215,7 @@ impl PolicyEnforcer { pub fn policy_decision_point( &self, subject: i32, - flow: Flow, + flow: InterceptorFlow, action: Action, key_expr: &str, ) -> ZResult { 
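To make the rule flattening above concrete: each configured rule is expanded into the cartesian product of its interfaces x flows x actions x key_exprs, and the contains_key guard added in this hunk ensures a subject that shows up in several expanded rules keeps a single numeric id. The sketch below is self-contained and uses simplified stand-in types (a string-based Subject and a FlatRule struct in place of zenoh-config's Subject::Interface and PolicyRule); it is not the zenoh code itself.

use std::collections::HashMap;

// Stand-in for zenoh_config::Subject::Interface(..).
#[derive(Clone, PartialEq, Eq, Hash, Debug)]
struct Subject(String);

// Stand-in for zenoh_config::PolicyRule.
#[derive(Debug)]
#[allow(dead_code)]
struct FlatRule {
    subject: Subject,
    key_expr: String,
    action: String,
    flow: String,
}

// One config rule flattens into interfaces x flows x actions x key_exprs entries.
fn flatten(interfaces: &[&str], flows: &[&str], actions: &[&str], key_exprs: &[&str]) -> Vec<FlatRule> {
    let mut out = Vec::new();
    for i in interfaces {
        for f in flows {
            for a in actions {
                for k in key_exprs {
                    out.push(FlatRule {
                        subject: Subject(i.to_string()),
                        key_expr: k.to_string(),
                        action: a.to_string(),
                        flow: f.to_string(),
                    });
                }
            }
        }
    }
    out
}

fn main() {
    let rules = flatten(
        &["lo", "lo0"],
        &["egress", "ingress"],
        &["put", "declare_subscriber"],
        &["test/demo"],
    );
    assert_eq!(rules.len(), 8); // 2 interfaces x 2 flows x 2 actions x 1 key_expr

    // Subject ids start at 1 (0 is the init value and should not match anything),
    // and the contains_key guard keeps one id per distinct subject.
    let mut subject_map: HashMap<Subject, i32> = HashMap::new();
    let mut counter = 1;
    for rule in &rules {
        if !subject_map.contains_key(&rule.subject) {
            subject_map.insert(rule.subject.clone(), counter);
            counter += 1;
        }
    }
    assert_eq!(subject_map.len(), 2);
}

On the enforcement side, the generic action() helper introduced in access_control.rs keeps the same precedence as the four per-action methods it replaces: the decision starts from default_permission, an explicit Allow for any matching subject short-circuits the loop, and a Deny keeps scanning the remaining subjects.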
diff --git a/zenoh/tests/acl.rs b/zenoh/tests/acl.rs index eff62ac960..7fc41a7f6e 100644 --- a/zenoh/tests/acl.rs +++ b/zenoh/tests/acl.rs @@ -1,28 +1,22 @@ -use interfaces::Interface; use std::sync::{Arc, Mutex}; use zenoh::prelude::sync::*; use zenoh_config::Config; use zenoh_core::zlock; - #[test] +#[cfg(not(target_os = "windows"))] fn test_acl() { env_logger::init(); - let mut config_router = Config::default(); - config_router.set_mode(Some(WhatAmI::Router)).unwrap(); - config_router - .listen - .set_endpoints(vec![ - "tcp/localhost:7447".parse().unwrap(), - "tcp/localhost:7448".parse().unwrap(), - ]) - .unwrap(); - - config_router - .scouting - .multicast - .set_enabled(Some(false)) - .unwrap(); + test_pub_sub_allow(); + test_pub_sub_deny(); + test_pub_sub_allow_then_deny(); + test_pub_sub_deny_then_allow(); + test_get_queryable_allow(); + test_get_queryable_allow_then_deny(); + test_get_queryable_deny(); + test_get_queryable_deny_then_allow(); +} +fn test_pub_sub_deny() { let mut config_sub = Config::default(); config_sub.set_mode(Some(WhatAmI::Client)).unwrap(); config_sub @@ -39,7 +33,7 @@ fn test_acl() { config_pub.set_mode(Some(WhatAmI::Client)).unwrap(); config_pub .connect - .set_endpoints(vec!["tcp/localhost:7448".parse().unwrap()]) + .set_endpoints(vec!["tcp/localhost:7447".parse().unwrap()]) .unwrap(); config_pub @@ -47,50 +41,19 @@ fn test_acl() { .multicast .set_enabled(Some(false)) .unwrap(); + let mut config_router = Config::default(); + config_router.set_mode(Some(WhatAmI::Router)).unwrap(); + config_router + .listen + .set_endpoints(vec!["tcp/localhost:7447".parse().unwrap()]) + .unwrap(); - let config_qbl = &config_pub; - let config_get = &config_sub; - - test_pub_sub_allow(&config_router, &config_sub, &config_pub); - test_pub_sub_deny(&config_router, &config_sub, &config_pub); - test_get_queryable_allow(&config_router, config_qbl, config_get); - test_get_queryable_deny(&config_router, config_qbl, config_get); - if let Some(loopback_face) = get_loopback_interface() { - test_pub_sub_allow_then_deny( - &config_router, - &config_sub, - &config_pub, - &loopback_face.name, - ); - test_pub_sub_deny_then_allow( - &config_router, - &config_sub, - &config_pub, - &loopback_face.name, - ); - test_get_queryable_allow_then_deny( - &config_router, - config_qbl, - config_get, - &loopback_face.name, - ); - test_get_queryable_deny_then_allow( - &config_router, - config_qbl, - config_get, - &loopback_face.name, - ); - } -} - -fn get_loopback_interface() -> Option { - let mut ifs = Interface::get_all().expect("could not get interfaces"); - ifs.sort_by(|a, b| a.name.cmp(&b.name)); - ifs.into_iter().find(|i| i.is_loopback()) -} - -fn test_pub_sub_deny(config_router: &Config, config_pub: &Config, config_sub: &Config) { - let mut config_router = config_router.clone(); + config_router + .scouting + .multicast + .set_enabled(Some(false)) + .unwrap(); + //let mut config_router = config_router.clone(); config_router .insert_json5( "acl", @@ -112,7 +75,7 @@ fn test_pub_sub_deny(config_router: &Config, config_pub: &Config, config_sub: &C let publisher = pub_session.declare_publisher(KEY_EXPR).res().unwrap(); let received_value = Arc::new(Mutex::new(String::new())); let temp_recv_value = received_value.clone(); - let _subscriber = &sub_session + let _subscriber = sub_session .declare_subscriber(KEY_EXPR) .callback(move |sample| { let mut temp_value = zlock!(temp_recv_value); @@ -125,10 +88,52 @@ fn test_pub_sub_deny(config_router: &Config, config_pub: &Config, config_sub: &C 
publisher.put(VALUE).res().unwrap(); std::thread::sleep(std::time::Duration::from_secs(1)); assert_ne!(*zlock!(received_value), VALUE); + _subscriber.undeclare().res().unwrap(); + publisher.undeclare().res().unwrap(); + pub_session.close().res().unwrap(); + sub_session.close().res().unwrap(); + _session.close().res().unwrap(); } -fn test_pub_sub_allow(config_router: &Config, config_pub: &Config, config_sub: &Config) { - let mut config_router = config_router.clone(); +fn test_pub_sub_allow() { + let mut config_sub = Config::default(); + + config_sub.set_mode(Some(WhatAmI::Client)).unwrap(); + config_sub + .connect + .set_endpoints(vec!["tcp/localhost:7447".parse().unwrap()]) + .unwrap(); + config_sub + .scouting + .multicast + .set_enabled(Some(false)) + .unwrap(); + + let mut config_pub = Config::default(); + config_pub.set_mode(Some(WhatAmI::Client)).unwrap(); + config_pub + .connect + .set_endpoints(vec!["tcp/localhost:7447".parse().unwrap()]) + .unwrap(); + + config_pub + .scouting + .multicast + .set_enabled(Some(false)) + .unwrap(); + let mut config_router = Config::default(); + config_router.set_mode(Some(WhatAmI::Router)).unwrap(); + config_router + .listen + .set_endpoints(vec!["tcp/localhost:7447".parse().unwrap()]) + .unwrap(); + + config_router + .scouting + .multicast + .set_enabled(Some(false)) + .unwrap(); + //let mut config_router = config_router.clone(); config_router .insert_json5( "acl", @@ -146,6 +151,7 @@ fn test_pub_sub_allow(config_router: &Config, config_pub: &Config, config_sub: & const KEY_EXPR: &str = "test/demo"; const VALUE: &str = "zenoh"; + let _session = zenoh::open(config_router).res().unwrap(); let sub_session = zenoh::open(config_sub.clone()).res().unwrap(); let pub_session = zenoh::open(config_pub.clone()).res().unwrap(); @@ -165,42 +171,76 @@ fn test_pub_sub_allow(config_router: &Config, config_pub: &Config, config_sub: & publisher.put(VALUE).res().unwrap(); std::thread::sleep(std::time::Duration::from_secs(1)); assert_eq!(*zlock!(received_value), VALUE); + _subscriber.undeclare().res().unwrap(); + publisher.undeclare().res().unwrap(); + pub_session.close().res().unwrap(); + sub_session.close().res().unwrap(); + _session.close().res().unwrap(); } -fn test_pub_sub_allow_then_deny( - config_router: &Config, - config_pub: &Config, - config_sub: &Config, - interface_name: &str, -) { - let mut config_router = config_router.clone(); - let acl_js = format!( - r#" - {{ - + +fn test_pub_sub_allow_then_deny() { + let mut config_sub = Config::default(); + + config_sub.set_mode(Some(WhatAmI::Client)).unwrap(); + config_sub + .connect + .set_endpoints(vec!["tcp/localhost:7447".parse().unwrap()]) + .unwrap(); + config_sub + .scouting + .multicast + .set_enabled(Some(false)) + .unwrap(); + + let mut config_pub = Config::default(); + config_pub.set_mode(Some(WhatAmI::Client)).unwrap(); + config_pub + .connect + .set_endpoints(vec!["tcp/localhost:7447".parse().unwrap()]) + .unwrap(); + + config_pub + .scouting + .multicast + .set_enabled(Some(false)) + .unwrap(); + let mut config_router = Config::default(); + config_router.set_mode(Some(WhatAmI::Router)).unwrap(); + config_router + .listen + .set_endpoints(vec!["tcp/localhost:7447".parse().unwrap()]) + .unwrap(); + + config_router + .scouting + .multicast + .set_enabled(Some(false)) + .unwrap(); + let acl_js = r#" + { + "enabled": true, "default_permission": "allow", "rules": [ - {{ + { "permission": "deny", - "flow": ["egress"], - "action": [ + "flows": ["egress"], + "actions": [ "put", ], - "key_expr": [ + "key_exprs": 
[ "test/demo" ], - "interface": [ - "{}" + "interfaces": [ + "lo","lo0" ] - }}, + }, ] - - }} - "#, - interface_name - ); - config_router.insert_json5("acl", &acl_js).unwrap(); + + } + "#; + config_router.insert_json5("acl", acl_js).unwrap(); const KEY_EXPR: &str = "test/demo"; const VALUE: &str = "zenoh"; let _session = zenoh::open(config_router).res().unwrap(); @@ -222,44 +262,77 @@ fn test_pub_sub_allow_then_deny( publisher.put(VALUE).res().unwrap(); std::thread::sleep(std::time::Duration::from_secs(1)); assert_ne!(*zlock!(received_value), VALUE); + _subscriber.undeclare().res().unwrap(); + publisher.undeclare().res().unwrap(); + pub_session.close().res().unwrap(); + sub_session.close().res().unwrap(); + _session.close().res().unwrap(); } -fn test_pub_sub_deny_then_allow( - config_router: &Config, - config_pub: &Config, - config_sub: &Config, - interface_name: &str, -) { - let mut config_router = config_router.clone(); - let acl_js = format!( - r#" - {{ - "enabled": true, +fn test_pub_sub_deny_then_allow() { + let mut config_sub = Config::default(); + + config_sub.set_mode(Some(WhatAmI::Client)).unwrap(); + config_sub + .connect + .set_endpoints(vec!["tcp/localhost:7447".parse().unwrap()]) + .unwrap(); + config_sub + .scouting + .multicast + .set_enabled(Some(false)) + .unwrap(); + + let mut config_pub = Config::default(); + config_pub.set_mode(Some(WhatAmI::Client)).unwrap(); + config_pub + .connect + .set_endpoints(vec!["tcp/localhost:7447".parse().unwrap()]) + .unwrap(); + + config_pub + .scouting + .multicast + .set_enabled(Some(false)) + .unwrap(); + let mut config_router = Config::default(); + config_router.set_mode(Some(WhatAmI::Router)).unwrap(); + config_router + .listen + .set_endpoints(vec!["tcp/localhost:7447".parse().unwrap()]) + .unwrap(); + + config_router + .scouting + .multicast + .set_enabled(Some(false)) + .unwrap(); + let acl_js = r#" + { "enabled": true, "default_permission": "deny", "rules": [ - {{ + { "permission": "allow", - "flow": ["egress","ingress"], - "action": [ + "flows": ["egress","ingress"], + "actions": [ "put", "declare_subscriber" ], - "key_expr": [ + "key_exprs": [ "test/demo" ], - "interface": [ - "{}" + "interfaces": [ + "lo","lo0" ] - }}, + }, ] - }} - "#, - interface_name - ); - config_router.insert_json5("acl", &acl_js).unwrap(); + } + "#; + config_router.insert_json5("acl", acl_js).unwrap(); const KEY_EXPR: &str = "test/demo"; const VALUE: &str = "zenoh"; + let _session = zenoh::open(config_router).res().unwrap(); let sub_session = zenoh::open(config_sub.clone()).res().unwrap(); let pub_session = zenoh::open(config_pub.clone()).res().unwrap(); @@ -279,10 +352,51 @@ fn test_pub_sub_deny_then_allow( publisher.put(VALUE).res().unwrap(); std::thread::sleep(std::time::Duration::from_secs(1)); assert_eq!(*zlock!(received_value), VALUE); + _subscriber.undeclare().res().unwrap(); + publisher.undeclare().res().unwrap(); + pub_session.close().res().unwrap(); + sub_session.close().res().unwrap(); + _session.close().res().unwrap(); } -fn test_get_queryable_deny(config_router: &Config, config_qbl: &Config, config_get: &Config) { - let mut config_router = config_router.clone(); +fn test_get_queryable_deny() { + let mut config_sub = Config::default(); + + config_sub.set_mode(Some(WhatAmI::Client)).unwrap(); + config_sub + .connect + .set_endpoints(vec!["tcp/localhost:7447".parse().unwrap()]) + .unwrap(); + config_sub + .scouting + .multicast + .set_enabled(Some(false)) + .unwrap(); + + let mut config_pub = Config::default(); + 
config_pub.set_mode(Some(WhatAmI::Client)).unwrap(); + config_pub + .connect + .set_endpoints(vec!["tcp/localhost:7447".parse().unwrap()]) + .unwrap(); + + config_pub + .scouting + .multicast + .set_enabled(Some(false)) + .unwrap(); + let mut config_router = Config::default(); + config_router.set_mode(Some(WhatAmI::Router)).unwrap(); + config_router + .listen + .set_endpoints(vec!["tcp/localhost:7447".parse().unwrap()]) + .unwrap(); + + config_router + .scouting + .multicast + .set_enabled(Some(false)) + .unwrap(); config_router .insert_json5( "acl", @@ -291,7 +405,7 @@ fn test_get_queryable_deny(config_router: &Config, config_qbl: &Config, config_g "default_permission": "deny", "rules": [ - + ] }"#, ) @@ -299,6 +413,8 @@ fn test_get_queryable_deny(config_router: &Config, config_qbl: &Config, config_g const KEY_EXPR: &str = "test/demo"; const VALUE: &str = "zenoh"; + let config_qbl = &config_pub; + let config_get = &config_sub; let _session = zenoh::open(config_router).res().unwrap(); let qbl_session = zenoh::open(config_qbl.clone()).res().unwrap(); let get_session = zenoh::open(config_get.clone()).res().unwrap(); @@ -324,10 +440,50 @@ fn test_get_queryable_deny(config_router: &Config, config_qbl: &Config, config_g } std::thread::sleep(std::time::Duration::from_secs(1)); assert_ne!(received_value, VALUE); + _qbl.undeclare().res().unwrap(); + qbl_session.close().res().unwrap(); + get_session.close().res().unwrap(); + _session.close().res().unwrap(); } -fn test_get_queryable_allow(config_router: &Config, config_qbl: &Config, config_get: &Config) { - let mut config_router = config_router.clone(); +fn test_get_queryable_allow() { + let mut config_sub = Config::default(); + + config_sub.set_mode(Some(WhatAmI::Client)).unwrap(); + config_sub + .connect + .set_endpoints(vec!["tcp/localhost:7447".parse().unwrap()]) + .unwrap(); + config_sub + .scouting + .multicast + .set_enabled(Some(false)) + .unwrap(); + + let mut config_pub = Config::default(); + config_pub.set_mode(Some(WhatAmI::Client)).unwrap(); + config_pub + .connect + .set_endpoints(vec!["tcp/localhost:7447".parse().unwrap()]) + .unwrap(); + + config_pub + .scouting + .multicast + .set_enabled(Some(false)) + .unwrap(); + let mut config_router = Config::default(); + config_router.set_mode(Some(WhatAmI::Router)).unwrap(); + config_router + .listen + .set_endpoints(vec!["tcp/localhost:7447".parse().unwrap()]) + .unwrap(); + + config_router + .scouting + .multicast + .set_enabled(Some(false)) + .unwrap(); config_router .insert_json5( "acl", @@ -343,6 +499,8 @@ fn test_get_queryable_allow(config_router: &Config, config_qbl: &Config, config_ const KEY_EXPR: &str = "test/demo"; const VALUE: &str = "zenoh"; + let config_qbl = &config_pub; + let config_get = &config_sub; let _session = zenoh::open(config_router).res().unwrap(); let qbl_session = zenoh::open(config_qbl.clone()).res().unwrap(); let get_session = zenoh::open(config_get.clone()).res().unwrap(); @@ -368,45 +526,80 @@ fn test_get_queryable_allow(config_router: &Config, config_qbl: &Config, config_ } std::thread::sleep(std::time::Duration::from_secs(1)); assert_eq!(received_value, VALUE); + _qbl.undeclare().res().unwrap(); + qbl_session.close().res().unwrap(); + get_session.close().res().unwrap(); + _session.close().res().unwrap(); } -fn test_get_queryable_allow_then_deny( - config_router: &Config, - config_qbl: &Config, - config_get: &Config, - interface_name: &str, -) { - let mut config_router = config_router.clone(); - let acl_js = format!( - r#" - {{ +fn 
test_get_queryable_allow_then_deny() { + let mut config_sub = Config::default(); + + config_sub.set_mode(Some(WhatAmI::Client)).unwrap(); + config_sub + .connect + .set_endpoints(vec!["tcp/localhost:7447".parse().unwrap()]) + .unwrap(); + config_sub + .scouting + .multicast + .set_enabled(Some(false)) + .unwrap(); + + let mut config_pub = Config::default(); + config_pub.set_mode(Some(WhatAmI::Client)).unwrap(); + config_pub + .connect + .set_endpoints(vec!["tcp/localhost:7447".parse().unwrap()]) + .unwrap(); + + config_pub + .scouting + .multicast + .set_enabled(Some(false)) + .unwrap(); + let mut config_router = Config::default(); + config_router.set_mode(Some(WhatAmI::Router)).unwrap(); + config_router + .listen + .set_endpoints(vec!["tcp/localhost:7447".parse().unwrap()]) + .unwrap(); + + config_router + .scouting + .multicast + .set_enabled(Some(false)) + .unwrap(); + let acl_js = r#" + { "enabled": true, "default_permission": "allow", "rules": [ - {{ + { "permission": "deny", - "flow": ["egress"], - "action": [ + "flows": ["egress"], + "actions": [ "get", "declare_queryable" ], - "key_expr": [ + "key_exprs": [ "test/demo" ], - "interface": [ - "{}" + "interfaces": [ + "lo", + "lo0" ] - }}, + }, ] - }} - "#, - interface_name - ); - config_router.insert_json5("acl", &acl_js).unwrap(); + } + "#; + config_router.insert_json5("acl", acl_js).unwrap(); const KEY_EXPR: &str = "test/demo"; const VALUE: &str = "zenoh"; + let config_qbl = &config_pub; + let config_get = &config_sub; let _session = zenoh::open(config_router).res().unwrap(); let qbl_session = zenoh::open(config_qbl.clone()).res().unwrap(); let get_session = zenoh::open(config_get.clone()).res().unwrap(); @@ -432,45 +625,79 @@ fn test_get_queryable_allow_then_deny( } std::thread::sleep(std::time::Duration::from_secs(1)); assert_ne!(received_value, VALUE); + _qbl.undeclare().res().unwrap(); + qbl_session.close().res().unwrap(); + get_session.close().res().unwrap(); + _session.close().res().unwrap(); } -fn test_get_queryable_deny_then_allow( - config_router: &Config, - config_qbl: &Config, - config_get: &Config, - interface_name: &str, -) { - let mut config_router = config_router.clone(); - let acl_js = format!( - r#" - {{ +fn test_get_queryable_deny_then_allow() { + let mut config_sub = Config::default(); + + config_sub.set_mode(Some(WhatAmI::Client)).unwrap(); + config_sub + .connect + .set_endpoints(vec!["tcp/localhost:7447".parse().unwrap()]) + .unwrap(); + config_sub + .scouting + .multicast + .set_enabled(Some(false)) + .unwrap(); + + let mut config_pub = Config::default(); + config_pub.set_mode(Some(WhatAmI::Client)).unwrap(); + config_pub + .connect + .set_endpoints(vec!["tcp/localhost:7447".parse().unwrap()]) + .unwrap(); + + config_pub + .scouting + .multicast + .set_enabled(Some(false)) + .unwrap(); + let mut config_router = Config::default(); + config_router.set_mode(Some(WhatAmI::Router)).unwrap(); + config_router + .listen + .set_endpoints(vec!["tcp/localhost:7447".parse().unwrap()]) + .unwrap(); + + config_router + .scouting + .multicast + .set_enabled(Some(false)) + .unwrap(); + let acl_js = r#" + { "enabled": true, "default_permission": "deny", "rules": [ - {{ + { "permission": "allow", - "flow": ["egress","ingress"], - "action": [ + "flows": ["egress","ingress"], + "actions": [ "get", "declare_queryable" ], - "key_expr": [ + "key_exprs": [ "test/demo" ], - "interface": [ - "{}" + "interfaces": [ + "lo","lo0" ] - }}, + }, ] - }} - "#, - interface_name - ); - config_router.insert_json5("acl", &acl_js).unwrap(); + } + 
"#; + config_router.insert_json5("acl", acl_js).unwrap(); const KEY_EXPR: &str = "test/demo"; const VALUE: &str = "zenoh"; + let config_qbl = &config_pub; + let config_get = &config_sub; let _session = zenoh::open(config_router).res().unwrap(); let qbl_session = zenoh::open(config_qbl.clone()).res().unwrap(); let get_session = zenoh::open(config_get.clone()).res().unwrap(); @@ -496,4 +723,8 @@ fn test_get_queryable_deny_then_allow( } std::thread::sleep(std::time::Duration::from_secs(1)); assert_eq!(received_value, VALUE); + _qbl.undeclare().res().unwrap(); + qbl_session.close().res().unwrap(); + get_session.close().res().unwrap(); + _session.close().res().unwrap(); } From a63b460edec101cb7c40932b2d5f1c2c5e3f17df Mon Sep 17 00:00:00 2001 From: snehilzs Date: Tue, 9 Apr 2024 14:02:44 +0200 Subject: [PATCH 101/122] refactor code --- testsfiles/DEFAULT_CONFIG-pub.json5 | 18 - testsfiles/DEFAULT_CONFIG-sub.json5 | 16 - testsfiles/DEFAULT_CONFIG-test.json5 | 475 --------------------------- 3 files changed, 509 deletions(-) delete mode 100644 testsfiles/DEFAULT_CONFIG-pub.json5 delete mode 100644 testsfiles/DEFAULT_CONFIG-sub.json5 delete mode 100644 testsfiles/DEFAULT_CONFIG-test.json5 diff --git a/testsfiles/DEFAULT_CONFIG-pub.json5 b/testsfiles/DEFAULT_CONFIG-pub.json5 deleted file mode 100644 index 6a039c3ce5..0000000000 --- a/testsfiles/DEFAULT_CONFIG-pub.json5 +++ /dev/null @@ -1,18 +0,0 @@ -{ - mode: "client", - connect: { - endpoints: [ - /// "tcp/192.168.11.1:7447", - "tcp/127.0.0.1:7447", - - ], - }, - scouting: { - multicast: { - enabled: false, - }, - gossip: { - enabled: false, - }, - }, -} \ No newline at end of file diff --git a/testsfiles/DEFAULT_CONFIG-sub.json5 b/testsfiles/DEFAULT_CONFIG-sub.json5 deleted file mode 100644 index 7aad9f7818..0000000000 --- a/testsfiles/DEFAULT_CONFIG-sub.json5 +++ /dev/null @@ -1,16 +0,0 @@ -{ - mode: "client", - connect: { - endpoints: [ - "tcp/127.0.0.1:7448", - ], - }, - scouting: { - multicast: { - enabled: false, - }, - gossip: { - enabled: false, - }, - }, -} \ No newline at end of file diff --git a/testsfiles/DEFAULT_CONFIG-test.json5 b/testsfiles/DEFAULT_CONFIG-test.json5 deleted file mode 100644 index 0969054e1f..0000000000 --- a/testsfiles/DEFAULT_CONFIG-test.json5 +++ /dev/null @@ -1,475 +0,0 @@ -// { -// mode: "router", -// listen: { -// endpoints: [ -// "tcp/127.0.0.1:7447", -// "tcp/127.0.0.1:7448", -// ], -// }, -// scouting: { -// multicast: { -// enabled: false, -// }, -// gossip: { -// enabled: false, -// }, -// }, -// acl: { -// ///[true/false] acl will be activated only if this is set to true -// "enabled": true, -// ///[deny/allow] default permission is deny (even if this is left empty or not specified) -// "default_permission": "allow", -// ///rule set for permissions allowing or denying access to key-expressions -// "rules": -// [ -// { -// "action": [ -// "put", -// "declare_subscriber", -// ], -// "flow":["egress","ingress"], -// "permission": "allow", -// "key_expr": [ -// "test/thr" -// ], -// "interface": [ -// "lo0" -// ] -// }, -// ] -// }, - -// } - - -/// This file attempts to list and document available configuration elements. -/// For a more complete view of the configuration's structure, check out `zenoh/src/config.rs`'s `Config` structure. -/// Note that the values here are correctly typed, but may not be sensible, so copying this file to change only the parts that matter to you is not good practice. 
-{ - /// The identifier (as unsigned 128bit integer in hexadecimal lowercase - leading zeros are not accepted) - /// that zenoh runtime will use. - /// If not set, a random unsigned 128bit integer will be used. - /// WARNING: this id must be unique in your zenoh network. - // id: "1234567890abcdef", - - /// The node's mode (router, peer or client) - mode: "router", - - /// The node's metadata (name, location, DNS name, etc.) Arbitrary JSON data not interpreted by zenohd and available in admin space @/router/ - - - /// Which endpoints to connect to. E.g. tcp/localhost:7447. - /// By configuring the endpoints, it is possible to tell zenoh which router/peer to connect to at startup. - /// For TCP/UDP on Linux, it is possible additionally specify the interface to be connected to: - /// E.g. tcp/192.168.0.1:7447#iface=eth0, for connect only if the IP address is reachable via the interface eth0 - - - /// Which endpoints to listen on. E.g. tcp/localhost:7447. - /// By configuring the endpoints, it is possible to tell zenoh which are the endpoints that other routers, - /// peers, or client can use to establish a zenoh session. - /// For TCP/UDP on Linux, it is possible additionally specify the interface to be listened to: - /// E.g. tcp/0.0.0.0:7447#iface=eth0, for listen connection only on eth0 - listen: { - /// timeout waiting for all listen endpoints (0: no retry, -1: infinite timeout) - /// Accepts a single value or different values for router, peer and client. - timeout_ms: 0, - - endpoints: [ - // "/
" - "tcp/127.0.0.1:7447", - "tcp/127.0.0.1:7448", - ], - - /// Global listen configuration, - /// Accepts a single value or different values for router, peer and client. - /// The configuration can also be specified for the separate endpoint - /// it will override the global one - /// E.g. tcp/192.168.0.1:7447#exit_on_failure=false;retry_period_max_ms=1000" - - /// exit from application, if timeout exceed - exit_on_failure: true, - /// listen retry configuration - retry: { - /// intial wait timeout until next try - period_init_ms: 1000, - /// maximum wait timeout until next try - period_max_ms: 4000, - /// increase factor for the next timeout until next try - period_increase_factor: 2, - }, - }, - /// Configure the scouting mechanisms and their behaviours - scouting: { - multicast: { - enabled: false, - }, - gossip: { - enabled: false, - }, - }, - - - - // /// The declarations aggregation strategy. - // aggregation: { - // /// A list of key-expressions for which all included subscribers will be aggregated into. - // subscribers: [ - // // key_expression - // ], - // /// A list of key-expressions for which all included publishers will be aggregated into. - // publishers: [ - // // key_expression - // ], - // }, - - // /// The downsampling declaration. - // downsampling: [ - // { - // /// A list of network interfaces messages will be processed on, the rest will be passed as is. - // interfaces: [ "wlan0" ], - // /// Data flow messages will be processed on. ("egress" or "ingress") - // flow: "egress", - // /// A list of downsampling rules: key_expression and the maximum frequency in Hertz - // rules: [ - // { key_expr: "demo/example/zenoh-rs-pub", freq: 0.1 }, - // ], - // }, - // ], - /// configure acl rules - acl: { - - ///[true/false] acl will be activated only if this is set to true - "enabled": true, - ///[deny/allow] default permission is deny (even if this is left empty or not specified) - "default_permission": "deny", - ///rule set for permissions allowing or denying access to key-expressions - "rules": - [ - { - "actions": [ - "put", - "declare_subscriber" - ], - "flows":["egress","ingress"], - "permission": "allow", - "key_exprs": [ - "test/thr" - ], - "interfaces": [ - "lo0" - ] - }, - ] - }, - /// Configure internal transport parameters - transport: { - unicast: { - /// Timeout in milliseconds when opening a link - accept_timeout: 10000, - /// Maximum number of zenoh session in pending state while accepting - accept_pending: 100, - /// Maximum number of sessions that can be simultaneously alive - max_sessions: 1000, - /// Maximum number of incoming links that are admitted per session - max_links: 1, - /// Enables the LowLatency transport - /// This option does not make LowLatency transport mandatory, the actual implementation of transport - /// used will depend on Establish procedure and other party's settings - /// - /// NOTE: Currently, the LowLatency transport doesn't preserve QoS prioritization. - /// NOTE: Due to the note above, 'lowlatency' is incompatible with 'qos' option, so in order to - /// enable 'lowlatency' you need to explicitly disable 'qos'. - lowlatency: false, - /// Enables QoS on unicast communications. - qos: { - enabled: true, - }, - /// Enables compression on unicast communications. - /// Compression capabilities are negotiated during session establishment. - /// If both Zenoh nodes support compression, then compression is activated. - compression: { - enabled: false, - }, - }, - multicast: { - /// Enables QoS on multicast communication. 
- /// Default to false for Zenoh-to-Zenoh-Pico out-of-the-box compatibility. - qos: { - enabled: false, - }, - /// Enables compression on multicast communication. - /// Default to false for Zenoh-to-Zenoh-Pico out-of-the-box compatibility. - compression: { - enabled: false, - }, - }, - link: { - /// An optional whitelist of protocols to be used for accepting and opening sessions. - /// If not configured, all the supported protocols are automatically whitelisted. - /// The supported protocols are: ["tcp" , "udp", "tls", "quic", "ws", "unixsock-stream", "vsock"] - /// For example, to only enable "tls" and "quic": - // protocols: ["tls", "quic"], - /// Configure the zenoh TX parameters of a link - tx: { - /// The resolution in bits to be used for the message sequence numbers. - /// When establishing a session with another Zenoh instance, the lowest value of the two instances will be used. - /// Accepted values: 8bit, 16bit, 32bit, 64bit. - sequence_number_resolution: "32bit", - /// Link lease duration in milliseconds to announce to other zenoh nodes - lease: 10000, - /// Number of keep-alive messages in a link lease duration. If no data is sent, keep alive - /// messages will be sent at the configured time interval. - /// NOTE: In order to consider eventual packet loss and transmission latency and jitter, - /// set the actual keep_alive interval to one fourth of the lease time: i.e. send - /// 4 keep_alive messages in a lease period. Changing the lease time will have the - /// keep_alive messages sent more or less often. - /// This is in-line with the ITU-T G.8013/Y.1731 specification on continous connectivity - /// check which considers a link as failed when no messages are received in 3.5 times the - /// target interval. - keep_alive: 4, - /// Batch size in bytes is expressed as a 16bit unsigned integer. - /// Therefore, the maximum batch size is 2^16-1 (i.e. 65535). - /// The default batch size value is the maximum batch size: 65535. - batch_size: 65535, - /// Each zenoh link has a transmission queue that can be configured - queue: { - /// The size of each priority queue indicates the number of batches a given queue can contain. - /// The amount of memory being allocated for each queue is then SIZE_XXX * BATCH_SIZE. - /// In the case of the transport link MTU being smaller than the ZN_BATCH_SIZE, - /// then amount of memory being allocated for each queue is SIZE_XXX * LINK_MTU. - /// If qos is false, then only the DATA priority will be allocated. - size: { - control: 1, - real_time: 1, - interactive_high: 1, - interactive_low: 1, - data_high: 2, - data: 4, - data_low: 4, - background: 4, - }, - /// Congestion occurs when the queue is empty (no available batch). - /// Using CongestionControl::Block the caller is blocked until a batch is available and re-insterted into the queue. - /// Using CongestionControl::Drop the message might be dropped, depending on conditions configured here. - congestion_control: { - /// The maximum time in microseconds to wait for an available batch before dropping the message if still no batch is available. - wait_before_drop: 1000 - }, - /// The initial exponential backoff time in nanoseconds to allow the batching to eventually progress. - /// Higher values lead to a more aggressive batching but it will introduce additional latency. 
- backoff: 100, - }, - // Number of threads dedicated to transmission - // By default, the number of threads is calculated as follows: 1 + ((#cores - 1) / 4) - // threads: 4, - }, - /// Configure the zenoh RX parameters of a link - rx: { - /// Receiving buffer size in bytes for each link - /// The default the rx_buffer_size value is the same as the default batch size: 65335. - /// For very high throughput scenarios, the rx_buffer_size can be increased to accomodate - /// more in-flight data. This is particularly relevant when dealing with large messages. - /// E.g. for 16MiB rx_buffer_size set the value to: 16777216. - buffer_size: 65535, - /// Maximum size of the defragmentation buffer at receiver end. - /// Fragmented messages that are larger than the configured size will be dropped. - /// The default value is 1GiB. This would work in most scenarios. - /// NOTE: reduce the value if you are operating on a memory constrained device. - max_message_size: 1073741824, - }, - /// Configure TLS specific parameters - tls: { - /// Path to the certificate of the certificate authority used to validate either the server - /// or the client's keys and certificates, depending on the node's mode. If not specified - /// on router mode then the default WebPKI certificates are used instead. - root_ca_certificate: null, - /// Path to the TLS server private key - server_private_key: null, - /// Path to the TLS server public certificate - server_certificate: null, - /// Client authentication, if true enables mTLS (mutual authentication) - client_auth: false, - /// Path to the TLS client private key - client_private_key: null, - /// Path to the TLS client public certificate - client_certificate: null, - // Whether or not to use server name verification, if set to false zenoh will disregard the common names of the certificates when verifying servers. - // This could be dangerous because your CA can have signed a server cert for foo.com, that's later being used to host a server at baz.com. If you wan't your - // ca to verify that the server at baz.com is actually baz.com, let this be true (default). - server_name_verification: null, - }, - }, - /// Shared memory configuration - shared_memory: { - enabled: false, - }, - /// Access control configuration - auth: { - /// The configuration of authentification. - /// A password implies a username is required. - usrpwd: { - user: null, - password: null, - /// The path to a file containing the user password dictionary - dictionary_file: null, - }, - pubkey: { - public_key_pem: null, - private_key_pem: null, - public_key_file: null, - private_key_file: null, - key_size: null, - known_keys_file: null, - }, - }, - }, - - /// Configure the Admin Space - /// Unstable: this configuration part works as advertised, but may change in a future release - adminspace: { - // read and/or write permissions on the admin space - permissions: { - read: true, - write: false, - }, - }, - - /// - /// Plugins configurations - /// - // /// Directories where plugins configured by name should be looked for. Plugins configured by __path__ are not subject to lookup - // plugins_search_dirs: [], - // /// Plugins are only loaded if present in the configuration. When starting - // /// Once loaded, they may react to changes in the configuration made through the zenoh instance's adminspace. 
- // plugins: { - // /// If no `__path__` is given to a plugin, zenohd will automatically search for a shared library matching the plugin's name (here, `libzenoh_plugin_rest.so` would be searched for on linux) - // - // /// Plugin settings may contain field `__config__` - // /// - If `__config__` is specified, it's content is merged into plugin configuration - // /// - Properties loaded from `__config__` file overrides existing properties - // /// - If json objects in loaded file contains `__config__` properties, they are processed recursively - // /// This is used in the 'storcge_manager' which supports subplugins, each with it's own config - // /// - // /// See below exapmle of plugin configuration using `__config__` property - // - // /// Configure the REST API plugin - // rest: { - // /// Setting this option to true allows zenohd to panic should it detect issues with this plugin. Setting it to false politely asks the plugin not to panic. - // __required__: true, // defaults to false - // /// load configuration from the file - // __config__: "./plugins/zenoh-plugin-rest/config.json5", - // /// http port to answer to rest requests - // http_port: 8000, - // }, - // - // /// Configure the storage manager plugin - // storage_manager: { - // /// When a path is present, automatic search is disabled, and zenohd will instead select the first path which manages to load. - // __path__: [ - // "./target/release/libzenoh_plugin_storage_manager.so", - // "./target/release/libzenoh_plugin_storage_manager.dylib", - // ], - // /// Directories where plugins configured by name should be looked for. Plugins configured by __path__ are not subject to lookup - // backend_search_dirs: [], - // /// The "memory" volume is always available, but you may create other volumes here, with various backends to support the actual storing. - // volumes: { - // /// An influxdb backend is also available at https://github.com/eclipse-zenoh/zenoh-backend-influxdb - // influxdb: { - // url: "https://myinfluxdb.example", - // /// Some plugins may need passwords in their configuration. - // /// To avoid leaking them through the adminspace, they may be masked behind a privacy barrier. - // /// any value held at the key "private" will not be shown in the adminspace. - // private: { - // username: "user1", - // password: "pw1", - // }, - // }, - // influxdb2: { - // /// A second backend of the same type can be spawned using `__path__`, for examples when different DBs are needed. - // backend: "influxdb", - // private: { - // username: "user2", - // password: "pw2", - // }, - // url: "https://localhost:8086", - // }, - // }, - // - // /// Configure the storages supported by the volumes - // storages: { - // demo: { - // /// Storages always need to know what set of keys they must work with. These sets are defined by a key expression. - // key_expr: "demo/memory/**", - // /// Storages also need to know which volume will be used to actually store their key-value pairs. - // /// The "memory" volume is always available, and doesn't require any per-storage options, so requesting "memory" by string is always sufficient. - // volume: "memory", - // }, - // demo2: { - // key_expr: "demo/memory2/**", - // volume: "memory", - // /// Storage manager plugin handles metadata in order to ensure convergence of distributed storages configured in Zenoh. - // /// Metadata includes the set of wild card updates and deletions (tombstones). - // /// Once the samples are guaranteed to be delivered, the metadata can be garbage collected. 
- // garbage_collection: { - // /// The garbage collection event will be periodic with this duration. - // /// The duration is specified in seconds. - // period: 30, - // /// Metadata older than this parameter will be garbage collected. - // /// The duration is specified in seconds. - // lifespan: 86400, - // }, - // /// If multiple storages subscribing to the same key_expr should be synchronized, declare them as replicas. - // /// In the absence of this configuration, a normal storage is initialized - // /// Note: all the samples to be stored in replicas should be timestamped - // replica_config: { - // /// Specifying the parameters is optional, by default the values provided will be used. - // /// Time interval between different synchronization attempts in seconds - // publication_interval: 5, - // /// Expected propagation delay of the network in milliseconds - // propagation_delay: 200, - // /// This is the chunk that you would like your data to be divide into in time, in milliseconds. - // /// Higher the frequency of updates, lower the delta should be chosen - // /// To be efficient, delta should be the time containing no more than 100,000 samples - // delta: 1000, - // } - // }, - // demo3: { - // key_expr: "demo/memory3/**", - // volume: "memory", - // /// A complete storage advertises itself as containing all the known keys matching the configured key expression. - // /// If not configured, complete defaults to false. - // complete: "true", - // }, - // influx_demo: { - // key_expr: "demo/influxdb/**", - // /// This prefix will be stripped of the received keys when storing. - // strip_prefix: "demo/influxdb", - // /// influxdb-backed volumes need a bit more configuration, which is passed like-so: - // volume: { - // id: "influxdb", - // db: "example", - // }, - // }, - // influx_demo2: { - // key_expr: "demo/influxdb2/**", - // strip_prefix: "demo/influxdb2", - // volume: { - // id: "influxdb2", - // db: "example", - // }, - // }, - // }, - // }, - // }, - - // /// Plugin configuration example using `__config__` property - // plugins: { - // rest: { - // __config__: "./plugins/zenoh-plugin-rest/config.json5", - // }, - // storage_manager: { - // __config__: "./plugins/zenoh-plugin-storage-manager/config.json5", - // } - // }, - -} From aeda02073d4c7ba9f516042bd1a6346b36a6119e Mon Sep 17 00:00:00 2001 From: snehilzs Date: Wed, 10 Apr 2024 12:19:14 +0200 Subject: [PATCH 102/122] refactor code --- Cargo.lock | 37 ------------------------ Cargo.toml | 3 +- commons/zenoh-config/src/lib.rs | 1 + zenoh/Cargo.toml | 1 - zenoh/src/net/routing/interceptor/mod.rs | 2 -- 5 files changed, 2 insertions(+), 42 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6c2f012ffc..e497fc138b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1460,20 +1460,6 @@ version = "1.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eabb4a44450da02c90444cf74558da904edde8fb4e9035a9a6a4e15445af0bd7" -[[package]] -name = "handlebars" -version = "3.5.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4498fc115fa7d34de968184e473529abb40eeb6be8bc5f7faba3d08c316cb3e3" -dependencies = [ - "log", - "pest", - "pest_derive", - "quick-error", - "serde", - "serde_json", -] - [[package]] name = "hashbrown" version = "0.13.2" @@ -1677,22 +1663,6 @@ dependencies = [ "cfg-if 1.0.0", ] -[[package]] -name = "interfaces" -version = "0.0.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"bb6250a98af259a26fd5a4a6081fccea9ac116e4c3178acf4aeb86d32d2b7715" -dependencies = [ - "bitflags 2.4.2", - "cc", - "handlebars", - "lazy_static", - "libc", - "nix 0.26.4", - "serde", - "serde_derive", -] - [[package]] name = "io-lifetimes" version = "1.0.11" @@ -2489,12 +2459,6 @@ dependencies = [ "unicode-ident", ] -[[package]] -name = "quick-error" -version = "2.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a993555f31e5a609f617c12db6250dedcac1b0a85076912c436e6fc9b2c8e6a3" - [[package]] name = "quinn" version = "0.10.2" @@ -4420,7 +4384,6 @@ dependencies = [ "form_urlencoded", "futures", "git-version", - "interfaces", "lazy_static", "log", "ordered-float", diff --git a/Cargo.toml b/Cargo.toml index 76af8f96f3..a3e1f5965c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -103,7 +103,6 @@ hmac = { version = "0.12.1", features = ["std"] } home = "0.5.4" http-types = "2.12.0" humantime = "2.1.0" -interfaces = "0.0.9" json5 = "0.4.1" jsonschema = { version = "0.17.1", default-features = false } keyed-set = "0.4.4" @@ -218,4 +217,4 @@ debug = false # If you want debug symbol in release mode, set the env variab lto = "fat" codegen-units = 1 opt-level = 3 -panic = "abort" \ No newline at end of file +panic = "abort" diff --git a/commons/zenoh-config/src/lib.rs b/commons/zenoh-config/src/lib.rs index 21156958bb..98d7819b3b 100644 --- a/commons/zenoh-config/src/lib.rs +++ b/commons/zenoh-config/src/lib.rs @@ -15,6 +15,7 @@ //! Configuration to pass to `zenoh::open()` and `zenoh::scout()` functions and associated constants. pub mod defaults; mod include; + use include::recursive_include; use secrecy::{CloneableSecret, DebugSecret, Secret, SerializableSecret, Zeroize}; use serde::{Deserialize, Serialize}; diff --git a/zenoh/Cargo.toml b/zenoh/Cargo.toml index 8e58eebb6f..e5d2618fba 100644 --- a/zenoh/Cargo.toml +++ b/zenoh/Cargo.toml @@ -76,7 +76,6 @@ flume = { workspace = true } form_urlencoded = { workspace = true } futures = { workspace = true } git-version = { workspace = true } -interfaces = { workspace = true } lazy_static = { workspace = true } log = { workspace = true } ordered-float = { workspace = true } diff --git a/zenoh/src/net/routing/interceptor/mod.rs b/zenoh/src/net/routing/interceptor/mod.rs index b8009860ec..abeb2d38c0 100644 --- a/zenoh/src/net/routing/interceptor/mod.rs +++ b/zenoh/src/net/routing/interceptor/mod.rs @@ -61,12 +61,10 @@ pub(crate) type InterceptorFactory = Box ZResult> { let mut res: Vec = vec![]; - // Uncomment to log the interceptors initialisation // res.push(Box::new(LoggerInterceptor {})); res.extend(downsampling_interceptor_factories(config.downsampling())?); res.extend(acl_interceptor_factories(config.acl())?); - Ok(res) } From a8b88b6422d7a61a674cc90b894a970093cde70f Mon Sep 17 00:00:00 2001 From: snehilzs Date: Wed, 10 Apr 2024 15:09:32 +0200 Subject: [PATCH 103/122] refactor code for review changes --- commons/zenoh-config/src/lib.rs | 5 +- testsfiles/DEFAULT_CONFIG-pub.json5 | 18 + testsfiles/DEFAULT_CONFIG-sub.json5 | 16 + testsfiles/DEFAULT_CONFIG-test.json5 | 474 ++++++++++++++++++ .../net/routing/interceptor/access_control.rs | 33 +- .../net/routing/interceptor/authorization.rs | 48 +- .../net/routing/interceptor/downsampling.rs | 8 +- zenoh/tests/acl.rs | 2 - 8 files changed, 570 insertions(+), 34 deletions(-) create mode 100644 testsfiles/DEFAULT_CONFIG-pub.json5 create mode 100644 testsfiles/DEFAULT_CONFIG-sub.json5 create mode 100644 testsfiles/DEFAULT_CONFIG-test.json5 diff --git a/commons/zenoh-config/src/lib.rs 
b/commons/zenoh-config/src/lib.rs index 98d7819b3b..c074c47312 100644 --- a/commons/zenoh-config/src/lib.rs +++ b/commons/zenoh-config/src/lib.rs @@ -75,7 +75,7 @@ pub type SecretValue = Secret; #[derive(Debug, Deserialize, Serialize, Clone, Copy)] #[serde(rename_all = "lowercase")] -pub enum DownsamplingFlow { +pub enum InterceptorFlow { Egress, Ingress, } @@ -97,7 +97,7 @@ pub struct DownsamplingItemConf { /// A list of interfaces to which the downsampling will be applied. pub rules: Vec, /// Downsampling flow direction: egress, ingress - pub flow: DownsamplingFlow, + pub flow: InterceptorFlow, } #[derive(Serialize, Debug, Deserialize, Clone)] @@ -141,7 +141,6 @@ pub enum Permission { Deny, } -pub type InterceptorFlow = DownsamplingFlow; pub trait ConfigValidator: Send + Sync { fn check_config( &self, diff --git a/testsfiles/DEFAULT_CONFIG-pub.json5 b/testsfiles/DEFAULT_CONFIG-pub.json5 new file mode 100644 index 0000000000..6a039c3ce5 --- /dev/null +++ b/testsfiles/DEFAULT_CONFIG-pub.json5 @@ -0,0 +1,18 @@ +{ + mode: "client", + connect: { + endpoints: [ + /// "tcp/192.168.11.1:7447", + "tcp/127.0.0.1:7447", + + ], + }, + scouting: { + multicast: { + enabled: false, + }, + gossip: { + enabled: false, + }, + }, +} \ No newline at end of file diff --git a/testsfiles/DEFAULT_CONFIG-sub.json5 b/testsfiles/DEFAULT_CONFIG-sub.json5 new file mode 100644 index 0000000000..7aad9f7818 --- /dev/null +++ b/testsfiles/DEFAULT_CONFIG-sub.json5 @@ -0,0 +1,16 @@ +{ + mode: "client", + connect: { + endpoints: [ + "tcp/127.0.0.1:7448", + ], + }, + scouting: { + multicast: { + enabled: false, + }, + gossip: { + enabled: false, + }, + }, +} \ No newline at end of file diff --git a/testsfiles/DEFAULT_CONFIG-test.json5 b/testsfiles/DEFAULT_CONFIG-test.json5 new file mode 100644 index 0000000000..f9e26594ad --- /dev/null +++ b/testsfiles/DEFAULT_CONFIG-test.json5 @@ -0,0 +1,474 @@ +// { +// mode: "router", +// listen: { +// endpoints: [ +// "tcp/127.0.0.1:7447", +// "tcp/127.0.0.1:7448", +// ], +// }, +// scouting: { +// multicast: { +// enabled: false, +// }, +// gossip: { +// enabled: false, +// }, +// }, +// acl: { +// ///[true/false] acl will be activated only if this is set to true +// "enabled": true, +// ///[deny/allow] default permission is deny (even if this is left empty or not specified) +// "default_permission": "allow", +// ///rule set for permissions allowing or denying access to key-expressions +// "rules": +// [ +// { +// "action": [ +// "put", +// "declare_subscriber", +// ], +// "flow":["egress","ingress"], +// "permission": "allow", +// "key_expr": [ +// "test/thr" +// ], +// "interface": [ +// "lo0" +// ] +// }, +// ] +// }, + +// } + + +/// This file attempts to list and document available configuration elements. +/// For a more complete view of the configuration's structure, check out `zenoh/src/config.rs`'s `Config` structure. +/// Note that the values here are correctly typed, but may not be sensible, so copying this file to change only the parts that matter to you is not good practice. +{ + /// The identifier (as unsigned 128bit integer in hexadecimal lowercase - leading zeros are not accepted) + /// that zenoh runtime will use. + /// If not set, a random unsigned 128bit integer will be used. + /// WARNING: this id must be unique in your zenoh network. + // id: "1234567890abcdef", + + /// The node's mode (router, peer or client) + mode: "router", + + /// The node's metadata (name, location, DNS name, etc.) 
Arbitrary JSON data not interpreted by zenohd and available in admin space @/router/ + + + /// Which endpoints to connect to. E.g. tcp/localhost:7447. + /// By configuring the endpoints, it is possible to tell zenoh which router/peer to connect to at startup. + /// For TCP/UDP on Linux, it is possible additionally specify the interface to be connected to: + /// E.g. tcp/192.168.0.1:7447#iface=eth0, for connect only if the IP address is reachable via the interface eth0 + + + /// Which endpoints to listen on. E.g. tcp/localhost:7447. + /// By configuring the endpoints, it is possible to tell zenoh which are the endpoints that other routers, + /// peers, or client can use to establish a zenoh session. + /// For TCP/UDP on Linux, it is possible additionally specify the interface to be listened to: + /// E.g. tcp/0.0.0.0:7447#iface=eth0, for listen connection only on eth0 + listen: { + /// timeout waiting for all listen endpoints (0: no retry, -1: infinite timeout) + /// Accepts a single value or different values for router, peer and client. + timeout_ms: 0, + + endpoints: [ + // "/
" + "tcp/127.0.0.1:7447", + "tcp/127.0.0.1:7448", + ], + + /// Global listen configuration, + /// Accepts a single value or different values for router, peer and client. + /// The configuration can also be specified for the separate endpoint + /// it will override the global one + /// E.g. tcp/192.168.0.1:7447#exit_on_failure=false;retry_period_max_ms=1000" + + /// exit from application, if timeout exceed + exit_on_failure: true, + /// listen retry configuration + retry: { + /// intial wait timeout until next try + period_init_ms: 1000, + /// maximum wait timeout until next try + period_max_ms: 4000, + /// increase factor for the next timeout until next try + period_increase_factor: 2, + }, + }, + /// Configure the scouting mechanisms and their behaviours + scouting: { + multicast: { + enabled: false, + }, + gossip: { + enabled: false, + }, + }, + + + + // /// The declarations aggregation strategy. + // aggregation: { + // /// A list of key-expressions for which all included subscribers will be aggregated into. + // subscribers: [ + // // key_expression + // ], + // /// A list of key-expressions for which all included publishers will be aggregated into. + // publishers: [ + // // key_expression + // ], + // }, + + // /// The downsampling declaration. + // downsampling: [ + // { + // /// A list of network interfaces messages will be processed on, the rest will be passed as is. + // interfaces: [ "wlan0" ], + // /// Data flow messages will be processed on. ("egress" or "ingress") + // flow: "egress", + // /// A list of downsampling rules: key_expression and the maximum frequency in Hertz + // rules: [ + // { key_expr: "demo/example/zenoh-rs-pub", freq: 0.1 }, + // ], + // }, + // ], + /// configure acl rules + acl: { + + ///[true/false] acl will be activated only if this is set to true + "enabled": true, + ///[deny/allow] default permission is deny (even if this is left empty or not specified) + "default_permission": "deny", + ///rule set for permissions allowing or denying access to key-expressions + "rules": + [ + { + "actions": [ + "put","declare_subscriber" + ], + "flows":["egress","ingress"], + "permission": "allow", + "key_exprs": [ + "test/thr" + ], + "interfaces": [ + "lo0" + ] + }, + ] + }, + /// Configure internal transport parameters + transport: { + unicast: { + /// Timeout in milliseconds when opening a link + accept_timeout: 10000, + /// Maximum number of zenoh session in pending state while accepting + accept_pending: 100, + /// Maximum number of sessions that can be simultaneously alive + max_sessions: 1000, + /// Maximum number of incoming links that are admitted per session + max_links: 1, + /// Enables the LowLatency transport + /// This option does not make LowLatency transport mandatory, the actual implementation of transport + /// used will depend on Establish procedure and other party's settings + /// + /// NOTE: Currently, the LowLatency transport doesn't preserve QoS prioritization. + /// NOTE: Due to the note above, 'lowlatency' is incompatible with 'qos' option, so in order to + /// enable 'lowlatency' you need to explicitly disable 'qos'. + lowlatency: false, + /// Enables QoS on unicast communications. + qos: { + enabled: true, + }, + /// Enables compression on unicast communications. + /// Compression capabilities are negotiated during session establishment. + /// If both Zenoh nodes support compression, then compression is activated. + compression: { + enabled: false, + }, + }, + multicast: { + /// Enables QoS on multicast communication. 
+ /// Default to false for Zenoh-to-Zenoh-Pico out-of-the-box compatibility. + qos: { + enabled: false, + }, + /// Enables compression on multicast communication. + /// Default to false for Zenoh-to-Zenoh-Pico out-of-the-box compatibility. + compression: { + enabled: false, + }, + }, + link: { + /// An optional whitelist of protocols to be used for accepting and opening sessions. + /// If not configured, all the supported protocols are automatically whitelisted. + /// The supported protocols are: ["tcp" , "udp", "tls", "quic", "ws", "unixsock-stream", "vsock"] + /// For example, to only enable "tls" and "quic": + // protocols: ["tls", "quic"], + /// Configure the zenoh TX parameters of a link + tx: { + /// The resolution in bits to be used for the message sequence numbers. + /// When establishing a session with another Zenoh instance, the lowest value of the two instances will be used. + /// Accepted values: 8bit, 16bit, 32bit, 64bit. + sequence_number_resolution: "32bit", + /// Link lease duration in milliseconds to announce to other zenoh nodes + lease: 10000, + /// Number of keep-alive messages in a link lease duration. If no data is sent, keep alive + /// messages will be sent at the configured time interval. + /// NOTE: In order to consider eventual packet loss and transmission latency and jitter, + /// set the actual keep_alive interval to one fourth of the lease time: i.e. send + /// 4 keep_alive messages in a lease period. Changing the lease time will have the + /// keep_alive messages sent more or less often. + /// This is in-line with the ITU-T G.8013/Y.1731 specification on continous connectivity + /// check which considers a link as failed when no messages are received in 3.5 times the + /// target interval. + keep_alive: 4, + /// Batch size in bytes is expressed as a 16bit unsigned integer. + /// Therefore, the maximum batch size is 2^16-1 (i.e. 65535). + /// The default batch size value is the maximum batch size: 65535. + batch_size: 65535, + /// Each zenoh link has a transmission queue that can be configured + queue: { + /// The size of each priority queue indicates the number of batches a given queue can contain. + /// The amount of memory being allocated for each queue is then SIZE_XXX * BATCH_SIZE. + /// In the case of the transport link MTU being smaller than the ZN_BATCH_SIZE, + /// then amount of memory being allocated for each queue is SIZE_XXX * LINK_MTU. + /// If qos is false, then only the DATA priority will be allocated. + size: { + control: 1, + real_time: 1, + interactive_high: 1, + interactive_low: 1, + data_high: 2, + data: 4, + data_low: 4, + background: 4, + }, + /// Congestion occurs when the queue is empty (no available batch). + /// Using CongestionControl::Block the caller is blocked until a batch is available and re-insterted into the queue. + /// Using CongestionControl::Drop the message might be dropped, depending on conditions configured here. + congestion_control: { + /// The maximum time in microseconds to wait for an available batch before dropping the message if still no batch is available. + wait_before_drop: 1000 + }, + /// The initial exponential backoff time in nanoseconds to allow the batching to eventually progress. + /// Higher values lead to a more aggressive batching but it will introduce additional latency. 
+ backoff: 100, + }, + // Number of threads dedicated to transmission + // By default, the number of threads is calculated as follows: 1 + ((#cores - 1) / 4) + // threads: 4, + }, + /// Configure the zenoh RX parameters of a link + rx: { + /// Receiving buffer size in bytes for each link + /// The default the rx_buffer_size value is the same as the default batch size: 65335. + /// For very high throughput scenarios, the rx_buffer_size can be increased to accomodate + /// more in-flight data. This is particularly relevant when dealing with large messages. + /// E.g. for 16MiB rx_buffer_size set the value to: 16777216. + buffer_size: 65535, + /// Maximum size of the defragmentation buffer at receiver end. + /// Fragmented messages that are larger than the configured size will be dropped. + /// The default value is 1GiB. This would work in most scenarios. + /// NOTE: reduce the value if you are operating on a memory constrained device. + max_message_size: 1073741824, + }, + /// Configure TLS specific parameters + tls: { + /// Path to the certificate of the certificate authority used to validate either the server + /// or the client's keys and certificates, depending on the node's mode. If not specified + /// on router mode then the default WebPKI certificates are used instead. + root_ca_certificate: null, + /// Path to the TLS server private key + server_private_key: null, + /// Path to the TLS server public certificate + server_certificate: null, + /// Client authentication, if true enables mTLS (mutual authentication) + client_auth: false, + /// Path to the TLS client private key + client_private_key: null, + /// Path to the TLS client public certificate + client_certificate: null, + // Whether or not to use server name verification, if set to false zenoh will disregard the common names of the certificates when verifying servers. + // This could be dangerous because your CA can have signed a server cert for foo.com, that's later being used to host a server at baz.com. If you wan't your + // ca to verify that the server at baz.com is actually baz.com, let this be true (default). + server_name_verification: null, + }, + }, + /// Shared memory configuration + shared_memory: { + enabled: false, + }, + /// Access control configuration + auth: { + /// The configuration of authentification. + /// A password implies a username is required. + usrpwd: { + user: null, + password: null, + /// The path to a file containing the user password dictionary + dictionary_file: null, + }, + pubkey: { + public_key_pem: null, + private_key_pem: null, + public_key_file: null, + private_key_file: null, + key_size: null, + known_keys_file: null, + }, + }, + }, + + /// Configure the Admin Space + /// Unstable: this configuration part works as advertised, but may change in a future release + adminspace: { + // read and/or write permissions on the admin space + permissions: { + read: true, + write: false, + }, + }, + + /// + /// Plugins configurations + /// + // /// Directories where plugins configured by name should be looked for. Plugins configured by __path__ are not subject to lookup + // plugins_search_dirs: [], + // /// Plugins are only loaded if present in the configuration. When starting + // /// Once loaded, they may react to changes in the configuration made through the zenoh instance's adminspace. 
+ // plugins: { + // /// If no `__path__` is given to a plugin, zenohd will automatically search for a shared library matching the plugin's name (here, `libzenoh_plugin_rest.so` would be searched for on linux) + // + // /// Plugin settings may contain field `__config__` + // /// - If `__config__` is specified, it's content is merged into plugin configuration + // /// - Properties loaded from `__config__` file overrides existing properties + // /// - If json objects in loaded file contains `__config__` properties, they are processed recursively + // /// This is used in the 'storcge_manager' which supports subplugins, each with it's own config + // /// + // /// See below exapmle of plugin configuration using `__config__` property + // + // /// Configure the REST API plugin + // rest: { + // /// Setting this option to true allows zenohd to panic should it detect issues with this plugin. Setting it to false politely asks the plugin not to panic. + // __required__: true, // defaults to false + // /// load configuration from the file + // __config__: "./plugins/zenoh-plugin-rest/config.json5", + // /// http port to answer to rest requests + // http_port: 8000, + // }, + // + // /// Configure the storage manager plugin + // storage_manager: { + // /// When a path is present, automatic search is disabled, and zenohd will instead select the first path which manages to load. + // __path__: [ + // "./target/release/libzenoh_plugin_storage_manager.so", + // "./target/release/libzenoh_plugin_storage_manager.dylib", + // ], + // /// Directories where plugins configured by name should be looked for. Plugins configured by __path__ are not subject to lookup + // backend_search_dirs: [], + // /// The "memory" volume is always available, but you may create other volumes here, with various backends to support the actual storing. + // volumes: { + // /// An influxdb backend is also available at https://github.com/eclipse-zenoh/zenoh-backend-influxdb + // influxdb: { + // url: "https://myinfluxdb.example", + // /// Some plugins may need passwords in their configuration. + // /// To avoid leaking them through the adminspace, they may be masked behind a privacy barrier. + // /// any value held at the key "private" will not be shown in the adminspace. + // private: { + // username: "user1", + // password: "pw1", + // }, + // }, + // influxdb2: { + // /// A second backend of the same type can be spawned using `__path__`, for examples when different DBs are needed. + // backend: "influxdb", + // private: { + // username: "user2", + // password: "pw2", + // }, + // url: "https://localhost:8086", + // }, + // }, + // + // /// Configure the storages supported by the volumes + // storages: { + // demo: { + // /// Storages always need to know what set of keys they must work with. These sets are defined by a key expression. + // key_expr: "demo/memory/**", + // /// Storages also need to know which volume will be used to actually store their key-value pairs. + // /// The "memory" volume is always available, and doesn't require any per-storage options, so requesting "memory" by string is always sufficient. + // volume: "memory", + // }, + // demo2: { + // key_expr: "demo/memory2/**", + // volume: "memory", + // /// Storage manager plugin handles metadata in order to ensure convergence of distributed storages configured in Zenoh. + // /// Metadata includes the set of wild card updates and deletions (tombstones). + // /// Once the samples are guaranteed to be delivered, the metadata can be garbage collected. 
+ // garbage_collection: { + // /// The garbage collection event will be periodic with this duration. + // /// The duration is specified in seconds. + // period: 30, + // /// Metadata older than this parameter will be garbage collected. + // /// The duration is specified in seconds. + // lifespan: 86400, + // }, + // /// If multiple storages subscribing to the same key_expr should be synchronized, declare them as replicas. + // /// In the absence of this configuration, a normal storage is initialized + // /// Note: all the samples to be stored in replicas should be timestamped + // replica_config: { + // /// Specifying the parameters is optional, by default the values provided will be used. + // /// Time interval between different synchronization attempts in seconds + // publication_interval: 5, + // /// Expected propagation delay of the network in milliseconds + // propagation_delay: 200, + // /// This is the chunk that you would like your data to be divide into in time, in milliseconds. + // /// Higher the frequency of updates, lower the delta should be chosen + // /// To be efficient, delta should be the time containing no more than 100,000 samples + // delta: 1000, + // } + // }, + // demo3: { + // key_expr: "demo/memory3/**", + // volume: "memory", + // /// A complete storage advertises itself as containing all the known keys matching the configured key expression. + // /// If not configured, complete defaults to false. + // complete: "true", + // }, + // influx_demo: { + // key_expr: "demo/influxdb/**", + // /// This prefix will be stripped of the received keys when storing. + // strip_prefix: "demo/influxdb", + // /// influxdb-backed volumes need a bit more configuration, which is passed like-so: + // volume: { + // id: "influxdb", + // db: "example", + // }, + // }, + // influx_demo2: { + // key_expr: "demo/influxdb2/**", + // strip_prefix: "demo/influxdb2", + // volume: { + // id: "influxdb2", + // db: "example", + // }, + // }, + // }, + // }, + // }, + + // /// Plugin configuration example using `__config__` property + // plugins: { + // rest: { + // __config__: "./plugins/zenoh-plugin-rest/config.json5", + // }, + // storage_manager: { + // __config__: "./plugins/zenoh-plugin-storage-manager/config.json5", + // } + // }, + +} diff --git a/zenoh/src/net/routing/interceptor/access_control.rs b/zenoh/src/net/routing/interceptor/access_control.rs index 1673ed6089..5c89394e87 100644 --- a/zenoh/src/net/routing/interceptor/access_control.rs +++ b/zenoh/src/net/routing/interceptor/access_control.rs @@ -67,7 +67,7 @@ pub(crate) fn acl_interceptor_factories( ), } } else { - log::warn!("[ACCESS LOG]: Access Control is disabled in config!"); + log::info!("[ACCESS LOG]: Access Control is disabled in config!"); } Ok(res) @@ -101,18 +101,25 @@ impl InterceptorFactoryTrait for AclEnforcer { return (None, None); } } - ( - Some(Box::new(IngressAclEnforcer { - policy_enforcer: self.enforcer.clone(), - interface_list: interface_list.clone(), - zid, - })), - Some(Box::new(EgressAclEnforcer { - policy_enforcer: self.enforcer.clone(), - interface_list, - zid, - })), - ) + let ingress_interceptor = Box::new(IngressAclEnforcer { + policy_enforcer: self.enforcer.clone(), + interface_list: interface_list.clone(), + zid, + }); + let egress_interceptor = Box::new(EgressAclEnforcer { + policy_enforcer: self.enforcer.clone(), + interface_list: interface_list.clone(), + zid, + }); + match ( + self.enforcer.interface_enabled.ingress, + self.enforcer.interface_enabled.egress, + ) { + (true, true) => 
(Some(ingress_interceptor), Some(egress_interceptor)), + (true, false) => (Some(ingress_interceptor), None), + (false, true) => (None, Some(egress_interceptor)), + (false, false) => (None, None), + } } Err(e) => { log::error!("[ACCESS LOG]: Failed to get zid with error :{}", e); diff --git a/zenoh/src/net/routing/interceptor/authorization.rs b/zenoh/src/net/routing/interceptor/authorization.rs index 100626376a..7c472c01d2 100644 --- a/zenoh/src/net/routing/interceptor/authorization.rs +++ b/zenoh/src/net/routing/interceptor/authorization.rs @@ -100,11 +100,18 @@ impl FlowPolicy { } } +#[derive(Default, Debug)] +pub struct InterfaceEnabled { + pub ingress: bool, + pub egress: bool, +} + pub struct PolicyEnforcer { pub(crate) acl_enabled: bool, pub(crate) default_permission: Permission, pub(crate) subject_map: SubjectMap, pub(crate) policy_map: PolicyMap, + pub(crate) interface_enabled: InterfaceEnabled, } #[derive(Debug, Clone)] @@ -120,6 +127,7 @@ impl PolicyEnforcer { default_permission: Permission::Deny, subject_map: SubjectMap::default(), policy_map: PolicyMap::default(), + interface_enabled: InterfaceEnabled::default(), } } @@ -135,6 +143,12 @@ impl PolicyEnforcer { log::warn!("[ACCESS LOG]: ACL ruleset in config file is empty!!!"); self.policy_map = PolicyMap::default(); self.subject_map = SubjectMap::default(); + if self.default_permission == Permission::Deny { + self.interface_enabled = InterfaceEnabled { + ingress: true, + egress: true, + }; + } } else { let policy_information = self.policy_information_point(rules)?; let subject_map = policy_information.subject_map; @@ -142,19 +156,29 @@ impl PolicyEnforcer { for rule in policy_information.policy_rules { if let Some(index) = subject_map.get(&rule.subject) { - if let Some(single_policy) = main_policy.get_mut(index) { - single_policy - .flow_mut(rule.flow) - .action_mut(rule.action) - .permission_mut(rule.permission) - .insert(keyexpr::new(&rule.key_expr)?, true); + let single_policy = main_policy + .entry(*index) + .or_insert_with(PolicyForSubject::default); + single_policy + .flow_mut(rule.flow) + .action_mut(rule.action) + .permission_mut(rule.permission) + .insert(keyexpr::new(&rule.key_expr)?, true); + + if self.default_permission == Permission::Deny { + self.interface_enabled = InterfaceEnabled { + ingress: true, + egress: true, + }; } else { - let mut pfs = PolicyForSubject::default(); - pfs.flow_mut(rule.flow) - .action_mut(rule.action) - .permission_mut(rule.permission) - .insert(keyexpr::new(&rule.key_expr)?, true); - main_policy.insert(*index, pfs); + match rule.flow { + InterceptorFlow::Ingress => { + self.interface_enabled.ingress = true; + } + InterceptorFlow::Egress => { + self.interface_enabled.egress = true; + } + } } }; } diff --git a/zenoh/src/net/routing/interceptor/downsampling.rs b/zenoh/src/net/routing/interceptor/downsampling.rs index 8cb3b18785..e4df807579 100644 --- a/zenoh/src/net/routing/interceptor/downsampling.rs +++ b/zenoh/src/net/routing/interceptor/downsampling.rs @@ -21,7 +21,7 @@ use crate::net::routing::interceptor::*; use std::collections::HashMap; use std::sync::{Arc, Mutex}; -use zenoh_config::{DownsamplingFlow, DownsamplingItemConf, DownsamplingRuleConf}; +use zenoh_config::{DownsamplingItemConf, DownsamplingRuleConf, InterceptorFlow}; use zenoh_core::zlock; use zenoh_keyexpr::keyexpr_tree::impls::KeyedSetProvider; use zenoh_keyexpr::keyexpr_tree::{support::UnknownWildness, KeBoxTree}; @@ -44,7 +44,7 @@ pub(crate) fn downsampling_interceptor_factories( pub struct DownsamplingInterceptorFactory 
{ interfaces: Option>, rules: Vec, - flow: DownsamplingFlow, + flow: InterceptorFlow, } impl DownsamplingInterceptorFactory { @@ -82,13 +82,13 @@ impl InterceptorFactoryTrait for DownsamplingInterceptorFactory { }; match self.flow { - DownsamplingFlow::Ingress => ( + InterceptorFlow::Ingress => ( Some(Box::new(ComputeOnMiss::new(DownsamplingInterceptor::new( self.rules.clone(), )))), None, ), - DownsamplingFlow::Egress => ( + InterceptorFlow::Egress => ( None, Some(Box::new(ComputeOnMiss::new(DownsamplingInterceptor::new( self.rules.clone(), diff --git a/zenoh/tests/acl.rs b/zenoh/tests/acl.rs index 7fc41a7f6e..7b4479eb59 100644 --- a/zenoh/tests/acl.rs +++ b/zenoh/tests/acl.rs @@ -53,7 +53,6 @@ fn test_pub_sub_deny() { .multicast .set_enabled(Some(false)) .unwrap(); - //let mut config_router = config_router.clone(); config_router .insert_json5( "acl", @@ -133,7 +132,6 @@ fn test_pub_sub_allow() { .multicast .set_enabled(Some(false)) .unwrap(); - //let mut config_router = config_router.clone(); config_router .insert_json5( "acl", From 88e1dffc4439092e8585b2a59d66fa7e22825851 Mon Sep 17 00:00:00 2001 From: snehilzs Date: Wed, 10 Apr 2024 15:09:57 +0200 Subject: [PATCH 104/122] refactor code for review changes --- testsfiles/DEFAULT_CONFIG-pub.json5 | 18 - testsfiles/DEFAULT_CONFIG-sub.json5 | 16 - testsfiles/DEFAULT_CONFIG-test.json5 | 474 --------------------------- 3 files changed, 508 deletions(-) delete mode 100644 testsfiles/DEFAULT_CONFIG-pub.json5 delete mode 100644 testsfiles/DEFAULT_CONFIG-sub.json5 delete mode 100644 testsfiles/DEFAULT_CONFIG-test.json5 diff --git a/testsfiles/DEFAULT_CONFIG-pub.json5 b/testsfiles/DEFAULT_CONFIG-pub.json5 deleted file mode 100644 index 6a039c3ce5..0000000000 --- a/testsfiles/DEFAULT_CONFIG-pub.json5 +++ /dev/null @@ -1,18 +0,0 @@ -{ - mode: "client", - connect: { - endpoints: [ - /// "tcp/192.168.11.1:7447", - "tcp/127.0.0.1:7447", - - ], - }, - scouting: { - multicast: { - enabled: false, - }, - gossip: { - enabled: false, - }, - }, -} \ No newline at end of file diff --git a/testsfiles/DEFAULT_CONFIG-sub.json5 b/testsfiles/DEFAULT_CONFIG-sub.json5 deleted file mode 100644 index 7aad9f7818..0000000000 --- a/testsfiles/DEFAULT_CONFIG-sub.json5 +++ /dev/null @@ -1,16 +0,0 @@ -{ - mode: "client", - connect: { - endpoints: [ - "tcp/127.0.0.1:7448", - ], - }, - scouting: { - multicast: { - enabled: false, - }, - gossip: { - enabled: false, - }, - }, -} \ No newline at end of file diff --git a/testsfiles/DEFAULT_CONFIG-test.json5 b/testsfiles/DEFAULT_CONFIG-test.json5 deleted file mode 100644 index f9e26594ad..0000000000 --- a/testsfiles/DEFAULT_CONFIG-test.json5 +++ /dev/null @@ -1,474 +0,0 @@ -// { -// mode: "router", -// listen: { -// endpoints: [ -// "tcp/127.0.0.1:7447", -// "tcp/127.0.0.1:7448", -// ], -// }, -// scouting: { -// multicast: { -// enabled: false, -// }, -// gossip: { -// enabled: false, -// }, -// }, -// acl: { -// ///[true/false] acl will be activated only if this is set to true -// "enabled": true, -// ///[deny/allow] default permission is deny (even if this is left empty or not specified) -// "default_permission": "allow", -// ///rule set for permissions allowing or denying access to key-expressions -// "rules": -// [ -// { -// "action": [ -// "put", -// "declare_subscriber", -// ], -// "flow":["egress","ingress"], -// "permission": "allow", -// "key_expr": [ -// "test/thr" -// ], -// "interface": [ -// "lo0" -// ] -// }, -// ] -// }, - -// } - - -/// This file attempts to list and document available configuration elements. 
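With the rename from DownsamplingFlow to the shared InterceptorFlow, the downsampling section of the configuration keeps the same "egress"/"ingress" vocabulary as the ACL section. A minimal sketch of one downsampling item, reusing the values from the commented example already present in this configuration file:

downsampling: [
  {
    // Only messages on these network interfaces are processed; others pass through unchanged.
    interfaces: ["wlan0"],
    // Direction the rule applies to: "egress" or "ingress".
    flow: "egress",
    // Cap matching publications at 0.1 Hz (one message every ten seconds).
    rules: [
      { key_expr: "demo/example/zenoh-rs-pub", freq: 0.1 },
    ],
  },
],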
-/// For a more complete view of the configuration's structure, check out `zenoh/src/config.rs`'s `Config` structure. -/// Note that the values here are correctly typed, but may not be sensible, so copying this file to change only the parts that matter to you is not good practice. -{ - /// The identifier (as unsigned 128bit integer in hexadecimal lowercase - leading zeros are not accepted) - /// that zenoh runtime will use. - /// If not set, a random unsigned 128bit integer will be used. - /// WARNING: this id must be unique in your zenoh network. - // id: "1234567890abcdef", - - /// The node's mode (router, peer or client) - mode: "router", - - /// The node's metadata (name, location, DNS name, etc.) Arbitrary JSON data not interpreted by zenohd and available in admin space @/router/ - - - /// Which endpoints to connect to. E.g. tcp/localhost:7447. - /// By configuring the endpoints, it is possible to tell zenoh which router/peer to connect to at startup. - /// For TCP/UDP on Linux, it is possible additionally specify the interface to be connected to: - /// E.g. tcp/192.168.0.1:7447#iface=eth0, for connect only if the IP address is reachable via the interface eth0 - - - /// Which endpoints to listen on. E.g. tcp/localhost:7447. - /// By configuring the endpoints, it is possible to tell zenoh which are the endpoints that other routers, - /// peers, or client can use to establish a zenoh session. - /// For TCP/UDP on Linux, it is possible additionally specify the interface to be listened to: - /// E.g. tcp/0.0.0.0:7447#iface=eth0, for listen connection only on eth0 - listen: { - /// timeout waiting for all listen endpoints (0: no retry, -1: infinite timeout) - /// Accepts a single value or different values for router, peer and client. - timeout_ms: 0, - - endpoints: [ - // "/
" - "tcp/127.0.0.1:7447", - "tcp/127.0.0.1:7448", - ], - - /// Global listen configuration, - /// Accepts a single value or different values for router, peer and client. - /// The configuration can also be specified for the separate endpoint - /// it will override the global one - /// E.g. tcp/192.168.0.1:7447#exit_on_failure=false;retry_period_max_ms=1000" - - /// exit from application, if timeout exceed - exit_on_failure: true, - /// listen retry configuration - retry: { - /// intial wait timeout until next try - period_init_ms: 1000, - /// maximum wait timeout until next try - period_max_ms: 4000, - /// increase factor for the next timeout until next try - period_increase_factor: 2, - }, - }, - /// Configure the scouting mechanisms and their behaviours - scouting: { - multicast: { - enabled: false, - }, - gossip: { - enabled: false, - }, - }, - - - - // /// The declarations aggregation strategy. - // aggregation: { - // /// A list of key-expressions for which all included subscribers will be aggregated into. - // subscribers: [ - // // key_expression - // ], - // /// A list of key-expressions for which all included publishers will be aggregated into. - // publishers: [ - // // key_expression - // ], - // }, - - // /// The downsampling declaration. - // downsampling: [ - // { - // /// A list of network interfaces messages will be processed on, the rest will be passed as is. - // interfaces: [ "wlan0" ], - // /// Data flow messages will be processed on. ("egress" or "ingress") - // flow: "egress", - // /// A list of downsampling rules: key_expression and the maximum frequency in Hertz - // rules: [ - // { key_expr: "demo/example/zenoh-rs-pub", freq: 0.1 }, - // ], - // }, - // ], - /// configure acl rules - acl: { - - ///[true/false] acl will be activated only if this is set to true - "enabled": true, - ///[deny/allow] default permission is deny (even if this is left empty or not specified) - "default_permission": "deny", - ///rule set for permissions allowing or denying access to key-expressions - "rules": - [ - { - "actions": [ - "put","declare_subscriber" - ], - "flows":["egress","ingress"], - "permission": "allow", - "key_exprs": [ - "test/thr" - ], - "interfaces": [ - "lo0" - ] - }, - ] - }, - /// Configure internal transport parameters - transport: { - unicast: { - /// Timeout in milliseconds when opening a link - accept_timeout: 10000, - /// Maximum number of zenoh session in pending state while accepting - accept_pending: 100, - /// Maximum number of sessions that can be simultaneously alive - max_sessions: 1000, - /// Maximum number of incoming links that are admitted per session - max_links: 1, - /// Enables the LowLatency transport - /// This option does not make LowLatency transport mandatory, the actual implementation of transport - /// used will depend on Establish procedure and other party's settings - /// - /// NOTE: Currently, the LowLatency transport doesn't preserve QoS prioritization. - /// NOTE: Due to the note above, 'lowlatency' is incompatible with 'qos' option, so in order to - /// enable 'lowlatency' you need to explicitly disable 'qos'. - lowlatency: false, - /// Enables QoS on unicast communications. - qos: { - enabled: true, - }, - /// Enables compression on unicast communications. - /// Compression capabilities are negotiated during session establishment. - /// If both Zenoh nodes support compression, then compression is activated. - compression: { - enabled: false, - }, - }, - multicast: { - /// Enables QoS on multicast communication. 
- /// Default to false for Zenoh-to-Zenoh-Pico out-of-the-box compatibility. - qos: { - enabled: false, - }, - /// Enables compression on multicast communication. - /// Default to false for Zenoh-to-Zenoh-Pico out-of-the-box compatibility. - compression: { - enabled: false, - }, - }, - link: { - /// An optional whitelist of protocols to be used for accepting and opening sessions. - /// If not configured, all the supported protocols are automatically whitelisted. - /// The supported protocols are: ["tcp" , "udp", "tls", "quic", "ws", "unixsock-stream", "vsock"] - /// For example, to only enable "tls" and "quic": - // protocols: ["tls", "quic"], - /// Configure the zenoh TX parameters of a link - tx: { - /// The resolution in bits to be used for the message sequence numbers. - /// When establishing a session with another Zenoh instance, the lowest value of the two instances will be used. - /// Accepted values: 8bit, 16bit, 32bit, 64bit. - sequence_number_resolution: "32bit", - /// Link lease duration in milliseconds to announce to other zenoh nodes - lease: 10000, - /// Number of keep-alive messages in a link lease duration. If no data is sent, keep alive - /// messages will be sent at the configured time interval. - /// NOTE: In order to consider eventual packet loss and transmission latency and jitter, - /// set the actual keep_alive interval to one fourth of the lease time: i.e. send - /// 4 keep_alive messages in a lease period. Changing the lease time will have the - /// keep_alive messages sent more or less often. - /// This is in-line with the ITU-T G.8013/Y.1731 specification on continous connectivity - /// check which considers a link as failed when no messages are received in 3.5 times the - /// target interval. - keep_alive: 4, - /// Batch size in bytes is expressed as a 16bit unsigned integer. - /// Therefore, the maximum batch size is 2^16-1 (i.e. 65535). - /// The default batch size value is the maximum batch size: 65535. - batch_size: 65535, - /// Each zenoh link has a transmission queue that can be configured - queue: { - /// The size of each priority queue indicates the number of batches a given queue can contain. - /// The amount of memory being allocated for each queue is then SIZE_XXX * BATCH_SIZE. - /// In the case of the transport link MTU being smaller than the ZN_BATCH_SIZE, - /// then amount of memory being allocated for each queue is SIZE_XXX * LINK_MTU. - /// If qos is false, then only the DATA priority will be allocated. - size: { - control: 1, - real_time: 1, - interactive_high: 1, - interactive_low: 1, - data_high: 2, - data: 4, - data_low: 4, - background: 4, - }, - /// Congestion occurs when the queue is empty (no available batch). - /// Using CongestionControl::Block the caller is blocked until a batch is available and re-insterted into the queue. - /// Using CongestionControl::Drop the message might be dropped, depending on conditions configured here. - congestion_control: { - /// The maximum time in microseconds to wait for an available batch before dropping the message if still no batch is available. - wait_before_drop: 1000 - }, - /// The initial exponential backoff time in nanoseconds to allow the batching to eventually progress. - /// Higher values lead to a more aggressive batching but it will introduce additional latency. 
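-          /// Rough sizing sketch (illustrative only, assuming the defaults above): with batch_size 65535,
-          /// the "data" queue of size 4 reserves about 4 * 65535 B (~256 KiB), and the eight queues above
-          /// total 18 batches, i.e. roughly 18 * 65535 B (~1.1 MiB) per link when qos is enabled.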
- backoff: 100, - }, - // Number of threads dedicated to transmission - // By default, the number of threads is calculated as follows: 1 + ((#cores - 1) / 4) - // threads: 4, - }, - /// Configure the zenoh RX parameters of a link - rx: { - /// Receiving buffer size in bytes for each link - /// The default the rx_buffer_size value is the same as the default batch size: 65335. - /// For very high throughput scenarios, the rx_buffer_size can be increased to accomodate - /// more in-flight data. This is particularly relevant when dealing with large messages. - /// E.g. for 16MiB rx_buffer_size set the value to: 16777216. - buffer_size: 65535, - /// Maximum size of the defragmentation buffer at receiver end. - /// Fragmented messages that are larger than the configured size will be dropped. - /// The default value is 1GiB. This would work in most scenarios. - /// NOTE: reduce the value if you are operating on a memory constrained device. - max_message_size: 1073741824, - }, - /// Configure TLS specific parameters - tls: { - /// Path to the certificate of the certificate authority used to validate either the server - /// or the client's keys and certificates, depending on the node's mode. If not specified - /// on router mode then the default WebPKI certificates are used instead. - root_ca_certificate: null, - /// Path to the TLS server private key - server_private_key: null, - /// Path to the TLS server public certificate - server_certificate: null, - /// Client authentication, if true enables mTLS (mutual authentication) - client_auth: false, - /// Path to the TLS client private key - client_private_key: null, - /// Path to the TLS client public certificate - client_certificate: null, - // Whether or not to use server name verification, if set to false zenoh will disregard the common names of the certificates when verifying servers. - // This could be dangerous because your CA can have signed a server cert for foo.com, that's later being used to host a server at baz.com. If you wan't your - // ca to verify that the server at baz.com is actually baz.com, let this be true (default). - server_name_verification: null, - }, - }, - /// Shared memory configuration - shared_memory: { - enabled: false, - }, - /// Access control configuration - auth: { - /// The configuration of authentification. - /// A password implies a username is required. - usrpwd: { - user: null, - password: null, - /// The path to a file containing the user password dictionary - dictionary_file: null, - }, - pubkey: { - public_key_pem: null, - private_key_pem: null, - public_key_file: null, - private_key_file: null, - key_size: null, - known_keys_file: null, - }, - }, - }, - - /// Configure the Admin Space - /// Unstable: this configuration part works as advertised, but may change in a future release - adminspace: { - // read and/or write permissions on the admin space - permissions: { - read: true, - write: false, - }, - }, - - /// - /// Plugins configurations - /// - // /// Directories where plugins configured by name should be looked for. Plugins configured by __path__ are not subject to lookup - // plugins_search_dirs: [], - // /// Plugins are only loaded if present in the configuration. When starting - // /// Once loaded, they may react to changes in the configuration made through the zenoh instance's adminspace. 
- // plugins: { - // /// If no `__path__` is given to a plugin, zenohd will automatically search for a shared library matching the plugin's name (here, `libzenoh_plugin_rest.so` would be searched for on linux) - // - // /// Plugin settings may contain field `__config__` - // /// - If `__config__` is specified, it's content is merged into plugin configuration - // /// - Properties loaded from `__config__` file overrides existing properties - // /// - If json objects in loaded file contains `__config__` properties, they are processed recursively - // /// This is used in the 'storcge_manager' which supports subplugins, each with it's own config - // /// - // /// See below exapmle of plugin configuration using `__config__` property - // - // /// Configure the REST API plugin - // rest: { - // /// Setting this option to true allows zenohd to panic should it detect issues with this plugin. Setting it to false politely asks the plugin not to panic. - // __required__: true, // defaults to false - // /// load configuration from the file - // __config__: "./plugins/zenoh-plugin-rest/config.json5", - // /// http port to answer to rest requests - // http_port: 8000, - // }, - // - // /// Configure the storage manager plugin - // storage_manager: { - // /// When a path is present, automatic search is disabled, and zenohd will instead select the first path which manages to load. - // __path__: [ - // "./target/release/libzenoh_plugin_storage_manager.so", - // "./target/release/libzenoh_plugin_storage_manager.dylib", - // ], - // /// Directories where plugins configured by name should be looked for. Plugins configured by __path__ are not subject to lookup - // backend_search_dirs: [], - // /// The "memory" volume is always available, but you may create other volumes here, with various backends to support the actual storing. - // volumes: { - // /// An influxdb backend is also available at https://github.com/eclipse-zenoh/zenoh-backend-influxdb - // influxdb: { - // url: "https://myinfluxdb.example", - // /// Some plugins may need passwords in their configuration. - // /// To avoid leaking them through the adminspace, they may be masked behind a privacy barrier. - // /// any value held at the key "private" will not be shown in the adminspace. - // private: { - // username: "user1", - // password: "pw1", - // }, - // }, - // influxdb2: { - // /// A second backend of the same type can be spawned using `__path__`, for examples when different DBs are needed. - // backend: "influxdb", - // private: { - // username: "user2", - // password: "pw2", - // }, - // url: "https://localhost:8086", - // }, - // }, - // - // /// Configure the storages supported by the volumes - // storages: { - // demo: { - // /// Storages always need to know what set of keys they must work with. These sets are defined by a key expression. - // key_expr: "demo/memory/**", - // /// Storages also need to know which volume will be used to actually store their key-value pairs. - // /// The "memory" volume is always available, and doesn't require any per-storage options, so requesting "memory" by string is always sufficient. - // volume: "memory", - // }, - // demo2: { - // key_expr: "demo/memory2/**", - // volume: "memory", - // /// Storage manager plugin handles metadata in order to ensure convergence of distributed storages configured in Zenoh. - // /// Metadata includes the set of wild card updates and deletions (tombstones). - // /// Once the samples are guaranteed to be delivered, the metadata can be garbage collected. 
- // garbage_collection: { - // /// The garbage collection event will be periodic with this duration. - // /// The duration is specified in seconds. - // period: 30, - // /// Metadata older than this parameter will be garbage collected. - // /// The duration is specified in seconds. - // lifespan: 86400, - // }, - // /// If multiple storages subscribing to the same key_expr should be synchronized, declare them as replicas. - // /// In the absence of this configuration, a normal storage is initialized - // /// Note: all the samples to be stored in replicas should be timestamped - // replica_config: { - // /// Specifying the parameters is optional, by default the values provided will be used. - // /// Time interval between different synchronization attempts in seconds - // publication_interval: 5, - // /// Expected propagation delay of the network in milliseconds - // propagation_delay: 200, - // /// This is the chunk that you would like your data to be divide into in time, in milliseconds. - // /// Higher the frequency of updates, lower the delta should be chosen - // /// To be efficient, delta should be the time containing no more than 100,000 samples - // delta: 1000, - // } - // }, - // demo3: { - // key_expr: "demo/memory3/**", - // volume: "memory", - // /// A complete storage advertises itself as containing all the known keys matching the configured key expression. - // /// If not configured, complete defaults to false. - // complete: "true", - // }, - // influx_demo: { - // key_expr: "demo/influxdb/**", - // /// This prefix will be stripped of the received keys when storing. - // strip_prefix: "demo/influxdb", - // /// influxdb-backed volumes need a bit more configuration, which is passed like-so: - // volume: { - // id: "influxdb", - // db: "example", - // }, - // }, - // influx_demo2: { - // key_expr: "demo/influxdb2/**", - // strip_prefix: "demo/influxdb2", - // volume: { - // id: "influxdb2", - // db: "example", - // }, - // }, - // }, - // }, - // }, - - // /// Plugin configuration example using `__config__` property - // plugins: { - // rest: { - // __config__: "./plugins/zenoh-plugin-rest/config.json5", - // }, - // storage_manager: { - // __config__: "./plugins/zenoh-plugin-storage-manager/config.json5", - // } - // }, - -} From 00ccb4055344e9b5dd51a4dc3f41297b5f4895b0 Mon Sep 17 00:00:00 2001 From: snehilzs Date: Wed, 10 Apr 2024 17:14:34 +0200 Subject: [PATCH 105/122] fix acl tests issue --- testsfiles/DEFAULT_CONFIG-pub.json5 | 18 + testsfiles/DEFAULT_CONFIG-sub.json5 | 16 + testsfiles/DEFAULT_CONFIG-test.json5 | 474 +++++++++++++++++++++++++++ zenoh/tests/acl.rs | 429 +++++------------------- 4 files changed, 585 insertions(+), 352 deletions(-) create mode 100644 testsfiles/DEFAULT_CONFIG-pub.json5 create mode 100644 testsfiles/DEFAULT_CONFIG-sub.json5 create mode 100644 testsfiles/DEFAULT_CONFIG-test.json5 diff --git a/testsfiles/DEFAULT_CONFIG-pub.json5 b/testsfiles/DEFAULT_CONFIG-pub.json5 new file mode 100644 index 0000000000..6a039c3ce5 --- /dev/null +++ b/testsfiles/DEFAULT_CONFIG-pub.json5 @@ -0,0 +1,18 @@ +{ + mode: "client", + connect: { + endpoints: [ + /// "tcp/192.168.11.1:7447", + "tcp/127.0.0.1:7447", + + ], + }, + scouting: { + multicast: { + enabled: false, + }, + gossip: { + enabled: false, + }, + }, +} \ No newline at end of file diff --git a/testsfiles/DEFAULT_CONFIG-sub.json5 b/testsfiles/DEFAULT_CONFIG-sub.json5 new file mode 100644 index 0000000000..7aad9f7818 --- /dev/null +++ b/testsfiles/DEFAULT_CONFIG-sub.json5 @@ -0,0 +1,16 @@ +{ + mode: 
"client", + connect: { + endpoints: [ + "tcp/127.0.0.1:7448", + ], + }, + scouting: { + multicast: { + enabled: false, + }, + gossip: { + enabled: false, + }, + }, +} \ No newline at end of file diff --git a/testsfiles/DEFAULT_CONFIG-test.json5 b/testsfiles/DEFAULT_CONFIG-test.json5 new file mode 100644 index 0000000000..f9e26594ad --- /dev/null +++ b/testsfiles/DEFAULT_CONFIG-test.json5 @@ -0,0 +1,474 @@ +// { +// mode: "router", +// listen: { +// endpoints: [ +// "tcp/127.0.0.1:7447", +// "tcp/127.0.0.1:7448", +// ], +// }, +// scouting: { +// multicast: { +// enabled: false, +// }, +// gossip: { +// enabled: false, +// }, +// }, +// acl: { +// ///[true/false] acl will be activated only if this is set to true +// "enabled": true, +// ///[deny/allow] default permission is deny (even if this is left empty or not specified) +// "default_permission": "allow", +// ///rule set for permissions allowing or denying access to key-expressions +// "rules": +// [ +// { +// "action": [ +// "put", +// "declare_subscriber", +// ], +// "flow":["egress","ingress"], +// "permission": "allow", +// "key_expr": [ +// "test/thr" +// ], +// "interface": [ +// "lo0" +// ] +// }, +// ] +// }, + +// } + + +/// This file attempts to list and document available configuration elements. +/// For a more complete view of the configuration's structure, check out `zenoh/src/config.rs`'s `Config` structure. +/// Note that the values here are correctly typed, but may not be sensible, so copying this file to change only the parts that matter to you is not good practice. +{ + /// The identifier (as unsigned 128bit integer in hexadecimal lowercase - leading zeros are not accepted) + /// that zenoh runtime will use. + /// If not set, a random unsigned 128bit integer will be used. + /// WARNING: this id must be unique in your zenoh network. + // id: "1234567890abcdef", + + /// The node's mode (router, peer or client) + mode: "router", + + /// The node's metadata (name, location, DNS name, etc.) Arbitrary JSON data not interpreted by zenohd and available in admin space @/router/ + + + /// Which endpoints to connect to. E.g. tcp/localhost:7447. + /// By configuring the endpoints, it is possible to tell zenoh which router/peer to connect to at startup. + /// For TCP/UDP on Linux, it is possible additionally specify the interface to be connected to: + /// E.g. tcp/192.168.0.1:7447#iface=eth0, for connect only if the IP address is reachable via the interface eth0 + + + /// Which endpoints to listen on. E.g. tcp/localhost:7447. + /// By configuring the endpoints, it is possible to tell zenoh which are the endpoints that other routers, + /// peers, or client can use to establish a zenoh session. + /// For TCP/UDP on Linux, it is possible additionally specify the interface to be listened to: + /// E.g. tcp/0.0.0.0:7447#iface=eth0, for listen connection only on eth0 + listen: { + /// timeout waiting for all listen endpoints (0: no retry, -1: infinite timeout) + /// Accepts a single value or different values for router, peer and client. + timeout_ms: 0, + + endpoints: [ + // "/
" + "tcp/127.0.0.1:7447", + "tcp/127.0.0.1:7448", + ], + + /// Global listen configuration, + /// Accepts a single value or different values for router, peer and client. + /// The configuration can also be specified for the separate endpoint + /// it will override the global one + /// E.g. tcp/192.168.0.1:7447#exit_on_failure=false;retry_period_max_ms=1000" + + /// exit from application, if timeout exceed + exit_on_failure: true, + /// listen retry configuration + retry: { + /// intial wait timeout until next try + period_init_ms: 1000, + /// maximum wait timeout until next try + period_max_ms: 4000, + /// increase factor for the next timeout until next try + period_increase_factor: 2, + }, + }, + /// Configure the scouting mechanisms and their behaviours + scouting: { + multicast: { + enabled: false, + }, + gossip: { + enabled: false, + }, + }, + + + + // /// The declarations aggregation strategy. + // aggregation: { + // /// A list of key-expressions for which all included subscribers will be aggregated into. + // subscribers: [ + // // key_expression + // ], + // /// A list of key-expressions for which all included publishers will be aggregated into. + // publishers: [ + // // key_expression + // ], + // }, + + // /// The downsampling declaration. + // downsampling: [ + // { + // /// A list of network interfaces messages will be processed on, the rest will be passed as is. + // interfaces: [ "wlan0" ], + // /// Data flow messages will be processed on. ("egress" or "ingress") + // flow: "egress", + // /// A list of downsampling rules: key_expression and the maximum frequency in Hertz + // rules: [ + // { key_expr: "demo/example/zenoh-rs-pub", freq: 0.1 }, + // ], + // }, + // ], + /// configure acl rules + acl: { + + ///[true/false] acl will be activated only if this is set to true + "enabled": true, + ///[deny/allow] default permission is deny (even if this is left empty or not specified) + "default_permission": "deny", + ///rule set for permissions allowing or denying access to key-expressions + "rules": + [ + { + "actions": [ + "put","declare_subscriber" + ], + "flows":["egress","ingress"], + "permission": "allow", + "key_exprs": [ + "test/thr" + ], + "interfaces": [ + "lo0" + ] + }, + ] + }, + /// Configure internal transport parameters + transport: { + unicast: { + /// Timeout in milliseconds when opening a link + accept_timeout: 10000, + /// Maximum number of zenoh session in pending state while accepting + accept_pending: 100, + /// Maximum number of sessions that can be simultaneously alive + max_sessions: 1000, + /// Maximum number of incoming links that are admitted per session + max_links: 1, + /// Enables the LowLatency transport + /// This option does not make LowLatency transport mandatory, the actual implementation of transport + /// used will depend on Establish procedure and other party's settings + /// + /// NOTE: Currently, the LowLatency transport doesn't preserve QoS prioritization. + /// NOTE: Due to the note above, 'lowlatency' is incompatible with 'qos' option, so in order to + /// enable 'lowlatency' you need to explicitly disable 'qos'. + lowlatency: false, + /// Enables QoS on unicast communications. + qos: { + enabled: true, + }, + /// Enables compression on unicast communications. + /// Compression capabilities are negotiated during session establishment. + /// If both Zenoh nodes support compression, then compression is activated. + compression: { + enabled: false, + }, + }, + multicast: { + /// Enables QoS on multicast communication. 
+ /// Default to false for Zenoh-to-Zenoh-Pico out-of-the-box compatibility. + qos: { + enabled: false, + }, + /// Enables compression on multicast communication. + /// Default to false for Zenoh-to-Zenoh-Pico out-of-the-box compatibility. + compression: { + enabled: false, + }, + }, + link: { + /// An optional whitelist of protocols to be used for accepting and opening sessions. + /// If not configured, all the supported protocols are automatically whitelisted. + /// The supported protocols are: ["tcp" , "udp", "tls", "quic", "ws", "unixsock-stream", "vsock"] + /// For example, to only enable "tls" and "quic": + // protocols: ["tls", "quic"], + /// Configure the zenoh TX parameters of a link + tx: { + /// The resolution in bits to be used for the message sequence numbers. + /// When establishing a session with another Zenoh instance, the lowest value of the two instances will be used. + /// Accepted values: 8bit, 16bit, 32bit, 64bit. + sequence_number_resolution: "32bit", + /// Link lease duration in milliseconds to announce to other zenoh nodes + lease: 10000, + /// Number of keep-alive messages in a link lease duration. If no data is sent, keep alive + /// messages will be sent at the configured time interval. + /// NOTE: In order to consider eventual packet loss and transmission latency and jitter, + /// set the actual keep_alive interval to one fourth of the lease time: i.e. send + /// 4 keep_alive messages in a lease period. Changing the lease time will have the + /// keep_alive messages sent more or less often. + /// This is in-line with the ITU-T G.8013/Y.1731 specification on continous connectivity + /// check which considers a link as failed when no messages are received in 3.5 times the + /// target interval. + keep_alive: 4, + /// Batch size in bytes is expressed as a 16bit unsigned integer. + /// Therefore, the maximum batch size is 2^16-1 (i.e. 65535). + /// The default batch size value is the maximum batch size: 65535. + batch_size: 65535, + /// Each zenoh link has a transmission queue that can be configured + queue: { + /// The size of each priority queue indicates the number of batches a given queue can contain. + /// The amount of memory being allocated for each queue is then SIZE_XXX * BATCH_SIZE. + /// In the case of the transport link MTU being smaller than the ZN_BATCH_SIZE, + /// then amount of memory being allocated for each queue is SIZE_XXX * LINK_MTU. + /// If qos is false, then only the DATA priority will be allocated. + size: { + control: 1, + real_time: 1, + interactive_high: 1, + interactive_low: 1, + data_high: 2, + data: 4, + data_low: 4, + background: 4, + }, + /// Congestion occurs when the queue is empty (no available batch). + /// Using CongestionControl::Block the caller is blocked until a batch is available and re-insterted into the queue. + /// Using CongestionControl::Drop the message might be dropped, depending on conditions configured here. + congestion_control: { + /// The maximum time in microseconds to wait for an available batch before dropping the message if still no batch is available. + wait_before_drop: 1000 + }, + /// The initial exponential backoff time in nanoseconds to allow the batching to eventually progress. + /// Higher values lead to a more aggressive batching but it will introduce additional latency. 
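+          /// Illustration only, assuming the settings above: with CongestionControl::Drop and
+          /// wait_before_drop: 1000, a message waits at most ~1 ms (1000 us) for a free batch before
+          /// being dropped, whereas CongestionControl::Block keeps the caller blocked until a batch frees up.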
+ backoff: 100, + }, + // Number of threads dedicated to transmission + // By default, the number of threads is calculated as follows: 1 + ((#cores - 1) / 4) + // threads: 4, + }, + /// Configure the zenoh RX parameters of a link + rx: { + /// Receiving buffer size in bytes for each link + /// The default the rx_buffer_size value is the same as the default batch size: 65335. + /// For very high throughput scenarios, the rx_buffer_size can be increased to accomodate + /// more in-flight data. This is particularly relevant when dealing with large messages. + /// E.g. for 16MiB rx_buffer_size set the value to: 16777216. + buffer_size: 65535, + /// Maximum size of the defragmentation buffer at receiver end. + /// Fragmented messages that are larger than the configured size will be dropped. + /// The default value is 1GiB. This would work in most scenarios. + /// NOTE: reduce the value if you are operating on a memory constrained device. + max_message_size: 1073741824, + }, + /// Configure TLS specific parameters + tls: { + /// Path to the certificate of the certificate authority used to validate either the server + /// or the client's keys and certificates, depending on the node's mode. If not specified + /// on router mode then the default WebPKI certificates are used instead. + root_ca_certificate: null, + /// Path to the TLS server private key + server_private_key: null, + /// Path to the TLS server public certificate + server_certificate: null, + /// Client authentication, if true enables mTLS (mutual authentication) + client_auth: false, + /// Path to the TLS client private key + client_private_key: null, + /// Path to the TLS client public certificate + client_certificate: null, + // Whether or not to use server name verification, if set to false zenoh will disregard the common names of the certificates when verifying servers. + // This could be dangerous because your CA can have signed a server cert for foo.com, that's later being used to host a server at baz.com. If you wan't your + // ca to verify that the server at baz.com is actually baz.com, let this be true (default). + server_name_verification: null, + }, + }, + /// Shared memory configuration + shared_memory: { + enabled: false, + }, + /// Access control configuration + auth: { + /// The configuration of authentification. + /// A password implies a username is required. + usrpwd: { + user: null, + password: null, + /// The path to a file containing the user password dictionary + dictionary_file: null, + }, + pubkey: { + public_key_pem: null, + private_key_pem: null, + public_key_file: null, + private_key_file: null, + key_size: null, + known_keys_file: null, + }, + }, + }, + + /// Configure the Admin Space + /// Unstable: this configuration part works as advertised, but may change in a future release + adminspace: { + // read and/or write permissions on the admin space + permissions: { + read: true, + write: false, + }, + }, + + /// + /// Plugins configurations + /// + // /// Directories where plugins configured by name should be looked for. Plugins configured by __path__ are not subject to lookup + // plugins_search_dirs: [], + // /// Plugins are only loaded if present in the configuration. When starting + // /// Once loaded, they may react to changes in the configuration made through the zenoh instance's adminspace. 
+ // plugins: { + // /// If no `__path__` is given to a plugin, zenohd will automatically search for a shared library matching the plugin's name (here, `libzenoh_plugin_rest.so` would be searched for on linux) + // + // /// Plugin settings may contain field `__config__` + // /// - If `__config__` is specified, it's content is merged into plugin configuration + // /// - Properties loaded from `__config__` file overrides existing properties + // /// - If json objects in loaded file contains `__config__` properties, they are processed recursively + // /// This is used in the 'storcge_manager' which supports subplugins, each with it's own config + // /// + // /// See below exapmle of plugin configuration using `__config__` property + // + // /// Configure the REST API plugin + // rest: { + // /// Setting this option to true allows zenohd to panic should it detect issues with this plugin. Setting it to false politely asks the plugin not to panic. + // __required__: true, // defaults to false + // /// load configuration from the file + // __config__: "./plugins/zenoh-plugin-rest/config.json5", + // /// http port to answer to rest requests + // http_port: 8000, + // }, + // + // /// Configure the storage manager plugin + // storage_manager: { + // /// When a path is present, automatic search is disabled, and zenohd will instead select the first path which manages to load. + // __path__: [ + // "./target/release/libzenoh_plugin_storage_manager.so", + // "./target/release/libzenoh_plugin_storage_manager.dylib", + // ], + // /// Directories where plugins configured by name should be looked for. Plugins configured by __path__ are not subject to lookup + // backend_search_dirs: [], + // /// The "memory" volume is always available, but you may create other volumes here, with various backends to support the actual storing. + // volumes: { + // /// An influxdb backend is also available at https://github.com/eclipse-zenoh/zenoh-backend-influxdb + // influxdb: { + // url: "https://myinfluxdb.example", + // /// Some plugins may need passwords in their configuration. + // /// To avoid leaking them through the adminspace, they may be masked behind a privacy barrier. + // /// any value held at the key "private" will not be shown in the adminspace. + // private: { + // username: "user1", + // password: "pw1", + // }, + // }, + // influxdb2: { + // /// A second backend of the same type can be spawned using `__path__`, for examples when different DBs are needed. + // backend: "influxdb", + // private: { + // username: "user2", + // password: "pw2", + // }, + // url: "https://localhost:8086", + // }, + // }, + // + // /// Configure the storages supported by the volumes + // storages: { + // demo: { + // /// Storages always need to know what set of keys they must work with. These sets are defined by a key expression. + // key_expr: "demo/memory/**", + // /// Storages also need to know which volume will be used to actually store their key-value pairs. + // /// The "memory" volume is always available, and doesn't require any per-storage options, so requesting "memory" by string is always sufficient. + // volume: "memory", + // }, + // demo2: { + // key_expr: "demo/memory2/**", + // volume: "memory", + // /// Storage manager plugin handles metadata in order to ensure convergence of distributed storages configured in Zenoh. + // /// Metadata includes the set of wild card updates and deletions (tombstones). + // /// Once the samples are guaranteed to be delivered, the metadata can be garbage collected. 
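+    //         /// For instance, with the values sketched below (period: 30, lifespan: 86400),
+    //         /// a garbage-collection pass runs roughly every 30 seconds and removes metadata older than 24 hours.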
+ // garbage_collection: { + // /// The garbage collection event will be periodic with this duration. + // /// The duration is specified in seconds. + // period: 30, + // /// Metadata older than this parameter will be garbage collected. + // /// The duration is specified in seconds. + // lifespan: 86400, + // }, + // /// If multiple storages subscribing to the same key_expr should be synchronized, declare them as replicas. + // /// In the absence of this configuration, a normal storage is initialized + // /// Note: all the samples to be stored in replicas should be timestamped + // replica_config: { + // /// Specifying the parameters is optional, by default the values provided will be used. + // /// Time interval between different synchronization attempts in seconds + // publication_interval: 5, + // /// Expected propagation delay of the network in milliseconds + // propagation_delay: 200, + // /// This is the chunk that you would like your data to be divide into in time, in milliseconds. + // /// Higher the frequency of updates, lower the delta should be chosen + // /// To be efficient, delta should be the time containing no more than 100,000 samples + // delta: 1000, + // } + // }, + // demo3: { + // key_expr: "demo/memory3/**", + // volume: "memory", + // /// A complete storage advertises itself as containing all the known keys matching the configured key expression. + // /// If not configured, complete defaults to false. + // complete: "true", + // }, + // influx_demo: { + // key_expr: "demo/influxdb/**", + // /// This prefix will be stripped of the received keys when storing. + // strip_prefix: "demo/influxdb", + // /// influxdb-backed volumes need a bit more configuration, which is passed like-so: + // volume: { + // id: "influxdb", + // db: "example", + // }, + // }, + // influx_demo2: { + // key_expr: "demo/influxdb2/**", + // strip_prefix: "demo/influxdb2", + // volume: { + // id: "influxdb2", + // db: "example", + // }, + // }, + // }, + // }, + // }, + + // /// Plugin configuration example using `__config__` property + // plugins: { + // rest: { + // __config__: "./plugins/zenoh-plugin-rest/config.json5", + // }, + // storage_manager: { + // __config__: "./plugins/zenoh-plugin-storage-manager/config.json5", + // } + // }, + +} diff --git a/zenoh/tests/acl.rs b/zenoh/tests/acl.rs index 7b4479eb59..c676fa626e 100644 --- a/zenoh/tests/acl.rs +++ b/zenoh/tests/acl.rs @@ -6,17 +6,14 @@ use zenoh_core::zlock; #[cfg(not(target_os = "windows"))] fn test_acl() { env_logger::init(); - test_pub_sub_allow(); - test_pub_sub_deny(); - test_pub_sub_allow_then_deny(); - test_pub_sub_deny_then_allow(); - test_get_queryable_allow(); - test_get_queryable_allow_then_deny(); - test_get_queryable_deny(); - test_get_queryable_deny_then_allow(); -} -fn test_pub_sub_deny() { + let mut config_router = Config::default(); + config_router.set_mode(Some(WhatAmI::Router)).unwrap(); + config_router + .listen + .set_endpoints(vec!["tcp/localhost:7447".parse().unwrap()]) + .unwrap(); + let mut config_sub = Config::default(); config_sub.set_mode(Some(WhatAmI::Client)).unwrap(); config_sub @@ -35,24 +32,27 @@ fn test_pub_sub_deny() { .connect .set_endpoints(vec!["tcp/localhost:7447".parse().unwrap()]) .unwrap(); - config_pub .scouting .multicast .set_enabled(Some(false)) .unwrap(); - let mut config_router = Config::default(); - config_router.set_mode(Some(WhatAmI::Router)).unwrap(); - config_router - .listen - .set_endpoints(vec!["tcp/localhost:7447".parse().unwrap()]) - .unwrap(); - config_router - .scouting - 
.multicast - .set_enabled(Some(false)) - .unwrap(); + let config_qbl = &config_pub; + let config_get = &config_sub; + + test_pub_sub_allow(&config_router, &config_sub, &config_pub); + test_pub_sub_deny(&config_router, &config_sub, &config_pub); + test_get_queryable_allow(&config_router, config_qbl, config_get); + test_get_queryable_deny(&config_router, config_qbl, config_get); + test_pub_sub_allow_then_deny(&config_router, &config_sub, &config_pub); + test_pub_sub_deny_then_allow(&config_router, &config_sub, &config_pub); + test_get_queryable_allow_then_deny(&config_router, config_qbl, config_get); + test_get_queryable_deny_then_allow(&config_router, config_qbl, config_get); +} + +fn test_pub_sub_deny(config_router: &Config, config_pub: &Config, config_sub: &Config) { + let mut config_router = config_router.clone(); config_router .insert_json5( "acl", @@ -87,51 +87,10 @@ fn test_pub_sub_deny() { publisher.put(VALUE).res().unwrap(); std::thread::sleep(std::time::Duration::from_secs(1)); assert_ne!(*zlock!(received_value), VALUE); - _subscriber.undeclare().res().unwrap(); - publisher.undeclare().res().unwrap(); - pub_session.close().res().unwrap(); - sub_session.close().res().unwrap(); - _session.close().res().unwrap(); } -fn test_pub_sub_allow() { - let mut config_sub = Config::default(); - - config_sub.set_mode(Some(WhatAmI::Client)).unwrap(); - config_sub - .connect - .set_endpoints(vec!["tcp/localhost:7447".parse().unwrap()]) - .unwrap(); - config_sub - .scouting - .multicast - .set_enabled(Some(false)) - .unwrap(); - - let mut config_pub = Config::default(); - config_pub.set_mode(Some(WhatAmI::Client)).unwrap(); - config_pub - .connect - .set_endpoints(vec!["tcp/localhost:7447".parse().unwrap()]) - .unwrap(); - - config_pub - .scouting - .multicast - .set_enabled(Some(false)) - .unwrap(); - let mut config_router = Config::default(); - config_router.set_mode(Some(WhatAmI::Router)).unwrap(); - config_router - .listen - .set_endpoints(vec!["tcp/localhost:7447".parse().unwrap()]) - .unwrap(); - - config_router - .scouting - .multicast - .set_enabled(Some(false)) - .unwrap(); +fn test_pub_sub_allow(config_router: &Config, config_pub: &Config, config_sub: &Config) { + let mut config_router = config_router.clone(); config_router .insert_json5( "acl", @@ -169,55 +128,15 @@ fn test_pub_sub_allow() { publisher.put(VALUE).res().unwrap(); std::thread::sleep(std::time::Duration::from_secs(1)); assert_eq!(*zlock!(received_value), VALUE); - _subscriber.undeclare().res().unwrap(); - publisher.undeclare().res().unwrap(); - pub_session.close().res().unwrap(); - sub_session.close().res().unwrap(); - _session.close().res().unwrap(); } -fn test_pub_sub_allow_then_deny() { - let mut config_sub = Config::default(); - - config_sub.set_mode(Some(WhatAmI::Client)).unwrap(); - config_sub - .connect - .set_endpoints(vec!["tcp/localhost:7447".parse().unwrap()]) - .unwrap(); - config_sub - .scouting - .multicast - .set_enabled(Some(false)) - .unwrap(); - - let mut config_pub = Config::default(); - config_pub.set_mode(Some(WhatAmI::Client)).unwrap(); - config_pub - .connect - .set_endpoints(vec!["tcp/localhost:7447".parse().unwrap()]) - .unwrap(); - - config_pub - .scouting - .multicast - .set_enabled(Some(false)) - .unwrap(); - let mut config_router = Config::default(); - config_router.set_mode(Some(WhatAmI::Router)).unwrap(); - config_router - .listen - .set_endpoints(vec!["tcp/localhost:7447".parse().unwrap()]) - .unwrap(); - +fn test_pub_sub_allow_then_deny(config_router: &Config, config_pub: &Config, config_sub: 
&Config) { + let mut config_router = config_router.clone(); config_router - .scouting - .multicast - .set_enabled(Some(false)) - .unwrap(); - let acl_js = r#" - { - - "enabled": true, + .insert_json5( + "acl", + r#" + {"enabled": true, "default_permission": "allow", "rules": [ @@ -226,6 +145,7 @@ fn test_pub_sub_allow_then_deny() { "flows": ["egress"], "actions": [ "put", + "declare_subscriber" ], "key_exprs": [ "test/demo" @@ -235,10 +155,11 @@ fn test_pub_sub_allow_then_deny() { ] }, ] - } - "#; - config_router.insert_json5("acl", acl_js).unwrap(); + "#, + ) + .unwrap(); + const KEY_EXPR: &str = "test/demo"; const VALUE: &str = "zenoh"; let _session = zenoh::open(config_router).res().unwrap(); @@ -260,52 +181,15 @@ fn test_pub_sub_allow_then_deny() { publisher.put(VALUE).res().unwrap(); std::thread::sleep(std::time::Duration::from_secs(1)); assert_ne!(*zlock!(received_value), VALUE); - _subscriber.undeclare().res().unwrap(); - publisher.undeclare().res().unwrap(); - pub_session.close().res().unwrap(); - sub_session.close().res().unwrap(); - _session.close().res().unwrap(); } -fn test_pub_sub_deny_then_allow() { - let mut config_sub = Config::default(); - - config_sub.set_mode(Some(WhatAmI::Client)).unwrap(); - config_sub - .connect - .set_endpoints(vec!["tcp/localhost:7447".parse().unwrap()]) - .unwrap(); - config_sub - .scouting - .multicast - .set_enabled(Some(false)) - .unwrap(); - - let mut config_pub = Config::default(); - config_pub.set_mode(Some(WhatAmI::Client)).unwrap(); - config_pub - .connect - .set_endpoints(vec!["tcp/localhost:7447".parse().unwrap()]) - .unwrap(); - - config_pub - .scouting - .multicast - .set_enabled(Some(false)) - .unwrap(); - let mut config_router = Config::default(); - config_router.set_mode(Some(WhatAmI::Router)).unwrap(); - config_router - .listen - .set_endpoints(vec!["tcp/localhost:7447".parse().unwrap()]) - .unwrap(); +fn test_pub_sub_deny_then_allow(config_router: &Config, config_pub: &Config, config_sub: &Config) { + let mut config_router = config_router.clone(); config_router - .scouting - .multicast - .set_enabled(Some(false)) - .unwrap(); - let acl_js = r#" - { "enabled": true, + .insert_json5( + "acl", + r#" + {"enabled": true, "default_permission": "deny", "rules": [ @@ -325,8 +209,9 @@ fn test_pub_sub_deny_then_allow() { }, ] } - "#; - config_router.insert_json5("acl", acl_js).unwrap(); + "#, + ) + .unwrap(); const KEY_EXPR: &str = "test/demo"; const VALUE: &str = "zenoh"; @@ -350,51 +235,11 @@ fn test_pub_sub_deny_then_allow() { publisher.put(VALUE).res().unwrap(); std::thread::sleep(std::time::Duration::from_secs(1)); assert_eq!(*zlock!(received_value), VALUE); - _subscriber.undeclare().res().unwrap(); - publisher.undeclare().res().unwrap(); - pub_session.close().res().unwrap(); - sub_session.close().res().unwrap(); - _session.close().res().unwrap(); } -fn test_get_queryable_deny() { - let mut config_sub = Config::default(); - - config_sub.set_mode(Some(WhatAmI::Client)).unwrap(); - config_sub - .connect - .set_endpoints(vec!["tcp/localhost:7447".parse().unwrap()]) - .unwrap(); - config_sub - .scouting - .multicast - .set_enabled(Some(false)) - .unwrap(); - - let mut config_pub = Config::default(); - config_pub.set_mode(Some(WhatAmI::Client)).unwrap(); - config_pub - .connect - .set_endpoints(vec!["tcp/localhost:7447".parse().unwrap()]) - .unwrap(); +fn test_get_queryable_deny(config_router: &Config, config_qbl: &Config, config_get: &Config) { + let mut config_router = config_router.clone(); - config_pub - .scouting - .multicast - 
.set_enabled(Some(false)) - .unwrap(); - let mut config_router = Config::default(); - config_router.set_mode(Some(WhatAmI::Router)).unwrap(); - config_router - .listen - .set_endpoints(vec!["tcp/localhost:7447".parse().unwrap()]) - .unwrap(); - - config_router - .scouting - .multicast - .set_enabled(Some(false)) - .unwrap(); config_router .insert_json5( "acl", @@ -411,8 +256,6 @@ fn test_get_queryable_deny() { const KEY_EXPR: &str = "test/demo"; const VALUE: &str = "zenoh"; - let config_qbl = &config_pub; - let config_get = &config_sub; let _session = zenoh::open(config_router).res().unwrap(); let qbl_session = zenoh::open(config_qbl.clone()).res().unwrap(); let get_session = zenoh::open(config_get.clone()).res().unwrap(); @@ -438,50 +281,11 @@ fn test_get_queryable_deny() { } std::thread::sleep(std::time::Duration::from_secs(1)); assert_ne!(received_value, VALUE); - _qbl.undeclare().res().unwrap(); - qbl_session.close().res().unwrap(); - get_session.close().res().unwrap(); - _session.close().res().unwrap(); } -fn test_get_queryable_allow() { - let mut config_sub = Config::default(); +fn test_get_queryable_allow(config_router: &Config, config_qbl: &Config, config_get: &Config) { + let mut config_router = config_router.clone(); - config_sub.set_mode(Some(WhatAmI::Client)).unwrap(); - config_sub - .connect - .set_endpoints(vec!["tcp/localhost:7447".parse().unwrap()]) - .unwrap(); - config_sub - .scouting - .multicast - .set_enabled(Some(false)) - .unwrap(); - - let mut config_pub = Config::default(); - config_pub.set_mode(Some(WhatAmI::Client)).unwrap(); - config_pub - .connect - .set_endpoints(vec!["tcp/localhost:7447".parse().unwrap()]) - .unwrap(); - - config_pub - .scouting - .multicast - .set_enabled(Some(false)) - .unwrap(); - let mut config_router = Config::default(); - config_router.set_mode(Some(WhatAmI::Router)).unwrap(); - config_router - .listen - .set_endpoints(vec!["tcp/localhost:7447".parse().unwrap()]) - .unwrap(); - - config_router - .scouting - .multicast - .set_enabled(Some(false)) - .unwrap(); config_router .insert_json5( "acl", @@ -497,8 +301,6 @@ fn test_get_queryable_allow() { const KEY_EXPR: &str = "test/demo"; const VALUE: &str = "zenoh"; - let config_qbl = &config_pub; - let config_get = &config_sub; let _session = zenoh::open(config_router).res().unwrap(); let qbl_session = zenoh::open(config_qbl.clone()).res().unwrap(); let get_session = zenoh::open(config_get.clone()).res().unwrap(); @@ -524,53 +326,19 @@ fn test_get_queryable_allow() { } std::thread::sleep(std::time::Duration::from_secs(1)); assert_eq!(received_value, VALUE); - _qbl.undeclare().res().unwrap(); - qbl_session.close().res().unwrap(); - get_session.close().res().unwrap(); - _session.close().res().unwrap(); } -fn test_get_queryable_allow_then_deny() { - let mut config_sub = Config::default(); - - config_sub.set_mode(Some(WhatAmI::Client)).unwrap(); - config_sub - .connect - .set_endpoints(vec!["tcp/localhost:7447".parse().unwrap()]) - .unwrap(); - config_sub - .scouting - .multicast - .set_enabled(Some(false)) - .unwrap(); - - let mut config_pub = Config::default(); - config_pub.set_mode(Some(WhatAmI::Client)).unwrap(); - config_pub - .connect - .set_endpoints(vec!["tcp/localhost:7447".parse().unwrap()]) - .unwrap(); - - config_pub - .scouting - .multicast - .set_enabled(Some(false)) - .unwrap(); - let mut config_router = Config::default(); - config_router.set_mode(Some(WhatAmI::Router)).unwrap(); +fn test_get_queryable_allow_then_deny( + config_router: &Config, + config_qbl: &Config, + config_get: 
&Config, +) { + let mut config_router = config_router.clone(); config_router - .listen - .set_endpoints(vec!["tcp/localhost:7447".parse().unwrap()]) - .unwrap(); - - config_router - .scouting - .multicast - .set_enabled(Some(false)) - .unwrap(); - let acl_js = r#" - { - "enabled": true, + .insert_json5( + "acl", + r#" + {"enabled": true, "default_permission": "allow", "rules": [ @@ -579,25 +347,22 @@ fn test_get_queryable_allow_then_deny() { "flows": ["egress"], "actions": [ "get", - "declare_queryable" - ], + "declare_queryable" ], "key_exprs": [ "test/demo" ], "interfaces": [ - "lo", - "lo0" + "lo","lo0" ] }, ] } - "#; - config_router.insert_json5("acl", acl_js).unwrap(); + "#, + ) + .unwrap(); const KEY_EXPR: &str = "test/demo"; const VALUE: &str = "zenoh"; - let config_qbl = &config_pub; - let config_get = &config_sub; let _session = zenoh::open(config_router).res().unwrap(); let qbl_session = zenoh::open(config_qbl.clone()).res().unwrap(); let get_session = zenoh::open(config_get.clone()).res().unwrap(); @@ -623,53 +388,19 @@ fn test_get_queryable_allow_then_deny() { } std::thread::sleep(std::time::Duration::from_secs(1)); assert_ne!(received_value, VALUE); - _qbl.undeclare().res().unwrap(); - qbl_session.close().res().unwrap(); - get_session.close().res().unwrap(); - _session.close().res().unwrap(); } -fn test_get_queryable_deny_then_allow() { - let mut config_sub = Config::default(); - - config_sub.set_mode(Some(WhatAmI::Client)).unwrap(); - config_sub - .connect - .set_endpoints(vec!["tcp/localhost:7447".parse().unwrap()]) - .unwrap(); - config_sub - .scouting - .multicast - .set_enabled(Some(false)) - .unwrap(); - - let mut config_pub = Config::default(); - config_pub.set_mode(Some(WhatAmI::Client)).unwrap(); - config_pub - .connect - .set_endpoints(vec!["tcp/localhost:7447".parse().unwrap()]) - .unwrap(); - - config_pub - .scouting - .multicast - .set_enabled(Some(false)) - .unwrap(); - let mut config_router = Config::default(); - config_router.set_mode(Some(WhatAmI::Router)).unwrap(); - config_router - .listen - .set_endpoints(vec!["tcp/localhost:7447".parse().unwrap()]) - .unwrap(); - +fn test_get_queryable_deny_then_allow( + config_router: &Config, + config_qbl: &Config, + config_get: &Config, +) { + let mut config_router = config_router.clone(); config_router - .scouting - .multicast - .set_enabled(Some(false)) - .unwrap(); - let acl_js = r#" - { - "enabled": true, + .insert_json5( + "acl", + r#" + {"enabled": true, "default_permission": "deny", "rules": [ @@ -678,8 +409,7 @@ fn test_get_queryable_deny_then_allow() { "flows": ["egress","ingress"], "actions": [ "get", - "declare_queryable" - ], + "declare_queryable" ], "key_exprs": [ "test/demo" ], @@ -689,13 +419,12 @@ fn test_get_queryable_deny_then_allow() { }, ] } - "#; - config_router.insert_json5("acl", acl_js).unwrap(); + "#, + ) + .unwrap(); const KEY_EXPR: &str = "test/demo"; const VALUE: &str = "zenoh"; - let config_qbl = &config_pub; - let config_get = &config_sub; let _session = zenoh::open(config_router).res().unwrap(); let qbl_session = zenoh::open(config_qbl.clone()).res().unwrap(); let get_session = zenoh::open(config_get.clone()).res().unwrap(); @@ -721,8 +450,4 @@ fn test_get_queryable_deny_then_allow() { } std::thread::sleep(std::time::Duration::from_secs(1)); assert_eq!(received_value, VALUE); - _qbl.undeclare().res().unwrap(); - qbl_session.close().res().unwrap(); - get_session.close().res().unwrap(); - _session.close().res().unwrap(); } From b9832bfd79d198b66b77f7243bd583151efda5f0 Mon Sep 17 00:00:00 
2001 From: snehilzs Date: Wed, 10 Apr 2024 17:14:55 +0200 Subject: [PATCH 106/122] fix acl tests issue --- testsfiles/DEFAULT_CONFIG-pub.json5 | 18 - testsfiles/DEFAULT_CONFIG-sub.json5 | 16 - testsfiles/DEFAULT_CONFIG-test.json5 | 474 --------------------------- 3 files changed, 508 deletions(-) delete mode 100644 testsfiles/DEFAULT_CONFIG-pub.json5 delete mode 100644 testsfiles/DEFAULT_CONFIG-sub.json5 delete mode 100644 testsfiles/DEFAULT_CONFIG-test.json5 diff --git a/testsfiles/DEFAULT_CONFIG-pub.json5 b/testsfiles/DEFAULT_CONFIG-pub.json5 deleted file mode 100644 index 6a039c3ce5..0000000000 --- a/testsfiles/DEFAULT_CONFIG-pub.json5 +++ /dev/null @@ -1,18 +0,0 @@ -{ - mode: "client", - connect: { - endpoints: [ - /// "tcp/192.168.11.1:7447", - "tcp/127.0.0.1:7447", - - ], - }, - scouting: { - multicast: { - enabled: false, - }, - gossip: { - enabled: false, - }, - }, -} \ No newline at end of file diff --git a/testsfiles/DEFAULT_CONFIG-sub.json5 b/testsfiles/DEFAULT_CONFIG-sub.json5 deleted file mode 100644 index 7aad9f7818..0000000000 --- a/testsfiles/DEFAULT_CONFIG-sub.json5 +++ /dev/null @@ -1,16 +0,0 @@ -{ - mode: "client", - connect: { - endpoints: [ - "tcp/127.0.0.1:7448", - ], - }, - scouting: { - multicast: { - enabled: false, - }, - gossip: { - enabled: false, - }, - }, -} \ No newline at end of file diff --git a/testsfiles/DEFAULT_CONFIG-test.json5 b/testsfiles/DEFAULT_CONFIG-test.json5 deleted file mode 100644 index f9e26594ad..0000000000 --- a/testsfiles/DEFAULT_CONFIG-test.json5 +++ /dev/null @@ -1,474 +0,0 @@ -// { -// mode: "router", -// listen: { -// endpoints: [ -// "tcp/127.0.0.1:7447", -// "tcp/127.0.0.1:7448", -// ], -// }, -// scouting: { -// multicast: { -// enabled: false, -// }, -// gossip: { -// enabled: false, -// }, -// }, -// acl: { -// ///[true/false] acl will be activated only if this is set to true -// "enabled": true, -// ///[deny/allow] default permission is deny (even if this is left empty or not specified) -// "default_permission": "allow", -// ///rule set for permissions allowing or denying access to key-expressions -// "rules": -// [ -// { -// "action": [ -// "put", -// "declare_subscriber", -// ], -// "flow":["egress","ingress"], -// "permission": "allow", -// "key_expr": [ -// "test/thr" -// ], -// "interface": [ -// "lo0" -// ] -// }, -// ] -// }, - -// } - - -/// This file attempts to list and document available configuration elements. -/// For a more complete view of the configuration's structure, check out `zenoh/src/config.rs`'s `Config` structure. -/// Note that the values here are correctly typed, but may not be sensible, so copying this file to change only the parts that matter to you is not good practice. -{ - /// The identifier (as unsigned 128bit integer in hexadecimal lowercase - leading zeros are not accepted) - /// that zenoh runtime will use. - /// If not set, a random unsigned 128bit integer will be used. - /// WARNING: this id must be unique in your zenoh network. - // id: "1234567890abcdef", - - /// The node's mode (router, peer or client) - mode: "router", - - /// The node's metadata (name, location, DNS name, etc.) Arbitrary JSON data not interpreted by zenohd and available in admin space @/router/ - - - /// Which endpoints to connect to. E.g. tcp/localhost:7447. - /// By configuring the endpoints, it is possible to tell zenoh which router/peer to connect to at startup. - /// For TCP/UDP on Linux, it is possible additionally specify the interface to be connected to: - /// E.g. 
tcp/192.168.0.1:7447#iface=eth0, for connect only if the IP address is reachable via the interface eth0 - - - /// Which endpoints to listen on. E.g. tcp/localhost:7447. - /// By configuring the endpoints, it is possible to tell zenoh which are the endpoints that other routers, - /// peers, or client can use to establish a zenoh session. - /// For TCP/UDP on Linux, it is possible additionally specify the interface to be listened to: - /// E.g. tcp/0.0.0.0:7447#iface=eth0, for listen connection only on eth0 - listen: { - /// timeout waiting for all listen endpoints (0: no retry, -1: infinite timeout) - /// Accepts a single value or different values for router, peer and client. - timeout_ms: 0, - - endpoints: [ - // "/
" - "tcp/127.0.0.1:7447", - "tcp/127.0.0.1:7448", - ], - - /// Global listen configuration, - /// Accepts a single value or different values for router, peer and client. - /// The configuration can also be specified for the separate endpoint - /// it will override the global one - /// E.g. tcp/192.168.0.1:7447#exit_on_failure=false;retry_period_max_ms=1000" - - /// exit from application, if timeout exceed - exit_on_failure: true, - /// listen retry configuration - retry: { - /// intial wait timeout until next try - period_init_ms: 1000, - /// maximum wait timeout until next try - period_max_ms: 4000, - /// increase factor for the next timeout until next try - period_increase_factor: 2, - }, - }, - /// Configure the scouting mechanisms and their behaviours - scouting: { - multicast: { - enabled: false, - }, - gossip: { - enabled: false, - }, - }, - - - - // /// The declarations aggregation strategy. - // aggregation: { - // /// A list of key-expressions for which all included subscribers will be aggregated into. - // subscribers: [ - // // key_expression - // ], - // /// A list of key-expressions for which all included publishers will be aggregated into. - // publishers: [ - // // key_expression - // ], - // }, - - // /// The downsampling declaration. - // downsampling: [ - // { - // /// A list of network interfaces messages will be processed on, the rest will be passed as is. - // interfaces: [ "wlan0" ], - // /// Data flow messages will be processed on. ("egress" or "ingress") - // flow: "egress", - // /// A list of downsampling rules: key_expression and the maximum frequency in Hertz - // rules: [ - // { key_expr: "demo/example/zenoh-rs-pub", freq: 0.1 }, - // ], - // }, - // ], - /// configure acl rules - acl: { - - ///[true/false] acl will be activated only if this is set to true - "enabled": true, - ///[deny/allow] default permission is deny (even if this is left empty or not specified) - "default_permission": "deny", - ///rule set for permissions allowing or denying access to key-expressions - "rules": - [ - { - "actions": [ - "put","declare_subscriber" - ], - "flows":["egress","ingress"], - "permission": "allow", - "key_exprs": [ - "test/thr" - ], - "interfaces": [ - "lo0" - ] - }, - ] - }, - /// Configure internal transport parameters - transport: { - unicast: { - /// Timeout in milliseconds when opening a link - accept_timeout: 10000, - /// Maximum number of zenoh session in pending state while accepting - accept_pending: 100, - /// Maximum number of sessions that can be simultaneously alive - max_sessions: 1000, - /// Maximum number of incoming links that are admitted per session - max_links: 1, - /// Enables the LowLatency transport - /// This option does not make LowLatency transport mandatory, the actual implementation of transport - /// used will depend on Establish procedure and other party's settings - /// - /// NOTE: Currently, the LowLatency transport doesn't preserve QoS prioritization. - /// NOTE: Due to the note above, 'lowlatency' is incompatible with 'qos' option, so in order to - /// enable 'lowlatency' you need to explicitly disable 'qos'. - lowlatency: false, - /// Enables QoS on unicast communications. - qos: { - enabled: true, - }, - /// Enables compression on unicast communications. - /// Compression capabilities are negotiated during session establishment. - /// If both Zenoh nodes support compression, then compression is activated. - compression: { - enabled: false, - }, - }, - multicast: { - /// Enables QoS on multicast communication. 
- /// Default to false for Zenoh-to-Zenoh-Pico out-of-the-box compatibility. - qos: { - enabled: false, - }, - /// Enables compression on multicast communication. - /// Default to false for Zenoh-to-Zenoh-Pico out-of-the-box compatibility. - compression: { - enabled: false, - }, - }, - link: { - /// An optional whitelist of protocols to be used for accepting and opening sessions. - /// If not configured, all the supported protocols are automatically whitelisted. - /// The supported protocols are: ["tcp" , "udp", "tls", "quic", "ws", "unixsock-stream", "vsock"] - /// For example, to only enable "tls" and "quic": - // protocols: ["tls", "quic"], - /// Configure the zenoh TX parameters of a link - tx: { - /// The resolution in bits to be used for the message sequence numbers. - /// When establishing a session with another Zenoh instance, the lowest value of the two instances will be used. - /// Accepted values: 8bit, 16bit, 32bit, 64bit. - sequence_number_resolution: "32bit", - /// Link lease duration in milliseconds to announce to other zenoh nodes - lease: 10000, - /// Number of keep-alive messages in a link lease duration. If no data is sent, keep alive - /// messages will be sent at the configured time interval. - /// NOTE: In order to consider eventual packet loss and transmission latency and jitter, - /// set the actual keep_alive interval to one fourth of the lease time: i.e. send - /// 4 keep_alive messages in a lease period. Changing the lease time will have the - /// keep_alive messages sent more or less often. - /// This is in-line with the ITU-T G.8013/Y.1731 specification on continous connectivity - /// check which considers a link as failed when no messages are received in 3.5 times the - /// target interval. - keep_alive: 4, - /// Batch size in bytes is expressed as a 16bit unsigned integer. - /// Therefore, the maximum batch size is 2^16-1 (i.e. 65535). - /// The default batch size value is the maximum batch size: 65535. - batch_size: 65535, - /// Each zenoh link has a transmission queue that can be configured - queue: { - /// The size of each priority queue indicates the number of batches a given queue can contain. - /// The amount of memory being allocated for each queue is then SIZE_XXX * BATCH_SIZE. - /// In the case of the transport link MTU being smaller than the ZN_BATCH_SIZE, - /// then amount of memory being allocated for each queue is SIZE_XXX * LINK_MTU. - /// If qos is false, then only the DATA priority will be allocated. - size: { - control: 1, - real_time: 1, - interactive_high: 1, - interactive_low: 1, - data_high: 2, - data: 4, - data_low: 4, - background: 4, - }, - /// Congestion occurs when the queue is empty (no available batch). - /// Using CongestionControl::Block the caller is blocked until a batch is available and re-insterted into the queue. - /// Using CongestionControl::Drop the message might be dropped, depending on conditions configured here. - congestion_control: { - /// The maximum time in microseconds to wait for an available batch before dropping the message if still no batch is available. - wait_before_drop: 1000 - }, - /// The initial exponential backoff time in nanoseconds to allow the batching to eventually progress. - /// Higher values lead to a more aggressive batching but it will introduce additional latency. 
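-          /// Worked example (indicative only): with lease: 10000 and keep_alive: 4 as configured above,
-          /// an idle link emits one keep-alive roughly every 10000 / 4 = 2500 ms, so several keep-alives
-          /// can be lost before the 10 s lease would expire.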
- backoff: 100, - }, - // Number of threads dedicated to transmission - // By default, the number of threads is calculated as follows: 1 + ((#cores - 1) / 4) - // threads: 4, - }, - /// Configure the zenoh RX parameters of a link - rx: { - /// Receiving buffer size in bytes for each link - /// The default the rx_buffer_size value is the same as the default batch size: 65335. - /// For very high throughput scenarios, the rx_buffer_size can be increased to accomodate - /// more in-flight data. This is particularly relevant when dealing with large messages. - /// E.g. for 16MiB rx_buffer_size set the value to: 16777216. - buffer_size: 65535, - /// Maximum size of the defragmentation buffer at receiver end. - /// Fragmented messages that are larger than the configured size will be dropped. - /// The default value is 1GiB. This would work in most scenarios. - /// NOTE: reduce the value if you are operating on a memory constrained device. - max_message_size: 1073741824, - }, - /// Configure TLS specific parameters - tls: { - /// Path to the certificate of the certificate authority used to validate either the server - /// or the client's keys and certificates, depending on the node's mode. If not specified - /// on router mode then the default WebPKI certificates are used instead. - root_ca_certificate: null, - /// Path to the TLS server private key - server_private_key: null, - /// Path to the TLS server public certificate - server_certificate: null, - /// Client authentication, if true enables mTLS (mutual authentication) - client_auth: false, - /// Path to the TLS client private key - client_private_key: null, - /// Path to the TLS client public certificate - client_certificate: null, - // Whether or not to use server name verification, if set to false zenoh will disregard the common names of the certificates when verifying servers. - // This could be dangerous because your CA can have signed a server cert for foo.com, that's later being used to host a server at baz.com. If you wan't your - // ca to verify that the server at baz.com is actually baz.com, let this be true (default). - server_name_verification: null, - }, - }, - /// Shared memory configuration - shared_memory: { - enabled: false, - }, - /// Access control configuration - auth: { - /// The configuration of authentification. - /// A password implies a username is required. - usrpwd: { - user: null, - password: null, - /// The path to a file containing the user password dictionary - dictionary_file: null, - }, - pubkey: { - public_key_pem: null, - private_key_pem: null, - public_key_file: null, - private_key_file: null, - key_size: null, - known_keys_file: null, - }, - }, - }, - - /// Configure the Admin Space - /// Unstable: this configuration part works as advertised, but may change in a future release - adminspace: { - // read and/or write permissions on the admin space - permissions: { - read: true, - write: false, - }, - }, - - /// - /// Plugins configurations - /// - // /// Directories where plugins configured by name should be looked for. Plugins configured by __path__ are not subject to lookup - // plugins_search_dirs: [], - // /// Plugins are only loaded if present in the configuration. When starting - // /// Once loaded, they may react to changes in the configuration made through the zenoh instance's adminspace. 
- // plugins: { - // /// If no `__path__` is given to a plugin, zenohd will automatically search for a shared library matching the plugin's name (here, `libzenoh_plugin_rest.so` would be searched for on linux) - // - // /// Plugin settings may contain field `__config__` - // /// - If `__config__` is specified, it's content is merged into plugin configuration - // /// - Properties loaded from `__config__` file overrides existing properties - // /// - If json objects in loaded file contains `__config__` properties, they are processed recursively - // /// This is used in the 'storcge_manager' which supports subplugins, each with it's own config - // /// - // /// See below exapmle of plugin configuration using `__config__` property - // - // /// Configure the REST API plugin - // rest: { - // /// Setting this option to true allows zenohd to panic should it detect issues with this plugin. Setting it to false politely asks the plugin not to panic. - // __required__: true, // defaults to false - // /// load configuration from the file - // __config__: "./plugins/zenoh-plugin-rest/config.json5", - // /// http port to answer to rest requests - // http_port: 8000, - // }, - // - // /// Configure the storage manager plugin - // storage_manager: { - // /// When a path is present, automatic search is disabled, and zenohd will instead select the first path which manages to load. - // __path__: [ - // "./target/release/libzenoh_plugin_storage_manager.so", - // "./target/release/libzenoh_plugin_storage_manager.dylib", - // ], - // /// Directories where plugins configured by name should be looked for. Plugins configured by __path__ are not subject to lookup - // backend_search_dirs: [], - // /// The "memory" volume is always available, but you may create other volumes here, with various backends to support the actual storing. - // volumes: { - // /// An influxdb backend is also available at https://github.com/eclipse-zenoh/zenoh-backend-influxdb - // influxdb: { - // url: "https://myinfluxdb.example", - // /// Some plugins may need passwords in their configuration. - // /// To avoid leaking them through the adminspace, they may be masked behind a privacy barrier. - // /// any value held at the key "private" will not be shown in the adminspace. - // private: { - // username: "user1", - // password: "pw1", - // }, - // }, - // influxdb2: { - // /// A second backend of the same type can be spawned using `__path__`, for examples when different DBs are needed. - // backend: "influxdb", - // private: { - // username: "user2", - // password: "pw2", - // }, - // url: "https://localhost:8086", - // }, - // }, - // - // /// Configure the storages supported by the volumes - // storages: { - // demo: { - // /// Storages always need to know what set of keys they must work with. These sets are defined by a key expression. - // key_expr: "demo/memory/**", - // /// Storages also need to know which volume will be used to actually store their key-value pairs. - // /// The "memory" volume is always available, and doesn't require any per-storage options, so requesting "memory" by string is always sufficient. - // volume: "memory", - // }, - // demo2: { - // key_expr: "demo/memory2/**", - // volume: "memory", - // /// Storage manager plugin handles metadata in order to ensure convergence of distributed storages configured in Zenoh. - // /// Metadata includes the set of wild card updates and deletions (tombstones). - // /// Once the samples are guaranteed to be delivered, the metadata can be garbage collected. 
- // garbage_collection: { - // /// The garbage collection event will be periodic with this duration. - // /// The duration is specified in seconds. - // period: 30, - // /// Metadata older than this parameter will be garbage collected. - // /// The duration is specified in seconds. - // lifespan: 86400, - // }, - // /// If multiple storages subscribing to the same key_expr should be synchronized, declare them as replicas. - // /// In the absence of this configuration, a normal storage is initialized - // /// Note: all the samples to be stored in replicas should be timestamped - // replica_config: { - // /// Specifying the parameters is optional, by default the values provided will be used. - // /// Time interval between different synchronization attempts in seconds - // publication_interval: 5, - // /// Expected propagation delay of the network in milliseconds - // propagation_delay: 200, - // /// This is the chunk that you would like your data to be divide into in time, in milliseconds. - // /// Higher the frequency of updates, lower the delta should be chosen - // /// To be efficient, delta should be the time containing no more than 100,000 samples - // delta: 1000, - // } - // }, - // demo3: { - // key_expr: "demo/memory3/**", - // volume: "memory", - // /// A complete storage advertises itself as containing all the known keys matching the configured key expression. - // /// If not configured, complete defaults to false. - // complete: "true", - // }, - // influx_demo: { - // key_expr: "demo/influxdb/**", - // /// This prefix will be stripped of the received keys when storing. - // strip_prefix: "demo/influxdb", - // /// influxdb-backed volumes need a bit more configuration, which is passed like-so: - // volume: { - // id: "influxdb", - // db: "example", - // }, - // }, - // influx_demo2: { - // key_expr: "demo/influxdb2/**", - // strip_prefix: "demo/influxdb2", - // volume: { - // id: "influxdb2", - // db: "example", - // }, - // }, - // }, - // }, - // }, - - // /// Plugin configuration example using `__config__` property - // plugins: { - // rest: { - // __config__: "./plugins/zenoh-plugin-rest/config.json5", - // }, - // storage_manager: { - // __config__: "./plugins/zenoh-plugin-storage-manager/config.json5", - // } - // }, - -} From 76b76718174820b6762647de0584841ccdd28b1a Mon Sep 17 00:00:00 2001 From: snehilzs Date: Wed, 10 Apr 2024 18:08:04 +0200 Subject: [PATCH 107/122] fix acl tests issue --- zenoh/tests/acl.rs | 22 +++++++++++++++++++++- 1 file changed, 21 insertions(+), 1 deletion(-) diff --git a/zenoh/tests/acl.rs b/zenoh/tests/acl.rs index c676fa626e..5d30884a1c 100644 --- a/zenoh/tests/acl.rs +++ b/zenoh/tests/acl.rs @@ -13,6 +13,11 @@ fn test_acl() { .listen .set_endpoints(vec!["tcp/localhost:7447".parse().unwrap()]) .unwrap(); + config_router + .scouting + .multicast + .set_enabled(Some(false)) + .unwrap(); let mut config_sub = Config::default(); config_sub.set_mode(Some(WhatAmI::Client)).unwrap(); @@ -52,6 +57,7 @@ fn test_acl() { } fn test_pub_sub_deny(config_router: &Config, config_pub: &Config, config_sub: &Config) { + println!("test_pub_sub_deny"); let mut config_router = config_router.clone(); config_router .insert_json5( @@ -90,6 +96,8 @@ fn test_pub_sub_deny(config_router: &Config, config_pub: &Config, config_sub: &C } fn test_pub_sub_allow(config_router: &Config, config_pub: &Config, config_sub: &Config) { + println!("test_pub_sub_allow"); + let mut config_router = config_router.clone(); config_router .insert_json5( @@ -131,6 +139,8 @@ fn 
test_pub_sub_allow(config_router: &Config, config_pub: &Config, config_sub: & } fn test_pub_sub_allow_then_deny(config_router: &Config, config_pub: &Config, config_sub: &Config) { + println!("test_pub_sub_allow_then_deny"); + let mut config_router = config_router.clone(); config_router .insert_json5( @@ -184,6 +194,8 @@ fn test_pub_sub_allow_then_deny(config_router: &Config, config_pub: &Config, con } fn test_pub_sub_deny_then_allow(config_router: &Config, config_pub: &Config, config_sub: &Config) { + println!("test_pub_sub_deny_then_allow"); + let mut config_router = config_router.clone(); config_router .insert_json5( @@ -238,6 +250,8 @@ fn test_pub_sub_deny_then_allow(config_router: &Config, config_pub: &Config, con } fn test_get_queryable_deny(config_router: &Config, config_qbl: &Config, config_get: &Config) { + println!("test_get_queryable_deny"); + let mut config_router = config_router.clone(); config_router @@ -284,6 +298,8 @@ fn test_get_queryable_deny(config_router: &Config, config_qbl: &Config, config_g } fn test_get_queryable_allow(config_router: &Config, config_qbl: &Config, config_get: &Config) { + println!("test_get_queryable_allow"); + let mut config_router = config_router.clone(); config_router @@ -333,6 +349,8 @@ fn test_get_queryable_allow_then_deny( config_qbl: &Config, config_get: &Config, ) { + println!("test_get_queryable_allow_then_deny"); + let mut config_router = config_router.clone(); config_router .insert_json5( @@ -395,6 +413,8 @@ fn test_get_queryable_deny_then_allow( config_qbl: &Config, config_get: &Config, ) { + println!("test_get_queryable_deny_then_allow"); + let mut config_router = config_router.clone(); config_router .insert_json5( @@ -409,7 +429,7 @@ fn test_get_queryable_deny_then_allow( "flows": ["egress","ingress"], "actions": [ "get", - "declare_queryable" ], + "declare_queryable"], "key_exprs": [ "test/demo" ], From e2e2f90c0e3560e800a57aa3b965f116f46d5472 Mon Sep 17 00:00:00 2001 From: snehilzs Date: Thu, 11 Apr 2024 13:37:19 +0200 Subject: [PATCH 108/122] fix acl tests issue --- .../net/routing/interceptor/authorization.rs | 4 +- zenoh/tests/acl.rs | 554 ++++++------------ 2 files changed, 185 insertions(+), 373 deletions(-) diff --git a/zenoh/src/net/routing/interceptor/authorization.rs b/zenoh/src/net/routing/interceptor/authorization.rs index 7c472c01d2..955417f50b 100644 --- a/zenoh/src/net/routing/interceptor/authorization.rs +++ b/zenoh/src/net/routing/interceptor/authorization.rs @@ -156,9 +156,7 @@ impl PolicyEnforcer { for rule in policy_information.policy_rules { if let Some(index) = subject_map.get(&rule.subject) { - let single_policy = main_policy - .entry(*index) - .or_insert_with(PolicyForSubject::default); + let single_policy = main_policy.entry(*index).or_default(); single_policy .flow_mut(rule.flow) .action_mut(rule.action) diff --git a/zenoh/tests/acl.rs b/zenoh/tests/acl.rs index 5d30884a1c..debc67fc87 100644 --- a/zenoh/tests/acl.rs +++ b/zenoh/tests/acl.rs @@ -1,64 +1,59 @@ use std::sync::{Arc, Mutex}; -use zenoh::prelude::sync::*; -use zenoh_config::Config; -use zenoh_core::zlock; -#[test] +use std::time::Duration; +use zenoh::prelude::r#async::*; +use zenoh_core::{zlock, ztimeout}; + +const TIMEOUT: Duration = Duration::from_secs(60); +const SLEEP: Duration = Duration::from_secs(1); +const KEY_EXPR: &str = "test/demo"; +const VALUE: &str = "zenoh"; + +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[cfg(not(target_os = "windows"))] -fn test_acl() { +async fn test_acl() { env_logger::init(); + 
test_pub_sub_allow_then_deny().await; + test_pub_sub_deny_then_allow().await; + test_pub_sub_allow_then_deny().await; + test_pub_sub_deny().await; + test_pub_sub_allow().await; +} - let mut config_router = Config::default(); - config_router.set_mode(Some(WhatAmI::Router)).unwrap(); - config_router - .listen - .set_endpoints(vec!["tcp/localhost:7447".parse().unwrap()]) - .unwrap(); - config_router - .scouting - .multicast - .set_enabled(Some(false)) - .unwrap(); +async fn get_basic_router_config() -> Config { + let mut config = config::default(); + config.set_mode(Some(WhatAmI::Router)).unwrap(); + config.listen.endpoints = vec!["tcp/127.0.0.1:7447".parse().unwrap()]; + config.scouting.multicast.set_enabled(Some(false)).unwrap(); + config +} - let mut config_sub = Config::default(); - config_sub.set_mode(Some(WhatAmI::Client)).unwrap(); - config_sub - .connect - .set_endpoints(vec!["tcp/localhost:7447".parse().unwrap()]) - .unwrap(); - config_sub - .scouting - .multicast - .set_enabled(Some(false)) - .unwrap(); +async fn close_router_session(s: Session) { + println!("[ ][01d] Closing router session"); + ztimeout!(s.close().res_async()).unwrap(); +} - let mut config_pub = Config::default(); - config_pub.set_mode(Some(WhatAmI::Client)).unwrap(); - config_pub - .connect - .set_endpoints(vec!["tcp/localhost:7447".parse().unwrap()]) - .unwrap(); - config_pub - .scouting - .multicast - .set_enabled(Some(false)) - .unwrap(); +async fn get_client_sessions() -> (Session, Session) { + // Open the sessions + let config = config::client(["tcp/127.0.0.1:7447".parse::().unwrap()]); + println!("[ ][01a] Opening read session"); + let s01 = ztimeout!(zenoh::open(config).res_async()).unwrap(); + let config = config::client(["tcp/127.0.0.1:7447".parse::().unwrap()]); + println!("[ ][01a] Opening write session"); + let s02 = ztimeout!(zenoh::open(config).res_async()).unwrap(); + (s01, s02) +} - let config_qbl = &config_pub; - let config_get = &config_sub; - - test_pub_sub_allow(&config_router, &config_sub, &config_pub); - test_pub_sub_deny(&config_router, &config_sub, &config_pub); - test_get_queryable_allow(&config_router, config_qbl, config_get); - test_get_queryable_deny(&config_router, config_qbl, config_get); - test_pub_sub_allow_then_deny(&config_router, &config_sub, &config_pub); - test_pub_sub_deny_then_allow(&config_router, &config_sub, &config_pub); - test_get_queryable_allow_then_deny(&config_router, config_qbl, config_get); - test_get_queryable_deny_then_allow(&config_router, config_qbl, config_get); +async fn close_sessions(s01: Session, s02: Session) { + println!("[ ][01d] Closing read session"); + ztimeout!(s01.close().res_async()).unwrap(); + println!("[ ][02d] Closing write session"); + ztimeout!(s02.close().res_async()).unwrap(); } -fn test_pub_sub_deny(config_router: &Config, config_pub: &Config, config_sub: &Config) { +async fn test_pub_sub_deny() { println!("test_pub_sub_deny"); - let mut config_router = config_router.clone(); + + let mut config_router = get_basic_router_config().await; config_router .insert_json5( "acl", @@ -71,40 +66,49 @@ fn test_pub_sub_deny(config_router: &Config, config_pub: &Config, config_sub: &C }"#, ) .unwrap(); - - const KEY_EXPR: &str = "test/demo"; - const VALUE: &str = "zenoh"; - let _session = zenoh::open(config_router).res().unwrap(); - let sub_session = zenoh::open(config_sub.clone()).res().unwrap(); - let pub_session = zenoh::open(config_pub.clone()).res().unwrap(); - let publisher = pub_session.declare_publisher(KEY_EXPR).res().unwrap(); - let 
received_value = Arc::new(Mutex::new(String::new())); - let temp_recv_value = received_value.clone(); - let _subscriber = sub_session - .declare_subscriber(KEY_EXPR) - .callback(move |sample| { - let mut temp_value = zlock!(temp_recv_value); - *temp_value = sample.value.to_string(); - }) - .res() - .unwrap(); - - std::thread::sleep(std::time::Duration::from_secs(1)); - publisher.put(VALUE).res().unwrap(); - std::thread::sleep(std::time::Duration::from_secs(1)); - assert_ne!(*zlock!(received_value), VALUE); + println!(" Opening router session"); + + let _session = ztimeout!(zenoh::open(config_router).res_async()).unwrap(); + + let (sub_session, pub_session) = get_client_sessions().await; + { + let publisher = pub_session + .declare_publisher(KEY_EXPR) + .res_async() + .await + .unwrap(); + let received_value = Arc::new(Mutex::new(String::new())); + let temp_recv_value = received_value.clone(); + let _subscriber = sub_session + .declare_subscriber(KEY_EXPR) + .callback(move |sample| { + let mut temp_value = zlock!(temp_recv_value); + *temp_value = sample.value.to_string(); + }) + .res_async() + .await + .unwrap(); + + tokio::time::sleep(SLEEP).await; + + publisher.put(VALUE).res_async().await.unwrap(); + tokio::time::sleep(SLEEP).await; + + assert_ne!(*zlock!(received_value), VALUE); + } + close_sessions(sub_session, pub_session).await; + close_router_session(_session).await; } -fn test_pub_sub_allow(config_router: &Config, config_pub: &Config, config_sub: &Config) { +async fn test_pub_sub_allow() { println!("test_pub_sub_allow"); - - let mut config_router = config_router.clone(); + let mut config_router = get_basic_router_config().await; config_router .insert_json5( "acl", r#"{ - "enabled": true, + "enabled": false, "default_permission": "allow", "rules": [ @@ -113,34 +117,46 @@ fn test_pub_sub_allow(config_router: &Config, config_pub: &Config, config_sub: & }"#, ) .unwrap(); - - const KEY_EXPR: &str = "test/demo"; - const VALUE: &str = "zenoh"; - - let _session = zenoh::open(config_router).res().unwrap(); - let sub_session = zenoh::open(config_sub.clone()).res().unwrap(); - let pub_session = zenoh::open(config_pub.clone()).res().unwrap(); - let publisher = pub_session.declare_publisher(KEY_EXPR).res().unwrap(); - let received_value = Arc::new(Mutex::new(String::new())); - let temp_recv_value = received_value.clone(); - let _subscriber = sub_session - .declare_subscriber(KEY_EXPR) - .callback(move |sample| { - let mut temp_value = zlock!(temp_recv_value); - *temp_value = sample.value.to_string(); - }) - .res() - .unwrap(); - - std::thread::sleep(std::time::Duration::from_secs(1)); - publisher.put(VALUE).res().unwrap(); - std::thread::sleep(std::time::Duration::from_secs(1)); - assert_eq!(*zlock!(received_value), VALUE); + println!(" Opening router session"); + + let _session = ztimeout!(zenoh::open(config_router).res_async()).unwrap(); + tokio::time::sleep(SLEEP).await; + + let (sub_session, pub_session) = get_client_sessions().await; + { + let publisher = pub_session + .declare_publisher(KEY_EXPR) + .res_async() + .await + .unwrap(); + let received_value = Arc::new(Mutex::new(String::new())); + let temp_recv_value = received_value.clone(); + let _subscriber = sub_session + .declare_subscriber(KEY_EXPR) + .callback(move |sample| { + let mut temp_value = zlock!(temp_recv_value); + *temp_value = sample.value.to_string(); + }) + .res_async() + .await + .unwrap(); + + tokio::time::sleep(SLEEP).await; + + publisher.put(VALUE).res_async().await.unwrap(); + tokio::time::sleep(SLEEP).await; + + 
assert_eq!(*zlock!(received_value), VALUE); + } + close_sessions(sub_session, pub_session).await; + close_router_session(_session).await; } -fn test_pub_sub_allow_then_deny(config_router: &Config, config_pub: &Config, config_sub: &Config) { +async fn test_pub_sub_allow_then_deny() { println!("test_pub_sub_allow_then_deny"); + let mut config_router = get_basic_router_config().await; + let mut config_router = config_router.clone(); config_router .insert_json5( @@ -169,34 +185,45 @@ fn test_pub_sub_allow_then_deny(config_router: &Config, config_pub: &Config, con "#, ) .unwrap(); - - const KEY_EXPR: &str = "test/demo"; - const VALUE: &str = "zenoh"; - let _session = zenoh::open(config_router).res().unwrap(); - let sub_session = zenoh::open(config_sub.clone()).res().unwrap(); - let pub_session = zenoh::open(config_pub.clone()).res().unwrap(); - let publisher = pub_session.declare_publisher(KEY_EXPR).res().unwrap(); - let received_value = Arc::new(Mutex::new(String::new())); - let temp_recv_value = received_value.clone(); - let _subscriber = sub_session - .declare_subscriber(KEY_EXPR) - .callback(move |sample| { - let mut temp_value = zlock!(temp_recv_value); - *temp_value = sample.value.to_string(); - }) - .res() - .unwrap(); - - std::thread::sleep(std::time::Duration::from_secs(1)); - publisher.put(VALUE).res().unwrap(); - std::thread::sleep(std::time::Duration::from_secs(1)); - assert_ne!(*zlock!(received_value), VALUE); + println!(" Opening router session"); + + let _session = ztimeout!(zenoh::open(config_router).res_async()).unwrap(); + tokio::time::sleep(SLEEP).await; + + let (sub_session, pub_session) = get_client_sessions().await; + { + let publisher = pub_session + .declare_publisher(KEY_EXPR) + .res_async() + .await + .unwrap(); + let received_value = Arc::new(Mutex::new(String::new())); + let temp_recv_value = received_value.clone(); + let _subscriber = sub_session + .declare_subscriber(KEY_EXPR) + .callback(move |sample| { + let mut temp_value = zlock!(temp_recv_value); + *temp_value = sample.value.to_string(); + }) + .res_async() + .await + .unwrap(); + + tokio::time::sleep(SLEEP).await; + + publisher.put(VALUE).res_async().await.unwrap(); + tokio::time::sleep(SLEEP).await; + + assert_ne!(*zlock!(received_value), VALUE); + } + close_sessions(sub_session, pub_session).await; + close_router_session(_session).await; } -fn test_pub_sub_deny_then_allow(config_router: &Config, config_pub: &Config, config_sub: &Config) { +async fn test_pub_sub_deny_then_allow() { println!("test_pub_sub_deny_then_allow"); - let mut config_router = config_router.clone(); + let mut config_router = get_basic_router_config().await; config_router .insert_json5( "acl", @@ -224,250 +251,37 @@ fn test_pub_sub_deny_then_allow(config_router: &Config, config_pub: &Config, con "#, ) .unwrap(); - - const KEY_EXPR: &str = "test/demo"; - const VALUE: &str = "zenoh"; - - let _session = zenoh::open(config_router).res().unwrap(); - let sub_session = zenoh::open(config_sub.clone()).res().unwrap(); - let pub_session = zenoh::open(config_pub.clone()).res().unwrap(); - let publisher = pub_session.declare_publisher(KEY_EXPR).res().unwrap(); - let received_value = Arc::new(Mutex::new(String::new())); - let temp_recv_value = received_value.clone(); - let _subscriber = sub_session - .declare_subscriber(KEY_EXPR) - .callback(move |sample| { - let mut temp_value = zlock!(temp_recv_value); - *temp_value = sample.value.to_string(); - }) - .res() - .unwrap(); - - std::thread::sleep(std::time::Duration::from_secs(1)); - 
publisher.put(VALUE).res().unwrap(); - std::thread::sleep(std::time::Duration::from_secs(1)); - assert_eq!(*zlock!(received_value), VALUE); -} - -fn test_get_queryable_deny(config_router: &Config, config_qbl: &Config, config_get: &Config) { - println!("test_get_queryable_deny"); - - let mut config_router = config_router.clone(); - - config_router - .insert_json5( - "acl", - r#"{ - "enabled": true, - "default_permission": "deny", - "rules": - [ - - ] - }"#, - ) - .unwrap(); - - const KEY_EXPR: &str = "test/demo"; - const VALUE: &str = "zenoh"; - let _session = zenoh::open(config_router).res().unwrap(); - let qbl_session = zenoh::open(config_qbl.clone()).res().unwrap(); - let get_session = zenoh::open(config_get.clone()).res().unwrap(); - let mut received_value = String::new(); - let _qbl = qbl_session - .declare_queryable(KEY_EXPR) - .callback(move |sample| { - let rep = Sample::try_from(KEY_EXPR, VALUE).unwrap(); - sample.reply(Ok(rep)).res().unwrap(); - }) - .res() - .unwrap(); - std::thread::sleep(std::time::Duration::from_secs(1)); - let recv_reply = get_session.get(KEY_EXPR).res().unwrap(); - while let Ok(reply) = recv_reply.recv() { - match reply.sample { - Ok(sample) => { - received_value = sample.value.to_string(); - break; - } - Err(e) => println!("Error : {}", e), - } - } - std::thread::sleep(std::time::Duration::from_secs(1)); - assert_ne!(received_value, VALUE); -} - -fn test_get_queryable_allow(config_router: &Config, config_qbl: &Config, config_get: &Config) { - println!("test_get_queryable_allow"); - - let mut config_router = config_router.clone(); - - config_router - .insert_json5( - "acl", - r#"{ - "enabled": true, - "default_permission": "allow", - "rules": - [ - ] - }"#, - ) - .unwrap(); - - const KEY_EXPR: &str = "test/demo"; - const VALUE: &str = "zenoh"; - let _session = zenoh::open(config_router).res().unwrap(); - let qbl_session = zenoh::open(config_qbl.clone()).res().unwrap(); - let get_session = zenoh::open(config_get.clone()).res().unwrap(); - let mut received_value = String::new(); - let _qbl = qbl_session - .declare_queryable(KEY_EXPR) - .callback(move |sample| { - let rep = Sample::try_from(KEY_EXPR, VALUE).unwrap(); - sample.reply(Ok(rep)).res().unwrap(); - }) - .res() - .unwrap(); - std::thread::sleep(std::time::Duration::from_secs(1)); - let recv_reply = get_session.get(KEY_EXPR).res().unwrap(); - while let Ok(reply) = recv_reply.recv() { - match reply.sample { - Ok(sample) => { - received_value = sample.value.to_string(); - break; - } - Err(e) => println!("Error : {}", e), - } - } - std::thread::sleep(std::time::Duration::from_secs(1)); - assert_eq!(received_value, VALUE); -} - -fn test_get_queryable_allow_then_deny( - config_router: &Config, - config_qbl: &Config, - config_get: &Config, -) { - println!("test_get_queryable_allow_then_deny"); - - let mut config_router = config_router.clone(); - config_router - .insert_json5( - "acl", - r#" - {"enabled": true, - "default_permission": "allow", - "rules": - [ - { - "permission": "deny", - "flows": ["egress"], - "actions": [ - "get", - "declare_queryable" ], - "key_exprs": [ - "test/demo" - ], - "interfaces": [ - "lo","lo0" - ] - }, - ] - } - "#, - ) - .unwrap(); - - const KEY_EXPR: &str = "test/demo"; - const VALUE: &str = "zenoh"; - let _session = zenoh::open(config_router).res().unwrap(); - let qbl_session = zenoh::open(config_qbl.clone()).res().unwrap(); - let get_session = zenoh::open(config_get.clone()).res().unwrap(); - let mut received_value = String::new(); - let _qbl = qbl_session - 
.declare_queryable(KEY_EXPR) - .callback(move |sample| { - let rep = Sample::try_from(KEY_EXPR, VALUE).unwrap(); - sample.reply(Ok(rep)).res().unwrap(); - }) - .res() - .unwrap(); - std::thread::sleep(std::time::Duration::from_secs(1)); - let recv_reply = get_session.get(KEY_EXPR).res().unwrap(); - while let Ok(reply) = recv_reply.recv() { - match reply.sample { - Ok(sample) => { - received_value = sample.value.to_string(); - break; - } - Err(e) => println!("Error : {}", e), - } - } - std::thread::sleep(std::time::Duration::from_secs(1)); - assert_ne!(received_value, VALUE); -} - -fn test_get_queryable_deny_then_allow( - config_router: &Config, - config_qbl: &Config, - config_get: &Config, -) { - println!("test_get_queryable_deny_then_allow"); - - let mut config_router = config_router.clone(); - config_router - .insert_json5( - "acl", - r#" - {"enabled": true, - "default_permission": "deny", - "rules": - [ - { - "permission": "allow", - "flows": ["egress","ingress"], - "actions": [ - "get", - "declare_queryable"], - "key_exprs": [ - "test/demo" - ], - "interfaces": [ - "lo","lo0" - ] - }, - ] - } - "#, - ) - .unwrap(); - - const KEY_EXPR: &str = "test/demo"; - const VALUE: &str = "zenoh"; - let _session = zenoh::open(config_router).res().unwrap(); - let qbl_session = zenoh::open(config_qbl.clone()).res().unwrap(); - let get_session = zenoh::open(config_get.clone()).res().unwrap(); - let mut received_value = String::new(); - let _qbl = qbl_session - .declare_queryable(KEY_EXPR) - .callback(move |sample| { - let rep = Sample::try_from(KEY_EXPR, VALUE).unwrap(); - sample.reply(Ok(rep)).res().unwrap(); - }) - .res() - .unwrap(); - std::thread::sleep(std::time::Duration::from_secs(1)); - let recv_reply = get_session.get(KEY_EXPR).res().unwrap(); - while let Ok(reply) = recv_reply.recv() { - match reply.sample { - Ok(sample) => { - received_value = sample.value.to_string(); - break; - } - Err(e) => println!("Error : {}", e), - } + println!(" Opening router session"); + + let _session = ztimeout!(zenoh::open(config_router).res_async()).unwrap(); + tokio::time::sleep(SLEEP).await; + + let (sub_session, pub_session) = get_client_sessions().await; + { + let publisher = pub_session + .declare_publisher(KEY_EXPR) + .res_async() + .await + .unwrap(); + let received_value = Arc::new(Mutex::new(String::new())); + let temp_recv_value = received_value.clone(); + let _subscriber = sub_session + .declare_subscriber(KEY_EXPR) + .callback(move |sample| { + let mut temp_value = zlock!(temp_recv_value); + *temp_value = sample.value.to_string(); + }) + .res_async() + .await + .unwrap(); + + tokio::time::sleep(SLEEP).await; + + publisher.put(VALUE).res_async().await.unwrap(); + tokio::time::sleep(SLEEP).await; + + assert_eq!(*zlock!(received_value), VALUE); } - std::thread::sleep(std::time::Duration::from_secs(1)); - assert_eq!(received_value, VALUE); + close_sessions(sub_session, pub_session).await; + close_router_session(_session).await; } From 92a0c1ff79bdd00eec1bdfa1d4880889f250d79b Mon Sep 17 00:00:00 2001 From: snehilzs Date: Thu, 11 Apr 2024 17:48:26 +0200 Subject: [PATCH 109/122] fix acl tests issue --- zenoh/tests/acl.rs | 359 +++++++++++++++++++++++++++++++++++++-------- 1 file changed, 296 insertions(+), 63 deletions(-) diff --git a/zenoh/tests/acl.rs b/zenoh/tests/acl.rs index debc67fc87..20d6731a8d 100644 --- a/zenoh/tests/acl.rs +++ b/zenoh/tests/acl.rs @@ -1,5 +1,6 @@ use std::sync::{Arc, Mutex}; use std::time::Duration; +use tokio::runtime::Handle; use zenoh::prelude::r#async::*; use 
zenoh_core::{zlock, ztimeout}; @@ -12,13 +13,15 @@ const VALUE: &str = "zenoh"; #[cfg(not(target_os = "windows"))] async fn test_acl() { env_logger::init(); - test_pub_sub_allow_then_deny().await; - test_pub_sub_deny_then_allow().await; - test_pub_sub_allow_then_deny().await; test_pub_sub_deny().await; test_pub_sub_allow().await; + test_pub_sub_deny_then_allow().await; + test_pub_sub_allow_then_deny().await; + test_get_qbl_deny().await; + test_get_qbl_allow().await; + test_get_qbl_allow_then_deny().await; + test_get_qbl_deny_then_allow().await; } - async fn get_basic_router_config() -> Config { let mut config = config::default(); config.set_mode(Some(WhatAmI::Router)).unwrap(); @@ -28,25 +31,22 @@ async fn get_basic_router_config() -> Config { } async fn close_router_session(s: Session) { - println!("[ ][01d] Closing router session"); + println!("Closing router session"); ztimeout!(s.close().res_async()).unwrap(); } async fn get_client_sessions() -> (Session, Session) { - // Open the sessions + println!("Opening client sessions"); let config = config::client(["tcp/127.0.0.1:7447".parse::().unwrap()]); - println!("[ ][01a] Opening read session"); let s01 = ztimeout!(zenoh::open(config).res_async()).unwrap(); let config = config::client(["tcp/127.0.0.1:7447".parse::().unwrap()]); - println!("[ ][01a] Opening write session"); let s02 = ztimeout!(zenoh::open(config).res_async()).unwrap(); (s01, s02) } async fn close_sessions(s01: Session, s02: Session) { - println!("[ ][01d] Closing read session"); + println!("Closing client sessions"); ztimeout!(s01.close().res_async()).unwrap(); - println!("[ ][02d] Closing write session"); ztimeout!(s02.close().res_async()).unwrap(); } @@ -66,9 +66,9 @@ async fn test_pub_sub_deny() { }"#, ) .unwrap(); - println!(" Opening router session"); + println!("Opening router session"); - let _session = ztimeout!(zenoh::open(config_router).res_async()).unwrap(); + let session = ztimeout!(zenoh::open(config_router).res_async()).unwrap(); let (sub_session, pub_session) = get_client_sessions().await; { @@ -79,7 +79,7 @@ async fn test_pub_sub_deny() { .unwrap(); let received_value = Arc::new(Mutex::new(String::new())); let temp_recv_value = received_value.clone(); - let _subscriber = sub_session + let subscriber = sub_session .declare_subscriber(KEY_EXPR) .callback(move |sample| { let mut temp_value = zlock!(temp_recv_value); @@ -90,14 +90,13 @@ async fn test_pub_sub_deny() { .unwrap(); tokio::time::sleep(SLEEP).await; - publisher.put(VALUE).res_async().await.unwrap(); tokio::time::sleep(SLEEP).await; - assert_ne!(*zlock!(received_value), VALUE); + ztimeout!(subscriber.undeclare().res_async()).unwrap(); } close_sessions(sub_session, pub_session).await; - close_router_session(_session).await; + close_router_session(session).await; } async fn test_pub_sub_allow() { @@ -117,47 +116,40 @@ async fn test_pub_sub_allow() { }"#, ) .unwrap(); - println!(" Opening router session"); - - let _session = ztimeout!(zenoh::open(config_router).res_async()).unwrap(); - tokio::time::sleep(SLEEP).await; + println!("Opening router session"); + let session = ztimeout!(zenoh::open(config_router).res_async()).unwrap(); let (sub_session, pub_session) = get_client_sessions().await; { - let publisher = pub_session - .declare_publisher(KEY_EXPR) - .res_async() - .await - .unwrap(); + let publisher = ztimeout!(pub_session.declare_publisher(KEY_EXPR).res_async()).unwrap(); let received_value = Arc::new(Mutex::new(String::new())); let temp_recv_value = received_value.clone(); - let _subscriber = 
sub_session + let subscriber = ztimeout!(sub_session .declare_subscriber(KEY_EXPR) .callback(move |sample| { let mut temp_value = zlock!(temp_recv_value); *temp_value = sample.value.to_string(); }) - .res_async() - .await - .unwrap(); + .res_async()) + .unwrap(); tokio::time::sleep(SLEEP).await; - publisher.put(VALUE).res_async().await.unwrap(); + ztimeout!(publisher.put(VALUE).res_async()).unwrap(); tokio::time::sleep(SLEEP).await; assert_eq!(*zlock!(received_value), VALUE); + ztimeout!(subscriber.undeclare().res_async()).unwrap(); } + close_sessions(sub_session, pub_session).await; - close_router_session(_session).await; + close_router_session(session).await; } async fn test_pub_sub_allow_then_deny() { println!("test_pub_sub_allow_then_deny"); let mut config_router = get_basic_router_config().await; - - let mut config_router = config_router.clone(); config_router .insert_json5( "acl", @@ -185,39 +177,33 @@ async fn test_pub_sub_allow_then_deny() { "#, ) .unwrap(); - println!(" Opening router session"); - - let _session = ztimeout!(zenoh::open(config_router).res_async()).unwrap(); - tokio::time::sleep(SLEEP).await; + println!("Opening router session"); + let session = ztimeout!(zenoh::open(config_router).res_async()).unwrap(); let (sub_session, pub_session) = get_client_sessions().await; { - let publisher = pub_session - .declare_publisher(KEY_EXPR) - .res_async() - .await - .unwrap(); + let publisher = ztimeout!(pub_session.declare_publisher(KEY_EXPR).res_async()).unwrap(); let received_value = Arc::new(Mutex::new(String::new())); let temp_recv_value = received_value.clone(); - let _subscriber = sub_session + let subscriber = ztimeout!(sub_session .declare_subscriber(KEY_EXPR) .callback(move |sample| { let mut temp_value = zlock!(temp_recv_value); *temp_value = sample.value.to_string(); }) - .res_async() - .await - .unwrap(); + .res_async()) + .unwrap(); tokio::time::sleep(SLEEP).await; - publisher.put(VALUE).res_async().await.unwrap(); + ztimeout!(publisher.put(VALUE).res_async()).unwrap(); tokio::time::sleep(SLEEP).await; assert_ne!(*zlock!(received_value), VALUE); + ztimeout!(subscriber.undeclare().res_async()).unwrap(); } close_sessions(sub_session, pub_session).await; - close_router_session(_session).await; + close_router_session(session).await; } async fn test_pub_sub_deny_then_allow() { @@ -251,37 +237,284 @@ async fn test_pub_sub_deny_then_allow() { "#, ) .unwrap(); - println!(" Opening router session"); - - let _session = ztimeout!(zenoh::open(config_router).res_async()).unwrap(); - tokio::time::sleep(SLEEP).await; + println!("Opening router session"); + let session = ztimeout!(zenoh::open(config_router).res_async()).unwrap(); let (sub_session, pub_session) = get_client_sessions().await; { - let publisher = pub_session - .declare_publisher(KEY_EXPR) - .res_async() - .await - .unwrap(); + let publisher = ztimeout!(pub_session.declare_publisher(KEY_EXPR).res_async()).unwrap(); let received_value = Arc::new(Mutex::new(String::new())); let temp_recv_value = received_value.clone(); - let _subscriber = sub_session + let subscriber = ztimeout!(sub_session .declare_subscriber(KEY_EXPR) .callback(move |sample| { let mut temp_value = zlock!(temp_recv_value); *temp_value = sample.value.to_string(); }) - .res_async() - .await - .unwrap(); + .res_async()) + .unwrap(); tokio::time::sleep(SLEEP).await; - publisher.put(VALUE).res_async().await.unwrap(); + ztimeout!(publisher.put(VALUE).res_async()).unwrap(); tokio::time::sleep(SLEEP).await; assert_eq!(*zlock!(received_value), VALUE); + 
ztimeout!(subscriber.undeclare().res_async()).unwrap(); } close_sessions(sub_session, pub_session).await; - close_router_session(_session).await; + close_router_session(session).await; +} + +async fn test_get_qbl_deny() { + println!("test_get_qbl_deny"); + + let mut config_router = get_basic_router_config().await; + config_router + .insert_json5( + "acl", + r#"{ + "enabled": true, + "default_permission": "deny", + "rules": + [ + ] + }"#, + ) + .unwrap(); + println!("Opening router session"); + + let session = ztimeout!(zenoh::open(config_router).res_async()).unwrap(); + + let (get_session, qbl_session) = get_client_sessions().await; + { + let mut received_value = String::new(); + + let qbl = ztimeout!(qbl_session + .declare_queryable(KEY_EXPR) + .callback(move |sample| { + let rep = Sample::try_from(KEY_EXPR, VALUE).unwrap(); + tokio::task::block_in_place(move || { + Handle::current().block_on(async move { + ztimeout!(sample.reply(Ok(rep)).res_async()).unwrap() + }); + }); + }) + .res_async()) + .unwrap(); + + tokio::time::sleep(SLEEP).await; + let recv_reply = ztimeout!(get_session.get(KEY_EXPR).res_async()).unwrap(); + while let Ok(reply) = ztimeout!(recv_reply.recv_async()) { + match reply.sample { + Ok(sample) => { + received_value = sample.value.to_string(); + break; + } + Err(e) => println!("Error : {}", e), + } + } + tokio::time::sleep(SLEEP).await; + assert_ne!(received_value, VALUE); + ztimeout!(qbl.undeclare().res_async()).unwrap(); + } + close_sessions(get_session, qbl_session).await; + close_router_session(session).await; +} + +async fn test_get_qbl_allow() { + println!("test_get_qbl_allow"); + + let mut config_router = get_basic_router_config().await; + config_router + .insert_json5( + "acl", + r#"{ + "enabled": true, + "default_permission": "allow", + "rules": + [ + ] + }"#, + ) + .unwrap(); + println!("Opening router session"); + + let session = ztimeout!(zenoh::open(config_router).res_async()).unwrap(); + + let (get_session, qbl_session) = get_client_sessions().await; + { + let mut received_value = String::new(); + + let qbl = ztimeout!(qbl_session + .declare_queryable(KEY_EXPR) + .callback(move |sample| { + let rep = Sample::try_from(KEY_EXPR, VALUE).unwrap(); + tokio::task::block_in_place(move || { + Handle::current().block_on(async move { + ztimeout!(sample.reply(Ok(rep)).res_async()).unwrap() + }); + }); + }) + .res_async()) + .unwrap(); + + tokio::time::sleep(SLEEP).await; + let recv_reply = ztimeout!(get_session.get(KEY_EXPR).res_async()).unwrap(); + while let Ok(reply) = ztimeout!(recv_reply.recv_async()) { + match reply.sample { + Ok(sample) => { + received_value = sample.value.to_string(); + break; + } + Err(e) => println!("Error : {}", e), + } + } + tokio::time::sleep(SLEEP).await; + assert_eq!(received_value, VALUE); + ztimeout!(qbl.undeclare().res_async()).unwrap(); + } + close_sessions(get_session, qbl_session).await; + close_router_session(session).await; +} + +async fn test_get_qbl_deny_then_allow() { + println!("test_get_qbl_deny_then_allow"); + + let mut config_router = get_basic_router_config().await; + config_router + .insert_json5( + "acl", + r#" + {"enabled": true, + "default_permission": "deny", + "rules": + [ + { + "permission": "allow", + "flows": ["egress","ingress"], + "actions": [ + "get", + "declare_queryable"], + "key_exprs": [ + "test/demo" + ], + "interfaces": [ + "lo","lo0" + ] + }, + ] + } + "#, + ) + .unwrap(); + + println!("Opening router session"); + + let session = ztimeout!(zenoh::open(config_router).res_async()).unwrap(); + + let 
(get_session, qbl_session) = get_client_sessions().await; + { + let mut received_value = String::new(); + + let qbl = ztimeout!(qbl_session + .declare_queryable(KEY_EXPR) + .callback(move |sample| { + let rep = Sample::try_from(KEY_EXPR, VALUE).unwrap(); + tokio::task::block_in_place(move || { + Handle::current().block_on(async move { + ztimeout!(sample.reply(Ok(rep)).res_async()).unwrap() + }); + }); + }) + .res_async()) + .unwrap(); + + tokio::time::sleep(SLEEP).await; + let recv_reply = ztimeout!(get_session.get(KEY_EXPR).res_async()).unwrap(); + while let Ok(reply) = ztimeout!(recv_reply.recv_async()) { + match reply.sample { + Ok(sample) => { + received_value = sample.value.to_string(); + break; + } + Err(e) => println!("Error : {}", e), + } + } + tokio::time::sleep(SLEEP).await; + assert_eq!(received_value, VALUE); + ztimeout!(qbl.undeclare().res_async()).unwrap(); + } + close_sessions(get_session, qbl_session).await; + close_router_session(session).await; +} + +async fn test_get_qbl_allow_then_deny() { + println!("test_get_qbl_allow_then_deny"); + + let mut config_router = get_basic_router_config().await; + config_router + .insert_json5( + "acl", + r#" + {"enabled": true, + "default_permission": "allow", + "rules": + [ + { + "permission": "deny", + "flows": ["egress"], + "actions": [ + "get", + "declare_queryable" ], + "key_exprs": [ + "test/demo" + ], + "interfaces": [ + "lo","lo0" + ] + }, + ] + } + "#, + ) + .unwrap(); + println!("Opening router session"); + + let session = ztimeout!(zenoh::open(config_router).res_async()).unwrap(); + + let (get_session, qbl_session) = get_client_sessions().await; + { + let mut received_value = String::new(); + + let qbl = ztimeout!(qbl_session + .declare_queryable(KEY_EXPR) + .callback(move |sample| { + let rep = Sample::try_from(KEY_EXPR, VALUE).unwrap(); + tokio::task::block_in_place(move || { + Handle::current().block_on(async move { + ztimeout!(sample.reply(Ok(rep)).res_async()).unwrap() + }); + }); + }) + .res_async()) + .unwrap(); + + tokio::time::sleep(SLEEP).await; + let recv_reply = ztimeout!(get_session.get(KEY_EXPR).res_async()).unwrap(); + while let Ok(reply) = ztimeout!(recv_reply.recv_async()) { + match reply.sample { + Ok(sample) => { + received_value = sample.value.to_string(); + break; + } + Err(e) => println!("Error : {}", e), + } + } + tokio::time::sleep(SLEEP).await; + assert_ne!(received_value, VALUE); + ztimeout!(qbl.undeclare().res_async()).unwrap(); + } + close_sessions(get_session, qbl_session).await; + close_router_session(session).await; } From 5fd083b4d4588ee6efb9fb52389748a3d744414a Mon Sep 17 00:00:00 2001 From: snehilzs Date: Thu, 11 Apr 2024 21:53:16 +0200 Subject: [PATCH 110/122] resolve merge conflicts --- Cargo.lock | 4 ++-- .../net/routing/interceptor/access_control.rs | 24 +++++++++---------- .../net/routing/interceptor/authorization.rs | 4 ++-- zenoh/tests/acl.rs | 2 +- 4 files changed, 17 insertions(+), 17 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d3aa951744..e4c9e55174 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2311,7 +2311,7 @@ version = "0.10.64" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "95a0481286a310808298130d22dd1fef0fa571e05a8f44ec801801e84b216b1f" dependencies = [ - "bitflags 2.4.0", + "bitflags 2.4.2", "cfg-if 1.0.0", "foreign-types", "libc", @@ -3082,7 +3082,7 @@ version = "0.38.32" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "65e04861e65f21776e67888bfbea442b3642beaa0138fdb1dd7a84a52dffdb89" dependencies = [ - 
"bitflags 2.4.0", + "bitflags 2.4.2", "errno 0.3.8", "libc", "linux-raw-sys 0.4.13", diff --git a/zenoh/src/net/routing/interceptor/access_control.rs b/zenoh/src/net/routing/interceptor/access_control.rs index 5c89394e87..137617a45e 100644 --- a/zenoh/src/net/routing/interceptor/access_control.rs +++ b/zenoh/src/net/routing/interceptor/access_control.rs @@ -56,18 +56,18 @@ pub(crate) fn acl_interceptor_factories( let mut policy_enforcer = PolicyEnforcer::new(); match policy_enforcer.init(acl_config) { Ok(_) => { - log::info!("[ACCESS LOG]: Access control is enabled and initialized"); + tracing::info!("[ACCESS LOG]: Access control is enabled and initialized"); res.push(Box::new(AclEnforcer { enforcer: Arc::new(policy_enforcer), })) } - Err(e) => log::error!( + Err(e) => tracing::error!( "[ACCESS LOG]: Access control enabled but not initialized with error {}!", e ), } } else { - log::info!("[ACCESS LOG]: Access Control is disabled in config!"); + tracing::info!("[ACCESS LOG]: Access Control is disabled in config!"); } Ok(res) @@ -94,7 +94,7 @@ impl InterceptorFactoryTrait for AclEnforcer { } } Err(e) => { - log::error!( + tracing::error!( "[ACCESS LOG]: Couldn't get interface list with error: {}", e ); @@ -122,7 +122,7 @@ impl InterceptorFactoryTrait for AclEnforcer { } } Err(e) => { - log::error!("[ACCESS LOG]: Failed to get zid with error :{}", e); + tracing::error!("[ACCESS LOG]: Failed to get zid with error :{}", e); (None, None) } } @@ -132,12 +132,12 @@ impl InterceptorFactoryTrait for AclEnforcer { &self, _transport: &TransportMulticast, ) -> Option { - log::debug!("[ACCESS LOG]: Transport Multicast is disabled in interceptor"); + tracing::debug!("[ACCESS LOG]: Transport Multicast is disabled in interceptor"); None } fn new_peer_multicast(&self, _transport: &TransportMulticast) -> Option { - log::debug!("[ACCESS LOG]: Peer Multicast is disabled in interceptor"); + tracing::debug!("[ACCESS LOG]: Peer Multicast is disabled in interceptor"); None } } @@ -155,7 +155,7 @@ impl InterceptorTrait for IngressAclEnforcer { .and_then(|i| match i.downcast_ref::() { Some(e) => Some(e.as_str()), None => { - log::debug!("[ACCESS LOG]: Cache content was not of type String"); + tracing::debug!("[ACCESS LOG]: Cache content was not of type String"); None } }) @@ -217,7 +217,7 @@ impl InterceptorTrait for EgressAclEnforcer { .and_then(|i| match i.downcast_ref::() { Some(e) => Some(e.as_str()), None => { - log::debug!("[ACCESS LOG]: Cache content was not of type String"); + tracing::debug!("[ACCESS LOG]: Cache content was not of type String"); None } }) @@ -287,17 +287,17 @@ pub trait AclActionMethods { continue; } Err(e) => { - log::debug!("[ACCESS LOG]: Authorization incomplete due to error {}", e); + tracing::debug!("[ACCESS LOG]: Authorization incomplete due to error {}", e); return Permission::Deny; } } } if decision == Permission::Deny { - log::debug!("[ACCESS LOG]: {} is unauthorized to {}", zid, log_msg); + tracing::debug!("[ACCESS LOG]: {} is unauthorized to {}", zid, log_msg); return Permission::Deny; } - log::trace!("[ACCESS LOG]: {} is authorized to {}", zid, log_msg); + tracing::trace!("[ACCESS LOG]: {} is authorized to {}", zid, log_msg); Permission::Allow } } diff --git a/zenoh/src/net/routing/interceptor/authorization.rs b/zenoh/src/net/routing/interceptor/authorization.rs index 955417f50b..69b85ea161 100644 --- a/zenoh/src/net/routing/interceptor/authorization.rs +++ b/zenoh/src/net/routing/interceptor/authorization.rs @@ -140,7 +140,7 @@ impl PolicyEnforcer { if self.acl_enabled { if let 
Some(rules) = &acl_config.rules { if rules.is_empty() { - log::warn!("[ACCESS LOG]: ACL ruleset in config file is empty!!!"); + tracing::warn!("[ACCESS LOG]: ACL ruleset in config file is empty!!!"); self.policy_map = PolicyMap::default(); self.subject_map = SubjectMap::default(); if self.default_permission == Permission::Deny { @@ -184,7 +184,7 @@ impl PolicyEnforcer { self.subject_map = subject_map; } } else { - log::warn!("[ACCESS LOG]: No ACL rules have been specified!!!"); + tracing::warn!("[ACCESS LOG]: No ACL rules have been specified!!!"); } } Ok(()) diff --git a/zenoh/tests/acl.rs b/zenoh/tests/acl.rs index 20d6731a8d..d8f09630d5 100644 --- a/zenoh/tests/acl.rs +++ b/zenoh/tests/acl.rs @@ -12,7 +12,7 @@ const VALUE: &str = "zenoh"; #[tokio::test(flavor = "multi_thread", worker_threads = 4)] #[cfg(not(target_os = "windows"))] async fn test_acl() { - env_logger::init(); + zenoh_util::init_log_from_env(); test_pub_sub_deny().await; test_pub_sub_allow().await; test_pub_sub_deny_then_allow().await; From c24fbf5cefec455932872cc23b3f851521d9e111 Mon Sep 17 00:00:00 2001 From: snehilzs <148767022+snehilzs@users.noreply.github.com> Date: Fri, 12 Apr 2024 11:55:20 +0200 Subject: [PATCH 111/122] Update DEFAULT_CONFIG.json5 Co-authored-by: Alexander --- DEFAULT_CONFIG.json5 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/DEFAULT_CONFIG.json5 b/DEFAULT_CONFIG.json5 index bf0956b1bf..0d14bed685 100644 --- a/DEFAULT_CONFIG.json5 +++ b/DEFAULT_CONFIG.json5 @@ -188,7 +188,7 @@ // "actions": [ // "put" // ], - // "flows":["egress","ingress"], + // "flows":["egress","ingress"], // "permission": "allow", // "key_exprs": [ // "test/thr" From ea927a5c0fa946151cf3045cd18f72fcf6bdf5c3 Mon Sep 17 00:00:00 2001 From: snehilzs Date: Mon, 15 Apr 2024 17:17:40 +0200 Subject: [PATCH 112/122] change acl to access_control for clarity --- DEFAULT_CONFIG.json5 | 8 ++++---- commons/zenoh-config/src/lib.rs | 4 +++- zenoh/src/net/routing/interceptor/mod.rs | 2 +- zenoh/tests/acl.rs | 17 ++++++++--------- 4 files changed, 16 insertions(+), 15 deletions(-) diff --git a/DEFAULT_CONFIG.json5 b/DEFAULT_CONFIG.json5 index 0d14bed685..aba05ef01a 100644 --- a/DEFAULT_CONFIG.json5 +++ b/DEFAULT_CONFIG.json5 @@ -175,8 +175,8 @@ // ], // }, // ], - // /// configure acl rules - // acl: { + // /// configure access control (ACL) rules + // access_control: { // ///[true/false] acl will be activated only if this is set to true // "enabled": false, // ///[deny/allow] default permission is deny (even if this is left empty or not specified) @@ -186,12 +186,12 @@ // [ // { // "actions": [ - // "put" + // put, get, declare_subscriber, declare_queryable // ], // "flows":["egress","ingress"], // "permission": "allow", // "key_exprs": [ - // "test/thr" + // "test/demo" // ], // "interfaces": [ // "lo0" diff --git a/commons/zenoh-config/src/lib.rs b/commons/zenoh-config/src/lib.rs index c074c47312..3711cad169 100644 --- a/commons/zenoh-config/src/lib.rs +++ b/commons/zenoh-config/src/lib.rs @@ -497,7 +497,9 @@ validated_struct::validator! { /// Configuration of the downsampling. 
downsampling: Vec, - pub acl: AclConfig { + + ///Configuration of the access control (ACL) + pub access_control: AclConfig { pub enabled: bool, pub default_permission: Permission, pub rules: Option> diff --git a/zenoh/src/net/routing/interceptor/mod.rs b/zenoh/src/net/routing/interceptor/mod.rs index 544ee1cd4a..ef8e6e0fb1 100644 --- a/zenoh/src/net/routing/interceptor/mod.rs +++ b/zenoh/src/net/routing/interceptor/mod.rs @@ -64,7 +64,7 @@ pub(crate) fn interceptor_factories(config: &Config) -> ZResult Date: Thu, 11 Apr 2024 21:36:16 +0200 Subject: [PATCH 113/122] fix: Remove sync-lockfiles workflow (#925) --- .github/workflows/sync-lockfiles.yml | 131 --------------------------- 1 file changed, 131 deletions(-) delete mode 100644 .github/workflows/sync-lockfiles.yml diff --git a/.github/workflows/sync-lockfiles.yml b/.github/workflows/sync-lockfiles.yml deleted file mode 100644 index 5240ab403f..0000000000 --- a/.github/workflows/sync-lockfiles.yml +++ /dev/null @@ -1,131 +0,0 @@ -name: Sync Cargo lockfiles - -on: - schedule: - - cron: "0 0 * * *" # At the end of every day - workflow_dispatch: - inputs: - branch: - type: string - description: The branch to sync across all depedant repositories. Defaults to the default branch on each repository - required: false - -defaults: - run: - shell: bash - -jobs: - fetch: - name: Fetch Zenoh's lockfile - runs-on: ubuntu-latest - outputs: - zenoh-head-hash: ${{ steps.info.outputs.head-hash }} - zenoh-head-date: ${{ steps.info.outputs.head-date }} - steps: - - name: Checkout Zenoh - uses: actions/checkout@v4 - with: - repository: eclipse-zenoh/zenoh - ref: ${{ inputs.branch }} - - - id: info - name: Get HEAD info - run: | - echo "head-hash=$(git log -1 --format=%h)" >> $GITHUB_OUTPUT - echo "head-date=$(git log -1 --format=%ad)" >> $GITHUB_OUTPUT - - - name: Upload lockfile - uses: actions/upload-artifact@v3 - with: - name: Cargo.lock - path: Cargo.lock - - sync: - name: Sync Cargo lockfile with ${{ matrix.dependant }} - needs: fetch - runs-on: ubuntu-latest - strategy: - fail-fast: false - matrix: - dependant: - - zenoh-c - - zenoh-python - - zenoh-java - - zenoh-kotlin - - zenoh-plugin-dds - - zenoh-plugin-mqtt - - zenoh-plugin-ros1 - - zenoh-plugin-ros2dds - - zenoh-plugin-webserver - - zenoh-backend-filesystem - - zenoh-backend-influxdb - - zenoh-backend-rocksdb - - zenoh-backend-s3 - steps: - - name: Checkout ${{ matrix.dependant }} - uses: actions/checkout@v4 - with: - repository: eclipse-zenoh/${{ matrix.dependant }} - ref: ${{ inputs.branch }} - submodules: true - token: ${{ secrets.BOT_TOKEN_WORKFLOW }} - - - name: Install Rust toolchain - # NOTE: Showing the active Rust toolchain (defined by the rust-toolchain.toml file) - # will have the side effect of installing it; if it's not installed already. - run: rustup show - - # NOTE: Not all Zenoh dependants have their Cargo manifest and lockfile - # at the repository's toplevel. The only exception being zenoh-kotlin and - # zenoh-java. Thus the need for this ugly workaround. - - name: Compute crate path of ${{ matrix.dependant }} - id: crate-path - run: | - if [[ "${{ matrix.dependant }}" =~ zenoh-(java|kotlin) ]]; then - echo "value=zenoh-jni" >> $GITHUB_OUTPUT - else - echo "value=." 
>> $GITHUB_OUTPUT - fi - - - name: Override ${{ matrix.dependant }} lockfile with Zenoh's - uses: actions/download-artifact@v3 - with: - name: Cargo.lock - path: ${{ steps.crate-path.outputs.value }} - - - name: Rectify lockfile - # NOTE: Checking the package for errors will rectify the Cargo.lock while preserving - # the dependency versions fetched from source. - run: cargo check --manifest-path ${{ steps.crate-path.outputs.value }}/Cargo.toml - - - name: Create/Update a pull request if the lockfile changed - id: cpr - # NOTE: If there is a pending PR, this action will simply update it with a forced push. - uses: peter-evans/create-pull-request@v6 - with: - title: Sync `Cargo.lock` with `eclipse-zenoh/zenoh@${{ needs.fetch.outputs.zenoh-head-hash }}` from `${{ needs.fetch.outputs.zenoh-head-date }}`" - body: > - This pull request synchronizes ${{ matrix.dependant }}'s Cargo lockfile with zenoh's. - This is done to ensure ABI compatibility between Zenoh applications, backends & plugins. - - - **Zenoh HEAD hash**: eclipse-zenoh/zenoh@${{ needs.fetch.outputs.zenoh-head-hash }} - - **Zenoh HEAD date**: ${{ needs.fetch.outputs.zenoh-head-date }} - - **Workflow run**: [${{ github.run_id }}](https://github.com/eclipse-zenoh/zenoh/actions/runs/${{ github.run_id }}) - commit-message: "chore: Sync Cargo lockfile with Zenoh's" - committer: eclipse-zenoh-bot - author: eclipse-zenoh-bot - base: ${{ inputs.branch }} - branch: eclipse-zenoh-bot/sync-lockfile - delete-branch: true - labels: dependencies - token: ${{ secrets.BOT_TOKEN_WORKFLOW }} - - - name: Enable auto merge for the pull request - if: steps.cpr.outputs.pull-request-operation == 'created' - run: > - gh pr merge "${{ steps.cpr.outputs.pull-request-number }}" - --repo "eclipse-zenoh/${{ matrix.dependant }}" - --squash - --auto - env: - GH_TOKEN: ${{ secrets.BOT_TOKEN_WORKFLOW }} From 3c708903716386568cd80b3d5ed1921290244b18 Mon Sep 17 00:00:00 2001 From: snehilzs Date: Tue, 16 Apr 2024 12:41:12 +0200 Subject: [PATCH 114/122] modify actions values in config file --- DEFAULT_CONFIG.json5 | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/DEFAULT_CONFIG.json5 b/DEFAULT_CONFIG.json5 index aba05ef01a..974f3588df 100644 --- a/DEFAULT_CONFIG.json5 +++ b/DEFAULT_CONFIG.json5 @@ -186,7 +186,7 @@ // [ // { // "actions": [ - // put, get, declare_subscriber, declare_queryable + // "put", "get", "declare_subscriber", "declare_queryable" // ], // "flows":["egress","ingress"], // "permission": "allow", @@ -341,7 +341,6 @@ shared_memory: { enabled: false, }, - /// Access control configuration auth: { /// The configuration of authentification. /// A password implies a username is required. 
From 40a2020ed251a25b93a955f03e12ff7c7241587b Mon Sep 17 00:00:00 2001 From: snehilzs Date: Tue, 16 Apr 2024 14:33:15 +0200 Subject: [PATCH 115/122] remove [ACCESS LOG] string from the logs --- .../net/routing/interceptor/access_control.rs | 27 +++++++++---------- .../net/routing/interceptor/authorization.rs | 4 +-- zenoh/tests/acl.rs | 1 + 3 files changed, 15 insertions(+), 17 deletions(-) diff --git a/zenoh/src/net/routing/interceptor/access_control.rs b/zenoh/src/net/routing/interceptor/access_control.rs index 137617a45e..e395bce9ef 100644 --- a/zenoh/src/net/routing/interceptor/access_control.rs +++ b/zenoh/src/net/routing/interceptor/access_control.rs @@ -56,18 +56,18 @@ pub(crate) fn acl_interceptor_factories( let mut policy_enforcer = PolicyEnforcer::new(); match policy_enforcer.init(acl_config) { Ok(_) => { - tracing::info!("[ACCESS LOG]: Access control is enabled and initialized"); + tracing::info!("Access control is enabled and initialized"); res.push(Box::new(AclEnforcer { enforcer: Arc::new(policy_enforcer), })) } Err(e) => tracing::error!( - "[ACCESS LOG]: Access control enabled but not initialized with error {}!", + "Access control enabled but not initialized with error {}!", e ), } } else { - tracing::info!("[ACCESS LOG]: Access Control is disabled in config!"); + tracing::info!("Access Control is disabled in config!"); } Ok(res) @@ -94,10 +94,7 @@ impl InterceptorFactoryTrait for AclEnforcer { } } Err(e) => { - tracing::error!( - "[ACCESS LOG]: Couldn't get interface list with error: {}", - e - ); + tracing::error!("Couldn't get interface list with error: {}", e); return (None, None); } } @@ -122,7 +119,7 @@ impl InterceptorFactoryTrait for AclEnforcer { } } Err(e) => { - tracing::error!("[ACCESS LOG]: Failed to get zid with error :{}", e); + tracing::error!("Failed to get zid with error :{}", e); (None, None) } } @@ -132,12 +129,12 @@ impl InterceptorFactoryTrait for AclEnforcer { &self, _transport: &TransportMulticast, ) -> Option { - tracing::debug!("[ACCESS LOG]: Transport Multicast is disabled in interceptor"); + tracing::debug!("Transport Multicast is disabled in interceptor"); None } fn new_peer_multicast(&self, _transport: &TransportMulticast) -> Option { - tracing::debug!("[ACCESS LOG]: Peer Multicast is disabled in interceptor"); + tracing::debug!("Peer Multicast is disabled in interceptor"); None } } @@ -155,7 +152,7 @@ impl InterceptorTrait for IngressAclEnforcer { .and_then(|i| match i.downcast_ref::() { Some(e) => Some(e.as_str()), None => { - tracing::debug!("[ACCESS LOG]: Cache content was not of type String"); + tracing::debug!("Cache content was not of type String"); None } }) @@ -217,7 +214,7 @@ impl InterceptorTrait for EgressAclEnforcer { .and_then(|i| match i.downcast_ref::() { Some(e) => Some(e.as_str()), None => { - tracing::debug!("[ACCESS LOG]: Cache content was not of type String"); + tracing::debug!("Cache content was not of type String"); None } }) @@ -287,17 +284,17 @@ pub trait AclActionMethods { continue; } Err(e) => { - tracing::debug!("[ACCESS LOG]: Authorization incomplete due to error {}", e); + tracing::debug!("Authorization incomplete due to error {}", e); return Permission::Deny; } } } if decision == Permission::Deny { - tracing::debug!("[ACCESS LOG]: {} is unauthorized to {}", zid, log_msg); + tracing::debug!("{} is unauthorized to {}", zid, log_msg); return Permission::Deny; } - tracing::trace!("[ACCESS LOG]: {} is authorized to {}", zid, log_msg); + tracing::trace!("{} is authorized to {}", zid, log_msg); Permission::Allow } } diff 
--git a/zenoh/src/net/routing/interceptor/authorization.rs b/zenoh/src/net/routing/interceptor/authorization.rs index 69b85ea161..775ac1493b 100644 --- a/zenoh/src/net/routing/interceptor/authorization.rs +++ b/zenoh/src/net/routing/interceptor/authorization.rs @@ -140,7 +140,7 @@ impl PolicyEnforcer { if self.acl_enabled { if let Some(rules) = &acl_config.rules { if rules.is_empty() { - tracing::warn!("[ACCESS LOG]: ACL ruleset in config file is empty!!!"); + tracing::warn!("ACL ruleset in config file is empty!!!"); self.policy_map = PolicyMap::default(); self.subject_map = SubjectMap::default(); if self.default_permission == Permission::Deny { @@ -184,7 +184,7 @@ impl PolicyEnforcer { self.subject_map = subject_map; } } else { - tracing::warn!("[ACCESS LOG]: No ACL rules have been specified!!!"); + tracing::warn!("No ACL rules have been specified!!!"); } } Ok(()) diff --git a/zenoh/tests/acl.rs b/zenoh/tests/acl.rs index 37b1b93ab5..57e6bd89c8 100644 --- a/zenoh/tests/acl.rs +++ b/zenoh/tests/acl.rs @@ -1,3 +1,4 @@ +#![cfg(not(target_os = "windows"))] use std::sync::{Arc, Mutex}; use std::time::Duration; use tokio::runtime::Handle; From 648ac0993b26a92f7db7ccd69aab102b1d06c6af Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Tue, 16 Apr 2024 14:34:38 +0200 Subject: [PATCH 116/122] Remove [ACCESS LOG] from logging --- zenoh/src/net/routing/interceptor/access_control.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/zenoh/src/net/routing/interceptor/access_control.rs b/zenoh/src/net/routing/interceptor/access_control.rs index e395bce9ef..02b12da50b 100644 --- a/zenoh/src/net/routing/interceptor/access_control.rs +++ b/zenoh/src/net/routing/interceptor/access_control.rs @@ -143,6 +143,7 @@ impl InterceptorTrait for IngressAclEnforcer { fn compute_keyexpr_cache(&self, key_expr: &KeyExpr<'_>) -> Option> { Some(Box::new(key_expr.to_string())) } + fn intercept<'a>( &self, ctx: RoutingContext, From e3ef202f61a39c68f769128096e8d06be5281b74 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Tue, 16 Apr 2024 15:06:22 +0200 Subject: [PATCH 117/122] rework access control logging --- .../net/routing/interceptor/access_control.rs | 21 +++++++++++-------- .../net/routing/interceptor/authorization.rs | 4 ++-- 2 files changed, 14 insertions(+), 11 deletions(-) diff --git a/zenoh/src/net/routing/interceptor/access_control.rs b/zenoh/src/net/routing/interceptor/access_control.rs index 02b12da50b..f942187d13 100644 --- a/zenoh/src/net/routing/interceptor/access_control.rs +++ b/zenoh/src/net/routing/interceptor/access_control.rs @@ -56,18 +56,15 @@ pub(crate) fn acl_interceptor_factories( let mut policy_enforcer = PolicyEnforcer::new(); match policy_enforcer.init(acl_config) { Ok(_) => { - tracing::info!("Access control is enabled and initialized"); + tracing::debug!("Access control is enabled"); res.push(Box::new(AclEnforcer { enforcer: Arc::new(policy_enforcer), })) } - Err(e) => tracing::error!( - "Access control enabled but not initialized with error {}!", - e - ), + Err(e) => tracing::error!("Access control inizialization error: {}", e), } } else { - tracing::info!("Access Control is disabled in config!"); + tracing::debug!("Access control is disabled"); } Ok(res) @@ -285,17 +282,23 @@ pub trait AclActionMethods { continue; } Err(e) => { - tracing::debug!("Authorization incomplete due to error {}", e); + tracing::debug!( + "{} has an authorization error to {} on {}: {}", + zid, + log_msg, + key_expr, + e + ); return Permission::Deny; } } } if decision == Permission::Deny { - tracing::debug!("{} 
is unauthorized to {}", zid, log_msg); + tracing::debug!("{} is unauthorized to {} on {}", zid, log_msg, key_expr); return Permission::Deny; } - tracing::trace!("{} is authorized to {}", zid, log_msg); + tracing::trace!("{} is authorized to {} on {}", zid, log_msg, key_expr); Permission::Allow } } diff --git a/zenoh/src/net/routing/interceptor/authorization.rs b/zenoh/src/net/routing/interceptor/authorization.rs index 775ac1493b..a6df62a4bb 100644 --- a/zenoh/src/net/routing/interceptor/authorization.rs +++ b/zenoh/src/net/routing/interceptor/authorization.rs @@ -140,7 +140,7 @@ impl PolicyEnforcer { if self.acl_enabled { if let Some(rules) = &acl_config.rules { if rules.is_empty() { - tracing::warn!("ACL ruleset in config file is empty!!!"); + tracing::warn!("Access control rules are empty in config file"); self.policy_map = PolicyMap::default(); self.subject_map = SubjectMap::default(); if self.default_permission == Permission::Deny { @@ -184,7 +184,7 @@ impl PolicyEnforcer { self.subject_map = subject_map; } } else { - tracing::warn!("No ACL rules have been specified!!!"); + tracing::warn!("Access control rules are empty in config file"); } } Ok(()) From 93163f6d1505649cf7ef7dd9aa8c821b2c530b9a Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Tue, 16 Apr 2024 15:58:29 +0200 Subject: [PATCH 118/122] Add ingress/egress logs --- .../net/routing/interceptor/access_control.rs | 36 ++++++++++++------- .../net/routing/interceptor/authorization.rs | 4 +-- 2 files changed, 26 insertions(+), 14 deletions(-) diff --git a/zenoh/src/net/routing/interceptor/access_control.rs b/zenoh/src/net/routing/interceptor/access_control.rs index f942187d13..7b0cce05cc 100644 --- a/zenoh/src/net/routing/interceptor/access_control.rs +++ b/zenoh/src/net/routing/interceptor/access_control.rs @@ -161,7 +161,7 @@ impl InterceptorTrait for IngressAclEnforcer { payload: PushBody::Put(_), .. }) => { - if self.action(Action::Put, "Put", key_expr) == Permission::Deny { + if self.action(Action::Put, "Put (ingress)", key_expr) == Permission::Deny { return None; } } @@ -169,7 +169,7 @@ impl InterceptorTrait for IngressAclEnforcer { payload: RequestBody::Query(_), .. }) => { - if self.action(Action::Get, "Get", key_expr) == Permission::Deny { + if self.action(Action::Get, "Get (ingress)", key_expr) == Permission::Deny { return None; } } @@ -177,8 +177,11 @@ impl InterceptorTrait for IngressAclEnforcer { body: DeclareBody::DeclareSubscriber(_), .. }) => { - if self.action(Action::DeclareSubscriber, "Declare Subscriber", key_expr) - == Permission::Deny + if self.action( + Action::DeclareSubscriber, + "Declare Subscriber (ingress)", + key_expr, + ) == Permission::Deny { return None; } @@ -187,8 +190,11 @@ impl InterceptorTrait for IngressAclEnforcer { body: DeclareBody::DeclareQueryable(_), .. }) => { - if self.action(Action::DeclareQueryable, "Declare Queryable", key_expr) - == Permission::Deny + if self.action( + Action::DeclareQueryable, + "Declare Queryable (ingress)", + key_expr, + ) == Permission::Deny { return None; } @@ -223,7 +229,7 @@ impl InterceptorTrait for EgressAclEnforcer { payload: PushBody::Put(_), .. }) => { - if self.action(Action::Put, "Put", key_expr) == Permission::Deny { + if self.action(Action::Put, "Put (egress)", key_expr) == Permission::Deny { return None; } } @@ -231,7 +237,7 @@ impl InterceptorTrait for EgressAclEnforcer { payload: RequestBody::Query(_), .. 
}) => { - if self.action(Action::Get, "Get", key_expr) == Permission::Deny { + if self.action(Action::Get, "Get (egress)", key_expr) == Permission::Deny { return None; } } @@ -239,8 +245,11 @@ impl InterceptorTrait for EgressAclEnforcer { body: DeclareBody::DeclareSubscriber(_), .. }) => { - if self.action(Action::DeclareSubscriber, "Declare Subscriber", key_expr) - == Permission::Deny + if self.action( + Action::DeclareSubscriber, + "Declare Subscriber (egress)", + key_expr, + ) == Permission::Deny { return None; } @@ -249,8 +258,11 @@ impl InterceptorTrait for EgressAclEnforcer { body: DeclareBody::DeclareQueryable(_), .. }) => { - if self.action(Action::DeclareQueryable, "Declare Queryable", key_expr) - == Permission::Deny + if self.action( + Action::DeclareQueryable, + "Declare Queryable (egress)", + key_expr, + ) == Permission::Deny { return None; } diff --git a/zenoh/src/net/routing/interceptor/authorization.rs b/zenoh/src/net/routing/interceptor/authorization.rs index a6df62a4bb..fb0dc8340e 100644 --- a/zenoh/src/net/routing/interceptor/authorization.rs +++ b/zenoh/src/net/routing/interceptor/authorization.rs @@ -63,18 +63,18 @@ struct ActionPolicy { impl ActionPolicy { fn action(&self, action: Action) -> &PermissionPolicy { match action { - Action::DeclareQueryable => &self.declare_queryable, Action::Get => &self.get, Action::Put => &self.put, Action::DeclareSubscriber => &self.declare_subscriber, + Action::DeclareQueryable => &self.declare_queryable, } } fn action_mut(&mut self, action: Action) -> &mut PermissionPolicy { match action { - Action::DeclareQueryable => &mut self.declare_queryable, Action::Get => &mut self.get, Action::Put => &mut self.put, Action::DeclareSubscriber => &mut self.declare_subscriber, + Action::DeclareQueryable => &mut self.declare_queryable, } } } From 79de84c89a42e6c9d31a24e5d58553714ac28dd0 Mon Sep 17 00:00:00 2001 From: snehilzs Date: Tue, 16 Apr 2024 17:05:02 +0200 Subject: [PATCH 119/122] add interface name in access logs --- .../net/routing/interceptor/access_control.rs | 53 ++++++++++++------- .../net/routing/interceptor/authorization.rs | 6 +-- 2 files changed, 38 insertions(+), 21 deletions(-) diff --git a/zenoh/src/net/routing/interceptor/access_control.rs b/zenoh/src/net/routing/interceptor/access_control.rs index 7b0cce05cc..fc2c5b8d3a 100644 --- a/zenoh/src/net/routing/interceptor/access_control.rs +++ b/zenoh/src/net/routing/interceptor/access_control.rs @@ -36,14 +36,19 @@ use zenoh_transport::{multicast::TransportMulticast, unicast::TransportUnicast}; pub struct AclEnforcer { enforcer: Arc, } +#[derive(Clone, Debug)] +pub struct Interface { + id: usize, + name: String, +} struct EgressAclEnforcer { policy_enforcer: Arc, - interface_list: Vec, + interface_list: Vec, zid: ZenohId, } struct IngressAclEnforcer { policy_enforcer: Arc, - interface_list: Vec, + interface_list: Vec, zid: ZenohId, } @@ -77,15 +82,18 @@ impl InterceptorFactoryTrait for AclEnforcer { ) -> (Option, Option) { match transport.get_zid() { Ok(zid) => { - let mut interface_list: Vec = Vec::new(); + let mut interface_list: Vec = Vec::new(); match transport.get_links() { Ok(links) => { for link in links { let enforcer = self.enforcer.clone(); for face in link.interfaces { - let subject = &Subject::Interface(face); + let subject = &Subject::Interface(face.clone()); if let Some(val) = enforcer.subject_map.get(subject) { - interface_list.push(*val); + interface_list.push(Interface { + id: *val, + name: face, + }); } } } @@ -274,29 +282,44 @@ impl InterceptorTrait for 
EgressAclEnforcer { } pub trait AclActionMethods { fn policy_enforcer(&self) -> Arc; - fn interface_list(&self) -> Vec; + fn interface_list(&self) -> Vec; fn zid(&self) -> ZenohId; fn flow(&self) -> InterceptorFlow; - fn action(&self, action: Action, log_msg: &str, key_expr: &str) -> Permission { let policy_enforcer = self.policy_enforcer(); let interface_list = self.interface_list(); let zid = self.zid(); let mut decision = policy_enforcer.default_permission; for subject in &interface_list { - match policy_enforcer.policy_decision_point(*subject, self.flow(), action, key_expr) { + match policy_enforcer.policy_decision_point(subject.id, self.flow(), action, key_expr) { Ok(Permission::Allow) => { + tracing::trace!( + "{} on {} is authorized to {} on {}", + zid, + subject.name, + log_msg, + key_expr + ); decision = Permission::Allow; break; } Ok(Permission::Deny) => { + tracing::trace!( + "{} on {} is unauthorized to {} on {}", + zid, + subject.name, + log_msg, + key_expr + ); + decision = Permission::Deny; continue; } Err(e) => { tracing::debug!( - "{} has an authorization error to {} on {}: {}", + "{} on {} has an authorization error to {} on {}: {}", zid, + subject.name, log_msg, key_expr, e @@ -305,13 +328,7 @@ pub trait AclActionMethods { } } } - - if decision == Permission::Deny { - tracing::debug!("{} is unauthorized to {} on {}", zid, log_msg, key_expr); - return Permission::Deny; - } - tracing::trace!("{} is authorized to {} on {}", zid, log_msg, key_expr); - Permission::Allow + decision } } @@ -320,7 +337,7 @@ impl AclActionMethods for EgressAclEnforcer { self.policy_enforcer.clone() } - fn interface_list(&self) -> Vec { + fn interface_list(&self) -> Vec { self.interface_list.clone() } @@ -337,7 +354,7 @@ impl AclActionMethods for IngressAclEnforcer { self.policy_enforcer.clone() } - fn interface_list(&self) -> Vec { + fn interface_list(&self) -> Vec { self.interface_list.clone() } diff --git a/zenoh/src/net/routing/interceptor/authorization.rs b/zenoh/src/net/routing/interceptor/authorization.rs index fb0dc8340e..61c1cba217 100644 --- a/zenoh/src/net/routing/interceptor/authorization.rs +++ b/zenoh/src/net/routing/interceptor/authorization.rs @@ -27,8 +27,8 @@ use zenoh_keyexpr::keyexpr_tree::{IKeyExprTree, IKeyExprTreeMut, KeBoxTree}; use zenoh_result::ZResult; type PolicyForSubject = FlowPolicy; -type PolicyMap = HashMap; -type SubjectMap = HashMap; +type PolicyMap = HashMap; +type SubjectMap = HashMap; type KeTreeRule = KeBoxTree; #[derive(Default)] @@ -236,7 +236,7 @@ impl PolicyEnforcer { pub fn policy_decision_point( &self, - subject: i32, + subject: usize, flow: InterceptorFlow, action: Action, key_expr: &str, From b9f7c3e453c7322e1a9ca9d2f0903f97c55d7de1 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Tue, 16 Apr 2024 19:01:45 +0200 Subject: [PATCH 120/122] Fix log level --- zenoh/src/net/routing/interceptor/access_control.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/zenoh/src/net/routing/interceptor/access_control.rs b/zenoh/src/net/routing/interceptor/access_control.rs index fc2c5b8d3a..1b0876160a 100644 --- a/zenoh/src/net/routing/interceptor/access_control.rs +++ b/zenoh/src/net/routing/interceptor/access_control.rs @@ -304,7 +304,7 @@ pub trait AclActionMethods { break; } Ok(Permission::Deny) => { - tracing::trace!( + tracing::debug!( "{} on {} is unauthorized to {} on {}", zid, subject.name, From 33ab25148ebe463af3ed2d12cd0db93e47b0f719 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Tue, 16 Apr 2024 19:05:39 +0200 Subject: [PATCH 
121/122] Add missing header --- zenoh/tests/acl.rs | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/zenoh/tests/acl.rs b/zenoh/tests/acl.rs index 57e6bd89c8..827e1f17c6 100644 --- a/zenoh/tests/acl.rs +++ b/zenoh/tests/acl.rs @@ -1,3 +1,16 @@ +// +// Copyright (c) 2024 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// #![cfg(not(target_os = "windows"))] use std::sync::{Arc, Mutex}; use std::time::Duration; From b5617b7c697a4444e2459fdfeea6c2ef22957976 Mon Sep 17 00:00:00 2001 From: Luca Cominardi Date: Tue, 16 Apr 2024 19:17:43 +0200 Subject: [PATCH 122/122] Add missing header files --- zenoh/tests/acl.rs | 730 ++++++++++++++++---------------- zenoh/tests/attachments.rs | 13 + zenoh/tests/connection_retry.rs | 14 +- zenoh/tests/formatters.rs | 15 +- zenoh/tests/interceptors.rs | 13 + 5 files changed, 419 insertions(+), 366 deletions(-) diff --git a/zenoh/tests/acl.rs b/zenoh/tests/acl.rs index 827e1f17c6..fec29515db 100644 --- a/zenoh/tests/acl.rs +++ b/zenoh/tests/acl.rs @@ -11,114 +11,115 @@ // Contributors: // ZettaScale Zenoh Team, // -#![cfg(not(target_os = "windows"))] -use std::sync::{Arc, Mutex}; -use std::time::Duration; -use tokio::runtime::Handle; -use zenoh::prelude::r#async::*; -use zenoh_core::{zlock, ztimeout}; - -const TIMEOUT: Duration = Duration::from_secs(60); -const SLEEP: Duration = Duration::from_secs(1); -const KEY_EXPR: &str = "test/demo"; -const VALUE: &str = "zenoh"; - -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] -async fn test_acl() { - zenoh_util::init_log_from_env(); - test_pub_sub_deny().await; - test_pub_sub_allow().await; - test_pub_sub_deny_then_allow().await; - test_pub_sub_allow_then_deny().await; - test_get_qbl_deny().await; - test_get_qbl_allow().await; - test_get_qbl_allow_then_deny().await; - test_get_qbl_deny_then_allow().await; -} -async fn get_basic_router_config() -> Config { - let mut config = config::default(); - config.set_mode(Some(WhatAmI::Router)).unwrap(); - config.listen.endpoints = vec!["tcp/127.0.0.1:7447".parse().unwrap()]; - config.scouting.multicast.set_enabled(Some(false)).unwrap(); - config -} +#![cfg(target_family = "unix")] +mod test { + use std::sync::{Arc, Mutex}; + use std::time::Duration; + use tokio::runtime::Handle; + use zenoh::prelude::r#async::*; + use zenoh_core::{zlock, ztimeout}; + + const TIMEOUT: Duration = Duration::from_secs(60); + const SLEEP: Duration = Duration::from_secs(1); + const KEY_EXPR: &str = "test/demo"; + const VALUE: &str = "zenoh"; + + #[tokio::test(flavor = "multi_thread", worker_threads = 4)] + async fn test_acl() { + zenoh_util::init_log_from_env(); + test_pub_sub_deny().await; + test_pub_sub_allow().await; + test_pub_sub_deny_then_allow().await; + test_pub_sub_allow_then_deny().await; + test_get_qbl_deny().await; + test_get_qbl_allow().await; + test_get_qbl_allow_then_deny().await; + test_get_qbl_deny_then_allow().await; + } + async fn get_basic_router_config() -> Config { + let mut config = config::default(); + config.set_mode(Some(WhatAmI::Router)).unwrap(); + config.listen.endpoints = vec!["tcp/127.0.0.1:7447".parse().unwrap()]; + config.scouting.multicast.set_enabled(Some(false)).unwrap(); + 
config + } -async fn close_router_session(s: Session) { - println!("Closing router session"); - ztimeout!(s.close().res_async()).unwrap(); -} + async fn close_router_session(s: Session) { + println!("Closing router session"); + ztimeout!(s.close().res_async()).unwrap(); + } -async fn get_client_sessions() -> (Session, Session) { - println!("Opening client sessions"); - let config = config::client(["tcp/127.0.0.1:7447".parse::().unwrap()]); - let s01 = ztimeout!(zenoh::open(config).res_async()).unwrap(); - let config = config::client(["tcp/127.0.0.1:7447".parse::().unwrap()]); - let s02 = ztimeout!(zenoh::open(config).res_async()).unwrap(); - (s01, s02) -} + async fn get_client_sessions() -> (Session, Session) { + println!("Opening client sessions"); + let config = config::client(["tcp/127.0.0.1:7447".parse::().unwrap()]); + let s01 = ztimeout!(zenoh::open(config).res_async()).unwrap(); + let config = config::client(["tcp/127.0.0.1:7447".parse::().unwrap()]); + let s02 = ztimeout!(zenoh::open(config).res_async()).unwrap(); + (s01, s02) + } -async fn close_sessions(s01: Session, s02: Session) { - println!("Closing client sessions"); - ztimeout!(s01.close().res_async()).unwrap(); - ztimeout!(s02.close().res_async()).unwrap(); -} + async fn close_sessions(s01: Session, s02: Session) { + println!("Closing client sessions"); + ztimeout!(s01.close().res_async()).unwrap(); + ztimeout!(s02.close().res_async()).unwrap(); + } -async fn test_pub_sub_deny() { - println!("test_pub_sub_deny"); + async fn test_pub_sub_deny() { + println!("test_pub_sub_deny"); - let mut config_router = get_basic_router_config().await; - config_router - .insert_json5( - "access_control", - r#"{ + let mut config_router = get_basic_router_config().await; + config_router + .insert_json5( + "access_control", + r#"{ "enabled": true, "default_permission": "deny", "rules": [ ] }"#, - ) - .unwrap(); - println!("Opening router session"); - - let session = ztimeout!(zenoh::open(config_router).res_async()).unwrap(); - - let (sub_session, pub_session) = get_client_sessions().await; - { - let publisher = pub_session - .declare_publisher(KEY_EXPR) - .res_async() - .await + ) .unwrap(); - let received_value = Arc::new(Mutex::new(String::new())); - let temp_recv_value = received_value.clone(); - let subscriber = sub_session - .declare_subscriber(KEY_EXPR) - .callback(move |sample| { - let mut temp_value = zlock!(temp_recv_value); - *temp_value = sample.value.to_string(); - }) - .res_async() - .await - .unwrap(); - - tokio::time::sleep(SLEEP).await; - publisher.put(VALUE).res_async().await.unwrap(); - tokio::time::sleep(SLEEP).await; - assert_ne!(*zlock!(received_value), VALUE); - ztimeout!(subscriber.undeclare().res_async()).unwrap(); + println!("Opening router session"); + + let session = ztimeout!(zenoh::open(config_router).res_async()).unwrap(); + + let (sub_session, pub_session) = get_client_sessions().await; + { + let publisher = pub_session + .declare_publisher(KEY_EXPR) + .res_async() + .await + .unwrap(); + let received_value = Arc::new(Mutex::new(String::new())); + let temp_recv_value = received_value.clone(); + let subscriber = sub_session + .declare_subscriber(KEY_EXPR) + .callback(move |sample| { + let mut temp_value = zlock!(temp_recv_value); + *temp_value = sample.value.to_string(); + }) + .res_async() + .await + .unwrap(); + + tokio::time::sleep(SLEEP).await; + publisher.put(VALUE).res_async().await.unwrap(); + tokio::time::sleep(SLEEP).await; + assert_ne!(*zlock!(received_value), VALUE); + 
ztimeout!(subscriber.undeclare().res_async()).unwrap(); + } + close_sessions(sub_session, pub_session).await; + close_router_session(session).await; } - close_sessions(sub_session, pub_session).await; - close_router_session(session).await; -} -async fn test_pub_sub_allow() { - println!("test_pub_sub_allow"); - let mut config_router = get_basic_router_config().await; - config_router - .insert_json5( - "access_control", - r#"{ + async fn test_pub_sub_allow() { + println!("test_pub_sub_allow"); + let mut config_router = get_basic_router_config().await; + config_router + .insert_json5( + "access_control", + r#"{ "enabled": false, "default_permission": "allow", @@ -127,46 +128,46 @@ async fn test_pub_sub_allow() { ] }"#, - ) - .unwrap(); - println!("Opening router session"); - - let session = ztimeout!(zenoh::open(config_router).res_async()).unwrap(); - let (sub_session, pub_session) = get_client_sessions().await; - { - let publisher = ztimeout!(pub_session.declare_publisher(KEY_EXPR).res_async()).unwrap(); - let received_value = Arc::new(Mutex::new(String::new())); - let temp_recv_value = received_value.clone(); - let subscriber = ztimeout!(sub_session - .declare_subscriber(KEY_EXPR) - .callback(move |sample| { - let mut temp_value = zlock!(temp_recv_value); - *temp_value = sample.value.to_string(); - }) - .res_async()) - .unwrap(); - - tokio::time::sleep(SLEEP).await; - - ztimeout!(publisher.put(VALUE).res_async()).unwrap(); - tokio::time::sleep(SLEEP).await; - - assert_eq!(*zlock!(received_value), VALUE); - ztimeout!(subscriber.undeclare().res_async()).unwrap(); - } + ) + .unwrap(); + println!("Opening router session"); + + let session = ztimeout!(zenoh::open(config_router).res_async()).unwrap(); + let (sub_session, pub_session) = get_client_sessions().await; + { + let publisher = ztimeout!(pub_session.declare_publisher(KEY_EXPR).res_async()).unwrap(); + let received_value = Arc::new(Mutex::new(String::new())); + let temp_recv_value = received_value.clone(); + let subscriber = ztimeout!(sub_session + .declare_subscriber(KEY_EXPR) + .callback(move |sample| { + let mut temp_value = zlock!(temp_recv_value); + *temp_value = sample.value.to_string(); + }) + .res_async()) + .unwrap(); - close_sessions(sub_session, pub_session).await; - close_router_session(session).await; -} + tokio::time::sleep(SLEEP).await; + + ztimeout!(publisher.put(VALUE).res_async()).unwrap(); + tokio::time::sleep(SLEEP).await; -async fn test_pub_sub_allow_then_deny() { - println!("test_pub_sub_allow_then_deny"); + assert_eq!(*zlock!(received_value), VALUE); + ztimeout!(subscriber.undeclare().res_async()).unwrap(); + } - let mut config_router = get_basic_router_config().await; - config_router - .insert_json5( - "access_control", - r#" + close_sessions(sub_session, pub_session).await; + close_router_session(session).await; + } + + async fn test_pub_sub_allow_then_deny() { + println!("test_pub_sub_allow_then_deny"); + + let mut config_router = get_basic_router_config().await; + config_router + .insert_json5( + "access_control", + r#" {"enabled": true, "default_permission": "allow", "rules": @@ -188,45 +189,45 @@ async fn test_pub_sub_allow_then_deny() { ] } "#, - ) - .unwrap(); - println!("Opening router session"); - - let session = ztimeout!(zenoh::open(config_router).res_async()).unwrap(); - let (sub_session, pub_session) = get_client_sessions().await; - { - let publisher = ztimeout!(pub_session.declare_publisher(KEY_EXPR).res_async()).unwrap(); - let received_value = Arc::new(Mutex::new(String::new())); - let 
temp_recv_value = received_value.clone(); - let subscriber = ztimeout!(sub_session - .declare_subscriber(KEY_EXPR) - .callback(move |sample| { - let mut temp_value = zlock!(temp_recv_value); - *temp_value = sample.value.to_string(); - }) - .res_async()) - .unwrap(); - - tokio::time::sleep(SLEEP).await; - - ztimeout!(publisher.put(VALUE).res_async()).unwrap(); - tokio::time::sleep(SLEEP).await; - - assert_ne!(*zlock!(received_value), VALUE); - ztimeout!(subscriber.undeclare().res_async()).unwrap(); + ) + .unwrap(); + println!("Opening router session"); + + let session = ztimeout!(zenoh::open(config_router).res_async()).unwrap(); + let (sub_session, pub_session) = get_client_sessions().await; + { + let publisher = ztimeout!(pub_session.declare_publisher(KEY_EXPR).res_async()).unwrap(); + let received_value = Arc::new(Mutex::new(String::new())); + let temp_recv_value = received_value.clone(); + let subscriber = ztimeout!(sub_session + .declare_subscriber(KEY_EXPR) + .callback(move |sample| { + let mut temp_value = zlock!(temp_recv_value); + *temp_value = sample.value.to_string(); + }) + .res_async()) + .unwrap(); + + tokio::time::sleep(SLEEP).await; + + ztimeout!(publisher.put(VALUE).res_async()).unwrap(); + tokio::time::sleep(SLEEP).await; + + assert_ne!(*zlock!(received_value), VALUE); + ztimeout!(subscriber.undeclare().res_async()).unwrap(); + } + close_sessions(sub_session, pub_session).await; + close_router_session(session).await; } - close_sessions(sub_session, pub_session).await; - close_router_session(session).await; -} -async fn test_pub_sub_deny_then_allow() { - println!("test_pub_sub_deny_then_allow"); + async fn test_pub_sub_deny_then_allow() { + println!("test_pub_sub_deny_then_allow"); - let mut config_router = get_basic_router_config().await; - config_router - .insert_json5( - "access_control", - r#" + let mut config_router = get_basic_router_config().await; + config_router + .insert_json5( + "access_control", + r#" {"enabled": true, "default_permission": "deny", "rules": @@ -248,157 +249,157 @@ async fn test_pub_sub_deny_then_allow() { ] } "#, - ) - .unwrap(); - println!("Opening router session"); - - let session = ztimeout!(zenoh::open(config_router).res_async()).unwrap(); - let (sub_session, pub_session) = get_client_sessions().await; - { - let publisher = ztimeout!(pub_session.declare_publisher(KEY_EXPR).res_async()).unwrap(); - let received_value = Arc::new(Mutex::new(String::new())); - let temp_recv_value = received_value.clone(); - let subscriber = ztimeout!(sub_session - .declare_subscriber(KEY_EXPR) - .callback(move |sample| { - let mut temp_value = zlock!(temp_recv_value); - *temp_value = sample.value.to_string(); - }) - .res_async()) - .unwrap(); - - tokio::time::sleep(SLEEP).await; - - ztimeout!(publisher.put(VALUE).res_async()).unwrap(); - tokio::time::sleep(SLEEP).await; - - assert_eq!(*zlock!(received_value), VALUE); - ztimeout!(subscriber.undeclare().res_async()).unwrap(); + ) + .unwrap(); + println!("Opening router session"); + + let session = ztimeout!(zenoh::open(config_router).res_async()).unwrap(); + let (sub_session, pub_session) = get_client_sessions().await; + { + let publisher = ztimeout!(pub_session.declare_publisher(KEY_EXPR).res_async()).unwrap(); + let received_value = Arc::new(Mutex::new(String::new())); + let temp_recv_value = received_value.clone(); + let subscriber = ztimeout!(sub_session + .declare_subscriber(KEY_EXPR) + .callback(move |sample| { + let mut temp_value = zlock!(temp_recv_value); + *temp_value = sample.value.to_string(); + }) + 
.res_async()) + .unwrap(); + + tokio::time::sleep(SLEEP).await; + + ztimeout!(publisher.put(VALUE).res_async()).unwrap(); + tokio::time::sleep(SLEEP).await; + + assert_eq!(*zlock!(received_value), VALUE); + ztimeout!(subscriber.undeclare().res_async()).unwrap(); + } + close_sessions(sub_session, pub_session).await; + close_router_session(session).await; } - close_sessions(sub_session, pub_session).await; - close_router_session(session).await; -} -async fn test_get_qbl_deny() { - println!("test_get_qbl_deny"); + async fn test_get_qbl_deny() { + println!("test_get_qbl_deny"); - let mut config_router = get_basic_router_config().await; - config_router - .insert_json5( - "access_control", - r#"{ + let mut config_router = get_basic_router_config().await; + config_router + .insert_json5( + "access_control", + r#"{ "enabled": true, "default_permission": "deny", "rules": [ ] }"#, - ) - .unwrap(); - println!("Opening router session"); - - let session = ztimeout!(zenoh::open(config_router).res_async()).unwrap(); - - let (get_session, qbl_session) = get_client_sessions().await; - { - let mut received_value = String::new(); - - let qbl = ztimeout!(qbl_session - .declare_queryable(KEY_EXPR) - .callback(move |sample| { - let rep = Sample::try_from(KEY_EXPR, VALUE).unwrap(); - tokio::task::block_in_place(move || { - Handle::current().block_on(async move { - ztimeout!(sample.reply(Ok(rep)).res_async()).unwrap() + ) + .unwrap(); + println!("Opening router session"); + + let session = ztimeout!(zenoh::open(config_router).res_async()).unwrap(); + + let (get_session, qbl_session) = get_client_sessions().await; + { + let mut received_value = String::new(); + + let qbl = ztimeout!(qbl_session + .declare_queryable(KEY_EXPR) + .callback(move |sample| { + let rep = Sample::try_from(KEY_EXPR, VALUE).unwrap(); + tokio::task::block_in_place(move || { + Handle::current().block_on(async move { + ztimeout!(sample.reply(Ok(rep)).res_async()).unwrap() + }); }); - }); - }) - .res_async()) - .unwrap(); - - tokio::time::sleep(SLEEP).await; - let recv_reply = ztimeout!(get_session.get(KEY_EXPR).res_async()).unwrap(); - while let Ok(reply) = ztimeout!(recv_reply.recv_async()) { - match reply.sample { - Ok(sample) => { - received_value = sample.value.to_string(); - break; + }) + .res_async()) + .unwrap(); + + tokio::time::sleep(SLEEP).await; + let recv_reply = ztimeout!(get_session.get(KEY_EXPR).res_async()).unwrap(); + while let Ok(reply) = ztimeout!(recv_reply.recv_async()) { + match reply.sample { + Ok(sample) => { + received_value = sample.value.to_string(); + break; + } + Err(e) => println!("Error : {}", e), } - Err(e) => println!("Error : {}", e), } + tokio::time::sleep(SLEEP).await; + assert_ne!(received_value, VALUE); + ztimeout!(qbl.undeclare().res_async()).unwrap(); } - tokio::time::sleep(SLEEP).await; - assert_ne!(received_value, VALUE); - ztimeout!(qbl.undeclare().res_async()).unwrap(); + close_sessions(get_session, qbl_session).await; + close_router_session(session).await; } - close_sessions(get_session, qbl_session).await; - close_router_session(session).await; -} -async fn test_get_qbl_allow() { - println!("test_get_qbl_allow"); + async fn test_get_qbl_allow() { + println!("test_get_qbl_allow"); - let mut config_router = get_basic_router_config().await; - config_router - .insert_json5( - "access_control", - r#"{ + let mut config_router = get_basic_router_config().await; + config_router + .insert_json5( + "access_control", + r#"{ "enabled": true, "default_permission": "allow", "rules": [ ] }"#, - ) - .unwrap(); - 
println!("Opening router session"); - - let session = ztimeout!(zenoh::open(config_router).res_async()).unwrap(); - - let (get_session, qbl_session) = get_client_sessions().await; - { - let mut received_value = String::new(); - - let qbl = ztimeout!(qbl_session - .declare_queryable(KEY_EXPR) - .callback(move |sample| { - let rep = Sample::try_from(KEY_EXPR, VALUE).unwrap(); - tokio::task::block_in_place(move || { - Handle::current().block_on(async move { - ztimeout!(sample.reply(Ok(rep)).res_async()).unwrap() + ) + .unwrap(); + println!("Opening router session"); + + let session = ztimeout!(zenoh::open(config_router).res_async()).unwrap(); + + let (get_session, qbl_session) = get_client_sessions().await; + { + let mut received_value = String::new(); + + let qbl = ztimeout!(qbl_session + .declare_queryable(KEY_EXPR) + .callback(move |sample| { + let rep = Sample::try_from(KEY_EXPR, VALUE).unwrap(); + tokio::task::block_in_place(move || { + Handle::current().block_on(async move { + ztimeout!(sample.reply(Ok(rep)).res_async()).unwrap() + }); }); - }); - }) - .res_async()) - .unwrap(); - - tokio::time::sleep(SLEEP).await; - let recv_reply = ztimeout!(get_session.get(KEY_EXPR).res_async()).unwrap(); - while let Ok(reply) = ztimeout!(recv_reply.recv_async()) { - match reply.sample { - Ok(sample) => { - received_value = sample.value.to_string(); - break; + }) + .res_async()) + .unwrap(); + + tokio::time::sleep(SLEEP).await; + let recv_reply = ztimeout!(get_session.get(KEY_EXPR).res_async()).unwrap(); + while let Ok(reply) = ztimeout!(recv_reply.recv_async()) { + match reply.sample { + Ok(sample) => { + received_value = sample.value.to_string(); + break; + } + Err(e) => println!("Error : {}", e), } - Err(e) => println!("Error : {}", e), } + tokio::time::sleep(SLEEP).await; + assert_eq!(received_value, VALUE); + ztimeout!(qbl.undeclare().res_async()).unwrap(); } - tokio::time::sleep(SLEEP).await; - assert_eq!(received_value, VALUE); - ztimeout!(qbl.undeclare().res_async()).unwrap(); + close_sessions(get_session, qbl_session).await; + close_router_session(session).await; } - close_sessions(get_session, qbl_session).await; - close_router_session(session).await; -} -async fn test_get_qbl_deny_then_allow() { - println!("test_get_qbl_deny_then_allow"); + async fn test_get_qbl_deny_then_allow() { + println!("test_get_qbl_deny_then_allow"); - let mut config_router = get_basic_router_config().await; - config_router - .insert_json5( - "access_control", - r#" + let mut config_router = get_basic_router_config().await; + config_router + .insert_json5( + "access_control", + r#" {"enabled": true, "default_permission": "deny", "rules": @@ -419,57 +420,57 @@ async fn test_get_qbl_deny_then_allow() { ] } "#, - ) - .unwrap(); + ) + .unwrap(); - println!("Opening router session"); + println!("Opening router session"); - let session = ztimeout!(zenoh::open(config_router).res_async()).unwrap(); + let session = ztimeout!(zenoh::open(config_router).res_async()).unwrap(); - let (get_session, qbl_session) = get_client_sessions().await; - { - let mut received_value = String::new(); + let (get_session, qbl_session) = get_client_sessions().await; + { + let mut received_value = String::new(); - let qbl = ztimeout!(qbl_session - .declare_queryable(KEY_EXPR) - .callback(move |sample| { - let rep = Sample::try_from(KEY_EXPR, VALUE).unwrap(); - tokio::task::block_in_place(move || { - Handle::current().block_on(async move { - ztimeout!(sample.reply(Ok(rep)).res_async()).unwrap() + let qbl = ztimeout!(qbl_session + 
.declare_queryable(KEY_EXPR) + .callback(move |sample| { + let rep = Sample::try_from(KEY_EXPR, VALUE).unwrap(); + tokio::task::block_in_place(move || { + Handle::current().block_on(async move { + ztimeout!(sample.reply(Ok(rep)).res_async()).unwrap() + }); }); - }); - }) - .res_async()) - .unwrap(); - - tokio::time::sleep(SLEEP).await; - let recv_reply = ztimeout!(get_session.get(KEY_EXPR).res_async()).unwrap(); - while let Ok(reply) = ztimeout!(recv_reply.recv_async()) { - match reply.sample { - Ok(sample) => { - received_value = sample.value.to_string(); - break; + }) + .res_async()) + .unwrap(); + + tokio::time::sleep(SLEEP).await; + let recv_reply = ztimeout!(get_session.get(KEY_EXPR).res_async()).unwrap(); + while let Ok(reply) = ztimeout!(recv_reply.recv_async()) { + match reply.sample { + Ok(sample) => { + received_value = sample.value.to_string(); + break; + } + Err(e) => println!("Error : {}", e), } - Err(e) => println!("Error : {}", e), } + tokio::time::sleep(SLEEP).await; + assert_eq!(received_value, VALUE); + ztimeout!(qbl.undeclare().res_async()).unwrap(); } - tokio::time::sleep(SLEEP).await; - assert_eq!(received_value, VALUE); - ztimeout!(qbl.undeclare().res_async()).unwrap(); + close_sessions(get_session, qbl_session).await; + close_router_session(session).await; } - close_sessions(get_session, qbl_session).await; - close_router_session(session).await; -} -async fn test_get_qbl_allow_then_deny() { - println!("test_get_qbl_allow_then_deny"); + async fn test_get_qbl_allow_then_deny() { + println!("test_get_qbl_allow_then_deny"); - let mut config_router = get_basic_router_config().await; - config_router - .insert_json5( - "access_control", - r#" + let mut config_router = get_basic_router_config().await; + config_router + .insert_json5( + "access_control", + r#" {"enabled": true, "default_permission": "allow", "rules": @@ -490,44 +491,45 @@ async fn test_get_qbl_allow_then_deny() { ] } "#, - ) - .unwrap(); - println!("Opening router session"); - - let session = ztimeout!(zenoh::open(config_router).res_async()).unwrap(); - - let (get_session, qbl_session) = get_client_sessions().await; - { - let mut received_value = String::new(); - - let qbl = ztimeout!(qbl_session - .declare_queryable(KEY_EXPR) - .callback(move |sample| { - let rep = Sample::try_from(KEY_EXPR, VALUE).unwrap(); - tokio::task::block_in_place(move || { - Handle::current().block_on(async move { - ztimeout!(sample.reply(Ok(rep)).res_async()).unwrap() + ) + .unwrap(); + println!("Opening router session"); + + let session = ztimeout!(zenoh::open(config_router).res_async()).unwrap(); + + let (get_session, qbl_session) = get_client_sessions().await; + { + let mut received_value = String::new(); + + let qbl = ztimeout!(qbl_session + .declare_queryable(KEY_EXPR) + .callback(move |sample| { + let rep = Sample::try_from(KEY_EXPR, VALUE).unwrap(); + tokio::task::block_in_place(move || { + Handle::current().block_on(async move { + ztimeout!(sample.reply(Ok(rep)).res_async()).unwrap() + }); }); - }); - }) - .res_async()) - .unwrap(); - - tokio::time::sleep(SLEEP).await; - let recv_reply = ztimeout!(get_session.get(KEY_EXPR).res_async()).unwrap(); - while let Ok(reply) = ztimeout!(recv_reply.recv_async()) { - match reply.sample { - Ok(sample) => { - received_value = sample.value.to_string(); - break; + }) + .res_async()) + .unwrap(); + + tokio::time::sleep(SLEEP).await; + let recv_reply = ztimeout!(get_session.get(KEY_EXPR).res_async()).unwrap(); + while let Ok(reply) = ztimeout!(recv_reply.recv_async()) { + match reply.sample 
{ + Ok(sample) => { + received_value = sample.value.to_string(); + break; + } + Err(e) => println!("Error : {}", e), } - Err(e) => println!("Error : {}", e), } + tokio::time::sleep(SLEEP).await; + assert_ne!(received_value, VALUE); + ztimeout!(qbl.undeclare().res_async()).unwrap(); } - tokio::time::sleep(SLEEP).await; - assert_ne!(received_value, VALUE); - ztimeout!(qbl.undeclare().res_async()).unwrap(); + close_sessions(get_session, qbl_session).await; + close_router_session(session).await; } - close_sessions(get_session, qbl_session).await; - close_router_session(session).await; } diff --git a/zenoh/tests/attachments.rs b/zenoh/tests/attachments.rs index d1fbd1086a..0010c9d5af 100644 --- a/zenoh/tests/attachments.rs +++ b/zenoh/tests/attachments.rs @@ -1,3 +1,16 @@ +// +// Copyright (c) 2024 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// #[cfg(feature = "unstable")] #[test] fn pubsub() { diff --git a/zenoh/tests/connection_retry.rs b/zenoh/tests/connection_retry.rs index fcb071b489..91614fe430 100644 --- a/zenoh/tests/connection_retry.rs +++ b/zenoh/tests/connection_retry.rs @@ -1,5 +1,17 @@ +// +// Copyright (c) 2024 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// use config::ConnectionRetryConf; - use zenoh::prelude::sync::*; #[test] diff --git a/zenoh/tests/formatters.rs b/zenoh/tests/formatters.rs index ae894e44b6..27f34233c4 100644 --- a/zenoh/tests/formatters.rs +++ b/zenoh/tests/formatters.rs @@ -1,5 +1,18 @@ +// +// Copyright (c) 2024 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// #[test] -fn reuse() { +fn kedefine_reuse() { zenoh::kedefine!( pub gkeys: "zenoh/${group:*}/${member:*}", ); diff --git a/zenoh/tests/interceptors.rs b/zenoh/tests/interceptors.rs index e38a64d200..048bb1ae1a 100644 --- a/zenoh/tests/interceptors.rs +++ b/zenoh/tests/interceptors.rs @@ -1,3 +1,16 @@ +// +// Copyright (c) 2024 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// ZettaScale Zenoh Team, +// use std::sync::{Arc, Mutex}; use zenoh_core::zlock;