From a067a77ea0fcb79af65e8c142f33f9e4f0ea044c Mon Sep 17 00:00:00 2001 From: snehilzs Date: Mon, 18 Mar 2024 11:27:57 +0100 Subject: [PATCH] adding basic authID functionality --- .gitignore | 9 +- Cargo.toml | 14 +- ca2.crt | 21 + client.json5 | 433 +++++++++ client_ca.pem | 20 + client_cert.pem | 20 + client_key.pem | 27 + client_quic.json5 | 56 ++ io/zenoh-link-commons/src/lib.rs | 6 + io/zenoh-link-commons/src/unicast.rs | 5 +- .../zenoh-link-quic/src/unicast-modified.rs | 662 ------------- io/zenoh-links/zenoh-link-quic/src/unicast.rs | 91 +- .../zenoh-link-serial/src/unicast.rs | 10 +- io/zenoh-links/zenoh-link-tcp/src/unicast.rs | 8 +- .../zenoh-link-tls/src/unicast-modified.rs | 916 ------------------ io/zenoh-links/zenoh-link-tls/src/unicast.rs | 41 +- io/zenoh-links/zenoh-link-udp/src/unicast.rs | 11 +- .../zenoh-link-unixpipe/src/unix/unicast.rs | 10 +- .../zenoh-link-unixsock_stream/src/unicast.rs | 8 +- io/zenoh-links/zenoh-link-ws/src/unicast.rs | 7 + myed25519.key | 3 + myedserver.crt | 12 + mypub2.crt | 22 + mypub2.key | 28 + mysub.csr | 16 - peer_pub.json5 | 12 + peer_sub.json5 | 12 + pub_client.json5 | 5 +- router.json5 | 4 +- router_quic.json5 | 56 ++ server_ca.pem | 20 + server_cert.pem | 20 + server_key.pem | 27 + sub_client.json5 | 44 +- zenoh/src/net/routing/interceptor/mod.rs | 4 +- .../interceptor/testing_interceptor.rs | 87 ++ 36 files changed, 1103 insertions(+), 1644 deletions(-) create mode 100644 ca2.crt create mode 100644 client.json5 create mode 100644 client_ca.pem create mode 100644 client_cert.pem create mode 100644 client_key.pem create mode 100644 client_quic.json5 delete mode 100644 io/zenoh-links/zenoh-link-quic/src/unicast-modified.rs delete mode 100644 io/zenoh-links/zenoh-link-tls/src/unicast-modified.rs create mode 100644 myed25519.key create mode 100644 myedserver.crt create mode 100644 mypub2.crt create mode 100644 mypub2.key delete mode 100644 mysub.csr create mode 100644 peer_pub.json5 create mode 100644 peer_sub.json5 create mode 100644 router_quic.json5 create mode 100644 server_ca.pem create mode 100644 server_cert.pem create mode 100644 server_key.pem create mode 100644 zenoh/src/net/routing/interceptor/testing_interceptor.rs diff --git a/.gitignore b/.gitignore index 83421ea1ae..9240486e57 100644 --- a/.gitignore +++ b/.gitignore @@ -19,4 +19,11 @@ .vscode -cargo-timing*.html \ No newline at end of file +cargo-timing*.html + +# remove secret files + +*.pem +*.crt +*.key +*.json5 \ No newline at end of file diff --git a/Cargo.toml b/Cargo.toml index d82d8ae7a5..854d68cd8e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -71,14 +71,16 @@ description = "Zenoh: Zero Overhead Pub/sub, Store/Query and Compute." # DEFAULT-FEATURES NOTE: Be careful with default-features and additivity! 
# (https://github.com/rust-lang/cargo/issues/11329) [workspace.dependencies] + +rustls = { version = "0.21.5", features = ["dangerous_configuration"] } aes = "0.8.2" ahash = "0.8.7" -anyhow = { version = "1.0.69", default-features = false } # Default features are disabled due to usage in no_std crates +anyhow = { version = "1.0.69", default-features = false } # Default features are disabled due to usage in no_std crates async-executor = "1.5.0" async-global-executor = "2.3.1" async-io = "1.13.0" async-rustls = "0.4.0" -async-std = { version = "=1.12.0", default-features = false } # Default features are disabled due to some crates' requirements +async-std = { version = "=1.12.0", default-features = false } # Default features are disabled due to some crates' requirements async-trait = "0.1.60" base64 = "0.21.4" bincode = "1.3.3" @@ -93,10 +95,10 @@ event-listener = "4.0.0" flume = "0.11" form_urlencoded = "1.1.0" futures = "0.3.25" -futures-util = { version = "0.3.25", default-features = false } # Default features are disabled due to some crates' requirements +futures-util = { version = "0.3.25", default-features = false } # Default features are disabled due to some crates' requirements git-version = "0.3.5" hashbrown = "0.14" -hex = { version = "0.4.3", default-features = false } # Default features are disabled due to usage in no_std crates +hex = { version = "0.4.3", default-features = false } # Default features are disabled due to usage in no_std crates hmac = { version = "0.12.1", features = ["std"] } home = "0.5.4" http-types = "2.12.0" @@ -120,14 +122,14 @@ pnet_datalink = "0.34" proc-macro2 = "1.0.51" quinn = "0.10.1" quote = "1.0.23" -rand = { version = "0.8.5", default-features = false } # Default features are disabled due to usage in no_std crates +rand = { version = "0.8.5", default-features = false } # Default features are disabled due to usage in no_std crates rand_chacha = "0.3.1" rcgen = "0.11" regex = "1.7.1" ringbuffer-spsc = "0.1.9" rsa = "0.9" rustc_version = "0.4.0" -rustls = { version = "0.21.5", features = ["dangerous_configuration"] } +#rustls = "0.21.5" rustls-native-certs = "0.7.0" rustls-pemfile = "2.0.0" rustls-webpki = "0.102.0" diff --git a/ca2.crt b/ca2.crt new file mode 100644 index 0000000000..e3a7d77fbd --- /dev/null +++ b/ca2.crt @@ -0,0 +1,21 @@ +-----BEGIN CERTIFICATE----- +MIIDizCCAnOgAwIBAgIULpNcDIKDOM7YxmzXHSjXrlFZ6+kwDQYJKoZIhvcNAQEL +BQAwVTELMAkGA1UEBhMCRlIxCzAJBgNVBAgMAklGMQswCQYDVQQHDAJQUjERMA8G +A1UECgwIenMsIEluYy4xGTAXBgNVBAMMEHpzX3Rlc3Rfcm9vdF9jYTIwHhcNMjQw +MzEzMTMxNTM5WhcNMjUwMzEzMTMxNTM5WjBVMQswCQYDVQQGEwJGUjELMAkGA1UE +CAwCSUYxCzAJBgNVBAcMAlBSMREwDwYDVQQKDAh6cywgSW5jLjEZMBcGA1UEAwwQ +enNfdGVzdF9yb290X2NhMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB +AKoQonePPKy4/uEzzF6t0+JYgT+Z1amlAf0Tc/Az2zA9gc0JdSCfYnZL0ie23dXk +wUvmQMPXh6nmQJO+mw1CG/DAGnj2g/u3PxWKdbxj77wyVJI0BIuEKakv6dmugyhP +I7/PKl/H2XGqQvoHFScegIdFhSkk1V9eg5CZZMqkpwI0bLb/yx73Ed5syWFbMtYK +LvGBNZmG1UT7vBo/gkOq7viLMrWpqkSPv+8/DbZCe0FYYWz0sUp8iWi6SmxMCLsT +S5Plm9f8VQErQnoVBUfWIJYvL2FCIx8klJykWQPssH372wupaXDX9EgBV0eedGIH +4PEpP3ovRGia6vM12Lvl4y0CAwEAAaNTMFEwHQYDVR0OBBYEFNs7TdMPDd3HIdTp +hMQ4vmaB0FWsMB8GA1UdIwQYMBaAFNs7TdMPDd3HIdTphMQ4vmaB0FWsMA8GA1Ud +EwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAJwtREvIwVZZNeaeseHWtaYe +vIYX0G2ONC7tAVO+RajjllisRlsKYvw2bXWBgMADdlDs9sqbKYSfz/A79SHhaEqF +QnPisSHVkTMnYWkeEaeXfxx/GEa1aXt9lEeDaDHCy2cW1cff9gJAXEE96INVAPv1 +8+w9x1AyjicbgkI/+Ldtf5g9we3wdJQXbbxop92R55+MGB73UN2Bru8pESFX+FpK +Z8Bxc9ruYBUfPl1CCmapDp6lHGtGCE6gnZoMjqTZJGWYz8tBdf2zITPkOKAueuX8 
+5UVVtJTzpvPziktp0NZSmPzwotjH0glwL7aGIR/+5bS/cP5Je1FfKAIdH31AJmA= +-----END CERTIFICATE----- diff --git a/client.json5 b/client.json5 new file mode 100644 index 0000000000..69cd0f3169 --- /dev/null +++ b/client.json5 @@ -0,0 +1,433 @@ +/// This file attempts to list and document available configuration elements. +/// For a more complete view of the configuration's structure, check out `zenoh/src/config.rs`'s `Config` structure. +/// Note that the values here are correctly typed, but may not be sensible, so copying this file to change only the parts that matter to you is not good practice. +{ + /// The identifier (as unsigned 128bit integer in hexadecimal lowercase - leading zeros are not accepted) + /// that zenoh runtime will use. + /// If not set, a random unsigned 128bit integer will be used. + /// WARNING: this id must be unique in your zenoh network. + // id: "1234567890abcdef", + /// The node's mode (router, peer or client) + mode: "client", + /// The node's metadata (name, location, DNS name, etc.) Arbitrary JSON data not interpreted by zenohd and available in admin space @/router/ + metadata: { + name: "strawberry", + location: "Penny Lane" + }, + /// Which endpoints to connect to. E.g. tcp/localhost:7447. + /// By configuring the endpoints, it is possible to tell zenoh which router/peer to connect to at startup. + /// For TCP/UDP on Linux, it is possible additionally specify the interface to be connected to: + /// E.g. tcp/192.168.0.1:7447#iface=eth0, for connect only if the IP address is reachable via the interface eth0 + connect: { + endpoints: [ + // "/
" + "tls/127.0.0.1:7447" + ], + }, + /// Which endpoints to listen on. E.g. tcp/localhost:7447. + /// By configuring the endpoints, it is possible to tell zenoh which are the endpoints that other routers, + /// peers, or client can use to establish a zenoh session. + /// For TCP/UDP on Linux, it is possible additionally specify the interface to be listened to: + /// E.g. tcp/0.0.0.0:7447#iface=eth0, for listen connection only on eth0 + listen: { + endpoints: [ + // "/
" + ], + }, + /// Configure the scouting mechanisms and their behaviours + scouting: { + /// In client mode, the period dedicated to scouting for a router before failing + timeout: 3000, + /// In peer mode, the period dedicated to scouting remote peers before attempting other operations + delay: 200, + /// The multicast scouting configuration. + multicast: { + /// Whether multicast scouting is enabled or not + enabled: true, + /// The socket which should be used for multicast scouting + address: "224.0.0.224:7446", + /// The network interface which should be used for multicast scouting + interface: "auto", // If not set or set to "auto" the interface if picked automatically + /// Which type of Zenoh instances to automatically establish sessions with upon discovery on UDP multicast. + /// Accepts a single value or different values for router, peer and client. + /// Each value is bit-or-like combinations of "peer", "router" and "client". + autoconnect: { router: "", peer: "router|peer" + }, + /// Whether or not to listen for scout messages on UDP multicast and reply to them. + listen: true, + }, + /// The gossip scouting configuration. + gossip: { + /// Whether gossip scouting is enabled or not + enabled: true, + /// When true, gossip scouting informations are propagated multiple hops to all nodes in the local network. + /// When false, gossip scouting informations are only propagated to the next hop. + /// Activating multihop gossip implies more scouting traffic and a lower scalability. + /// It mostly makes sense when using "linkstate" routing mode where all nodes in the subsystem don't have + /// direct connectivity with each other. + multihop: false, + /// Which type of Zenoh instances to automatically establish sessions with upon discovery on gossip. + /// Accepts a single value or different values for router, peer and client. + /// Each value is bit-or-like combinations of "peer", "router" and "client". + autoconnect: { router: "", peer: "router|peer" + }, + }, + }, + /// Configuration of data messages timestamps management. + timestamping: { + /// Whether data messages should be timestamped if not already. + /// Accepts a single boolean value or different values for router, peer and client. + enabled: { router: true, peer: false, client: false + }, + /// Whether data messages with timestamps in the future should be dropped or not. + /// If set to false (default), messages with timestamps in the future are retimestamped. + /// Timestamps are ignored if timestamping is disabled. + drop_future_timestamp: false, + }, + /// The default timeout to apply to queries in milliseconds. + queries_default_timeout: 10000, + /// The routing strategy to use and it's configuration. + routing: { + /// The routing strategy to use in routers and it's configuration. + router: { + /// When set to true a router will forward data between two peers + /// directly connected to it if it detects that those peers are not + /// connected to each other. + /// The failover brokering only works if gossip discovery is enabled. + peers_failover_brokering: true, + }, + /// The routing strategy to use in peers and it's configuration. + peer: { + /// The routing strategy to use in peers. ("peer_to_peer" or "linkstate"). + mode: "peer_to_peer", + }, + }, + // /// The declarations aggregation strategy. + // aggregation: { + // /// A list of key-expressions for which all included subscribers will be aggregated into. 
+ // subscribers: [ + // // key_expression + // ], + // /// A list of key-expressions for which all included publishers will be aggregated into. + // publishers: [ + // // key_expression + // ], + // }, + // /// The downsampling declaration. + // downsampling: [ + // { + // /// A list of network interfaces messages will be processed on, the rest will be passed as is. + // interfaces: [ "wlan0" ], + // /// Data flow messages will be processed on. ("egress" or "ingress") + // flow: "egress", + // /// A list of downsampling rules: key_expression and the maximum frequency in Hertz + // rules: [ + // { key_expr: "demo/example/zenoh-rs-pub", freq: 0.1 }, + // ], + // }, + // ], + /// Configure internal transport parameters + transport: { + unicast: { + /// Timeout in milliseconds when opening a link + accept_timeout: 10000, + /// Maximum number of zenoh session in pending state while accepting + accept_pending: 100, + /// Maximum number of sessions that can be simultaneously alive + max_sessions: 1000, + /// Maximum number of incoming links that are admitted per session + max_links: 1, + /// Enables the LowLatency transport + /// This option does not make LowLatency transport mandatory, the actual implementation of transport + /// used will depend on Establish procedure and other party's settings + /// + /// NOTE: Currently, the LowLatency transport doesn't preserve QoS prioritization. + /// NOTE: Due to the note above, 'lowlatency' is incompatible with 'qos' option, so in order to + /// enable 'lowlatency' you need to explicitly disable 'qos'. + lowlatency: false, + /// Enables QoS on unicast communications. + qos: { + enabled: true, + }, + /// Enables compression on unicast communications. + /// Compression capabilities are negotiated during session establishment. + /// If both Zenoh nodes support compression, then compression is activated. + compression: { + enabled: false, + }, + }, + multicast: { + /// Enables QoS on multicast communication. + /// Default to false for Zenoh-to-Zenoh-Pico out-of-the-box compatibility. + qos: { + enabled: false, + }, + /// Enables compression on multicast communication. + /// Default to false for Zenoh-to-Zenoh-Pico out-of-the-box compatibility. + compression: { + enabled: false, + }, + }, + link: { + /// An optional whitelist of protocols to be used for accepting and opening sessions. + /// If not configured, all the supported protocols are automatically whitelisted. + /// The supported protocols are: ["tcp" , "udp", "tls", "quic", "ws", "unixsock-stream"] + /// For example, to only enable "tls" and "quic": + protocols: [ + "tls" + ], + /// Configure the zenoh TX parameters of a link + tx: { + /// The resolution in bits to be used for the message sequence numbers. + /// When establishing a session with another Zenoh instance, the lowest value of the two instances will be used. + /// Accepted values: 8bit, 16bit, 32bit, 64bit. + sequence_number_resolution: "32bit", + /// Link lease duration in milliseconds to announce to other zenoh nodes + lease: 10000, + /// Number of keep-alive messages in a link lease duration. If no data is sent, keep alive + /// messages will be sent at the configured time interval. + /// NOTE: In order to consider eventual packet loss and transmission latency and jitter, + /// set the actual keep_alive timeout to one fourth of the lease time. 
+ /// This is in-line with the ITU-T G.8013/Y.1731 specification on continous connectivity + /// check which considers a link as failed when no messages are received in 3.5 times the + /// target interval. + keep_alive: 4, + /// Batch size in bytes is expressed as a 16bit unsigned integer. + /// Therefore, the maximum batch size is 2^16-1 (i.e. 65535). + /// The default batch size value is the maximum batch size: 65535. + batch_size: 65535, + /// Each zenoh link has a transmission queue that can be configured + queue: { + /// The size of each priority queue indicates the number of batches a given queue can contain. + /// The amount of memory being allocated for each queue is then SIZE_XXX * BATCH_SIZE. + /// In the case of the transport link MTU being smaller than the ZN_BATCH_SIZE, + /// then amount of memory being allocated for each queue is SIZE_XXX * LINK_MTU. + /// If qos is false, then only the DATA priority will be allocated. + size: { + control: 1, + real_time: 1, + interactive_high: 1, + interactive_low: 1, + data_high: 2, + data: 4, + data_low: 4, + background: 4, + }, + /// The initial exponential backoff time in nanoseconds to allow the batching to eventually progress. + /// Higher values lead to a more aggressive batching but it will introduce additional latency. + backoff: 100, + }, + // Number of threads dedicated to transmission + // By default, the number of threads is calculated as follows: 1 + ((#cores - 1) / 4) + // threads: 4, + }, + /// Configure the zenoh RX parameters of a link + rx: { + /// Receiving buffer size in bytes for each link + /// The default the rx_buffer_size value is the same as the default batch size: 65335. + /// For very high throughput scenarios, the rx_buffer_size can be increased to accomodate + /// more in-flight data. This is particularly relevant when dealing with large messages. + /// E.g. for 16MiB rx_buffer_size set the value to: 16777216. + buffer_size: 65535, + /// Maximum size of the defragmentation buffer at receiver end. + /// Fragmented messages that are larger than the configured size will be dropped. + /// The default value is 1GiB. This would work in most scenarios. + /// NOTE: reduce the value if you are operating on a memory constrained device. + max_message_size: 1073741824, + }, + /// Configure TLS specific parameters + // tls: { + // server_private_key: "myserver.key", + // server_certificate: "myserver.crt" + // } + tls: { + /// Path to the certificate of the certificate authority used to validate either the server + /// or the client's keys and certificates, depending on the node's mode. If not specified + /// on router mode then the default WebPKI certificates are used instead. + root_ca_certificate: "ca.crt", + /// Path to the TLS server private key + server_private_key: null, + /// Path to the TLS server public certificate + server_certificate: null, + /// Client authentication, if true enables mTLS (mutual authentication) + client_auth: true, + /// Path to the TLS client private key + client_private_key: "newmyserver.key", + /// Path to the TLS client public certificate + client_certificate: "newmyserver.crt", + // Whether or not to use server name verification, if set to false zenoh will disregard the common names of the certificates when verifying servers. + // This could be dangerous because your CA can have signed a server cert for foo.com, that's later being used to host a server at baz.com. If you wan't your + // ca to verify that the server at baz.com is actually baz.com, let this be true (default). 
+ server_name_verification: null, + }, + }, + /// Shared memory configuration + shared_memory: { + enabled: false, + }, + /// Access control configuration + auth: { + /// The configuration of authentication. + /// A password implies a username is required. + usrpwd: { + user: null, + password: null, + /// The path to a file containing the user password dictionary + dictionary_file: null, + }, + pubkey: { + public_key_pem: null, + private_key_pem: null, + public_key_file: null, + private_key_file: null, + key_size: null, + known_keys_file: null, + }, + }, + }, + /// Configure the Admin Space + /// Unstable: this configuration part works as advertised, but may change in a future release + adminspace: { + // read and/or write permissions on the admin space + permissions: { + read: true, + write: false, + }, + }, + /// + /// Plugins configurations + /// + // /// Directories where plugins configured by name should be looked for. Plugins configured by __path__ are not subject to lookup + // plugins_search_dirs: [], + // /// Plugins are only loaded if present in the configuration. When starting + // /// Once loaded, they may react to changes in the configuration made through the zenoh instance's adminspace. + // plugins: { + // /// If no `__path__` is given to a plugin, zenohd will automatically search for a shared library matching the plugin's name (here, `libzenoh_plugin_rest.so` would be searched for on linux) + // + // /// Plugin settings may contain field `__config__` + // /// - If `__config__` is specified, it's content is merged into plugin configuration + // /// - Properties loaded from `__config__` file overrides existing properties + // /// - If json objects in loaded file contains `__config__` properties, they are processed recursively + // /// This is used in the 'storcge_manager' which supports subplugins, each with it's own config + // /// + // /// See below exapmle of plugin configuration using `__config__` property + // + // /// Configure the REST API plugin + // rest: { + // /// Setting this option to true allows zenohd to panic should it detect issues with this plugin. Setting it to false politely asks the plugin not to panic. + // __required__: true, // defaults to false + // /// load configuration from the file + // __config__: "./plugins/zenoh-plugin-rest/config.json5", + // /// http port to answer to rest requests + // http_port: 8000, + // }, + // + // /// Configure the storage manager plugin + // storage_manager: { + // /// When a path is present, automatic search is disabled, and zenohd will instead select the first path which manages to load. + // __path__: [ + // "./target/release/libzenoh_plugin_storage_manager.so", + // "./target/release/libzenoh_plugin_storage_manager.dylib", + // ], + // /// Directories where plugins configured by name should be looked for. Plugins configured by __path__ are not subject to lookup + // backend_search_dirs: [], + // /// The "memory" volume is always available, but you may create other volumes here, with various backends to support the actual storing. + // volumes: { + // /// An influxdb backend is also available at https://github.com/eclipse-zenoh/zenoh-backend-influxdb + // influxdb: { + // url: "https://myinfluxdb.example", + // /// Some plugins may need passwords in their configuration. + // /// To avoid leaking them through the adminspace, they may be masked behind a privacy barrier. + // /// any value held at the key "private" will not be shown in the adminspace. 
+ // private: { + // username: "user1", + // password: "pw1", + // }, + // }, + // influxdb2: { + // /// A second backend of the same type can be spawned using `__path__`, for examples when different DBs are needed. + // backend: "influxdb", + // private: { + // username: "user2", + // password: "pw2", + // }, + // url: "https://localhost:8086", + // }, + // }, + // + // /// Configure the storages supported by the volumes + // storages: { + // demo: { + // /// Storages always need to know what set of keys they must work with. These sets are defined by a key expression. + // key_expr: "demo/memory/**", + // /// Storages also need to know which volume will be used to actually store their key-value pairs. + // /// The "memory" volume is always available, and doesn't require any per-storage options, so requesting "memory" by string is always sufficient. + // volume: "memory", + // }, + // demo2: { + // key_expr: "demo/memory2/**", + // volume: "memory", + // /// Storage manager plugin handles metadata in order to ensure convergence of distributed storages configured in Zenoh. + // /// Metadata includes the set of wild card updates and deletions (tombstones). + // /// Once the samples are guaranteed to be delivered, the metadata can be garbage collected. + // garbage_collection: { + // /// The garbage collection event will be periodic with this duration. + // /// The duration is specified in seconds. + // period: 30, + // /// Metadata older than this parameter will be garbage collected. + // /// The duration is specified in seconds. + // lifespan: 86400, + // }, + // /// If multiple storages subscribing to the same key_expr should be synchronized, declare them as replicas. + // /// In the absence of this configuration, a normal storage is initialized + // /// Note: all the samples to be stored in replicas should be timestamped + // replica_config: { + // /// Specifying the parameters is optional, by default the values provided will be used. + // /// Time interval between different synchronization attempts in seconds + // publication_interval: 5, + // /// Expected propagation delay of the network in milliseconds + // propagation_delay: 200, + // /// This is the chunk that you would like your data to be divide into in time, in milliseconds. + // /// Higher the frequency of updates, lower the delta should be chosen + // /// To be efficient, delta should be the time containing no more than 100,000 samples + // delta: 1000, + // } + // }, + // demo3: { + // key_expr: "demo/memory3/**", + // volume: "memory", + // /// A complete storage advertises itself as containing all the known keys matching the configured key expression. + // /// If not configured, complete defaults to false. + // complete: "true", + // }, + // influx_demo: { + // key_expr: "demo/influxdb/**", + // /// This prefix will be stripped of the received keys when storing. 
+ // strip_prefix: "demo/influxdb", + // /// influxdb-backed volumes need a bit more configuration, which is passed like-so: + // volume: { + // id: "influxdb", + // db: "example", + // }, + // }, + // influx_demo2: { + // key_expr: "demo/influxdb2/**", + // strip_prefix: "demo/influxdb2", + // volume: { + // id: "influxdb2", + // db: "example", + // }, + // }, + // }, + // }, + // }, + // /// Plugin configuration example using `__config__` property + // plugins: { + // rest: { + // __config__: "./plugins/zenoh-plugin-rest/config.json5", + // }, + // storage_manager: { + // __config__: "./plugins/zenoh-plugin-storage-manager/config.json5", + // } + // }, +} \ No newline at end of file diff --git a/client_ca.pem b/client_ca.pem new file mode 100644 index 0000000000..53648fdb59 --- /dev/null +++ b/client_ca.pem @@ -0,0 +1,20 @@ +-----BEGIN CERTIFICATE----- +MIIDSzCCAjOgAwIBAgIIB42n1ZIkOakwDQYJKoZIhvcNAQELBQAwIDEeMBwGA1UE +AxMVbWluaWNhIHJvb3QgY2EgMDc4ZGE3MCAXDTIzMDMwNjE2MDMwN1oYDzIxMjMw +MzA2MTYwMzA3WjAgMR4wHAYDVQQDExVtaW5pY2Egcm9vdCBjYSAwNzhkYTcwggEi +MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDIuCq24O4P4Aep5vAVlrIQ7P8+ +uWWgcHIFYa02TmhBUB/hjo0JANCQvAtpVNuQ8NyKPlqnnq1cttePbSYVeA0rrnOs +DcfySAiyGBEY9zMjFfHJtH1wtrPcJEU8XIEY3xUlrAJE2CEuV9dVYgfEEydnvgLc +8Ug0WXSiARjqbnMW3l8jh6bYCp/UpL/gSM4mxdKrgpfyPoweGhlOWXc3RTS7cqM9 +T25acURGOSI6/g8GF0sNE4VZmUvHggSTmsbLeXMJzxDWO+xVehRmbQx3IkG7u++b +QdRwGIJcDNn7zHlDMHtQ0Z1DBV94fZNBwCULhCBB5g20XTGw//S7Fj2FPwyhAgMB +AAGjgYYwgYMwDgYDVR0PAQH/BAQDAgKEMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggr +BgEFBQcDAjASBgNVHRMBAf8ECDAGAQH/AgEAMB0GA1UdDgQWBBTWfAmQ/BUIQm/9 +/llJJs2jUMWzGzAfBgNVHSMEGDAWgBTWfAmQ/BUIQm/9/llJJs2jUMWzGzANBgkq +hkiG9w0BAQsFAAOCAQEAvtcZFAELKiTuOiAeYts6zeKxc+nnHCzayDeD/BDCbxGJ +e1n+xdHjLtWGd+/Anc+fvftSYBPTFQqCi84lPiUIln5z/rUxE+ke81hNPIfw2obc +yIg87xCabQpVyEh8s+MV+7YPQ1+fH4FuSi2Fck1FejxkVqN2uOZPvOYUmSTsaVr1 +8SfRnwJNZ9UMRPM2bD4Jkvj0VcL42JM3QkOClOzYW4j/vll2cSs4kx7er27cIoo1 +Ck0v2xSPAiVjg6w65rUQeW6uB5m0T2wyj+wm0At8vzhZPlgS1fKhcmT2dzOq3+oN +R+IdLiXcyIkg0m9N8I17p0ljCSkbrgGMD3bbePRTfg== +-----END CERTIFICATE----- diff --git a/client_cert.pem b/client_cert.pem new file mode 100644 index 0000000000..a2c10ba744 --- /dev/null +++ b/client_cert.pem @@ -0,0 +1,20 @@ +-----BEGIN CERTIFICATE----- +MIIDLjCCAhagAwIBAgIIeUtmIdFQznMwDQYJKoZIhvcNAQELBQAwIDEeMBwGA1UE +AxMVbWluaWNhIHJvb3QgY2EgMDc4ZGE3MCAXDTIzMDMwNjE2MDMxOFoYDzIxMjMw +MzA2MTYwMzE4WjAUMRIwEAYDVQQDEwlsb2NhbGhvc3QwggEiMA0GCSqGSIb3DQEB +AQUAA4IBDwAwggEKAoIBAQCx+oC6ESU3gefJ6oui9J3hB76c2/kDAKNI74cWIXfT +He9DUeKpEDRSbIWVKoGcUfdNQebglxp3jRB+tfx/XU0oZl2m8oewxipiNmdiREUZ +Lazh9DJoNtXkzTqzdQNfwRM+BjjVjx8IpNJV2L2IeTBxWtczFS7ggEHHQLWvYZKj +eCQgGdRwQt0V1pQ5Jt0KKkmFueTCLESvaHs9fHBtrtIhmBm1FpBZqTVUT1vvXqp7 +eIy4yFoR+j9SgWZ5kI+7myl/Bo5mycKzFE+TYiNvOWwdMnT2Uz3CZsQUcExUBd6M +tOT75Kte3yMBJmE16f/YbPItA0Cq4af3yUIxDpKwT28tAgMBAAGjdjB0MA4GA1Ud +DwEB/wQEAwIFoDAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwDAYDVR0T +AQH/BAIwADAfBgNVHSMEGDAWgBTWfAmQ/BUIQm/9/llJJs2jUMWzGzAUBgNVHREE +DTALgglsb2NhbGhvc3QwDQYJKoZIhvcNAQELBQADggEBAG/POnBob0S7iYwsbtI2 +3LTTbRnmseIErtJuJmI9yYzgVIm6sUSKhlIUfAIm4rfRuzE94KFeWR2w9RabxOJD +wjYLLKvQ6rFY5g2AV/J0TwDjYuq0absdaDPZ8MKJ+/lpGYK3Te+CTOfq5FJRFt1q +GOkXAxnNpGg0obeRWRKFiAMHbcw6a8LIMfRjCooo3+uSQGsbVzGxSB4CYo720KcC +9vB1K9XALwzoqCewP4aiQsMY1GWpAmzXJftY3w+lka0e9dBYcdEdOqxSoZb5OBBZ +p5e60QweRuJsb60aUaCG8HoICevXYK2fFqCQdlb5sIqQqXyN2K6HuKAFywsjsGyJ +abY= +-----END CERTIFICATE----- diff --git a/client_key.pem b/client_key.pem new file mode 100644 index 0000000000..8e5f66ee3c --- /dev/null +++ b/client_key.pem @@ -0,0 +1,27 @@ +-----BEGIN 
RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEAsfqAuhElN4HnyeqLovSd4Qe+nNv5AwCjSO+HFiF30x3vQ1Hi +qRA0UmyFlSqBnFH3TUHm4Jcad40QfrX8f11NKGZdpvKHsMYqYjZnYkRFGS2s4fQy +aDbV5M06s3UDX8ETPgY41Y8fCKTSVdi9iHkwcVrXMxUu4IBBx0C1r2GSo3gkIBnU +cELdFdaUOSbdCipJhbnkwixEr2h7PXxwba7SIZgZtRaQWak1VE9b716qe3iMuMha +Efo/UoFmeZCPu5spfwaOZsnCsxRPk2IjbzlsHTJ09lM9wmbEFHBMVAXejLTk++Sr +Xt8jASZhNen/2GzyLQNAquGn98lCMQ6SsE9vLQIDAQABAoIBAGQkKggHm6Q20L+4 +2+bNsoOqguLplpvM4RMpyx11qWE9h6GeUmWD+5yg+SysJQ9aw0ZSHWEjRD4ePji9 +lxvm2IIxzuIftp+NcM2gBN2ywhpfq9XbO/2NVR6PJ0dQQJzBG12bzKDFDdYkP0EU +WdiPL+WoEkvo0F57bAd77n6G7SZSgxYekBF+5S6rjbu5I1cEKW+r2vLehD4uFCVX +Q0Tu7TyIOE1KJ2anRb7ZXVUaguNj0/Er7EDT1+wN8KJKvQ1tYGIq/UUBtkP9nkOI +9XJd25k6m5AQPDddzd4W6/5+M7kjyVPi3CsQcpBPss6ueyecZOMaKqdWAHeEyaak +r67TofUCgYEA6GBa+YkRvp0Ept8cd5mh4gCRM8wUuhtzTQnhubCPivy/QqMWScdn +qD0OiARLAsqeoIfkAVgyqebVnxwTrKTvWe0JwpGylEVWQtpGz3oHgjST47yZxIiY +CSAaimi2CYnJZ+QB2oBkFVwNCuXdPEGX6LgnOGva19UKrm6ONsy6V9MCgYEAxBJu +fu4dGXZreARKEHa/7SQjI9ayAFuACFlON/EgSlICzQyG/pumv1FsMEiFrv6w7PRj +4AGqzyzGKXWVDRMrUNVeGPSKJSmlPGNqXfPaXRpVEeB7UQhAs5wyMrWDl8jEW7Ih +XcWhMLn1f/NOAKyrSDSEaEM+Nuu+xTifoAghvP8CgYEAlta9Fw+nihDIjT10cBo0 +38w4dOP7bFcXQCGy+WMnujOYPzw34opiue1wOlB3FIfL8i5jjY/fyzPA5PhHuSCT +Ec9xL3B9+AsOFHU108XFi/pvKTwqoE1+SyYgtEmGKKjdKOfzYA9JaCgJe1J8inmV +jwXCx7gTJVjwBwxSmjXIm+sCgYBQF8NhQD1M0G3YCdCDZy7BXRippCL0OGxVfL2R +5oKtOVEBl9NxH/3+evE5y/Yn5Mw7Dx3ZPHUcygpslyZ6v9Da5T3Z7dKcmaVwxJ+H +n3wcugv0EIHvOPLNK8npovINR6rGVj6BAqD0uZHKYYYEioQxK5rGyGkaoDQ+dgHm +qku12wKBgQDem5FvNp5iW7mufkPZMqf3sEGtu612QeqejIPFM1z7VkUgetsgPBXD +tYsqC2FtWzY51VOEKNpnfH7zH5n+bjoI9nAEAW63TK9ZKkr2hRGsDhJdGzmLfQ7v +F6/CuIw9EsAq6qIB8O88FXQqald+BZOx6AzB8Oedsz/WtMmIEmr/+Q== +-----END RSA PRIVATE KEY----- diff --git a/client_quic.json5 b/client_quic.json5 new file mode 100644 index 0000000000..3223a9c49f --- /dev/null +++ b/client_quic.json5 @@ -0,0 +1,56 @@ +{ + mode: "peer", + /// Configure the scouting mechanisms and their behaviours + scouting: { + /// In client mode, the period dedicated to scouting for a router before failing + timeout: 0, + /// In peer mode, the period dedicated to scouting remote peers before attempting other operations + delay: 0, + /// The multicast scouting configuration. + multicast: { + /// Whether multicast scouting is enabled or not + enabled: false, + /// The socket which should be used for multicast scouting + address: "224.0.0.224:7446", + /// The network interface which should be used for multicast scouting + interface: "auto", // If not set or set to "auto" the interface if picked automatically + /// Which type of Zenoh instances to automatically establish sessions with upon discovery on UDP multicast. + /// Accepts a single value or different values for router, peer and client. + /// Each value is bit-or-like combinations of "peer", "router" and "client". + autoconnect: { router: "", peer: "router|peer" + }, + /// Whether or not to listen for scout messages on UDP multicast and reply to them. + listen: false, + }, + /// The gossip scouting configuration. + gossip: { + /// Whether gossip scouting is enabled or not + enabled: false, + /// When true, gossip scouting informations are propagated multiple hops to all nodes in the local network. + /// When false, gossip scouting informations are only propagated to the next hop. + /// Activating multihop gossip implies more scouting traffic and a lower scalability. + /// It mostly makes sense when using "linkstate" routing mode where all nodes in the subsystem don't have + /// direct connectivity with each other. 
+ multihop: false, + /// Which type of Zenoh instances to automatically establish sessions with upon discovery on gossip. + /// Accepts a single value or different values for router, peer and client. + /// Each value is bit-or-like combinations of "peer", "router" and "client". + autoconnect: { router: "", peer: "router|peer" + }, + }, + }, + "transport": { + "link": { + "protocols": [ + "quic", + //"tls" + ], + "tls": { + "root_ca_certificate": "server_ca.pem", + "client_auth": false, + "client_private_key": "client_key.pem", + "client_certificate": "client_cert.pem" + } + } + } +} \ No newline at end of file diff --git a/io/zenoh-link-commons/src/lib.rs b/io/zenoh-link-commons/src/lib.rs index 0a43aac3d9..12b4f0cabb 100644 --- a/io/zenoh-link-commons/src/lib.rs +++ b/io/zenoh-link-commons/src/lib.rs @@ -48,6 +48,7 @@ pub struct Link { pub is_reliable: bool, pub is_streamed: bool, pub interfaces: Vec, + pub auth_identifier: AuthIdentifier, } #[async_trait] @@ -77,6 +78,7 @@ impl From<&LinkUnicast> for Link { is_reliable: link.is_reliable(), is_streamed: link.is_streamed(), interfaces: link.get_interface_names(), + auth_identifier: link.get_auth_identifier(), } } } @@ -97,6 +99,10 @@ impl From<&LinkMulticast> for Link { is_reliable: link.is_reliable(), is_streamed: false, interfaces: vec![], + auth_identifier: AuthIdentifier { + username: None, + tls_cert_name: None, + }, } } } diff --git a/io/zenoh-link-commons/src/unicast.rs b/io/zenoh-link-commons/src/unicast.rs index 8aa40103cc..150169c5d3 100644 --- a/io/zenoh-link-commons/src/unicast.rs +++ b/io/zenoh-link-commons/src/unicast.rs @@ -19,6 +19,7 @@ use core::{ hash::{Hash, Hasher}, ops::Deref, }; +use serde::Serialize; use zenoh_protocol::core::{EndPoint, Locator}; use zenoh_result::ZResult; @@ -47,6 +48,7 @@ pub trait LinkUnicastTrait: Send + Sync { fn is_reliable(&self) -> bool; fn is_streamed(&self) -> bool; fn get_interface_names(&self) -> Vec; + fn get_auth_identifier(&self) -> AuthIdentifier; async fn write(&self, buffer: &[u8]) -> ZResult; async fn write_all(&self, buffer: &[u8]) -> ZResult<()>; async fn read(&self, buffer: &mut [u8]) -> ZResult; @@ -115,7 +117,8 @@ pub fn get_ip_interface_names(addr: &SocketAddr) -> Vec { } } -#[derive(Debug)] +#[derive(Clone, Debug, Serialize, Hash, PartialEq, Eq)] + pub struct AuthIdentifier { pub username: Option, pub tls_cert_name: Option, diff --git a/io/zenoh-links/zenoh-link-quic/src/unicast-modified.rs b/io/zenoh-links/zenoh-link-quic/src/unicast-modified.rs deleted file mode 100644 index 88e3befe6c..0000000000 --- a/io/zenoh-links/zenoh-link-quic/src/unicast-modified.rs +++ /dev/null @@ -1,662 +0,0 @@ -// -// Copyright (c) 2023 ZettaScale Technology -// -// This program and the accompanying materials are made available under the -// terms of the Eclipse Public License 2.0 which is available at -// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 -// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
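
For reference, the mechanism the TLS/QUIC links in this patch use to fill the new AuthIdentifier is extracting the subject common name (CN) from the peer's DER-encoded certificate with the x509-parser crate. Below is a minimal, self-contained sketch of that step, assuming the AuthIdentifier fields are Option<String>; the cert_der parameter, the function name, and the error handling are illustrative and not taken from the patch itself.

use x509_parser::prelude::*;
use zenoh_link_commons::AuthIdentifier;

// Sketch only: build an AuthIdentifier from a peer's DER-encoded certificate
// by taking its first subject common name (CN). Real links obtain the DER
// bytes from the session (e.g. quinn's peer_identity()); here they are a
// plain byte slice for illustration.
fn auth_identifier_from_der(cert_der: &[u8]) -> AuthIdentifier {
    let tls_cert_name = X509Certificate::from_der(cert_der)
        .ok()
        .and_then(|(_, cert)| {
            cert.subject
                .iter_common_name()
                .next()
                .and_then(|cn| cn.as_str().ok())
                .map(str::to_string)
        });
    AuthIdentifier {
        username: None,
        tls_cert_name,
    }
}
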
-// -// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 -// -// Contributors: -// ZettaScale Zenoh Team, -// - -use x509_parser::prelude::*; - -use crate::base64_decode; -use crate::{ - config::*, get_quic_addr, verify::WebPkiVerifierAnyServerName, ALPN_QUIC_HTTP, - QUIC_ACCEPT_THROTTLE_TIME, QUIC_DEFAULT_MTU, QUIC_LOCATOR_PREFIX, -}; -use async_std::net::{Ipv4Addr, Ipv6Addr, SocketAddr}; -use async_std::prelude::FutureExt; -use async_std::sync::Mutex as AsyncMutex; -use async_std::task; -use async_std::task::JoinHandle; -use async_trait::async_trait; -use rustls::{Certificate, PrivateKey}; -use rustls_pemfile::Item; -use std::collections::HashMap; -use std::fmt; -use std::io::BufReader; -use std::net::IpAddr; -use std::sync::atomic::{AtomicBool, Ordering}; -use std::sync::{Arc, RwLock}; -use std::time::Duration; -use zenoh_core::{zasynclock, zread, zwrite}; -use zenoh_link_commons::{ - AuthIdentifier, LinkManagerUnicastTrait, LinkUnicast, LinkUnicastTrait, NewLinkChannelSender, -}; -use zenoh_protocol::core::{EndPoint, Locator}; -use zenoh_result::{bail, zerror, ZError, ZResult}; -use zenoh_sync::Signal; - -pub struct LinkUnicastQuic { - connection: quinn::Connection, - src_addr: SocketAddr, - src_locator: Locator, - dst_locator: Locator, - send: AsyncMutex, - recv: AsyncMutex, -} - -impl LinkUnicastQuic { - fn new( - connection: quinn::Connection, - src_addr: SocketAddr, - dst_locator: Locator, - send: quinn::SendStream, - recv: quinn::RecvStream, - ) -> LinkUnicastQuic { - // Build the Quic object - LinkUnicastQuic { - connection, - src_addr, - src_locator: Locator::new(QUIC_LOCATOR_PREFIX, src_addr.to_string(), "").unwrap(), - dst_locator, - send: AsyncMutex::new(send), - recv: AsyncMutex::new(recv), - } - } -} - -#[async_trait] -impl LinkUnicastTrait for LinkUnicastQuic { - async fn close(&self) -> ZResult<()> { - log::trace!("Closing QUIC link: {}", self); - // Flush the QUIC stream - let mut guard = zasynclock!(self.send); - if let Err(e) = guard.finish().await { - log::trace!("Error closing QUIC stream {}: {}", self, e); - } - self.connection.close(quinn::VarInt::from_u32(0), &[0]); - Ok(()) - } - - async fn write(&self, buffer: &[u8]) -> ZResult { - let mut guard = zasynclock!(self.send); - guard.write(buffer).await.map_err(|e| { - log::trace!("Write error on QUIC link {}: {}", self, e); - zerror!(e).into() - }) - } - - async fn write_all(&self, buffer: &[u8]) -> ZResult<()> { - let mut guard = zasynclock!(self.send); - guard.write_all(buffer).await.map_err(|e| { - log::trace!("Write error on QUIC link {}: {}", self, e); - zerror!(e).into() - }) - } - - async fn read(&self, buffer: &mut [u8]) -> ZResult { - let mut guard = zasynclock!(self.recv); - guard - .read(buffer) - .await - .map_err(|e| { - let e = zerror!("Read error on QUIC link {}: {}", self, e); - log::trace!("{}", &e); - e - })? 
- .ok_or_else(|| { - let e = zerror!( - "Read error on QUIC link {}: stream {} has been closed", - self, - guard.id() - ); - log::trace!("{}", &e); - e.into() - }) - } - - async fn read_exact(&self, buffer: &mut [u8]) -> ZResult<()> { - let mut guard = zasynclock!(self.recv); - guard.read_exact(buffer).await.map_err(|e| { - let e = zerror!("Read error on QUIC link {}: {}", self, e); - log::trace!("{}", &e); - e.into() - }) - } - - #[inline(always)] - fn get_src(&self) -> &Locator { - &self.src_locator - } - - #[inline(always)] - fn get_dst(&self) -> &Locator { - &self.dst_locator - } - - #[inline(always)] - fn get_mtu(&self) -> u16 { - *QUIC_DEFAULT_MTU - } - - #[inline(always)] - fn get_interface_names(&self) -> Vec { - // @TODO: Not supported for now - log::debug!("The get_interface_names for LinkUnicastQuic is not supported"); - vec![] - } - - #[inline(always)] - fn is_reliable(&self) -> bool { - true - } - - #[inline(always)] - fn is_streamed(&self) -> bool { - true - } -} - -impl Drop for LinkUnicastQuic { - fn drop(&mut self) { - self.connection.close(quinn::VarInt::from_u32(0), &[0]); - } -} - -impl fmt::Display for LinkUnicastQuic { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!( - f, - "{} => {}", - self.src_addr, - self.connection.remote_address() - )?; - Ok(()) - } -} - -impl fmt::Debug for LinkUnicastQuic { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Quic") - .field("src", &self.src_addr) - .field("dst", &self.connection.remote_address()) - .finish() - } -} - -/*************************************/ -/* LISTENER */ -/*************************************/ -struct ListenerUnicastQuic { - endpoint: EndPoint, - active: Arc, - signal: Signal, - handle: JoinHandle>, -} - -impl ListenerUnicastQuic { - fn new( - endpoint: EndPoint, - active: Arc, - signal: Signal, - handle: JoinHandle>, - ) -> ListenerUnicastQuic { - ListenerUnicastQuic { - endpoint, - active, - signal, - handle, - } - } -} - -pub struct LinkManagerUnicastQuic { - manager: NewLinkChannelSender, - listeners: Arc>>, -} - -impl LinkManagerUnicastQuic { - pub fn new(manager: NewLinkChannelSender) -> Self { - Self { - manager, - listeners: Arc::new(RwLock::new(HashMap::new())), - } - } -} - -#[async_trait] -impl LinkManagerUnicastTrait for LinkManagerUnicastQuic { - async fn new_link(&self, endpoint: EndPoint) -> ZResult { - println!("this is called when connecting via quic"); - let epaddr = endpoint.address(); - let host = epaddr - .as_str() - .split(':') - .next() - .ok_or("Endpoints must be of the form quic/
:")?; - let epconf = endpoint.config(); - - let addr = get_quic_addr(&epaddr).await?; - - let server_name_verification: bool = epconf - .get(TLS_SERVER_NAME_VERIFICATION) - .unwrap_or(TLS_SERVER_NAME_VERIFICATION_DEFAULT) - .parse()?; - - if !server_name_verification { - log::warn!("Skipping name verification of servers"); - } - - // Initialize the QUIC connection - let mut root_cert_store = rustls::RootCertStore::empty(); - - // Read the certificates - let f = if let Some(value) = epconf.get(TLS_ROOT_CA_CERTIFICATE_RAW) { - value.as_bytes().to_vec() - } else if let Some(b64_certificate) = epconf.get(TLS_ROOT_CA_CERTIFICATE_BASE64) { - base64_decode(b64_certificate)? - } else if let Some(value) = epconf.get(TLS_ROOT_CA_CERTIFICATE_FILE) { - async_std::fs::read(value) - .await - .map_err(|e| zerror!("Invalid QUIC CA certificate file: {}", e))? - } else { - vec![] - }; - - let certificates = if f.is_empty() { - rustls_native_certs::load_native_certs() - .map_err(|e| zerror!("Invalid QUIC CA certificate file: {}", e))? - .drain(..) - .map(|x| rustls::Certificate(x.to_vec())) - .collect::>() - } else { - rustls_pemfile::certs(&mut BufReader::new(f.as_slice())) - .map(|result| { - result - .map_err(|err| zerror!("Invalid QUIC CA certificate file: {}", err)) - .map(|der| Certificate(der.to_vec())) - }) - .collect::, ZError>>()? - }; - for c in certificates.iter() { - root_cert_store.add(c).map_err(|e| zerror!("{}", e))?; - } - - let client_crypto = rustls::ClientConfig::builder().with_safe_defaults(); - - let mut client_crypto = if server_name_verification { - client_crypto - .with_root_certificates(root_cert_store) - .with_no_client_auth() - } else { - client_crypto - .with_custom_certificate_verifier(Arc::new(WebPkiVerifierAnyServerName::new( - root_cert_store, - ))) - .with_no_client_auth() - }; - - client_crypto.alpn_protocols = ALPN_QUIC_HTTP.iter().map(|&x| x.into()).collect(); - - let ip_addr: IpAddr = if addr.is_ipv4() { - Ipv4Addr::UNSPECIFIED.into() - } else { - Ipv6Addr::UNSPECIFIED.into() - }; - - let mut quic_endpoint = quinn::Endpoint::client(SocketAddr::new(ip_addr, 0)) - .map_err(|e| zerror!("Can not create a new QUIC link bound to {}: {}", host, e))?; - - quic_endpoint.set_default_client_config(quinn::ClientConfig::new(Arc::new(client_crypto))); - - let src_addr = quic_endpoint - .local_addr() - .map_err(|e| zerror!("Can not create a new QUIC link bound to {}: {}", host, e))?; - println!("tried to make quic_connection"); - - let quic_conn = quic_endpoint - .connect(addr, host) - .map_err(|e| zerror!("Can not create a new QUIC link bound to {}: {}", host, e))? 
- .await - .map_err(|e| zerror!("Can not create a new QUIC link bound to {}: {}", host, e))?; - println!("was able to make quic_connection"); - - let (send, recv) = quic_conn - .open_bi() - .await - .map_err(|e| zerror!("Can not create a new QUIC link bound to {}: {}", host, e))?; - - // let pi = &quic_conn.peer_identity().unwrap(); - // match pi.downcast_ref::>() { - // Some(serv_certs) => { - // println!("the server certs were found"); - // for item in serv_certs { - // let (_, cert) = X509Certificate::from_der(item.as_ref()).unwrap(); - // let subject_name = &cert - // .subject - // .iter_common_name() - // .next() - // .and_then(|cn| cn.as_str().ok()) - // .unwrap(); - // let auth_identifier = AuthIdentifier { - // username: None, - // tls_cert_name: Some(subject_name.to_string()), - // }; - - // println!("auth_identifier: {:?}", auth_identifier); - // } - // } - // None => { - // println!("no server certs found"); - // } - // } - - let link = Arc::new(LinkUnicastQuic::new( - quic_conn, - src_addr, - endpoint.into(), - send, - recv, - )); - - Ok(LinkUnicast(link)) - } - - async fn new_listener(&self, mut endpoint: EndPoint) -> ZResult { - let epaddr = endpoint.address(); - let epconf = endpoint.config(); - - if epconf.is_empty() { - bail!("No QUIC configuration provided"); - }; - - let addr = get_quic_addr(&epaddr).await?; - - let f = if let Some(value) = epconf.get(TLS_SERVER_CERTIFICATE_RAW) { - value.as_bytes().to_vec() - } else if let Some(b64_certificate) = epconf.get(TLS_SERVER_CERTIFICATE_BASE64) { - base64_decode(b64_certificate)? - } else if let Some(value) = epconf.get(TLS_SERVER_CERTIFICATE_FILE) { - async_std::fs::read(value) - .await - .map_err(|e| zerror!("Invalid QUIC CA certificate file: {}", e))? - } else { - bail!("No QUIC CA certificate has been provided."); - }; - let certificates = rustls_pemfile::certs(&mut BufReader::new(f.as_slice())) - .map(|result| { - result - .map_err(|err| zerror!("Invalid QUIC CA certificate file: {}", err)) - .map(|der| Certificate(der.to_vec())) - }) - .collect::, ZError>>()?; - - // Private keys - let f = if let Some(value) = epconf.get(TLS_SERVER_PRIVATE_KEY_RAW) { - value.as_bytes().to_vec() - } else if let Some(b64_key) = epconf.get(TLS_SERVER_PRIVATE_KEY_BASE64) { - base64_decode(b64_key)? - } else if let Some(value) = epconf.get(TLS_SERVER_PRIVATE_KEY_FILE) { - async_std::fs::read(value) - .await - .map_err(|e| zerror!("Invalid QUIC CA certificate file: {}", e))? 
- } else { - bail!("No QUIC CA private key has been provided."); - }; - let items: Vec = rustls_pemfile::read_all(&mut BufReader::new(f.as_slice())) - .map(|result| { - result.map_err(|err| zerror!("Invalid QUIC CA private key file: {}", err)) - }) - .collect::, ZError>>()?; - - let private_key = items - .into_iter() - .filter_map(|x| match x { - rustls_pemfile::Item::Pkcs1Key(k) => Some(k.secret_pkcs1_der().to_vec()), - rustls_pemfile::Item::Pkcs8Key(k) => Some(k.secret_pkcs8_der().to_vec()), - rustls_pemfile::Item::Sec1Key(k) => Some(k.secret_sec1_der().to_vec()), - _ => None, - }) - .take(1) - .next() - .ok_or_else(|| zerror!("No QUIC CA private key has been provided.")) - .map(PrivateKey)?; - - // Server config - let mut server_crypto = rustls::ServerConfig::builder() - .with_safe_defaults() - .with_no_client_auth() - .with_single_cert(certificates, private_key)?; - server_crypto.alpn_protocols = ALPN_QUIC_HTTP.iter().map(|&x| x.into()).collect(); - let mut server_config = quinn::ServerConfig::with_crypto(Arc::new(server_crypto)); - - // We do not accept unidireactional streams. - Arc::get_mut(&mut server_config.transport) - .unwrap() - .max_concurrent_uni_streams(0_u8.into()); - // For the time being we only allow one bidirectional stream - Arc::get_mut(&mut server_config.transport) - .unwrap() - .max_concurrent_bidi_streams(1_u8.into()); - - // Initialize the Endpoint - let quic_endpoint = quinn::Endpoint::server(server_config, addr) - .map_err(|e| zerror!("Can not create a new QUIC listener on {}: {}", addr, e))?; - - let local_addr = quic_endpoint - .local_addr() - .map_err(|e| zerror!("Can not create a new QUIC listener on {}: {}", addr, e))?; - - // Update the endpoint locator address - endpoint = EndPoint::new( - endpoint.protocol(), - local_addr.to_string(), - endpoint.metadata(), - endpoint.config(), - )?; - - // Spawn the accept loop for the listener - let active = Arc::new(AtomicBool::new(true)); - let signal = Signal::new(); - let mut listeners = zwrite!(self.listeners); - - let c_active = active.clone(); - let c_signal = signal.clone(); - let c_manager = self.manager.clone(); - let c_listeners = self.listeners.clone(); - let c_addr = local_addr; - let handle = task::spawn(async move { - // Wait for the accept loop to terminate - let res = accept_task(quic_endpoint, c_active, c_signal, c_manager).await; - zwrite!(c_listeners).remove(&c_addr); - res - }); - - // Initialize the QuicAcceptor - let locator = endpoint.to_locator(); - let listener = ListenerUnicastQuic::new(endpoint, active, signal, handle); - // Update the list of active listeners on the manager - listeners.insert(local_addr, listener); - - Ok(locator) - } - - async fn del_listener(&self, endpoint: &EndPoint) -> ZResult<()> { - let epaddr = endpoint.address(); - - let addr = get_quic_addr(&epaddr).await?; - - // Stop the listener - let listener = zwrite!(self.listeners).remove(&addr).ok_or_else(|| { - let e = zerror!( - "Can not delete the QUIC listener because it has not been found: {}", - addr - ); - log::trace!("{}", e); - e - })?; - - // Send the stop signal - listener.active.store(false, Ordering::Release); - listener.signal.trigger(); - listener.handle.await - } - - fn get_listeners(&self) -> Vec { - zread!(self.listeners) - .values() - .map(|x| x.endpoint.clone()) - .collect() - } - - fn get_locators(&self) -> Vec { - let mut locators = vec![]; - - let guard = zread!(self.listeners); - for (key, value) in guard.iter() { - let (kip, kpt) = (key.ip(), key.port()); - - // Either ipv4/0.0.0.0 or ipv6/[::] - 
if kip.is_unspecified() { - let mut addrs = match kip { - IpAddr::V4(_) => zenoh_util::net::get_ipv4_ipaddrs(), - IpAddr::V6(_) => zenoh_util::net::get_ipv6_ipaddrs(), - }; - let iter = addrs.drain(..).map(|x| { - Locator::new( - value.endpoint.protocol(), - SocketAddr::new(x, kpt).to_string(), - value.endpoint.metadata(), - ) - .unwrap() - }); - locators.extend(iter); - } else { - locators.push(value.endpoint.to_locator()); - } - } - - locators - } -} - -async fn accept_task( - endpoint: quinn::Endpoint, - active: Arc, - signal: Signal, - manager: NewLinkChannelSender, -) -> ZResult<()> { - enum Action { - Accept(quinn::Connection), - Stop, - } - - async fn accept(acceptor: quinn::Accept<'_>) -> ZResult { - let qc = acceptor - .await - .ok_or_else(|| zerror!("Can not accept QUIC connections: acceptor closed"))?; - - let conn = qc.await.map_err(|e| { - let e = zerror!("QUIC acceptor failed: {:?}", e); - log::warn!("{}", e); - e - })?; - - Ok(Action::Accept(conn)) - } - - async fn stop(signal: Signal) -> ZResult { - signal.wait().await; - Ok(Action::Stop) - } - - let src_addr = endpoint - .local_addr() - .map_err(|e| zerror!("Can not accept QUIC connections: {}", e))?; - - // The accept future - log::trace!("Ready to accept QUIC connections on: {:?}", src_addr); - while active.load(Ordering::Acquire) { - // Wait for incoming connections - let quic_conn = match accept(endpoint.accept()).race(stop(signal.clone())).await { - Ok(action) => match action { - Action::Accept(qc) => qc, - Action::Stop => break, - }, - Err(e) => { - log::warn!("{} Hint: increase the system open file limit.", e); - // Throttle the accept loop upon an error - // NOTE: This might be due to various factors. However, the most common case is that - // the process has reached the maximum number of open files in the system. On - // Linux systems this limit can be changed by using the "ulimit" command line - // tool. In case of systemd-based systems, this can be changed by using the - // "sysctl" command line tool. - task::sleep(Duration::from_micros(*QUIC_ACCEPT_THROTTLE_TIME)).await; - continue; - } - }; - - // Get the bideractional streams. Note that we don't allow unidirectional streams. 
- let (send, recv) = match quic_conn.accept_bi().await { - Ok(stream) => stream, - Err(e) => { - log::warn!("QUIC connection has no streams: {:?}", e); - continue; - } - }; - - // Create the new link object - // let pi = &quic_conn.peer_identity().unwrap(); - //println!("the accept function is also called before check"); - - // match quic_conn - // .peer_identity() - // .unwrap() - // .downcast_ref::>() - // { - // Some(serv_certs) => { - // println!("the client certs were found"); - // for item in serv_certs { - // let (_, cert) = X509Certificate::from_der(item.as_ref()).unwrap(); - // let subject_name = &cert - // .subject - // .iter_common_name() - // .next() - // .and_then(|cn| cn.as_str().ok()) - // .unwrap(); - // let auth_identifier = AuthIdentifier { - // username: None, - // tls_cert_name: Some(subject_name.to_string()), - // }; - - // println!("auth_identifier: {:?}", auth_identifier); - // } - // } - // None => { - // println!("no client certs found"); - // } - // } - let dst_addr = quic_conn.remote_address(); - log::debug!("Accepted QUIC connection on {:?}: {:?}", src_addr, dst_addr); - let link = Arc::new(LinkUnicastQuic::new( - quic_conn, - src_addr, - Locator::new(QUIC_LOCATOR_PREFIX, dst_addr.to_string(), "")?, - send, - recv, - )); - - // Communicate the new link to the initial transport manager - if let Err(e) = manager.send_async(LinkUnicast(link)).await { - log::error!("{}-{}: {}", file!(), line!(), e) - } - } - - Ok(()) -} diff --git a/io/zenoh-links/zenoh-link-quic/src/unicast.rs b/io/zenoh-links/zenoh-link-quic/src/unicast.rs index 366860801e..13903eb1b2 100644 --- a/io/zenoh-links/zenoh-link-quic/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-quic/src/unicast.rs @@ -11,6 +11,7 @@ // Contributors: // ZettaScale Zenoh Team, // +use x509_parser::prelude::*; use crate::base64_decode; use crate::{ @@ -32,7 +33,7 @@ use std::sync::Arc; use std::time::Duration; use zenoh_core::zasynclock; use zenoh_link_commons::{ - get_ip_interface_names, LinkManagerUnicastTrait, LinkUnicast, LinkUnicastTrait, + get_ip_interface_names, AuthIdentifier, LinkManagerUnicastTrait, LinkUnicast, LinkUnicastTrait, ListenersUnicastIP, NewLinkChannelSender, }; use zenoh_protocol::core::{EndPoint, Locator}; @@ -46,6 +47,7 @@ pub struct LinkUnicastQuic { dst_locator: Locator, send: AsyncMutex, recv: AsyncMutex, + auth_identifier: Option, } impl LinkUnicastQuic { @@ -55,6 +57,7 @@ impl LinkUnicastQuic { dst_locator: Locator, send: quinn::SendStream, recv: quinn::RecvStream, + auth_identifier: Option, ) -> LinkUnicastQuic { // Build the Quic object LinkUnicastQuic { @@ -64,6 +67,7 @@ impl LinkUnicastQuic { dst_locator, send: AsyncMutex::new(send), recv: AsyncMutex::new(recv), + auth_identifier, } } } @@ -156,6 +160,15 @@ impl LinkUnicastTrait for LinkUnicastQuic { fn is_streamed(&self) -> bool { true } + fn get_auth_identifier(&self) -> AuthIdentifier { + match &self.auth_identifier { + Some(identifier) => identifier.clone(), + None => AuthIdentifier { + username: None, + tls_cert_name: None, + }, + } + } } impl Drop for LinkUnicastQuic { @@ -296,14 +309,42 @@ impl LinkManagerUnicastTrait for LinkManagerUnicastQuic { .await .map_err(|e| zerror!("Can not create a new QUIC link bound to {}: {}", host, e))?; + let mut test_auth_id: Option = None; + + let pi = &quic_conn.peer_identity().unwrap(); + match pi.downcast_ref::>() { + Some(serv_certs) => { + println!("the server certs were found"); + for item in serv_certs { + let (_, cert) = X509Certificate::from_der(item.as_ref()).unwrap(); + let subject_name 
= &cert + .subject + .iter_common_name() + .next() + .and_then(|cn| cn.as_str().ok()) + .unwrap(); + let auth_identifier = AuthIdentifier { + username: None, + tls_cert_name: Some(subject_name.to_string()), + }; + + println!("server side quic auth_identifier: {:?}", auth_identifier); + test_auth_id = Some(auth_identifier); + } + } + None => { + println!("no server certs found"); + } + } + let link = Arc::new(LinkUnicastQuic::new( quic_conn, src_addr, endpoint.into(), send, recv, + test_auth_id, )); - Ok(LinkUnicast(link)) } @@ -501,6 +542,51 @@ async fn accept_task( continue; } }; + println!("the accept function is also called before check"); + let mut test_auth_id: Option = None; + { + let new_conn = quic_conn.clone(); + println!( + "handshake information: {:?}", + new_conn + .handshake_data() + .unwrap() + .downcast_ref::() + .unwrap() + .server_name + ); + //println!("connection info: {:?}", new_conn); + if let Some(cert_info) = new_conn.peer_identity() { + //use cert info + + match cert_info.downcast_ref::>() { + Some(serv_certs) => { + println!("the client certs were found"); + for item in serv_certs { + let (_, cert) = X509Certificate::from_der(item.as_ref()).unwrap(); + let subject_name = &cert + .subject + .iter_common_name() + .next() + .and_then(|cn| cn.as_str().ok()) + .unwrap(); + let auth_identifier = AuthIdentifier { + username: None, + tls_cert_name: Some(subject_name.to_string()), + }; + + println!("client side quic auth_identifier: {:?}", auth_identifier); + test_auth_id = Some(auth_identifier); + } + } + None => { + println!("no client certs found"); + } + } + } else { + println!("no certs were found"); + } + } let dst_addr = quic_conn.remote_address(); log::debug!("Accepted QUIC connection on {:?}: {:?}", src_addr, dst_addr); @@ -511,6 +597,7 @@ async fn accept_task( Locator::new(QUIC_LOCATOR_PREFIX, dst_addr.to_string(), "")?, send, recv, + test_auth_id, )); // Communicate the new link to the initial transport manager diff --git a/io/zenoh-links/zenoh-link-serial/src/unicast.rs b/io/zenoh-links/zenoh-link-serial/src/unicast.rs index fafac4c393..9f28d251a1 100644 --- a/io/zenoh-links/zenoh-link-serial/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-serial/src/unicast.rs @@ -25,8 +25,8 @@ use std::sync::{Arc, RwLock}; use std::time::Duration; use zenoh_core::{zasynclock, zread, zwrite}; use zenoh_link_commons::{ - ConstructibleLinkManagerUnicast, LinkManagerUnicastTrait, LinkUnicast, LinkUnicastTrait, - NewLinkChannelSender, + AuthIdentifier, ConstructibleLinkManagerUnicast, LinkManagerUnicastTrait, LinkUnicast, + LinkUnicastTrait, NewLinkChannelSender, }; use zenoh_protocol::core::{EndPoint, Locator}; use zenoh_result::{zerror, ZResult}; @@ -206,6 +206,12 @@ impl LinkUnicastTrait for LinkUnicastSerial { fn is_streamed(&self) -> bool { false } + fn get_auth_identifier(&self) -> AuthIdentifier { + AuthIdentifier { + username: None, + tls_cert_name: None, + } + } } impl fmt::Display for LinkUnicastSerial { diff --git a/io/zenoh-links/zenoh-link-tcp/src/unicast.rs b/io/zenoh-links/zenoh-link-tcp/src/unicast.rs index b01d8be22e..22367aa2ee 100644 --- a/io/zenoh-links/zenoh-link-tcp/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-tcp/src/unicast.rs @@ -22,7 +22,7 @@ use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Arc; use std::time::Duration; use zenoh_link_commons::{ - get_ip_interface_names, LinkManagerUnicastTrait, LinkUnicast, LinkUnicastTrait, + get_ip_interface_names, AuthIdentifier, LinkManagerUnicastTrait, LinkUnicast, LinkUnicastTrait, ListenersUnicastIP, 
NewLinkChannelSender, BIND_INTERFACE, }; use zenoh_protocol::core::{EndPoint, Locator}; @@ -156,6 +156,12 @@ impl LinkUnicastTrait for LinkUnicastTcp { fn is_streamed(&self) -> bool { true } + fn get_auth_identifier(&self) -> AuthIdentifier { + AuthIdentifier { + username: None, + tls_cert_name: None, + } + } } impl Drop for LinkUnicastTcp { diff --git a/io/zenoh-links/zenoh-link-tls/src/unicast-modified.rs b/io/zenoh-links/zenoh-link-tls/src/unicast-modified.rs deleted file mode 100644 index 4d6f316fbd..0000000000 --- a/io/zenoh-links/zenoh-link-tls/src/unicast-modified.rs +++ /dev/null @@ -1,916 +0,0 @@ -// -// Copyright (c) 2023 ZettaScale Technology -// -// This program and the accompanying materials are made available under the -// terms of the Eclipse Public License 2.0 which is available at -// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 -// which is available at https://www.apache.org/licenses/LICENSE-2.0. -// -// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 -// -// Contributors: -// ZettaScale Zenoh Team, -// -use x509_parser::prelude::*; - -use crate::{ - base64_decode, config::*, get_tls_addr, get_tls_host, get_tls_server_name, - verify::WebPkiVerifierAnyServerName, TLS_ACCEPT_THROTTLE_TIME, TLS_DEFAULT_MTU, - TLS_LINGER_TIMEOUT, TLS_LOCATOR_PREFIX, -}; -use async_rustls::{ - rustls::{ - server::AllowAnyAuthenticatedClient, version::TLS13, Certificate, ClientConfig, - OwnedTrustAnchor, PrivateKey, RootCertStore, ServerConfig, - }, - TlsAcceptor, TlsConnector, TlsStream, -}; -use async_std::fs; -use async_std::net::{SocketAddr, TcpListener, TcpStream}; -use async_std::prelude::FutureExt; -use async_std::sync::Mutex as AsyncMutex; -use async_std::task; -use async_std::task::JoinHandle; -use async_trait::async_trait; -use futures::io::AsyncReadExt; -use futures::io::AsyncWriteExt; -use std::collections::HashMap; -use std::convert::TryInto; -use std::fmt; -use std::fs::File; -use std::io::{BufReader, Cursor}; -use std::net::{IpAddr, Shutdown}; -use std::sync::atomic::{AtomicBool, Ordering}; -use std::sync::{Arc, RwLock}; -use std::time::Duration; -use std::{cell::UnsafeCell, io}; -use webpki::{ - anchor_from_trusted_cert, - types::{CertificateDer, TrustAnchor}, -}; -use zenoh_core::{zasynclock, zread, zwrite}; -use zenoh_link_commons::{ - AuthIdentifier, LinkManagerUnicastTrait, LinkUnicast, LinkUnicastTrait, NewLinkChannelSender, -}; -use zenoh_protocol::core::endpoint::Config; -use zenoh_protocol::core::{EndPoint, Locator}; -use zenoh_result::{bail, zerror, ZError, ZResult}; -use zenoh_sync::Signal; - -pub struct LinkUnicastTls { - // The underlying socket as returned from the async-rustls library - // NOTE: TlsStream requires &mut for read and write operations. This means - // that concurrent reads and writes are not possible. To achieve that, - // we use an UnsafeCell for interior mutability. Using an UnsafeCell - // is safe in our case since the transmission and reception logic - // already ensures that no concurrent reads or writes can happen on - // the same stream: there is only one task at the time that writes on - // the stream and only one task at the time that reads from the stream. 
- inner: UnsafeCell>, - // The source socket address of this link (address used on the local host) - src_addr: SocketAddr, - src_locator: Locator, - // The destination socket address of this link (address used on the local host) - dst_addr: SocketAddr, - dst_locator: Locator, - // Make sure there are no concurrent read or writes - write_mtx: AsyncMutex<()>, - read_mtx: AsyncMutex<()>, -} - -unsafe impl Send for LinkUnicastTls {} -unsafe impl Sync for LinkUnicastTls {} - -impl LinkUnicastTls { - fn new( - socket: TlsStream, - src_addr: SocketAddr, - dst_addr: SocketAddr, - ) -> LinkUnicastTls { - let (tcp_stream, _) = socket.get_ref(); - // Set the TLS nodelay option - if let Err(err) = tcp_stream.set_nodelay(true) { - log::warn!( - "Unable to set NODEALY option on TLS link {} => {}: {}", - src_addr, - dst_addr, - err - ); - } - - // Set the TLS linger option - if let Err(err) = zenoh_util::net::set_linger( - tcp_stream, - Some(Duration::from_secs( - (*TLS_LINGER_TIMEOUT).try_into().unwrap(), - )), - ) { - log::warn!( - "Unable to set LINGER option on TLS link {} => {}: {}", - src_addr, - dst_addr, - err - ); - } - - // Build the Tls object - LinkUnicastTls { - inner: UnsafeCell::new(socket), - src_addr, - src_locator: Locator::new(TLS_LOCATOR_PREFIX, src_addr.to_string(), "").unwrap(), - dst_addr, - dst_locator: Locator::new(TLS_LOCATOR_PREFIX, dst_addr.to_string(), "").unwrap(), - write_mtx: AsyncMutex::new(()), - read_mtx: AsyncMutex::new(()), - } - } - - // NOTE: It is safe to suppress Clippy warning since no concurrent reads - // or concurrent writes will ever happen. The read_mtx and write_mtx - // are respectively acquired in any read and write operation. - #[allow(clippy::mut_from_ref)] - fn get_sock_mut(&self) -> &mut TlsStream { - unsafe { &mut *self.inner.get() } - } -} - -#[async_trait] -impl LinkUnicastTrait for LinkUnicastTls { - async fn close(&self) -> ZResult<()> { - log::trace!("Closing TLS link: {}", self); - // Flush the TLS stream - let _guard = zasynclock!(self.write_mtx); - let tls_stream = self.get_sock_mut(); - let res = tls_stream.flush().await; - log::trace!("TLS link flush {}: {:?}", self, res); - // Close the underlying TCP stream - let (tcp_stream, _) = tls_stream.get_ref(); - let res = tcp_stream.shutdown(Shutdown::Both); - log::trace!("TLS link shutdown {}: {:?}", self, res); - res.map_err(|e| zerror!(e).into()) - } - - async fn write(&self, buffer: &[u8]) -> ZResult { - let _guard = zasynclock!(self.write_mtx); - self.get_sock_mut().write(buffer).await.map_err(|e| { - log::trace!("Write error on TLS link {}: {}", self, e); - zerror!(e).into() - }) - } - - async fn write_all(&self, buffer: &[u8]) -> ZResult<()> { - let _guard = zasynclock!(self.write_mtx); - self.get_sock_mut().write_all(buffer).await.map_err(|e| { - log::trace!("Write error on TLS link {}: {}", self, e); - zerror!(e).into() - }) - } - - async fn read(&self, buffer: &mut [u8]) -> ZResult { - let _guard = zasynclock!(self.read_mtx); - self.get_sock_mut().read(buffer).await.map_err(|e| { - log::trace!("Read error on TLS link {}: {}", self, e); - zerror!(e).into() - }) - } - - async fn read_exact(&self, buffer: &mut [u8]) -> ZResult<()> { - let _guard = zasynclock!(self.read_mtx); - self.get_sock_mut().read_exact(buffer).await.map_err(|e| { - log::trace!("Read error on TLS link {}: {}", self, e); - zerror!(e).into() - }) - } - - #[inline(always)] - fn get_src(&self) -> &Locator { - &self.src_locator - } - - #[inline(always)] - fn get_dst(&self) -> &Locator { - &self.dst_locator - } - - 
#[inline(always)] - fn get_mtu(&self) -> u16 { - *TLS_DEFAULT_MTU - } - - #[inline(always)] - fn get_interface_names(&self) -> Vec { - // @TODO: Not supported for now - log::debug!("The get_interface_names for LinkUnicastTls is not supported"); - vec![] - } - - #[inline(always)] - fn is_reliable(&self) -> bool { - true - } - - #[inline(always)] - fn is_streamed(&self) -> bool { - true - } -} - -impl Drop for LinkUnicastTls { - fn drop(&mut self) { - // Close the underlying TCP stream - let (tcp_stream, _) = self.get_sock_mut().get_ref(); - let _ = tcp_stream.shutdown(Shutdown::Both); - } -} - -impl fmt::Display for LinkUnicastTls { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{} => {}", self.src_addr, self.dst_addr)?; - Ok(()) - } -} - -impl fmt::Debug for LinkUnicastTls { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Tls") - .field("src", &self.src_addr) - .field("dst", &self.dst_addr) - .finish() - } -} - -/*************************************/ -/* LISTENER */ -/*************************************/ -struct ListenerUnicastTls { - endpoint: EndPoint, - active: Arc, - signal: Signal, - handle: JoinHandle>, -} - -impl ListenerUnicastTls { - fn new( - endpoint: EndPoint, - active: Arc, - signal: Signal, - handle: JoinHandle>, - ) -> ListenerUnicastTls { - ListenerUnicastTls { - endpoint, - active, - signal, - handle, - } - } -} - -pub struct LinkManagerUnicastTls { - manager: NewLinkChannelSender, - listeners: Arc>>, -} - -impl LinkManagerUnicastTls { - pub fn new(manager: NewLinkChannelSender) -> Self { - Self { - manager, - listeners: Arc::new(RwLock::new(HashMap::new())), - } - } -} - -#[async_trait] -impl LinkManagerUnicastTrait for LinkManagerUnicastTls { - async fn new_link(&self, endpoint: EndPoint) -> ZResult { - let epaddr = endpoint.address(); - let epconf = endpoint.config(); - - let server_name = get_tls_server_name(&epaddr)?; - let addr = get_tls_addr(&epaddr).await?; - - // Initialize the TLS Config - let client_config = TlsClientConfig::new(&epconf) - .await - .map_err(|e| zerror!("Cannot create a new TLS listener to {endpoint}: {e}"))?; - let config = Arc::new(client_config.client_config); - let connector = TlsConnector::from(config); - - // Initialize the TcpStream - let tcp_stream = TcpStream::connect(addr).await.map_err(|e| { - zerror!( - "Can not create a new TLS link bound to {:?}: {}", - server_name, - e - ) - })?; - - let src_addr = tcp_stream.local_addr().map_err(|e| { - zerror!( - "Can not create a new TLS link bound to {:?}: {}", - server_name, - e - ) - })?; - - let dst_addr = tcp_stream.peer_addr().map_err(|e| { - zerror!( - "Can not create a new TLS link bound to {:?}: {}", - server_name, - e - ) - })?; - - // Initialize the TlsStream - let tls_stream = connector - .connect(server_name.to_owned(), tcp_stream) - .await - .map_err(|e| { - zerror!( - "Can not create a new TLS link bound to {:?}: {}", - server_name, - e - ) - })?; - let (_, tls_conn) = tls_stream.get_ref(); - - let serv_certs = tls_conn.peer_certificates().unwrap(); - - for item in serv_certs { - let (_, cert) = X509Certificate::from_der(item.as_ref()).unwrap(); - let subject_name = &cert - .subject - .iter_common_name() - .next() - .and_then(|cn| cn.as_str().ok()) - .unwrap(); - let auth_identifier = AuthIdentifier { - username: None, - tls_cert_name: Some(subject_name.to_string()), - }; - - println!("auth_identifier: {:?}", auth_identifier); - } - let tls_stream = TlsStream::Client(tls_stream); - let link = 
Arc::new(LinkUnicastTls::new(tls_stream, src_addr, dst_addr)); - - Ok(LinkUnicast(link)) - } - - async fn new_listener(&self, endpoint: EndPoint) -> ZResult { - let epaddr = endpoint.address(); - let epconf = endpoint.config(); - - let addr = get_tls_addr(&epaddr).await?; - let host = get_tls_host(&epaddr)?; - - // Initialize TlsConfig - let tls_server_config = TlsServerConfig::new(&epconf) - .await - .map_err(|e| zerror!("Cannot create a new TLS listener on {addr}. {e}"))?; - - // Initialize the TcpListener - let socket = TcpListener::bind(addr) - .await - .map_err(|e| zerror!("Can not create a new TLS listener on {}: {}", addr, e))?; - - let local_addr = socket - .local_addr() - .map_err(|e| zerror!("Can not create a new TLS listener on {}: {}", addr, e))?; - let local_port = local_addr.port(); - - // Initialize the TlsAcceptor - let acceptor = TlsAcceptor::from(Arc::new(tls_server_config.server_config)); - let active = Arc::new(AtomicBool::new(true)); - let signal = Signal::new(); - - // Spawn the accept loop for the listener - let c_active = active.clone(); - let c_signal = signal.clone(); - let c_manager = self.manager.clone(); - let c_listeners = self.listeners.clone(); - let c_addr = local_addr; - let handle = task::spawn(async move { - // Wait for the accept loop to terminate - let res = accept_task(socket, acceptor, c_active, c_signal, c_manager).await; - zwrite!(c_listeners).remove(&c_addr); - res - }); - - // Update the endpoint locator address - let locator = Locator::new( - endpoint.protocol(), - format!("{host}:{local_port}"), - endpoint.metadata(), - )?; - - let listener = ListenerUnicastTls::new(endpoint, active, signal, handle); - // Update the list of active listeners on the manager - zwrite!(self.listeners).insert(local_addr, listener); - - Ok(locator) - } - - async fn del_listener(&self, endpoint: &EndPoint) -> ZResult<()> { - let epaddr = endpoint.address(); - - let addr = get_tls_addr(&epaddr).await?; - - // Stop the listener - let listener = zwrite!(self.listeners).remove(&addr).ok_or_else(|| { - let e = zerror!( - "Can not delete the TLS listener because it has not been found: {}", - addr - ); - log::trace!("{}", e); - e - })?; - - // Send the stop signal - listener.active.store(false, Ordering::Release); - listener.signal.trigger(); - listener.handle.await - } - - fn get_listeners(&self) -> Vec { - zread!(self.listeners) - .values() - .map(|x| x.endpoint.clone()) - .collect() - } - - fn get_locators(&self) -> Vec { - let mut locators = vec![]; - - let guard = zread!(self.listeners); - for (key, value) in guard.iter() { - let (kip, kpt) = (key.ip(), key.port()); - - // Either ipv4/0.0.0.0 or ipv6/[::] - if kip.is_unspecified() { - let mut addrs = match kip { - IpAddr::V4(_) => zenoh_util::net::get_ipv4_ipaddrs(), - IpAddr::V6(_) => zenoh_util::net::get_ipv6_ipaddrs(), - }; - let iter = addrs.drain(..).map(|x| { - Locator::new( - value.endpoint.protocol(), - SocketAddr::new(x, kpt).to_string(), - value.endpoint.metadata(), - ) - .unwrap() - }); - locators.extend(iter); - } else { - locators.push(value.endpoint.to_locator()); - } - } - - locators - } -} - -async fn accept_task( - socket: TcpListener, - acceptor: TlsAcceptor, - active: Arc, - signal: Signal, - manager: NewLinkChannelSender, -) -> ZResult<()> { - enum Action { - Accept((TcpStream, SocketAddr)), - Stop, - } - - async fn accept(socket: &TcpListener) -> ZResult { - let res = socket.accept().await.map_err(|e| zerror!(e))?; - Ok(Action::Accept(res)) - } - - async fn stop(signal: Signal) -> ZResult { - 
signal.wait().await; - Ok(Action::Stop) - } - - let src_addr = socket.local_addr().map_err(|e| { - let e = zerror!("Can not accept TLS connections: {}", e); - log::warn!("{}", e); - e - })?; - - log::trace!("Ready to accept TLS connections on: {:?}", src_addr); - while active.load(Ordering::Acquire) { - // Wait for incoming connections - let (tcp_stream, dst_addr) = match accept(&socket).race(stop(signal.clone())).await { - Ok(action) => match action { - Action::Accept((tcp_stream, dst_addr)) => (tcp_stream, dst_addr), - Action::Stop => break, - }, - Err(e) => { - log::warn!("{}. Hint: increase the system open file limit.", e); - // Throttle the accept loop upon an error - // NOTE: This might be due to various factors. However, the most common case is that - // the process has reached the maximum number of open files in the system. On - // Linux systems this limit can be changed by using the "ulimit" command line - // tool. In case of systemd-based systems, this can be changed by using the - // "sysctl" command line tool. - task::sleep(Duration::from_micros(*TLS_ACCEPT_THROTTLE_TIME)).await; - continue; - } - }; - // Accept the TLS connection - let tls_stream = match acceptor.accept(tcp_stream).await { - Ok(stream) => TlsStream::Server(stream), - Err(e) => { - let e = format!("Can not accept TLS connection: {e}"); - log::warn!("{}", e); - continue; - } - }; - let (_, tls_conn) = tls_stream.get_ref(); - let auth_identifier = get_tls_cert_name(tls_conn); - match auth_identifier { - Some(auth_id) => println!("client's ID: {:?}", auth_id), - None => println!("no client ID found"), - } - - log::debug!("Accepted TLS connection on {:?}: {:?}", src_addr, dst_addr); - // Create the new link object - let link = Arc::new(LinkUnicastTls::new(tls_stream, src_addr, dst_addr)); - - // Communicate the new link to the initial transport manager - if let Err(e) = manager.send_async(LinkUnicast(link)).await { - log::error!("{}-{}: {}", file!(), line!(), e) - } - } - - Ok(()) -} -fn get_tls_cert_name(tls_conn: &rustls::CommonState) -> Option { - let serv_certs = tls_conn.peer_certificates()?; - let (_, cert) = X509Certificate::from_der(serv_certs[0].as_ref()).unwrap(); - let subject_name = &cert - .subject - .iter_common_name() - .next() - .and_then(|cn| cn.as_str().ok()) - .unwrap(); - - Some(AuthIdentifier { - username: None, - tls_cert_name: Some(subject_name.to_string()), - }) -} -struct TlsServerConfig { - server_config: ServerConfig, -} - -impl TlsServerConfig { - pub async fn new(config: &Config<'_>) -> ZResult { - let tls_server_client_auth: bool = match config.get(TLS_CLIENT_AUTH) { - Some(s) => s - .parse() - .map_err(|_| zerror!("Unknown client auth argument: {}", s))?, - None => false, - }; - let tls_server_private_key = TlsServerConfig::load_tls_private_key(config).await?; - let tls_server_certificate = TlsServerConfig::load_tls_certificate(config).await?; - - let certs: Vec = - rustls_pemfile::certs(&mut Cursor::new(&tls_server_certificate)) - .map(|result| { - result - .map_err(|err| zerror!("Error processing server certificate: {err}.")) - .map(|der| Certificate(der.to_vec())) - }) - .collect::, ZError>>()?; - - let mut keys: Vec = - rustls_pemfile::rsa_private_keys(&mut Cursor::new(&tls_server_private_key)) - .map(|result| { - result - .map_err(|err| zerror!("Error processing server key: {err}.")) - .map(|key| PrivateKey(key.secret_pkcs1_der().to_vec())) - }) - .collect::, ZError>>()?; - - if keys.is_empty() { - keys = rustls_pemfile::pkcs8_private_keys(&mut 
Cursor::new(&tls_server_private_key)) - .map(|result| { - result - .map_err(|err| zerror!("Error processing server key: {err}.")) - .map(|key| PrivateKey(key.secret_pkcs8_der().to_vec())) - }) - .collect::, ZError>>()?; - } - - if keys.is_empty() { - keys = rustls_pemfile::ec_private_keys(&mut Cursor::new(&tls_server_private_key)) - .map(|result| { - result - .map_err(|err| zerror!("Error processing server key: {err}.")) - .map(|key| PrivateKey(key.secret_sec1_der().to_vec())) - }) - .collect::, ZError>>()?; - } - - if keys.is_empty() { - bail!("No private key found for TLS server."); - } - - let sc = if tls_server_client_auth { - let root_cert_store = load_trust_anchors(config)?.map_or_else( - || { - Err(zerror!( - "Missing root certificates while client authentication is enabled." - )) - }, - Ok, - )?; - ServerConfig::builder() - .with_safe_default_cipher_suites() - .with_safe_default_kx_groups() - .with_protocol_versions(&[&TLS13]) // Force TLS 1.3 - .map_err(|e| zerror!(e))? - .with_client_cert_verifier(Arc::new(AllowAnyAuthenticatedClient::new(root_cert_store))) - .with_single_cert(certs, keys.remove(0)) - .map_err(|e| zerror!(e))? - } else { - ServerConfig::builder() - .with_safe_defaults() - .with_no_client_auth() - .with_single_cert(certs, keys.remove(0)) - .map_err(|e| zerror!(e))? - }; - Ok(TlsServerConfig { server_config: sc }) - } - - async fn load_tls_private_key(config: &Config<'_>) -> ZResult> { - load_tls_key( - config, - TLS_SERVER_PRIVATE_KEY_RAW, - TLS_SERVER_PRIVATE_KEY_FILE, - TLS_SERVER_PRIVATE_KEY_BASE_64, - ) - .await - } - - async fn load_tls_certificate(config: &Config<'_>) -> ZResult> { - load_tls_certificate( - config, - TLS_SERVER_CERTIFICATE_RAW, - TLS_SERVER_CERTIFICATE_FILE, - TLS_SERVER_CERTIFICATE_BASE64, - ) - .await - } -} - -struct TlsClientConfig { - client_config: ClientConfig, -} - -impl TlsClientConfig { - pub async fn new(config: &Config<'_>) -> ZResult { - let tls_client_server_auth: bool = match config.get(TLS_CLIENT_AUTH) { - Some(s) => s - .parse() - .map_err(|_| zerror!("Unknown client auth argument: {}", s))?, - None => false, - }; - - let tls_server_name_verification: bool = match config.get(TLS_SERVER_NAME_VERIFICATION) { - Some(s) => { - let s: bool = s - .parse() - .map_err(|_| zerror!("Unknown server name verification argument: {}", s))?; - if s { - log::warn!("Skipping name verification of servers"); - } - s - } - None => false, - }; - - // Allows mixed user-generated CA and webPKI CA - log::debug!("Loading default Web PKI certificates."); - let mut root_cert_store: RootCertStore = RootCertStore { - roots: load_default_webpki_certs().roots, - }; - - if let Some(custom_root_cert) = load_trust_anchors(config)? 
{ - log::debug!("Loading user-generated certificates."); - root_cert_store.add_trust_anchors(custom_root_cert.roots.into_iter()); - } - - let cc = if tls_client_server_auth { - log::debug!("Loading client authentication key and certificate..."); - let tls_client_private_key = TlsClientConfig::load_tls_private_key(config).await?; - let tls_client_certificate = TlsClientConfig::load_tls_certificate(config).await?; - - let certs: Vec = - rustls_pemfile::certs(&mut Cursor::new(&tls_client_certificate)) - .map(|result| { - result - .map_err(|err| zerror!("Error processing client certificate: {err}.")) - .map(|der| Certificate(der.to_vec())) - }) - .collect::, ZError>>()?; - - let mut keys: Vec = - rustls_pemfile::rsa_private_keys(&mut Cursor::new(&tls_client_private_key)) - .map(|result| { - result - .map_err(|err| zerror!("Error processing client key: {err}.")) - .map(|key| PrivateKey(key.secret_pkcs1_der().to_vec())) - }) - .collect::, ZError>>()?; - - if keys.is_empty() { - keys = - rustls_pemfile::pkcs8_private_keys(&mut Cursor::new(&tls_client_private_key)) - .map(|result| { - result - .map_err(|err| zerror!("Error processing client key: {err}.")) - .map(|key| PrivateKey(key.secret_pkcs8_der().to_vec())) - }) - .collect::, ZError>>()?; - } - - if keys.is_empty() { - keys = rustls_pemfile::ec_private_keys(&mut Cursor::new(&tls_client_private_key)) - .map(|result| { - result - .map_err(|err| zerror!("Error processing client key: {err}.")) - .map(|key| PrivateKey(key.secret_sec1_der().to_vec())) - }) - .collect::, ZError>>()?; - } - - if keys.is_empty() { - bail!("No private key found for TLS client."); - } - - let builder = ClientConfig::builder() - .with_safe_default_cipher_suites() - .with_safe_default_kx_groups() - .with_protocol_versions(&[&TLS13]) - .map_err(|e| zerror!("Config parameters should be valid: {}", e))?; - - if tls_server_name_verification { - builder - .with_root_certificates(root_cert_store) - .with_client_auth_cert(certs, keys.remove(0)) - } else { - builder - .with_custom_certificate_verifier(Arc::new(WebPkiVerifierAnyServerName::new( - root_cert_store, - ))) - .with_client_auth_cert(certs, keys.remove(0)) - } - .map_err(|e| zerror!("Bad certificate/key: {}", e))? 
- } else { - let builder = ClientConfig::builder().with_safe_defaults(); - if tls_server_name_verification { - builder - .with_root_certificates(root_cert_store) - .with_no_client_auth() - } else { - builder - .with_custom_certificate_verifier(Arc::new(WebPkiVerifierAnyServerName::new( - root_cert_store, - ))) - .with_no_client_auth() - } - }; - Ok(TlsClientConfig { client_config: cc }) - } - - async fn load_tls_private_key(config: &Config<'_>) -> ZResult> { - load_tls_key( - config, - TLS_CLIENT_PRIVATE_KEY_RAW, - TLS_CLIENT_PRIVATE_KEY_FILE, - TLS_CLIENT_PRIVATE_KEY_BASE64, - ) - .await - } - - async fn load_tls_certificate(config: &Config<'_>) -> ZResult> { - load_tls_certificate( - config, - TLS_CLIENT_CERTIFICATE_RAW, - TLS_CLIENT_CERTIFICATE_FILE, - TLS_CLIENT_CERTIFICATE_BASE64, - ) - .await - } -} - -async fn load_tls_key( - config: &Config<'_>, - tls_private_key_raw_config_key: &str, - tls_private_key_file_config_key: &str, - tls_private_key_base64_config_key: &str, -) -> ZResult> { - if let Some(value) = config.get(tls_private_key_raw_config_key) { - return Ok(value.as_bytes().to_vec()); - } else if let Some(b64_key) = config.get(tls_private_key_base64_config_key) { - return base64_decode(b64_key); - } else if let Some(value) = config.get(tls_private_key_file_config_key) { - return Ok(fs::read(value) - .await - .map_err(|e| zerror!("Invalid TLS private key file: {}", e))?) - .and_then(|result| { - if result.is_empty() { - Err(zerror!("Empty TLS key.").into()) - } else { - Ok(result) - } - }); - } - Err(zerror!("Missing TLS private key.").into()) -} - -async fn load_tls_certificate( - config: &Config<'_>, - tls_certificate_raw_config_key: &str, - tls_certificate_file_config_key: &str, - tls_certificate_base64_config_key: &str, -) -> ZResult> { - if let Some(value) = config.get(tls_certificate_raw_config_key) { - return Ok(value.as_bytes().to_vec()); - } else if let Some(b64_certificate) = config.get(tls_certificate_base64_config_key) { - return base64_decode(b64_certificate); - } else if let Some(value) = config.get(tls_certificate_file_config_key) { - return Ok(fs::read(value) - .await - .map_err(|e| zerror!("Invalid TLS certificate file: {}", e))?); - } - Err(zerror!("Missing tls certificates.").into()) -} - -fn load_trust_anchors(config: &Config<'_>) -> ZResult> { - let mut root_cert_store = RootCertStore::empty(); - if let Some(value) = config.get(TLS_ROOT_CA_CERTIFICATE_RAW) { - let mut pem = BufReader::new(value.as_bytes()); - let trust_anchors = process_pem(&mut pem)?; - root_cert_store.add_trust_anchors(trust_anchors.into_iter()); - return Ok(Some(root_cert_store)); - } - - if let Some(b64_certificate) = config.get(TLS_ROOT_CA_CERTIFICATE_BASE64) { - let certificate_pem = base64_decode(b64_certificate)?; - let mut pem = BufReader::new(certificate_pem.as_slice()); - let trust_anchors = process_pem(&mut pem)?; - root_cert_store.add_trust_anchors(trust_anchors.into_iter()); - return Ok(Some(root_cert_store)); - } - - if let Some(filename) = config.get(TLS_ROOT_CA_CERTIFICATE_FILE) { - let mut pem = BufReader::new(File::open(filename)?); - let trust_anchors = process_pem(&mut pem)?; - root_cert_store.add_trust_anchors(trust_anchors.into_iter()); - return Ok(Some(root_cert_store)); - } - Ok(None) -} - -fn process_pem(pem: &mut dyn io::BufRead) -> ZResult> { - let certs: Vec = rustls_pemfile::certs(pem) - .map(|result| result.map_err(|err| zerror!("Error processing PEM certificates: {err}."))) - .collect::, ZError>>()?; - - let trust_anchors: Vec = certs - .into_iter() - 
.map(|cert| { - anchor_from_trusted_cert(&cert) - .map_err(|err| zerror!("Error processing trust anchor: {err}.")) - .map(|trust_anchor| trust_anchor.to_owned()) - }) - .collect::, ZError>>()?; - - let owned_trust_anchors: Vec = trust_anchors - .into_iter() - .map(|ta| { - OwnedTrustAnchor::from_subject_spki_name_constraints( - ta.subject.to_vec(), - ta.subject_public_key_info.to_vec(), - ta.name_constraints.map(|x| x.to_vec()), - ) - }) - .collect(); - - Ok(owned_trust_anchors) -} - -fn load_default_webpki_certs() -> RootCertStore { - let mut root_cert_store = RootCertStore::empty(); - root_cert_store.add_trust_anchors(webpki_roots::TLS_SERVER_ROOTS.iter().map(|ta| { - OwnedTrustAnchor::from_subject_spki_name_constraints( - ta.subject.to_vec(), - ta.subject_public_key_info.to_vec(), - ta.name_constraints.clone().map(|x| x.to_vec()), - ) - })); - root_cert_store -} diff --git a/io/zenoh-links/zenoh-link-tls/src/unicast.rs b/io/zenoh-links/zenoh-link-tls/src/unicast.rs index 8cb2d32698..5b744ed353 100644 --- a/io/zenoh-links/zenoh-link-tls/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-tls/src/unicast.rs @@ -74,6 +74,7 @@ pub struct LinkUnicastTls { // Make sure there are no concurrent read or writes write_mtx: AsyncMutex<()>, read_mtx: AsyncMutex<()>, + auth_identifier: Option, } unsafe impl Send for LinkUnicastTls {} @@ -84,6 +85,7 @@ impl LinkUnicastTls { socket: TlsStream, src_addr: SocketAddr, dst_addr: SocketAddr, + auth_identifier: Option, ) -> LinkUnicastTls { let (tcp_stream, _) = socket.get_ref(); // Set the TLS nodelay option @@ -120,6 +122,7 @@ impl LinkUnicastTls { dst_locator: Locator::new(TLS_LOCATOR_PREFIX, dst_addr.to_string(), "").unwrap(), write_mtx: AsyncMutex::new(()), read_mtx: AsyncMutex::new(()), + auth_identifier, } } @@ -209,6 +212,16 @@ impl LinkUnicastTrait for LinkUnicastTls { fn is_streamed(&self) -> bool { true } + #[inline(always)] + fn get_auth_identifier(&self) -> AuthIdentifier { + match &self.auth_identifier { + Some(identifier) => identifier.clone(), + None => AuthIdentifier { + username: None, + tls_cert_name: None, + }, + } + } } impl Drop for LinkUnicastTls { @@ -305,6 +318,7 @@ impl LinkManagerUnicastTrait for LinkManagerUnicastTls { let (_, tls_conn) = tls_stream.get_ref(); let serv_certs = tls_conn.peer_certificates().unwrap(); + let mut test_auth_id: Option = None; for item in serv_certs { let (_, cert) = X509Certificate::from_der(item.as_ref()).unwrap(); @@ -319,11 +333,17 @@ impl LinkManagerUnicastTrait for LinkManagerUnicastTls { tls_cert_name: Some(subject_name.to_string()), }; - println!("auth_identifier: {:?}", auth_identifier); + println!("server side tls auth_identifier: {:?}", auth_identifier); + test_auth_id = Some(auth_identifier); } let tls_stream = TlsStream::Client(tls_stream); - let link = Arc::new(LinkUnicastTls::new(tls_stream, src_addr, dst_addr)); + let link = Arc::new(LinkUnicastTls::new( + tls_stream, + src_addr, + dst_addr, + test_auth_id, + )); Ok(LinkUnicast(link)) } @@ -451,15 +471,22 @@ async fn accept_task( }; let (_, tls_conn) = tls_stream.get_ref(); + //let mut test_auth_id: Option = None; let auth_identifier = get_tls_cert_name(tls_conn); - match auth_identifier { - Some(auth_id) => println!("client's ID: {:?}", auth_id), - None => println!("no client ID found"), - } + println!("client side tls auth_identifier: {:?}", auth_identifier); + // match auth_identifier { + // Some(auth_id) => println!("client's ID: {:?}", auth_id), + // None => println!("no client ID found"), + // } log::debug!("Accepted TLS connection on 
{:?}: {:?}", src_addr, dst_addr); // Create the new link object - let link = Arc::new(LinkUnicastTls::new(tls_stream, src_addr, dst_addr)); + let link = Arc::new(LinkUnicastTls::new( + tls_stream, + src_addr, + dst_addr, + auth_identifier, + )); // Communicate the new link to the initial transport manager if let Err(e) = manager.send_async(LinkUnicast(link)).await { diff --git a/io/zenoh-links/zenoh-link-udp/src/unicast.rs b/io/zenoh-links/zenoh-link-udp/src/unicast.rs index d5214510be..2dde7bc085 100644 --- a/io/zenoh-links/zenoh-link-udp/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-udp/src/unicast.rs @@ -27,8 +27,9 @@ use std::sync::{Arc, Mutex, Weak}; use std::time::Duration; use zenoh_core::{zasynclock, zlock}; use zenoh_link_commons::{ - get_ip_interface_names, ConstructibleLinkManagerUnicast, LinkManagerUnicastTrait, LinkUnicast, - LinkUnicastTrait, ListenersUnicastIP, NewLinkChannelSender, BIND_INTERFACE, + get_ip_interface_names, AuthIdentifier, ConstructibleLinkManagerUnicast, + LinkManagerUnicastTrait, LinkUnicast, LinkUnicastTrait, ListenersUnicastIP, + NewLinkChannelSender, BIND_INTERFACE, }; use zenoh_protocol::core::{EndPoint, Locator}; use zenoh_result::{bail, zerror, Error as ZError, ZResult}; @@ -220,6 +221,12 @@ impl LinkUnicastTrait for LinkUnicastUdp { fn is_streamed(&self) -> bool { false } + fn get_auth_identifier(&self) -> AuthIdentifier { + AuthIdentifier { + username: None, + tls_cert_name: None, + } + } } impl fmt::Display for LinkUnicastUdp { diff --git a/io/zenoh-links/zenoh-link-unixpipe/src/unix/unicast.rs b/io/zenoh-links/zenoh-link-unixpipe/src/unix/unicast.rs index 83f6414dee..0fb4e7d2ef 100644 --- a/io/zenoh-links/zenoh-link-unixpipe/src/unix/unicast.rs +++ b/io/zenoh-links/zenoh-link-unixpipe/src/unix/unicast.rs @@ -35,8 +35,8 @@ use zenoh_protocol::core::{EndPoint, Locator}; use unix_named_pipe::{create, open_write}; use zenoh_link_commons::{ - ConstructibleLinkManagerUnicast, LinkManagerUnicastTrait, LinkUnicast, LinkUnicastTrait, - NewLinkChannelSender, + AuthIdentifier, ConstructibleLinkManagerUnicast, LinkManagerUnicastTrait, LinkUnicast, + LinkUnicastTrait, NewLinkChannelSender, }; use zenoh_result::{bail, ZResult}; @@ -508,6 +508,12 @@ impl LinkUnicastTrait for UnicastPipe { fn is_streamed(&self) -> bool { true } + fn get_auth_identifier(&self) -> AuthIdentifier { + AuthIdentifier { + username: None, + tls_cert_name: None, + } + } } impl fmt::Display for UnicastPipe { diff --git a/io/zenoh-links/zenoh-link-unixsock_stream/src/unicast.rs b/io/zenoh-links/zenoh-link-unixsock_stream/src/unicast.rs index 3ac1bcbfe6..3111108438 100644 --- a/io/zenoh-links/zenoh-link-unixsock_stream/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-unixsock_stream/src/unicast.rs @@ -31,7 +31,7 @@ use std::time::Duration; use uuid::Uuid; use zenoh_core::{zread, zwrite}; use zenoh_link_commons::{ - LinkManagerUnicastTrait, LinkUnicast, LinkUnicastTrait, NewLinkChannelSender, + AuthIdentifier, LinkManagerUnicastTrait, LinkUnicast, LinkUnicastTrait, NewLinkChannelSender, }; use zenoh_protocol::core::{EndPoint, Locator}; use zenoh_result::{zerror, ZResult}; @@ -131,6 +131,12 @@ impl LinkUnicastTrait for LinkUnicastUnixSocketStream { fn is_streamed(&self) -> bool { true } + fn get_auth_identifier(&self) -> AuthIdentifier { + AuthIdentifier { + username: None, + tls_cert_name: None, + } + } } impl Drop for LinkUnicastUnixSocketStream { diff --git a/io/zenoh-links/zenoh-link-ws/src/unicast.rs b/io/zenoh-links/zenoh-link-ws/src/unicast.rs index 0ff1b1ab46..33862dc89c 100644 --- 
a/io/zenoh-links/zenoh-link-ws/src/unicast.rs +++ b/io/zenoh-links/zenoh-link-ws/src/unicast.rs @@ -32,6 +32,7 @@ use tokio_tungstenite::accept_async; use tokio_tungstenite::tungstenite::Message; use tokio_tungstenite::{MaybeTlsStream, WebSocketStream}; use zenoh_core::{zasynclock, zread, zwrite}; +use zenoh_link_commons::AuthIdentifier; use zenoh_link_commons::{ LinkManagerUnicastTrait, LinkUnicast, LinkUnicastTrait, NewLinkChannelSender, }; @@ -223,6 +224,12 @@ impl LinkUnicastTrait for LinkUnicastWs { fn is_streamed(&self) -> bool { false } + fn get_auth_identifier(&self) -> AuthIdentifier { + AuthIdentifier { + username: None, + tls_cert_name: None, + } + } } impl Drop for LinkUnicastWs { diff --git a/myed25519.key b/myed25519.key new file mode 100644 index 0000000000..eec2318e30 --- /dev/null +++ b/myed25519.key @@ -0,0 +1,3 @@ +-----BEGIN PRIVATE KEY----- +MC4CAQAwBQYDK2VwBCIEIHqhe4/aAYsQ7CY1XCLIwCh+5nEZcCyIRZCbJcS233gc +-----END PRIVATE KEY----- diff --git a/myedserver.crt b/myedserver.crt new file mode 100644 index 0000000000..79f686e84a --- /dev/null +++ b/myedserver.crt @@ -0,0 +1,12 @@ +-----BEGIN CERTIFICATE----- +MIIBvTCCAW+gAwIBAgIUNfcUs1gokRmiNXzXRxeVsK8PAVEwBQYDK2VwMFQxCzAJ +BgNVBAYTAkZSMQswCQYDVQQIDAJJRjELMAkGA1UEBwwCUFIxETAPBgNVBAoMCHpz +LCBJbmMuMRgwFgYDVQQDDA90ZXN0X3Rsc19zZXJ2ZXIwHhcNMjQwMzExMTUxNjA3 +WhcNMjUwMzExMTUxNjA3WjBUMQswCQYDVQQGEwJGUjELMAkGA1UECAwCSUYxCzAJ +BgNVBAcMAlBSMREwDwYDVQQKDAh6cywgSW5jLjEYMBYGA1UEAwwPdGVzdF90bHNf +c2VydmVyMCowBQYDK2VwAyEAtHIXi5k5PlaXGcUoqvN71aNZXipGzSiCrWfinY3r +zLejUzBRMB0GA1UdDgQWBBSCTrn8VCUbrLpvVaxc0e9tWWluRzAfBgNVHSMEGDAW +gBSCTrn8VCUbrLpvVaxc0e9tWWluRzAPBgNVHRMBAf8EBTADAQH/MAUGAytlcANB +AD9CgSNdcqVpoPGkwFqHnvDkTs9LxFOQWTMoUfnBMayOCOAKNl10LAyF6ST85QZc +USlMTo7EbXm/RmdqCBq3LQI= +-----END CERTIFICATE----- diff --git a/mypub2.crt b/mypub2.crt new file mode 100644 index 0000000000..63a916d900 --- /dev/null +++ b/mypub2.crt @@ -0,0 +1,22 @@ +-----BEGIN CERTIFICATE----- +MIIDjTCCAnWgAwIBAgIUcK4p0GvmJBQ3LpiK7iY4EwTlhekwDQYJKoZIhvcNAQEL +BQAwVTELMAkGA1UEBhMCRlIxCzAJBgNVBAgMAklGMQswCQYDVQQHDAJQUjERMA8G +A1UECgwIenMsIEluYy4xGTAXBgNVBAMMEHpzX3Rlc3Rfcm9vdF9jYTIwHhcNMjQw +MzEzMTMxOTI2WhcNMjUwMzEzMTMxOTI2WjBQMQswCQYDVQQGEwJGUjELMAkGA1UE +CAwCSUYxCzAJBgNVBAcMAlBSMREwDwYDVQQKDAh6cywgSW5jLjEUMBIGA1UEAwwL +cHViMl9zZXJ2ZXIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCp7qWN +2iA0+rhJMyyVrFXGcsqPNvz1ASTz2PHqhIOvS/tH2yaYaPkVGI3NHxvMp/USWUGj +we95ow2k7uPhuJ9q615tZ2u5+NFWjy72FU5MB0sFDbwB7Amb57KG5OyKk3gYyojL +TTyRevkMpokbpWfUoCgLx6LjMwzAlnbOoipsrh8Bg4VLadvhBFGfYtN6PrYOr5S1 +79L86aelaFZbxFqEmGmTQ8e8fPBn0DMasL8cIzavkkE+CAEXQV9SGIGddSc6MLVz +61mmh6umivfYEY4ZkkS4mK7zcKquhoYdLZK4yZWu+BtgCe+H4+jD3+Rv9TJ5YFU4 +MOCvgymUyOw4PExBAgMBAAGjWjBYMBYGA1UdEQQPMA2CC3B1YjJfc2VydmVyMB0G +A1UdDgQWBBTVL7smU8+53bqP3c7hvu21SNs5SzAfBgNVHSMEGDAWgBTbO03TDw3d +xyHU6YTEOL5mgdBVrDANBgkqhkiG9w0BAQsFAAOCAQEAKUbq0+KYUJthA51n/rV9 +dNhWqFRqKOuW5ubgK4NlgJxhRxE4HYjeqW41vMgbUYqcYrRlR/Mxg02AW0RE/TSl +31ptkDKVie2okJrAMGkz30M7N1Y4bTYEkdt0t4+cYr3ZqJF8uZfZCc3BWZlDTzWI +4N7zyxFvLE8qEsjkkAGMZYZyvudXsp9bSwtdLH6krOsFO1kbr1rjg/T7u9Dv8OvY +XPlWOOMXPM2pQV/eZP4/8LuuzLMrjROPx7oRNSbOzCNT3+pfiZh2A4pH8O1K5Jus +lcGN5lwsAQEkyQgVBC/XXmQ14Ke0yFXz/JRxM+wOD+UT8J5I4N6lRUrlaGJk/RtG +mA== +-----END CERTIFICATE----- diff --git a/mypub2.key b/mypub2.key new file mode 100644 index 0000000000..331427b8d0 --- /dev/null +++ b/mypub2.key @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCp7qWN2iA0+rhJ +MyyVrFXGcsqPNvz1ASTz2PHqhIOvS/tH2yaYaPkVGI3NHxvMp/USWUGjwe95ow2k 
+7uPhuJ9q615tZ2u5+NFWjy72FU5MB0sFDbwB7Amb57KG5OyKk3gYyojLTTyRevkM +pokbpWfUoCgLx6LjMwzAlnbOoipsrh8Bg4VLadvhBFGfYtN6PrYOr5S179L86ael +aFZbxFqEmGmTQ8e8fPBn0DMasL8cIzavkkE+CAEXQV9SGIGddSc6MLVz61mmh6um +ivfYEY4ZkkS4mK7zcKquhoYdLZK4yZWu+BtgCe+H4+jD3+Rv9TJ5YFU4MOCvgymU +yOw4PExBAgMBAAECggEABY9+Eli3YhRsMFUA2fr0KZS7BRmvCFNbwrDwJZTlfDh3 +xf+pUSZqNCMEDNi+P6GlVs7d435mmCvaN/HrOgkhCk4eXmUadNDsBLGVv06uK7W3 +YjhzVPrEy7m2sUxPYy91KBaEiGaEG1yTRrMyXFm6vo3pEY2cSmPywGCMDbHnwnuY +c/7ggx5XtvbDYkv4HZdiu0ZtxZGk4YhNXv58mHM24rmQLWwA7Y1Rw+amHqcStMQJ +BT64YqOnCO5fI6dx889zo9XAvRe7Nv9Fu4FOa2SneqyfkR9819CuxclJ1rAkYnFk +St5vrf19J2rwyAaoT1gA+jQ3Lj2SiZN3mEgPnBp32QKBgQDbtxgYfzXKdxJebwXy +9jl8iEp+8TVOvj+7AWj6AnfFgiqyXlWE891MSa5srxtxtFjwJGoDUB/ySd2DsXmo +rhx7SF5P7moUmg53D+qta1CORebaJ9/kcWnAGSZkraVFhEx0XghIctdWoXE9jWf8 +k3vt5Vq791v5WPzlMbf8rktvVQKBgQDF/uAc/vSFSl9+q+8ArhmZbHioLq8fSsIr +RvYofOVIUXYGHZI9lY+qqDMU/icizsg2Rm2VVCWTLeqBMpsxmaV+2KWAsqHh0hm8 +0m2vReN0TDnr4P6/ul53sEmNxXwd3NWeY+PhcH186vkPuwqycNHOT/R7aio5RXQK +wj/NrSWxPQKBgFAyL2BZplelSJYhbgl1qBv1X0OgZTW9qWNnq1p95hu4XD9IwWxK +2r6KsljHPXwuOLxGfk+BQnfcUDdOYzqXepvhGVORkTS92oPI0n7ECd40U4PTRByM +7O2KAIKFAysxk/pxjBJtoH3lZYDzCT6e0oBN0+WB7xc/TOeXUzGuqKgVAoGAa8mr +fF2YBJBOmIlFXdtp+EpDDVwM3j42opTJIZWMxOgEFqQ2nuwzADb9SCpsZ9imylVh +BvB+XDec+KTyM/hvTjTnNL8KvgNBG7h7GfY0M3Xj+nPMe9gb4ZDJGjMutJsqeEXt +Iye4SS8qU9QBqM8eiPCiKiXLws36tHi3f2MJqAkCgYEAkFu7cG50zH+k/qs/UOzk +FQXSVboVooBkHlOBpC+mjD7uPm69Id/83UsF+5wTZSW0EuZ6IxC/M+uuRX2qINv9 +RDDqXaszxvpPaNhjdoir4mGgnXYOgg256F4dqSTiBSlNH5puRkAiG8J+Ng4aPnES +YJYfM97wJO8rXKaA8z+TOjQ= +-----END PRIVATE KEY----- diff --git a/mysub.csr b/mysub.csr deleted file mode 100644 index dbe6d89071..0000000000 --- a/mysub.csr +++ /dev/null @@ -1,16 +0,0 @@ ------BEGIN CERTIFICATE REQUEST----- -MIICmDCCAYACAQAwUzELMAkGA1UEBhMCRlIxCzAJBgNVBAgMAklGMQswCQYDVQQH -DAJQUjERMA8GA1UECgwIenMsIEluYy4xFzAVBgNVBAMMDnN1Yl90bHNfY2xpZW50 -MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAm8MSy4j1uWUoebudOtO8 -XUgnOOyvNbzRfIS5qkrVuTYRbjfO8pIwari4ZPxpN3VSzRFcKv6/EZP+qNITAen+ -8xj9+3nwOlD3eEC2x8Iw0ow07pOjrpf8Velz2eQWekSad1YpVqfXgSZy9QFt57Nt -ht99EE3UhBKPkrvArjm24S3Lwas86e2W6Z2hIlfjBFxsrzsMIitZt9Ao5zp3ZdaV -0koqOSJmdAmVucNBR2OIlA28K/6MQzGwLgz9g2OlpPXGrt/HWhyFh1Vm9a7dzV5h -ixX2C/MpWYZQ6w9ahdcr189D7dmkTQB9xSxia59y+robQPMYgRmvDpJlyuq+NFpF -nwIDAQABoAAwDQYJKoZIhvcNAQELBQADggEBAIBa6ccode0L5STQm7npmTVJmx+w -doTDcM7dAUdc4iaDkQLKiEsUl7pClsbroZCVB/CctxkthukgZS5w8PSt1nJPqe7Q -lUrv88ARDPOBS6+XTIJZyXBxvz01DtvHxsLUmedPfVDGqQYCWGudcX+KWkJEApTz -pw888FneR+aKe2QatyoGfF3C0zSeZY2el/f6CHV9RFEBIBuPGZO05wfHPcDo92Kb -VScgQi/eMD81x3nKPD208CyyK3o96tUAQjzpXivHSnJmdDc4H6pKs3ufy0T0R3T/ -RwcY34MHhu5BxJ9gC0YQD0PlObFBL2MuQi/OlnmWcQ2R3UIKyw0jaSG42Ak= ------END CERTIFICATE REQUEST----- diff --git a/peer_pub.json5 b/peer_pub.json5 new file mode 100644 index 0000000000..cf5e2a343e --- /dev/null +++ b/peer_pub.json5 @@ -0,0 +1,12 @@ +{ + "mode": "peer", + "transport": { + "link": { + "tls": { + "root_ca_certificate": "ca.crt", + "server_private_key": "mypub2.key", + "server_certificate": "mypub2.crt" + } + } + } +} \ No newline at end of file diff --git a/peer_sub.json5 b/peer_sub.json5 new file mode 100644 index 0000000000..7143210394 --- /dev/null +++ b/peer_sub.json5 @@ -0,0 +1,12 @@ +{ + "mode": "peer", + "transport": { + "link": { + "tls": { + "root_ca_certificate": "ca.crt", + "server_private_key": "mysub.key", + "server_certificate": "mysub.crt" + } + } + } +} \ No newline at end of file diff --git a/pub_client.json5 b/pub_client.json5 index c33f5de2e4..b98cb7dcb4 100644 --- a/pub_client.json5 +++ b/pub_client.json5 @@ 
-173,7 +173,10 @@ /// If not configured, all the supported protocols are automatically whitelisted. /// The supported protocols are: ["tcp" , "udp", "tls", "quic", "ws", "unixsock-stream"] /// For example, to only enable "tls" and "quic": - // protocols: ["tls", "quic"], + protocols: [ + "tls", + "quic" + ], /// Configure the zenoh TX parameters of a link tx: { /// The resolution in bits to be used for the message sequence numbers. diff --git a/router.json5 b/router.json5 index 82afaf8f20..56ff9b5efa 100644 --- a/router.json5 +++ b/router.json5 @@ -173,7 +173,9 @@ /// If not configured, all the supported protocols are automatically whitelisted. /// The supported protocols are: ["tcp" , "udp", "tls", "quic", "ws", "unixsock-stream"] /// For example, to only enable "tls" and "quic": - // protocols: ["tls", "quic"], + protocols: [ + "tls" + ], /// Configure the zenoh TX parameters of a link tx: { /// The resolution in bits to be used for the message sequence numbers. diff --git a/router_quic.json5 b/router_quic.json5 new file mode 100644 index 0000000000..512e40aa29 --- /dev/null +++ b/router_quic.json5 @@ -0,0 +1,56 @@ +{ + mode: "peer", + /// Configure the scouting mechanisms and their behaviours +scouting: { + /// In client mode, the period dedicated to scouting for a router before failing + timeout: 0, + /// In peer mode, the period dedicated to scouting remote peers before attempting other operations + delay: 0, + /// The multicast scouting configuration. + multicast: { + /// Whether multicast scouting is enabled or not + enabled: false, + /// The socket which should be used for multicast scouting + address: "224.0.0.224:7446", + /// The network interface which should be used for multicast scouting + interface: "auto", // If not set or set to "auto" the interface if picked automatically + /// Which type of Zenoh instances to automatically establish sessions with upon discovery on UDP multicast. + /// Accepts a single value or different values for router, peer and client. + /// Each value is bit-or-like combinations of "peer", "router" and "client". + autoconnect: { router: "", peer: "router|peer" + }, + /// Whether or not to listen for scout messages on UDP multicast and reply to them. + listen: false, + }, + /// The gossip scouting configuration. + gossip: { + /// Whether gossip scouting is enabled or not + enabled: false, + /// When true, gossip scouting informations are propagated multiple hops to all nodes in the local network. + /// When false, gossip scouting informations are only propagated to the next hop. + /// Activating multihop gossip implies more scouting traffic and a lower scalability. + /// It mostly makes sense when using "linkstate" routing mode where all nodes in the subsystem don't have + /// direct connectivity with each other. + multihop: false, + /// Which type of Zenoh instances to automatically establish sessions with upon discovery on gossip. + /// Accepts a single value or different values for router, peer and client. + /// Each value is bit-or-like combinations of "peer", "router" and "client". 
+ autoconnect: { router: "", peer: "router|peer" + }, + }, + }, + "transport": { + "link": { + "protocols": [ + "quic", + // "tls" + ], + "tls": { + "root_ca_certificate": "client_ca.pem", + "client_auth": false, + "server_private_key": "server_key.pem", + "server_certificate": "server_cert.pem" + } + } + } +} \ No newline at end of file diff --git a/server_ca.pem b/server_ca.pem new file mode 100644 index 0000000000..83e287e214 --- /dev/null +++ b/server_ca.pem @@ -0,0 +1,20 @@ +-----BEGIN CERTIFICATE----- +MIIDSzCCAjOgAwIBAgIITcwv1N10nqEwDQYJKoZIhvcNAQELBQAwIDEeMBwGA1UE +AxMVbWluaWNhIHJvb3QgY2EgNGRjYzJmMCAXDTIzMDMwNjE2NDEwNloYDzIxMjMw +MzA2MTY0MTA2WjAgMR4wHAYDVQQDExVtaW5pY2Egcm9vdCBjYSA0ZGNjMmYwggEi +MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC2WUgN7NMlXIknew1cXiTWGmS0 +1T1EjcNNDAq7DqZ7/ZVXrjD47yxTt5EOiOXK/cINKNw4Zq/MKQvq9qu+Oax4lwiV +Ha0i8ShGLSuYI1HBlXu4MmvdG+3/SjwYoGsGaShr0y/QGzD3cD+DQZg/RaaIPHlO +MdmiUXxkMcy4qa0hFJ1imlJdq/6Tlx46X+0vRCh8nkekvOZR+t7Z5U4jn4XE54Kl +0PiwcyX8vfDZ3epa/FSHZvVQieM/g5Yh9OjIKCkdWRg7tD0IEGsaW11tEPJ5SiQr +mDqdRneMzZKqY0xC+QqXSvIlzpOjiu8PYQx7xugaUFE/npKRQdvh8ojHJMdNAgMB +AAGjgYYwgYMwDgYDVR0PAQH/BAQDAgKEMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggr +BgEFBQcDAjASBgNVHRMBAf8ECDAGAQH/AgEAMB0GA1UdDgQWBBTX46+p+Po1npE6 +QLQ7mMI+83s6qDAfBgNVHSMEGDAWgBTX46+p+Po1npE6QLQ7mMI+83s6qDANBgkq +hkiG9w0BAQsFAAOCAQEAaN0IvEC677PL/JXzMrXcyBV88IvimlYN0zCt48GYlhmx +vL1YUDFLJEB7J+dyERGE5N6BKKDGblC4WiTFgDMLcHFsMGRc0v7zKPF1PSBwRYJi +ubAmkwdunGG5pDPUYtTEDPXMlgClZ0YyqSFJMOqA4IzQg6exVjXtUxPqzxNhyC7S +vlgUwPbX46uNi581a9+Ls2V3fg0ZnhkTSctYZHGZNeh0Nsf7Am8xdUDYG/bZcVef +jbQ9gpChosdjF0Bgblo7HSUct/2Va+YlYwW+WFjJX8k4oN6ZU5W5xhdfO8Czmgwk +US5kJ/+1M0uR8zUhZHL61FbsdPxEj+fYKrHv4woo+A== +-----END CERTIFICATE----- diff --git a/server_cert.pem b/server_cert.pem new file mode 100644 index 0000000000..0afe5c63b8 --- /dev/null +++ b/server_cert.pem @@ -0,0 +1,20 @@ +-----BEGIN CERTIFICATE----- +MIIDLjCCAhagAwIBAgIIW1mAtJWJAJYwDQYJKoZIhvcNAQELBQAwIDEeMBwGA1UE +AxMVbWluaWNhIHJvb3QgY2EgNGRjYzJmMCAXDTIzMDMwNjE2NDEwNloYDzIxMjMw +MzA2MTY0MTA2WjAUMRIwEAYDVQQDEwlsb2NhbGhvc3QwggEiMA0GCSqGSIb3DQEB +AQUAA4IBDwAwggEKAoIBAQCYMLJKooc+YRlKEMfeV09pX9myH34eUcUuT0fXS8lm +PlZ/NW7mm5lDwa8EUg61WuXQv2ouQDptmIcdeb/w4RW93Xflkyng1Xbd91OwQBJd ++8ZVBjzL7hSRk3QPDqx/CVBU/I1GmXKzb6cWzq1fTkOn1WLNXf21I6p7+N3qHLPF +JQeoVq1HBBFcAjTgJnpyQNvRGLDuLTK+OsWEGib2U8qrgiRdkaBLkxGXSlGABlOo +cQyW/zOhf4pwb2Z/JAge2mRW5IcexCPBWint8ydPsoJDds8j5+AyYCD6HUhHX0Ob +Qkz73OW7f2PQhuTK2uzKy0Yz6lNFt2nuzaWC04wIW3T7AgMBAAGjdjB0MA4GA1Ud +DwEB/wQEAwIFoDAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwDAYDVR0T +AQH/BAIwADAfBgNVHSMEGDAWgBTX46+p+Po1npE6QLQ7mMI+83s6qDAUBgNVHREE +DTALgglsb2NhbGhvc3QwDQYJKoZIhvcNAQELBQADggEBAAxrmQPG54ybKgMVliN8 +Mg5povSdPIVVnlU/HOVG9yxzAOav/xQP003M4wqpatWxI8tR1PcLuZf0EPmcdJgb +tVl9nZMVZtveQnYMlU8PpkEVu56VM4Zr3rH9liPRlr0JEAXODdKw76kWKzmdqWZ/ +rzhup3Ek7iEX6T5j/cPUvTWtMD4VEK2I7fgoKSHIX8MIVzqM7cuboGWPtS3eRNXl +MgvahA4TwLEXPEe+V1WAq6nSb4g2qSXWIDpIsy/O1WGS/zzRnKvXu9/9NkXWqZMl +C1LSpiiQUaRSglOvYf/Zx6r+4BOS4OaaArwHkecZQqBSCcBLEAyb/FaaXdBowI0U +PQ4= +-----END CERTIFICATE----- diff --git a/server_key.pem b/server_key.pem new file mode 100644 index 0000000000..24fa62a138 --- /dev/null +++ b/server_key.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEAmDCySqKHPmEZShDH3ldPaV/Zsh9+HlHFLk9H10vJZj5WfzVu +5puZQ8GvBFIOtVrl0L9qLkA6bZiHHXm/8OEVvd135ZMp4NV23fdTsEASXfvGVQY8 +y+4UkZN0Dw6sfwlQVPyNRplys2+nFs6tX05Dp9VizV39tSOqe/jd6hyzxSUHqFat +RwQRXAI04CZ6ckDb0Riw7i0yvjrFhBom9lPKq4IkXZGgS5MRl0pRgAZTqHEMlv8z +oX+KcG9mfyQIHtpkVuSHHsQjwVop7fMnT7KCQ3bPI+fgMmAg+h1IR19Dm0JM+9zl 
+u39j0IbkytrsystGM+pTRbdp7s2lgtOMCFt0+wIDAQABAoIBADNTSO2uvlmlOXgn +DKDJZTiuYKaXxFrJTOx/REUxg+x9XYJtLMeM9jVJnpKgceFrlFHAHDkY5BuN8xNX +ugmsfz6W8BZ2eQsgMoRNIuYv1YHopUyLW/mSg1FNHzjsw/Pb2kGvIp4Kpgopv3oL +naCkrmBtsHJ+Hk/2hUpl9cE8iMwVWcVevLzyHi98jNy1IDdIPhRtl0dhMiqC5MRr +4gLJ5gNkLYX7xf3tw5Hmfk/bVNProqZXDIQVI7rFvItX586nvQ3LNQkmW/D2ShZf +3FEqMu6EdA2Ycc4UZgAlQNGV0VBrWWVXizOQ+9gjLnBk3kJjqfigCU6NG94bTJ+H +0YIhsGECgYEAwdSSyuMSOXgzZQ7Vv+GsNn/7ivi/H8eb/lDzksqS/JroA2ciAmHG +2OF30eUJKRg+STqBTpOfXgS4QUa8QLSwBSnwcw6579x9bYGUhqD2Ypaw9uCnOukA +CwwggZ9cDmF0tb5rYjqkW3bFPqkCnTGb0ylMFaYRhRDU20iG5t8PQckCgYEAyQEM +KK18FLQUKivGrQgP5Ib6IC3myzlHGxDzfobXGpaQntFnHY7Cxp/6BBtmASzt9Jxu +etnrevmzrbKqsLTJSg3ivbiq0YTLAJ1FsZrCp71dx49YR/5o9QFiq0nQoKnwUVeb +/hrDjMAokNkjFL5vouXO711GSS6YyM4WzAKZAqMCgYEAhqGxaG06jmJ4SFx6ibIl +nSFeRhQrJNbP+mCeHrrIR98NArgS/laN+Lz7LfaJW1r0gIa7pCmTi4l5thV80vDu +RlfwJOr4qaucD4Du+mg5WxdSSdiXL6sBlarRtVdMaMy2dTqTegJDgShJLxHTt/3q +P0yzBWJ5TtT3FG0XDqum/EkCgYAYNHwWWe3bQGQ9P9BI/fOL/YUZYu2sA1XAuKXZ +0rsMhJ0dwvG76XkjGhitbe82rQZqsnvLZ3qn8HHmtOFBLkQfGtT3K8nGOUuI42eF +H7HZKUCly2lCIizZdDVBkz4AWvaJlRc/3lE2Hd3Es6E52kTvROVKhdz06xuS8t5j +6twqKQKBgQC01AeiWL6Rzo+yZNzVgbpeeDogaZz5dtmURDgCYH8yFX5eoCKLHfnI +2nDIoqpaHY0LuX+dinuH+jP4tlyndbc2muXnHd9r0atytxA69ay3sSA5WFtfi4ef +ESElGO6qXEA821RpQp+2+uhL90+iC294cPqlS5LDmvTMypVDHzrxPQ== +-----END RSA PRIVATE KEY----- diff --git a/sub_client.json5 b/sub_client.json5 index f04cc43b6d..30a688c176 100644 --- a/sub_client.json5 +++ b/sub_client.json5 @@ -234,29 +234,29 @@ max_message_size: 1073741824, }, /// Configure TLS specific parameters - tls: { - root_ca_certificate: "ca.crt" - } // tls: { - // /// Path to the certificate of the certificate authority used to validate either the server - // /// or the client's keys and certificates, depending on the node's mode. If not specified - // /// on router mode then the default WebPKI certificates are used instead. - // root_ca_certificate: null, - // /// Path to the TLS server private key - // server_private_key: null, - // /// Path to the TLS server public certificate - // server_certificate: null, - // /// Client authentication, if true enables mTLS (mutual authentication) - // client_auth: false, - // /// Path to the TLS client private key - // client_private_key: null, - // /// Path to the TLS client public certificate - // client_certificate: null, - // // Whether or not to use server name verification, if set to false zenoh will disregard the common names of the certificates when verifying servers. - // // This could be dangerous because your CA can have signed a server cert for foo.com, that's later being used to host a server at baz.com. If you wan't your - // // ca to verify that the server at baz.com is actually baz.com, let this be true (default). - // server_name_verification: null, - // }, + // root_ca_certificate: "ca.crt" + // } + tls: { + /// Path to the certificate of the certificate authority used to validate either the server + /// or the client's keys and certificates, depending on the node's mode. If not specified + /// on router mode then the default WebPKI certificates are used instead. 
+            root_ca_certificate: "ca.crt",
+            /// Path to the TLS server private key
+            server_private_key: null,
+            /// Path to the TLS server public certificate
+            server_certificate: null,
+            /// Client authentication, if true enables mTLS (mutual authentication)
+            client_auth: false,
+            /// Path to the TLS client private key
+            client_private_key: null,
+            /// Path to the TLS client public certificate
+            client_certificate: null,
+            // Whether or not to use server name verification, if set to false zenoh will disregard the common names of the certificates when verifying servers.
+            // This could be dangerous because your CA can have signed a server cert for foo.com, that's later being used to host a server at baz.com. If you want your
+            // CA to verify that the server at baz.com is actually baz.com, let this be true (default).
+            server_name_verification: null,
+        },
     },
     /// Shared memory configuration
     shared_memory: {
diff --git a/zenoh/src/net/routing/interceptor/mod.rs b/zenoh/src/net/routing/interceptor/mod.rs
index 9dfc03ac7e..bec391983c 100644
--- a/zenoh/src/net/routing/interceptor/mod.rs
+++ b/zenoh/src/net/routing/interceptor/mod.rs
@@ -26,7 +26,9 @@ use zenoh_result::ZResult;
 use zenoh_transport::{multicast::TransportMulticast, unicast::TransportUnicast};
 
 pub mod downsampling;
+pub mod testing_interceptor;
 use crate::net::routing::interceptor::downsampling::downsampling_interceptor_factories;
+use crate::net::routing::interceptor::testing_interceptor::new_test_interceptor;
 
 pub(crate) trait InterceptorTrait {
     fn compute_keyexpr_cache(&self, key_expr: &KeyExpr<'_>) -> Option<Box<dyn Any + Send + Sync>>;
@@ -60,7 +62,7 @@ pub(crate) fn interceptor_factories(config: &Config) -> ZResult<Vec<InterceptorFactory>> {
diff --git a/zenoh/src/net/routing/interceptor/testing_interceptor.rs b/zenoh/src/net/routing/interceptor/testing_interceptor.rs
new file mode 100644
--- /dev/null
+++ b/zenoh/src/net/routing/interceptor/testing_interceptor.rs
+struct TestInterceptor {}
+struct EgressTestInterceptor {
+    _auth_id: Option<AuthIdentifier>,
+}
+struct IngressTestInterceptor {
+    _auth_id: Option<AuthIdentifier>,
+}
+
+pub(crate) fn new_test_interceptor() -> ZResult<Vec<InterceptorFactory>> {
+    let res: Vec<InterceptorFactory> = vec![Box::new(TestInterceptor {})];
+    Ok(res)
+}
+
+impl InterceptorFactoryTrait for TestInterceptor {
+    fn new_transport_unicast(
+        &self,
+        transport: &TransportUnicast,
+    ) -> (Option<IngressInterceptor>, Option<EgressInterceptor>) {
+        if let Ok(links) = transport.get_links() {
+            for link in links {
+                println!("value received in interceptor {:?}", link.auth_identifier);
+            }
+        }
+
+        (
+            Some(Box::new(IngressTestInterceptor { _auth_id: None })),
+            Some(Box::new(EgressTestInterceptor { _auth_id: None })),
+        )
+    }
+
+    fn new_transport_multicast(
+        &self,
+        _transport: &TransportMulticast,
+    ) -> Option<EgressInterceptor> {
+        None
+    }
+
+    fn new_peer_multicast(&self, _transport: &TransportMulticast) -> Option<IngressInterceptor> {
+        None
+    }
+}
+
+impl InterceptorTrait for IngressTestInterceptor {
+    fn compute_keyexpr_cache(&self, key_expr: &KeyExpr<'_>) -> Option<Box<dyn Any + Send + Sync>> {
+        let _ = key_expr;
+        None
+    }
+    fn intercept<'a>(
+        &self,
+        ctx: RoutingContext<NetworkMessage>,
+        cache: Option<&Box<dyn Any + Send + Sync>>,
+    ) -> Option<RoutingContext<NetworkMessage>> {
+        let _ = cache;
+        Some(ctx)
+    }
+}
+
+impl InterceptorTrait for EgressTestInterceptor {
+    fn compute_keyexpr_cache(&self, key_expr: &KeyExpr<'_>) -> Option<Box<dyn Any + Send + Sync>> {
+        let _ = key_expr;
+        None
+    }
+    fn intercept<'a>(
+        &self,
+        ctx: RoutingContext<NetworkMessage>,
+        cache: Option<&Box<dyn Any + Send + Sync>>,
+    ) -> Option<RoutingContext<NetworkMessage>> {
+        let _ = cache;
+        Some(ctx)
+    }
+}
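
A minimal standalone sketch of the common-name extraction that the QUIC and TLS hunks above perform on peer certificates. It assumes only the x509-parser crate already used in this patch; the function name auth_id_from_cert_der and the locally mirrored AuthIdentifier struct are illustrative stand-ins, not identifiers introduced by the patch itself.

use x509_parser::prelude::*;

// Illustrative mirror of the AuthIdentifier struct added in zenoh-link-commons.
#[derive(Debug, Clone)]
pub struct AuthIdentifier {
    pub username: Option<String>,
    pub tls_cert_name: Option<String>,
}

// Parse a DER-encoded certificate and extract its first subject common name,
// returning None instead of panicking when the input is malformed or has no CN.
pub fn auth_id_from_cert_der(der: &[u8]) -> Option<AuthIdentifier> {
    let (_, cert) = X509Certificate::from_der(der).ok()?;
    let cn = cert
        .subject()
        .iter_common_name()
        .next()
        .and_then(|cn| cn.as_str().ok())?
        .to_string();
    Some(AuthIdentifier {
        username: None,
        tls_cert_name: Some(cn),
    })
}

fn main() {
    // Malformed DER: the helper degrades to None rather than unwinding.
    assert!(auth_id_from_cert_der(&[0u8; 4]).is_none());
    println!("auth_id_from_cert_der rejected malformed DER without panicking");
}

Returning Option instead of unwrapping keeps an accept loop from panicking when a peer presents a malformed certificate or one without a common name; the same shape could be reused in both the QUIC and TLS accept paths shown above.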