diff --git a/lightning/src/util/atomic_counter.rs b/lightning/src/util/atomic_counter.rs index 050bcadf807..38a33aa8268 100644 --- a/lightning/src/util/atomic_counter.rs +++ b/lightning/src/util/atomic_counter.rs @@ -1,9 +1,9 @@ //! A simple atomic counter that uses mutexes if the platform doesn't support atomic u64s. -#[cfg(target_has_atomic = "64")] -use core::sync::atomic::{AtomicU64, Ordering}; #[cfg(not(target_has_atomic = "64"))] use crate::sync::Mutex; +#[cfg(target_has_atomic = "64")] +use core::sync::atomic::{AtomicU64, Ordering}; pub(crate) struct AtomicCounter { #[cfg(target_has_atomic = "64")] @@ -22,10 +22,12 @@ impl AtomicCounter { } } pub(crate) fn next(&self) -> u64 { - #[cfg(target_has_atomic = "64")] { + #[cfg(target_has_atomic = "64")] + { self.counter.fetch_add(1, Ordering::AcqRel) } - #[cfg(not(target_has_atomic = "64"))] { + #[cfg(not(target_has_atomic = "64"))] + { let mut mtx = self.counter.lock().unwrap(); *mtx += 1; *mtx - 1 @@ -33,12 +35,14 @@ impl AtomicCounter { } #[cfg(test)] pub(crate) fn set_counter(&self, count: u64) { - #[cfg(target_has_atomic = "64")] { + #[cfg(target_has_atomic = "64")] + { self.counter.store(count, Ordering::Release); } - #[cfg(not(target_has_atomic = "64"))] { + #[cfg(not(target_has_atomic = "64"))] + { let mut mtx = self.counter.lock().unwrap(); - *mtx = count; + *mtx = count; } } } diff --git a/lightning/src/util/base32.rs b/lightning/src/util/base32.rs index e116dccbf59..f3d7a10c553 100644 --- a/lightning/src/util/base32.rs +++ b/lightning/src/util/base32.rs @@ -33,10 +33,10 @@ pub enum Alphabet { /// RFC4648 encoding. RFC4648 { /// Whether to use padding. - padding: bool + padding: bool, }, /// Zbase32 encoding. - ZBase32 + ZBase32, } impl Alphabet { @@ -60,9 +60,7 @@ impl Alphabet { } ret }, - Self::ZBase32 => { - Self::encode_data(data, ZBASE_ALPHABET) - }, + Self::ZBase32 => Self::encode_data(data, ZBASE_ALPHABET), }; ret.truncate(output_length); @@ -79,7 +77,9 @@ impl Alphabet { Self::RFC4648 { padding } => { let mut unpadded_data_length = data.len(); if *padding { - if data.len() % 8 != 0 { return Err(()); } + if data.len() % 8 != 0 { + return Err(()); + } data.iter().rev().take(6).for_each(|&c| { if c == b'=' { unpadded_data_length -= 1; @@ -88,13 +88,14 @@ impl Alphabet { } (&data[..unpadded_data_length], RFC4648_INV_ALPHABET) }, - Self::ZBase32 => { - (data, ZBASE_INV_ALPHABET) - } + Self::ZBase32 => (data, ZBASE_INV_ALPHABET), }; // If the string has more characters than are required to alphabet_encode the number of bytes // decodable, treat the string as invalid. - match data.len() % 8 { 1|3|6 => return Err(()), _ => {} } + match data.len() % 8 { + 1 | 3 | 6 => return Err(()), + _ => {}, + } Ok(Self::decode_data(data, alphabet)?) 
} @@ -175,9 +176,13 @@ mod tests { ("6n9hq", &[0xf0, 0xbf, 0xc7]), ("4t7ye", &[0xd4, 0x7a, 0x04]), ("6im5sdy", &[0xf5, 0x57, 0xbb, 0x0c]), - ("ybndrfg8ejkmcpqxot1uwisza345h769", &[0x00, 0x44, 0x32, 0x14, 0xc7, 0x42, 0x54, 0xb6, - 0x35, 0xcf, 0x84, 0x65, 0x3a, 0x56, 0xd7, 0xc6, - 0x75, 0xbe, 0x77, 0xdf]) + ( + "ybndrfg8ejkmcpqxot1uwisza345h769", + &[ + 0x00, 0x44, 0x32, 0x14, 0xc7, 0x42, 0x54, 0xb6, 0x35, 0xcf, 0x84, 0x65, 0x3a, 0x56, + 0xd7, 0xc6, 0x75, 0xbe, 0x77, 0xdf, + ], + ), ]; #[test] @@ -242,7 +247,9 @@ mod tests { } for (input, encoded) in RFC4648_NON_PADDED_TEST_VECTORS { - let res = &Alphabet::RFC4648 { padding: false }.decode(std::str::from_utf8(encoded).unwrap()).unwrap(); + let res = &Alphabet::RFC4648 { padding: false } + .decode(std::str::from_utf8(encoded).unwrap()) + .unwrap(); assert_eq!(&res[..], &input[..]); } } @@ -251,9 +258,8 @@ mod tests { fn padding() { let num_padding = [0, 6, 4, 3, 1]; for i in 1..6 { - let encoded = Alphabet::RFC4648 { padding: true }.encode( - (0..(i as u8)).collect::<Vec<u8>>().as_ref() - ); + let encoded = Alphabet::RFC4648 { padding: true } + .encode((0..(i as u8)).collect::<Vec<u8>>().as_ref()); assert_eq!(encoded.len(), 8); for j in 0..(num_padding[i % 5]) { assert_eq!(encoded.as_bytes()[encoded.len() - j - 1], b'='); diff --git a/lightning/src/util/byte_utils.rs b/lightning/src/util/byte_utils.rs index 419a6b73a69..9e023ffeb97 100644 --- a/lightning/src/util/byte_utils.rs +++ b/lightning/src/util/byte_utils.rs @@ -9,23 +9,23 @@ #[inline] pub fn slice_to_be48(v: &[u8]) -> u64 { - ((v[0] as u64) << 8*5) | - ((v[1] as u64) << 8*4) | - ((v[2] as u64) << 8*3) | - ((v[3] as u64) << 8*2) | - ((v[4] as u64) << 8*1) | - ((v[5] as u64) << 8*0) + ((v[0] as u64) << 8 * 5) + | ((v[1] as u64) << 8 * 4) + | ((v[2] as u64) << 8 * 3) + | ((v[3] as u64) << 8 * 2) + | ((v[4] as u64) << 8 * 1) + | ((v[5] as u64) << 8 * 0) } #[inline] pub fn be48_to_array(u: u64) -> [u8; 6] { assert!(u & 0xffff_0000_0000_0000 == 0); let mut v = [0; 6]; - v[0] = ((u >> 8*5) & 0xff) as u8; - v[1] = ((u >> 8*4) & 0xff) as u8; - v[2] = ((u >> 8*3) & 0xff) as u8; - v[3] = ((u >> 8*2) & 0xff) as u8; - v[4] = ((u >> 8*1) & 0xff) as u8; - v[5] = ((u >> 8*0) & 0xff) as u8; + v[0] = ((u >> 8 * 5) & 0xff) as u8; + v[1] = ((u >> 8 * 4) & 0xff) as u8; + v[2] = ((u >> 8 * 3) & 0xff) as u8; + v[3] = ((u >> 8 * 2) & 0xff) as u8; + v[4] = ((u >> 8 * 1) & 0xff) as u8; + v[5] = ((u >> 8 * 0) & 0xff) as u8; + v } diff --git a/lightning/src/util/config.rs b/lightning/src/util/config.rs index 3a0885de784..14529327ef2 100644 --- a/lightning/src/util/config.rs +++ b/lightning/src/util/config.rs @@ -327,7 +327,7 @@ pub struct ChannelHandshakeLimits { /// /// Default value: `2016`, which we also enforce as a maximum value so you can tweak config to /// reduce the loss of having useless locked funds (if your peer accepts) - pub their_to_self_delay: u16 + pub their_to_self_delay: u16, } impl Default for ChannelHandshakeLimits { @@ -582,7 +582,9 @@ pub struct ChannelConfig { impl ChannelConfig { /// Applies the given [`ChannelConfigUpdate`] as a partial update to the [`ChannelConfig`].
pub fn apply(&mut self, update: &ChannelConfigUpdate) { - if let Some(forwarding_fee_proportional_millionths) = update.forwarding_fee_proportional_millionths { + if let Some(forwarding_fee_proportional_millionths) = + update.forwarding_fee_proportional_millionths + { self.forwarding_fee_proportional_millionths = forwarding_fee_proportional_millionths; } if let Some(forwarding_fee_base_msat) = update.forwarding_fee_base_msat { @@ -594,7 +596,9 @@ impl ChannelConfig { if let Some(max_dust_htlc_exposure_msat) = update.max_dust_htlc_exposure_msat { self.max_dust_htlc_exposure = max_dust_htlc_exposure_msat; } - if let Some(force_close_avoidance_max_fee_satoshis) = update.force_close_avoidance_max_fee_satoshis { + if let Some(force_close_avoidance_max_fee_satoshis) = + update.force_close_avoidance_max_fee_satoshis + { self.force_close_avoidance_max_fee_satoshis = force_close_avoidance_max_fee_satoshis; } } @@ -683,11 +687,15 @@ pub struct ChannelConfigUpdate { impl From<ChannelConfig> for ChannelConfigUpdate { fn from(config: ChannelConfig) -> ChannelConfigUpdate { ChannelConfigUpdate { - forwarding_fee_proportional_millionths: Some(config.forwarding_fee_proportional_millionths), + forwarding_fee_proportional_millionths: Some( + config.forwarding_fee_proportional_millionths, + ), forwarding_fee_base_msat: Some(config.forwarding_fee_base_msat), cltv_expiry_delta: Some(config.cltv_expiry_delta), max_dust_htlc_exposure_msat: Some(config.max_dust_htlc_exposure), - force_close_avoidance_max_fee_satoshis: Some(config.force_close_avoidance_max_fee_satoshis), + force_close_avoidance_max_fee_satoshis: Some( + config.force_close_avoidance_max_fee_satoshis, + ), } } } @@ -760,8 +768,9 @@ impl crate::util::ser::Readable for LegacyChannelConfig { }); let max_dust_htlc_exposure_msat_fixed_limit = max_dust_htlc_exposure_msat_fixed_limit.unwrap_or(5_000_000); - let max_dust_htlc_exposure_msat = max_dust_htlc_exposure_enum - .unwrap_or(MaxDustHTLCExposure::FixedLimitMsat(max_dust_htlc_exposure_msat_fixed_limit)); + let max_dust_htlc_exposure_msat = max_dust_htlc_exposure_enum.unwrap_or( + MaxDustHTLCExposure::FixedLimitMsat(max_dust_htlc_exposure_msat_fixed_limit), + ); Ok(Self { options: ChannelConfig { forwarding_fee_proportional_millionths, diff --git a/lightning/src/util/errors.rs b/lightning/src/util/errors.rs index 735ce044f81..ba423aa1c96 100644 --- a/lightning/src/util/errors.rs +++ b/lightning/src/util/errors.rs @@ -24,7 +24,7 @@ pub enum APIError { /// are documented, but generally indicates some precondition of a function was violated. APIMisuseError { /// A human-readable error message - err: String + err: String, }, /// Due to a high feerate, we were unable to complete the request. /// For example, this may be returned if the feerate implies we cannot open a channel at the @@ -33,20 +33,20 @@ pub enum APIError { /// A human-readable error message err: String, /// The feerate which was too high. - feerate: u32 + feerate: u32, }, /// A malformed Route was provided (eg overflowed value, node id mismatch, overly-looped route, /// too-many-hops, etc). InvalidRoute { /// A human-readable error message - err: String + err: String, }, /// We were unable to complete the request as the Channel required to do so is unable to /// complete the request (or was not found). This can take many forms, including disconnected /// peer, channel at capacity, channel shutting down, etc.
ChannelUnavailable { /// A human-readable error message - err: String + err: String, }, /// An attempt to call [`chain::Watch::watch_channel`]/[`chain::Watch::update_channel`] /// returned a [`ChannelMonitorUpdateStatus::InProgress`] indicating the persistence of a @@ -74,11 +74,15 @@ pub enum APIError { impl fmt::Debug for APIError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { - APIError::APIMisuseError {ref err} => write!(f, "Misuse error: {}", err), - APIError::FeeRateTooHigh {ref err, ref feerate} => write!(f, "{} feerate: {}", err, feerate), - APIError::InvalidRoute {ref err} => write!(f, "Invalid route provided: {}", err), - APIError::ChannelUnavailable {ref err} => write!(f, "Channel unavailable: {}", err), - APIError::MonitorUpdateInProgress => f.write_str("Client indicated a channel monitor update is in progress but not yet complete"), + APIError::APIMisuseError { ref err } => write!(f, "Misuse error: {}", err), + APIError::FeeRateTooHigh { ref err, ref feerate } => { + write!(f, "{} feerate: {}", err, feerate) + }, + APIError::InvalidRoute { ref err } => write!(f, "Invalid route provided: {}", err), + APIError::ChannelUnavailable { ref err } => write!(f, "Channel unavailable: {}", err), + APIError::MonitorUpdateInProgress => f.write_str( + "Client indicated a channel monitor update is in progress but not yet complete", + ), APIError::IncompatibleShutdownScript { ref script } => { write!(f, "Provided a scriptpubkey format not accepted by peer: {}", script) }, @@ -101,9 +105,9 @@ impl_writeable_tlv_based_enum_upgradable!(APIError, #[inline] pub(crate) fn get_onion_debug_field(error_code: u16) -> (&'static str, usize) { match error_code & 0xff { - 4|5|6 => ("sha256_of_onion", 32), - 11|12 => ("htlc_msat", 8), - 13|18 => ("cltv_expiry", 4), + 4 | 5 | 6 => ("sha256_of_onion", 32), + 11 | 12 => ("htlc_msat", 8), + 13 | 18 => ("cltv_expiry", 4), 19 => ("incoming_htlc_msat", 8), 20 => ("flags", 2), _ => ("", 0), diff --git a/lightning/src/util/fuzz_wrappers.rs b/lightning/src/util/fuzz_wrappers.rs index dec3d8f6590..6475065b8be 100644 --- a/lightning/src/util/fuzz_wrappers.rs +++ b/lightning/src/util/fuzz_wrappers.rs @@ -8,19 +8,17 @@ // licenses. macro_rules! 
hash_to_message { - ($slice: expr) => { + ($slice: expr) => {{ + #[cfg(not(fuzzing))] { - #[cfg(not(fuzzing))] - { - ::bitcoin::secp256k1::Message::from_digest_slice($slice).unwrap() - } - #[cfg(fuzzing)] - { - match ::bitcoin::secp256k1::Message::from_digest_slice($slice) { - Ok(msg) => msg, - Err(_) => ::bitcoin::secp256k1::Message::from_digest([1; 32]) - } + ::bitcoin::secp256k1::Message::from_digest_slice($slice).unwrap() + } + #[cfg(fuzzing)] + { + match ::bitcoin::secp256k1::Message::from_digest_slice($slice) { + Ok(msg) => msg, + Err(_) => ::bitcoin::secp256k1::Message::from_digest([1; 32]), } } - } + }}; } diff --git a/lightning/src/util/indexed_map.rs b/lightning/src/util/indexed_map.rs index dd29e4ddcb9..34860e3d68a 100644 --- a/lightning/src/util/indexed_map.rs +++ b/lightning/src/util/indexed_map.rs @@ -31,18 +31,12 @@ pub struct IndexedMap<K: Hash + Ord, V> { impl<K: Clone + Hash + Ord, V> IndexedMap<K, V> { /// Constructs a new, empty map pub fn new() -> Self { - Self { - map: new_hash_map(), - keys: Vec::new(), - } + Self { map: new_hash_map(), keys: Vec::new() } } /// Constructs a new, empty map with the given capacity pre-allocated pub fn with_capacity(capacity: usize) -> Self { - Self { - map: hash_map_with_capacity(capacity), - keys: Vec::with_capacity(capacity), - } + Self { map: hash_map_with_capacity(capacity), keys: Vec::with_capacity(capacity) } } #[inline(always)] @@ -71,7 +65,8 @@ impl<K: Clone + Hash + Ord, V> IndexedMap<K, V> { pub fn remove(&mut self, key: &K) -> Option<V> { let ret = self.map.remove(key); if let Some(_) = ret { - let idx = self.keys.iter().position(|k| k == key).expect("map and keys must be consistent"); + let idx = + self.keys.iter().position(|k| k == key).expect("map and keys must be consistent"); self.keys.remove(idx); } ret @@ -91,18 +86,11 @@ impl<K: Clone + Hash + Ord, V> IndexedMap<K, V> { pub fn entry(&mut self, key: K) -> Entry<'_, K, V> { match self.map.entry(key.clone()) { hash_map::Entry::Vacant(entry) => { - Entry::Vacant(VacantEntry { - underlying_entry: entry, - key, - keys: &mut self.keys, - }) + Entry::Vacant(VacantEntry { underlying_entry: entry, key, keys: &mut self.keys }) }, hash_map::Entry::Occupied(entry) => { - Entry::Occupied(OccupiedEntry { - underlying_entry: entry, - keys: &mut self.keys, - }) - } + Entry::Occupied(OccupiedEntry { underlying_entry: entry, keys: &mut self.keys }) + }, } } @@ -128,18 +116,19 @@ impl<K: Clone + Hash + Ord, V> IndexedMap<K, V> { let start = match range.start_bound() { Bound::Unbounded => 0, Bound::Included(key) => self.keys.binary_search(key).unwrap_or_else(|index| index), - Bound::Excluded(key) => self.keys.binary_search(key).map(|index| index +1).unwrap_or_else(|index| index), + Bound::Excluded(key) => { + self.keys.binary_search(key).map(|index| index + 1).unwrap_or_else(|index| index) + }, }; let end = match range.end_bound() { Bound::Unbounded => self.keys.len(), - Bound::Included(key) => self.keys.binary_search(key).map(|index| index +1).unwrap_or_else(|index| index), + Bound::Included(key) => { + self.keys.binary_search(key).map(|index| index + 1).unwrap_or_else(|index| index) + }, Bound::Excluded(key) => self.keys.binary_search(key).unwrap_or_else(|index| index), }; - Range { - inner_range: self.keys[start..end].iter(), - map: &self.map, - } + Range { inner_range: self.keys[start..end].iter(), map: &self.map } } /// Returns the number of `key`/`value` pairs in the map @@ -169,9 +158,9 @@ pub struct Range<'a, K: Hash + Ord, V> { impl<'a, K: Hash + Ord, V: 'a> Iterator for Range<'a, K, V> { type Item = (&'a K, &'a V); fn next(&mut self) -> Option<(&'a K, &'a V)> { - self.inner_range.next().map(|k| { - (k,
self.map.get(k).expect("map and keys must be consistent")) - }) + self.inner_range + .next() + .map(|k| (k, self.map.get(k).expect("map and keys must be consistent"))) } } @@ -215,7 +204,8 @@ impl<'a, K: Hash + Ord, V> OccupiedEntry<'a, K, V> { /// Remove the value at the position described by this entry. pub fn remove_entry(self) -> (K, V) { let res = self.underlying_entry.remove_entry(); - let idx = self.keys.iter().position(|k| k == &res.0).expect("map and keys must be consistent"); + let idx = + self.keys.iter().position(|k| k == &res.0).expect("map and keys must be consistent"); self.keys.remove(idx); res } diff --git a/lightning/src/util/logger.rs b/lightning/src/util/logger.rs index 56e609ca4e0..0b29ea1a98f 100644 --- a/lightning/src/util/logger.rs +++ b/lightning/src/util/logger.rs @@ -169,7 +169,10 @@ pub trait Logger { /// /// This is not exported to bindings users as lifetimes are problematic and there's little reason /// for this to be used downstream anyway. -pub struct WithContext<'a, L: Deref> where L::Target: Logger { +pub struct WithContext<'a, L: Deref> +where + L::Target: Logger, +{ /// The logger to delegate to after adding context to the record. logger: &'a L, /// The node id of the peer pertaining to the logged record. @@ -177,10 +180,13 @@ pub struct WithContext<'a, L: Deref> where L::Target: Logger { /// The channel id of the channel pertaining to the logged record. channel_id: Option<ChannelId>, /// The payment hash of the payment pertaining to the logged record. - payment_hash: Option<PaymentHash> + payment_hash: Option<PaymentHash>, } -impl<'a, L: Deref> Logger for WithContext<'a, L> where L::Target: Logger { +impl<'a, L: Deref> Logger for WithContext<'a, L> +where + L::Target: Logger, +{ fn log(&self, mut record: Record) { if self.peer_id.is_some() { record.peer_id = self.peer_id @@ -195,15 +201,16 @@ impl<'a, L: Deref> Logger for WithContext<'a, L> where L::Target: Logger { } } -impl<'a, L: Deref> WithContext<'a, L> where L::Target: Logger { +impl<'a, L: Deref> WithContext<'a, L> +where + L::Target: Logger, +{ /// Wraps the given logger, providing additional context to any logged records.
- pub fn from(logger: &'a L, peer_id: Option<PublicKey>, channel_id: Option<ChannelId>, payment_hash: Option<PaymentHash>) -> Self { - WithContext { - logger, - peer_id, - channel_id, - payment_hash, - } + pub fn from( + logger: &'a L, peer_id: Option<PublicKey>, channel_id: Option<ChannelId>, + payment_hash: Option<PaymentHash>, + ) -> Self { + WithContext { logger, peer_id, channel_id, payment_hash } } } @@ -257,12 +264,12 @@ impl<T: fmt::Display, I: core::iter::Iterator<Item = T> + Clone> fmt::Display fo #[cfg(test)] mod tests { - use bitcoin::secp256k1::{PublicKey, SecretKey, Secp256k1}; use crate::ln::types::ChannelId; use crate::ln::PaymentHash; - use crate::util::logger::{Logger, Level, WithContext}; - use crate::util::test_utils::TestLogger; use crate::sync::Arc; + use crate::util::logger::{Level, Logger, WithContext}; + use crate::util::test_utils::TestLogger; + use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey}; #[test] fn test_level_show() { @@ -272,14 +279,12 @@ mod tests { } struct WrapperLog { - logger: Arc<dyn Logger> + logger: Arc<dyn Logger>, } impl WrapperLog { fn new(logger: Arc<dyn Logger>) -> WrapperLog { - WrapperLog { - logger, - } + WrapperLog { logger } } fn call_macros(&self) { @@ -295,7 +300,7 @@ mod tests { #[test] fn test_logging_macros() { let logger = TestLogger::new(); - let logger : Arc<dyn Logger> = Arc::new(logger); + let logger: Arc<dyn Logger> = Arc::new(logger); let wrapper = WrapperLog::new(Arc::clone(&logger)); wrapper.call_macros(); } @@ -306,7 +311,8 @@ mod tests { let secp_ctx = Secp256k1::new(); let pk = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap()); let payment_hash = PaymentHash([0; 32]); - let context_logger = WithContext::from(&logger, Some(pk), Some(ChannelId([0; 32])), Some(payment_hash)); + let context_logger = + WithContext::from(&logger, Some(pk), Some(ChannelId([0; 32])), Some(payment_hash)); log_error!(context_logger, "This is an error"); log_warn!(context_logger, "This is an error"); log_debug!(context_logger, "This is an error"); @@ -314,7 +320,10 @@ mod tests { log_gossip!(context_logger, "This is an error"); log_info!(context_logger, "This is an error"); logger.assert_log_context_contains( - "lightning::util::logger::tests", Some(pk), Some(ChannelId([0;32])), 6 + "lightning::util::logger::tests", + Some(pk), + Some(ChannelId([0; 32])), + 6, ); } @@ -324,7 +333,8 @@ mod tests { let secp_ctx = Secp256k1::new(); let pk = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap()); let payment_hash = PaymentHash([0; 32]); - let context_logger = &WithContext::from(&logger, None, Some(ChannelId([0; 32])), Some(payment_hash)); + let context_logger = + &WithContext::from(&logger, None, Some(ChannelId([0; 32])), Some(payment_hash)); let full_context_logger = WithContext::from(&context_logger, Some(pk), None, None); log_error!(full_context_logger, "This is an error"); log_warn!(full_context_logger, "This is an error"); @@ -333,7 +343,10 @@ mod tests { log_gossip!(full_context_logger, "This is an error"); log_info!(full_context_logger, "This is an error"); logger.assert_log_context_contains( - "lightning::util::logger::tests", Some(pk), Some(ChannelId([0;32])), 6 + "lightning::util::logger::tests", + Some(pk), + Some(ChannelId([0; 32])), + 6, ); } diff --git a/lightning/src/util/macro_logger.rs b/lightning/src/util/macro_logger.rs index 10452c57d7b..19e9bb710d0 100644 --- a/lightning/src/util/macro_logger.rs +++ b/lightning/src/util/macro_logger.rs @@ -12,13 +12,13 @@ use crate::sign::SpendableOutputDescriptor; use bitcoin::transaction::Transaction; -use crate::routing::router::Route; use crate::ln::chan_utils::HTLCClaim; +use
crate::routing::router::Route; macro_rules! log_iter { ($obj: expr) => { $crate::util::logger::DebugIter($obj) - } + }; } /// Logs a pubkey in hex format. @@ -26,7 +26,7 @@ macro_rules! log_iter { macro_rules! log_pubkey { ($obj: expr) => { $crate::util::logger::DebugPubKey(&$obj) - } + }; } /// Logs a byte slice in hex format. @@ -34,7 +34,7 @@ macro_rules! log_pubkey { macro_rules! log_bytes { ($obj: expr) => { $crate::util::logger::DebugBytes(&$obj) - } + }; } pub(crate) struct DebugFundingInfo<'a>(pub &'a ChannelId); @@ -45,10 +45,8 @@ impl<'a> core::fmt::Display for DebugFundingInfo<'a> { } macro_rules! log_funding_info { ($key_storage: expr) => { - $crate::util::macro_logger::DebugFundingInfo( - &$key_storage.channel_id() - ) - } + $crate::util::macro_logger::DebugFundingInfo(&$key_storage.channel_id()) + }; } pub(crate) struct DebugRoute<'a>(pub &'a Route); @@ -57,7 +55,14 @@ impl<'a> core::fmt::Display for DebugRoute<'a> { for (idx, p) in self.0.paths.iter().enumerate() { writeln!(f, "path {}:", idx)?; for h in p.hops.iter() { - writeln!(f, " node_id: {}, short_channel_id: {}, fee_msat: {}, cltv_expiry_delta: {}", log_pubkey!(h.pubkey), h.short_channel_id, h.fee_msat, h.cltv_expiry_delta)?; + writeln!( + f, + " node_id: {}, short_channel_id: {}, fee_msat: {}, cltv_expiry_delta: {}", + log_pubkey!(h.pubkey), + h.short_channel_id, + h.fee_msat, + h.cltv_expiry_delta + )?; } writeln!(f, " blinded_tail: {:?}", p.blinded_tail)?; } @@ -67,7 +72,7 @@ impl<'a> core::fmt::Display for DebugRoute<'a> { macro_rules! log_route { ($obj: expr) => { $crate::util::macro_logger::DebugRoute(&$obj) - } + }; } pub(crate) struct DebugTx<'a>(pub &'a Transaction); @@ -76,14 +81,21 @@ impl<'a> core::fmt::Display for DebugTx<'a> { if self.0.input.len() >= 1 && self.0.input.iter().any(|i| !i.witness.is_empty()) { let first_input = &self.0.input[0]; let witness_script_len = first_input.witness.last().unwrap_or(&[]).len(); - if self.0.input.len() == 1 && witness_script_len == 71 && - (first_input.sequence.0 >> 8*3) as u8 == 0x80 { + if self.0.input.len() == 1 + && witness_script_len == 71 + && (first_input.sequence.0 >> 8 * 3) as u8 == 0x80 + { write!(f, "commitment tx ")?; } else if self.0.input.len() == 1 && witness_script_len == 71 { write!(f, "closing tx ")?; - } else if self.0.input.len() == 1 && HTLCClaim::from_witness(&first_input.witness) == Some(HTLCClaim::OfferedTimeout) { + } else if self.0.input.len() == 1 + && HTLCClaim::from_witness(&first_input.witness) == Some(HTLCClaim::OfferedTimeout) + { write!(f, "HTLC-timeout tx ")?; - } else if self.0.input.len() == 1 && HTLCClaim::from_witness(&first_input.witness) == Some(HTLCClaim::AcceptedPreimage) { + } else if self.0.input.len() == 1 + && HTLCClaim::from_witness(&first_input.witness) + == Some(HTLCClaim::AcceptedPreimage) + { write!(f, "HTLC-success tx ")?; } else { let mut num_preimage = 0; @@ -92,15 +104,22 @@ impl<'a> core::fmt::Display for DebugTx<'a> { for inp in &self.0.input { let htlc_claim = HTLCClaim::from_witness(&inp.witness); match htlc_claim { - Some(HTLCClaim::AcceptedPreimage)|Some(HTLCClaim::OfferedPreimage) => num_preimage += 1, - Some(HTLCClaim::AcceptedTimeout)|Some(HTLCClaim::OfferedTimeout) => num_timeout += 1, + Some(HTLCClaim::AcceptedPreimage) | Some(HTLCClaim::OfferedPreimage) => { + num_preimage += 1 + }, + Some(HTLCClaim::AcceptedTimeout) | Some(HTLCClaim::OfferedTimeout) => { + num_timeout += 1 + }, Some(HTLCClaim::Revocation) => num_revoked += 1, None => continue, } } if num_preimage > 0 || num_timeout > 0 || 
num_revoked > 0 { - write!(f, "HTLC claim tx ({} preimage, {} timeout, {} revoked) ", - num_preimage, num_timeout, num_revoked)?; + write!( + f, + "HTLC claim tx ({} preimage, {} timeout, {} revoked) ", + num_preimage, num_timeout, num_revoked + )?; } } } else { @@ -115,7 +134,7 @@ impl<'a> core::fmt::Display for DebugTx<'a> { macro_rules! log_tx { ($obj: expr) => { $crate::util::macro_logger::DebugTx(&$obj) - } + }; } pub(crate) struct DebugSpendable<'a>(pub &'a SpendableOutputDescriptor); @@ -124,13 +143,21 @@ impl<'a> core::fmt::Display for DebugSpendable<'a> { match self.0 { &SpendableOutputDescriptor::StaticOutput { ref outpoint, .. } => { write!(f, "StaticOutput {}:{} marked for spending", outpoint.txid, outpoint.index)?; - } + }, &SpendableOutputDescriptor::DelayedPaymentOutput(ref descriptor) => { - write!(f, "DelayedPaymentOutput {}:{} marked for spending", descriptor.outpoint.txid, descriptor.outpoint.index)?; - } + write!( + f, + "DelayedPaymentOutput {}:{} marked for spending", + descriptor.outpoint.txid, descriptor.outpoint.index + )?; + }, &SpendableOutputDescriptor::StaticPaymentOutput(ref descriptor) => { - write!(f, "StaticPaymentOutput {}:{} marked for spending", descriptor.outpoint.txid, descriptor.outpoint.index)?; - } + write!( + f, + "StaticPaymentOutput {}:{} marked for spending", + descriptor.outpoint.txid, descriptor.outpoint.index + )?; + }, } Ok(()) } @@ -139,7 +166,7 @@ impl<'a> core::fmt::Display for DebugSpendable<'a> { macro_rules! log_spendable { ($obj: expr) => { $crate::util::macro_logger::DebugSpendable(&$obj) - } + }; } /// Create a new Record and log it. You probably don't want to use this macro directly, diff --git a/lightning/src/util/message_signing.rs b/lightning/src/util/message_signing.rs index 0a45af26f86..d16fc73df7c 100644 --- a/lightning/src/util/message_signing.rs +++ b/lightning/src/util/message_signing.rs @@ -47,7 +47,7 @@ fn sigrec_decode(sig_rec: Vec<u8>) -> Result<RecoverableSignature, Error> { match RecoveryId::from_i32(rid) { Ok(x) => RecoverableSignature::from_compact(rsig, x), - Err(e) => Err(e) + Err(e) => Err(e), } } @@ -63,18 +63,18 @@ pub fn sign(msg: &[u8], sk: &SecretKey) -> String { } /// Recovers the PublicKey of the signer of the message given the message and the signature.
-pub fn recover_pk(msg: &[u8], sig: &str) -> Result<PublicKey, Error> { +pub fn recover_pk(msg: &[u8], sig: &str) -> Result<PublicKey, Error> { let secp_ctx = Secp256k1::verification_only(); let msg_hash = sha256d::Hash::hash(&[LN_MESSAGE_PREFIX, msg].concat()); match base32::Alphabet::ZBase32.decode(&sig) { - Ok(sig_rec) => { - match sigrec_decode(sig_rec) { - Ok(sig) => secp_ctx.recover_ecdsa(&Message::from_digest(msg_hash.to_byte_array()), &sig), - Err(e) => Err(e) - } + Ok(sig_rec) => match sigrec_decode(sig_rec) { + Ok(sig) => { + secp_ctx.recover_ecdsa(&Message::from_digest(msg_hash.to_byte_array()), &sig) + }, + Err(e) => Err(e), }, - Err(_) => Err(Error::InvalidSignature) + Err(_) => Err(Error::InvalidSignature), } } @@ -83,16 +83,16 @@ pub fn recover_pk(msg: &[u8], sig: &str) -> Result<PublicKey, Error> { pub fn verify(msg: &[u8], sig: &str, pk: &PublicKey) -> bool { match recover_pk(msg, sig) { Ok(x) => x == *pk, - Err(_) => false + Err(_) => false, } } #[cfg(test)] mod test { - use core::str::FromStr; - use crate::util::message_signing::{sign, recover_pk, verify}; + use crate::util::message_signing::{recover_pk, sign, verify}; use bitcoin::secp256k1::constants::ONE; - use bitcoin::secp256k1::{PublicKey, SecretKey, Secp256k1}; + use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey}; + use core::str::FromStr; #[test] fn test_sign() { @@ -148,4 +148,3 @@ mod test { } } } - diff --git a/lightning/src/util/persist.rs b/lightning/src/util/persist.rs index 64fe84a09a7..d546749dd48 100644 --- a/lightning/src/util/persist.rs +++ b/lightning/src/util/persist.rs @@ -10,28 +10,31 @@ //! //! [`ChannelManager`]: crate::ln::channelmanager::ChannelManager +use bitcoin::{BlockHash, Txid}; use core::cmp; use core::ops::Deref; use core::str::FromStr; -use bitcoin::{BlockHash, Txid}; -use crate::{io, log_error}; use crate::prelude::*; +use crate::{io, log_error}; use crate::chain; use crate::chain::chaininterface::{BroadcasterInterface, FeeEstimator}; use crate::chain::chainmonitor::Persist; -use crate::sign::{EntropySource, ecdsa::EcdsaChannelSigner, SignerProvider}; +use crate::chain::channelmonitor::{ + ChannelMonitor, ChannelMonitorUpdate, CLOSED_CHANNEL_UPDATE_ID, +}; use crate::chain::transaction::OutPoint; -use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, CLOSED_CHANNEL_UPDATE_ID}; use crate::ln::channelmanager::AChannelManager; use crate::routing::gossip::NetworkGraph; use crate::routing::scoring::WriteableScore; +use crate::sign::{ecdsa::EcdsaChannelSigner, EntropySource, SignerProvider}; use crate::util::logger::Logger; use crate::util::ser::{Readable, ReadableArgs, Writeable}; /// The alphabet of characters allowed for namespaces and keys. -pub const KVSTORE_NAMESPACE_KEY_ALPHABET: &str = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_-"; +pub const KVSTORE_NAMESPACE_KEY_ALPHABET: &str = + "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_-"; /// The maximum number of characters namespaces and keys may have. pub const KVSTORE_NAMESPACE_KEY_MAX_LEN: usize = 120; @@ -124,12 +127,16 @@ pub trait KVStore { /// `primary_namespace` and `secondary_namespace`. /// /// [`ErrorKind::NotFound`]: io::ErrorKind::NotFound - fn read(&self, primary_namespace: &str, secondary_namespace: &str, key: &str) -> Result<Vec<u8>, io::Error>; + fn read( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, + ) -> Result<Vec<u8>, io::Error>; /// Persists the given data under the given `key`. /// /// Will create the given `primary_namespace` and `secondary_namespace` if not already present /// in the store.
- fn write(&self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: &[u8]) -> Result<(), io::Error>; + fn write( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: &[u8], + ) -> Result<(), io::Error>; /// Removes any data that had previously been persisted under the given `key`. /// /// If the `lazy` flag is set to `true`, the backend implementation might choose to lazily @@ -145,13 +152,17 @@ pub trait KVStore { /// Returns successfully if no data will be stored for the given `primary_namespace`, /// `secondary_namespace`, and `key`, independently of whether it was present before its /// invokation or not. - fn remove(&self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool) -> Result<(), io::Error>; + fn remove( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool, + ) -> Result<(), io::Error>; /// Returns a list of keys that are stored under the given `secondary_namespace` in /// `primary_namespace`. /// /// Returns the keys in arbitrary order, so users requiring a particular order need to sort the /// returned keys. Returns an empty list if `primary_namespace` or `secondary_namespace` is unknown. - fn list(&self, primary_namespace: &str, secondary_namespace: &str) -> Result, io::Error>; + fn list( + &self, primary_namespace: &str, secondary_namespace: &str, + ) -> Result, io::Error>; } /// Trait that handles persisting a [`ChannelManager`], [`NetworkGraph`], and [`WriteableScore`] to disk. @@ -175,7 +186,6 @@ where fn persist_scorer(&self, scorer: &S) -> Result<(), io::Error>; } - impl<'a, A: KVStore + ?Sized, CM: Deref, L: Deref, S: Deref> Persister<'a, CM, L, S> for A where CM::Target: 'static + AChannelManager, @@ -183,24 +193,30 @@ where S::Target: WriteableScore<'a>, { fn persist_manager(&self, channel_manager: &CM) -> Result<(), io::Error> { - self.write(CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, + self.write( + CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE, CHANNEL_MANAGER_PERSISTENCE_KEY, - &channel_manager.get_cm().encode()) + &channel_manager.get_cm().encode(), + ) } fn persist_graph(&self, network_graph: &NetworkGraph) -> Result<(), io::Error> { - self.write(NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE, + self.write( + NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE, NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE, NETWORK_GRAPH_PERSISTENCE_KEY, - &network_graph.encode()) + &network_graph.encode(), + ) } fn persist_scorer(&self, scorer: &S) -> Result<(), io::Error> { - self.write(SCORER_PERSISTENCE_PRIMARY_NAMESPACE, + self.write( + SCORER_PERSISTENCE_PRIMARY_NAMESPACE, SCORER_PERSISTENCE_SECONDARY_NAMESPACE, SCORER_PERSISTENCE_KEY, - &scorer.encode()) + &scorer.encode(), + ) } } @@ -210,27 +226,34 @@ impl Persist) -> chain::ChannelMonitorUpdateStatus { + fn persist_new_channel( + &self, funding_txo: OutPoint, monitor: &ChannelMonitor, + ) -> chain::ChannelMonitorUpdateStatus { let key = format!("{}_{}", funding_txo.txid.to_string(), funding_txo.index); match self.write( CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE, CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE, - &key, &monitor.encode()) - { + &key, + &monitor.encode(), + ) { Ok(()) => chain::ChannelMonitorUpdateStatus::Completed, - Err(_) => chain::ChannelMonitorUpdateStatus::UnrecoverableError + Err(_) => chain::ChannelMonitorUpdateStatus::UnrecoverableError, } } - fn update_persisted_channel(&self, funding_txo: OutPoint, _update: Option<&ChannelMonitorUpdate>, monitor: 
&ChannelMonitor) -> chain::ChannelMonitorUpdateStatus { + fn update_persisted_channel( + &self, funding_txo: OutPoint, _update: Option<&ChannelMonitorUpdate>, + monitor: &ChannelMonitor, + ) -> chain::ChannelMonitorUpdateStatus { let key = format!("{}_{}", funding_txo.txid.to_string(), funding_txo.index); match self.write( CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE, CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE, - &key, &monitor.encode()) - { + &key, + &monitor.encode(), + ) { Ok(()) => chain::ChannelMonitorUpdateStatus::Completed, - Err(_) => chain::ChannelMonitorUpdateStatus::UnrecoverableError + Err(_) => chain::ChannelMonitorUpdateStatus::UnrecoverableError, } } @@ -242,7 +265,7 @@ impl Persist monitor, - Err(_) => return + Err(_) => return, }; match self.write( ARCHIVED_CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE, @@ -250,8 +273,8 @@ impl Persist {} - Err(_e) => return + Ok(()) => {}, + Err(_e) => return, }; let _ = self.remove( CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE, @@ -274,12 +297,14 @@ where let mut res = Vec::new(); for stored_key in kv_store.list( - CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE, CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE)? - { + CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE, + CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE, + )? { if stored_key.len() < 66 { return Err(io::Error::new( io::ErrorKind::InvalidData, - "Stored key has invalid length")); + "Stored key has invalid length", + )); } let txid = Txid::from_str(stored_key.split_at(64).0).map_err(|_| { @@ -291,8 +316,11 @@ where })?; match <(BlockHash, ChannelMonitor<::EcdsaSigner>)>::read( - &mut io::Cursor::new( - kv_store.read(CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE, CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE, &stored_key)?), + &mut io::Cursor::new(kv_store.read( + CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE, + CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE, + &stored_key, + )?), (&*entropy_source, &*signer_provider), ) { Ok((block_hash, channel_monitor)) => { @@ -305,13 +333,13 @@ where )); } res.push((block_hash, channel_monitor)); - } + }, Err(_) => { return Err(io::Error::new( io::ErrorKind::InvalidData, - "Failed to read ChannelMonitor" + "Failed to read ChannelMonitor", )) - } + }, } } Ok(res) @@ -407,7 +435,7 @@ where ES::Target: EntropySource + Sized, SP::Target: SignerProvider + Sized, BI::Target: BroadcasterInterface, - FE::Target: FeeEstimator + FE::Target: FeeEstimator, { kv_store: K, logger: L, @@ -415,7 +443,7 @@ where entropy_source: ES, signer_provider: SP, broadcaster: BI, - fee_estimator: FE + fee_estimator: FE, } #[allow(dead_code)] @@ -427,7 +455,7 @@ where ES::Target: EntropySource + Sized, SP::Target: SignerProvider + Sized, BI::Target: BroadcasterInterface, - FE::Target: FeeEstimator + FE::Target: FeeEstimator, { /// Constructs a new [`MonitorUpdatingPersister`]. /// @@ -447,7 +475,7 @@ where /// [`MonitorUpdatingPersister::cleanup_stale_updates`]. pub fn new( kv_store: K, logger: L, maximum_pending_updates: u64, entropy_source: ES, - signer_provider: SP, broadcaster: BI, fee_estimator: FE + signer_provider: SP, broadcaster: BI, fee_estimator: FE, ) -> Self { MonitorUpdatingPersister { kv_store, @@ -456,7 +484,7 @@ where entropy_source, signer_provider, broadcaster, - fee_estimator + fee_estimator, } } @@ -465,7 +493,12 @@ where /// It is extremely important that your [`KVStore::read`] implementation uses the /// [`io::ErrorKind::NotFound`] variant correctly. For more information, please see the /// documentation for [`MonitorUpdatingPersister`]. 
- pub fn read_all_channel_monitors_with_updates(&self) -> Result::EcdsaSigner>)>, io::Error> { + pub fn read_all_channel_monitors_with_updates( + &self, + ) -> Result< + Vec<(BlockHash, ChannelMonitor<::EcdsaSigner>)>, + io::Error, + > { let monitor_list = self.kv_store.list( CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE, CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE, @@ -496,7 +529,8 @@ where /// function to accomplish this. Take care to limit the number of parallel readers. pub fn read_channel_monitor_with_updates( &self, monitor_key: String, - ) -> Result<(BlockHash, ChannelMonitor<::EcdsaSigner>), io::Error> { + ) -> Result<(BlockHash, ChannelMonitor<::EcdsaSigner>), io::Error> + { let monitor_name = MonitorName::new(monitor_key)?; let (block_hash, monitor) = self.read_monitor(&monitor_name)?; let mut current_update_id = monitor.get_latest_update_id(); @@ -511,21 +545,22 @@ where Err(err) if err.kind() == io::ErrorKind::NotFound => { // We can't find any more updates, so we are done. break; - } + }, Err(err) => return Err(err), }; - monitor.update_monitor(&update, &self.broadcaster, &self.fee_estimator, &self.logger) + monitor + .update_monitor(&update, &self.broadcaster, &self.fee_estimator, &self.logger) .map_err(|e| { - log_error!( - self.logger, - "Monitor update failed. monitor: {} update: {} reason: {:?}", - monitor_name.as_str(), - update_name.as_str(), - e - ); - io::Error::new(io::ErrorKind::Other, "Monitor update failed") - })?; + log_error!( + self.logger, + "Monitor update failed. monitor: {} update: {} reason: {:?}", + monitor_name.as_str(), + update_name.as_str(), + e + ); + io::Error::new(io::ErrorKind::Other, "Monitor update failed") + })?; } Ok((block_hash, monitor)) } @@ -533,7 +568,8 @@ where /// Read a channel monitor. fn read_monitor( &self, monitor_name: &MonitorName, - ) -> Result<(BlockHash, ChannelMonitor<::EcdsaSigner>), io::Error> { + ) -> Result<(BlockHash, ChannelMonitor<::EcdsaSigner>), io::Error> + { let outpoint: OutPoint = monitor_name.try_into()?; let mut monitor_cursor = io::Cursor::new(self.kv_store.read( CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE, @@ -564,7 +600,7 @@ where } else { Ok((blockhash, channel_monitor)) } - } + }, Err(e) => { log_error!( self.logger, @@ -573,7 +609,7 @@ where e, ); Err(io::Error::new(io::ErrorKind::InvalidData, "Failed to read ChannelMonitor")) - } + }, } } @@ -613,9 +649,10 @@ where for monitor_key in monitor_keys { let monitor_name = MonitorName::new(monitor_key)?; let (_, current_monitor) = self.read_monitor(&monitor_name)?; - let updates = self - .kv_store - .list(CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, monitor_name.as_str())?; + let updates = self.kv_store.list( + CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, + monitor_name.as_str(), + )?; for update in updates { let update_name = UpdateName::new(update)?; // if the update_id is lower than the stored monitor, delete @@ -633,20 +670,27 @@ where } } -impl - Persist for MonitorUpdatingPersister +impl< + ChannelSigner: EcdsaChannelSigner, + K: Deref, + L: Deref, + ES: Deref, + SP: Deref, + BI: Deref, + FE: Deref, + > Persist for MonitorUpdatingPersister where K::Target: KVStore, L::Target: Logger, ES::Target: EntropySource + Sized, SP::Target: SignerProvider + Sized, BI::Target: BroadcasterInterface, - FE::Target: FeeEstimator + FE::Target: FeeEstimator, { /// Persists a new channel. This means writing the entire monitor to the /// parametrized [`KVStore`]. 
fn persist_new_channel( - &self, funding_txo: OutPoint, monitor: &ChannelMonitor + &self, funding_txo: OutPoint, monitor: &ChannelMonitor, ) -> chain::ChannelMonitorUpdateStatus { // Determine the proper key for this monitor let monitor_name = MonitorName::from(funding_txo); @@ -662,9 +706,7 @@ where monitor_name.as_str(), &monitor_bytes, ) { - Ok(_) => { - chain::ChannelMonitorUpdateStatus::Completed - } + Ok(_) => chain::ChannelMonitorUpdateStatus::Completed, Err(e) => { log_error!( self.logger, @@ -675,7 +717,7 @@ where e ); chain::ChannelMonitorUpdateStatus::UnrecoverableError - } + }, } } @@ -690,7 +732,7 @@ where /// - The update is at [`CLOSED_CHANNEL_UPDATE_ID`] fn update_persisted_channel( &self, funding_txo: OutPoint, update: Option<&ChannelMonitorUpdate>, - monitor: &ChannelMonitor + monitor: &ChannelMonitor, ) -> chain::ChannelMonitorUpdateStatus { if let Some(update) = update { if update.update_id != CLOSED_CHANNEL_UPDATE_ID @@ -715,7 +757,7 @@ where e ); chain::ChannelMonitorUpdateStatus::UnrecoverableError - } + }, } } else { let monitor_name = MonitorName::from(funding_txo); @@ -723,29 +765,30 @@ where // the new one in order to determine the cleanup range. let maybe_old_monitor = match monitor.get_latest_update_id() { CLOSED_CHANNEL_UPDATE_ID => self.read_monitor(&monitor_name).ok(), - _ => None + _ => None, }; // We could write this update, but it meets criteria of our design that calls for a full monitor write. let monitor_update_status = self.persist_new_channel(funding_txo, monitor); if let chain::ChannelMonitorUpdateStatus::Completed = monitor_update_status { - let cleanup_range = if monitor.get_latest_update_id() == CLOSED_CHANNEL_UPDATE_ID { - // If there is an error while reading old monitor, we skip clean up. - maybe_old_monitor.map(|(_, ref old_monitor)| { - let start = old_monitor.get_latest_update_id(); - // We never persist an update with update_id = CLOSED_CHANNEL_UPDATE_ID - let end = cmp::min( - start.saturating_add(self.maximum_pending_updates), - CLOSED_CHANNEL_UPDATE_ID - 1, - ); - (start, end) - }) - } else { - let end = monitor.get_latest_update_id(); - let start = end.saturating_sub(self.maximum_pending_updates); - Some((start, end)) - }; + let cleanup_range = + if monitor.get_latest_update_id() == CLOSED_CHANNEL_UPDATE_ID { + // If there is an error while reading old monitor, we skip clean up. 
+ maybe_old_monitor.map(|(_, ref old_monitor)| { + let start = old_monitor.get_latest_update_id(); + // We never persist an update with update_id = CLOSED_CHANNEL_UPDATE_ID + let end = cmp::min( + start.saturating_add(self.maximum_pending_updates), + CLOSED_CHANNEL_UPDATE_ID - 1, + ); + (start, end) + }) + } else { + let end = monitor.get_latest_update_id(); + let start = end.saturating_sub(self.maximum_pending_updates); + Some((start, end)) + }; if let Some((start, end)) = cleanup_range { self.cleanup_in_range(monitor_name, start, end); @@ -765,7 +808,7 @@ where let monitor_key = monitor_name.as_str().to_string(); let monitor = match self.read_channel_monitor_with_updates(monitor_key) { Ok((_block_hash, monitor)) => monitor, - Err(_) => return + Err(_) => return, }; match self.kv_store.write( ARCHIVED_CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE, @@ -773,7 +816,7 @@ where monitor_name.as_str(), &monitor.encode(), ) { - Ok(()) => {} + Ok(()) => {}, Err(_e) => return, }; let _ = self.kv_store.remove( @@ -785,14 +828,15 @@ where } } -impl MonitorUpdatingPersister +impl + MonitorUpdatingPersister where ES::Target: EntropySource + Sized, K::Target: KVStore, L::Target: Logger, SP::Target: SignerProvider + Sized, BI::Target: BroadcasterInterface, - FE::Target: FeeEstimator + FE::Target: FeeEstimator, { // Cleans up monitor updates for given monitor in range `start..=end`. fn cleanup_in_range(&self, monitor_name: MonitorName, start: u64, end: u64) { @@ -883,7 +927,7 @@ impl UpdateName { Ok(u) => Ok(u.into()), Err(_) => { Err(io::Error::new(io::ErrorKind::InvalidData, "cannot parse u64 from update name")) - } + }, } } @@ -905,10 +949,10 @@ mod tests { use crate::chain::ChannelMonitorUpdateStatus; use crate::events::{ClosureReason, MessageSendEventsProvider}; use crate::ln::functional_test_utils::*; - use crate::util::test_utils::{self, TestLogger, TestStore}; - use crate::{check_added_monitors, check_closed_broadcast}; use crate::sync::Arc; use crate::util::test_channel_signer::TestChannelSigner; + use crate::util::test_utils::{self, TestLogger, TestStore}; + use crate::{check_added_monitors, check_closed_broadcast}; const EXPECTED_UPDATES_PER_PAYMENT: u64 = 5; @@ -928,23 +972,44 @@ mod tests { #[test] fn monitor_from_outpoint_works() { let monitor_name1 = MonitorName::from(OutPoint { - txid: Txid::from_str("deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef").unwrap(), + txid: Txid::from_str( + "deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef", + ) + .unwrap(), index: 1, }); - assert_eq!(monitor_name1.as_str(), "deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1"); + assert_eq!( + monitor_name1.as_str(), + "deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1" + ); let monitor_name2 = MonitorName::from(OutPoint { - txid: Txid::from_str("f33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeef").unwrap(), + txid: Txid::from_str( + "f33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeef", + ) + .unwrap(), index: u16::MAX, }); - assert_eq!(monitor_name2.as_str(), "f33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeef_65535"); + assert_eq!( + monitor_name2.as_str(), + "f33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeef_65535" + ); } #[test] fn bad_monitor_string_fails() { - assert!(MonitorName::new("deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef".to_string()).is_err()); - 
assert!(MonitorName::new("deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_65536".to_string()).is_err()); - assert!(MonitorName::new("deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_21".to_string()).is_err()); + assert!(MonitorName::new( + "deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef".to_string() + ) + .is_err()); + assert!(MonitorName::new( + "deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_65536".to_string() + ) + .is_err()); + assert!(MonitorName::new( + "deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_21".to_string() + ) + .is_err()); } // Exercise the `MonitorUpdatingPersister` with real channels and payments. @@ -997,15 +1062,18 @@ mod tests { // Check that the persisted channel data is empty before any channels are // open. - let mut persisted_chan_data_0 = persister_0.read_all_channel_monitors_with_updates().unwrap(); + let mut persisted_chan_data_0 = + persister_0.read_all_channel_monitors_with_updates().unwrap(); assert_eq!(persisted_chan_data_0.len(), 0); - let mut persisted_chan_data_1 = persister_1.read_all_channel_monitors_with_updates().unwrap(); + let mut persisted_chan_data_1 = + persister_1.read_all_channel_monitors_with_updates().unwrap(); assert_eq!(persisted_chan_data_1.len(), 0); // Helper to make sure the channel is on the expected update ID. macro_rules! check_persisted_data { ($expected_update_id: expr) => { - persisted_chan_data_0 = persister_0.read_all_channel_monitors_with_updates().unwrap(); + persisted_chan_data_0 = + persister_0.read_all_channel_monitors_with_updates().unwrap(); // check that we stored only one monitor assert_eq!(persisted_chan_data_0.len(), 1); for (_, mon) in persisted_chan_data_0.iter() { @@ -1015,26 +1083,41 @@ mod tests { // if the CM is at consolidation threshold, ensure no updates are stored. let monitor_name = MonitorName::from(mon.get_funding_txo().0); if mon.get_latest_update_id() % persister_0_max_pending_updates == 0 - || mon.get_latest_update_id() == CLOSED_CHANNEL_UPDATE_ID { + || mon.get_latest_update_id() == CLOSED_CHANNEL_UPDATE_ID + { assert_eq!( - persister_0.kv_store.list(CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, - monitor_name.as_str()).unwrap().len(), + persister_0 + .kv_store + .list( + CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, + monitor_name.as_str() + ) + .unwrap() + .len(), 0, "updates stored when they shouldn't be in persister 0" ); } } - persisted_chan_data_1 = persister_1.read_all_channel_monitors_with_updates().unwrap(); + persisted_chan_data_1 = + persister_1.read_all_channel_monitors_with_updates().unwrap(); assert_eq!(persisted_chan_data_1.len(), 1); for (_, mon) in persisted_chan_data_1.iter() { assert_eq!(mon.get_latest_update_id(), $expected_update_id); let monitor_name = MonitorName::from(mon.get_funding_txo().0); // if the CM is at consolidation threshold, ensure no updates are stored. 
if mon.get_latest_update_id() % persister_1_max_pending_updates == 0 - || mon.get_latest_update_id() == CLOSED_CHANNEL_UPDATE_ID { + || mon.get_latest_update_id() == CLOSED_CHANNEL_UPDATE_ID + { assert_eq!( - persister_1.kv_store.list(CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, - monitor_name.as_str()).unwrap().len(), + persister_1 + .kv_store + .list( + CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, + monitor_name.as_str() + ) + .unwrap() + .len(), 0, "updates stored when they shouldn't be in persister 1" ); @@ -1072,20 +1155,26 @@ mod tests { // Force close because cooperative close doesn't result in any persisted // updates. - let error_message = "Channel force-closed"; - nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap(); + let node_id_1 = nodes[1].node.get_our_node_id(); + let chan_id = nodes[0].node.list_channels()[0].channel_id; + let err_msg = "Channel force-closed".to_string(); + nodes[0].node.force_close_broadcasting_latest_txn(&chan_id, &node_id_1, err_msg).unwrap(); - check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, false, &[nodes[1].node.get_our_node_id()], 100000); + let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }; + check_closed_event(&nodes[0], 1, reason, false, &[node_id_1], 100000); check_closed_broadcast!(nodes[0], true); check_added_monitors!(nodes[0], 1); let node_txn = nodes[0].tx_broadcaster.txn_broadcast(); assert_eq!(node_txn.len(), 1); - - connect_block(&nodes[1], &create_dummy_block(nodes[0].best_block_hash(), 42, vec![node_txn[0].clone(), node_txn[0].clone()])); + let txn = vec![node_txn[0].clone(), node_txn[0].clone()]; + let dummy_block = create_dummy_block(nodes[0].best_block_hash(), 42, txn); + connect_block(&nodes[1], &dummy_block); check_closed_broadcast!(nodes[1], true); - check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, false, &[nodes[0].node.get_our_node_id()], 100000); + let reason = ClosureReason::CommitmentTxConfirmed; + let node_id_0 = nodes[0].node.get_our_node_id(); + check_closed_event(&nodes[1], 1, reason, false, &[node_id_0], 100000); check_added_monitors!(nodes[1], 1); // Make sure everything is persisted as expected after close. @@ -1096,8 +1185,22 @@ mod tests { let (_, monitor) = &persisted_chan_data[0]; let monitor_name = MonitorName::from(monitor.get_funding_txo().0); // The channel should have 0 updates, as it wrote a full monitor and consolidated. 
- assert_eq!(persister_0.kv_store.list(CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, monitor_name.as_str()).unwrap().len(), 0); - assert_eq!(persister_1.kv_store.list(CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, monitor_name.as_str()).unwrap().len(), 0); + assert_eq!( + persister_0 + .kv_store + .list(CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, monitor_name.as_str()) + .unwrap() + .len(), + 0 + ); + assert_eq!( + persister_1 + .kv_store + .list(CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, monitor_name.as_str()) + .unwrap() + .len(), + 0 + ); } // Test that if the `MonitorUpdatingPersister`'s can't actually write, trying to persist a @@ -1111,14 +1214,19 @@ mod tests { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let chan = create_announced_chan_between_nodes(&nodes, 0, 1); - let error_message = "Channel force-closed"; - nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[0].node.get_our_node_id(), error_message.to_string()).unwrap(); - check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, false, &[nodes[0].node.get_our_node_id()], 100000); + let err_msg = "Channel force-closed".to_string(); + let node_id_0 = nodes[0].node.get_our_node_id(); + nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &node_id_0, err_msg).unwrap(); + let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }; + check_closed_event(&nodes[1], 1, reason, false, &[node_id_0], 100000); { let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap(); let cmu_map = nodes[1].chain_monitor.monitor_updates.lock().unwrap(); let cmu = &cmu_map.get(&added_monitors[0].1.channel_id()).unwrap()[0]; - let test_txo = OutPoint { txid: Txid::from_str("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(), index: 0 }; + let txid = + Txid::from_str("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be") + .unwrap(); + let test_txo = OutPoint { txid, index: 0 }; let ro_persister = MonitorUpdatingPersister { kv_store: &TestStore::new(true), @@ -1132,24 +1240,24 @@ mod tests { match ro_persister.persist_new_channel(test_txo, &added_monitors[0].1) { ChannelMonitorUpdateStatus::UnrecoverableError => { // correct result - } + }, ChannelMonitorUpdateStatus::Completed => { panic!("Completed persisting new channel when shouldn't have") - } + }, ChannelMonitorUpdateStatus::InProgress => { panic!("Returned InProgress when shouldn't have") - } + }, } match ro_persister.update_persisted_channel(test_txo, Some(cmu), &added_monitors[0].1) { ChannelMonitorUpdateStatus::UnrecoverableError => { // correct result - } + }, ChannelMonitorUpdateStatus::Completed => { panic!("Completed persisting new channel when shouldn't have") - } + }, ChannelMonitorUpdateStatus::InProgress => { panic!("Returned InProgress when shouldn't have") - } + }, } added_monitors.clear(); } @@ -1219,7 +1327,12 @@ mod tests { let monitor_name = MonitorName::from(monitor.get_funding_txo().0); persister_0 .kv_store - .write(CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, monitor_name.as_str(), UpdateName::from(1).as_str(), &[0u8; 1]) + .write( + CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, + monitor_name.as_str(), + UpdateName::from(1).as_str(), + &[0u8; 1], + ) .unwrap(); // Do the stale update cleanup @@ -1228,20 +1341,32 @@ mod tests { // Confirm the stale update is unreadable/gone 
assert!(persister_0 .kv_store - .read(CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, monitor_name.as_str(), UpdateName::from(1).as_str()) + .read( + CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, + monitor_name.as_str(), + UpdateName::from(1).as_str() + ) .is_err()); // Force close. - let error_message = "Channel force-closed"; - nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap(); - check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, false, &[nodes[1].node.get_our_node_id()], 100000); + let chan_id = nodes[0].node.list_channels()[0].channel_id; + let node_id_1 = nodes[1].node.get_our_node_id(); + let err_msg = "Channel force-closed".to_string(); + nodes[0].node.force_close_broadcasting_latest_txn(&chan_id, &node_id_1, err_msg).unwrap(); + let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }; + check_closed_event(&nodes[0], 1, reason, false, &[node_id_1], 100000); check_closed_broadcast!(nodes[0], true); check_added_monitors!(nodes[0], 1); // Write an update near u64::MAX persister_0 .kv_store - .write(CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, monitor_name.as_str(), UpdateName::from(u64::MAX - 1).as_str(), &[0u8; 1]) + .write( + CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, + monitor_name.as_str(), + UpdateName::from(u64::MAX - 1).as_str(), + &[0u8; 1], + ) .unwrap(); // Do the stale update cleanup @@ -1250,11 +1375,18 @@ mod tests { // Confirm the stale update is unreadable/gone assert!(persister_0 .kv_store - .read(CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, monitor_name.as_str(), UpdateName::from(u64::MAX - 1).as_str()) + .read( + CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, + monitor_name.as_str(), + UpdateName::from(u64::MAX - 1).as_str() + ) .is_err()); } - fn persist_fn(_persist: P) -> bool where P::Target: Persist { + fn persist_fn(_persist: P) -> bool + where + P::Target: Persist, + { true } diff --git a/rustfmt_excluded_files b/rustfmt_excluded_files index 16bc8200088..d33372ddf1a 100644 --- a/rustfmt_excluded_files +++ b/rustfmt_excluded_files @@ -72,19 +72,9 @@ lightning/src/routing/router.rs lightning/src/routing/scoring.rs lightning/src/routing/test_utils.rs lightning/src/routing/utxo.rs -lightning/src/util/atomic_counter.rs -lightning/src/util/base32.rs -lightning/src/util/byte_utils.rs -lightning/src/util/config.rs -lightning/src/util/errors.rs -lightning/src/util/fuzz_wrappers.rs -lightning/src/util/indexed_map.rs lightning/src/util/invoice.rs -lightning/src/util/logger.rs -lightning/src/util/macro_logger.rs lightning/src/util/message_signing.rs lightning/src/util/mod.rs -lightning/src/util/persist.rs lightning/src/util/scid_utils.rs lightning/src/util/ser.rs lightning/src/util/ser_macros.rs