diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index d2145f1b7bf..ae73a8cc584 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -872,6 +872,7 @@ pub fn do_test(data: &[u8], underlying_out: Out) { // looking like probes. }, events::Event::PaymentForwarded { .. } if $node == 1 => {}, + events::Event::ChannelReady { .. } => {}, events::Event::PendingHTLCsForwardable { .. } => { nodes[$node].process_pending_htlc_forwards(); }, diff --git a/lightning-background-processor/src/lib.rs b/lightning-background-processor/src/lib.rs index 76a7cfc9c4c..76b17ffddd3 100644 --- a/lightning-background-processor/src/lib.rs +++ b/lightning-background-processor/src/lib.rs @@ -988,9 +988,12 @@ mod tests { // Set up a background event handler for FundingGenerationReady events. let (sender, receiver) = std::sync::mpsc::sync_channel(1); - let event_handler = move |event: &Event| { - sender.send(handle_funding_generation_ready!(event, channel_value)).unwrap(); + let event_handler = move |event: &Event| match event { + Event::FundingGenerationReady { .. } => sender.send(handle_funding_generation_ready!(event, channel_value)).unwrap(), + Event::ChannelReady { .. } => {}, + _ => panic!("Unexpected event: {:?}", event), }; + let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone())); // Open a channel and check that the FundingGenerationReady event was handled. @@ -1014,7 +1017,12 @@ mod tests { // Set up a background event handler for SpendableOutputs events. let (sender, receiver) = std::sync::mpsc::sync_channel(1); - let event_handler = move |event: &Event| sender.send(event.clone()).unwrap(); + let event_handler = move |event: &Event| match event { + Event::SpendableOutputs { .. } => sender.send(event.clone()).unwrap(), + Event::ChannelReady { .. } => {}, + Event::ChannelClosed { .. } => {}, + _ => panic!("Unexpected event: {:?}", event), + }; let persister = Arc::new(Persister::new(data_dir)); let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone())); @@ -1022,12 +1030,12 @@ mod tests { nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap(); let commitment_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().pop().unwrap(); confirm_transaction_depth(&mut nodes[0], &commitment_tx, BREAKDOWN_TIMEOUT as u32); + let event = receiver .recv_timeout(Duration::from_secs(EVENT_DEADLINE)) - .expect("SpendableOutputs not handled within deadline"); + .expect("Events not handled within deadline"); match event { Event::SpendableOutputs { .. } => {}, - Event::ChannelClosed { .. 
} => {}, _ => panic!("Unexpected event: {:?}", event), } diff --git a/lightning-invoice/src/utils.rs b/lightning-invoice/src/utils.rs index 8b45d5805c9..81b584aa246 100644 --- a/lightning-invoice/src/utils.rs +++ b/lightning-invoice/src/utils.rs @@ -869,6 +869,8 @@ mod test { get_event_msg!(nodes[2], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id()); nodes[0].node.handle_channel_ready(&nodes[2].node.get_our_node_id(), &as_channel_ready); get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[2].node.get_our_node_id()); + expect_channel_ready_event(&nodes[0], &nodes[2].node.get_our_node_id()); + expect_channel_ready_event(&nodes[2], &nodes[0].node.get_our_node_id()); // As `msgs::ChannelUpdate` was never handled for the participating node(s) of the second // channel, the channel will never be assigned any `counterparty.forwarding_info`. @@ -1257,6 +1259,8 @@ mod test { get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[3].node.get_our_node_id()); nodes[3].node.handle_channel_ready(&nodes[1].node.get_our_node_id(), &as_channel_ready); get_event_msg!(nodes[3], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id()); + expect_channel_ready_event(&nodes[1], &nodes[3].node.get_our_node_id()); + expect_channel_ready_event(&nodes[3], &nodes[1].node.get_our_node_id()); // As `msgs::ChannelUpdate` was never handled for the participating node(s) of the third // channel, the channel will never be assigned any `counterparty.forwarding_info`. diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs index a78cf50acb9..0f922302e37 100644 --- a/lightning/src/ln/chanmon_update_fail_tests.rs +++ b/lightning/src/ln/chanmon_update_fail_tests.rs @@ -1916,6 +1916,13 @@ fn do_during_funding_monitor_fail(confirm_a_first: bool, restore_b_before_conf: node.gossip_sync.handle_channel_update(&bs_update).unwrap(); } + if !restore_b_before_lock { + expect_channel_ready_event(&nodes[1], &nodes[0].node.get_our_node_id()); + } else { + expect_channel_ready_event(&nodes[0], &nodes[1].node.get_our_node_id()); + } + + send_payment(&nodes[0], &[&nodes[1]], 8000000); close_channel(&nodes[0], &nodes[1], &channel_id, funding_tx, true); check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure); diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 7af90c4f843..8c8202e3efd 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -244,9 +244,9 @@ enum HTLCUpdateAwaitingACK { /// There are a few "states" and then a number of flags which can be applied: /// We first move through init with OurInitSent -> TheirInitSent -> FundingCreated -> FundingSent. /// TheirChannelReady and OurChannelReady then get set on FundingSent, and when both are set we -/// move on to ChannelFunded. -/// Note that PeerDisconnected can be set on both ChannelFunded and FundingSent. -/// ChannelFunded can then get all remaining flags set on it, until we finish shutdown, then we +/// move on to ChannelReady. +/// Note that PeerDisconnected can be set on both ChannelReady and FundingSent. +/// ChannelReady can then get all remaining flags set on it, until we finish shutdown, then we /// move on to ShutdownComplete, at which point most calls into this channel are disallowed. enum ChannelState { /// Implies we have (or are prepared to) send our open_channel/accept_channel message @@ -262,17 +262,17 @@ enum ChannelState { /// and our counterparty consider the funding transaction confirmed. 
FundingSent = 8, /// Flag which can be set on FundingSent to indicate they sent us a channel_ready message. - /// Once both TheirChannelReady and OurChannelReady are set, state moves on to ChannelFunded. + /// Once both TheirChannelReady and OurChannelReady are set, state moves on to ChannelReady. TheirChannelReady = 1 << 4, /// Flag which can be set on FundingSent to indicate we sent them a channel_ready message. - /// Once both TheirChannelReady and OurChannelReady are set, state moves on to ChannelFunded. + /// Once both TheirChannelReady and OurChannelReady are set, state moves on to ChannelReady. OurChannelReady = 1 << 5, - ChannelFunded = 64, - /// Flag which is set on ChannelFunded and FundingSent indicating remote side is considered + ChannelReady = 64, + /// Flag which is set on ChannelReady and FundingSent indicating remote side is considered /// "disconnected" and no updates are allowed until after we've done a channel_reestablish /// dance. PeerDisconnected = 1 << 7, - /// Flag which is set on ChannelFunded, FundingCreated, and FundingSent indicating the user has + /// Flag which is set on ChannelReady, FundingCreated, and FundingSent indicating the user has /// told us a ChannelMonitor update is pending async persistence somewhere and we should pause /// sending any outbound messages until they've managed to finish. MonitorUpdateInProgress = 1 << 8, @@ -281,13 +281,13 @@ enum ChannelState { /// messages as then we will be unable to determine which HTLCs they included in their /// revoke_and_ack implicit ACK, so instead we have to hold them away temporarily to be sent /// later. - /// Flag is set on ChannelFunded. + /// Flag is set on ChannelReady. AwaitingRemoteRevoke = 1 << 9, - /// Flag which is set on ChannelFunded or FundingSent after receiving a shutdown message from + /// Flag which is set on ChannelReady or FundingSent after receiving a shutdown message from /// the remote end. If set, they may not add any new HTLCs to the channel, and we are expected /// to respond with our own shutdown message when possible. RemoteShutdownSent = 1 << 10, - /// Flag which is set on ChannelFunded or FundingSent after sending a shutdown message. At this + /// Flag which is set on ChannelReady or FundingSent after sending a shutdown message. At this /// point, we may not add any new HTLCs to the channel. LocalShutdownSent = 1 << 11, /// We've successfully negotiated a closing_signed dance. At this point ChannelManager is about @@ -736,6 +736,9 @@ pub(super) struct Channel { // don't currently support node id aliases and eventually privacy should be provided with // blinded paths instead of simple scid+node_id aliases. outbound_scid_alias: u64, + + // We track whether we already emitted a `ChannelReady` event. 
+ channel_ready_event_emitted: bool, } #[cfg(any(test, fuzzing))] @@ -1063,6 +1066,8 @@ impl Channel { latest_inbound_scid_alias: None, outbound_scid_alias, + channel_ready_event_emitted: false, + #[cfg(any(test, fuzzing))] historical_inbound_htlc_fulfills: HashSet::new(), @@ -1397,6 +1402,8 @@ impl Channel { latest_inbound_scid_alias: None, outbound_scid_alias, + channel_ready_event_emitted: false, + #[cfg(any(test, fuzzing))] historical_inbound_htlc_fulfills: HashSet::new(), @@ -1786,11 +1793,11 @@ impl Channel { } fn get_update_fulfill_htlc(&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L) -> UpdateFulfillFetch where L::Target: Logger { - // Either ChannelFunded got set (which means it won't be unset) or there is no way any + // Either ChannelReady got set (which means it won't be unset) or there is no way any // caller thought we could have something claimed (cause we wouldn't have accepted in an // incoming HTLC anyway). If we got to ShutdownComplete, callers aren't allowed to call us, // either. - if (self.channel_state & (ChannelState::ChannelFunded as u32)) != (ChannelState::ChannelFunded as u32) { + if (self.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) { panic!("Was asked to fulfill an HTLC when channel was not in an operational state"); } assert_eq!(self.channel_state & ChannelState::ShutdownComplete as u32, 0); @@ -1933,7 +1940,7 @@ impl Channel { /// If we do fail twice, we debug_assert!(false) and return Ok(None). Thus, will always return /// Ok(_) if debug assertions are turned on or preconditions are met. pub fn get_update_fail_htlc(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, logger: &L) -> Result, ChannelError> where L::Target: Logger { - if (self.channel_state & (ChannelState::ChannelFunded as u32)) != (ChannelState::ChannelFunded as u32) { + if (self.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) { panic!("Was asked to fail an HTLC when channel was not in an operational state"); } assert_eq!(self.channel_state & ChannelState::ShutdownComplete as u32, 0); @@ -2349,9 +2356,9 @@ impl Channel { if non_shutdown_state == ChannelState::FundingSent as u32 { self.channel_state |= ChannelState::TheirChannelReady as u32; } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::OurChannelReady as u32) { - self.channel_state = ChannelState::ChannelFunded as u32 | (self.channel_state & MULTI_STATE_FLAGS); + self.channel_state = ChannelState::ChannelReady as u32 | (self.channel_state & MULTI_STATE_FLAGS); self.update_time_counter += 1; - } else if self.channel_state & (ChannelState::ChannelFunded as u32) != 0 || + } else if self.channel_state & (ChannelState::ChannelReady as u32) != 0 || // If we reconnected before sending our `channel_ready` they may still resend theirs: (self.channel_state & (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32) == (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32)) @@ -2712,12 +2719,12 @@ impl Channel { pub fn update_add_htlc(&mut self, msg: &msgs::UpdateAddHTLC, mut pending_forward_status: PendingHTLCStatus, create_pending_htlc_status: F, logger: &L) -> Result<(), ChannelError> where F: for<'a> Fn(&'a Self, PendingHTLCStatus, u16) -> PendingHTLCStatus, L::Target: Logger { // We can't accept HTLCs sent after we've sent a shutdown. 
- let local_sent_shutdown = (self.channel_state & (ChannelState::ChannelFunded as u32 | ChannelState::LocalShutdownSent as u32)) != (ChannelState::ChannelFunded as u32); + let local_sent_shutdown = (self.channel_state & (ChannelState::ChannelReady as u32 | ChannelState::LocalShutdownSent as u32)) != (ChannelState::ChannelReady as u32); if local_sent_shutdown { pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x4000|8); } // If the remote has sent a shutdown prior to adding this HTLC, then they are in violation of the spec. - let remote_sent_shutdown = (self.channel_state & (ChannelState::ChannelFunded as u32 | ChannelState::RemoteShutdownSent as u32)) != (ChannelState::ChannelFunded as u32); + let remote_sent_shutdown = (self.channel_state & (ChannelState::ChannelReady as u32 | ChannelState::RemoteShutdownSent as u32)) != (ChannelState::ChannelReady as u32); if remote_sent_shutdown { return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned())); } @@ -2894,7 +2901,7 @@ impl Channel { } pub fn update_fulfill_htlc(&mut self, msg: &msgs::UpdateFulfillHTLC) -> Result<(HTLCSource, u64), ChannelError> { - if (self.channel_state & (ChannelState::ChannelFunded as u32)) != (ChannelState::ChannelFunded as u32) { + if (self.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) { return Err(ChannelError::Close("Got fulfill HTLC message when channel was not in an operational state".to_owned())); } if self.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 { @@ -2905,7 +2912,7 @@ impl Channel { } pub fn update_fail_htlc(&mut self, msg: &msgs::UpdateFailHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> { - if (self.channel_state & (ChannelState::ChannelFunded as u32)) != (ChannelState::ChannelFunded as u32) { + if (self.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) { return Err(ChannelError::Close("Got fail HTLC message when channel was not in an operational state".to_owned())); } if self.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 { @@ -2917,7 +2924,7 @@ impl Channel { } pub fn update_fail_malformed_htlc(&mut self, msg: &msgs::UpdateFailMalformedHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> { - if (self.channel_state & (ChannelState::ChannelFunded as u32)) != (ChannelState::ChannelFunded as u32) { + if (self.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) { return Err(ChannelError::Close("Got fail malformed HTLC message when channel was not in an operational state".to_owned())); } if self.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 { @@ -2931,7 +2938,7 @@ impl Channel { pub fn commitment_signed(&mut self, msg: &msgs::CommitmentSigned, logger: &L) -> Result<(msgs::RevokeAndACK, Option, ChannelMonitorUpdate), (Option, ChannelError)> where L::Target: Logger { - if (self.channel_state & (ChannelState::ChannelFunded as u32)) != (ChannelState::ChannelFunded as u32) { + if (self.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) { return Err((None, ChannelError::Close("Got commitment signed message when channel was not in an operational state".to_owned()))); } if self.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 { @@ -3124,7 +3131,7 
@@ impl Channel { /// If we're not in a state where freeing the holding cell makes sense, this is a no-op and /// returns `(None, Vec::new())`. pub fn maybe_free_holding_cell_htlcs(&mut self, logger: &L) -> Result<(Option<(msgs::CommitmentUpdate, ChannelMonitorUpdate)>, Vec<(HTLCSource, PaymentHash)>), ChannelError> where L::Target: Logger { - if self.channel_state >= ChannelState::ChannelFunded as u32 && + if self.channel_state >= ChannelState::ChannelReady as u32 && (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) == 0 { self.free_holding_cell_htlcs(logger) } else { Ok((None, Vec::new())) } @@ -3252,7 +3259,7 @@ impl Channel { pub fn revoke_and_ack(&mut self, msg: &msgs::RevokeAndACK, logger: &L) -> Result where L::Target: Logger, { - if (self.channel_state & (ChannelState::ChannelFunded as u32)) != (ChannelState::ChannelFunded as u32) { + if (self.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) { return Err(ChannelError::Close("Got revoke/ACK message when channel was not in an operational state".to_owned())); } if self.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 { @@ -3696,7 +3703,7 @@ impl Channel { } else { None }; // That said, if the funding transaction is already confirmed (ie we're active with a // minimum_depth over 0) don't bother re-broadcasting the confirmed funding tx. - if self.channel_state & !MULTI_STATE_FLAGS >= ChannelState::ChannelFunded as u32 && self.minimum_depth != Some(0) { + if self.channel_state & !MULTI_STATE_FLAGS >= ChannelState::ChannelReady as u32 && self.minimum_depth != Some(0) { funding_broadcastable = None; } @@ -4598,6 +4605,16 @@ impl Channel { self.prev_config.map(|prev_config| prev_config.0) } + // Checks whether we should emit a `ChannelReady` event. + pub(crate) fn should_emit_channel_ready_event(&mut self) -> bool { + self.is_usable() && !self.channel_ready_event_emitted + } + + // Remembers that we already emitted a `ChannelReady` event. + pub(crate) fn set_channel_ready_event_emitted(&mut self) { + self.channel_ready_event_emitted = true; + } + /// Tracks the number of ticks elapsed since the previous [`ChannelConfig`] was updated. Once /// [`EXPIRE_PREV_CONFIG_TICKS`] is reached, the previous config is considered expired and will /// no longer be considered when forwarding HTLCs. @@ -4766,8 +4783,8 @@ impl Channel { /// Returns true if this channel is fully established and not known to be closing. /// Allowed in any state (including after shutdown) pub fn is_usable(&self) -> bool { - let mask = ChannelState::ChannelFunded as u32 | BOTH_SIDES_SHUTDOWN_MASK; - (self.channel_state & mask) == (ChannelState::ChannelFunded as u32) && !self.monitor_pending_channel_ready + let mask = ChannelState::ChannelReady as u32 | BOTH_SIDES_SHUTDOWN_MASK; + (self.channel_state & mask) == (ChannelState::ChannelReady as u32) && !self.monitor_pending_channel_ready } /// Returns true if this channel is currently available for use. 
This is a superset of @@ -4826,7 +4843,7 @@ impl Channel { /// Returns true if our channel_ready has been sent pub fn is_our_channel_ready(&self) -> bool { - (self.channel_state & ChannelState::OurChannelReady as u32) != 0 || self.channel_state >= ChannelState::ChannelFunded as u32 + (self.channel_state & ChannelState::OurChannelReady as u32) != 0 || self.channel_state >= ChannelState::ChannelReady as u32 } /// Returns true if our peer has either initiated or agreed to shut down the channel. @@ -4880,14 +4897,14 @@ impl Channel { self.channel_state |= ChannelState::OurChannelReady as u32; true } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32) { - self.channel_state = ChannelState::ChannelFunded as u32 | (self.channel_state & MULTI_STATE_FLAGS); + self.channel_state = ChannelState::ChannelReady as u32 | (self.channel_state & MULTI_STATE_FLAGS); self.update_time_counter += 1; true } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::OurChannelReady as u32) { // We got a reorg but not enough to trigger a force close, just ignore. false } else { - if self.funding_tx_confirmation_height != 0 && self.channel_state < ChannelState::ChannelFunded as u32 { + if self.funding_tx_confirmation_height != 0 && self.channel_state < ChannelState::ChannelReady as u32 { // We should never see a funding transaction on-chain until we've received // funding_signed (if we're an outbound channel), or seen funding_generated (if we're // an inbound channel - before that we have no known funding TXID). The fuzzer, @@ -5031,7 +5048,7 @@ impl Channel { } let non_shutdown_state = self.channel_state & (!MULTI_STATE_FLAGS); - if non_shutdown_state >= ChannelState::ChannelFunded as u32 || + if non_shutdown_state >= ChannelState::ChannelReady as u32 || (non_shutdown_state & ChannelState::OurChannelReady as u32) == ChannelState::OurChannelReady as u32 { let mut funding_tx_confirmations = height as i64 - self.funding_tx_confirmation_height as i64 + 1; if self.funding_tx_confirmation_height == 0 { @@ -5059,7 +5076,7 @@ impl Channel { height >= self.channel_creation_height + FUNDING_CONF_DEADLINE_BLOCKS { log_info!(logger, "Closing channel {} due to funding timeout", log_bytes!(self.channel_id)); // If funding_tx_confirmed_in is unset, the channel must not be active - assert!(non_shutdown_state <= ChannelState::ChannelFunded as u32); + assert!(non_shutdown_state <= ChannelState::ChannelReady as u32); assert_eq!(non_shutdown_state & ChannelState::OurChannelReady as u32, 0); return Err(ClosureReason::FundingTimedOut); } @@ -5484,7 +5501,7 @@ impl Channel { /// /// If an Err is returned, it's a ChannelError::Ignore! pub fn send_htlc(&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource, onion_routing_packet: msgs::OnionPacket, logger: &L) -> Result, ChannelError> where L::Target: Logger { - if (self.channel_state & (ChannelState::ChannelFunded as u32 | BOTH_SIDES_SHUTDOWN_MASK)) != (ChannelState::ChannelFunded as u32) { + if (self.channel_state & (ChannelState::ChannelReady as u32 | BOTH_SIDES_SHUTDOWN_MASK)) != (ChannelState::ChannelReady as u32) { return Err(ChannelError::Ignore("Cannot send HTLC until channel is fully established and we haven't started shutting down".to_owned())); } let channel_total_msat = self.channel_value_satoshis * 1000; @@ -5617,7 +5634,7 @@ impl Channel { /// last call to this Channel) send_htlc returned Ok(Some(_)) and there is an Err. 
/// May panic if called except immediately after a successful, Ok(Some(_))-returning send_htlc. pub fn send_commitment(&mut self, logger: &L) -> Result<(msgs::CommitmentSigned, ChannelMonitorUpdate), ChannelError> where L::Target: Logger { - if (self.channel_state & (ChannelState::ChannelFunded as u32)) != (ChannelState::ChannelFunded as u32) { + if (self.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) { panic!("Cannot create commitment tx until channel is fully established"); } if (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == (ChannelState::AwaitingRemoteRevoke as u32) { @@ -5906,7 +5923,7 @@ impl Channel { // funding transaction, don't return a funding txo (which prevents providing the // monitor update to the user, even if we return one). // See test_duplicate_chan_id and test_pre_lockin_no_chan_closed_update for more. - if self.channel_state & (ChannelState::FundingSent as u32 | ChannelState::ChannelFunded as u32 | ChannelState::ShutdownComplete as u32) != 0 { + if self.channel_state & (ChannelState::FundingSent as u32 | ChannelState::ChannelReady as u32 | ChannelState::ShutdownComplete as u32) != 0 { self.latest_monitor_update_id += 1; Some((funding_txo, ChannelMonitorUpdate { update_id: self.latest_monitor_update_id, @@ -6230,6 +6247,8 @@ impl Writeable for Channel { if self.holder_max_htlc_value_in_flight_msat != Self::get_holder_max_htlc_value_in_flight_msat(self.channel_value_satoshis, &old_max_in_flight_percent_config) { Some(self.holder_max_htlc_value_in_flight_msat) } else { None }; + let channel_ready_event_emitted = Some(self.channel_ready_event_emitted); + write_tlv_fields!(writer, { (0, self.announcement_sigs, option), // minimum_depth and counterparty_selected_channel_reserve_satoshis used to have a @@ -6252,6 +6271,7 @@ impl Writeable for Channel { (17, self.announcement_sigs_state, required), (19, self.latest_inbound_scid_alias, option), (21, self.outbound_scid_alias, required), + (23, channel_ready_event_emitted, option), }); Ok(()) @@ -6509,6 +6529,7 @@ impl<'a, Signer: Sign, K: Deref> ReadableArgs<(&'a K, u32)> for Channel let mut announcement_sigs_state = Some(AnnouncementSigsState::NotSent); let mut latest_inbound_scid_alias = None; let mut outbound_scid_alias = None; + let mut channel_ready_event_emitted = None; read_tlv_fields!(reader, { (0, announcement_sigs, option), @@ -6526,6 +6547,7 @@ impl<'a, Signer: Sign, K: Deref> ReadableArgs<(&'a K, u32)> for Channel (17, announcement_sigs_state, option), (19, latest_inbound_scid_alias, option), (21, outbound_scid_alias, option), + (23, channel_ready_event_emitted, option), }); if let Some(preimages) = preimages_opt { @@ -6666,6 +6688,8 @@ impl<'a, Signer: Sign, K: Deref> ReadableArgs<(&'a K, u32)> for Channel // Later in the ChannelManager deserialization phase we scan for channels and assign scid aliases if its missing outbound_scid_alias: outbound_scid_alias.unwrap_or(0), + channel_ready_event_emitted: channel_ready_event_emitted.unwrap_or(true), + #[cfg(any(test, fuzzing))] historical_inbound_htlc_fulfills, diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 358af825dd4..9aedbb43781 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -1466,6 +1466,23 @@ macro_rules! send_channel_ready { } } +macro_rules! 
emit_channel_ready_event { + ($self: expr, $channel: expr) => { + if $channel.should_emit_channel_ready_event() { + { + let mut pending_events = $self.pending_events.lock().unwrap(); + pending_events.push(events::Event::ChannelReady { + channel_id: $channel.channel_id(), + user_channel_id: $channel.get_user_id(), + counterparty_node_id: $channel.get_counterparty_node_id(), + channel_type: $channel.get_channel_type().clone(), + }); + } + $channel.set_channel_ready_event_emitted(); + } + } +} + macro_rules! handle_chan_restoration_locked { ($self: ident, $channel_lock: expr, $channel_state: expr, $channel_entry: expr, $raa: expr, $commitment_update: expr, $order: expr, $chanmon_update: expr, @@ -1509,6 +1526,8 @@ macro_rules! handle_chan_restoration_locked { }); } + emit_channel_ready_event!($self, $channel_entry.get_mut()); + let funding_broadcastable: Option = $funding_broadcastable; // Force type-checking to resolve if let Some(monitor_update) = chanmon_update { // We only ever broadcast a funding transaction in response to a funding_signed @@ -4672,6 +4691,9 @@ impl ChannelManager Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id)) @@ -5829,6 +5851,9 @@ where log_trace!(self.logger, "Sending channel_ready WITHOUT channel_update for {}", log_bytes!(channel.channel_id())); } } + + emit_channel_ready_event!(self, channel); + if let Some(announcement_sigs) = announcement_sigs { log_trace!(self.logger, "Sending announcement_signatures for channel {}", log_bytes!(channel.channel_id())); pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures { @@ -7981,6 +8006,24 @@ pub mod bench { _ => panic!(), } + let events_a = node_a.get_and_clear_pending_events(); + assert_eq!(events_a.len(), 1); + match events_a[0] { + Event::ChannelReady{ ref counterparty_node_id, .. } => { + assert_eq!(*counterparty_node_id, node_b.get_our_node_id()); + }, + _ => panic!("Unexpected event"), + } + + let events_b = node_b.get_and_clear_pending_events(); + assert_eq!(events_b.len(), 1); + match events_b[0] { + Event::ChannelReady{ ref counterparty_node_id, .. 
} => { + assert_eq!(*counterparty_node_id, node_a.get_our_node_id()); + }, + _ => panic!("Unexpected event"), + } + let dummy_graph = NetworkGraph::new(genesis_hash, &logger_a); let mut payment_count: u64 = 0; diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index 84fe089d221..e19c87e7b8e 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -741,6 +741,9 @@ pub fn open_zero_conf_channel<'a, 'b, 'c, 'd>(initiator: &'a Node<'b, 'c, 'd>, r assert_eq!(initiator.node.list_usable_channels().len(), initiator_channels + 1); assert_eq!(receiver.node.list_usable_channels().len(), receiver_channels + 1); + expect_channel_ready_event(&initiator, &receiver.node.get_our_node_id()); + expect_channel_ready_event(&receiver, &initiator.node.get_our_node_id()); + (tx, as_channel_ready.channel_id) } @@ -794,6 +797,7 @@ pub fn create_chan_between_nodes_with_value_confirm<'a, 'b, 'c, 'd>(node_a: &'a create_chan_between_nodes_with_value_confirm_first(node_a, node_b, tx, conf_height); confirm_transaction_at(node_a, tx, conf_height); connect_blocks(node_a, CHAN_CONFIRM_DEPTH - 1); + expect_channel_ready_event(&node_a, &node_b.node.get_our_node_id()); create_chan_between_nodes_with_value_confirm_second(node_b, node_a) } @@ -832,6 +836,7 @@ pub fn create_chan_between_nodes_with_value_b<'a, 'b, 'c>(node_a: &Node<'a, 'b, *node_a.network_chan_count.borrow_mut() += 1; + expect_channel_ready_event(&node_b, &node_a.node.get_our_node_id()); ((*announcement).clone(), (*as_update).clone(), (*bs_update).clone()) } @@ -870,8 +875,10 @@ pub fn create_unannounced_chan_between_nodes_with_value<'a, 'b, 'c, 'd>(nodes: & connect_blocks(&nodes[b], CHAN_CONFIRM_DEPTH - 1); let as_channel_ready = get_event_msg!(nodes[a], MessageSendEvent::SendChannelReady, nodes[b].node.get_our_node_id()); nodes[a].node.handle_channel_ready(&nodes[b].node.get_our_node_id(), &get_event_msg!(nodes[b], MessageSendEvent::SendChannelReady, nodes[a].node.get_our_node_id())); + expect_channel_ready_event(&nodes[a], &nodes[b].node.get_our_node_id()); let as_update = get_event_msg!(nodes[a], MessageSendEvent::SendChannelUpdate, nodes[b].node.get_our_node_id()); nodes[b].node.handle_channel_ready(&nodes[a].node.get_our_node_id(), &as_channel_ready); + expect_channel_ready_event(&nodes[b], &nodes[a].node.get_our_node_id()); let bs_update = get_event_msg!(nodes[b], MessageSendEvent::SendChannelUpdate, nodes[a].node.get_our_node_id()); nodes[a].node.handle_channel_update(&nodes[b].node.get_our_node_id(), &bs_update); @@ -1503,6 +1510,19 @@ macro_rules! expect_payment_forwarded { } } +#[cfg(any(test, feature = "_bench_unstable", feature = "_test_utils"))] +pub fn expect_channel_ready_event<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, expected_counterparty_node_id: &PublicKey) { + let events = node.node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + match events[0] { + crate::util::events::Event::ChannelReady{ ref counterparty_node_id, .. 
} => { + assert_eq!(*expected_counterparty_node_id, *counterparty_node_id); + }, + _ => panic!("Unexpected event"), + } +} + + pub struct PaymentFailedConditions<'a> { pub(crate) expected_htlc_error_data: Option<(u16, &'a [u8])>, pub(crate) expected_blamed_scid: Option, diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index eb6d7032fee..969ab89cf58 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -558,6 +558,7 @@ fn do_test_sanity_on_in_flight_opens(steps: u8) { confirm_transaction_at(&nodes[0], &tx, 2); connect_blocks(&nodes[0], CHAN_CONFIRM_DEPTH); create_chan_between_nodes_with_value_confirm_second(&nodes[1], &nodes[0]); + expect_channel_ready_event(&nodes[0], &nodes[1].node.get_our_node_id()); } #[test] @@ -3711,11 +3712,23 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken } let events_1 = nodes[1].node.get_and_clear_pending_events(); - assert_eq!(events_1.len(), 1); - match events_1[0] { - Event::PendingHTLCsForwardable { .. } => { }, - _ => panic!("Unexpected event"), - }; + if messages_delivered == 0 { + assert_eq!(events_1.len(), 2); + match events_1[0] { + Event::ChannelReady { .. } => { }, + _ => panic!("Unexpected event"), + }; + match events_1[1] { + Event::PendingHTLCsForwardable { .. } => { }, + _ => panic!("Unexpected event"), + }; + } else { + assert_eq!(events_1.len(), 1); + match events_1[0] { + Event::PendingHTLCsForwardable { .. } => { }, + _ => panic!("Unexpected event"), + }; + } nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); @@ -3952,6 +3965,8 @@ fn test_funding_peer_disconnect() { }, _ => panic!("Unexpected event {:?}", events_6[0]), }; + expect_channel_ready_event(&nodes[0], &nodes[1].node.get_our_node_id()); + expect_channel_ready_event(&nodes[1], &nodes[0].node.get_our_node_id()); // When we deliver nodes[1]'s announcement_signatures to nodes[0], nodes[0] should immediately // broadcast the channel announcement globally, as well as re-send its (now-public) @@ -9429,6 +9444,7 @@ fn test_duplicate_chan_id() { let (channel_ready, _) = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &tx); let (announcement, as_update, bs_update) = create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &channel_ready); update_nodes_with_chan_announce(&nodes, 0, 1, &announcement, &as_update, &bs_update); + send_payment(&nodes[0], &[&nodes[1]], 8000000); } diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs index cdc0b83894a..92db5a22254 100644 --- a/lightning/src/ln/payment_tests.rs +++ b/lightning/src/ln/payment_tests.rs @@ -483,6 +483,7 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) { nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &htlc_fulfill_updates.update_fulfill_htlcs[0]); check_added_monitors!(nodes[1], 1); commitment_signed_dance!(nodes[1], nodes[2], htlc_fulfill_updates.commitment_signed, false); + expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], None, false, false); if confirm_before_reload { let best_block = nodes[0].blocks.lock().unwrap().last().unwrap().clone(); @@ -499,7 +500,6 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) { let bs_htlc_claim_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); assert_eq!(bs_htlc_claim_txn.len(), 1); check_spends!(bs_htlc_claim_txn[0], as_commitment_tx); - 
expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], None, false, false); if !confirm_before_reload { mine_transaction(&nodes[0], &as_commitment_tx); diff --git a/lightning/src/ln/priv_short_conf_tests.rs b/lightning/src/ln/priv_short_conf_tests.rs index 1f8ffbbfaec..df5ac4bf119 100644 --- a/lightning/src/ln/priv_short_conf_tests.rs +++ b/lightning/src/ln/priv_short_conf_tests.rs @@ -202,6 +202,8 @@ fn do_test_1_conf_open(connect_style: ConnectStyle) { } else { panic!("Unexpected event"); } nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &as_channel_ready); + expect_channel_ready_event(&nodes[0], &nodes[1].node.get_our_node_id()); + expect_channel_ready_event(&nodes[1], &nodes[0].node.get_our_node_id()); let bs_msg_events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(bs_msg_events.len(), 1); if let MessageSendEvent::SendChannelUpdate { ref node_id, msg: _ } = bs_msg_events[0] { @@ -407,8 +409,10 @@ fn test_inbound_scid_privacy() { connect_blocks(&nodes[2], CHAN_CONFIRM_DEPTH - 1); let bs_channel_ready = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReady, nodes[2].node.get_our_node_id()); nodes[1].node.handle_channel_ready(&nodes[2].node.get_our_node_id(), &get_event_msg!(nodes[2], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id())); + expect_channel_ready_event(&nodes[1], &nodes[2].node.get_our_node_id()); let bs_update = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[2].node.get_our_node_id()); nodes[2].node.handle_channel_ready(&nodes[1].node.get_our_node_id(), &bs_channel_ready); + expect_channel_ready_event(&nodes[2], &nodes[1].node.get_our_node_id()); let cs_update = get_event_msg!(nodes[2], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id()); nodes[1].node.handle_channel_update(&nodes[2].node.get_our_node_id(), &cs_update); @@ -672,6 +676,9 @@ fn test_0conf_channel_with_async_monitor() { } _ => panic!("Unexpected event"), } + expect_channel_ready_event(&nodes[0], &nodes[1].node.get_our_node_id()); + expect_channel_ready_event(&nodes[1], &nodes[0].node.get_our_node_id()); + let bs_channel_update = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id()); let as_channel_update = match &as_locked_update[1] { diff --git a/lightning/src/util/events.rs b/lightning/src/util/events.rs index 37a146c3dac..0e04442c5f0 100644 --- a/lightning/src/util/events.rs +++ b/lightning/src/util/events.rs @@ -15,6 +15,7 @@ //! few other things. 
use crate::chain::keysinterface::SpendableOutputDescriptor; +#[cfg(anchors)] use crate::ln::chan_utils::HTLCOutputInCommitment; use crate::ln::channelmanager::PaymentId; use crate::ln::channel::FUNDING_CONF_DEADLINE_BLOCKS; @@ -23,10 +24,12 @@ use crate::ln::msgs; use crate::ln::msgs::DecodeError; use crate::ln::{PaymentPreimage, PaymentHash, PaymentSecret}; use crate::routing::gossip::NetworkUpdate; -use crate::util::ser::{BigSize, FixedLengthReader, Writeable, Writer, MaybeReadable, Readable, VecReadWrapper, VecWriteWrapper}; +use crate::util::ser::{BigSize, FixedLengthReader, Writeable, Writer, MaybeReadable, Readable, VecReadWrapper, VecWriteWrapper, OptionDeserWrapper}; use crate::routing::router::{RouteHop, RouteParameters}; -use bitcoin::{PackedLockTime, Transaction, OutPoint}; +use bitcoin::{PackedLockTime, Transaction}; +#[cfg(anchors)] +use bitcoin::OutPoint; use bitcoin::blockdata::script::Script; use bitcoin::hashes::Hash; use bitcoin::hashes::sha256::Hash as Sha256; @@ -594,6 +597,27 @@ pub enum Event { /// transaction. claim_from_onchain_tx: bool, }, + /// Used to indicate that a channel with the given `channel_id` is ready to + /// be used. This event is emitted either when the funding transaction has been confirmed + /// on-chain, or, in case of a 0conf channel, when both parties have confirmed the channel + /// establishment. + ChannelReady { + /// The channel_id of the channel that is ready. + channel_id: [u8; 32], + /// The `user_channel_id` value passed in to [`ChannelManager::create_channel`] for outbound + /// channels, or to [`ChannelManager::accept_inbound_channel`] for inbound channels if + /// [`UserConfig::manually_accept_inbound_channels`] config flag is set to true. Otherwise + /// `user_channel_id` will be 0 for an inbound channel. + /// + /// [`ChannelManager::create_channel`]: crate::ln::channelmanager::ChannelManager::create_channel + /// [`ChannelManager::accept_inbound_channel`]: crate::ln::channelmanager::ChannelManager::accept_inbound_channel + /// [`UserConfig::manually_accept_inbound_channels`]: crate::util::config::UserConfig::manually_accept_inbound_channels + user_channel_id: u64, + /// The node_id of the channel counterparty. + counterparty_node_id: PublicKey, + /// The features that this channel will operate with. + channel_type: ChannelTypeFeatures, + }, /// Used to indicate that a previously opened channel with the given `channel_id` is in the /// process of closure. ChannelClosed { @@ -858,6 +882,15 @@ impl Writeable for Event { BumpTransactionEvent::ChannelClose { .. } => {} } } + &Event::ChannelReady { ref channel_id, ref user_channel_id, ref counterparty_node_id, ref channel_type } => { + 29u8.write(writer)?; + write_tlv_fields!(writer, { + (0, channel_id, required), + (2, user_channel_id, required), + (4, counterparty_node_id, required), + (6, channel_type, required), + }); + }, // Note that, going forward, all new events must only write data inside of // `write_tlv_fields`. Versions 0.0.101+ will ignore odd-numbered events that write // data via `write_tlv_fields`. 
@@ -1138,6 +1171,29 @@ impl MaybeReadable for Event { }; f() }, + 27u8 => Ok(None), + 29u8 => { + let f = || { + let mut channel_id = [0; 32]; + let mut user_channel_id: u64 = 0; + let mut counterparty_node_id = OptionDeserWrapper(None); + let mut channel_type = OptionDeserWrapper(None); + read_tlv_fields!(reader, { + (0, channel_id, required), + (2, user_channel_id, required), + (4, counterparty_node_id, required), + (6, channel_type, required), + }); + + Ok(Some(Event::ChannelReady { + channel_id, + user_channel_id, + counterparty_node_id: counterparty_node_id.0.unwrap(), + channel_type: channel_type.0.unwrap() + })) + }; + f() + }, // Versions prior to 0.0.100 did not ignore odd types, instead returning InvalidValue. // Version 0.0.100 failed to properly ignore odd types, possibly resulting in corrupt // reads. diff --git a/pending_changelog/1743.txt b/pending_changelog/1743.txt new file mode 100644 index 00000000000..93e41202c79 --- /dev/null +++ b/pending_changelog/1743.txt @@ -0,0 +1,7 @@ +## API Updates +- A new `ChannelReady` event is generated whenever a channel becomes ready to + be used, i.e., after both sides have sent the `channel_ready` message. + +## Backwards Compatibility +- No `ChannelReady` events will be generated for previously existing channels, including + those which become ready after upgrading to 0.0.113.
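For downstream consumers, the practical upshot of this diff is a single new match arm in their event handler, mirroring the handler updates in the fuzz and background-processor tests above. The sketch below is a minimal example assuming the 0.0.113-era module layout (`lightning::util::events::Event`); the `handle_event` function name and the logging body are illustrative only and not part of this change.

```rust
use lightning::util::events::Event;

// Minimal sketch: react to the new variant and fall through for everything else.
fn handle_event(event: &Event) {
    match event {
        Event::ChannelReady { channel_id, user_channel_id, counterparty_node_id, channel_type } => {
            // Both sides have exchanged `channel_ready` (or the 0conf conditions were met),
            // so this channel can now be used to send and forward payments.
            println!(
                "channel {:02x?} (user id {}) with peer {} is ready, type {:?}",
                channel_id, user_channel_id, counterparty_node_id, channel_type
            );
        },
        // Keep existing arms for other events. Handlers that panic on unexpected
        // events, as the tests in this diff do, need this new arm to avoid aborting.
        _ => {},
    }
}
```

Because the `channel_ready_event_emitted` flag is persisted (TLV type 23) and defaults to `true` when absent on read, the event fires at most once per channel and is skipped for channels serialized by older versions, which is exactly the behavior the backwards-compatibility note above describes.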