From 935f84b707793c3100eb127d2de5a6535c5ec41d Mon Sep 17 00:00:00 2001
From: Elias Rohrer
Date: Wed, 9 Apr 2025 13:47:48 +0200
Subject: [PATCH 01/15] `rustfmt`: Prepare `lightning/src/ln/peer_handler.rs`

---
 lightning/src/ln/peer_handler.rs | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

diff --git a/lightning/src/ln/peer_handler.rs b/lightning/src/ln/peer_handler.rs
index 07e63cf576c..c1aad908566 100644
--- a/lightning/src/ln/peer_handler.rs
+++ b/lightning/src/ln/peer_handler.rs
@@ -716,6 +716,7 @@ impl Peer {
 ///
 /// This is not exported to bindings users as type aliases aren't supported in most languages.
 #[cfg(not(c_bindings))]
+#[rustfmt::skip]
 pub type SimpleArcPeerManager = PeerManager<
 	SD,
 	Arc>,
@@ -735,6 +736,7 @@ pub type SimpleArcPeerManager = PeerManager<
 ///
 /// This is not exported to bindings users as type aliases aren't supported in most languages.
 #[cfg(not(c_bindings))]
+#[rustfmt::skip]
 pub type SimpleRefPeerManager<
 	'a, 'b, 'c, 'd, 'e, 'f, 'logger, 'h, 'i, 'j, 'graph, 'k, 'mr, SD, M, T, F, C, L
 > = PeerManager<
@@ -755,6 +757,7 @@ pub type SimpleRefPeerManager<
 /// This is not exported to bindings users as general cover traits aren't useful in other
 /// languages.
 #[allow(missing_docs)]
+#[rustfmt::skip]
 pub trait APeerManager {
 	type Descriptor: SocketDescriptor;
 	type CMT: ChannelMessageHandler + ?Sized;
@@ -773,6 +776,7 @@ pub trait APeerManager {
 	fn as_ref(&self) -> &PeerManager;
 }
 
+#[rustfmt::skip]
 impl
 APeerManager for PeerManager where
 	CM::Target: ChannelMessageHandler,
@@ -3082,7 +3086,9 @@ mod tests {
 		peers
 	}
 
-	fn try_establish_connection<'a>(peer_a: &PeerManager, peer_b: &PeerManager) -> (FileDescriptor, FileDescriptor, Result, Result) {
+	type TestPeer<'a> = PeerManager;
+
+	fn try_establish_connection<'a>(peer_a: &TestPeer<'a>, peer_b: &TestPeer<'a>) -> (FileDescriptor, FileDescriptor, Result, Result) {
 		let addr_a = SocketAddress::TcpIpV4{addr: [127, 0, 0, 1], port: 1000};
 		let addr_b = SocketAddress::TcpIpV4{addr: [127, 0, 0, 1], port: 1001};
 
@@ -3113,7 +3119,7 @@ mod tests {
 	}
 
 
-	fn establish_connection<'a>(peer_a: &PeerManager, peer_b: &PeerManager) -> (FileDescriptor, FileDescriptor) {
+	fn establish_connection<'a>(peer_a: &TestPeer<'a>, peer_b: &TestPeer<'a>) -> (FileDescriptor, FileDescriptor) {
 		let addr_a = SocketAddress::TcpIpV4{addr: [127, 0, 0, 1], port: 1000};
 		let addr_b = SocketAddress::TcpIpV4{addr: [127, 0, 0, 1], port: 1001};

From 5043ba0c4ca9a89b141f2bd3e283bb79bc29ba2e Mon Sep 17 00:00:00 2001
From: Elias Rohrer
Date: Thu, 10 Apr 2025 09:55:29 +0200
Subject: [PATCH 02/15] f Skip some `skip`s

---
 lightning/src/ln/peer_handler.rs | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/lightning/src/ln/peer_handler.rs b/lightning/src/ln/peer_handler.rs
index c1aad908566..db76e036423 100644
--- a/lightning/src/ln/peer_handler.rs
+++ b/lightning/src/ln/peer_handler.rs
@@ -716,7 +716,6 @@ impl Peer {
 ///
 /// This is not exported to bindings users as type aliases aren't supported in most languages.
 #[cfg(not(c_bindings))]
-#[rustfmt::skip]
 pub type SimpleArcPeerManager = PeerManager<
 	SD,
 	Arc>,
@@ -757,7 +756,6 @@ pub type SimpleRefPeerManager<
 /// This is not exported to bindings users as general cover traits aren't useful in other
 /// languages.
 #[allow(missing_docs)]
-#[rustfmt::skip]
 pub trait APeerManager {
 	type Descriptor: SocketDescriptor;
 	type CMT: ChannelMessageHandler + ?Sized;
@@ -776,7 +774,6 @@ pub trait APeerManager {
 	fn as_ref(&self) -> &PeerManager;
 }
 
-#[rustfmt::skip]
 impl
 APeerManager for PeerManager where
 	CM::Target: ChannelMessageHandler,

From 6ae6c8f48c807493b4320f04a774efbad8a6d613 Mon Sep 17 00:00:00 2001
From: Elias Rohrer
Date: Thu, 10 Apr 2025 09:59:00 +0200
Subject: [PATCH 03/15] f Drop empty line

---
 lightning/src/ln/peer_handler.rs | 1 -
 1 file changed, 1 deletion(-)

diff --git a/lightning/src/ln/peer_handler.rs b/lightning/src/ln/peer_handler.rs
index db76e036423..2af43cb1b11 100644
--- a/lightning/src/ln/peer_handler.rs
+++ b/lightning/src/ln/peer_handler.rs
@@ -119,7 +119,6 @@ impl BaseMessageHandler for IgnoringMessageHandler {
 	fn provided_init_features(&self, _their_node_id: PublicKey) -> InitFeatures {
 		InitFeatures::empty()
 	}
-
 	fn get_and_clear_pending_msg_events(&self) -> Vec { Vec::new() }
 }
 impl RoutingMessageHandler for IgnoringMessageHandler {

From 30515922cf4a9b46643f890c54f100817973cb2d Mon Sep 17 00:00:00 2001
From: Elias Rohrer
Date: Thu, 10 Apr 2025 10:06:04 +0200
Subject: [PATCH 04/15] f Pull `filter_fn` out

---
 lightning/src/ln/peer_handler.rs | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/lightning/src/ln/peer_handler.rs b/lightning/src/ln/peer_handler.rs
index 2af43cb1b11..2a2e56e669b 100644
--- a/lightning/src/ln/peer_handler.rs
+++ b/lightning/src/ln/peer_handler.rs
@@ -1041,7 +1041,7 @@ impl
 Vec {
 		let peers = self.peers.read().unwrap();
-		peers.values().filter_map(|peer_mutex| {
+		let filter_fn = |peer_mutex: &Mutex| {
 			let p = peer_mutex.lock().unwrap();
 			if !p.handshake_complete() {
 				return None;
 			}
@@ -1057,7 +1057,8 @@ impl
Date: Thu, 10 Apr 2025 10:08:26 +0200
Subject: [PATCH 05/15] f Pull `node_id` into a variable

---
 lightning/src/ln/peer_handler.rs | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/lightning/src/ln/peer_handler.rs b/lightning/src/ln/peer_handler.rs
index 2a2e56e669b..037f7116379 100644
--- a/lightning/src/ln/peer_handler.rs
+++ b/lightning/src/ln/peer_handler.rs
@@ -1387,10 +1387,12 @@ impl
 (&self, peer: &mut Peer, message: &M) {
 		let logger = WithContext::from(&self.logger, peer.their_node_id.map(|p| p.0), None, None);
+		// `unwrap` SAFETY: `their_node_id` is guaranteed to be `Some` after the handshake
+		let node_id = peer.their_node_id.unwrap().0;
 		if is_gossip_msg(message.type_id()) {
-			log_gossip!(logger, "Enqueueing message {:?} to {}", message, log_pubkey!(peer.their_node_id.unwrap().0));
+			log_gossip!(logger, "Enqueueing message {:?} to {}", message, log_pubkey!(node_id));
 		} else {
-			log_trace!(logger, "Enqueueing message {:?} to {}", message, log_pubkey!(peer.their_node_id.unwrap().0))
+			log_trace!(logger, "Enqueueing message {:?} to {}", message, log_pubkey!(node_id));
 		}
 		peer.msgs_sent_since_pong += 1;
 		peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(message));

From f30a96ae0c830301d0d28095310c9003b3ee6386 Mon Sep 17 00:00:00 2001
From: Elias Rohrer
Date: Thu, 10 Apr 2025 10:18:54 +0200
Subject: [PATCH 06/15] f Reduce indentation in `do_read_event`

---
 lightning/src/ln/peer_handler.rs | 437 ++++++++++++++++---------------
 1 file changed, 224 insertions(+), 213 deletions(-)

diff --git a/lightning/src/ln/peer_handler.rs b/lightning/src/ln/peer_handler.rs
index 037f7116379..02a10463d20 100644
--- a/lightning/src/ln/peer_handler.rs
+++ b/lightning/src/ln/peer_handler.rs
@@ -1409,240
+1409,251 @@ impl { - // This is most likely a simple race condition where the user read some bytes - // from the socket, then we told the user to `disconnect_socket()`, then they - // called this method. Return an error to make sure we get disconnected. - return Err(PeerHandleError { }); - }, - Some(peer_mutex) => { - let mut read_pos = 0; - while read_pos < data.len() { - macro_rules! try_potential_handleerror { - ($peer: expr, $thing: expr) => {{ - let res = $thing; - let logger = WithContext::from(&self.logger, peer_node_id.map(|(id, _)| id), None, None); - match res { - Ok(x) => x, - Err(e) => { - match e.action { - msgs::ErrorAction::DisconnectPeer { .. } => { - // We may have an `ErrorMessage` to send to the peer, - // but writing to the socket while reading can lead to - // re-entrant code and possibly unexpected behavior. The - // message send is optimistic anyway, and in this case - // we immediately disconnect the peer. - log_debug!(logger, "Error handling message{}; disconnecting peer with: {}", OptionalFromDebugger(&peer_node_id), e.err); - return Err(PeerHandleError { }); - }, - msgs::ErrorAction::DisconnectPeerWithWarning { .. } => { - // We have a `WarningMessage` to send to the peer, but - // writing to the socket while reading can lead to - // re-entrant code and possibly unexpected behavior. The - // message send is optimistic anyway, and in this case - // we immediately disconnect the peer. - log_debug!(logger, "Error handling message{}; disconnecting peer with: {}", OptionalFromDebugger(&peer_node_id), e.err); - return Err(PeerHandleError { }); - }, - msgs::ErrorAction::IgnoreAndLog(level) => { - log_given_level!(logger, level, "Error handling {}message{}; ignoring: {}", - if level == Level::Gossip { "gossip " } else { "" }, - OptionalFromDebugger(&peer_node_id), e.err); - continue - }, - msgs::ErrorAction::IgnoreDuplicateGossip => continue, // Don't even bother logging these - msgs::ErrorAction::IgnoreError => { - log_debug!(logger, "Error handling message{}; ignoring: {}", OptionalFromDebugger(&peer_node_id), e.err); - continue; - }, - msgs::ErrorAction::SendErrorMessage { msg } => { - log_debug!(logger, "Error handling message{}; sending error message with: {}", OptionalFromDebugger(&peer_node_id), e.err); - self.enqueue_message($peer, &msg); - continue; - }, - msgs::ErrorAction::SendWarningMessage { msg, log_level } => { - log_given_level!(logger, log_level, "Error handling message{}; sending warning message with: {}", OptionalFromDebugger(&peer_node_id), e.err); - self.enqueue_message($peer, &msg); - continue; - }, - } + + if let Some(peer_mutex) = peers.get(peer_descriptor) { + let mut read_pos = 0; + while read_pos < data.len() { + macro_rules! try_potential_handleerror { + ($peer: expr, $thing: expr) => {{ + let res = $thing; + let logger = WithContext::from(&self.logger, peer_node_id.map(|(id, _)| id), None, None); + match res { + Ok(x) => x, + Err(e) => { + match e.action { + msgs::ErrorAction::DisconnectPeer { .. } => { + // We may have an `ErrorMessage` to send to the peer, + // but writing to the socket while reading can lead to + // re-entrant code and possibly unexpected behavior. The + // message send is optimistic anyway, and in this case + // we immediately disconnect the peer. + log_debug!(logger, "Error handling message{}; disconnecting peer with: {}", OptionalFromDebugger(&peer_node_id), e.err); + return Err(PeerHandleError { }); + }, + msgs::ErrorAction::DisconnectPeerWithWarning { .. 
} => { + // We have a `WarningMessage` to send to the peer, but + // writing to the socket while reading can lead to + // re-entrant code and possibly unexpected behavior. The + // message send is optimistic anyway, and in this case + // we immediately disconnect the peer. + log_debug!(logger, "Error handling message{}; disconnecting peer with: {}", OptionalFromDebugger(&peer_node_id), e.err); + return Err(PeerHandleError { }); + }, + msgs::ErrorAction::IgnoreAndLog(level) => { + log_given_level!(logger, level, "Error handling {}message{}; ignoring: {}", + if level == Level::Gossip { "gossip " } else { "" }, + OptionalFromDebugger(&peer_node_id), e.err); + continue + }, + msgs::ErrorAction::IgnoreDuplicateGossip => continue, // Don't even bother logging these + msgs::ErrorAction::IgnoreError => { + log_debug!(logger, "Error handling message{}; ignoring: {}", OptionalFromDebugger(&peer_node_id), e.err); + continue; + }, + msgs::ErrorAction::SendErrorMessage { msg } => { + log_debug!(logger, "Error handling message{}; sending error message with: {}", OptionalFromDebugger(&peer_node_id), e.err); + self.enqueue_message($peer, &msg); + continue; + }, + msgs::ErrorAction::SendWarningMessage { msg, log_level } => { + log_given_level!(logger, log_level, "Error handling message{}; sending warning message with: {}", OptionalFromDebugger(&peer_node_id), e.err); + self.enqueue_message($peer, &msg); + continue; + }, } } - }} - } - - let mut peer_lock = peer_mutex.lock().unwrap(); - let peer = &mut *peer_lock; - let mut msg_to_handle = None; - if peer_node_id.is_none() { - peer_node_id.clone_from(&peer.their_node_id); - } + } + }} + } - assert!(peer.pending_read_buffer.len() > 0); - assert!(peer.pending_read_buffer.len() > peer.pending_read_buffer_pos); + let mut peer_lock = peer_mutex.lock().unwrap(); + let peer = &mut *peer_lock; + let mut msg_to_handle = None; + if peer_node_id.is_none() { + peer_node_id.clone_from(&peer.their_node_id); + } - { - let data_to_copy = cmp::min(peer.pending_read_buffer.len() - peer.pending_read_buffer_pos, data.len() - read_pos); - peer.pending_read_buffer[peer.pending_read_buffer_pos..peer.pending_read_buffer_pos + data_to_copy].copy_from_slice(&data[read_pos..read_pos + data_to_copy]); - read_pos += data_to_copy; - peer.pending_read_buffer_pos += data_to_copy; - } + assert!(peer.pending_read_buffer.len() > 0); + assert!(peer.pending_read_buffer.len() > peer.pending_read_buffer_pos); - if peer.pending_read_buffer_pos == peer.pending_read_buffer.len() { - peer.pending_read_buffer_pos = 0; + { + let data_to_copy = cmp::min(peer.pending_read_buffer.len() - peer.pending_read_buffer_pos, data.len() - read_pos); + peer.pending_read_buffer[peer.pending_read_buffer_pos..peer.pending_read_buffer_pos + data_to_copy].copy_from_slice(&data[read_pos..read_pos + data_to_copy]); + read_pos += data_to_copy; + peer.pending_read_buffer_pos += data_to_copy; + } - macro_rules! insert_node_id { - () => { - let logger = WithContext::from(&self.logger, peer.their_node_id.map(|p| p.0), None, None); - match self.node_id_to_descriptor.lock().unwrap().entry(peer.their_node_id.unwrap().0) { - hash_map::Entry::Occupied(e) => { - log_trace!(logger, "Got second connection with {}, closing", log_pubkey!(peer.their_node_id.unwrap().0)); - peer.their_node_id = None; // Unset so that we don't generate a peer_disconnected event - // Check that the peers map is consistent with the - // node_id_to_descriptor map, as this has been broken - // before. 
- debug_assert!(peers.get(e.get()).is_some()); - return Err(PeerHandleError { }) - }, - hash_map::Entry::Vacant(entry) => { - log_debug!(logger, "Finished noise handshake for connection with {}", log_pubkey!(peer.their_node_id.unwrap().0)); - entry.insert(peer_descriptor.clone()) - }, - }; - } + if peer.pending_read_buffer_pos == peer.pending_read_buffer.len() { + peer.pending_read_buffer_pos = 0; + + macro_rules! insert_node_id { + () => { + let logger = WithContext::from(&self.logger, peer.their_node_id.map(|p| p.0), None, None); + match self.node_id_to_descriptor.lock().unwrap().entry(peer.their_node_id.unwrap().0) { + hash_map::Entry::Occupied(e) => { + log_trace!(logger, "Got second connection with {}, closing", log_pubkey!(peer.their_node_id.unwrap().0)); + // Unset `their_node_id` so that we don't generate a peer_disconnected event + // Check that the peers map is consistent with the + // node_id_to_descriptor map, as this has been broken + // before. + peer.their_node_id = None; + debug_assert!(peers.get(e.get()).is_some()); + return Err(PeerHandleError { }) + }, + hash_map::Entry::Vacant(entry) => { + log_debug!(logger, "Finished noise handshake for connection with {}", log_pubkey!(peer.their_node_id.unwrap().0)); + entry.insert(peer_descriptor.clone()) + }, + }; } + } - let next_step = peer.channel_encryptor.get_noise_step(); - match next_step { - NextNoiseStep::ActOne => { - let act_two = try_potential_handleerror!(peer, peer.channel_encryptor - .process_act_one_with_keys(&peer.pending_read_buffer[..], - &self.node_signer, self.get_ephemeral_key(), &self.secp_ctx)).to_vec(); - peer.pending_outbound_buffer.push_back(act_two); - peer.pending_read_buffer = [0; 66].to_vec(); // act three is 66 bytes long - }, - NextNoiseStep::ActTwo => { - let (act_three, their_node_id) = try_potential_handleerror!(peer, - peer.channel_encryptor.process_act_two(&peer.pending_read_buffer[..], - &self.node_signer)); - peer.pending_outbound_buffer.push_back(act_three.to_vec()); - peer.pending_read_buffer = [0; 18].to_vec(); // Message length header is 18 bytes - peer.pending_read_is_header = true; + let next_step = peer.channel_encryptor.get_noise_step(); + match next_step { + NextNoiseStep::ActOne => { + let act_two = try_potential_handleerror!( + peer, + peer.channel_encryptor.process_act_one_with_keys(&peer.pending_read_buffer[..], + &self.node_signer, + self.get_ephemeral_key(), + &self.secp_ctx + )).to_vec(); + peer.pending_outbound_buffer.push_back(act_two); + peer.pending_read_buffer = [0; 66].to_vec(); // act three is 66 bytes long + }, + NextNoiseStep::ActTwo => { + let (act_three, their_node_id) = try_potential_handleerror!( + peer, + peer.channel_encryptor.process_act_two(&peer.pending_read_buffer[..], + &self.node_signer + )); + peer.pending_outbound_buffer.push_back(act_three.to_vec()); + peer.pending_read_buffer = [0; 18].to_vec(); // Message length header is 18 bytes + peer.pending_read_is_header = true; + + peer.set_their_node_id(their_node_id); + insert_node_id!(); + let features = self.init_features(their_node_id); + let networks = self.message_handler.chan_handler.get_chain_hashes(); + let resp = msgs::Init { features, networks, remote_network_address: filter_addresses(peer.their_socket_address.clone()) }; + self.enqueue_message(peer, &resp); + }, + NextNoiseStep::ActThree => { + let their_node_id = try_potential_handleerror!( + peer, + peer.channel_encryptor.process_act_three(&peer.pending_read_buffer[..]) + ); + peer.pending_read_buffer = [0; 18].to_vec(); // Message length header 
is 18 bytes + peer.pending_read_is_header = true; + peer.set_their_node_id(their_node_id); + insert_node_id!(); + let features = self.init_features(their_node_id); + let networks = self.message_handler.chan_handler.get_chain_hashes(); + let resp = msgs::Init { features, networks, remote_network_address: filter_addresses(peer.their_socket_address.clone()) }; + self.enqueue_message(peer, &resp); + }, + NextNoiseStep::NoiseComplete => { + if peer.pending_read_is_header { + let msg_len = try_potential_handleerror!( + peer, + peer.channel_encryptor.decrypt_length_header(&peer.pending_read_buffer[..]) + ); + if peer.pending_read_buffer.capacity() > 8192 { peer.pending_read_buffer = Vec::new(); } + peer.pending_read_buffer.resize(msg_len as usize + 16, 0); + if msg_len < 2 { // Need at least the message type tag + return Err(PeerHandleError { }); + } + peer.pending_read_is_header = false; + } else { + debug_assert!(peer.pending_read_buffer.len() >= 2 + 16); + try_potential_handleerror!( + peer, + peer.channel_encryptor.decrypt_message(&mut peer.pending_read_buffer[..]) + ); + + let message_result = wire::read( + &mut &peer.pending_read_buffer[..peer.pending_read_buffer.len() - 16], + &*self.message_handler.custom_message_handler + ); - peer.set_their_node_id(their_node_id); - insert_node_id!(); - let features = self.init_features(their_node_id); - let networks = self.message_handler.chan_handler.get_chain_hashes(); - let resp = msgs::Init { features, networks, remote_network_address: filter_addresses(peer.their_socket_address.clone()) }; - self.enqueue_message(peer, &resp); - }, - NextNoiseStep::ActThree => { - let their_node_id = try_potential_handleerror!(peer, - peer.channel_encryptor.process_act_three(&peer.pending_read_buffer[..])); - peer.pending_read_buffer = [0; 18].to_vec(); // Message length header is 18 bytes + // Reset read buffer + if peer.pending_read_buffer.capacity() > 8192 { peer.pending_read_buffer = Vec::new(); } + peer.pending_read_buffer.resize(18, 0); peer.pending_read_is_header = true; - peer.set_their_node_id(their_node_id); - insert_node_id!(); - let features = self.init_features(their_node_id); - let networks = self.message_handler.chan_handler.get_chain_hashes(); - let resp = msgs::Init { features, networks, remote_network_address: filter_addresses(peer.their_socket_address.clone()) }; - self.enqueue_message(peer, &resp); - }, - NextNoiseStep::NoiseComplete => { - if peer.pending_read_is_header { - let msg_len = try_potential_handleerror!(peer, - peer.channel_encryptor.decrypt_length_header(&peer.pending_read_buffer[..])); - if peer.pending_read_buffer.capacity() > 8192 { peer.pending_read_buffer = Vec::new(); } - peer.pending_read_buffer.resize(msg_len as usize + 16, 0); - if msg_len < 2 { // Need at least the message type tag - return Err(PeerHandleError { }); - } - peer.pending_read_is_header = false; - } else { - debug_assert!(peer.pending_read_buffer.len() >= 2 + 16); - try_potential_handleerror!(peer, - peer.channel_encryptor.decrypt_message(&mut peer.pending_read_buffer[..])); - - let message_result = wire::read( - &mut &peer.pending_read_buffer[..peer.pending_read_buffer.len() - 16], - &*self.message_handler.custom_message_handler - ); - // Reset read buffer - if peer.pending_read_buffer.capacity() > 8192 { peer.pending_read_buffer = Vec::new(); } - peer.pending_read_buffer.resize(18, 0); - peer.pending_read_is_header = true; - - let logger = WithContext::from(&self.logger, peer.their_node_id.map(|p| p.0), None, None); - let message = match message_result { - 
Ok(x) => x, - Err(e) => { - match e { - // Note that to avoid re-entrancy we never call - // `do_attempt_write_data` from here, causing - // the messages enqueued here to not actually - // be sent before the peer is disconnected. - (msgs::DecodeError::UnknownRequiredFeature, Some(ty)) if is_gossip_msg(ty) => { - log_gossip!(logger, "Got a channel/node announcement with an unknown required feature flag, you may want to update!"); - continue; - } - (msgs::DecodeError::UnsupportedCompression, _) => { - log_gossip!(logger, "We don't support zlib-compressed message fields, sending a warning and ignoring message"); - self.enqueue_message(peer, &msgs::WarningMessage { channel_id: ChannelId::new_zero(), data: "Unsupported message compression: zlib".to_owned() }); - continue; - } - (_, Some(ty)) if is_gossip_msg(ty) => { - log_gossip!(logger, "Got an invalid value while deserializing a gossip message"); - self.enqueue_message(peer, &msgs::WarningMessage { - channel_id: ChannelId::new_zero(), - data: format!("Unreadable/bogus gossip message of type {}", ty), - }); - continue; - } - (msgs::DecodeError::UnknownRequiredFeature, _) => { - log_debug!(logger, "Received a message with an unknown required feature flag or TLV, you may want to update!"); - return Err(PeerHandleError { }); - } - (msgs::DecodeError::UnknownVersion, _) => return Err(PeerHandleError { }), - (msgs::DecodeError::InvalidValue, _) => { - log_debug!(logger, "Got an invalid value while deserializing message"); - return Err(PeerHandleError { }); - } - (msgs::DecodeError::ShortRead, _) => { - log_debug!(logger, "Deserialization failed due to shortness of message"); - return Err(PeerHandleError { }); - } - (msgs::DecodeError::BadLengthDescriptor, _) => return Err(PeerHandleError { }), - (msgs::DecodeError::Io(_), _) => return Err(PeerHandleError { }), - (msgs::DecodeError::DangerousValue, _) => return Err(PeerHandleError { }), + let logger = WithContext::from(&self.logger, peer.their_node_id.map(|p| p.0), None, None); + let message = match message_result { + Ok(x) => x, + Err(e) => { + match e { + // Note that to avoid re-entrancy we never call + // `do_attempt_write_data` from here, causing + // the messages enqueued here to not actually + // be sent before the peer is disconnected. 
+ (msgs::DecodeError::UnknownRequiredFeature, Some(ty)) if is_gossip_msg(ty) => { + log_gossip!(logger, "Got a channel/node announcement with an unknown required feature flag, you may want to update!"); + continue; + } + (msgs::DecodeError::UnsupportedCompression, _) => { + log_gossip!(logger, "We don't support zlib-compressed message fields, sending a warning and ignoring message"); + self.enqueue_message(peer, &msgs::WarningMessage { channel_id: ChannelId::new_zero(), data: "Unsupported message compression: zlib".to_owned() }); + continue; + } + (_, Some(ty)) if is_gossip_msg(ty) => { + log_gossip!(logger, "Got an invalid value while deserializing a gossip message"); + self.enqueue_message(peer, &msgs::WarningMessage { + channel_id: ChannelId::new_zero(), + data: format!("Unreadable/bogus gossip message of type {}", ty), + }); + continue; + } + (msgs::DecodeError::UnknownRequiredFeature, _) => { + log_debug!(logger, "Received a message with an unknown required feature flag or TLV, you may want to update!"); + return Err(PeerHandleError { }); } + (msgs::DecodeError::UnknownVersion, _) => return Err(PeerHandleError { }), + (msgs::DecodeError::InvalidValue, _) => { + log_debug!(logger, "Got an invalid value while deserializing message"); + return Err(PeerHandleError { }); + } + (msgs::DecodeError::ShortRead, _) => { + log_debug!(logger, "Deserialization failed due to shortness of message"); + return Err(PeerHandleError { }); + } + (msgs::DecodeError::BadLengthDescriptor, _) => return Err(PeerHandleError { }), + (msgs::DecodeError::Io(_), _) => return Err(PeerHandleError { }), + (msgs::DecodeError::DangerousValue, _) => return Err(PeerHandleError { }), } - }; + } + }; - msg_to_handle = Some(message); - } + msg_to_handle = Some(message); } } } - pause_read = !self.peer_should_read(peer); - - if let Some(message) = msg_to_handle { - match self.handle_message(&peer_mutex, peer_lock, message) { - Err(handling_error) => match handling_error { - MessageHandlingError::PeerHandleError(e) => { return Err(e) }, - MessageHandlingError::LightningError(e) => { - try_potential_handleerror!(&mut peer_mutex.lock().unwrap(), Err(e)); - }, - }, - Ok(Some(msg)) => { - msgs_to_forward.push(msg); + } + pause_read = !self.peer_should_read(peer); + + if let Some(message) = msg_to_handle { + match self.handle_message(&peer_mutex, peer_lock, message) { + Err(handling_error) => match handling_error { + MessageHandlingError::PeerHandleError(e) => { return Err(e) }, + MessageHandlingError::LightningError(e) => { + try_potential_handleerror!(&mut peer_mutex.lock().unwrap(), Err(e)); }, - Ok(None) => {}, - } + }, + Ok(Some(msg)) => { + msgs_to_forward.push(msg); + }, + Ok(None) => {}, } } } + } else { + // This is most likely a simple race condition where the user read some bytes + // from the socket, then we told the user to `disconnect_socket()`, then they + // called this method. Return an error to make sure we get disconnected. + return Err(PeerHandleError {}); } for msg in msgs_to_forward.drain(..) 
{

From 380d47a65a8b5307c24296f49bcd0e1948f8d665 Mon Sep 17 00:00:00 2001
From: Elias Rohrer
Date: Thu, 10 Apr 2025 10:31:13 +0200
Subject: [PATCH 07/15] f Pull out `their_node_id`

---
 lightning/src/ln/peer_handler.rs | 18 ++++++++++++------
 1 file changed, 12 insertions(+), 6 deletions(-)

diff --git a/lightning/src/ln/peer_handler.rs b/lightning/src/ln/peer_handler.rs
index 02a10463d20..90b88aa6664 100644
--- a/lightning/src/ln/peer_handler.rs
+++ b/lightning/src/ln/peer_handler.rs
@@ -1386,7 +1386,8 @@ impl
 (&self, peer: &mut Peer, message: &M) {
-		let logger = WithContext::from(&self.logger, peer.their_node_id.map(|p| p.0), None, None);
+		let their_node_id = peer.their_node_id.map(|p| p.0);
+		let logger = WithContext::from(&self.logger, their_node_id, None, None);
 		// `unwrap` SAFETY: `their_node_id` is guaranteed to be `Some` after the handshake
 		let node_id = peer.their_node_id.unwrap().0;
 		if is_gossip_msg(message.type_id()) {
@@ -1488,7 +1489,8 @@ impl {
-					let logger = WithContext::from(&self.logger, peer.their_node_id.map(|p| p.0), None, None);
+					let their_node_id = peer.their_node_id.map(|p| p.0);
+					let logger = WithContext::from(&self.logger, their_node_id, None, None);
 					match self.node_id_to_descriptor.lock().unwrap().entry(peer.their_node_id.unwrap().0) {
 						hash_map::Entry::Occupied(e) => {
 							log_trace!(logger, "Got second connection with {}, closing", log_pubkey!(peer.their_node_id.unwrap().0));
@@ -1581,7 +1583,8 @@ impl x, Err(e) => {
@@ -2107,7 +2110,8 @@ impl
Date: Thu, 10 Apr 2025 10:43:04 +0200
Subject: [PATCH 08/15] f Pull more node_ids into vars

---
 lightning/src/ln/peer_handler.rs | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/lightning/src/ln/peer_handler.rs b/lightning/src/ln/peer_handler.rs
index 90b88aa6664..4fe8a46e915 100644
--- a/lightning/src/ln/peer_handler.rs
+++ b/lightning/src/ln/peer_handler.rs
@@ -3196,17 +3196,19 @@ mod tests {
 		let b_data = fd_b.outbound_data.lock().unwrap().split_off(0);
 		if peers[0].read_event(&mut fd_a, &b_data).is_err() { break; }
 
+		let node_id_1 = peers[1].node_signer.get_node_id(Recipient::Node).unwrap();
 		cfgs[0].chan_handler.pending_events.lock().unwrap()
 			.push(MessageSendEvent::SendShutdown {
-				node_id: peers[1].node_signer.get_node_id(Recipient::Node).unwrap(),
+				node_id: node_id_1,
 				msg: msgs::Shutdown {
 					channel_id: ChannelId::new_zero(),
 					scriptpubkey: bitcoin::ScriptBuf::new(),
 				},
 			});
+		let node_id_0 = peers[0].node_signer.get_node_id(Recipient::Node).unwrap();
 		cfgs[1].chan_handler.pending_events.lock().unwrap()
 			.push(MessageSendEvent::SendShutdown {
-				node_id: peers[0].node_signer.get_node_id(Recipient::Node).unwrap(),
+				node_id: node_id_0,
 				msg: msgs::Shutdown {
 					channel_id: ChannelId::new_zero(),
 					scriptpubkey: bitcoin::ScriptBuf::new(),

From e1ae0da3d5708f8566d33e475f14ed01a3e106d3 Mon Sep 17 00:00:00 2001
From: Elias Rohrer
Date: Thu, 10 Apr 2025 10:48:16 +0200
Subject: [PATCH 09/15] f Pull out `peer`

---
 lightning/src/ln/peer_handler.rs | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/lightning/src/ln/peer_handler.rs b/lightning/src/ln/peer_handler.rs
index 4fe8a46e915..daf6066f953 100644
--- a/lightning/src/ln/peer_handler.rs
+++ b/lightning/src/ln/peer_handler.rs
@@ -2597,7 +2597,8 @@ impl
Date: Thu, 17 Apr 2025 12:36:02 +0200
Subject: [PATCH 10/15] f Pull handlers out in variables

---
 lightning/src/ln/peer_handler.rs | 62 +++++++++++++++++++++-----------
 1 file changed, 41 insertions(+), 21 deletions(-)

diff --git a/lightning/src/ln/peer_handler.rs b/lightning/src/ln/peer_handler.rs
index
daf6066f953..7c37927f225 100644 --- a/lightning/src/ln/peer_handler.rs +++ b/lightning/src/ln/peer_handler.rs @@ -1240,8 +1240,9 @@ impl { - if let Some(msg) = self.message_handler.route_handler.get_next_node_announcement(None) { + let handler = &self.message_handler.route_handler; + if let Some(msg) = handler.get_next_node_announcement(None) { self.enqueue_message(peer, &msg); peer.sync_status = InitSyncTracker::NodesSyncing(msg.contents.node_id); } else { @@ -1280,7 +1282,8 @@ impl unreachable!(), InitSyncTracker::NodesSyncing(sync_node_id) => { - if let Some(msg) = self.message_handler.route_handler.get_next_node_announcement(Some(&sync_node_id)) { + let handler = &self.message_handler.route_handler; + if let Some(msg) = handler.get_next_node_announcement(Some(&sync_node_id)) { self.enqueue_message(peer, &msg); peer.sync_status = InitSyncTracker::NodesSyncing(msg.contents.node_id); } else { @@ -1688,7 +1691,8 @@ impl { log_trace!(logger, "Received commitment_signed batch {:?} from {}", batch, log_pubkey!(their_node_id)); - self.message_handler.chan_handler.handle_commitment_signed_batch(their_node_id, channel_id, batch); + let chan_handler = &self.message_handler.chan_handler; + chan_handler.handle_commitment_signed_batch(their_node_id, channel_id, batch); return Ok(None); }, None => Ok(None), @@ -1713,7 +1717,8 @@ impl { - self.message_handler.chan_handler.handle_update_fail_malformed_htlc(their_node_id, &msg); + let chan_handler = &self.message_handler.chan_handler; + chan_handler.handle_update_fail_malformed_htlc(their_node_id, &msg); }, wire::Message::CommitmentSigned(msg) => { @@ -2029,46 +2040,54 @@ impl { - self.message_handler.chan_handler.handle_announcement_signatures(their_node_id, &msg); + let chan_handler = &self.message_handler.chan_handler; + chan_handler.handle_announcement_signatures(their_node_id, &msg); }, wire::Message::ChannelAnnouncement(msg) => { - if self.message_handler.route_handler.handle_channel_announcement(Some(their_node_id), &msg) + let route_handler = &self.message_handler.route_handler; + if route_handler.handle_channel_announcement(Some(their_node_id), &msg) .map_err(|e| -> MessageHandlingError { e.into() })? { should_forward = Some(wire::Message::ChannelAnnouncement(msg)); } self.update_gossip_backlogged(); }, wire::Message::NodeAnnouncement(msg) => { - if self.message_handler.route_handler.handle_node_announcement(Some(their_node_id), &msg) + let route_handler = &self.message_handler.route_handler; + if route_handler.handle_node_announcement(Some(their_node_id), &msg) .map_err(|e| -> MessageHandlingError { e.into() })? { should_forward = Some(wire::Message::NodeAnnouncement(msg)); } self.update_gossip_backlogged(); }, wire::Message::ChannelUpdate(msg) => { - self.message_handler.chan_handler.handle_channel_update(their_node_id, &msg); - if self.message_handler.route_handler.handle_channel_update(Some(their_node_id), &msg) + let route_handler = &self.message_handler.route_handler; + if route_handler.handle_channel_update(Some(their_node_id), &msg) .map_err(|e| -> MessageHandlingError { e.into() })? 
{ should_forward = Some(wire::Message::ChannelUpdate(msg)); } self.update_gossip_backlogged(); }, wire::Message::QueryShortChannelIds(msg) => { - self.message_handler.route_handler.handle_query_short_channel_ids(their_node_id, msg)?; + let route_handler = &self.message_handler.route_handler; + route_handler.handle_query_short_channel_ids(their_node_id, msg)?; }, wire::Message::ReplyShortChannelIdsEnd(msg) => { - self.message_handler.route_handler.handle_reply_short_channel_ids_end(their_node_id, msg)?; + let route_handler = &self.message_handler.route_handler; + route_handler.handle_reply_short_channel_ids_end(their_node_id, msg)?; }, wire::Message::QueryChannelRange(msg) => { - self.message_handler.route_handler.handle_query_channel_range(their_node_id, msg)?; + let route_handler = &self.message_handler.route_handler; + route_handler.handle_query_channel_range(their_node_id, msg)?; }, wire::Message::ReplyChannelRange(msg) => { - self.message_handler.route_handler.handle_reply_channel_range(their_node_id, msg)?; + let route_handler = &self.message_handler.route_handler; + route_handler.handle_reply_channel_range(their_node_id, msg)?; }, // Onion message: wire::Message::OnionMessage(msg) => { - self.message_handler.onion_message_handler.handle_onion_message(their_node_id, &msg); + let onion_message_handler = &self.message_handler.onion_message_handler; + onion_message_handler.handle_onion_message(their_node_id, &msg); }, // Unknown messages: @@ -2080,7 +2099,8 @@ impl { - self.message_handler.custom_message_handler.handle_custom_message(custom, their_node_id)?; + let custom_message_handler = &self.message_handler.custom_message_handler; + custom_message_handler.handle_custom_message(custom, their_node_id)?; }, }; Ok(should_forward) From edf8427d656deb3e0fca8bc120f42092bd29be8c Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Thu, 17 Apr 2025 12:52:56 +0200 Subject: [PATCH 11/15] f Pull warning message parts out to variables --- lightning/src/ln/peer_handler.rs | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/lightning/src/ln/peer_handler.rs b/lightning/src/ln/peer_handler.rs index 7c37927f225..bbe1960fd18 100644 --- a/lightning/src/ln/peer_handler.rs +++ b/lightning/src/ln/peer_handler.rs @@ -1602,15 +1602,21 @@ impl { log_gossip!(logger, "We don't support zlib-compressed message fields, sending a warning and ignoring message"); - self.enqueue_message(peer, &msgs::WarningMessage { channel_id: ChannelId::new_zero(), data: "Unsupported message compression: zlib".to_owned() }); + let channel_id = ChannelId::new_zero(); + let data = "Unsupported message compression: zlib".to_owned(); + let msg = msgs::WarningMessage { channel_id, data }; + self.enqueue_message(peer, &msg); continue; } (_, Some(ty)) if is_gossip_msg(ty) => { log_gossip!(logger, "Got an invalid value while deserializing a gossip message"); - self.enqueue_message(peer, &msgs::WarningMessage { - channel_id: ChannelId::new_zero(), - data: format!("Unreadable/bogus gossip message of type {}", ty), - }); + let channel_id = ChannelId::new_zero(); + let data = format!("Unreadable/bogus gossip message of type {}", ty); + let msg = msgs::WarningMessage { + channel_id, + data, + }; + self.enqueue_message(peer, &msg); continue; } (msgs::DecodeError::UnknownRequiredFeature, _) => { From c6045a9428c78e9594f682aa0cd8dd95a773663c Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Tue, 29 Apr 2025 15:31:20 +0200 Subject: [PATCH 12/15] f Move more message_handler variables out --- 
lightning/src/ln/peer_handler.rs | 21 +++++++++++++-------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/lightning/src/ln/peer_handler.rs b/lightning/src/ln/peer_handler.rs index bbe1960fd18..0e7ad40c654 100644 --- a/lightning/src/ln/peer_handler.rs +++ b/lightning/src/ln/peer_handler.rs @@ -2279,6 +2279,11 @@ impl { log_debug!(self.logger, "Handling BroadcastChannelAnnouncement event in peer_handler for short channel id {}", msg.contents.short_channel_id); - match self.message_handler.route_handler.handle_channel_announcement(None, &msg) { + match route_handler.handle_channel_announcement(None, &msg) { Ok(_) | Err(LightningError { action: msgs::ErrorAction::IgnoreDuplicateGossip, .. }) => { let forward = wire::Message::ChannelAnnouncement(msg); self.forward_broadcast_msg(peers, &forward, None, from_chan_handler); @@ -2493,7 +2498,7 @@ impl {}, } if let Some(msg) = update_msg { - match self.message_handler.route_handler.handle_channel_update(None, &msg) { + match route_handler.handle_channel_update(None, &msg) { Ok(_) | Err(LightningError { action: msgs::ErrorAction::IgnoreDuplicateGossip, .. }) => { let forward = wire::Message::ChannelUpdate(msg); self.forward_broadcast_msg(peers, &forward, None, from_chan_handler); @@ -2504,7 +2509,7 @@ impl { log_debug!(self.logger, "Handling BroadcastChannelUpdate event in peer_handler for contents {:?}", msg.contents); - match self.message_handler.route_handler.handle_channel_update(None, &msg) { + match route_handler.handle_channel_update(None, &msg) { Ok(_) | Err(LightningError { action: msgs::ErrorAction::IgnoreDuplicateGossip, .. }) => { let forward = wire::Message::ChannelUpdate(msg); self.forward_broadcast_msg(peers, &forward, None, from_chan_handler); @@ -2514,7 +2519,7 @@ impl { log_debug!(self.logger, "Handling BroadcastNodeAnnouncement event in peer_handler for node {}", msg.contents.node_id); - match self.message_handler.route_handler.handle_node_announcement(None, &msg) { + match route_handler.handle_node_announcement(None, &msg) { Ok(_) | Err(LightningError { action: msgs::ErrorAction::IgnoreDuplicateGossip, .. 
}) => { let forward = wire::Message::NodeAnnouncement(msg); self.forward_broadcast_msg(peers, &forward, None, from_chan_handler); @@ -2606,22 +2611,22 @@ impl Date: Tue, 29 Apr 2025 15:41:47 +0200 Subject: [PATCH 13/15] f Refactor disconnection bools --- lightning/src/ln/peer_handler.rs | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/lightning/src/ln/peer_handler.rs b/lightning/src/ln/peer_handler.rs index 0e7ad40c654..735391d2ffc 100644 --- a/lightning/src/ln/peer_handler.rs +++ b/lightning/src/ln/peer_handler.rs @@ -2809,11 +2809,10 @@ impl 0 && !peer.received_message_since_timer_tick) - || peer.awaiting_pong_timer_tick_intervals as u64 > - MAX_BUFFER_DRAIN_TICK_INTERVALS_PER_PEER as u64 * peers_lock.len() as u64 - { + let not_recently_active = peer.awaiting_pong_timer_tick_intervals > 0 && !peer.received_message_since_timer_tick; + let reached_threshold_intervals = peer.awaiting_pong_timer_tick_intervals as u64 > + MAX_BUFFER_DRAIN_TICK_INTERVALS_PER_PEER as u64 * peers_lock.len() as u64; + if not_recently_active || reached_threshold_intervals { descriptors_needing_disconnect.push(descriptor.clone()); break; } From 9b856e366875370814890ac006550496a29a2693 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Tue, 29 Apr 2025 15:42:03 +0200 Subject: [PATCH 14/15] `rustfmt`: Run on `lightning/src/ln/peer_handler.rs` --- lightning/src/ln/peer_handler.rs | 2075 +++++++++++++++++++++--------- 1 file changed, 1440 insertions(+), 635 deletions(-) diff --git a/lightning/src/ln/peer_handler.rs b/lightning/src/ln/peer_handler.rs index 735391d2ffc..75855f8729b 100644 --- a/lightning/src/ln/peer_handler.rs +++ b/lightning/src/ln/peer_handler.rs @@ -15,28 +15,39 @@ //! call into the provided message handlers (probably a ChannelManager and P2PGossipSync) with //! messages they should handle, and encoding/sending response messages. 
-use bitcoin::Txid; use bitcoin::constants::ChainHash; -use bitcoin::secp256k1::{self, Secp256k1, SecretKey, PublicKey}; +use bitcoin::secp256k1::{self, PublicKey, Secp256k1, SecretKey}; +use bitcoin::Txid; use crate::blinded_path::message::{AsyncPaymentsContext, DNSResolverContext, OffersContext}; -use crate::sign::{NodeSigner, Recipient}; -use crate::ln::types::ChannelId; -use crate::types::features::{InitFeatures, NodeFeatures}; use crate::ln::msgs; -use crate::ln::msgs::{BaseMessageHandler, ChannelMessageHandler, Init, LightningError, SocketAddress, MessageSendEvent, OnionMessageHandler, RoutingMessageHandler}; -use crate::util::ser::{VecWriter, Writeable, Writer}; -use crate::ln::peer_channel_encryptor::{PeerChannelEncryptor, NextNoiseStep, MessageBuf, MSG_BUF_ALLOC_SIZE}; +use crate::ln::msgs::{ + BaseMessageHandler, ChannelMessageHandler, Init, LightningError, MessageSendEvent, + OnionMessageHandler, RoutingMessageHandler, SocketAddress, +}; +use crate::ln::peer_channel_encryptor::{ + MessageBuf, NextNoiseStep, PeerChannelEncryptor, MSG_BUF_ALLOC_SIZE, +}; +use crate::ln::types::ChannelId; use crate::ln::wire; use crate::ln::wire::{Encode, Type}; -use crate::onion_message::async_payments::{AsyncPaymentsMessageHandler, HeldHtlcAvailable, ReleaseHeldHtlc}; -use crate::onion_message::dns_resolution::{DNSResolverMessageHandler, DNSResolverMessage, DNSSECProof, DNSSECQuery}; -use crate::onion_message::messenger::{CustomOnionMessageHandler, Responder, ResponseInstruction, MessageSendInstructions}; +use crate::onion_message::async_payments::{ + AsyncPaymentsMessageHandler, HeldHtlcAvailable, ReleaseHeldHtlc, +}; +use crate::onion_message::dns_resolution::{ + DNSResolverMessage, DNSResolverMessageHandler, DNSSECProof, DNSSECQuery, +}; +use crate::onion_message::messenger::{ + CustomOnionMessageHandler, MessageSendInstructions, Responder, ResponseInstruction, +}; use crate::onion_message::offers::{OffersMessage, OffersMessageHandler}; use crate::onion_message::packet::OnionMessageContents; -use crate::routing::gossip::{NodeId, NodeAlias}; +use crate::routing::gossip::{NodeAlias, NodeId}; +use crate::sign::{NodeSigner, Recipient}; +use crate::types::features::{InitFeatures, NodeFeatures}; use crate::util::atomic_counter::AtomicCounter; use crate::util::logger::{Level, Logger, WithContext}; +use crate::util::ser::{VecWriter, Writeable, Writer}; use crate::util::string::PrintableString; #[allow(unused_imports)] @@ -45,11 +56,11 @@ use crate::prelude::*; use alloc::collections::{btree_map, BTreeMap}; use crate::io; -use crate::sync::{Mutex, MutexGuard, FairRwLock}; -use core::sync::atomic::{AtomicBool, AtomicU32, AtomicI32, Ordering}; -use core::{cmp, hash, fmt, mem}; -use core::ops::Deref; +use crate::sync::{FairRwLock, Mutex, MutexGuard}; use core::convert::Infallible; +use core::ops::Deref; +use core::sync::atomic::{AtomicBool, AtomicI32, AtomicU32, Ordering}; +use core::{cmp, fmt, hash, mem}; #[cfg(not(c_bindings))] use { crate::ln::channelmanager::{SimpleArcChannelManager, SimpleRefChannelManager}, @@ -61,7 +72,7 @@ use { use bitcoin::hashes::sha256::Hash as Sha256; use bitcoin::hashes::sha256::HashEngine as Sha256Engine; -use bitcoin::hashes::{HashEngine, Hash}; +use bitcoin::hashes::{Hash, HashEngine}; /// A handler provided to [`PeerManager`] for reading and handling custom messages. 
/// @@ -75,7 +86,9 @@ pub trait CustomMessageHandler: wire::CustomMessageReader { /// Handles the given message sent from `sender_node_id`, possibly producing messages for /// [`CustomMessageHandler::get_and_clear_pending_msg`] to return and thus for [`PeerManager`] /// to send. - fn handle_custom_message(&self, msg: Self::CustomMessage, sender_node_id: PublicKey) -> Result<(), LightningError>; + fn handle_custom_message( + &self, msg: Self::CustomMessage, sender_node_id: PublicKey, + ) -> Result<(), LightningError>; /// Returns the list of pending messages that were generated by the handler, clearing the list /// in the process. Each message is paired with the node id of the intended recipient. If no @@ -92,7 +105,8 @@ pub trait CustomMessageHandler: wire::CustomMessageReader { /// message handlers may still wish to communicate with this peer. /// /// [`Self::peer_disconnected`] will not be called if `Err(())` is returned. - fn peer_connected(&self, their_node_id: PublicKey, msg: &Init, inbound: bool) -> Result<(), ()>; + fn peer_connected(&self, their_node_id: PublicKey, msg: &Init, inbound: bool) + -> Result<(), ()>; /// Gets the node feature flags which this handler itself supports. All available handlers are /// queried similarly and their feature flags are OR'd together to form the [`NodeFeatures`] @@ -111,38 +125,89 @@ pub trait CustomMessageHandler: wire::CustomMessageReader { /// A dummy struct which implements `RoutingMessageHandler` without storing any routing information /// or doing any processing. You can provide one of these as the route_handler in a MessageHandler. -pub struct IgnoringMessageHandler{} +pub struct IgnoringMessageHandler {} impl BaseMessageHandler for IgnoringMessageHandler { fn peer_disconnected(&self, _their_node_id: PublicKey) {} - fn peer_connected(&self, _their_node_id: PublicKey, _init: &msgs::Init, _inbound: bool) -> Result<(), ()> { Ok(()) } - fn provided_node_features(&self) -> NodeFeatures { NodeFeatures::empty() } + fn peer_connected( + &self, _their_node_id: PublicKey, _init: &msgs::Init, _inbound: bool, + ) -> Result<(), ()> { + Ok(()) + } + fn provided_node_features(&self) -> NodeFeatures { + NodeFeatures::empty() + } fn provided_init_features(&self, _their_node_id: PublicKey) -> InitFeatures { InitFeatures::empty() } - fn get_and_clear_pending_msg_events(&self) -> Vec { Vec::new() } + fn get_and_clear_pending_msg_events(&self) -> Vec { + Vec::new() + } } impl RoutingMessageHandler for IgnoringMessageHandler { - fn handle_node_announcement(&self, _their_node_id: Option, _msg: &msgs::NodeAnnouncement) -> Result { Ok(false) } - fn handle_channel_announcement(&self, _their_node_id: Option, _msg: &msgs::ChannelAnnouncement) -> Result { Ok(false) } - fn handle_channel_update(&self, _their_node_id: Option, _msg: &msgs::ChannelUpdate) -> Result { Ok(false) } - fn get_next_channel_announcement(&self, _starting_point: u64) -> - Option<(msgs::ChannelAnnouncement, Option, Option)> { None } - fn get_next_node_announcement(&self, _starting_point: Option<&NodeId>) -> Option { None } - fn handle_reply_channel_range(&self, _their_node_id: PublicKey, _msg: msgs::ReplyChannelRange) -> Result<(), LightningError> { Ok(()) } - fn handle_reply_short_channel_ids_end(&self, _their_node_id: PublicKey, _msg: msgs::ReplyShortChannelIdsEnd) -> Result<(), LightningError> { Ok(()) } - fn handle_query_channel_range(&self, _their_node_id: PublicKey, _msg: msgs::QueryChannelRange) -> Result<(), LightningError> { Ok(()) } - fn handle_query_short_channel_ids(&self, 
_their_node_id: PublicKey, _msg: msgs::QueryShortChannelIds) -> Result<(), LightningError> { Ok(()) } - fn processing_queue_high(&self) -> bool { false } + fn handle_node_announcement( + &self, _their_node_id: Option, _msg: &msgs::NodeAnnouncement, + ) -> Result { + Ok(false) + } + fn handle_channel_announcement( + &self, _their_node_id: Option, _msg: &msgs::ChannelAnnouncement, + ) -> Result { + Ok(false) + } + fn handle_channel_update( + &self, _their_node_id: Option, _msg: &msgs::ChannelUpdate, + ) -> Result { + Ok(false) + } + fn get_next_channel_announcement( + &self, _starting_point: u64, + ) -> Option<(msgs::ChannelAnnouncement, Option, Option)> + { + None + } + fn get_next_node_announcement( + &self, _starting_point: Option<&NodeId>, + ) -> Option { + None + } + fn handle_reply_channel_range( + &self, _their_node_id: PublicKey, _msg: msgs::ReplyChannelRange, + ) -> Result<(), LightningError> { + Ok(()) + } + fn handle_reply_short_channel_ids_end( + &self, _their_node_id: PublicKey, _msg: msgs::ReplyShortChannelIdsEnd, + ) -> Result<(), LightningError> { + Ok(()) + } + fn handle_query_channel_range( + &self, _their_node_id: PublicKey, _msg: msgs::QueryChannelRange, + ) -> Result<(), LightningError> { + Ok(()) + } + fn handle_query_short_channel_ids( + &self, _their_node_id: PublicKey, _msg: msgs::QueryShortChannelIds, + ) -> Result<(), LightningError> { + Ok(()) + } + fn processing_queue_high(&self) -> bool { + false + } } impl OnionMessageHandler for IgnoringMessageHandler { fn handle_onion_message(&self, _their_node_id: PublicKey, _msg: &msgs::OnionMessage) {} - fn next_onion_message_for_peer(&self, _peer_node_id: PublicKey) -> Option { None } + fn next_onion_message_for_peer(&self, _peer_node_id: PublicKey) -> Option { + None + } fn timer_tick_occurred(&self) {} } impl OffersMessageHandler for IgnoringMessageHandler { - fn handle_message(&self, _message: OffersMessage, _context: Option, _responder: Option) -> Option<(OffersMessage, ResponseInstruction)> { + fn handle_message( + &self, _message: OffersMessage, _context: Option, + _responder: Option, + ) -> Option<(OffersMessage, ResponseInstruction)> { None } } @@ -165,11 +230,18 @@ impl DNSResolverMessageHandler for IgnoringMessageHandler { } impl CustomOnionMessageHandler for IgnoringMessageHandler { type CustomMessage = Infallible; - fn handle_custom_message(&self, _message: Infallible, _context: Option>, _responder: Option) -> Option<(Infallible, ResponseInstruction)> { + fn handle_custom_message( + &self, _message: Infallible, _context: Option>, _responder: Option, + ) -> Option<(Infallible, ResponseInstruction)> { // Since we always return `None` in the read the handle method should never be called. 
unreachable!(); } - fn read_custom_message(&self, _msg_type: u64, _buffer: &mut R) -> Result, msgs::DecodeError> where Self: Sized { + fn read_custom_message( + &self, _msg_type: u64, _buffer: &mut R, + ) -> Result, msgs::DecodeError> + where + Self: Sized, + { Ok(None) } fn release_pending_custom_messages(&self) -> Vec<(Infallible, MessageSendInstructions)> { @@ -178,16 +250,24 @@ impl CustomOnionMessageHandler for IgnoringMessageHandler { } impl OnionMessageContents for Infallible { - fn tlv_type(&self) -> u64 { unreachable!(); } + fn tlv_type(&self) -> u64 { + unreachable!(); + } #[cfg(c_bindings)] - fn msg_type(&self) -> String { unreachable!(); } + fn msg_type(&self) -> String { + unreachable!(); + } #[cfg(not(c_bindings))] - fn msg_type(&self) -> &'static str { unreachable!(); } + fn msg_type(&self) -> &'static str { + unreachable!(); + } } impl Deref for IgnoringMessageHandler { type Target = IgnoringMessageHandler; - fn deref(&self) -> &Self { self } + fn deref(&self) -> &Self { + self + } } // Implement Type for Infallible, note that it cannot be constructed, and thus you can never call a @@ -205,24 +285,36 @@ impl Writeable for Infallible { impl wire::CustomMessageReader for IgnoringMessageHandler { type CustomMessage = Infallible; - fn read(&self, _message_type: u16, _buffer: &mut R) -> Result, msgs::DecodeError> { + fn read( + &self, _message_type: u16, _buffer: &mut R, + ) -> Result, msgs::DecodeError> { Ok(None) } } impl CustomMessageHandler for IgnoringMessageHandler { - fn handle_custom_message(&self, _msg: Infallible, _sender_node_id: PublicKey) -> Result<(), LightningError> { + fn handle_custom_message( + &self, _msg: Infallible, _sender_node_id: PublicKey, + ) -> Result<(), LightningError> { // Since we always return `None` in the read the handle method should never be called. unreachable!(); } - fn get_and_clear_pending_msg(&self) -> Vec<(PublicKey, Self::CustomMessage)> { Vec::new() } + fn get_and_clear_pending_msg(&self) -> Vec<(PublicKey, Self::CustomMessage)> { + Vec::new() + } fn peer_disconnected(&self, _their_node_id: PublicKey) {} - fn peer_connected(&self, _their_node_id: PublicKey, _msg: &Init, _inbound: bool) -> Result<(), ()> { Ok(()) } + fn peer_connected( + &self, _their_node_id: PublicKey, _msg: &Init, _inbound: bool, + ) -> Result<(), ()> { + Ok(()) + } - fn provided_node_features(&self) -> NodeFeatures { NodeFeatures::empty() } + fn provided_node_features(&self) -> NodeFeatures { + NodeFeatures::empty() + } fn provided_init_features(&self, _their_node_id: PublicKey) -> InitFeatures { InitFeatures::empty() @@ -232,7 +324,7 @@ impl CustomMessageHandler for IgnoringMessageHandler { /// A dummy struct which implements `ChannelMessageHandler` without having any channels. /// You can provide one of these as the route_handler in a MessageHandler. 
pub struct ErroringMessageHandler { - message_queue: Mutex> + message_queue: Mutex>, } impl ErroringMessageHandler { /// Constructs a new ErroringMessageHandler @@ -242,7 +334,10 @@ impl ErroringMessageHandler { fn push_error(&self, node_id: PublicKey, channel_id: ChannelId) { self.message_queue.lock().unwrap().push(MessageSendEvent::HandleError { action: msgs::ErrorAction::SendErrorMessage { - msg: msgs::ErrorMessage { channel_id, data: "We do not support channel messages, sorry.".to_owned() }, + msg: msgs::ErrorMessage { + channel_id, + data: "We do not support channel messages, sorry.".to_owned(), + }, }, node_id, }); @@ -250,8 +345,14 @@ impl ErroringMessageHandler { } impl BaseMessageHandler for ErroringMessageHandler { fn peer_disconnected(&self, _their_node_id: PublicKey) {} - fn peer_connected(&self, _their_node_id: PublicKey, _init: &msgs::Init, _inbound: bool) -> Result<(), ()> { Ok(()) } - fn provided_node_features(&self) -> NodeFeatures { NodeFeatures::empty() } + fn peer_connected( + &self, _their_node_id: PublicKey, _init: &msgs::Init, _inbound: bool, + ) -> Result<(), ()> { + Ok(()) + } + fn provided_node_features(&self) -> NodeFeatures { + NodeFeatures::empty() + } fn provided_init_features(&self, _their_node_id: PublicKey) -> InitFeatures { // Set a number of features which various nodes may require to talk to us. It's totally // reasonable to indicate we "support" all kinds of channel features...we just reject all @@ -283,10 +384,18 @@ impl ChannelMessageHandler for ErroringMessageHandler { // Any messages which are related to a specific channel generate an error message to let the // peer know we don't care about channels. fn handle_open_channel(&self, their_node_id: PublicKey, msg: &msgs::OpenChannel) { - ErroringMessageHandler::push_error(self, their_node_id, msg.common_fields.temporary_channel_id); + ErroringMessageHandler::push_error( + self, + their_node_id, + msg.common_fields.temporary_channel_id, + ); } fn handle_accept_channel(&self, their_node_id: PublicKey, msg: &msgs::AcceptChannel) { - ErroringMessageHandler::push_error(self, their_node_id, msg.common_fields.temporary_channel_id); + ErroringMessageHandler::push_error( + self, + their_node_id, + msg.common_fields.temporary_channel_id, + ); } fn handle_funding_created(&self, their_node_id: PublicKey, msg: &msgs::FundingCreated) { ErroringMessageHandler::push_error(self, their_node_id, msg.temporary_channel_id); @@ -327,7 +436,9 @@ impl ChannelMessageHandler for ErroringMessageHandler { fn handle_update_fail_htlc(&self, their_node_id: PublicKey, msg: &msgs::UpdateFailHTLC) { ErroringMessageHandler::push_error(self, their_node_id, msg.channel_id); } - fn handle_update_fail_malformed_htlc(&self, their_node_id: PublicKey, msg: &msgs::UpdateFailMalformedHTLC) { + fn handle_update_fail_malformed_htlc( + &self, their_node_id: PublicKey, msg: &msgs::UpdateFailMalformedHTLC, + ) { ErroringMessageHandler::push_error(self, their_node_id, msg.channel_id); } fn handle_commitment_signed(&self, their_node_id: PublicKey, msg: &msgs::CommitmentSigned) { @@ -345,7 +456,9 @@ impl ChannelMessageHandler for ErroringMessageHandler { fn handle_update_fee(&self, their_node_id: PublicKey, msg: &msgs::UpdateFee) { ErroringMessageHandler::push_error(self, their_node_id, msg.channel_id); } - fn handle_announcement_signatures(&self, their_node_id: PublicKey, msg: &msgs::AnnouncementSignatures) { + fn handle_announcement_signatures( + &self, their_node_id: PublicKey, msg: &msgs::AnnouncementSignatures, + ) { 
ErroringMessageHandler::push_error(self, their_node_id, msg.channel_id); } fn handle_channel_reestablish(&self, their_node_id: PublicKey, msg: &msgs::ChannelReestablish) { @@ -355,7 +468,10 @@ impl ChannelMessageHandler for ErroringMessageHandler { fn handle_channel_update(&self, _their_node_id: PublicKey, _msg: &msgs::ChannelUpdate) {} fn handle_peer_storage(&self, _their_node_id: PublicKey, _msg: msgs::PeerStorage) {} - fn handle_peer_storage_retrieval(&self, _their_node_id: PublicKey, _msg: msgs::PeerStorageRetrieval) {} + fn handle_peer_storage_retrieval( + &self, _their_node_id: PublicKey, _msg: msgs::PeerStorageRetrieval, + ) { + } fn handle_error(&self, _their_node_id: PublicKey, _msg: &msgs::ErrorMessage) {} @@ -367,11 +483,19 @@ impl ChannelMessageHandler for ErroringMessageHandler { } fn handle_open_channel_v2(&self, their_node_id: PublicKey, msg: &msgs::OpenChannelV2) { - ErroringMessageHandler::push_error(self, their_node_id, msg.common_fields.temporary_channel_id); + ErroringMessageHandler::push_error( + self, + their_node_id, + msg.common_fields.temporary_channel_id, + ); } fn handle_accept_channel_v2(&self, their_node_id: PublicKey, msg: &msgs::AcceptChannelV2) { - ErroringMessageHandler::push_error(self, their_node_id, msg.common_fields.temporary_channel_id); + ErroringMessageHandler::push_error( + self, + their_node_id, + msg.common_fields.temporary_channel_id, + ); } fn handle_tx_add_input(&self, their_node_id: PublicKey, msg: &msgs::TxAddInput) { @@ -415,11 +539,14 @@ impl ChannelMessageHandler for ErroringMessageHandler { impl Deref for ErroringMessageHandler { type Target = ErroringMessageHandler; - fn deref(&self) -> &Self { self } + fn deref(&self) -> &Self { + self + } } /// Provides references to trait impls which handle different types of messages. -pub struct MessageHandler where +pub struct MessageHandler +where CM::Target: ChannelMessageHandler, RM::Target: RoutingMessageHandler, OM::Target: OnionMessageHandler, @@ -459,7 +586,7 @@ pub struct MessageHandler where /// further calls to the [`PeerManager`] related to the original socket occur. This allows you to /// use a file descriptor for your SocketDescriptor directly, however for simplicity you may wish /// to simply use another value which is guaranteed to be globally unique instead. -pub trait SocketDescriptor : cmp::Eq + hash::Hash + Clone { +pub trait SocketDescriptor: cmp::Eq + hash::Hash + Clone { /// Attempts to send some data from the given slice to the peer. /// /// Returns the amount of data which was sent, possibly 0 if the socket has since disconnected. @@ -509,7 +636,7 @@ pub struct PeerDetails { /// generate no further read_event/write_buffer_space_avail/socket_disconnected calls for the /// descriptor. #[derive(Clone)] -pub struct PeerHandleError { } +pub struct PeerHandleError {} impl fmt::Debug for PeerHandleError { fn fmt(&self, formatter: &mut fmt::Formatter) -> Result<(), fmt::Error> { formatter.write_str("Peer Sent Invalid Data") @@ -522,7 +649,7 @@ impl fmt::Display for PeerHandleError { } /// Internal struct for keeping track of the gossip syncing progress with a given peer -enum InitSyncTracker{ +enum InitSyncTracker { /// Only sync ad-hoc gossip as it comes in, do not send historical gossip. /// Upon receipt of a GossipTimestampFilter message, this is the default initial state if the /// contained timestamp is less than 6 hours old. 
@@ -547,7 +674,8 @@ const FORWARD_INIT_SYNC_BUFFER_LIMIT_RATIO: usize = 2; const OUTBOUND_BUFFER_LIMIT_READ_PAUSE: usize = 12; /// When the outbound buffer has this many messages, we'll simply skip relaying gossip messages to /// the peer. -const OUTBOUND_BUFFER_LIMIT_DROP_GOSSIP: usize = OUTBOUND_BUFFER_LIMIT_READ_PAUSE * FORWARD_INIT_SYNC_BUFFER_LIMIT_RATIO; +const OUTBOUND_BUFFER_LIMIT_DROP_GOSSIP: usize = + OUTBOUND_BUFFER_LIMIT_READ_PAUSE * FORWARD_INIT_SYNC_BUFFER_LIMIT_RATIO; /// If we've sent a ping, and are still awaiting a response, we may need to churn our way through /// the socket receive buffer before receiving the ping. @@ -635,11 +763,14 @@ impl Peer { /// point and we shouldn't send it yet to avoid sending duplicate updates. If we've already /// sent the old versions, we should send the update, and so return true here. fn should_forward_channel_announcement(&self, channel_id: u64) -> bool { - if !self.handshake_complete() { return false; } - if self.their_features.as_ref().unwrap().supports_gossip_queries() && - !self.sent_gossip_timestamp_filter { - return false; - } + if !self.handshake_complete() { + return false; + } + if self.their_features.as_ref().unwrap().supports_gossip_queries() + && !self.sent_gossip_timestamp_filter + { + return false; + } match self.sync_status { InitSyncTracker::NoSyncRequested => true, InitSyncTracker::ChannelsSyncing(i) => i < channel_id, @@ -649,15 +780,20 @@ impl Peer { /// Similar to the above, but for node announcements indexed by node_id. fn should_forward_node_announcement(&self, node_id: NodeId) -> bool { - if !self.handshake_complete() { return false; } - if self.their_features.as_ref().unwrap().supports_gossip_queries() && - !self.sent_gossip_timestamp_filter { - return false; - } + if !self.handshake_complete() { + return false; + } + if self.their_features.as_ref().unwrap().supports_gossip_queries() + && !self.sent_gossip_timestamp_filter + { + return false; + } match self.sync_status { InitSyncTracker::NoSyncRequested => true, InitSyncTracker::ChannelsSyncing(_) => false, - InitSyncTracker::NodesSyncing(sync_node_id) => sync_node_id.as_slice() < node_id.as_slice(), + InitSyncTracker::NodesSyncing(sync_node_id) => { + sync_node_id.as_slice() < node_id.as_slice() + }, } } @@ -667,14 +803,15 @@ impl Peer { if !gossip_processing_backlogged { self.received_channel_announce_since_backlogged = false; } - self.pending_outbound_buffer.len() < OUTBOUND_BUFFER_LIMIT_READ_PAUSE && - (!gossip_processing_backlogged || !self.received_channel_announce_since_backlogged) + self.pending_outbound_buffer.len() < OUTBOUND_BUFFER_LIMIT_READ_PAUSE + && (!gossip_processing_backlogged || !self.received_channel_announce_since_backlogged) } /// Determines if we should push additional gossip background sync (aka "backfill") onto a peer's /// outbound buffer. This is checked every time the peer's buffer may have been drained. fn should_buffer_gossip_backfill(&self) -> bool { - self.pending_outbound_buffer.is_empty() && self.gossip_broadcast_buffer.is_empty() + self.pending_outbound_buffer.is_empty() + && self.gossip_broadcast_buffer.is_empty() && self.msgs_sent_since_pong < BUFFER_DRAIN_MSGS_PER_TICK && self.handshake_complete() } @@ -682,14 +819,16 @@ impl Peer { /// Determines if we should push an onion message onto a peer's outbound buffer. This is checked /// every time the peer's buffer may have been drained. 
fn should_buffer_onion_message(&self) -> bool { - self.pending_outbound_buffer.is_empty() && self.handshake_complete() + self.pending_outbound_buffer.is_empty() + && self.handshake_complete() && self.msgs_sent_since_pong < BUFFER_DRAIN_MSGS_PER_TICK } /// Determines if we should push additional gossip broadcast messages onto a peer's outbound /// buffer. This is checked every time the peer's buffer may have been drained. fn should_buffer_gossip_broadcast(&self) -> bool { - self.pending_outbound_buffer.is_empty() && self.handshake_complete() + self.pending_outbound_buffer.is_empty() + && self.handshake_complete() && self.msgs_sent_since_pong < BUFFER_DRAIN_MSGS_PER_TICK } @@ -698,8 +837,9 @@ impl Peer { let total_outbound_buffered = self.gossip_broadcast_buffer.len() + self.pending_outbound_buffer.len(); - total_outbound_buffered > OUTBOUND_BUFFER_LIMIT_DROP_GOSSIP || - self.msgs_sent_since_pong > BUFFER_DRAIN_MSGS_PER_TICK * FORWARD_INIT_SYNC_BUFFER_LIMIT_RATIO + total_outbound_buffered > OUTBOUND_BUFFER_LIMIT_DROP_GOSSIP + || self.msgs_sent_since_pong + > BUFFER_DRAIN_MSGS_PER_TICK * FORWARD_INIT_SYNC_BUFFER_LIMIT_RATIO } fn set_their_node_id(&mut self, node_id: PublicKey) { @@ -722,7 +862,7 @@ pub type SimpleArcPeerManager = PeerManager< Arc>, Arc, IgnoringMessageHandler, - Arc + Arc, >; /// SimpleRefPeerManager is a type alias for a PeerManager reference, and is the reference @@ -747,7 +887,6 @@ pub type SimpleRefPeerManager< &'c KeysManager >; - /// A generic trait which is implemented for all [`PeerManager`]s. This makes bounding functions or /// structs on any [`PeerManager`] much simpler as only this trait is needed as a bound, rather /// than the full set of bounds on [`PeerManager`] itself. @@ -758,23 +897,33 @@ pub type SimpleRefPeerManager< pub trait APeerManager { type Descriptor: SocketDescriptor; type CMT: ChannelMessageHandler + ?Sized; - type CM: Deref; + type CM: Deref; type RMT: RoutingMessageHandler + ?Sized; - type RM: Deref; + type RM: Deref; type OMT: OnionMessageHandler + ?Sized; - type OM: Deref; + type OM: Deref; type LT: Logger + ?Sized; - type L: Deref; + type L: Deref; type CMHT: CustomMessageHandler + ?Sized; - type CMH: Deref; + type CMH: Deref; type NST: NodeSigner + ?Sized; - type NS: Deref; + type NS: Deref; /// Gets a reference to the underlying [`PeerManager`]. - fn as_ref(&self) -> &PeerManager; + fn as_ref( + &self, + ) -> &PeerManager; } -impl -APeerManager for PeerManager where +impl< + Descriptor: SocketDescriptor, + CM: Deref, + RM: Deref, + OM: Deref, + L: Deref, + CMH: Deref, + NS: Deref, + > APeerManager for PeerManager +where CM::Target: ChannelMessageHandler, RM::Target: RoutingMessageHandler, OM::Target: OnionMessageHandler, @@ -795,7 +944,9 @@ APeerManager for PeerManager where type CMH = CMH; type NST = ::Target; type NS = NS; - fn as_ref(&self) -> &PeerManager { self } + fn as_ref(&self) -> &PeerManager { + self + } } /// A PeerManager manages a set of peers, described by their [`SocketDescriptor`] and marshalls @@ -817,13 +968,22 @@ APeerManager for PeerManager where /// you're using lightning-net-tokio. 
/// /// [`read_event`]: PeerManager::read_event -pub struct PeerManager where - CM::Target: ChannelMessageHandler, - RM::Target: RoutingMessageHandler, - OM::Target: OnionMessageHandler, - L::Target: Logger, - CMH::Target: CustomMessageHandler, - NS::Target: NodeSigner { +pub struct PeerManager< + Descriptor: SocketDescriptor, + CM: Deref, + RM: Deref, + OM: Deref, + L: Deref, + CMH: Deref, + NS: Deref, +> where + CM::Target: ChannelMessageHandler, + RM::Target: RoutingMessageHandler, + OM::Target: OnionMessageHandler, + L::Target: Logger, + CMH::Target: CustomMessageHandler, + NS::Target: NodeSigner, +{ message_handler: MessageHandler, /// Connection state for each connected peer - we have an outer read-write lock which is taken /// as read while we're doing processing for a peer and taken write when a peer is being added @@ -866,7 +1026,7 @@ pub struct PeerManager + secp_ctx: Secp256k1, } enum LogicalMessage { @@ -896,14 +1056,17 @@ macro_rules! encode_msg { let mut buffer = VecWriter(Vec::with_capacity(MSG_BUF_ALLOC_SIZE)); wire::write($msg, &mut buffer).unwrap(); buffer.0 - }} + }}; } -impl PeerManager where - CM::Target: ChannelMessageHandler, - OM::Target: OnionMessageHandler, - L::Target: Logger, - NS::Target: NodeSigner { +impl + PeerManager +where + CM::Target: ChannelMessageHandler, + OM::Target: OnionMessageHandler, + L::Target: Logger, + NS::Target: NodeSigner, +{ /// Constructs a new `PeerManager` with the given `ChannelMessageHandler` and /// `OnionMessageHandler`. No routing message handler is used and network graph messages are /// ignored. @@ -917,20 +1080,39 @@ impl Pe /// minute should suffice. /// /// This is not exported to bindings users as we can't export a PeerManager with a dummy route handler - pub fn new_channel_only(channel_message_handler: CM, onion_message_handler: OM, current_time: u32, ephemeral_random_data: &[u8; 32], logger: L, node_signer: NS) -> Self { - Self::new(MessageHandler { - chan_handler: channel_message_handler, - route_handler: IgnoringMessageHandler{}, - onion_message_handler, - custom_message_handler: IgnoringMessageHandler{}, - }, current_time, ephemeral_random_data, logger, node_signer) + pub fn new_channel_only( + channel_message_handler: CM, onion_message_handler: OM, current_time: u32, + ephemeral_random_data: &[u8; 32], logger: L, node_signer: NS, + ) -> Self { + Self::new( + MessageHandler { + chan_handler: channel_message_handler, + route_handler: IgnoringMessageHandler {}, + onion_message_handler, + custom_message_handler: IgnoringMessageHandler {}, + }, + current_time, + ephemeral_random_data, + logger, + node_signer, + ) } } -impl PeerManager where - RM::Target: RoutingMessageHandler, - L::Target: Logger, - NS::Target: NodeSigner { +impl + PeerManager< + Descriptor, + ErroringMessageHandler, + RM, + IgnoringMessageHandler, + L, + IgnoringMessageHandler, + NS, + > where + RM::Target: RoutingMessageHandler, + L::Target: Logger, + NS::Target: NodeSigner, +{ /// Constructs a new `PeerManager` with the given `RoutingMessageHandler`. No channel message /// handler or onion message handler is used and onion and channel messages will be ignored (or /// generate error messages). 
Note that some other lightning implementations time-out connections @@ -945,13 +1127,22 @@ impl PeerManager Self { - Self::new(MessageHandler { - chan_handler: ErroringMessageHandler::new(), - route_handler: routing_message_handler, - onion_message_handler: IgnoringMessageHandler{}, - custom_message_handler: IgnoringMessageHandler{}, - }, current_time, ephemeral_random_data, logger, node_signer) + pub fn new_routing_only( + routing_message_handler: RM, current_time: u32, ephemeral_random_data: &[u8; 32], + logger: L, node_signer: NS, + ) -> Self { + Self::new( + MessageHandler { + chan_handler: ErroringMessageHandler::new(), + route_handler: routing_message_handler, + onion_message_handler: IgnoringMessageHandler {}, + custom_message_handler: IgnoringMessageHandler {}, + }, + current_time, + ephemeral_random_data, + logger, + node_signer, + ) } } @@ -962,7 +1153,11 @@ impl PeerManager(&'a Option<(PublicKey, NodeId)>); impl core::fmt::Display for OptionalFromDebugger<'_> { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { - if let Some((node_id, _)) = self.0 { write!(f, " from {}", log_pubkey!(node_id)) } else { Ok(()) } + if let Some((node_id, _)) = self.0 { + write!(f, " from {}", log_pubkey!(node_id)) + } else { + Ok(()) + } } } @@ -970,39 +1165,51 @@ impl core::fmt::Display for OptionalFromDebugger<'_> { /// /// fn filter_addresses(ip_address: Option) -> Option { - match ip_address{ + match ip_address { // For IPv4 range 10.0.0.0 - 10.255.255.255 (10/8) - Some(SocketAddress::TcpIpV4{addr: [10, _, _, _], port: _}) => None, + Some(SocketAddress::TcpIpV4 { addr: [10, _, _, _], port: _ }) => None, // For IPv4 range 0.0.0.0 - 0.255.255.255 (0/8) - Some(SocketAddress::TcpIpV4{addr: [0, _, _, _], port: _}) => None, + Some(SocketAddress::TcpIpV4 { addr: [0, _, _, _], port: _ }) => None, // For IPv4 range 100.64.0.0 - 100.127.255.255 (100.64/10) - Some(SocketAddress::TcpIpV4{addr: [100, 64..=127, _, _], port: _}) => None, + Some(SocketAddress::TcpIpV4 { addr: [100, 64..=127, _, _], port: _ }) => None, // For IPv4 range 127.0.0.0 - 127.255.255.255 (127/8) - Some(SocketAddress::TcpIpV4{addr: [127, _, _, _], port: _}) => None, + Some(SocketAddress::TcpIpV4 { addr: [127, _, _, _], port: _ }) => None, // For IPv4 range 169.254.0.0 - 169.254.255.255 (169.254/16) - Some(SocketAddress::TcpIpV4{addr: [169, 254, _, _], port: _}) => None, + Some(SocketAddress::TcpIpV4 { addr: [169, 254, _, _], port: _ }) => None, // For IPv4 range 172.16.0.0 - 172.31.255.255 (172.16/12) - Some(SocketAddress::TcpIpV4{addr: [172, 16..=31, _, _], port: _}) => None, + Some(SocketAddress::TcpIpV4 { addr: [172, 16..=31, _, _], port: _ }) => None, // For IPv4 range 192.168.0.0 - 192.168.255.255 (192.168/16) - Some(SocketAddress::TcpIpV4{addr: [192, 168, _, _], port: _}) => None, + Some(SocketAddress::TcpIpV4 { addr: [192, 168, _, _], port: _ }) => None, // For IPv4 range 192.88.99.0 - 192.88.99.255 (192.88.99/24) - Some(SocketAddress::TcpIpV4{addr: [192, 88, 99, _], port: _}) => None, + Some(SocketAddress::TcpIpV4 { addr: [192, 88, 99, _], port: _ }) => None, // For IPv6 range 2000:0000:0000:0000:0000:0000:0000:0000 - 3fff:ffff:ffff:ffff:ffff:ffff:ffff:ffff (2000::/3) - Some(SocketAddress::TcpIpV6{addr: [0x20..=0x3F, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _], port: _}) => ip_address, + Some(SocketAddress::TcpIpV6 { + addr: [0x20..=0x3F, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _], + port: _, + }) => ip_address, // For remaining addresses - Some(SocketAddress::TcpIpV6{addr: _, port: _}) => None, 
+ Some(SocketAddress::TcpIpV6 { addr: _, port: _ }) => None, Some(..) => ip_address, None => None, } } -impl PeerManager where - CM::Target: ChannelMessageHandler, - RM::Target: RoutingMessageHandler, - OM::Target: OnionMessageHandler, - L::Target: Logger, - CMH::Target: CustomMessageHandler, - NS::Target: NodeSigner +impl< + Descriptor: SocketDescriptor, + CM: Deref, + RM: Deref, + OM: Deref, + L: Deref, + CMH: Deref, + NS: Deref, + > PeerManager +where + CM::Target: ChannelMessageHandler, + RM::Target: RoutingMessageHandler, + OM::Target: OnionMessageHandler, + L::Target: Logger, + CMH::Target: CustomMessageHandler, + NS::Target: NodeSigner, { /// Constructs a new `PeerManager` with the given message handlers. /// @@ -1013,7 +1220,10 @@ impl, current_time: u32, ephemeral_random_data: &[u8; 32], logger: L, node_signer: NS) -> Self { + pub fn new( + message_handler: MessageHandler, current_time: u32, + ephemeral_random_data: &[u8; 32], logger: L, node_signer: NS, + ) -> Self { let mut ephemeral_key_midstate = Sha256::engine(); ephemeral_key_midstate.input(ephemeral_random_data); @@ -1096,7 +1306,8 @@ impl InitFeatures { @@ -1121,8 +1332,12 @@ impl) -> Result, PeerHandleError> { - let mut peer_encryptor = PeerChannelEncryptor::new_outbound(their_node_id.clone(), self.get_ephemeral_key()); + pub fn new_outbound_connection( + &self, their_node_id: PublicKey, descriptor: Descriptor, + remote_network_address: Option, + ) -> Result, PeerHandleError> { + let mut peer_encryptor = + PeerChannelEncryptor::new_outbound(their_node_id.clone(), self.get_ephemeral_key()); let res = peer_encryptor.get_act_one(&self.secp_ctx).to_vec(); let pending_read_buffer = [0; 50].to_vec(); // Noise act two is 50 bytes @@ -1161,7 +1376,7 @@ impl) -> Result<(), PeerHandleError> { + pub fn new_inbound_connection( + &self, descriptor: Descriptor, remote_network_address: Option, + ) -> Result<(), PeerHandleError> { let peer_encryptor = PeerChannelEncryptor::new_inbound(&self.node_signer); let pending_read_buffer = [0; 50].to_vec(); // Noise act one is 50 bytes @@ -1219,7 +1436,7 @@ impl { @@ -1305,7 +1529,7 @@ impl buff, }; @@ -1342,20 +1566,22 @@ impl Result<(), PeerHandleError> { + pub fn write_buffer_space_avail( + &self, descriptor: &mut Descriptor, + ) -> Result<(), PeerHandleError> { let peers = self.peers.read().unwrap(); match peers.get(descriptor) { None => { // This is most likely a simple race condition where the user found that the socket // was writeable, then we told the user to `disconnect_socket()`, then they called // this method. Return an error to make sure we get disconnected. 
- return Err(PeerHandleError { }); + return Err(PeerHandleError {}); }, Some(peer_mutex) => { let mut peer = peer_mutex.lock().unwrap(); peer.awaiting_write_event = false; self.do_attempt_write_data(descriptor, &mut peer, false); - } + }, }; Ok(()) } @@ -1377,13 +1603,15 @@ impl Result { + pub fn read_event( + &self, peer_descriptor: &mut Descriptor, data: &[u8], + ) -> Result { match self.do_read_event(peer_descriptor, data) { Ok(res) => Ok(res), Err(e) => { self.disconnect_event_internal(peer_descriptor, "of a protocol error"); Err(e) - } + }, } } @@ -1408,7 +1636,9 @@ impl Result { + fn do_read_event( + &self, peer_descriptor: &mut Descriptor, data: &[u8], + ) -> Result { let mut pause_read = false; let peers = self.peers.read().unwrap(); let mut msgs_to_forward = Vec::new(); @@ -1481,8 +1711,13 @@ impl peer.pending_read_buffer_pos); { - let data_to_copy = cmp::min(peer.pending_read_buffer.len() - peer.pending_read_buffer_pos, data.len() - read_pos); - peer.pending_read_buffer[peer.pending_read_buffer_pos..peer.pending_read_buffer_pos + data_to_copy].copy_from_slice(&data[read_pos..read_pos + data_to_copy]); + let data_to_copy = cmp::min( + peer.pending_read_buffer.len() - peer.pending_read_buffer_pos, + data.len() - read_pos, + ); + peer.pending_read_buffer + [peer.pending_read_buffer_pos..peer.pending_read_buffer_pos + data_to_copy] + .copy_from_slice(&data[read_pos..read_pos + data_to_copy]); read_pos += data_to_copy; peer.pending_read_buffer_pos += data_to_copy; } @@ -1518,20 +1753,25 @@ impl { let act_two = try_potential_handleerror!( peer, - peer.channel_encryptor.process_act_one_with_keys(&peer.pending_read_buffer[..], - &self.node_signer, - self.get_ephemeral_key(), - &self.secp_ctx - )).to_vec(); + peer.channel_encryptor.process_act_one_with_keys( + &peer.pending_read_buffer[..], + &self.node_signer, + self.get_ephemeral_key(), + &self.secp_ctx + ) + ) + .to_vec(); peer.pending_outbound_buffer.push_back(act_two); peer.pending_read_buffer = [0; 66].to_vec(); // act three is 66 bytes long }, NextNoiseStep::ActTwo => { let (act_three, their_node_id) = try_potential_handleerror!( peer, - peer.channel_encryptor.process_act_two(&peer.pending_read_buffer[..], - &self.node_signer - )); + peer.channel_encryptor.process_act_two( + &peer.pending_read_buffer[..], + &self.node_signer + ) + ); peer.pending_outbound_buffer.push_back(act_three.to_vec()); peer.pending_read_buffer = [0; 18].to_vec(); // Message length header is 18 bytes peer.pending_read_is_header = true; @@ -1540,13 +1780,20 @@ impl { let their_node_id = try_potential_handleerror!( peer, - peer.channel_encryptor.process_act_three(&peer.pending_read_buffer[..]) + peer.channel_encryptor + .process_act_three(&peer.pending_read_buffer[..]) ); peer.pending_read_buffer = [0; 18].to_vec(); // Message length header is 18 bytes peer.pending_read_is_header = true; @@ -1554,40 +1801,55 @@ impl { if peer.pending_read_is_header { let msg_len = try_potential_handleerror!( peer, - peer.channel_encryptor.decrypt_length_header(&peer.pending_read_buffer[..]) + peer.channel_encryptor + .decrypt_length_header(&peer.pending_read_buffer[..]) ); - if peer.pending_read_buffer.capacity() > 8192 { peer.pending_read_buffer = Vec::new(); } + if peer.pending_read_buffer.capacity() > 8192 { + peer.pending_read_buffer = Vec::new(); + } peer.pending_read_buffer.resize(msg_len as usize + 16, 0); - if msg_len < 2 { // Need at least the message type tag - return Err(PeerHandleError { }); + if msg_len < 2 { + // Need at least the message type tag + return 
Err(PeerHandleError {}); } peer.pending_read_is_header = false; } else { debug_assert!(peer.pending_read_buffer.len() >= 2 + 16); try_potential_handleerror!( peer, - peer.channel_encryptor.decrypt_message(&mut peer.pending_read_buffer[..]) + peer.channel_encryptor + .decrypt_message(&mut peer.pending_read_buffer[..]) ); let message_result = wire::read( - &mut &peer.pending_read_buffer[..peer.pending_read_buffer.len() - 16], - &*self.message_handler.custom_message_handler - ); + &mut &peer.pending_read_buffer + [..peer.pending_read_buffer.len() - 16], + &*self.message_handler.custom_message_handler, + ); // Reset read buffer - if peer.pending_read_buffer.capacity() > 8192 { peer.pending_read_buffer = Vec::new(); } + if peer.pending_read_buffer.capacity() > 8192 { + peer.pending_read_buffer = Vec::new(); + } peer.pending_read_buffer.resize(18, 0); peer.pending_read_is_header = true; let their_node_id = peer.their_node_id.map(|p| p.0); - let logger = WithContext::from(&self.logger, their_node_id, None, None); + let logger = + WithContext::from(&self.logger, their_node_id, None, None); let message = match message_result { Ok(x) => x, Err(e) => { @@ -1596,52 +1858,64 @@ impl { + ( + msgs::DecodeError::UnknownRequiredFeature, + Some(ty), + ) if is_gossip_msg(ty) => { log_gossip!(logger, "Got a channel/node announcement with an unknown required feature flag, you may want to update!"); continue; - } + }, (msgs::DecodeError::UnsupportedCompression, _) => { log_gossip!(logger, "We don't support zlib-compressed message fields, sending a warning and ignoring message"); let channel_id = ChannelId::new_zero(); - let data = "Unsupported message compression: zlib".to_owned(); + let data = "Unsupported message compression: zlib" + .to_owned(); let msg = msgs::WarningMessage { channel_id, data }; self.enqueue_message(peer, &msg); continue; - } + }, (_, Some(ty)) if is_gossip_msg(ty) => { log_gossip!(logger, "Got an invalid value while deserializing a gossip message"); let channel_id = ChannelId::new_zero(); - let data = format!("Unreadable/bogus gossip message of type {}", ty); - let msg = msgs::WarningMessage { - channel_id, - data, - }; + let data = format!( + "Unreadable/bogus gossip message of type {}", + ty + ); + let msg = msgs::WarningMessage { channel_id, data }; self.enqueue_message(peer, &msg); continue; - } + }, (msgs::DecodeError::UnknownRequiredFeature, _) => { log_debug!(logger, "Received a message with an unknown required feature flag or TLV, you may want to update!"); - return Err(PeerHandleError { }); - } - (msgs::DecodeError::UnknownVersion, _) => return Err(PeerHandleError { }), + return Err(PeerHandleError {}); + }, + (msgs::DecodeError::UnknownVersion, _) => { + return Err(PeerHandleError {}) + }, (msgs::DecodeError::InvalidValue, _) => { log_debug!(logger, "Got an invalid value while deserializing message"); - return Err(PeerHandleError { }); - } + return Err(PeerHandleError {}); + }, (msgs::DecodeError::ShortRead, _) => { log_debug!(logger, "Deserialization failed due to shortness of message"); - return Err(PeerHandleError { }); - } - (msgs::DecodeError::BadLengthDescriptor, _) => return Err(PeerHandleError { }), - (msgs::DecodeError::Io(_), _) => return Err(PeerHandleError { }), - (msgs::DecodeError::DangerousValue, _) => return Err(PeerHandleError { }), + return Err(PeerHandleError {}); + }, + (msgs::DecodeError::BadLengthDescriptor, _) => { + return Err(PeerHandleError {}) + }, + (msgs::DecodeError::Io(_), _) => { + return Err(PeerHandleError {}) + }, + 
(msgs::DecodeError::DangerousValue, _) => { + return Err(PeerHandleError {}) + }, } - } + }, }; msg_to_handle = Some(message); } - } + }, } } pause_read = !self.peer_should_read(peer); @@ -1649,7 +1923,7 @@ impl match handling_error { - MessageHandlingError::PeerHandleError(e) => { return Err(e) }, + MessageHandlingError::PeerHandleError(e) => return Err(e), MessageHandlingError::LightningError(e) => { try_potential_handleerror!(&mut peer_mutex.lock().unwrap(), Err(e)); }, @@ -1669,7 +1943,12 @@ impl, - peer_lock: MutexGuard, - message: wire::Message<<::Target as wire::CustomMessageReader>::CustomMessage> - ) -> Result::Target as wire::CustomMessageReader>::CustomMessage>>, MessageHandlingError> { - let their_node_id = peer_lock.their_node_id.expect("We know the peer's public key by the time we receive messages").0; + &self, peer_mutex: &Mutex, peer_lock: MutexGuard, + message: wire::Message< + <::Target as wire::CustomMessageReader>::CustomMessage, + >, + ) -> Result< + Option::Target as wire::CustomMessageReader>::CustomMessage>>, + MessageHandlingError, + > { + let their_node_id = peer_lock + .their_node_id + .expect("We know the peer's public key by the time we receive messages") + .0; let logger = WithContext::from(&self.logger, Some(their_node_id), None, None); - let unprocessed_message = self.do_handle_message_holding_peer_lock(peer_lock, message, their_node_id, &logger)?; + let unprocessed_message = + self.do_handle_message_holding_peer_lock(peer_lock, message, their_node_id, &logger)?; self.message_handler.chan_handler.message_received(); match unprocessed_message { - Some(LogicalMessage::FromWire(message)) => { - self.do_handle_message_without_peer_lock(peer_mutex, message, their_node_id, &logger) - }, + Some(LogicalMessage::FromWire(message)) => self.do_handle_message_without_peer_lock( + peer_mutex, + message, + their_node_id, + &logger, + ), Some(LogicalMessage::CommitmentSignedBatch(channel_id, batch)) => { - log_trace!(logger, "Received commitment_signed batch {:?} from {}", batch, log_pubkey!(their_node_id)); + log_trace!( + logger, + "Received commitment_signed batch {:?} from {}", + batch, + log_pubkey!(their_node_id) + ); let chan_handler = &self.message_handler.chan_handler; chan_handler.handle_commitment_signed_batch(their_node_id, channel_id, batch); return Ok(None); @@ -1710,13 +2004,17 @@ impl( - &self, - mut peer_lock: MutexGuard, - message: wire::Message<<::Target as wire::CustomMessageReader>::CustomMessage>, - their_node_id: PublicKey, - logger: &WithContext<'a, L> - ) -> Result::Target as wire::CustomMessageReader>::CustomMessage>>, MessageHandlingError> - { + &self, mut peer_lock: MutexGuard, + message: wire::Message< + <::Target as wire::CustomMessageReader>::CustomMessage, + >, + their_node_id: PublicKey, logger: &WithContext<'a, L>, + ) -> Result< + Option< + LogicalMessage<<::Target as wire::CustomMessageReader>::CustomMessage>, + >, + MessageHandlingError, + > { peer_lock.received_message_since_timer_tick = true; // Need an Init as first message @@ -1736,29 +2034,42 @@ impl { entry.insert(msg); }, + btree_map::Entry::Vacant(entry) => { + entry.insert(msg); + }, btree_map::Entry::Occupied(_) => { log_debug!(logger, "Peer {} sent batched commitment_signed with duplicate funding_txid {} for channel {}", log_pubkey!(their_node_id), channel_id, &batch.funding_txid); - return Err(PeerHandleError { }.into()); - } + return Err(PeerHandleError {}.into()); + }, } if buffer.len() >= batch_size { - let (channel_id, batch) = 
peer_lock.commitment_signed_batch.take().expect("batch should have been inserted"); + let (channel_id, batch) = peer_lock + .commitment_signed_batch + .take() + .expect("batch should have been inserted"); return Ok(Some(LogicalMessage::CommitmentSignedBatch(channel_id, batch))); } else { return Ok(None); } } else if peer_lock.commitment_signed_batch.is_some() { log_debug!(logger, "Peer {} sent non-batched commitment_signed for channel {} when expecting batched commitment_signed", log_pubkey!(their_node_id), &msg.channel_id); - return Err(PeerHandleError { }.into()); + return Err(PeerHandleError {}.into()); } else { return Ok(Some(LogicalMessage::FromWire(wire::Message::CommitmentSigned(msg)))); } } else if peer_lock.commitment_signed_batch.is_some() { log_debug!(logger, "Peer {} sent non-commitment_signed message when expecting batched commitment_signed", log_pubkey!(their_node_id)); - return Err(PeerHandleError { }.into()); + return Err(PeerHandleError {}.into()); } if let wire::Message::GossipTimestampFilter(_msg) = message { // When supporting gossip messages, start initial gossip sync only after we receive // a GossipTimestampFilter - if peer_lock.their_features.as_ref().unwrap().supports_gossip_queries() && - !peer_lock.sent_gossip_timestamp_filter { + if peer_lock.their_features.as_ref().unwrap().supports_gossip_queries() + && !peer_lock.sent_gossip_timestamp_filter + { peer_lock.sent_gossip_timestamp_filter = true; #[allow(unused_mut)] @@ -1860,7 +2198,10 @@ impl 1970").as_secs() - 6 * 3600; + let full_sync_threshold = SystemTime::now() + .duration_since(UNIX_EPOCH) + .expect("Time must be > 1970") + .as_secs() - 6 * 3600; if (_msg.first_timestamp as u64) > full_sync_threshold { should_do_full_sync = false; } @@ -1885,17 +2226,29 @@ impl( - &self, - peer_mutex: &Mutex, - message: wire::Message<<::Target as wire::CustomMessageReader>::CustomMessage>, - their_node_id: PublicKey, - logger: &WithContext<'a, L> - ) -> Result::Target as wire::CustomMessageReader>::CustomMessage>>, MessageHandlingError> - { + &self, peer_mutex: &Mutex, + message: wire::Message< + <::Target as wire::CustomMessageReader>::CustomMessage, + >, + their_node_id: PublicKey, logger: &WithContext<'a, L>, + ) -> Result< + Option::Target as wire::CustomMessageReader>::CustomMessage>>, + MessageHandlingError, + > { if is_gossip_msg(message.type_id()) { - log_gossip!(logger, "Received message {:?} from {}", message, log_pubkey!(their_node_id)); + log_gossip!( + logger, + "Received message {:?} from {}", + message, + log_pubkey!(their_node_id) + ); } else { - log_trace!(logger, "Received message {:?} from {}", message, log_pubkey!(their_node_id)); + log_trace!( + logger, + "Received message {:?} from {}", + message, + log_pubkey!(their_node_id) + ); } let mut should_forward = None; @@ -1909,14 +2262,24 @@ impl { - log_debug!(logger, "Got Err message from {}: {}", log_pubkey!(their_node_id), PrintableString(&msg.data)); + log_debug!( + logger, + "Got Err message from {}: {}", + log_pubkey!(their_node_id), + PrintableString(&msg.data) + ); self.message_handler.chan_handler.handle_error(their_node_id, &msg); if msg.channel_id.is_zero() { - return Err(PeerHandleError { }.into()); + return Err(PeerHandleError {}.into()); } }, wire::Message::Warning(msg) => { - log_debug!(logger, "Got warning message from {}: {}", log_pubkey!(their_node_id), PrintableString(&msg.data)); + log_debug!( + logger, + "Got warning message from {}: {}", + log_pubkey!(their_node_id), + PrintableString(&msg.data) + ); }, wire::Message::Ping(msg) => { 
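(Aside: the commitment_signed batching that the hunks above only reformat can be summarized by a small standalone model. This is an illustrative sketch, not part of this patch; `BatchBuffer`, `FundingTxid` and `Msg` are stand-ins for the real LDK types, which key the batch by `funding_txid` inside the per-peer state.)

use std::collections::BTreeMap;

// Simplified model of the batching above: messages are keyed by funding txid,
// a duplicate key is a protocol error, and the batch is released to the
// channel handler in one call once `batch_size` messages have arrived.
struct BatchBuffer<FundingTxid: Ord, Msg> {
	batch_size: usize,
	buffer: BTreeMap<FundingTxid, Msg>,
}

impl<FundingTxid: Ord, Msg> BatchBuffer<FundingTxid, Msg> {
	// Returns Ok(Some(batch)) once the batch is complete, Ok(None) while
	// still buffering, and Err(()) on a duplicate funding txid (which, in the
	// code above, gets the peer disconnected).
	fn push(
		&mut self, txid: FundingTxid, msg: Msg,
	) -> Result<Option<BTreeMap<FundingTxid, Msg>>, ()> {
		use std::collections::btree_map::Entry;
		match self.buffer.entry(txid) {
			Entry::Vacant(e) => {
				e.insert(msg);
			},
			Entry::Occupied(_) => return Err(()),
		}
		if self.buffer.len() >= self.batch_size {
			Ok(Some(core::mem::take(&mut self.buffer)))
		} else {
			Ok(None)
		}
	}
}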
@@ -1964,21 +2327,21 @@ impl { self.message_handler.chan_handler.handle_stfu(their_node_id, &msg); - } + }, #[cfg(splicing)] // Splicing messages: wire::Message::SpliceInit(msg) => { self.message_handler.chan_handler.handle_splice_init(their_node_id, &msg); - } + }, #[cfg(splicing)] wire::Message::SpliceAck(msg) => { self.message_handler.chan_handler.handle_splice_ack(their_node_id, &msg); - } + }, #[cfg(splicing)] wire::Message::SpliceLocked(msg) => { self.message_handler.chan_handler.handle_splice_locked(their_node_id, &msg); - } + }, // Interactive transaction construction messages: wire::Message::TxAddInput(msg) => { @@ -2007,7 +2370,7 @@ impl { self.message_handler.chan_handler.handle_tx_abort(their_node_id, &msg); - } + }, wire::Message::Shutdown(msg) => { self.message_handler.chan_handler.handle_shutdown(their_node_id, &msg); @@ -2051,24 +2414,30 @@ impl { let route_handler = &self.message_handler.route_handler; - if route_handler.handle_channel_announcement(Some(their_node_id), &msg) - .map_err(|e| -> MessageHandlingError { e.into() })? { + if route_handler + .handle_channel_announcement(Some(their_node_id), &msg) + .map_err(|e| -> MessageHandlingError { e.into() })? + { should_forward = Some(wire::Message::ChannelAnnouncement(msg)); } self.update_gossip_backlogged(); }, wire::Message::NodeAnnouncement(msg) => { let route_handler = &self.message_handler.route_handler; - if route_handler.handle_node_announcement(Some(their_node_id), &msg) - .map_err(|e| -> MessageHandlingError { e.into() })? { + if route_handler + .handle_node_announcement(Some(their_node_id), &msg) + .map_err(|e| -> MessageHandlingError { e.into() })? + { should_forward = Some(wire::Message::NodeAnnouncement(msg)); } self.update_gossip_backlogged(); }, wire::Message::ChannelUpdate(msg) => { let route_handler = &self.message_handler.route_handler; - if route_handler.handle_channel_update(Some(their_node_id), &msg) - .map_err(|e| -> MessageHandlingError { e.into() })? { + if route_handler + .handle_channel_update(Some(their_node_id), &msg) + .map_err(|e| -> MessageHandlingError { e.into() })? 
+ { should_forward = Some(wire::Message::ChannelUpdate(msg)); } self.update_gossip_backlogged(); @@ -2098,8 +2467,12 @@ impl { - log_debug!(logger, "Received unknown even message of type {}, disconnecting peer!", type_id); - return Err(PeerHandleError { }.into()); + log_debug!( + logger, + "Received unknown even message of type {}, disconnecting peer!", + type_id + ); + return Err(PeerHandleError {}.into()); }, wire::Message::Unknown(type_id) => { log_trace!(logger, "Received unknown odd message of type {}, ignoring", type_id); @@ -2130,45 +2503,67 @@ impl { - log_gossip!(self.logger, "Sending message to all peers except {:?} or the announced node: {:?}", except_node, msg); + log_gossip!( + self.logger, + "Sending message to all peers except {:?} or the announced node: {:?}", + except_node, + msg + ); let encoded_msg = encode_msg!(msg); for (_, peer_mutex) in peers.iter() { let mut peer = peer_mutex.lock().unwrap(); - if !peer.handshake_complete() || - !peer.should_forward_node_announcement(msg.contents.node_id) { - continue + if !peer.handshake_complete() + || !peer.should_forward_node_announcement(msg.contents.node_id) + { + continue; } debug_assert!(peer.their_node_id.is_some()); debug_assert!(peer.channel_encryptor.is_ready_for_encryption()); let their_node_id = peer.their_node_id.map(|p| p.0); let logger = WithContext::from(&self.logger, their_node_id, None, None); if peer.buffer_full_drop_gossip_broadcast() && !allow_large_buffer { - log_gossip!(logger, "Skipping broadcast message to {:?} as its outbound buffer is full", peer.their_node_id); + log_gossip!( + logger, + "Skipping broadcast message to {:?} as its outbound buffer is full", + peer.their_node_id + ); continue; } if let Some((_, their_node_id)) = peer.their_node_id { @@ -2176,37 +2571,59 @@ impl { - log_gossip!(self.logger, "Sending message to all peers except {:?}: {:?}", except_node, msg); + log_gossip!( + self.logger, + "Sending message to all peers except {:?}: {:?}", + except_node, + msg + ); let encoded_msg = encode_msg!(msg); for (_, peer_mutex) in peers.iter() { let mut peer = peer_mutex.lock().unwrap(); - if !peer.handshake_complete() || - !peer.should_forward_channel_announcement(msg.contents.short_channel_id) { - continue + if !peer.handshake_complete() + || !peer.should_forward_channel_announcement(msg.contents.short_channel_id) + { + continue; } debug_assert!(peer.their_node_id.is_some()); debug_assert!(peer.channel_encryptor.is_ready_for_encryption()); let their_node_id = peer.their_node_id.map(|p| p.0); let logger = WithContext::from(&self.logger, their_node_id, None, None); if peer.buffer_full_drop_gossip_broadcast() && !allow_large_buffer { - log_gossip!(logger, "Skipping broadcast message to {:?} as its outbound buffer is full", peer.their_node_id); + log_gossip!( + logger, + "Skipping broadcast message to {:?} as its outbound buffer is full", + peer.their_node_id + ); continue; } - if except_node.is_some() && peer.their_node_id.as_ref().map(|(pk, _)| pk) == except_node { + if except_node.is_some() + && peer.their_node_id.as_ref().map(|(pk, _)| pk) == except_node + { continue; } - self.enqueue_encoded_gossip_broadcast(&mut *peer, MessageBuf::from_encoded(&encoded_msg)); + self.enqueue_encoded_gossip_broadcast( + &mut *peer, + MessageBuf::from_encoded(&encoded_msg), + ); } }, - _ => debug_assert!(false, "We shouldn't attempt to forward anything but gossip messages"), + _ => { + debug_assert!(false, "We shouldn't attempt to forward anything but gossip messages") + }, } } @@ -2239,7 +2656,8 @@ impl { - { - if 
peers_to_disconnect.get($node_id).is_some() { - // If we've "disconnected" this peer, do not send to it. - None - } else { - let descriptor_opt = self.node_id_to_descriptor.lock().unwrap().get($node_id).cloned(); - match descriptor_opt { - Some(descriptor) => match peers.get(&descriptor) { - Some(peer_mutex) => { - let peer_lock = peer_mutex.lock().unwrap(); - if !peer_lock.handshake_complete() { - None - } else { - Some(peer_lock) - } - }, - None => { - debug_assert!(false, "Inconsistent peers set state!"); + ($node_id: expr) => {{ + if peers_to_disconnect.get($node_id).is_some() { + // If we've "disconnected" this peer, do not send to it. + None + } else { + let descriptor_opt = + self.node_id_to_descriptor.lock().unwrap().get($node_id).cloned(); + match descriptor_opt { + Some(descriptor) => match peers.get(&descriptor) { + Some(peer_mutex) => { + let peer_lock = peer_mutex.lock().unwrap(); + if !peer_lock.handshake_complete() { None + } else { + Some(peer_lock) } }, None => { + debug_assert!(false, "Inconsistent peers set state!"); None }, - } + }, + None => None, } } - } + }}; } let route_handler = &self.message_handler.route_handler; @@ -2289,11 +2704,19 @@ impl { - log_debug!(self.logger, "Handling SendPeerStorage event in peer_handler for {}", log_pubkey!(node_id)); + log_debug!( + self.logger, + "Handling SendPeerStorage event in peer_handler for {}", + log_pubkey!(node_id) + ); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, MessageSendEvent::SendPeerStorageRetrieval { ref node_id, ref msg } => { - log_debug!(self.logger, "Handling SendPeerStorageRetrieval event in peer_handler for {}", log_pubkey!(node_id)); + log_debug!( + self.logger, + "Handling SendPeerStorageRetrieval event in peer_handler for {}", + log_pubkey!(node_id) + ); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); }, MessageSendEvent::SendAcceptChannel { ref node_id, ref msg } => { @@ -2341,34 +2764,54 @@ impl { - let logger = WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id), None); + MessageSendEvent::SendStfu { ref node_id, ref msg } => { + let logger = WithContext::from( + &self.logger, + Some(*node_id), + Some(msg.channel_id), + None, + ); log_debug!(logger, "Handling SendStfu event in peer_handler for node {} for channel {}", log_pubkey!(node_id), &msg.channel_id); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); - } - MessageSendEvent::SendSpliceInit { ref node_id, ref msg} => { - let logger = WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id), None); + }, + MessageSendEvent::SendSpliceInit { ref node_id, ref msg } => { + let logger = WithContext::from( + &self.logger, + Some(*node_id), + Some(msg.channel_id), + None, + ); log_debug!(logger, "Handling SendSpliceInit event in peer_handler for node {} for channel {}", log_pubkey!(node_id), &msg.channel_id); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); - } - MessageSendEvent::SendSpliceAck { ref node_id, ref msg} => { - let logger = WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id), None); + }, + MessageSendEvent::SendSpliceAck { ref node_id, ref msg } => { + let logger = WithContext::from( + &self.logger, + Some(*node_id), + Some(msg.channel_id), + None, + ); log_debug!(logger, "Handling SendSpliceAck event in peer_handler for node {} for channel {}", log_pubkey!(node_id), &msg.channel_id); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); - } - MessageSendEvent::SendSpliceLocked { ref node_id, ref 
msg} => { - let logger = WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id), None); + }, + MessageSendEvent::SendSpliceLocked { ref node_id, ref msg } => { + let logger = WithContext::from( + &self.logger, + Some(*node_id), + Some(msg.channel_id), + None, + ); log_debug!(logger, "Handling SendSpliceLocked event in peer_handler for node {} for channel {}", log_pubkey!(node_id), &msg.channel_id); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); - } + }, MessageSendEvent::SendTxAddInput { ref node_id, ref msg } => { log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id), None), "Handling SendTxAddInput event in peer_handler for node {} for channel {}", log_pubkey!(node_id), @@ -2429,7 +2872,19 @@ impl { + MessageSendEvent::UpdateHTLCs { + ref node_id, + ref channel_id, + updates: + msgs::CommitmentUpdate { + ref update_add_htlcs, + ref update_fulfill_htlcs, + ref update_fail_htlcs, + ref update_fail_malformed_htlcs, + ref update_fee, + ref commitment_signed, + }, + } => { log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(*channel_id), None), "Handling UpdateHTLCs event in peer_handler for node {} with {} adds, {} fulfills, {} fails, {} commits for channel {}", log_pubkey!(node_id), update_add_htlcs.len(), @@ -2481,27 +2936,52 @@ impl { + MessageSendEvent::SendChannelAnnouncement { + ref node_id, + ref msg, + ref update_msg, + } => { log_debug!(WithContext::from(&self.logger, Some(*node_id), None, None), "Handling SendChannelAnnouncement event in peer_handler for node {} for short channel id {}", log_pubkey!(node_id), msg.contents.short_channel_id); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); - self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, update_msg); + self.enqueue_message( + &mut *get_peer_for_forwarding!(node_id)?, + update_msg, + ); }, MessageSendEvent::BroadcastChannelAnnouncement { msg, update_msg } => { log_debug!(self.logger, "Handling BroadcastChannelAnnouncement event in peer_handler for short channel id {}", msg.contents.short_channel_id); match route_handler.handle_channel_announcement(None, &msg) { - Ok(_) | Err(LightningError { action: msgs::ErrorAction::IgnoreDuplicateGossip, .. }) => { + Ok(_) + | Err(LightningError { + action: msgs::ErrorAction::IgnoreDuplicateGossip, + .. + }) => { let forward = wire::Message::ChannelAnnouncement(msg); - self.forward_broadcast_msg(peers, &forward, None, from_chan_handler); + self.forward_broadcast_msg( + peers, + &forward, + None, + from_chan_handler, + ); }, _ => {}, } if let Some(msg) = update_msg { match route_handler.handle_channel_update(None, &msg) { - Ok(_) | Err(LightningError { action: msgs::ErrorAction::IgnoreDuplicateGossip, .. }) => { + Ok(_) + | Err(LightningError { + action: msgs::ErrorAction::IgnoreDuplicateGossip, + .. + }) => { let forward = wire::Message::ChannelUpdate(msg); - self.forward_broadcast_msg(peers, &forward, None, from_chan_handler); + self.forward_broadcast_msg( + peers, + &forward, + None, + from_chan_handler, + ); }, _ => {}, } @@ -2510,9 +2990,18 @@ impl { log_debug!(self.logger, "Handling BroadcastChannelUpdate event in peer_handler for contents {:?}", msg.contents); match route_handler.handle_channel_update(None, &msg) { - Ok(_) | Err(LightningError { action: msgs::ErrorAction::IgnoreDuplicateGossip, .. }) => { + Ok(_) + | Err(LightningError { + action: msgs::ErrorAction::IgnoreDuplicateGossip, + .. 
+ }) => { let forward = wire::Message::ChannelUpdate(msg); - self.forward_broadcast_msg(peers, &forward, None, from_chan_handler); + self.forward_broadcast_msg( + peers, + &forward, + None, + from_chan_handler, + ); }, _ => {}, } @@ -2520,9 +3009,18 @@ impl { log_debug!(self.logger, "Handling BroadcastNodeAnnouncement event in peer_handler for node {}", msg.contents.node_id); match route_handler.handle_node_announcement(None, &msg) { - Ok(_) | Err(LightningError { action: msgs::ErrorAction::IgnoreDuplicateGossip, .. }) => { + Ok(_) + | Err(LightningError { + action: msgs::ErrorAction::IgnoreDuplicateGossip, + .. + }) => { let forward = wire::Message::NodeAnnouncement(msg); - self.forward_broadcast_msg(peers, &forward, None, from_chan_handler); + self.forward_broadcast_msg( + peers, + &forward, + None, + from_chan_handler, + ); }, _ => {}, } @@ -2546,7 +3044,9 @@ impl::Target as wire::CustomMessageReader>::CustomMessage>::Error(msg)); + let msg = msg.map(|msg| { + wire::Message::<<::Target as wire::CustomMessageReader>::CustomMessage>::Error(msg) + }); peers_to_disconnect.insert(node_id, msg); }, msgs::ErrorAction::DisconnectPeerWithWarning { msg } => { @@ -2555,26 +3055,45 @@ impl { - log_given_level!(logger, level, "Received a HandleError event to be ignored for node {}", log_pubkey!(node_id)); + log_given_level!( + logger, + level, + "Received a HandleError event to be ignored for node {}", + log_pubkey!(node_id) + ); }, msgs::ErrorAction::IgnoreDuplicateGossip => {}, msgs::ErrorAction::IgnoreError => { - log_debug!(logger, "Received a HandleError event to be ignored for node {}", log_pubkey!(node_id)); - }, + log_debug!( + logger, + "Received a HandleError event to be ignored for node {}", + log_pubkey!(node_id) + ); + }, msgs::ErrorAction::SendErrorMessage { ref msg } => { log_trace!(logger, "Handling SendErrorMessage HandleError event in peer_handler for node {} with message {}", log_pubkey!(node_id), msg.data); - self.enqueue_message(&mut *get_peer_for_forwarding!(&node_id)?, msg); + self.enqueue_message( + &mut *get_peer_for_forwarding!(&node_id)?, + msg, + ); }, - msgs::ErrorAction::SendWarningMessage { ref msg, ref log_level } => { + msgs::ErrorAction::SendWarningMessage { + ref msg, + ref log_level, + } => { log_given_level!(logger, *log_level, "Handling SendWarningMessage HandleError event in peer_handler for node {} with message {}", log_pubkey!(node_id), msg.data); - self.enqueue_message(&mut *get_peer_for_forwarding!(&node_id)?, msg); + self.enqueue_message( + &mut *get_peer_for_forwarding!(&node_id)?, + msg, + ); }, } }, @@ -2590,7 +3109,7 @@ impl { log_gossip!(WithContext::from(&self.logger, Some(*node_id), None, None), "Handling SendReplyChannelRange event in peer_handler for node {} with num_scids={} first_blocknum={} number_of_blocks={}, sync_complete={}", log_pubkey!(node_id), @@ -2599,14 +3118,14 @@ impl { log_gossip!(WithContext::from(&self.logger, Some(*node_id), None, None), "Handling SendGossipTimestampFilter event in peer_handler for node {} with first_timestamp={}, timestamp_range={}", log_pubkey!(node_id), msg.first_timestamp, msg.timestamp_range); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg); - } + }, } Some(()) }; @@ -2627,15 +3146,27 @@ impl 0 && !peer.received_message_since_timer_tick; - let reached_threshold_intervals = peer.awaiting_pong_timer_tick_intervals as u64 > - MAX_BUFFER_DRAIN_TICK_INTERVALS_PER_PEER as u64 * peers_lock.len() as u64; + let not_recently_active = peer.awaiting_pong_timer_tick_intervals > 0 + && 
!peer.received_message_since_timer_tick; + let reached_threshold_intervals = peer.awaiting_pong_timer_tick_intervals + as u64 + > MAX_BUFFER_DRAIN_TICK_INTERVALS_PER_PEER as u64 * peers_lock.len() as u64; if not_recently_active || reached_threshold_intervals { descriptors_needing_disconnect.push(descriptor.clone()); break; @@ -2824,14 +3383,15 @@ impl) { + pub fn broadcast_node_announcement( + &self, rgb: [u8; 3], alias: [u8; 32], mut addresses: Vec, + ) { if addresses.len() > 100 { panic!("More than half the message size was taken up by public addresses!"); } @@ -2900,9 +3463,10 @@ impl sig, Err(_) => { log_error!(self.logger, "Failed to generate signature for node_announcement"); @@ -2910,27 +3474,32 @@ impl bool { match type_id { - msgs::ChannelAnnouncement::TYPE | - msgs::ChannelUpdate::TYPE | - msgs::NodeAnnouncement::TYPE | - msgs::QueryChannelRange::TYPE | - msgs::ReplyChannelRange::TYPE | - msgs::QueryShortChannelIds::TYPE | - msgs::ReplyShortChannelIdsEnd::TYPE => true, - _ => false + msgs::ChannelAnnouncement::TYPE + | msgs::ChannelUpdate::TYPE + | msgs::NodeAnnouncement::TYPE + | msgs::QueryChannelRange::TYPE + | msgs::ReplyChannelRange::TYPE + | msgs::QueryShortChannelIds::TYPE + | msgs::ReplyShortChannelIdsEnd::TYPE => true, + _ => false, } } @@ -2938,18 +3507,18 @@ fn is_gossip_msg(type_id: u16) -> bool { mod tests { use super::*; - use crate::sign::{NodeSigner, Recipient}; use crate::io; - use crate::ln::types::ChannelId; - use crate::types::features::{InitFeatures, NodeFeatures}; + use crate::ln::msgs::{Init, LightningError, SocketAddress}; use crate::ln::peer_channel_encryptor::PeerChannelEncryptor; + use crate::ln::types::ChannelId; use crate::ln::{msgs, wire}; - use crate::ln::msgs::{Init, LightningError, SocketAddress}; + use crate::sign::{NodeSigner, Recipient}; + use crate::types::features::{InitFeatures, NodeFeatures}; use crate::util::test_utils; - use bitcoin::Network; use bitcoin::constants::ChainHash; - use bitcoin::secp256k1::{PublicKey, SecretKey, Secp256k1}; + use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey}; + use bitcoin::Network; use crate::sync::{Arc, Mutex}; use core::convert::Infallible; @@ -2970,7 +3539,7 @@ mod tests { self.fd == other.fd } } - impl Eq for FileDescriptor { } + impl Eq for FileDescriptor {} impl core::hash::Hash for FileDescriptor { fn hash(&self, hasher: &mut H) { self.fd.hash(hasher) @@ -2987,7 +3556,9 @@ mod tests { } } - fn disconnect_socket(&mut self) { self.disconnect.store(true, Ordering::Release); } + fn disconnect_socket(&mut self) { + self.disconnect.store(true, Ordering::Release); + } } impl FileDescriptor { @@ -3016,16 +3587,15 @@ mod tests { impl TestCustomMessageHandler { fn new(features: InitFeatures) -> Self { - Self { - features, - conn_tracker: test_utils::ConnectionTracker::new(), - } + Self { features, conn_tracker: test_utils::ConnectionTracker::new() } } } impl wire::CustomMessageReader for TestCustomMessageHandler { type CustomMessage = Infallible; - fn read(&self, _: u16, _: &mut R) -> Result, msgs::DecodeError> { + fn read( + &self, _: u16, _: &mut R, + ) -> Result, msgs::DecodeError> { Ok(None) } } @@ -3035,17 +3605,23 @@ mod tests { unreachable!(); } - fn get_and_clear_pending_msg(&self) -> Vec<(PublicKey, Self::CustomMessage)> { Vec::new() } + fn get_and_clear_pending_msg(&self) -> Vec<(PublicKey, Self::CustomMessage)> { + Vec::new() + } fn peer_disconnected(&self, their_node_id: PublicKey) { self.conn_tracker.peer_disconnected(their_node_id); } - fn peer_connected(&self, their_node_id: PublicKey, 
_msg: &Init, _inbound: bool) -> Result<(), ()> { + fn peer_connected( + &self, their_node_id: PublicKey, _msg: &Init, _inbound: bool, + ) -> Result<(), ()> { self.conn_tracker.peer_connected(their_node_id) } - fn provided_node_features(&self) -> NodeFeatures { NodeFeatures::empty() } + fn provided_node_features(&self) -> NodeFeatures { + NodeFeatures::empty() + } fn provided_init_features(&self, _: PublicKey) -> InitFeatures { self.features.clone() @@ -3061,15 +3637,15 @@ mod tests { feature_bits[32] = 0b00000001; InitFeatures::from_le_bytes(feature_bits) }; - cfgs.push( - PeerManagerCfg{ - chan_handler: test_utils::TestChannelMessageHandler::new(ChainHash::using_genesis_block(Network::Testnet)), - logger: test_utils::TestLogger::with_id(i.to_string()), - routing_handler: test_utils::TestRoutingMessageHandler::new(), - custom_handler: TestCustomMessageHandler::new(features), - node_signer: test_utils::TestNodeSigner::new(node_secret), - } - ); + cfgs.push(PeerManagerCfg { + chan_handler: test_utils::TestChannelMessageHandler::new( + ChainHash::using_genesis_block(Network::Testnet), + ), + logger: test_utils::TestLogger::with_id(i.to_string()), + routing_handler: test_utils::TestRoutingMessageHandler::new(), + custom_handler: TestCustomMessageHandler::new(features), + node_signer: test_utils::TestNodeSigner::new(node_secret), + }); } cfgs @@ -3084,15 +3660,15 @@ mod tests { feature_bits[33 + i] = 0b00000001; InitFeatures::from_le_bytes(feature_bits) }; - cfgs.push( - PeerManagerCfg{ - chan_handler: test_utils::TestChannelMessageHandler::new(ChainHash::using_genesis_block(Network::Testnet)), - logger: test_utils::TestLogger::new(), - routing_handler: test_utils::TestRoutingMessageHandler::new(), - custom_handler: TestCustomMessageHandler::new(features), - node_signer: test_utils::TestNodeSigner::new(node_secret), - } - ); + cfgs.push(PeerManagerCfg { + chan_handler: test_utils::TestChannelMessageHandler::new( + ChainHash::using_genesis_block(Network::Testnet), + ), + logger: test_utils::TestLogger::new(), + routing_handler: test_utils::TestRoutingMessageHandler::new(), + custom_handler: TestCustomMessageHandler::new(features), + node_signer: test_utils::TestNodeSigner::new(node_secret), + }); } cfgs @@ -3104,40 +3680,73 @@ mod tests { let node_secret = SecretKey::from_slice(&[42 + i as u8; 32]).unwrap(); let features = InitFeatures::from_le_bytes(vec![0u8; 33]); let network = ChainHash::from(&[i as u8; 32]); - cfgs.push( - PeerManagerCfg{ - chan_handler: test_utils::TestChannelMessageHandler::new(network), - logger: test_utils::TestLogger::new(), - routing_handler: test_utils::TestRoutingMessageHandler::new(), - custom_handler: TestCustomMessageHandler::new(features), - node_signer: test_utils::TestNodeSigner::new(node_secret), - } - ); + cfgs.push(PeerManagerCfg { + chan_handler: test_utils::TestChannelMessageHandler::new(network), + logger: test_utils::TestLogger::new(), + routing_handler: test_utils::TestRoutingMessageHandler::new(), + custom_handler: TestCustomMessageHandler::new(features), + node_signer: test_utils::TestNodeSigner::new(node_secret), + }); } cfgs } - fn create_network<'a>(peer_count: usize, cfgs: &'a Vec) -> Vec> { + fn create_network<'a>( + peer_count: usize, cfgs: &'a Vec, + ) -> Vec< + PeerManager< + FileDescriptor, + &'a test_utils::TestChannelMessageHandler, + &'a test_utils::TestRoutingMessageHandler, + IgnoringMessageHandler, + &'a test_utils::TestLogger, + &'a TestCustomMessageHandler, + &'a test_utils::TestNodeSigner, + >, + > { let mut peers = Vec::new(); for i 
in 0..peer_count { let ephemeral_bytes = [i as u8; 32]; let msg_handler = MessageHandler { - chan_handler: &cfgs[i].chan_handler, route_handler: &cfgs[i].routing_handler, - onion_message_handler: IgnoringMessageHandler {}, custom_message_handler: &cfgs[i].custom_handler + chan_handler: &cfgs[i].chan_handler, + route_handler: &cfgs[i].routing_handler, + onion_message_handler: IgnoringMessageHandler {}, + custom_message_handler: &cfgs[i].custom_handler, }; - let peer = PeerManager::new(msg_handler, 0, &ephemeral_bytes, &cfgs[i].logger, &cfgs[i].node_signer); + let peer = PeerManager::new( + msg_handler, + 0, + &ephemeral_bytes, + &cfgs[i].logger, + &cfgs[i].node_signer, + ); peers.push(peer); } peers } - type TestPeer<'a> = PeerManager; - - fn try_establish_connection<'a>(peer_a: &TestPeer<'a>, peer_b: &TestPeer<'a>) -> (FileDescriptor, FileDescriptor, Result, Result) { - let addr_a = SocketAddress::TcpIpV4{addr: [127, 0, 0, 1], port: 1000}; - let addr_b = SocketAddress::TcpIpV4{addr: [127, 0, 0, 1], port: 1001}; + type TestPeer<'a> = PeerManager< + FileDescriptor, + &'a test_utils::TestChannelMessageHandler, + &'a test_utils::TestRoutingMessageHandler, + IgnoringMessageHandler, + &'a test_utils::TestLogger, + &'a TestCustomMessageHandler, + &'a test_utils::TestNodeSigner, + >; + + fn try_establish_connection<'a>( + peer_a: &TestPeer<'a>, peer_b: &TestPeer<'a>, + ) -> ( + FileDescriptor, + FileDescriptor, + Result, + Result, + ) { + let addr_a = SocketAddress::TcpIpV4 { addr: [127, 0, 0, 1], port: 1000 }; + let addr_b = SocketAddress::TcpIpV4 { addr: [127, 0, 0, 1], port: 1001 }; static FD_COUNTER: AtomicUsize = AtomicUsize::new(0); let fd = FD_COUNTER.fetch_add(1, Ordering::Relaxed) as u16; @@ -3146,7 +3755,8 @@ mod tests { let mut fd_a = FileDescriptor::new(fd); let mut fd_b = FileDescriptor::new(fd); - let initial_data = peer_b.new_outbound_connection(id_a, fd_b.clone(), Some(addr_a.clone())).unwrap(); + let initial_data = + peer_b.new_outbound_connection(id_a, fd_b.clone(), Some(addr_a.clone())).unwrap(); peer_a.new_inbound_connection(fd_a.clone(), Some(addr_b.clone())).unwrap(); assert_eq!(peer_a.read_event(&mut fd_a, &initial_data).unwrap(), false); peer_a.process_events(); @@ -3165,10 +3775,11 @@ mod tests { (fd_a, fd_b, a_refused, b_refused) } - - fn establish_connection<'a>(peer_a: &TestPeer<'a>, peer_b: &TestPeer<'a>) -> (FileDescriptor, FileDescriptor) { - let addr_a = SocketAddress::TcpIpV4{addr: [127, 0, 0, 1], port: 1000}; - let addr_b = SocketAddress::TcpIpV4{addr: [127, 0, 0, 1], port: 1001}; + fn establish_connection<'a>( + peer_a: &TestPeer<'a>, peer_b: &TestPeer<'a>, + ) -> (FileDescriptor, FileDescriptor) { + let addr_a = SocketAddress::TcpIpV4 { addr: [127, 0, 0, 1], port: 1000 }; + let addr_b = SocketAddress::TcpIpV4 { addr: [127, 0, 0, 1], port: 1001 }; let id_a = peer_a.node_signer.get_node_id(Recipient::Node).unwrap(); let id_b = peer_b.node_signer.get_node_id(Recipient::Node).unwrap(); @@ -3201,64 +3812,84 @@ mod tests { let peers = Arc::new(create_network(2, unsafe { &*(&*cfgs as *const _) as &'static _ })); let start_time = std::time::Instant::now(); - macro_rules! 
spawn_thread { ($id: expr) => { { - let peers = Arc::clone(&peers); - let cfgs = Arc::clone(&cfgs); - std::thread::spawn(move || { - let mut ctr = 0; - while start_time.elapsed() < std::time::Duration::from_secs(1) { - let id_a = peers[0].node_signer.get_node_id(Recipient::Node).unwrap(); - let mut fd_a = FileDescriptor::new($id + ctr * 3); - let addr_a = SocketAddress::TcpIpV4{addr: [127, 0, 0, 1], port: 1000}; - let mut fd_b = FileDescriptor::new($id + ctr * 3); - let addr_b = SocketAddress::TcpIpV4{addr: [127, 0, 0, 1], port: 1001}; - let initial_data = peers[1].new_outbound_connection(id_a, fd_b.clone(), Some(addr_a.clone())).unwrap(); - peers[0].new_inbound_connection(fd_a.clone(), Some(addr_b.clone())).unwrap(); - if peers[0].read_event(&mut fd_a, &initial_data).is_err() { break; } - + macro_rules! spawn_thread { + ($id: expr) => {{ + let peers = Arc::clone(&peers); + let cfgs = Arc::clone(&cfgs); + std::thread::spawn(move || { + let mut ctr = 0; while start_time.elapsed() < std::time::Duration::from_secs(1) { - peers[0].process_events(); - if fd_a.disconnect.load(Ordering::Acquire) { break; } - let a_data = fd_a.outbound_data.lock().unwrap().split_off(0); - if peers[1].read_event(&mut fd_b, &a_data).is_err() { break; } - - peers[1].process_events(); - if fd_b.disconnect.load(Ordering::Acquire) { break; } - let b_data = fd_b.outbound_data.lock().unwrap().split_off(0); - if peers[0].read_event(&mut fd_a, &b_data).is_err() { break; } - - let node_id_1 = peers[1].node_signer.get_node_id(Recipient::Node).unwrap(); - cfgs[0].chan_handler.pending_events.lock().unwrap() - .push(MessageSendEvent::SendShutdown { - node_id: node_id_1, - msg: msgs::Shutdown { - channel_id: ChannelId::new_zero(), - scriptpubkey: bitcoin::ScriptBuf::new(), + let id_a = peers[0].node_signer.get_node_id(Recipient::Node).unwrap(); + let mut fd_a = FileDescriptor::new($id + ctr * 3); + let addr_a = SocketAddress::TcpIpV4 { addr: [127, 0, 0, 1], port: 1000 }; + let mut fd_b = FileDescriptor::new($id + ctr * 3); + let addr_b = SocketAddress::TcpIpV4 { addr: [127, 0, 0, 1], port: 1001 }; + let initial_data = peers[1] + .new_outbound_connection(id_a, fd_b.clone(), Some(addr_a.clone())) + .unwrap(); + peers[0] + .new_inbound_connection(fd_a.clone(), Some(addr_b.clone())) + .unwrap(); + if peers[0].read_event(&mut fd_a, &initial_data).is_err() { + break; + } + + while start_time.elapsed() < std::time::Duration::from_secs(1) { + peers[0].process_events(); + if fd_a.disconnect.load(Ordering::Acquire) { + break; + } + let a_data = fd_a.outbound_data.lock().unwrap().split_off(0); + if peers[1].read_event(&mut fd_b, &a_data).is_err() { + break; + } + + peers[1].process_events(); + if fd_b.disconnect.load(Ordering::Acquire) { + break; + } + let b_data = fd_b.outbound_data.lock().unwrap().split_off(0); + if peers[0].read_event(&mut fd_a, &b_data).is_err() { + break; + } + + let node_id_1 = + peers[1].node_signer.get_node_id(Recipient::Node).unwrap(); + cfgs[0].chan_handler.pending_events.lock().unwrap().push( + MessageSendEvent::SendShutdown { + node_id: node_id_1, + msg: msgs::Shutdown { + channel_id: ChannelId::new_zero(), + scriptpubkey: bitcoin::ScriptBuf::new(), + }, }, - }); - let node_id_0 = peers[0].node_signer.get_node_id(Recipient::Node).unwrap(); - cfgs[1].chan_handler.pending_events.lock().unwrap() - .push(MessageSendEvent::SendShutdown { - node_id: node_id_0, - msg: msgs::Shutdown { - channel_id: ChannelId::new_zero(), - scriptpubkey: bitcoin::ScriptBuf::new(), + ); + let node_id_0 = + 
peers[0].node_signer.get_node_id(Recipient::Node).unwrap(); + cfgs[1].chan_handler.pending_events.lock().unwrap().push( + MessageSendEvent::SendShutdown { + node_id: node_id_0, + msg: msgs::Shutdown { + channel_id: ChannelId::new_zero(), + scriptpubkey: bitcoin::ScriptBuf::new(), + }, }, - }); + ); - if ctr % 2 == 0 { - peers[0].timer_tick_occurred(); - peers[1].timer_tick_occurred(); + if ctr % 2 == 0 { + peers[0].timer_tick_occurred(); + peers[1].timer_tick_occurred(); + } } - } - peers[0].socket_disconnected(&fd_a); - peers[1].socket_disconnected(&fd_b); - ctr += 1; - std::thread::sleep(std::time::Duration::from_micros(1)); - } - }) - } } } + peers[0].socket_disconnected(&fd_a); + peers[1].socket_disconnected(&fd_b); + ctr += 1; + std::thread::sleep(std::time::Duration::from_micros(1)); + } + }) + }}; + } let thrd_a = spawn_thread!(1); let thrd_b = spawn_thread!(2); @@ -3277,10 +3908,11 @@ mod tests { for (peer_a, peer_b) in peer_pairs.iter() { let id_a = peer_a.node_signer.get_node_id(Recipient::Node).unwrap(); let mut fd_a = FileDescriptor::new(1); - let addr_a = SocketAddress::TcpIpV4{addr: [127, 0, 0, 1], port: 1000}; + let addr_a = SocketAddress::TcpIpV4 { addr: [127, 0, 0, 1], port: 1000 }; let mut fd_b = FileDescriptor::new(1); - let addr_b = SocketAddress::TcpIpV4{addr: [127, 0, 0, 1], port: 1001}; - let initial_data = peer_b.new_outbound_connection(id_a, fd_b.clone(), Some(addr_a.clone())).unwrap(); + let addr_b = SocketAddress::TcpIpV4 { addr: [127, 0, 0, 1], port: 1001 }; + let initial_data = + peer_b.new_outbound_connection(id_a, fd_b.clone(), Some(addr_a.clone())).unwrap(); peer_a.new_inbound_connection(fd_a.clone(), Some(addr_b.clone())).unwrap(); assert_eq!(peer_a.read_event(&mut fd_a, &initial_data).unwrap(), false); peer_a.process_events(); @@ -3307,10 +3939,11 @@ mod tests { for (peer_a, peer_b) in peer_pairs.iter() { let id_a = peer_a.node_signer.get_node_id(Recipient::Node).unwrap(); let mut fd_a = FileDescriptor::new(1); - let addr_a = SocketAddress::TcpIpV4{addr: [127, 0, 0, 1], port: 1000}; + let addr_a = SocketAddress::TcpIpV4 { addr: [127, 0, 0, 1], port: 1000 }; let mut fd_b = FileDescriptor::new(1); - let addr_b = SocketAddress::TcpIpV4{addr: [127, 0, 0, 1], port: 1001}; - let initial_data = peer_b.new_outbound_connection(id_a, fd_b.clone(), Some(addr_a.clone())).unwrap(); + let addr_b = SocketAddress::TcpIpV4 { addr: [127, 0, 0, 1], port: 1001 }; + let initial_data = + peer_b.new_outbound_connection(id_a, fd_b.clone(), Some(addr_a.clone())).unwrap(); peer_a.new_inbound_connection(fd_a.clone(), Some(addr_b.clone())).unwrap(); assert_eq!(peer_a.read_event(&mut fd_a, &initial_data).unwrap(), false); peer_a.process_events(); @@ -3350,18 +3983,27 @@ mod tests { // Simple test which builds a network of PeerManager, connects and brings them to NoiseState::Finished and // push a message from one peer to another. 
let cfgs = create_peermgr_cfgs(2); - let a_chan_handler = test_utils::TestChannelMessageHandler::new(ChainHash::using_genesis_block(Network::Testnet)); - let b_chan_handler = test_utils::TestChannelMessageHandler::new(ChainHash::using_genesis_block(Network::Testnet)); + let a_chan_handler = test_utils::TestChannelMessageHandler::new( + ChainHash::using_genesis_block(Network::Testnet), + ); + let b_chan_handler = test_utils::TestChannelMessageHandler::new( + ChainHash::using_genesis_block(Network::Testnet), + ); let mut peers = create_network(2, &cfgs); let (fd_a, mut fd_b) = establish_connection(&peers[0], &peers[1]); assert_eq!(peers[0].peers.read().unwrap().len(), 1); let their_id = peers[1].node_signer.get_node_id(Recipient::Node).unwrap(); - let msg = msgs::Shutdown { channel_id: ChannelId::from_bytes([42; 32]), scriptpubkey: bitcoin::ScriptBuf::new() }; - a_chan_handler.pending_events.lock().unwrap().push(MessageSendEvent::SendShutdown { - node_id: their_id, msg: msg.clone() - }); + let msg = msgs::Shutdown { + channel_id: ChannelId::from_bytes([42; 32]), + scriptpubkey: bitcoin::ScriptBuf::new(), + }; + a_chan_handler + .pending_events + .lock() + .unwrap() + .push(MessageSendEvent::SendShutdown { node_id: their_id, msg: msg.clone() }); peers[0].message_handler.chan_handler = &a_chan_handler; b_chan_handler.expect_receive_msg(wire::Message::Shutdown(msg)); @@ -3383,11 +4025,12 @@ mod tests { let peers = create_network(2, &cfgs); let mut fd_dup = FileDescriptor::new(3); - let addr_dup = SocketAddress::TcpIpV4{addr: [127, 0, 0, 1], port: 1003}; + let addr_dup = SocketAddress::TcpIpV4 { addr: [127, 0, 0, 1], port: 1003 }; let id_a = cfgs[0].node_signer.get_node_id(Recipient::Node).unwrap(); peers[0].new_inbound_connection(fd_dup.clone(), Some(addr_dup.clone())).unwrap(); - let mut dup_encryptor = PeerChannelEncryptor::new_outbound(id_a, SecretKey::from_slice(&[42; 32]).unwrap()); + let mut dup_encryptor = + PeerChannelEncryptor::new_outbound(id_a, SecretKey::from_slice(&[42; 32]).unwrap()); let initial_data = dup_encryptor.get_act_one(&peers[1].secp_ctx); assert_eq!(peers[0].read_event(&mut fd_dup, &initial_data).unwrap(), false); peers[0].process_events(); @@ -3443,14 +4086,29 @@ mod tests { match handler & !1 { 0 => { - peers[handler & 1].message_handler.chan_handler.conn_tracker.fail_connections.store(true, Ordering::Release); - } + peers[handler & 1] + .message_handler + .chan_handler + .conn_tracker + .fail_connections + .store(true, Ordering::Release); + }, 2 => { - peers[handler & 1].message_handler.route_handler.conn_tracker.fail_connections.store(true, Ordering::Release); - } + peers[handler & 1] + .message_handler + .route_handler + .conn_tracker + .fail_connections + .store(true, Ordering::Release); + }, 4 => { - peers[handler & 1].message_handler.custom_message_handler.conn_tracker.fail_connections.store(true, Ordering::Release); - } + peers[handler & 1] + .message_handler + .custom_message_handler + .conn_tracker + .fail_connections + .store(true, Ordering::Release); + }, _ => panic!(), } let (_sd1, _sd2, a_refused, b_refused) = try_establish_connection(&peers[0], &peers[1]); @@ -3462,13 +4120,51 @@ mod tests { assert!(peers[1].list_peers().is_empty()); } // At least one message handler should have seen the connection. 
- assert!(peers[handler & 1].message_handler.chan_handler.conn_tracker.had_peers.load(Ordering::Acquire) || - peers[handler & 1].message_handler.route_handler.conn_tracker.had_peers.load(Ordering::Acquire) || - peers[handler & 1].message_handler.custom_message_handler.conn_tracker.had_peers.load(Ordering::Acquire)); + assert!( + peers[handler & 1] + .message_handler + .chan_handler + .conn_tracker + .had_peers + .load(Ordering::Acquire) + || peers[handler & 1] + .message_handler + .route_handler + .conn_tracker + .had_peers + .load(Ordering::Acquire) + || peers[handler & 1] + .message_handler + .custom_message_handler + .conn_tracker + .had_peers + .load(Ordering::Acquire) + ); // And both message handlers doing tracking should see the disconnection - assert!(peers[handler & 1].message_handler.chan_handler.conn_tracker.connected_peers.lock().unwrap().is_empty()); - assert!(peers[handler & 1].message_handler.route_handler.conn_tracker.connected_peers.lock().unwrap().is_empty()); - assert!(peers[handler & 1].message_handler.custom_message_handler.conn_tracker.connected_peers.lock().unwrap().is_empty()); + assert!(peers[handler & 1] + .message_handler + .chan_handler + .conn_tracker + .connected_peers + .lock() + .unwrap() + .is_empty()); + assert!(peers[handler & 1] + .message_handler + .route_handler + .conn_tracker + .connected_peers + .lock() + .unwrap() + .is_empty()); + assert!(peers[handler & 1] + .message_handler + .custom_message_handler + .conn_tracker + .connected_peers + .lock() + .unwrap() + .is_empty()); } #[test] @@ -3498,7 +4194,7 @@ mod tests { // Make each peer to read the messages that the other peer just wrote to them. Note that // due to the max-message-before-ping limits this may take a few iterations to complete. - for _ in 0..150/super::BUFFER_DRAIN_MSGS_PER_TICK + 1 { + for _ in 0..150 / super::BUFFER_DRAIN_MSGS_PER_TICK + 1 { peers[1].process_events(); let a_read_data = fd_b.outbound_data.lock().unwrap().split_off(0); assert!(!a_read_data.is_empty()); @@ -3511,7 +4207,11 @@ mod tests { peers[1].read_event(&mut fd_b, &b_read_data).unwrap(); peers[0].process_events(); - assert_eq!(fd_a.outbound_data.lock().unwrap().len(), 0, "Until A receives data, it shouldn't send more messages"); + assert_eq!( + fd_a.outbound_data.lock().unwrap().len(), + 0, + "Until A receives data, it shouldn't send more messages" + ); } // Check that each peer has received the expected number of channel updates and channel @@ -3563,20 +4263,34 @@ mod tests { // two of the noise handshake along with our init message but before we receive their init // message. 
let logger = test_utils::TestLogger::new(); - let node_signer_a = test_utils::TestNodeSigner::new(SecretKey::from_slice(&[42; 32]).unwrap()); - let node_signer_b = test_utils::TestNodeSigner::new(SecretKey::from_slice(&[43; 32]).unwrap()); - let peer_a = PeerManager::new(MessageHandler { - chan_handler: ErroringMessageHandler::new(), - route_handler: IgnoringMessageHandler {}, - onion_message_handler: IgnoringMessageHandler {}, - custom_message_handler: IgnoringMessageHandler {}, - }, 0, &[0; 32], &logger, &node_signer_a); - let peer_b = PeerManager::new(MessageHandler { - chan_handler: ErroringMessageHandler::new(), - route_handler: IgnoringMessageHandler {}, - onion_message_handler: IgnoringMessageHandler {}, - custom_message_handler: IgnoringMessageHandler {}, - }, 0, &[1; 32], &logger, &node_signer_b); + let node_signer_a = + test_utils::TestNodeSigner::new(SecretKey::from_slice(&[42; 32]).unwrap()); + let node_signer_b = + test_utils::TestNodeSigner::new(SecretKey::from_slice(&[43; 32]).unwrap()); + let peer_a = PeerManager::new( + MessageHandler { + chan_handler: ErroringMessageHandler::new(), + route_handler: IgnoringMessageHandler {}, + onion_message_handler: IgnoringMessageHandler {}, + custom_message_handler: IgnoringMessageHandler {}, + }, + 0, + &[0; 32], + &logger, + &node_signer_a, + ); + let peer_b = PeerManager::new( + MessageHandler { + chan_handler: ErroringMessageHandler::new(), + route_handler: IgnoringMessageHandler {}, + onion_message_handler: IgnoringMessageHandler {}, + custom_message_handler: IgnoringMessageHandler {}, + }, + 0, + &[1; 32], + &logger, + &node_signer_b, + ); let a_id = node_signer_a.get_node_id(Recipient::Node).unwrap(); let mut fd_a = FileDescriptor::new(1); @@ -3597,18 +4311,50 @@ mod tests { peer_b.timer_tick_occurred(); let act_three_with_init_b = fd_b.outbound_data.lock().unwrap().split_off(0); - assert!(!peer_a.peers.read().unwrap().get(&fd_a).unwrap().lock().unwrap().handshake_complete()); + assert!(!peer_a + .peers + .read() + .unwrap() + .get(&fd_a) + .unwrap() + .lock() + .unwrap() + .handshake_complete()); assert_eq!(peer_a.read_event(&mut fd_a, &act_three_with_init_b).unwrap(), false); peer_a.process_events(); - assert!(peer_a.peers.read().unwrap().get(&fd_a).unwrap().lock().unwrap().handshake_complete()); + assert!(peer_a + .peers + .read() + .unwrap() + .get(&fd_a) + .unwrap() + .lock() + .unwrap() + .handshake_complete()); let init_a = fd_a.outbound_data.lock().unwrap().split_off(0); assert!(!init_a.is_empty()); - assert!(!peer_b.peers.read().unwrap().get(&fd_b).unwrap().lock().unwrap().handshake_complete()); + assert!(!peer_b + .peers + .read() + .unwrap() + .get(&fd_b) + .unwrap() + .lock() + .unwrap() + .handshake_complete()); assert_eq!(peer_b.read_event(&mut fd_b, &init_a).unwrap(), false); peer_b.process_events(); - assert!(peer_b.peers.read().unwrap().get(&fd_b).unwrap().lock().unwrap().handshake_complete()); + assert!(peer_b + .peers + .read() + .unwrap() + .get(&fd_b) + .unwrap() + .lock() + .unwrap() + .handshake_complete()); // Make sure we're still connected. 
assert_eq!(peer_b.peers.read().unwrap().len(), 1); @@ -3623,10 +4369,13 @@ mod tests { { let peers = peer_a.peers.read().unwrap(); let mut peer_b = peers.get(&fd_a).unwrap().lock().unwrap(); - peer_a.enqueue_message(&mut peer_b, &msgs::WarningMessage { - channel_id: ChannelId([0; 32]), - data: "no disconnect plz".to_string(), - }); + peer_a.enqueue_message( + &mut peer_b, + &msgs::WarningMessage { + channel_id: ChannelId([0; 32]), + data: "no disconnect plz".to_string(), + }, + ); } peer_a.process_events(); let msg = fd_a.outbound_data.lock().unwrap().split_off(0); @@ -3660,24 +4409,26 @@ mod tests { let peers = create_network(2, &cfgs); let (mut fd_a, mut fd_b) = establish_connection(&peers[0], &peers[1]); - macro_rules! drain_queues { () => { - loop { - peers[0].process_events(); - peers[1].process_events(); + macro_rules! drain_queues { + () => { + loop { + peers[0].process_events(); + peers[1].process_events(); - let msg = fd_a.outbound_data.lock().unwrap().split_off(0); - if !msg.is_empty() { - assert_eq!(peers[1].read_event(&mut fd_b, &msg).unwrap(), false); - continue; - } - let msg = fd_b.outbound_data.lock().unwrap().split_off(0); - if !msg.is_empty() { - assert_eq!(peers[0].read_event(&mut fd_a, &msg).unwrap(), false); - continue; + let msg = fd_a.outbound_data.lock().unwrap().split_off(0); + if !msg.is_empty() { + assert_eq!(peers[1].read_event(&mut fd_b, &msg).unwrap(), false); + continue; + } + let msg = fd_b.outbound_data.lock().unwrap().split_off(0); + if !msg.is_empty() { + assert_eq!(peers[0].read_event(&mut fd_a, &msg).unwrap(), false); + continue; + } + break; } - break; - } - } } + }; + } // First, make sure all pending messages have been processed and queues drained. drain_queues!(); @@ -3685,10 +4436,7 @@ mod tests { let secp_ctx = Secp256k1::new(); let key = SecretKey::from_slice(&[1; 32]).unwrap(); let msg = channel_announcement(&key, &key, ChannelFeatures::empty(), 42, &secp_ctx); - let msg_ev = MessageSendEvent::BroadcastChannelAnnouncement { - msg, - update_msg: None, - }; + let msg_ev = MessageSendEvent::BroadcastChannelAnnouncement { msg, update_msg: None }; fd_a.hang_writes.store(true, Ordering::Relaxed); @@ -3699,15 +4447,37 @@ mod tests { peers[0].process_events(); } - assert_eq!(peers[0].peers.read().unwrap().get(&fd_a).unwrap().lock().unwrap().gossip_broadcast_buffer.len(), - OUTBOUND_BUFFER_LIMIT_DROP_GOSSIP); + assert_eq!( + peers[0] + .peers + .read() + .unwrap() + .get(&fd_a) + .unwrap() + .lock() + .unwrap() + .gossip_broadcast_buffer + .len(), + OUTBOUND_BUFFER_LIMIT_DROP_GOSSIP + ); // Check that if a broadcast message comes in from the channel handler (i.e. it is an // announcement for our own channel), it gets queued anyway. cfgs[0].chan_handler.pending_events.lock().unwrap().push(msg_ev); peers[0].process_events(); - assert_eq!(peers[0].peers.read().unwrap().get(&fd_a).unwrap().lock().unwrap().gossip_broadcast_buffer.len(), - OUTBOUND_BUFFER_LIMIT_DROP_GOSSIP + 1); + assert_eq!( + peers[0] + .peers + .read() + .unwrap() + .get(&fd_a) + .unwrap() + .lock() + .unwrap() + .gossip_broadcast_buffer + .len(), + OUTBOUND_BUFFER_LIMIT_DROP_GOSSIP + 1 + ); // Finally, deliver all the messages and make sure we got the right count. 
Note that there // was an extra message that had already moved from the broadcast queue to the encrypted @@ -3717,101 +4487,130 @@ mod tests { peers[0].write_buffer_space_avail(&mut fd_a).unwrap(); drain_queues!(); - assert!(peers[0].peers.read().unwrap().get(&fd_a).unwrap().lock().unwrap().gossip_broadcast_buffer.is_empty()); - assert_eq!(cfgs[1].routing_handler.chan_anns_recvd.load(Ordering::Relaxed), - OUTBOUND_BUFFER_LIMIT_DROP_GOSSIP + 2); + assert!(peers[0] + .peers + .read() + .unwrap() + .get(&fd_a) + .unwrap() + .lock() + .unwrap() + .gossip_broadcast_buffer + .is_empty()); + assert_eq!( + cfgs[1].routing_handler.chan_anns_recvd.load(Ordering::Relaxed), + OUTBOUND_BUFFER_LIMIT_DROP_GOSSIP + 2 + ); } #[test] - fn test_filter_addresses(){ + fn test_filter_addresses() { // Tests the filter_addresses function. // For (10/8) - let ip_address = SocketAddress::TcpIpV4{addr: [10, 0, 0, 0], port: 1000}; + let ip_address = SocketAddress::TcpIpV4 { addr: [10, 0, 0, 0], port: 1000 }; assert_eq!(filter_addresses(Some(ip_address.clone())), None); - let ip_address = SocketAddress::TcpIpV4{addr: [10, 0, 255, 201], port: 1000}; + let ip_address = SocketAddress::TcpIpV4 { addr: [10, 0, 255, 201], port: 1000 }; assert_eq!(filter_addresses(Some(ip_address.clone())), None); - let ip_address = SocketAddress::TcpIpV4{addr: [10, 255, 255, 255], port: 1000}; + let ip_address = SocketAddress::TcpIpV4 { addr: [10, 255, 255, 255], port: 1000 }; assert_eq!(filter_addresses(Some(ip_address.clone())), None); // For (0/8) - let ip_address = SocketAddress::TcpIpV4{addr: [0, 0, 0, 0], port: 1000}; + let ip_address = SocketAddress::TcpIpV4 { addr: [0, 0, 0, 0], port: 1000 }; assert_eq!(filter_addresses(Some(ip_address.clone())), None); - let ip_address = SocketAddress::TcpIpV4{addr: [0, 0, 255, 187], port: 1000}; + let ip_address = SocketAddress::TcpIpV4 { addr: [0, 0, 255, 187], port: 1000 }; assert_eq!(filter_addresses(Some(ip_address.clone())), None); - let ip_address = SocketAddress::TcpIpV4{addr: [0, 255, 255, 255], port: 1000}; + let ip_address = SocketAddress::TcpIpV4 { addr: [0, 255, 255, 255], port: 1000 }; assert_eq!(filter_addresses(Some(ip_address.clone())), None); // For (100.64/10) - let ip_address = SocketAddress::TcpIpV4{addr: [100, 64, 0, 0], port: 1000}; + let ip_address = SocketAddress::TcpIpV4 { addr: [100, 64, 0, 0], port: 1000 }; assert_eq!(filter_addresses(Some(ip_address.clone())), None); - let ip_address = SocketAddress::TcpIpV4{addr: [100, 78, 255, 0], port: 1000}; + let ip_address = SocketAddress::TcpIpV4 { addr: [100, 78, 255, 0], port: 1000 }; assert_eq!(filter_addresses(Some(ip_address.clone())), None); - let ip_address = SocketAddress::TcpIpV4{addr: [100, 127, 255, 255], port: 1000}; + let ip_address = SocketAddress::TcpIpV4 { addr: [100, 127, 255, 255], port: 1000 }; assert_eq!(filter_addresses(Some(ip_address.clone())), None); // For (127/8) - let ip_address = SocketAddress::TcpIpV4{addr: [127, 0, 0, 0], port: 1000}; + let ip_address = SocketAddress::TcpIpV4 { addr: [127, 0, 0, 0], port: 1000 }; assert_eq!(filter_addresses(Some(ip_address.clone())), None); - let ip_address = SocketAddress::TcpIpV4{addr: [127, 65, 73, 0], port: 1000}; + let ip_address = SocketAddress::TcpIpV4 { addr: [127, 65, 73, 0], port: 1000 }; assert_eq!(filter_addresses(Some(ip_address.clone())), None); - let ip_address = SocketAddress::TcpIpV4{addr: [127, 255, 255, 255], port: 1000}; + let ip_address = SocketAddress::TcpIpV4 { addr: [127, 255, 255, 255], port: 1000 }; 
assert_eq!(filter_addresses(Some(ip_address.clone())), None); // For (169.254/16) - let ip_address = SocketAddress::TcpIpV4{addr: [169, 254, 0, 0], port: 1000}; + let ip_address = SocketAddress::TcpIpV4 { addr: [169, 254, 0, 0], port: 1000 }; assert_eq!(filter_addresses(Some(ip_address.clone())), None); - let ip_address = SocketAddress::TcpIpV4{addr: [169, 254, 221, 101], port: 1000}; + let ip_address = SocketAddress::TcpIpV4 { addr: [169, 254, 221, 101], port: 1000 }; assert_eq!(filter_addresses(Some(ip_address.clone())), None); - let ip_address = SocketAddress::TcpIpV4{addr: [169, 254, 255, 255], port: 1000}; + let ip_address = SocketAddress::TcpIpV4 { addr: [169, 254, 255, 255], port: 1000 }; assert_eq!(filter_addresses(Some(ip_address.clone())), None); // For (172.16/12) - let ip_address = SocketAddress::TcpIpV4{addr: [172, 16, 0, 0], port: 1000}; + let ip_address = SocketAddress::TcpIpV4 { addr: [172, 16, 0, 0], port: 1000 }; assert_eq!(filter_addresses(Some(ip_address.clone())), None); - let ip_address = SocketAddress::TcpIpV4{addr: [172, 27, 101, 23], port: 1000}; + let ip_address = SocketAddress::TcpIpV4 { addr: [172, 27, 101, 23], port: 1000 }; assert_eq!(filter_addresses(Some(ip_address.clone())), None); - let ip_address = SocketAddress::TcpIpV4{addr: [172, 31, 255, 255], port: 1000}; + let ip_address = SocketAddress::TcpIpV4 { addr: [172, 31, 255, 255], port: 1000 }; assert_eq!(filter_addresses(Some(ip_address.clone())), None); // For (192.168/16) - let ip_address = SocketAddress::TcpIpV4{addr: [192, 168, 0, 0], port: 1000}; + let ip_address = SocketAddress::TcpIpV4 { addr: [192, 168, 0, 0], port: 1000 }; assert_eq!(filter_addresses(Some(ip_address.clone())), None); - let ip_address = SocketAddress::TcpIpV4{addr: [192, 168, 205, 159], port: 1000}; + let ip_address = SocketAddress::TcpIpV4 { addr: [192, 168, 205, 159], port: 1000 }; assert_eq!(filter_addresses(Some(ip_address.clone())), None); - let ip_address = SocketAddress::TcpIpV4{addr: [192, 168, 255, 255], port: 1000}; + let ip_address = SocketAddress::TcpIpV4 { addr: [192, 168, 255, 255], port: 1000 }; assert_eq!(filter_addresses(Some(ip_address.clone())), None); // For (192.88.99/24) - let ip_address = SocketAddress::TcpIpV4{addr: [192, 88, 99, 0], port: 1000}; + let ip_address = SocketAddress::TcpIpV4 { addr: [192, 88, 99, 0], port: 1000 }; assert_eq!(filter_addresses(Some(ip_address.clone())), None); - let ip_address = SocketAddress::TcpIpV4{addr: [192, 88, 99, 140], port: 1000}; + let ip_address = SocketAddress::TcpIpV4 { addr: [192, 88, 99, 140], port: 1000 }; assert_eq!(filter_addresses(Some(ip_address.clone())), None); - let ip_address = SocketAddress::TcpIpV4{addr: [192, 88, 99, 255], port: 1000}; + let ip_address = SocketAddress::TcpIpV4 { addr: [192, 88, 99, 255], port: 1000 }; assert_eq!(filter_addresses(Some(ip_address.clone())), None); // For other IPv4 addresses - let ip_address = SocketAddress::TcpIpV4{addr: [188, 255, 99, 0], port: 1000}; + let ip_address = SocketAddress::TcpIpV4 { addr: [188, 255, 99, 0], port: 1000 }; assert_eq!(filter_addresses(Some(ip_address.clone())), Some(ip_address.clone())); - let ip_address = SocketAddress::TcpIpV4{addr: [123, 8, 129, 14], port: 1000}; + let ip_address = SocketAddress::TcpIpV4 { addr: [123, 8, 129, 14], port: 1000 }; assert_eq!(filter_addresses(Some(ip_address.clone())), Some(ip_address.clone())); - let ip_address = SocketAddress::TcpIpV4{addr: [2, 88, 9, 255], port: 1000}; + let ip_address = SocketAddress::TcpIpV4 { addr: [2, 88, 9, 255], port: 1000 }; 
assert_eq!(filter_addresses(Some(ip_address.clone())), Some(ip_address.clone())); // For (2000::/3) - let ip_address = SocketAddress::TcpIpV6{addr: [32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], port: 1000}; + let ip_address = SocketAddress::TcpIpV6 { + addr: [32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + port: 1000, + }; assert_eq!(filter_addresses(Some(ip_address.clone())), Some(ip_address.clone())); - let ip_address = SocketAddress::TcpIpV6{addr: [45, 34, 209, 190, 0, 123, 55, 34, 0, 0, 3, 27, 201, 0, 0, 0], port: 1000}; + let ip_address = SocketAddress::TcpIpV6 { + addr: [45, 34, 209, 190, 0, 123, 55, 34, 0, 0, 3, 27, 201, 0, 0, 0], + port: 1000, + }; assert_eq!(filter_addresses(Some(ip_address.clone())), Some(ip_address.clone())); - let ip_address = SocketAddress::TcpIpV6{addr: [63, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], port: 1000}; + let ip_address = SocketAddress::TcpIpV6 { + addr: [63, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], + port: 1000, + }; assert_eq!(filter_addresses(Some(ip_address.clone())), Some(ip_address.clone())); // For other IPv6 addresses - let ip_address = SocketAddress::TcpIpV6{addr: [24, 240, 12, 32, 0, 0, 0, 0, 20, 97, 0, 32, 121, 254, 0, 0], port: 1000}; + let ip_address = SocketAddress::TcpIpV6 { + addr: [24, 240, 12, 32, 0, 0, 0, 0, 20, 97, 0, 32, 121, 254, 0, 0], + port: 1000, + }; assert_eq!(filter_addresses(Some(ip_address.clone())), None); - let ip_address = SocketAddress::TcpIpV6{addr: [68, 23, 56, 63, 0, 0, 2, 7, 75, 109, 0, 39, 0, 0, 0, 0], port: 1000}; + let ip_address = SocketAddress::TcpIpV6 { + addr: [68, 23, 56, 63, 0, 0, 2, 7, 75, 109, 0, 39, 0, 0, 0, 0], + port: 1000, + }; assert_eq!(filter_addresses(Some(ip_address.clone())), None); - let ip_address = SocketAddress::TcpIpV6{addr: [101, 38, 140, 230, 100, 0, 30, 98, 0, 26, 0, 0, 57, 96, 0, 0], port: 1000}; + let ip_address = SocketAddress::TcpIpV6 { + addr: [101, 38, 140, 230, 100, 0, 30, 98, 0, 26, 0, 0, 57, 96, 0, 0], + port: 1000, + }; assert_eq!(filter_addresses(Some(ip_address.clone())), None); // For (None) @@ -3833,7 +4632,9 @@ mod tests { // sure we observe a value greater than one at least once. let cfg = Arc::new(create_peermgr_cfgs(1)); // Until we have std::thread::scoped we have to unsafe { turn off the borrow checker }. 
- let peer = Arc::new(create_network(1, unsafe { &*(&*cfg as *const _) as &'static _ }).pop().unwrap()); + let peer = Arc::new( + create_network(1, unsafe { &*(&*cfg as *const _) as &'static _ }).pop().unwrap(), + ); let end_time = Instant::now() + Duration::from_millis(100); let observed_loop = Arc::new(AtomicBool::new(false)); @@ -3842,9 +4643,13 @@ mod tests { let thread_observed_loop = Arc::clone(&observed_loop); move || { while Instant::now() < end_time || !thread_observed_loop.load(Ordering::Acquire) { - test_utils::TestChannelMessageHandler::MESSAGE_FETCH_COUNTER.with(|val| val.store(0, Ordering::Relaxed)); + test_utils::TestChannelMessageHandler::MESSAGE_FETCH_COUNTER + .with(|val| val.store(0, Ordering::Relaxed)); thread_peer.process_events(); - if test_utils::TestChannelMessageHandler::MESSAGE_FETCH_COUNTER.with(|val| val.load(Ordering::Relaxed)) > 1 { + if test_utils::TestChannelMessageHandler::MESSAGE_FETCH_COUNTER + .with(|val| val.load(Ordering::Relaxed)) + > 1 + { thread_observed_loop.store(true, Ordering::Release); return; } From 56cf495e466321213aa4b37216479e502d14f02d Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Tue, 29 Apr 2025 15:42:03 +0200 Subject: [PATCH 15/15] `rustfmt`: Drop `lightning/src/ln/peer_handler.rs` from exclusion list --- rustfmt_excluded_files | 1 - 1 file changed, 1 deletion(-) diff --git a/rustfmt_excluded_files b/rustfmt_excluded_files index a15c1f1613a..4503b0781d5 100644 --- a/rustfmt_excluded_files +++ b/rustfmt_excluded_files @@ -30,7 +30,6 @@ lightning/src/ln/onion_route_tests.rs lightning/src/ln/outbound_payment.rs lightning/src/ln/payment_tests.rs lightning/src/ln/peer_channel_encryptor.rs -lightning/src/ln/peer_handler.rs lightning/src/ln/priv_short_conf_tests.rs lightning/src/ln/reload_tests.rs lightning/src/ln/reorg_tests.rs