diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index c00e5a5bf6f..8a9a5cb1762 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -16,7 +16,9 @@ use crate::chain::transaction::OutPoint; use crate::events::{ClaimedHTLC, ClosureReason, Event, HTLCHandlingFailureType, PaidBolt12Invoice, PathFailure, PaymentFailureReason, PaymentPurpose}; use crate::events::bump_transaction::{BumpTransactionEvent, BumpTransactionEventHandler, Wallet, WalletSource}; use crate::ln::types::ChannelId; +use crate::types::features::ChannelTypeFeatures; use crate::types::payment::{PaymentPreimage, PaymentHash, PaymentSecret}; +use crate::ln::chan_utils::{commitment_tx_base_weight, COMMITMENT_TX_WEIGHT_PER_HTLC}; use crate::ln::channelmanager::{AChannelManager, ChainParameters, ChannelManager, ChannelManagerReadArgs, RAACommitmentOrder, RecipientOnionFields, PaymentId, MIN_CLTV_EXPIRY_DELTA}; use crate::types::features::InitFeatures; use crate::ln::msgs; @@ -1104,6 +1106,10 @@ macro_rules! unwrap_send_err { } } +pub fn commit_tx_fee_msat(feerate: u32, num_htlcs: u64, channel_type_features: &ChannelTypeFeatures) -> u64 { + (commitment_tx_base_weight(channel_type_features) + num_htlcs * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate as u64 / 1000 * 1000 +} + /// Check whether N channel monitor(s) have been added. pub fn check_added_monitors>(node: &H, count: usize) { if let Some(chain_monitor) = node.chain_monitor() { diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index 2f8a9b53a5d..2529580358d 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -12,58 +12,79 @@ //! claim outputs on-chain. 
use crate::chain; -use crate::chain::{ChannelMonitorUpdateStatus, Confirm, Listen, Watch}; use crate::chain::chaininterface::LowerBoundedFeeEstimator; use crate::chain::channelmonitor; -use crate::chain::channelmonitor::{Balance, ChannelMonitorUpdateStep, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY, COUNTERPARTY_CLAIMABLE_WITHIN_BLOCKS_PINNABLE}; +use crate::chain::channelmonitor::{ + Balance, ChannelMonitorUpdateStep, ANTI_REORG_DELAY, CLTV_CLAIM_BUFFER, + COUNTERPARTY_CLAIMABLE_WITHIN_BLOCKS_PINNABLE, LATENCY_GRACE_PERIOD_BLOCKS, +}; use crate::chain::transaction::OutPoint; -use crate::ln::onion_utils::LocalHTLCFailureReason; -use crate::sign::{ecdsa::EcdsaChannelSigner, EntropySource, OutputSpender, SignerProvider}; +use crate::chain::{ChannelMonitorUpdateStatus, Confirm, Listen, Watch}; use crate::events::bump_transaction::WalletSource; -use crate::events::{Event, FundingInfo, PathFailure, PaymentPurpose, ClosureReason, HTLCHandlingFailureType, PaymentFailureReason}; +use crate::events::{ + ClosureReason, Event, FundingInfo, HTLCHandlingFailureType, PathFailure, PaymentFailureReason, + PaymentPurpose, +}; +use crate::ln::chan_utils::{ + commitment_tx_base_weight, htlc_success_tx_weight, htlc_timeout_tx_weight, + COMMITMENT_TX_WEIGHT_PER_HTLC, OFFERED_HTLC_SCRIPT_WEIGHT, +}; +use crate::ln::channel::{ + get_holder_selected_channel_reserve_satoshis, Channel, ChannelError, InboundV1Channel, + OutboundV1Channel, COINBASE_MATURITY, DISCONNECT_PEER_AWAITING_RESPONSE_TICKS, + MIN_CHAN_DUST_LIMIT_SATOSHIS, +}; +use crate::ln::channelmanager::{ + self, PaymentId, RAACommitmentOrder, RecipientOnionFields, BREAKDOWN_TIMEOUT, + DISABLE_GOSSIP_TICKS, ENABLE_GOSSIP_TICKS, MIN_CLTV_EXPIRY_DELTA, +}; +use crate::ln::msgs; +use crate::ln::msgs::{ + AcceptChannel, BaseMessageHandler, ChannelMessageHandler, ErrorAction, MessageSendEvent, + RoutingMessageHandler, +}; +use crate::ln::onion_utils::LocalHTLCFailureReason; use crate::ln::types::ChannelId; -use 
crate::types::payment::{PaymentPreimage, PaymentSecret, PaymentHash}; -use crate::ln::channel::{get_holder_selected_channel_reserve_satoshis, Channel, InboundV1Channel, OutboundV1Channel, COINBASE_MATURITY, CONCURRENT_INBOUND_HTLC_FEE_BUFFER, FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, MIN_AFFORDABLE_HTLC_COUNT}; -use crate::ln::channelmanager::{self, PaymentId, RAACommitmentOrder, RecipientOnionFields, BREAKDOWN_TIMEOUT, ENABLE_GOSSIP_TICKS, DISABLE_GOSSIP_TICKS, MIN_CLTV_EXPIRY_DELTA}; -use crate::ln::channel::{DISCONNECT_PEER_AWAITING_RESPONSE_TICKS, ChannelError, MIN_CHAN_DUST_LIMIT_SATOSHIS}; use crate::ln::{chan_utils, onion_utils}; -use crate::ln::chan_utils::{commitment_tx_base_weight, COMMITMENT_TX_WEIGHT_PER_HTLC, OFFERED_HTLC_SCRIPT_WEIGHT, htlc_success_tx_weight, htlc_timeout_tx_weight, HTLCOutputInCommitment}; use crate::routing::gossip::{NetworkGraph, NetworkUpdate}; -use crate::routing::router::{Path, PaymentParameters, Route, RouteHop, get_route, RouteParameters}; +use crate::routing::router::{ + get_route, Path, PaymentParameters, Route, RouteHop, RouteParameters, +}; +use crate::sign::{EntropySource, OutputSpender, SignerProvider}; use crate::types::features::{ChannelFeatures, ChannelTypeFeatures, NodeFeatures}; -use crate::ln::msgs; -use crate::ln::msgs::{AcceptChannel, BaseMessageHandler, ChannelMessageHandler, RoutingMessageHandler, ErrorAction, MessageSendEvent}; -use crate::util::test_channel_signer::TestChannelSigner; -use crate::util::test_utils::{self, TestLogger, WatchtowerPersister}; +use crate::types::payment::{PaymentHash, PaymentSecret}; +use crate::util::config::{ + ChannelConfigOverrides, ChannelConfigUpdate, ChannelHandshakeConfigUpdate, MaxDustHTLCExposure, + UserConfig, +}; use crate::util::errors::APIError; -use crate::util::ser::{Writeable, ReadableArgs}; +use crate::util::ser::{ReadableArgs, Writeable}; use crate::util::string::UntrustedString; -use crate::util::config::{ChannelConfigOverrides, ChannelHandshakeConfigUpdate, 
ChannelConfigUpdate, MaxDustHTLCExposure, UserConfig}; -use crate::ln::onion_utils::AttributionData; +use crate::util::test_channel_signer::TestChannelSigner; +use crate::util::test_utils::{self, TestLogger, WatchtowerPersister}; +use bitcoin::constants::ChainHash; use bitcoin::hash_types::BlockHash; use bitcoin::locktime::absolute::LockTime; -use bitcoin::script::{Builder, ScriptBuf}; -use bitcoin::opcodes; -use bitcoin::constants::ChainHash; use bitcoin::network::Network; -use bitcoin::{Amount, Sequence, Transaction, TxIn, TxOut, Witness}; -use bitcoin::OutPoint as BitcoinOutPoint; +use bitcoin::opcodes; +use bitcoin::script::{Builder, ScriptBuf}; use bitcoin::transaction::Version; +use bitcoin::OutPoint as BitcoinOutPoint; +use bitcoin::{Amount, Sequence, Transaction, TxIn, TxOut, Witness}; use bitcoin::secp256k1::Secp256k1; -use bitcoin::secp256k1::{PublicKey,SecretKey}; +use bitcoin::secp256k1::{PublicKey, SecretKey}; use crate::io; use crate::prelude::*; +use crate::sync::{Arc, Mutex, RwLock}; use alloc::collections::BTreeSet; -use core::iter::repeat; use bitcoin::hashes::Hash; -use crate::sync::{Arc, Mutex, RwLock}; +use core::iter::repeat; use lightning_macros::xtest; use crate::ln::functional_test_utils::*; -use crate::ln::chan_utils::CommitmentTransaction; use super::channel::UNFUNDED_CHANNEL_AGE_LIMIT_TICKS; @@ -77,25 +98,34 @@ fn test_channel_resumption_fail_post_funding() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1_000_000, 0, 42, None, None).unwrap(); - let open_chan = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); - nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &open_chan); - let accept_chan = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()); - 
nodes[0].node.handle_accept_channel(nodes[1].node.get_our_node_id(), &accept_chan); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + + nodes[0].node.create_channel(node_b_id, 1_000_000, 0, 42, None, None).unwrap(); + let open_chan = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); + nodes[1].node.handle_open_channel(node_a_id, &open_chan); + let accept_chan = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id); + nodes[0].node.handle_accept_channel(node_b_id, &accept_chan); let (temp_chan_id, tx, funding_output) = - create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 1_000_000, 42); + create_funding_transaction(&nodes[0], &node_b_id, 1_000_000, 42); let new_chan_id = ChannelId::v1_from_funding_outpoint(funding_output); - nodes[0].node.funding_transaction_generated(temp_chan_id, nodes[1].node.get_our_node_id(), tx).unwrap(); + nodes[0].node.funding_transaction_generated(temp_chan_id, node_b_id, tx).unwrap(); - nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); - check_closed_events(&nodes[0], &[ExpectedCloseEvent::from_id_reason(new_chan_id, true, ClosureReason::DisconnectedPeer)]); + nodes[0].node.peer_disconnected(node_b_id); + check_closed_events( + &nodes[0], + &[ExpectedCloseEvent::from_id_reason(new_chan_id, true, ClosureReason::DisconnectedPeer)], + ); // After ddf75afd16 we'd panic on reconnection if we exchanged funding info, so test that // explicitly here. 
- nodes[0].node.peer_connected(nodes[1].node.get_our_node_id(), &msgs::Init { - features: nodes[1].node.init_features(), networks: None, remote_network_address: None - }, true).unwrap(); + let init_msg = msgs::Init { + features: nodes[1].node.init_features(), + networks: None, + remote_network_address: None, + }; + nodes[0].node.peer_connected(node_b_id, &init_msg, true).unwrap(); assert_eq!(nodes[0].node.get_and_clear_pending_msg_events(), Vec::new()); } @@ -110,54 +140,120 @@ pub fn test_insane_channel_opens() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(cfg.clone())]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + // Instantiate channel parameters where we push the maximum msats given our // funding satoshis let channel_value_sat = 31337; // same as funding satoshis - let channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(channel_value_sat, &cfg); + let channel_reserve_satoshis = + get_holder_selected_channel_reserve_satoshis(channel_value_sat, &cfg); let push_msat = (channel_value_sat - channel_reserve_satoshis) * 1000; // Have node0 initiate a channel to node1 with aforementioned parameters - nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_sat, push_msat, 42, None, None).unwrap(); + nodes[0].node.create_channel(node_b_id, channel_value_sat, push_msat, 42, None, None).unwrap(); // Extract the channel open message from node0 to node1 - let open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); + let open_channel_message = + get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); // Test helper that asserts we get the correct error string given a mutator // that supposedly makes the channel open message insane - let insane_open_helper = |expected_error_str: &str, message_mutator: 
fn(msgs::OpenChannel) -> msgs::OpenChannel| { - nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &message_mutator(open_channel_message.clone())); - let msg_events = nodes[1].node.get_and_clear_pending_msg_events(); - assert_eq!(msg_events.len(), 1); - let expected_regex = regex::Regex::new(expected_error_str).unwrap(); - if let MessageSendEvent::HandleError { ref action, .. } = msg_events[0] { - match action { - &ErrorAction::SendErrorMessage { .. } => { - nodes[1].logger.assert_log_regex("lightning::ln::channelmanager", expected_regex, 1); - }, - _ => panic!("unexpected event!"), + let insane_open_helper = + |expected_error_str: &str, message_mutator: fn(msgs::OpenChannel) -> msgs::OpenChannel| { + let open_channel_mutated = message_mutator(open_channel_message.clone()); + nodes[1].node.handle_open_channel(node_a_id, &open_channel_mutated); + let msg_events = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(msg_events.len(), 1); + let expected_regex = regex::Regex::new(expected_error_str).unwrap(); + if let MessageSendEvent::HandleError { ref action, .. } = msg_events[0] { + match action { + &ErrorAction::SendErrorMessage { .. } => { + nodes[1].logger.assert_log_regex( + "lightning::ln::channelmanager", + expected_regex, + 1, + ); + }, + _ => panic!("unexpected event!"), + } + } else { + assert!(false); } - } else { assert!(false); } - }; + }; use crate::ln::channelmanager::MAX_LOCAL_BREAKDOWN_TIMEOUT; // Test all mutations that would make the channel open message insane - insane_open_helper(format!("Per our config, funding must be at most {}. It was {}", TOTAL_BITCOIN_SUPPLY_SATOSHIS + 1, TOTAL_BITCOIN_SUPPLY_SATOSHIS + 2).as_str(), |mut msg| { msg.common_fields.funding_satoshis = TOTAL_BITCOIN_SUPPLY_SATOSHIS + 2; msg }); - insane_open_helper(format!("Funding must be smaller than the total bitcoin supply. 
It was {}", TOTAL_BITCOIN_SUPPLY_SATOSHIS).as_str(), |mut msg| { msg.common_fields.funding_satoshis = TOTAL_BITCOIN_SUPPLY_SATOSHIS; msg }); + insane_open_helper( + format!( + "Per our config, funding must be at most {}. It was {}", + TOTAL_BITCOIN_SUPPLY_SATOSHIS + 1, + TOTAL_BITCOIN_SUPPLY_SATOSHIS + 2 + ) + .as_str(), + |mut msg| { + msg.common_fields.funding_satoshis = TOTAL_BITCOIN_SUPPLY_SATOSHIS + 2; + msg + }, + ); + insane_open_helper( + format!( + "Funding must be smaller than the total bitcoin supply. It was {}", + TOTAL_BITCOIN_SUPPLY_SATOSHIS + ) + .as_str(), + |mut msg| { + msg.common_fields.funding_satoshis = TOTAL_BITCOIN_SUPPLY_SATOSHIS; + msg + }, + ); - insane_open_helper("Bogus channel_reserve_satoshis", |mut msg| { msg.channel_reserve_satoshis = msg.common_fields.funding_satoshis + 1; msg }); + insane_open_helper("Bogus channel_reserve_satoshis", |mut msg| { + msg.channel_reserve_satoshis = msg.common_fields.funding_satoshis + 1; + msg + }); - insane_open_helper(r"push_msat \d+ was larger than channel amount minus reserve \(\d+\)", |mut msg| { msg.push_msat = (msg.common_fields.funding_satoshis - msg.channel_reserve_satoshis) * 1000 + 1; msg }); + insane_open_helper( + r"push_msat \d+ was larger than channel amount minus reserve \(\d+\)", + |mut msg| { + msg.push_msat = + (msg.common_fields.funding_satoshis - msg.channel_reserve_satoshis) * 1000 + 1; + msg + }, + ); - insane_open_helper("Peer never wants payout outputs?", |mut msg| { msg.common_fields.dust_limit_satoshis = msg.common_fields.funding_satoshis + 1 ; msg }); + insane_open_helper("Peer never wants payout outputs?", |mut msg| { + msg.common_fields.dust_limit_satoshis = msg.common_fields.funding_satoshis + 1; + msg + }); - insane_open_helper(r"Minimum htlc value \(\d+\) was larger than full channel value \(\d+\)", |mut msg| { msg.common_fields.htlc_minimum_msat = (msg.common_fields.funding_satoshis - msg.channel_reserve_satoshis) * 1000; msg }); + insane_open_helper( + r"Minimum htlc 
value \(\d+\) was larger than full channel value \(\d+\)", + |mut msg| { + msg.common_fields.htlc_minimum_msat = + (msg.common_fields.funding_satoshis - msg.channel_reserve_satoshis) * 1000; + msg + }, + ); - insane_open_helper("They wanted our payments to be delayed by a needlessly long period", |mut msg| { msg.common_fields.to_self_delay = MAX_LOCAL_BREAKDOWN_TIMEOUT + 1; msg }); + insane_open_helper( + "They wanted our payments to be delayed by a needlessly long period", + |mut msg| { + msg.common_fields.to_self_delay = MAX_LOCAL_BREAKDOWN_TIMEOUT + 1; + msg + }, + ); - insane_open_helper("0 max_accepted_htlcs makes for a useless channel", |mut msg| { msg.common_fields.max_accepted_htlcs = 0; msg }); + insane_open_helper("0 max_accepted_htlcs makes for a useless channel", |mut msg| { + msg.common_fields.max_accepted_htlcs = 0; + msg + }); - insane_open_helper("max_accepted_htlcs was 484. It must not be larger than 483", |mut msg| { msg.common_fields.max_accepted_htlcs = 484; msg }); + insane_open_helper("max_accepted_htlcs was 484. It must not be larger than 483", |mut msg| { + msg.common_fields.max_accepted_htlcs = 484; + msg + }); } #[xtest(feature = "_externalize_tests")] @@ -173,369 +269,26 @@ pub fn test_funding_exceeds_no_wumbo_limit() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - match nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), MAX_FUNDING_SATOSHIS_NO_WUMBO + 1, 0, 42, None, None) { - Err(APIError::APIMisuseError { err }) => { - assert_eq!(format!("funding_value must not exceed {}, it was {}", MAX_FUNDING_SATOSHIS_NO_WUMBO, MAX_FUNDING_SATOSHIS_NO_WUMBO + 1), err); - }, - _ => panic!() - } -} - -fn do_test_counterparty_no_reserve(send_from_initiator: bool) { - // A peer providing a channel_reserve_satoshis of 0 (or less than our dust limit) is insecure, - // but only for them. 
Because some LSPs do it with some level of trust of the clients (for a - // substantial UX improvement), we explicitly allow it. Because it's unlikely to happen often - // in normal testing, we test it explicitly here. - let chanmon_cfgs = create_chanmon_cfgs(2); - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); - let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - let default_config = UserConfig::default(); - - // Have node0 initiate a channel to node1 with aforementioned parameters - let mut push_amt = 100_000_000; - let feerate_per_kw = 253; - let channel_type_features = ChannelTypeFeatures::only_static_remote_key(); - push_amt -= feerate_per_kw as u64 * (commitment_tx_base_weight(&channel_type_features) + 4 * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000 * 1000; - push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000; - - let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, if send_from_initiator { 0 } else { push_amt }, 42, None, None).unwrap(); - let mut open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); - if !send_from_initiator { - open_channel_message.channel_reserve_satoshis = 0; - open_channel_message.common_fields.max_htlc_value_in_flight_msat = 100_000_000; - } - nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &open_channel_message); - - // Extract the channel accept message from node1 to node0 - let mut accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()); - if send_from_initiator { - accept_channel_message.channel_reserve_satoshis = 0; - accept_channel_message.common_fields.max_htlc_value_in_flight_msat = 100_000_000; - } - nodes[0].node.handle_accept_channel(nodes[1].node.get_our_node_id(), &accept_channel_message); - { - let sender_node = if 
send_from_initiator { &nodes[1] } else { &nodes[0] }; - let counterparty_node = if send_from_initiator { &nodes[0] } else { &nodes[1] }; - let mut sender_node_per_peer_lock; - let mut sender_node_peer_state_lock; - - let channel = get_channel_ref!(sender_node, counterparty_node, sender_node_per_peer_lock, sender_node_peer_state_lock, temp_channel_id); - assert!(channel.is_unfunded_v1()); - channel.funding_mut().holder_selected_channel_reserve_satoshis = 0; - channel.context_mut().holder_max_htlc_value_in_flight_msat = 100_000_000; - } - - let funding_tx = sign_funding_transaction(&nodes[0], &nodes[1], 100_000, temp_channel_id); - let funding_msgs = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &funding_tx); - create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &funding_msgs.0); - - // nodes[0] should now be able to send the full balance to nodes[1], violating nodes[1]'s - // security model if it ever tries to send funds back to nodes[0] (but that's not our problem). - if send_from_initiator { - send_payment(&nodes[0], &[&nodes[1]], 100_000_000 - // Note that for outbound channels we have to consider the commitment tx fee and the - // "fee spike buffer", which is currently a multiple of the total commitment tx fee as - // well as an additional HTLC. 
- - FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE * commit_tx_fee_msat(feerate_per_kw, 2, &channel_type_features)); - } else { - send_payment(&nodes[1], &[&nodes[0]], push_amt); - } -} - -#[xtest(feature = "_externalize_tests")] -pub fn test_counterparty_no_reserve() { - do_test_counterparty_no_reserve(true); - do_test_counterparty_no_reserve(false); -} - -#[xtest(feature = "_externalize_tests")] -pub fn test_async_inbound_update_fee() { - let chanmon_cfgs = create_chanmon_cfgs(2); - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); - let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); - create_announced_chan_between_nodes(&nodes, 0, 1); - - // balancing - send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000); - - // A B - // update_fee -> - // send (1) commitment_signed -. - // <- update_add_htlc/commitment_signed - // send (2) RAA (awaiting remote revoke) -. - // (1) commitment_signed is delivered -> - // .- send (3) RAA (awaiting remote revoke) - // (2) RAA is delivered -> - // .- send (4) commitment_signed - // <- (3) RAA is delivered - // send (5) commitment_signed -. - // <- (4) commitment_signed is delivered - // send (6) RAA -. - // (5) commitment_signed is delivered -> - // <- RAA - // (6) RAA is delivered -> - - // First nodes[0] generates an update_fee - { - let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); - *feerate_lock += 20; - } - nodes[0].node.timer_tick_occurred(); - check_added_monitors!(nodes[0], 1); - - let events_0 = nodes[0].node.get_and_clear_pending_msg_events(); - assert_eq!(events_0.len(), 1); - let (update_msg, commitment_signed) = match events_0[0] { // (1) - MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. 
} => { - (update_fee.as_ref(), commitment_signed) - }, - _ => panic!("Unexpected event"), - }; - - nodes[1].node.handle_update_fee(nodes[0].node.get_our_node_id(), update_msg.unwrap()); - - // ...but before it's delivered, nodes[1] starts to send a payment back to nodes[0]... - let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 40000); - nodes[1].node.send_payment_with_route(route, our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap(); - check_added_monitors!(nodes[1], 1); - - let payment_event = { - let mut events_1 = nodes[1].node.get_and_clear_pending_msg_events(); - assert_eq!(events_1.len(), 1); - SendEvent::from_event(events_1.remove(0)) - }; - assert_eq!(payment_event.node_id, nodes[0].node.get_our_node_id()); - assert_eq!(payment_event.msgs.len(), 1); - - // ...now when the messages get delivered everyone should be happy - nodes[0].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &payment_event.msgs[0]); - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &payment_event.commitment_msg); // (2) - let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); - // nodes[0] is awaiting nodes[1] revoke_and_ack so get_event_msg's assert(len == 1) passes - check_added_monitors!(nodes[0], 1); - - // deliver(1), generate (3): - nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), commitment_signed); - let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); - // nodes[1] is awaiting nodes[0] revoke_and_ack so get_event_msg's assert(len == 1) passes - check_added_monitors!(nodes[1], 1); - - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_revoke_and_ack); // deliver (2) - let bs_update = get_htlc_update_msgs!(nodes[1], 
nodes[0].node.get_our_node_id()); - assert!(bs_update.update_add_htlcs.is_empty()); // (4) - assert!(bs_update.update_fulfill_htlcs.is_empty()); // (4) - assert!(bs_update.update_fail_htlcs.is_empty()); // (4) - assert!(bs_update.update_fail_malformed_htlcs.is_empty()); // (4) - assert!(bs_update.update_fee.is_none()); // (4) - check_added_monitors!(nodes[1], 1); - - nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_revoke_and_ack); // deliver (3) - let as_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); - assert!(as_update.update_add_htlcs.is_empty()); // (5) - assert!(as_update.update_fulfill_htlcs.is_empty()); // (5) - assert!(as_update.update_fail_htlcs.is_empty()); // (5) - assert!(as_update.update_fail_malformed_htlcs.is_empty()); // (5) - assert!(as_update.update_fee.is_none()); // (5) - check_added_monitors!(nodes[0], 1); - - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_update.commitment_signed); // deliver (4) - let as_second_revoke = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); - // only (6) so get_event_msg's assert(len == 1) passes - check_added_monitors!(nodes[0], 1); - - nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &as_update.commitment_signed); // deliver (5) - let bs_second_revoke = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); - check_added_monitors!(nodes[1], 1); - - nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_second_revoke); - check_added_monitors!(nodes[0], 1); - - let events_2 = nodes[0].node.get_and_clear_pending_events(); - assert_eq!(events_2.len(), 1); - match events_2[0] { - Event::PendingHTLCsForwardable {..} => {}, // If we actually processed we'd receive the payment - _ => panic!("Unexpected event"), - } - - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), 
&as_second_revoke); // deliver (6) - check_added_monitors!(nodes[1], 1); -} - -#[xtest(feature = "_externalize_tests")] -pub fn test_update_fee_unordered_raa() { - // Just the intro to the previous test followed by an out-of-order RAA (which caused a - // crash in an earlier version of the update_fee patch) - let chanmon_cfgs = create_chanmon_cfgs(2); - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); - let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); - create_announced_chan_between_nodes(&nodes, 0, 1); - - // balancing - send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000); - - // First nodes[0] generates an update_fee - { - let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); - *feerate_lock += 20; - } - nodes[0].node.timer_tick_occurred(); - check_added_monitors!(nodes[0], 1); - - let events_0 = nodes[0].node.get_and_clear_pending_msg_events(); - assert_eq!(events_0.len(), 1); - let update_msg = match events_0[0] { // (1) - MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, .. }, .. } => { - update_fee.as_ref() - }, - _ => panic!("Unexpected event"), - }; - - nodes[1].node.handle_update_fee(nodes[0].node.get_our_node_id(), update_msg.unwrap()); - - // ...but before it's delivered, nodes[1] starts to send a payment back to nodes[0]... 
- let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 40000); - nodes[1].node.send_payment_with_route(route, our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap(); - check_added_monitors!(nodes[1], 1); - - let payment_event = { - let mut events_1 = nodes[1].node.get_and_clear_pending_msg_events(); - assert_eq!(events_1.len(), 1); - SendEvent::from_event(events_1.remove(0)) - }; - assert_eq!(payment_event.node_id, nodes[0].node.get_our_node_id()); - assert_eq!(payment_event.msgs.len(), 1); - - // ...now when the messages get delivered everyone should be happy - nodes[0].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &payment_event.msgs[0]); - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &payment_event.commitment_msg); // (2) - let as_revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); - // nodes[0] is awaiting nodes[1] revoke_and_ack so get_event_msg's assert(len == 1) passes - check_added_monitors!(nodes[0], 1); - - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_revoke_msg); // deliver (2) - check_added_monitors!(nodes[1], 1); - - // We can't continue, sadly, because our (1) now has a bogus signature -} - -#[xtest(feature = "_externalize_tests")] -pub fn test_multi_flight_update_fee() { - let chanmon_cfgs = create_chanmon_cfgs(2); - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); - let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - create_announced_chan_between_nodes(&nodes, 0, 1); - - // A B - // update_fee/commitment_signed -> - // .- send (1) RAA and (2) commitment_signed - // update_fee (never committed) -> - // (3) update_fee -> - // We have to manually generate the above update_fee, it is allowed by the protocol but we - // don't 
track which updates correspond to which revoke_and_ack responses so we're in - // AwaitingRAA mode and will not generate the update_fee yet. - // <- (1) RAA delivered - // (3) is generated and send (4) CS -. - // Note that A cannot generate (4) prior to (1) being delivered as it otherwise doesn't - // know the per_commitment_point to use for it. - // <- (2) commitment_signed delivered - // revoke_and_ack -> - // B should send no response here - // (4) commitment_signed delivered -> - // <- RAA/commitment_signed delivered - // revoke_and_ack -> - - // First nodes[0] generates an update_fee - let initial_feerate; - { - let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); - initial_feerate = *feerate_lock; - *feerate_lock = initial_feerate + 20; - } - nodes[0].node.timer_tick_occurred(); - check_added_monitors!(nodes[0], 1); + let node_b_id = nodes[1].node.get_our_node_id(); - let events_0 = nodes[0].node.get_and_clear_pending_msg_events(); - assert_eq!(events_0.len(), 1); - let (update_msg_1, commitment_signed_1) = match events_0[0] { // (1) - MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. 
} => { - (update_fee.as_ref().unwrap(), commitment_signed) + match nodes[0].node.create_channel( + node_b_id, + MAX_FUNDING_SATOSHIS_NO_WUMBO + 1, + 0, + 42, + None, + None, + ) { + Err(APIError::APIMisuseError { err }) => { + let exp_err = format!( + "funding_value must not exceed {}, it was {}", + MAX_FUNDING_SATOSHIS_NO_WUMBO, + MAX_FUNDING_SATOSHIS_NO_WUMBO + 1 + ); + assert_eq!(err, exp_err); }, - _ => panic!("Unexpected event"), - }; - - // Deliver first update_fee/commitment_signed pair, generating (1) and (2): - nodes[1].node.handle_update_fee(nodes[0].node.get_our_node_id(), update_msg_1); - nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), commitment_signed_1); - let (bs_revoke_msg, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); - check_added_monitors!(nodes[1], 1); - - // nodes[0] is awaiting a revoke from nodes[1] before it will create a new commitment - // transaction: - { - let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); - *feerate_lock = initial_feerate + 40; + _ => panic!(), } - nodes[0].node.timer_tick_occurred(); - assert!(nodes[0].node.get_and_clear_pending_events().is_empty()); - assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - - // Create the (3) update_fee message that nodes[0] will generate before it does... 
- let mut update_msg_2 = msgs::UpdateFee { - channel_id: update_msg_1.channel_id.clone(), - feerate_per_kw: (initial_feerate + 30) as u32, - }; - - nodes[1].node.handle_update_fee(nodes[0].node.get_our_node_id(), &update_msg_2); - - update_msg_2.feerate_per_kw = (initial_feerate + 40) as u32; - // Deliver (3) - nodes[1].node.handle_update_fee(nodes[0].node.get_our_node_id(), &update_msg_2); - - // Deliver (1), generating (3) and (4) - nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_revoke_msg); - let as_second_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); - check_added_monitors!(nodes[0], 1); - assert!(as_second_update.update_add_htlcs.is_empty()); - assert!(as_second_update.update_fulfill_htlcs.is_empty()); - assert!(as_second_update.update_fail_htlcs.is_empty()); - assert!(as_second_update.update_fail_malformed_htlcs.is_empty()); - // Check that the update_fee newly generated matches what we delivered: - assert_eq!(as_second_update.update_fee.as_ref().unwrap().channel_id, update_msg_2.channel_id); - assert_eq!(as_second_update.update_fee.as_ref().unwrap().feerate_per_kw, update_msg_2.feerate_per_kw); - - // Deliver (2) commitment_signed - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_commitment_signed); - let as_revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); - check_added_monitors!(nodes[0], 1); - // No commitment_signed so get_event_msg's assert(len == 1) passes - - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_revoke_msg); - assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[1], 1); - - // Delever (4) - nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &as_second_update.commitment_signed); - let (bs_second_revoke, bs_second_commitment) = get_revoke_commit_msgs!(nodes[1], 
nodes[0].node.get_our_node_id()); - check_added_monitors!(nodes[1], 1); - - nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_second_revoke); - assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[0], 1); - - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_second_commitment); - let as_second_revoke = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); - // No commitment_signed so get_event_msg's assert(len == 1) passes - check_added_monitors!(nodes[0], 1); - - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_second_revoke); - assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[1], 1); } fn do_test_sanity_on_in_flight_opens(steps: u8) { @@ -549,48 +302,68 @@ fn do_test_sanity_on_in_flight_opens(steps: u8) { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - if steps & 0b1000_0000 != 0{ + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + + if steps & 0b1000_0000 != 0 { let block = create_dummy_block(nodes[0].best_block_hash(), 42, Vec::new()); connect_block(&nodes[0], &block); connect_block(&nodes[1], &block); } - if steps & 0x0f == 0 { return; } - nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, None).unwrap(); - let open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); + if steps & 0x0f == 0 { + return; + } + nodes[0].node.create_channel(node_b_id, 100000, 10001, 42, None, None).unwrap(); + let open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); - if steps & 0x0f == 1 { return; } - nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &open_channel); - let accept_channel = 
get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()); + if steps & 0x0f == 1 { + return; + } + nodes[1].node.handle_open_channel(node_a_id, &open_channel); + let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id); - if steps & 0x0f == 2 { return; } - nodes[0].node.handle_accept_channel(nodes[1].node.get_our_node_id(), &accept_channel); + if steps & 0x0f == 2 { + return; + } + nodes[0].node.handle_accept_channel(node_b_id, &accept_channel); - let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42); + let (temporary_channel_id, tx, _) = + create_funding_transaction(&nodes[0], &node_b_id, 100000, 42); - if steps & 0x0f == 3 { return; } - nodes[0].node.funding_transaction_generated(temporary_channel_id, nodes[1].node.get_our_node_id(), tx.clone()).unwrap(); - check_added_monitors!(nodes[0], 0); - let funding_created = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()); + if steps & 0x0f == 3 { + return; + } + nodes[0] + .node + .funding_transaction_generated(temporary_channel_id, node_b_id, tx.clone()) + .unwrap(); + check_added_monitors(&nodes[0], 0); + let funding_created = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, node_b_id); let channel_id = ChannelId::v1_from_funding_txid( - funding_created.funding_txid.as_byte_array(), funding_created.funding_output_index + funding_created.funding_txid.as_byte_array(), + funding_created.funding_output_index, ); - if steps & 0x0f == 4 { return; } - nodes[1].node.handle_funding_created(nodes[0].node.get_our_node_id(), &funding_created); + if steps & 0x0f == 4 { + return; + } + nodes[1].node.handle_funding_created(node_a_id, &funding_created); { let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap(); assert_eq!(added_monitors.len(), 1); assert_eq!(added_monitors[0].0, channel_id); 
added_monitors.clear(); } - expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id()); + expect_channel_pending_event(&nodes[1], &node_a_id); - let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()); + let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, node_a_id); - if steps & 0x0f == 5 { return; } - nodes[0].node.handle_funding_signed(nodes[1].node.get_our_node_id(), &funding_signed); + if steps & 0x0f == 5 { + return; + } + nodes[0].node.handle_funding_signed(node_b_id, &funding_signed); { let mut added_monitors = nodes[0].chain_monitor.added_monitors.lock().unwrap(); assert_eq!(added_monitors.len(), 1); @@ -598,18 +371,22 @@ fn do_test_sanity_on_in_flight_opens(steps: u8) { added_monitors.clear(); } - expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id()); + expect_channel_pending_event(&nodes[0], &node_b_id); let events_4 = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events_4.len(), 0); - if steps & 0x0f == 6 { return; } + if steps & 0x0f == 6 { + return; + } create_chan_between_nodes_with_value_confirm_first(&nodes[0], &nodes[1], &tx, 2); - if steps & 0x0f == 7 { return; } + if steps & 0x0f == 7 { + return; + } confirm_transaction_at(&nodes[0], &tx, 2); connect_blocks(&nodes[0], CHAN_CONFIRM_DEPTH); create_chan_between_nodes_with_value_confirm_second(&nodes[1], &nodes[0]); - expect_channel_ready_event(&nodes[0], &nodes[1].node.get_our_node_id()); + expect_channel_ready_event(&nodes[0], &node_b_id); } #[xtest(feature = "_externalize_tests")] @@ -635,504 +412,54 @@ pub fn test_sanity_on_in_flight_opens() { } #[xtest(feature = "_externalize_tests")] -pub fn test_update_fee_vanilla() { - let chanmon_cfgs = create_chanmon_cfgs(2); - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); - let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - 
create_announced_chan_between_nodes(&nodes, 0, 1); - - { - let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); - *feerate_lock += 25; - } - nodes[0].node.timer_tick_occurred(); - check_added_monitors!(nodes[0], 1); - - let events_0 = nodes[0].node.get_and_clear_pending_msg_events(); - assert_eq!(events_0.len(), 1); - let (update_msg, commitment_signed) = match events_0[0] { - MessageSendEvent::UpdateHTLCs { node_id:_, channel_id: _, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, update_fail_htlcs:_, update_fail_malformed_htlcs:_, ref update_fee, ref commitment_signed } } => { - (update_fee.as_ref(), commitment_signed) - }, - _ => panic!("Unexpected event"), - }; - nodes[1].node.handle_update_fee(nodes[0].node.get_our_node_id(), update_msg.unwrap()); - - nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), commitment_signed); - let (revoke_msg, commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); - check_added_monitors!(nodes[1], 1); - - nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &revoke_msg); - assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[0], 1); - - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &commitment_signed); - let revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); - // No commitment_signed so get_event_msg's assert(len == 1) passes - check_added_monitors!(nodes[0], 1); - - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &revoke_msg); - assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[1], 1); -} - -#[xtest(feature = "_externalize_tests")] -pub fn test_update_fee_that_funder_cannot_afford() { - let chanmon_cfgs = create_chanmon_cfgs(2); - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - 
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); - let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - let channel_value = 5000; - let push_sats = 700; - let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, channel_value, push_sats * 1000); - let channel_id = chan.2; - let secp_ctx = Secp256k1::new(); - let default_config = UserConfig::default(); - let bs_channel_reserve_sats = get_holder_selected_channel_reserve_satoshis(channel_value, &default_config); - - let channel_type_features = ChannelTypeFeatures::only_static_remote_key(); +pub fn fake_network_test() { + // Simple test which builds a network of ChannelManagers, connects them to each other, and + // tests that payments get routed and transactions broadcast in semi-reasonable ways. + let chanmon_cfgs = create_chanmon_cfgs(4); + let node_cfgs = create_node_cfgs(4, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]); + let nodes = create_network(4, &node_cfgs, &node_chanmgrs); - // Calculate the maximum feerate that A can afford. Note that we don't send an update_fee - // CONCURRENT_INBOUND_HTLC_FEE_BUFFER HTLCs before actually running out of local balance, so we - // calculate two different feerates here - the expected local limit as well as the expected - // remote limit. 
- let feerate = ((channel_value - bs_channel_reserve_sats - push_sats) * 1000 / (commitment_tx_base_weight(&channel_type_features) + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC)) as u32; - let non_buffer_feerate = ((channel_value - bs_channel_reserve_sats - push_sats) * 1000 / commitment_tx_base_weight(&channel_type_features)) as u32; - { - let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); - *feerate_lock = feerate; - } - nodes[0].node.timer_tick_occurred(); - check_added_monitors!(nodes[0], 1); - let update_msg = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + let node_d_id = nodes[3].node.get_our_node_id(); - nodes[1].node.handle_update_fee(nodes[0].node.get_our_node_id(), &update_msg.update_fee.unwrap()); + // Create some initial channels + let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1); + let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2); + let chan_3 = create_announced_chan_between_nodes(&nodes, 2, 3); - commitment_signed_dance!(nodes[1], nodes[0], update_msg.commitment_signed, false); + // Rebalance the network a bit by relaying one payment through all the channels... + send_payment(&nodes[0], &[&nodes[1], &nodes[2], &nodes[3]], 8000000); + send_payment(&nodes[0], &[&nodes[1], &nodes[2], &nodes[3]], 8000000); + send_payment(&nodes[0], &[&nodes[1], &nodes[2], &nodes[3]], 8000000); + send_payment(&nodes[0], &[&nodes[1], &nodes[2], &nodes[3]], 8000000); - // Confirm that the new fee based on the last local commitment txn is what we expected based on the feerate set above. 
- { - let commitment_tx = get_local_commitment_txn!(nodes[1], channel_id)[0].clone(); + // Send some more payments + send_payment(&nodes[1], &[&nodes[2], &nodes[3]], 1000000); + send_payment(&nodes[3], &[&nodes[2], &nodes[1], &nodes[0]], 1000000); + send_payment(&nodes[3], &[&nodes[2], &nodes[1]], 1000000); - //We made sure neither party's funds are below the dust limit and there are no HTLCs here - assert_eq!(commitment_tx.output.len(), 2); - let total_fee: u64 = commit_tx_fee_msat(feerate, 0, &channel_type_features) / 1000; - let mut actual_fee = commitment_tx.output.iter().fold(0, |acc, output| acc + output.value.to_sat()); - actual_fee = channel_value - actual_fee; - assert_eq!(total_fee, actual_fee); - } + // Test failure packets + let payment_hash_1 = route_payment(&nodes[0], &[&nodes[1], &nodes[2], &nodes[3]], 1000000).1; + fail_payment(&nodes[0], &[&nodes[1], &nodes[2], &nodes[3]], payment_hash_1); - { - // Increment the feerate by a small constant, accounting for rounding errors - let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); - *feerate_lock += 4; - } - nodes[0].node.timer_tick_occurred(); - nodes[0].logger.assert_log("lightning::ln::channel", format!("Cannot afford to send new feerate at {}", feerate + 4), 1); - check_added_monitors!(nodes[0], 0); + // Add a new channel that skips 3 + let chan_4 = create_announced_chan_between_nodes(&nodes, 1, 3); - const INITIAL_COMMITMENT_NUMBER: u64 = 281474976710654; - - let remote_point = { - let per_peer_state = nodes[1].node.per_peer_state.read().unwrap(); - let chan_lock = per_peer_state.get(&nodes[0].node.get_our_node_id()).unwrap().lock().unwrap(); - let remote_chan = chan_lock.channel_by_id.get(&chan.2).and_then(Channel::as_funded).unwrap(); - let chan_signer = remote_chan.get_signer(); - chan_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &secp_ctx).unwrap() - }; - - let res = { - let per_peer_state = nodes[0].node.per_peer_state.read().unwrap(); - 
let local_chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap(); - let local_chan = local_chan_lock.channel_by_id.get(&chan.2).and_then(Channel::as_funded).unwrap(); - let local_chan_signer = local_chan.get_signer(); - let nondust_htlcs: Vec = vec![]; - let commitment_tx = CommitmentTransaction::new( - INITIAL_COMMITMENT_NUMBER - 1, - &remote_point, - push_sats, - channel_value - push_sats - commit_tx_fee_msat(non_buffer_feerate + 4, 0, &channel_type_features) / 1000, - non_buffer_feerate + 4, - nondust_htlcs, - &local_chan.funding.channel_transaction_parameters.as_counterparty_broadcastable(), - &secp_ctx, - ); - local_chan_signer.as_ecdsa().unwrap().sign_counterparty_commitment( - &local_chan.funding.channel_transaction_parameters, &commitment_tx, Vec::new(), - Vec::new(), &secp_ctx, - ).unwrap() - }; - - let commit_signed_msg = msgs::CommitmentSigned { - channel_id: chan.2, - signature: res.0, - htlc_signatures: res.1, - batch: None, - #[cfg(taproot)] - partial_signature_with_nonce: None, - }; - - let update_fee = msgs::UpdateFee { - channel_id: chan.2, - feerate_per_kw: non_buffer_feerate + 4, - }; - - nodes[1].node.handle_update_fee(nodes[0].node.get_our_node_id(), &update_fee); - - //While producing the commitment_signed response after handling a received update_fee request the - //check to see if the funder, who sent the update_fee request, can afford the new fee (funder_balance >= fee+channel_reserve) - //Should produce and error. 
- nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), &commit_signed_msg); - nodes[1].logger.assert_log_contains("lightning::ln::channelmanager", "Funding remote cannot afford proposed new fee", 3); - check_added_monitors!(nodes[1], 1); - check_closed_broadcast!(nodes[1], true); - check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: String::from("Funding remote cannot afford proposed new fee") }, - [nodes[0].node.get_our_node_id()], channel_value); -} - -#[xtest(feature = "_externalize_tests")] -pub fn test_update_fee_that_saturates_subs() { - // Check that when a remote party sends us an `update_fee` message that results in a total fee - // on the commitment transaction that is greater than her balance, we saturate the subtractions, - // and force close the channel. - - let mut default_config = test_default_channel_config(); - let secp_ctx = Secp256k1::new(); - - let chanmon_cfgs = create_chanmon_cfgs(2); - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(default_config.clone()), Some(default_config)]); - let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); - - let chan_id = create_chan_between_nodes_with_value(&nodes[0], &nodes[1], 10_000, 8_500_000).3; - - const FEERATE: u32 = 250 * 10; // 10sat/vb - - // Assert that the new feerate will completely exhaust the balance of node 0, and saturate the - // subtraction of the total fee from node 0's balance. - let total_fee_sat = chan_utils::commit_tx_fee_sat(FEERATE, 0, &ChannelTypeFeatures::empty()); - assert!(total_fee_sat > 1500); - - const INITIAL_COMMITMENT_NUMBER: u64 = 281474976710654; - - // We build a commitment transcation here only to pass node 1's check of node 0's signature - // in `commitment_signed`. 
- - let remote_point = { - let per_peer_state = nodes[1].node.per_peer_state.read().unwrap(); - let chan_lock = per_peer_state.get(&nodes[0].node.get_our_node_id()).unwrap().lock().unwrap(); - let remote_chan = chan_lock.channel_by_id.get(&chan_id).and_then(Channel::as_funded).unwrap(); - let chan_signer = remote_chan.get_signer(); - chan_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER, &secp_ctx).unwrap() - }; - - let res = { - let per_peer_state = nodes[0].node.per_peer_state.read().unwrap(); - let local_chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap(); - let local_chan = local_chan_lock.channel_by_id.get(&chan_id).and_then(Channel::as_funded).unwrap(); - let local_chan_signer = local_chan.get_signer(); - let nondust_htlcs: Vec = vec![]; - let commitment_tx = CommitmentTransaction::new( - INITIAL_COMMITMENT_NUMBER, - &remote_point, - 8500, - // Set a zero balance here: this is the transaction that node 1 will expect a signature for, as - // he will do a saturating subtraction of the total fees from node 0's balance. 
- 0, - FEERATE, - nondust_htlcs, - &local_chan.funding.channel_transaction_parameters.as_counterparty_broadcastable(), - &secp_ctx, - ); - local_chan_signer.as_ecdsa().unwrap().sign_counterparty_commitment( - &local_chan.funding.channel_transaction_parameters, &commitment_tx, Vec::new(), - Vec::new(), &secp_ctx, - ).unwrap() - }; - - let commit_signed_msg = msgs::CommitmentSigned { - channel_id: chan_id, - signature: res.0, - htlc_signatures: res.1, - batch: None, - #[cfg(taproot)] - partial_signature_with_nonce: None, - }; - - let update_fee = msgs::UpdateFee { - channel_id: chan_id, - feerate_per_kw: FEERATE, - }; - - nodes[1].node.handle_update_fee(nodes[0].node.get_our_node_id(), &update_fee); - - nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), &commit_signed_msg); - nodes[1].logger.assert_log_contains("lightning::ln::channelmanager", "Funding remote cannot afford proposed new fee", 3); - check_added_monitors!(nodes[1], 1); - check_closed_broadcast!(nodes[1], true); - check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: String::from("Funding remote cannot afford proposed new fee") }, - [nodes[0].node.get_our_node_id()], 10_000); -} - -#[xtest(feature = "_externalize_tests")] -pub fn test_update_fee_with_fundee_update_add_htlc() { - let chanmon_cfgs = create_chanmon_cfgs(2); - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); - let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); - let chan = create_announced_chan_between_nodes(&nodes, 0, 1); - - // balancing - send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000); - - { - let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); - *feerate_lock += 20; - } - nodes[0].node.timer_tick_occurred(); - check_added_monitors!(nodes[0], 1); - - let events_0 = nodes[0].node.get_and_clear_pending_msg_events(); - assert_eq!(events_0.len(), 1); - let (update_msg, 
commitment_signed) = match events_0[0] { - MessageSendEvent::UpdateHTLCs { node_id:_, channel_id: _, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, update_fail_htlcs:_, update_fail_malformed_htlcs:_, ref update_fee, ref commitment_signed } } => { - (update_fee.as_ref(), commitment_signed) - }, - _ => panic!("Unexpected event"), - }; - nodes[1].node.handle_update_fee(nodes[0].node.get_our_node_id(), update_msg.unwrap()); - nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), commitment_signed); - let (revoke_msg, commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); - check_added_monitors!(nodes[1], 1); - - let (route, our_payment_hash, our_payment_preimage, our_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 800000); - - // nothing happens since node[1] is in AwaitingRemoteRevoke - nodes[1].node.send_payment_with_route(route, our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap(); - { - let mut added_monitors = nodes[0].chain_monitor.added_monitors.lock().unwrap(); - assert_eq!(added_monitors.len(), 0); - added_monitors.clear(); - } - assert!(nodes[0].node.get_and_clear_pending_events().is_empty()); - assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - // node[1] has nothing to do - - nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &revoke_msg); - assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[0], 1); - - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &commitment_signed); - let revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); - // No commitment_signed so get_event_msg's assert(len == 1) passes - check_added_monitors!(nodes[0], 1); - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), 
&revoke_msg); - check_added_monitors!(nodes[1], 1); - // AwaitingRemoteRevoke ends here - - let commitment_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); - assert_eq!(commitment_update.update_add_htlcs.len(), 1); - assert_eq!(commitment_update.update_fulfill_htlcs.len(), 0); - assert_eq!(commitment_update.update_fail_htlcs.len(), 0); - assert_eq!(commitment_update.update_fail_malformed_htlcs.len(), 0); - assert_eq!(commitment_update.update_fee.is_none(), true); - - nodes[0].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &commitment_update.update_add_htlcs[0]); - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &commitment_update.commitment_signed); - check_added_monitors!(nodes[0], 1); - let (revoke, commitment_signed) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id()); - - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &revoke); - check_added_monitors!(nodes[1], 1); - assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - - nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &commitment_signed); - check_added_monitors!(nodes[1], 1); - let revoke = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); - // No commitment_signed so get_event_msg's assert(len == 1) passes - - nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &revoke); - check_added_monitors!(nodes[0], 1); - assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - - expect_pending_htlcs_forwardable!(nodes[0]); - - let events = nodes[0].node.get_and_clear_pending_events(); - assert_eq!(events.len(), 1); - match events[0] { - Event::PaymentClaimable { .. 
} => { }, - _ => panic!("Unexpected event"), - }; - - claim_payment(&nodes[1], &vec!(&nodes[0])[..], our_payment_preimage); - - send_payment(&nodes[1], &vec!(&nodes[0])[..], 800000); - send_payment(&nodes[0], &vec!(&nodes[1])[..], 800000); - close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true); - check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000); - check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 100000); -} - -#[xtest(feature = "_externalize_tests")] -pub fn test_update_fee() { - let chanmon_cfgs = create_chanmon_cfgs(2); - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); - let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - let chan = create_announced_chan_between_nodes(&nodes, 0, 1); - let channel_id = chan.2; - - // A B - // (1) update_fee/commitment_signed -> - // <- (2) revoke_and_ack - // .- send (3) commitment_signed - // (4) update_fee/commitment_signed -> - // .- send (5) revoke_and_ack (no CS as we're awaiting a revoke) - // <- (3) commitment_signed delivered - // send (6) revoke_and_ack -. - // <- (5) deliver revoke_and_ack - // (6) deliver revoke_and_ack -> - // .- send (7) commitment_signed in response to (4) - // <- (7) deliver commitment_signed - // revoke_and_ack -> - - // Create and deliver (1)... 
- let feerate; - { - let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); - feerate = *feerate_lock; - *feerate_lock = feerate + 20; - } - nodes[0].node.timer_tick_occurred(); - check_added_monitors!(nodes[0], 1); - - let events_0 = nodes[0].node.get_and_clear_pending_msg_events(); - assert_eq!(events_0.len(), 1); - let (update_msg, commitment_signed) = match events_0[0] { - MessageSendEvent::UpdateHTLCs { node_id:_, channel_id: _, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, update_fail_htlcs:_, update_fail_malformed_htlcs:_, ref update_fee, ref commitment_signed } } => { - (update_fee.as_ref(), commitment_signed) - }, - _ => panic!("Unexpected event"), - }; - nodes[1].node.handle_update_fee(nodes[0].node.get_our_node_id(), update_msg.unwrap()); - - // Generate (2) and (3): - nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), commitment_signed); - let (revoke_msg, commitment_signed_0) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); - check_added_monitors!(nodes[1], 1); - - // Deliver (2): - nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &revoke_msg); - assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[0], 1); - - // Create and deliver (4)... 
- { - let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); - *feerate_lock = feerate + 30; - } - nodes[0].node.timer_tick_occurred(); - check_added_monitors!(nodes[0], 1); - let events_0 = nodes[0].node.get_and_clear_pending_msg_events(); - assert_eq!(events_0.len(), 1); - let (update_msg, commitment_signed) = match events_0[0] { - MessageSendEvent::UpdateHTLCs { node_id:_, channel_id: _, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, update_fail_htlcs:_, update_fail_malformed_htlcs:_, ref update_fee, ref commitment_signed } } => { - (update_fee.as_ref(), commitment_signed) - }, - _ => panic!("Unexpected event"), - }; - - nodes[1].node.handle_update_fee(nodes[0].node.get_our_node_id(), update_msg.unwrap()); - nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), commitment_signed); - check_added_monitors!(nodes[1], 1); - // ... creating (5) - let revoke_msg = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); - // No commitment_signed so get_event_msg's assert(len == 1) passes - - // Handle (3), creating (6): - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &commitment_signed_0); - check_added_monitors!(nodes[0], 1); - let revoke_msg_0 = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); - // No commitment_signed so get_event_msg's assert(len == 1) passes - - // Deliver (5): - nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &revoke_msg); - assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[0], 1); - - // Deliver (6), creating (7): - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &revoke_msg_0); - let commitment_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); - assert!(commitment_update.update_add_htlcs.is_empty()); - 
assert!(commitment_update.update_fulfill_htlcs.is_empty()); - assert!(commitment_update.update_fail_htlcs.is_empty()); - assert!(commitment_update.update_fail_malformed_htlcs.is_empty()); - assert!(commitment_update.update_fee.is_none()); - check_added_monitors!(nodes[1], 1); - - // Deliver (7) - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &commitment_update.commitment_signed); - check_added_monitors!(nodes[0], 1); - let revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); - // No commitment_signed so get_event_msg's assert(len == 1) passes - - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &revoke_msg); - check_added_monitors!(nodes[1], 1); - assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - - assert_eq!(get_feerate!(nodes[0], nodes[1], channel_id), feerate + 30); - assert_eq!(get_feerate!(nodes[1], nodes[0], channel_id), feerate + 30); - close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true); - check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000); - check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 100000); -} - -#[xtest(feature = "_externalize_tests")] -pub fn fake_network_test() { - // Simple test which builds a network of ChannelManagers, connects them to each other, and - // tests that payments get routed and transactions broadcast in semi-reasonable ways. 
- let chanmon_cfgs = create_chanmon_cfgs(4); - let node_cfgs = create_node_cfgs(4, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]); - let nodes = create_network(4, &node_cfgs, &node_chanmgrs); - - // Create some initial channels - let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1); - let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2); - let chan_3 = create_announced_chan_between_nodes(&nodes, 2, 3); - - // Rebalance the network a bit by relaying one payment through all the channels... - send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 8000000); - send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 8000000); - send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 8000000); - send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 8000000); - - // Send some more payments - send_payment(&nodes[1], &vec!(&nodes[2], &nodes[3])[..], 1000000); - send_payment(&nodes[3], &vec!(&nodes[2], &nodes[1], &nodes[0])[..], 1000000); - send_payment(&nodes[3], &vec!(&nodes[2], &nodes[1])[..], 1000000); - - // Test failure packets - let payment_hash_1 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 1000000).1; - fail_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], payment_hash_1); - - // Add a new channel that skips 3 - let chan_4 = create_announced_chan_between_nodes(&nodes, 1, 3); - - send_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 1000000); - send_payment(&nodes[2], &vec!(&nodes[3])[..], 1000000); - send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000); - send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000); - send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000); - send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000); - send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000); + send_payment(&nodes[0], &[&nodes[1], &nodes[3]], 1000000); + send_payment(&nodes[2], &[&nodes[3]], 1000000); + 
send_payment(&nodes[1], &[&nodes[3]], 8000000); + send_payment(&nodes[1], &[&nodes[3]], 8000000); + send_payment(&nodes[1], &[&nodes[3]], 8000000); + send_payment(&nodes[1], &[&nodes[3]], 8000000); + send_payment(&nodes[1], &[&nodes[3]], 8000000); // Do some rebalance loop payments, simultaneously let mut hops = Vec::with_capacity(3); hops.push(RouteHop { - pubkey: nodes[2].node.get_our_node_id(), + pubkey: node_c_id, node_features: NodeFeatures::empty(), short_channel_id: chan_2.0.contents.short_channel_id, channel_features: ChannelFeatures::empty(), @@ -1141,7 +468,7 @@ pub fn fake_network_test() { maybe_announced_channel: true, }); hops.push(RouteHop { - pubkey: nodes[3].node.get_our_node_id(), + pubkey: node_d_id, node_features: NodeFeatures::empty(), short_channel_id: chan_3.0.contents.short_channel_id, channel_features: ChannelFeatures::empty(), @@ -1150,7 +477,7 @@ pub fn fake_network_test() { maybe_announced_channel: true, }); hops.push(RouteHop { - pubkey: nodes[1].node.get_our_node_id(), + pubkey: node_b_id, node_features: nodes[1].node.node_features(), short_channel_id: chan_4.0.contents.short_channel_id, channel_features: nodes[1].node.channel_features(), @@ -1158,19 +485,24 @@ pub fn fake_network_test() { cltv_expiry_delta: TEST_FINAL_CLTV, maybe_announced_channel: true, }); - hops[1].fee_msat = chan_4.1.contents.fee_base_msat as u64 + chan_4.1.contents.fee_proportional_millionths as u64 * hops[2].fee_msat as u64 / 1000000; - hops[0].fee_msat = chan_3.0.contents.fee_base_msat as u64 + chan_3.0.contents.fee_proportional_millionths as u64 * hops[1].fee_msat as u64 / 1000000; - let payment_params = PaymentParameters::from_node_id( - nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV - ).with_bolt11_features(nodes[1].node.bolt11_invoice_features()).unwrap(); + hops[1].fee_msat = chan_4.1.contents.fee_base_msat as u64 + + chan_4.1.contents.fee_proportional_millionths as u64 * hops[2].fee_msat as u64 / 1000000; + hops[0].fee_msat = 
chan_3.0.contents.fee_base_msat as u64 + + chan_3.0.contents.fee_proportional_millionths as u64 * hops[1].fee_msat as u64 / 1000000; + let payment_params = PaymentParameters::from_node_id(node_b_id, TEST_FINAL_CLTV) + .with_bolt11_features(nodes[1].node.bolt11_invoice_features()) + .unwrap(); let route_params = RouteParameters::from_payment_params_and_value(payment_params, 1000000); - let payment_preimage_1 = send_along_route(&nodes[1], - Route { paths: vec![Path { hops, blinded_tail: None }], route_params: Some(route_params.clone()) }, - &vec!(&nodes[2], &nodes[3], &nodes[1])[..], 1000000).0; + let route = Route { + paths: vec![Path { hops, blinded_tail: None }], + route_params: Some(route_params.clone()), + }; + let path: &[_] = &[&nodes[2], &nodes[3], &nodes[1]]; + let payment_preimage_1 = send_along_route(&nodes[1], route, path, 1000000).0; let mut hops = Vec::with_capacity(3); hops.push(RouteHop { - pubkey: nodes[3].node.get_our_node_id(), + pubkey: node_d_id, node_features: NodeFeatures::empty(), short_channel_id: chan_4.0.contents.short_channel_id, channel_features: ChannelFeatures::empty(), @@ -1179,7 +511,7 @@ pub fn fake_network_test() { maybe_announced_channel: true, }); hops.push(RouteHop { - pubkey: nodes[2].node.get_our_node_id(), + pubkey: node_c_id, node_features: NodeFeatures::empty(), short_channel_id: chan_3.0.contents.short_channel_id, channel_features: ChannelFeatures::empty(), @@ -1188,7 +520,7 @@ pub fn fake_network_test() { maybe_announced_channel: true, }); hops.push(RouteHop { - pubkey: nodes[1].node.get_our_node_id(), + pubkey: node_b_id, node_features: nodes[1].node.node_features(), short_channel_id: chan_2.0.contents.short_channel_id, channel_features: nodes[1].node.channel_features(), @@ -1196,147 +528,43 @@ pub fn fake_network_test() { cltv_expiry_delta: TEST_FINAL_CLTV, maybe_announced_channel: true, }); - hops[1].fee_msat = chan_2.1.contents.fee_base_msat as u64 + chan_2.1.contents.fee_proportional_millionths as u64 * 
hops[2].fee_msat as u64 / 1000000; - hops[0].fee_msat = chan_3.1.contents.fee_base_msat as u64 + chan_3.1.contents.fee_proportional_millionths as u64 * hops[1].fee_msat as u64 / 1000000; - let payment_hash_2 = send_along_route(&nodes[1], - Route { paths: vec![Path { hops, blinded_tail: None }], route_params: Some(route_params) }, - &vec!(&nodes[3], &nodes[2], &nodes[1])[..], 1000000).1; + hops[1].fee_msat = chan_2.1.contents.fee_base_msat as u64 + + chan_2.1.contents.fee_proportional_millionths as u64 * hops[2].fee_msat as u64 / 1000000; + hops[0].fee_msat = chan_3.1.contents.fee_base_msat as u64 + + chan_3.1.contents.fee_proportional_millionths as u64 * hops[1].fee_msat as u64 / 1000000; + let route = + Route { paths: vec![Path { hops, blinded_tail: None }], route_params: Some(route_params) }; + let path: &[_] = &[&nodes[3], &nodes[2], &nodes[1]]; + let payment_hash_2 = send_along_route(&nodes[1], route, path, 1000000).1; // Claim the rebalances... - fail_payment(&nodes[1], &vec!(&nodes[3], &nodes[2], &nodes[1])[..], payment_hash_2); - claim_payment(&nodes[1], &vec!(&nodes[2], &nodes[3], &nodes[1])[..], payment_preimage_1); + fail_payment(&nodes[1], &[&nodes[3], &nodes[2], &nodes[1]], payment_hash_2); + claim_payment(&nodes[1], &[&nodes[2], &nodes[3], &nodes[1]], payment_preimage_1); // Close down the channels... 
close_channel(&nodes[0], &nodes[1], &chan_1.2, chan_1.3, true); - check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000); - check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 100000); - close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, false); - check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[2].node.get_our_node_id()], 100000); - check_closed_event!(nodes[2], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000); - close_channel(&nodes[2], &nodes[3], &chan_3.2, chan_3.3, true); - check_closed_event!(nodes[2], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[3].node.get_our_node_id()], 100000); - check_closed_event!(nodes[3], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[2].node.get_our_node_id()], 100000); - close_channel(&nodes[1], &nodes[3], &chan_4.2, chan_4.3, false); - check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[3].node.get_our_node_id()], 100000); - check_closed_event!(nodes[3], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000); -} - -#[xtest(feature = "_externalize_tests")] -pub fn holding_cell_htlc_counting() { - // Tests that HTLCs in the holding cell count towards the pending HTLC limits on outbound HTLCs - // to ensure we don't end up with HTLCs sitting around in our holding cell for several - // commitment dance rounds. 
- let chanmon_cfgs = create_chanmon_cfgs(3); - let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); - let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); - create_announced_chan_between_nodes(&nodes, 0, 1); - let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2); - - // Fetch a route in advance as we will be unable to once we're unable to send. - let (route, payment_hash_1, _, payment_secret_1) = get_route_and_payment_hash!(nodes[1], nodes[2], 100000); - - let mut payments = Vec::new(); - for _ in 0..50 { - let (route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[2], 100000); - nodes[1].node.send_payment_with_route(route, payment_hash, - RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap(); - payments.push((payment_preimage, payment_hash)); - } - check_added_monitors!(nodes[1], 1); - - let mut events = nodes[1].node.get_and_clear_pending_msg_events(); - assert_eq!(events.len(), 1); - let initial_payment_event = SendEvent::from_event(events.pop().unwrap()); - assert_eq!(initial_payment_event.node_id, nodes[2].node.get_our_node_id()); - - // There is now one HTLC in an outbound commitment transaction and (OUR_MAX_HTLCS - 1) HTLCs in - // the holding cell waiting on B's RAA to send. At this point we should not be able to add - // another HTLC. - { - unwrap_send_err!(nodes[1], nodes[1].node.send_payment_with_route(route, payment_hash_1, - RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0) - ), true, APIError::ChannelUnavailable { .. }, {}); - assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - } - - // This should also be true if we try to forward a payment. 
- let (route, payment_hash_2, _, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[2], 100000); - { - nodes[0].node.send_payment_with_route(route, payment_hash_2, - RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap(); - check_added_monitors!(nodes[0], 1); - } - - let mut events = nodes[0].node.get_and_clear_pending_msg_events(); - assert_eq!(events.len(), 1); - let payment_event = SendEvent::from_event(events.pop().unwrap()); - assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id()); - - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); - commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); - // We have to forward pending HTLCs twice - once tries to forward the payment forward (and - // fails), the second will process the resulting failure and fail the HTLC backward. - expect_pending_htlcs_forwardable!(nodes[1]); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingFailureType::Forward { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]); - check_added_monitors!(nodes[1], 1); - - let bs_fail_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); - nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &bs_fail_updates.update_fail_htlcs[0]); - commitment_signed_dance!(nodes[0], nodes[1], bs_fail_updates.commitment_signed, false, true); - - expect_payment_failed_with_update!(nodes[0], payment_hash_2, false, chan_2.0.contents.short_channel_id, false); - - // Now forward all the pending HTLCs and claim them back - nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &initial_payment_event.msgs[0]); - nodes[2].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &initial_payment_event.commitment_msg); - check_added_monitors!(nodes[2], 1); - - let (bs_revoke_and_ack, bs_commitment_signed) = 
get_revoke_commit_msgs!(nodes[2], nodes[1].node.get_our_node_id()); - nodes[1].node.handle_revoke_and_ack(nodes[2].node.get_our_node_id(), &bs_revoke_and_ack); - check_added_monitors!(nodes[1], 1); - let as_updates = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id()); - - nodes[1].node.handle_commitment_signed_batch_test(nodes[2].node.get_our_node_id(), &bs_commitment_signed); - check_added_monitors!(nodes[1], 1); - let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id()); - - for ref update in as_updates.update_add_htlcs.iter() { - nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), update); - } - nodes[2].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &as_updates.commitment_signed); - check_added_monitors!(nodes[2], 1); - nodes[2].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &as_raa); - check_added_monitors!(nodes[2], 1); - let (bs_revoke_and_ack, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[2], nodes[1].node.get_our_node_id()); + let node_a_reason = ClosureReason::CounterpartyInitiatedCooperativeClosure; + check_closed_event!(nodes[0], 1, node_a_reason, [node_b_id], 100000); + let node_b_reason = ClosureReason::LocallyInitiatedCooperativeClosure; + check_closed_event!(nodes[1], 1, node_b_reason, [node_a_id], 100000); - nodes[1].node.handle_revoke_and_ack(nodes[2].node.get_our_node_id(), &bs_revoke_and_ack); - check_added_monitors!(nodes[1], 1); - nodes[1].node.handle_commitment_signed_batch_test(nodes[2].node.get_our_node_id(), &bs_commitment_signed); - check_added_monitors!(nodes[1], 1); - let as_final_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id()); - - nodes[2].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &as_final_raa); - check_added_monitors!(nodes[2], 1); - - expect_pending_htlcs_forwardable!(nodes[2]); - - let events = 
nodes[2].node.get_and_clear_pending_events(); - assert_eq!(events.len(), payments.len()); - for (event, &(_, ref hash)) in events.iter().zip(payments.iter()) { - match event { - &Event::PaymentClaimable { ref payment_hash, .. } => { - assert_eq!(*payment_hash, *hash); - }, - _ => panic!("Unexpected event"), - }; - } + close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, false); + let node_b_reason = ClosureReason::LocallyInitiatedCooperativeClosure; + check_closed_event!(nodes[1], 1, node_b_reason, [node_c_id], 100000); + let node_c_reason = ClosureReason::CounterpartyInitiatedCooperativeClosure; + check_closed_event!(nodes[2], 1, node_c_reason, [node_b_id], 100000); - for (preimage, _) in payments.drain(..) { - claim_payment(&nodes[1], &[&nodes[2]], preimage); - } + close_channel(&nodes[2], &nodes[3], &chan_3.2, chan_3.3, true); + let node_c_reason = ClosureReason::CounterpartyInitiatedCooperativeClosure; + check_closed_event!(nodes[2], 1, node_c_reason, [node_d_id], 100000); + let node_d_reason = ClosureReason::LocallyInitiatedCooperativeClosure; + check_closed_event!(nodes[3], 1, node_d_reason, [node_c_id], 100000); - send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000); + close_channel(&nodes[1], &nodes[3], &chan_4.2, chan_4.3, false); + let node_b_reason = ClosureReason::LocallyInitiatedCooperativeClosure; + check_closed_event!(nodes[1], 1, node_b_reason, [node_d_id], 100000); + let node_d_reason = ClosureReason::CounterpartyInitiatedCooperativeClosure; + check_closed_event!(nodes[3], 1, node_d_reason, [node_b_id], 100000); } #[xtest(feature = "_externalize_tests")] @@ -1355,17 +583,18 @@ pub fn duplicate_htlc_test() { create_announced_chan_between_nodes(&nodes, 3, 4); create_announced_chan_between_nodes(&nodes, 3, 5); - let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &vec!(&nodes[3], &nodes[4])[..], 1000000); + let (payment_preimage, payment_hash, ..) 
= + route_payment(&nodes[0], &[&nodes[3], &nodes[4]], 1000000); *nodes[0].network_payment_count.borrow_mut() -= 1; - assert_eq!(route_payment(&nodes[1], &vec!(&nodes[3])[..], 1000000).0, payment_preimage); + assert_eq!(route_payment(&nodes[1], &[&nodes[3]], 1000000).0, payment_preimage); *nodes[0].network_payment_count.borrow_mut() -= 1; - assert_eq!(route_payment(&nodes[2], &vec!(&nodes[3], &nodes[5])[..], 1000000).0, payment_preimage); + assert_eq!(route_payment(&nodes[2], &[&nodes[3], &nodes[5]], 1000000).0, payment_preimage); - claim_payment(&nodes[0], &vec!(&nodes[3], &nodes[4])[..], payment_preimage); - fail_payment(&nodes[2], &vec!(&nodes[3], &nodes[5])[..], payment_hash); - claim_payment(&nodes[1], &vec!(&nodes[3])[..], payment_preimage); + claim_payment(&nodes[0], &[&nodes[3], &nodes[4]], payment_preimage); + fail_payment(&nodes[2], &[&nodes[3], &nodes[5]], payment_hash); + claim_payment(&nodes[1], &[&nodes[3]], payment_preimage); } #[xtest(feature = "_externalize_tests")] @@ -1378,6 +607,8 @@ pub fn test_duplicate_htlc_different_direction_onchain() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let node_b_id = nodes[1].node.get_our_node_id(); + let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1); // post-bump fee (288 satoshis) + dust threshold for output type (294 satoshis) = 582 @@ -1385,18 +616,26 @@ pub fn test_duplicate_htlc_different_direction_onchain() { let payment_value_msats = payment_value_sats * 1000; // balancing - send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000); + send_payment(&nodes[0], &[&nodes[1]], 8000000); - let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 900_000); + let (payment_preimage, payment_hash, ..) 
= route_payment(&nodes[0], &[&nodes[1]], 900_000); let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[0], payment_value_msats); - let node_a_payment_secret = nodes[0].node.create_inbound_payment_for_hash(payment_hash, None, 7200, None).unwrap(); - send_along_route_with_secret(&nodes[1], route, &[&[&nodes[0]]], payment_value_msats, payment_hash, node_a_payment_secret); + let node_a_payment_secret = + nodes[0].node.create_inbound_payment_for_hash(payment_hash, None, 7200, None).unwrap(); + send_along_route_with_secret( + &nodes[1], + route, + &[&[&nodes[0]]], + payment_value_msats, + payment_hash, + node_a_payment_secret, + ); // Provide preimage to node 0 by claiming payment nodes[0].node.claim_funds(payment_preimage); expect_payment_claimed!(nodes[0], payment_hash, payment_value_msats); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); // Broadcast node 1 commitment txn let remote_txn = get_local_commitment_txn!(nodes[1], chan_1.2); @@ -1413,8 +652,8 @@ pub fn test_duplicate_htlc_different_direction_onchain() { assert_eq!(has_both_htlcs, 2); mine_transaction(&nodes[0], &remote_txn[0]); - check_added_monitors!(nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000); + check_added_monitors(&nodes[0], 1); + check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [node_b_id], 100000); connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires let claim_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); @@ -1424,36 +663,64 @@ pub fn test_duplicate_htlc_different_direction_onchain() { check_spends!(claim_txn[1], remote_txn[0]); check_spends!(claim_txn[2], remote_txn[0]); let preimage_tx = &claim_txn[0]; - let timeout_tx = claim_txn.iter().skip(1).find(|t| t.input[0].previous_output != preimage_tx.input[0].previous_output).unwrap(); - let preimage_bump_tx = claim_txn.iter().skip(1).find(|t| 
t.input[0].previous_output == preimage_tx.input[0].previous_output).unwrap(); + let timeout_tx = claim_txn + .iter() + .skip(1) + .find(|t| t.input[0].previous_output != preimage_tx.input[0].previous_output) + .unwrap(); + let preimage_bump_tx = claim_txn + .iter() + .skip(1) + .find(|t| t.input[0].previous_output == preimage_tx.input[0].previous_output) + .unwrap(); assert_eq!(preimage_tx.input.len(), 1); assert_eq!(preimage_bump_tx.input.len(), 1); assert_eq!(preimage_tx.input.len(), 1); assert_eq!(preimage_tx.input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); // HTLC 1 <--> 0, preimage tx - assert_eq!(remote_txn[0].output[preimage_tx.input[0].previous_output.vout as usize].value.to_sat(), payment_value_sats); + assert_eq!( + remote_txn[0].output[preimage_tx.input[0].previous_output.vout as usize].value.to_sat(), + payment_value_sats + ); assert_eq!(timeout_tx.input.len(), 1); assert_eq!(timeout_tx.input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT); // HTLC 0 <--> 1, timeout tx check_spends!(timeout_tx, remote_txn[0]); - assert_eq!(remote_txn[0].output[timeout_tx.input[0].previous_output.vout as usize].value.to_sat(), 900); + assert_eq!( + remote_txn[0].output[timeout_tx.input[0].previous_output.vout as usize].value.to_sat(), + 900 + ); let events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 3); for e in events { match e { MessageSendEvent::BroadcastChannelUpdate { .. 
} => {}, - MessageSendEvent::HandleError { node_id, action: msgs::ErrorAction::DisconnectPeer { ref msg } } => { - assert_eq!(node_id, nodes[1].node.get_our_node_id()); + MessageSendEvent::HandleError { + node_id, + action: msgs::ErrorAction::DisconnectPeer { ref msg }, + } => { + assert_eq!(node_id, node_b_id); assert_eq!(msg.as_ref().unwrap().data, "Channel closed because commitment or closing transaction was confirmed on chain."); }, - MessageSendEvent::UpdateHTLCs { ref node_id, channel_id: _, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, .. } } => { + MessageSendEvent::UpdateHTLCs { + ref node_id, + updates: + msgs::CommitmentUpdate { + ref update_add_htlcs, + ref update_fulfill_htlcs, + ref update_fail_htlcs, + ref update_fail_malformed_htlcs, + .. + }, + .. + } => { assert!(update_add_htlcs.is_empty()); assert!(update_fail_htlcs.is_empty()); assert_eq!(update_fulfill_htlcs.len(), 1); assert!(update_fail_malformed_htlcs.is_empty()); - assert_eq!(nodes[1].node.get_our_node_id(), *node_id); + assert_eq!(node_b_id, *node_id); }, _ => panic!("Unexpected event"), } @@ -1461,1617 +728,869 @@ pub fn test_duplicate_htlc_different_direction_onchain() { } #[xtest(feature = "_externalize_tests")] -pub fn test_basic_channel_reserve() { +pub fn test_inbound_outbound_capacity_is_not_zero() { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); - let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); - let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - let chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2); - let channel_reserve = chan_stat.channel_reserve_msat; + let _ = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000); 
+ let channels0 = node_chanmgrs[0].list_channels(); + let channels1 = node_chanmgrs[1].list_channels(); + let default_config = UserConfig::default(); + assert_eq!(channels0.len(), 1); + assert_eq!(channels1.len(), 1); - // The 2* and +1 are for the fee spike reserve. - let commit_tx_fee = 2 * commit_tx_fee_msat(get_feerate!(nodes[0], nodes[1], chan.2), 1 + 1, &get_channel_type_features!(nodes[0], nodes[1], chan.2)); - let max_can_send = 5000000 - channel_reserve - commit_tx_fee; - let (mut route, our_payment_hash, _, our_payment_secret) = - get_route_and_payment_hash!(nodes[0], nodes[1], max_can_send); - route.paths[0].hops.last_mut().unwrap().fee_msat += 1; - let err = nodes[0].node.send_payment_with_route(route, our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)); - unwrap_send_err!(nodes[0], err, true, APIError::ChannelUnavailable { .. }, {} ); - assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); + let reserve = get_holder_selected_channel_reserve_satoshis(100_000, &default_config); + assert_eq!(channels0[0].inbound_capacity_msat, 95000000 - reserve * 1000); + assert_eq!(channels1[0].outbound_capacity_msat, 95000000 - reserve * 1000); - send_payment(&nodes[0], &vec![&nodes[1]], max_can_send); + assert_eq!(channels0[0].outbound_capacity_msat, 100000 * 1000 - 95000000 - reserve * 1000); + assert_eq!(channels1[0].inbound_capacity_msat, 100000 * 1000 - 95000000 - reserve * 1000); } -#[xtest(feature = "_externalize_tests")] -pub fn test_fee_spike_violation_fails_htlc() { - let chanmon_cfgs = create_chanmon_cfgs(2); - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); - let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); - let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000); - - let (mut route, payment_hash, _, payment_secret) = - get_route_and_payment_hash!(nodes[0], 
nodes[1], 3460000); - route.paths[0].hops[0].fee_msat += 1; - // Need to manually create the update_add_htlc message to go around the channel reserve check in send_htlc() - let secp_ctx = Secp256k1::new(); - let session_priv = SecretKey::from_slice(&[42; 32]).expect("RNG is bad!"); - - let cur_height = nodes[1].node.best_block.read().unwrap().height + 1; - - let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route.paths[0], &session_priv); - let recipient_onion_fields = RecipientOnionFields::secret_only(payment_secret); - let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(&route.paths[0], - 3460001, &recipient_onion_fields, cur_height, &None, None, None).unwrap(); - let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash).unwrap(); - let msg = msgs::UpdateAddHTLC { - channel_id: chan.2, - htlc_id: 0, - amount_msat: htlc_msat, - payment_hash: payment_hash, - cltv_expiry: htlc_cltv, - onion_routing_packet: onion_packet, - skimmed_fee_msat: None, - blinding_point: None, - }; - - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &msg); - - // Now manually create the commitment_signed message corresponding to the update_add - // nodes[0] just sent. In the code for construction of this message, "local" refers - // to the sender of the message, and "remote" refers to the receiver. 
- - let feerate_per_kw = get_feerate!(nodes[0], nodes[1], chan.2); +enum PostFailBackAction { + TimeoutOnChain, + ClaimOnChain, + FailOffChain, + ClaimOffChain, +} - const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1; +#[test] +fn test_fail_back_before_backwards_timeout() { + do_test_fail_back_before_backwards_timeout(PostFailBackAction::TimeoutOnChain); + do_test_fail_back_before_backwards_timeout(PostFailBackAction::ClaimOnChain); + do_test_fail_back_before_backwards_timeout(PostFailBackAction::FailOffChain); + do_test_fail_back_before_backwards_timeout(PostFailBackAction::ClaimOffChain); +} - let (local_secret, next_local_point) = { - let per_peer_state = nodes[0].node.per_peer_state.read().unwrap(); - let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap(); - let local_chan = chan_lock.channel_by_id.get(&chan.2).and_then(Channel::as_funded).unwrap(); - let chan_signer = local_chan.get_signer(); - // Make the signer believe we validated another commitment, so we can release the secret - chan_signer.as_ecdsa().unwrap().get_enforcement_state().last_holder_commitment -= 1; - - (chan_signer.as_ref().release_commitment_secret(INITIAL_COMMITMENT_NUMBER).unwrap(), - chan_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 2, &secp_ctx).unwrap()) - }; - let remote_point = { - let per_peer_state = nodes[1].node.per_peer_state.read().unwrap(); - let chan_lock = per_peer_state.get(&nodes[0].node.get_our_node_id()).unwrap().lock().unwrap(); - let remote_chan = chan_lock.channel_by_id.get(&chan.2).and_then(Channel::as_funded).unwrap(); - let chan_signer = remote_chan.get_signer(); - chan_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &secp_ctx).unwrap() - }; +fn do_test_fail_back_before_backwards_timeout(post_fail_back_action: PostFailBackAction) { + // Test that we fail an HTLC upstream if we are still waiting for confirmation downstream + // just before the upstream timeout expires + let 
chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let nodes = create_network(3, &node_cfgs, &node_chanmgrs); - // Build the remote commitment transaction so we can sign it, and then later use the - // signature for the commitment_signed message. - let local_chan_balance = 1313; + for node in nodes.iter() { + *node.fee_estimator.sat_per_kw.lock().unwrap() = 2000; + } - let accepted_htlc_info = chan_utils::HTLCOutputInCommitment { - offered: false, - amount_msat: 3460001, - cltv_expiry: htlc_cltv, - payment_hash, - transaction_output_index: Some(1), - }; + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); - let commitment_number = INITIAL_COMMITMENT_NUMBER - 1; + create_announced_chan_between_nodes(&nodes, 0, 1); + let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2); - let res = { - let per_peer_state = nodes[0].node.per_peer_state.read().unwrap(); - let local_chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap(); - let local_chan = local_chan_lock.channel_by_id.get(&chan.2).and_then(Channel::as_funded).unwrap(); - let local_chan_signer = local_chan.get_signer(); - let commitment_tx = CommitmentTransaction::new( - commitment_number, - &remote_point, - 95000, - local_chan_balance, - feerate_per_kw, - vec![accepted_htlc_info], - &local_chan.funding.channel_transaction_parameters.as_counterparty_broadcastable(), - &secp_ctx, - ); - local_chan_signer.as_ecdsa().unwrap().sign_counterparty_commitment( - &local_chan.funding.channel_transaction_parameters, &commitment_tx, Vec::new(), - Vec::new(), &secp_ctx, - ).unwrap() - }; + // Start every node on the same block height to make reasoning about timeouts easier + connect_blocks(&nodes[0], 2 * CHAN_CONFIRM_DEPTH + 1 - nodes[0].best_block_info().1); + 
connect_blocks(&nodes[1], 2 * CHAN_CONFIRM_DEPTH + 1 - nodes[1].best_block_info().1); + connect_blocks(&nodes[2], 2 * CHAN_CONFIRM_DEPTH + 1 - nodes[2].best_block_info().1); - let commit_signed_msg = msgs::CommitmentSigned { - channel_id: chan.2, - signature: res.0, - htlc_signatures: res.1, - batch: None, - #[cfg(taproot)] - partial_signature_with_nonce: None, - }; + let (payment_preimage, payment_hash, ..) = + route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000); - // Send the commitment_signed message to the nodes[1]. - nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), &commit_signed_msg); - let _ = nodes[1].node.get_and_clear_pending_msg_events(); + // Force close the B<->C channel by timing out the HTLC + let timeout_blocks = TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1; + connect_blocks(&nodes[1], timeout_blocks); + let node_1_txn = test_txn_broadcast(&nodes[1], &chan_2, None, HTLCType::TIMEOUT); + check_closed_event(&nodes[1], 1, ClosureReason::HTLCsTimedOut, false, &[node_c_id], 100_000); + check_closed_broadcast(&nodes[1], 1, true); + check_added_monitors(&nodes[1], 1); - // Send the RAA to nodes[1]. - let raa_msg = msgs::RevokeAndACK { - channel_id: chan.2, - per_commitment_secret: local_secret, - next_per_commitment_point: next_local_point, - #[cfg(taproot)] - next_local_nonce: None, - }; - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &raa_msg); - expect_pending_htlcs_forwardable!(nodes[1]); - expect_htlc_handling_failed_destinations!(nodes[1].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::Receive { payment_hash }]); + // After the A<->B HTLC gets within LATENCY_GRACE_PERIOD_BLOCKS we will fail the HTLC to avoid + // the channel force-closing. Note that we already connected `TEST_FINAL_CLTV + + // LATENCY_GRACE_PERIOD_BLOCKS` blocks above, so we subtract that from the HTLC expiry (which + // is `TEST_FINAL_CLTV` + `MIN_CLTV_EXPIRY_DELTA`). 
+ let upstream_timeout_blocks = MIN_CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS * 2; + connect_blocks(&nodes[1], upstream_timeout_blocks); - let events = nodes[1].node.get_and_clear_pending_msg_events(); - assert_eq!(events.len(), 1); - // Make sure the HTLC failed in the way we expect. - match events[0] { - MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fail_htlcs, .. }, .. } => { - assert_eq!(update_fail_htlcs.len(), 1); - update_fail_htlcs[0].clone() - }, - _ => panic!("Unexpected event"), - }; - nodes[1].logger.assert_log("lightning::ln::channel", - format!("Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", raa_msg.channel_id), 1); + // Connect blocks for nodes[0] to make sure they don't go on-chain + connect_blocks(&nodes[0], timeout_blocks + upstream_timeout_blocks); - check_added_monitors!(nodes[1], 3); -} + // Check that nodes[1] fails the HTLC upstream + expect_pending_htlcs_forwardable_and_htlc_handling_failed!( + nodes[1], + vec![HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_2.2 }] + ); + check_added_monitors(&nodes[1], 1); + let htlc_updates = get_htlc_update_msgs(&nodes[1], &node_a_id); + let msgs::CommitmentUpdate { update_fail_htlcs, commitment_signed, .. } = htlc_updates; -#[xtest(feature = "_externalize_tests")] -pub fn test_chan_reserve_violation_outbound_htlc_inbound_chan() { - let mut chanmon_cfgs = create_chanmon_cfgs(2); - // Set the fee rate for the channel very high, to the point where the fundee - // sending any above-dust amount would result in a channel reserve violation. - // In this test we check that we would be prevented from sending an HTLC in - // this situation. 
- let feerate_per_kw = *chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); - let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); - let default_config = UserConfig::default(); - let channel_type_features = ChannelTypeFeatures::only_static_remote_key(); + nodes[0].node.handle_update_fail_htlc(node_b_id, &update_fail_htlcs[0]); + commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false); + let conditions = PaymentFailedConditions::new().blamed_chan_closed(true); + expect_payment_failed_conditions(&nodes[0], payment_hash, false, conditions); - let mut push_amt = 100_000_000; - push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, &channel_type_features); + // Make sure we handle possible duplicate fails or extra messages after failing back + match post_fail_back_action { + PostFailBackAction::TimeoutOnChain => { + // Confirm nodes[1]'s claim with timeout, make sure we don't fail upstream again + mine_transaction(&nodes[1], &node_1_txn[0]); // Commitment + mine_transaction(&nodes[1], &node_1_txn[1]); // HTLC timeout + connect_blocks(&nodes[1], ANTI_REORG_DELAY); + // Expect handling another fail back event, but the HTLC is already gone + expect_pending_htlcs_forwardable_and_htlc_handling_failed!( + nodes[1], + vec![HTLCHandlingFailureType::Forward { + node_id: Some(node_c_id), + channel_id: chan_2.2 + }] + ); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + }, + PostFailBackAction::ClaimOnChain => { + nodes[2].node.claim_funds(payment_preimage); + expect_payment_claimed!(nodes[2], payment_hash, 3_000_000); + check_added_monitors(&nodes[2], 1); + get_htlc_update_msgs(&nodes[2], &node_b_id); - push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000; + connect_blocks(&nodes[2], TEST_FINAL_CLTV - CLTV_CLAIM_BUFFER + 2); + let 
node_2_txn = test_txn_broadcast(&nodes[2], &chan_2, None, HTLCType::SUCCESS); + check_closed_broadcast!(nodes[2], true); + let reason = ClosureReason::HTLCsTimedOut; + check_closed_event(&nodes[2], 1, reason, false, &[node_b_id], 100_000); + check_added_monitors(&nodes[2], 1); - let _ = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, push_amt); + mine_transaction(&nodes[1], &node_2_txn[0]); // Commitment + mine_transaction(&nodes[1], &node_2_txn[1]); // HTLC success + connect_blocks(&nodes[1], ANTI_REORG_DELAY); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + }, + PostFailBackAction::FailOffChain => { + nodes[2].node.fail_htlc_backwards(&payment_hash); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!( + nodes[2], + vec![HTLCHandlingFailureType::Receive { payment_hash }] + ); + check_added_monitors(&nodes[2], 1); + let commitment_update = get_htlc_update_msgs(&nodes[2], &node_b_id); + let update_fail = commitment_update.update_fail_htlcs[0].clone(); - // Fetch a route in advance as we will be unable to once we're unable to send. 
- let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 1_000_000); - // Sending exactly enough to hit the reserve amount should be accepted - for _ in 0..MIN_AFFORDABLE_HTLC_COUNT { - route_payment(&nodes[1], &[&nodes[0]], 1_000_000); - } + nodes[1].node.handle_update_fail_htlc(node_c_id, &update_fail); + let err_msg = get_err_msg(&nodes[1], &node_c_id); + assert_eq!(err_msg.channel_id, chan_2.2); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + }, + PostFailBackAction::ClaimOffChain => { + nodes[2].node.claim_funds(payment_preimage); + expect_payment_claimed!(nodes[2], payment_hash, 3_000_000); + check_added_monitors(&nodes[2], 1); + let commitment_update = get_htlc_update_msgs(&nodes[2], &node_b_id); + let update_fulfill = commitment_update.update_fulfill_htlcs[0].clone(); - // However one more HTLC should be significantly over the reserve amount and fail. - unwrap_send_err!(nodes[1], nodes[1].node.send_payment_with_route(route, our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0) - ), true, APIError::ChannelUnavailable { .. 
}, {}); - assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + nodes[1].node.handle_update_fulfill_htlc(node_c_id, &update_fulfill); + let err_msg = get_err_msg(&nodes[1], &node_c_id); + assert_eq!(err_msg.channel_id, chan_2.2); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + }, + }; } #[xtest(feature = "_externalize_tests")] -pub fn test_chan_reserve_violation_inbound_htlc_outbound_channel() { - let mut chanmon_cfgs = create_chanmon_cfgs(2); - let feerate_per_kw = *chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); - let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); - let default_config = UserConfig::default(); - let channel_type_features = ChannelTypeFeatures::only_static_remote_key(); - - // Set nodes[0]'s balance such that they will consider any above-dust received HTLC to be a - // channel reserve violation (so their balance is channel reserve (1000 sats) + commitment - // transaction fee with 0 HTLCs (183 sats)). 
- let mut push_amt = 100_000_000; - push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, &channel_type_features); - push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000; - let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, push_amt); - - // Send four HTLCs to cover the initial push_msat buffer we're required to include - for _ in 0..MIN_AFFORDABLE_HTLC_COUNT { - route_payment(&nodes[1], &[&nodes[0]], 1_000_000); - } - - let (mut route, payment_hash, _, payment_secret) = - get_route_and_payment_hash!(nodes[1], nodes[0], 1000); - route.paths[0].hops[0].fee_msat = 700_000; - // Need to manually create the update_add_htlc message to go around the channel reserve check in send_htlc() - let secp_ctx = Secp256k1::new(); - let session_priv = SecretKey::from_slice(&[42; 32]).unwrap(); - let cur_height = nodes[1].node.best_block.read().unwrap().height + 1; - let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route.paths[0], &session_priv); - let recipient_onion_fields = RecipientOnionFields::secret_only(payment_secret); - let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(&route.paths[0], - 700_000, &recipient_onion_fields, cur_height, &None, None, None).unwrap(); - let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash).unwrap(); - let msg = msgs::UpdateAddHTLC { - channel_id: chan.2, - htlc_id: MIN_AFFORDABLE_HTLC_COUNT as u64, - amount_msat: htlc_msat, - payment_hash: payment_hash, - cltv_expiry: htlc_cltv, - onion_routing_packet: onion_packet, - skimmed_fee_msat: None, - blinding_point: None, - }; +pub fn channel_monitor_network_test() { + // Simple test which builds a network of ChannelManagers, connects them to each other, and + // tests that ChannelMonitor is able to recover from various states. 
+ let chanmon_cfgs = create_chanmon_cfgs(5); + let node_cfgs = create_node_cfgs(5, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(5, &node_cfgs, &[None, None, None, None, None]); + let nodes = create_network(5, &node_cfgs, &node_chanmgrs); - nodes[0].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &msg); - // Check that the payment failed and the channel is closed in response to the malicious UpdateAdd. - nodes[0].logger.assert_log_contains("lightning::ln::channelmanager", "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value", 3); - assert_eq!(nodes[0].node.list_channels().len(), 0); - let err_msg = check_closed_broadcast!(nodes[0], true).unwrap(); - assert_eq!(err_msg.data, "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value"); - check_added_monitors!(nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_string() }, - [nodes[1].node.get_our_node_id()], 100000); -} + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + let node_d_id = nodes[3].node.get_our_node_id(); + let node_e_id = nodes[4].node.get_our_node_id(); -#[xtest(feature = "_externalize_tests")] -pub fn test_chan_reserve_dust_inbound_htlcs_outbound_chan() { - // Test that if we receive many dust HTLCs over an outbound channel, they don't count when - // calculating our commitment transaction fee (this was previously broken). 
- let mut chanmon_cfgs = create_chanmon_cfgs(2); - let feerate_per_kw = *chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); + // Create some initial channels + let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1); + let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2); + let chan_3 = create_announced_chan_between_nodes(&nodes, 2, 3); + let chan_4 = create_announced_chan_between_nodes(&nodes, 3, 4); - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None, None]); - let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); - let default_config = UserConfig::default(); - let channel_type_features = ChannelTypeFeatures::only_static_remote_key(); + // Make sure all nodes are at the same starting height + connect_blocks(&nodes[0], 4 * CHAN_CONFIRM_DEPTH + 1 - nodes[0].best_block_info().1); + connect_blocks(&nodes[1], 4 * CHAN_CONFIRM_DEPTH + 1 - nodes[1].best_block_info().1); + connect_blocks(&nodes[2], 4 * CHAN_CONFIRM_DEPTH + 1 - nodes[2].best_block_info().1); + connect_blocks(&nodes[3], 4 * CHAN_CONFIRM_DEPTH + 1 - nodes[3].best_block_info().1); + connect_blocks(&nodes[4], 4 * CHAN_CONFIRM_DEPTH + 1 - nodes[4].best_block_info().1); - // Set nodes[0]'s balance such that they will consider any above-dust received HTLC to be a - // channel reserve violation (so their balance is channel reserve (1000 sats) + commitment - // transaction fee with 0 HTLCs (183 sats)). - let mut push_amt = 100_000_000; - push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, &channel_type_features); - push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000; - create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, push_amt); + // Rebalance the network a bit by relaying one payment through all the channels... 
+ send_payment(&nodes[0], &[&nodes[1], &nodes[2], &nodes[3], &nodes[4]], 8000000); + send_payment(&nodes[0], &[&nodes[1], &nodes[2], &nodes[3], &nodes[4]], 8000000); + send_payment(&nodes[0], &[&nodes[1], &nodes[2], &nodes[3], &nodes[4]], 8000000); + send_payment(&nodes[0], &[&nodes[1], &nodes[2], &nodes[3], &nodes[4]], 8000000); - let dust_amt = crate::ln::channel::MIN_CHAN_DUST_LIMIT_SATOSHIS * 1000 - + feerate_per_kw as u64 * htlc_success_tx_weight(&channel_type_features) / 1000 * 1000 - 1; - // In the previous code, routing this dust payment would cause nodes[0] to perceive a channel - // reserve violation even though it's a dust HTLC and therefore shouldn't count towards the - // commitment transaction fee. - route_payment(&nodes[1], &[&nodes[0]], dust_amt); + // Simple case with no pending HTLCs: + let err = "Channel force-closed".to_string(); + nodes[1].node.force_close_broadcasting_latest_txn(&chan_1.2, &node_a_id, err).unwrap(); + check_added_monitors(&nodes[1], 1); + check_closed_broadcast!(nodes[1], true); + let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }; + check_closed_event!(nodes[1], 1, reason, [node_a_id], 100000); + { + let mut node_txn = test_txn_broadcast(&nodes[1], &chan_1, None, HTLCType::NONE); + assert_eq!(node_txn.len(), 1); + mine_transaction(&nodes[1], &node_txn[0]); + if nodes[1].connect_style.borrow().updates_best_block_first() { + let _ = nodes[1].tx_broadcaster.txn_broadcast(); + } - // Send four HTLCs to cover the initial push_msat buffer we're required to include - for _ in 0..MIN_AFFORDABLE_HTLC_COUNT { - route_payment(&nodes[1], &[&nodes[0]], 1_000_000); + mine_transaction(&nodes[0], &node_txn[0]); + check_added_monitors(&nodes[0], 1); + test_txn_broadcast(&nodes[0], &chan_1, Some(node_txn[0].clone()), HTLCType::NONE); } + check_closed_broadcast!(nodes[0], true); + assert_eq!(nodes[0].node.list_channels().len(), 0); + assert_eq!(nodes[1].node.list_channels().len(), 1); + 
check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [node_b_id], 100000); - // One more than the dust amt should fail, however. - let (mut route, our_payment_hash, _, our_payment_secret) = - get_route_and_payment_hash!(nodes[1], nodes[0], dust_amt); - route.paths[0].hops[0].fee_msat += 1; - unwrap_send_err!(nodes[1], nodes[1].node.send_payment_with_route(route, our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0) - ), true, APIError::ChannelUnavailable { .. }, {}); -} + // One pending HTLC is discarded by the force-close: + let (payment_preimage_1, payment_hash_1, ..) = + route_payment(&nodes[1], &[&nodes[2], &nodes[3]], 3_000_000); -#[xtest(feature = "_externalize_tests")] -pub fn test_chan_init_feerate_unaffordability() { - // Test that we will reject channel opens which do not leave enough to pay for any HTLCs due to - // channel reserve and feerate requirements. - let mut chanmon_cfgs = create_chanmon_cfgs(2); - let feerate_per_kw = *chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); - let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); - let default_config = UserConfig::default(); - let channel_type_features = ChannelTypeFeatures::only_static_remote_key(); + // Simple case of one pending HTLC to HTLC-Timeout (note that the HTLC-Timeout is not + // broadcasted until we reach the timelock time). 
+ let error_message = "Channel force-closed"; + nodes[1] + .node + .force_close_broadcasting_latest_txn(&chan_2.2, &node_c_id, error_message.to_string()) + .unwrap(); + check_closed_broadcast!(nodes[1], true); + check_added_monitors(&nodes[1], 1); + { + let mut node_txn = test_txn_broadcast(&nodes[1], &chan_2, None, HTLCType::NONE); + connect_blocks( + &nodes[1], + TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + MIN_CLTV_EXPIRY_DELTA as u32 + 1, + ); + test_txn_broadcast(&nodes[1], &chan_2, None, HTLCType::TIMEOUT); + mine_transaction(&nodes[2], &node_txn[0]); + check_added_monitors(&nodes[2], 1); + test_txn_broadcast(&nodes[2], &chan_2, Some(node_txn[0].clone()), HTLCType::NONE); + } + check_closed_broadcast!(nodes[2], true); + assert_eq!(nodes[1].node.list_channels().len(), 0); + assert_eq!(nodes[2].node.list_channels().len(), 1); + let node_b_reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }; + check_closed_event!(nodes[1], 1, node_b_reason, [node_c_id], 100000); + check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [node_b_id], 100000); - // Set the push_msat amount such that nodes[0] will not be able to afford to add even a single - // HTLC. - let mut push_amt = 100_000_000; - push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, &channel_type_features); - assert_eq!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, push_amt + 1, 42, None, None).unwrap_err(), - APIError::APIMisuseError { err: "Funding amount (356) can't even pay fee for initial commitment transaction fee of 357.".to_string() }); - - // During open, we don't have a "counterparty channel reserve" to check against, so that - // requirement only comes into play on the open_channel handling side. 
- push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000; - nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, push_amt, 42, None, None).unwrap(); - let mut open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); - open_channel_msg.push_msat += 1; - nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &open_channel_msg); + macro_rules! claim_funds { + ($node: expr, $prev_node: expr, $preimage: expr, $payment_hash: expr) => {{ + $node.node.claim_funds($preimage); + expect_payment_claimed!($node, $payment_hash, 3_000_000); + check_added_monitors(&$node, 1); - let msg_events = nodes[1].node.get_and_clear_pending_msg_events(); - assert_eq!(msg_events.len(), 1); - match msg_events[0] { - MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id: _ } => { - assert_eq!(msg.data, "Insufficient funding amount for initial reserve"); - }, - _ => panic!("Unexpected event"), + let events = $node.node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + match events[0] { + MessageSendEvent::UpdateHTLCs { + ref node_id, + channel_id: _, + updates: + msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, .. }, + } => { + assert!(update_add_htlcs.is_empty()); + assert!(update_fail_htlcs.is_empty()); + assert_eq!(*node_id, $prev_node.node.get_our_node_id()); + }, + _ => panic!("Unexpected event"), + }; + }}; } -} -#[xtest(feature = "_externalize_tests")] -pub fn test_chan_reserve_dust_inbound_htlcs_inbound_chan() { - // Test that if we receive many dust HTLCs over an inbound channel, they don't count when - // calculating our counterparty's commitment transaction fee (this was previously broken). 
- let chanmon_cfgs = create_chanmon_cfgs(2); - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None, None]); - let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); - create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 98000000); + // nodes[3] gets the preimage, but nodes[2] already disconnected, resulting in a nodes[2] + // HTLC-Timeout and a nodes[3] claim against it (+ its own announces) + let err = "Channel force-closed".to_string(); + nodes[2].node.force_close_broadcasting_latest_txn(&chan_3.2, &node_d_id, err).unwrap(); + check_added_monitors(&nodes[2], 1); + check_closed_broadcast!(nodes[2], true); + let node2_commitment_txid; + { + let node_txn = test_txn_broadcast(&nodes[2], &chan_3, None, HTLCType::NONE); + connect_blocks(&nodes[2], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS); + test_txn_broadcast(&nodes[2], &chan_3, None, HTLCType::TIMEOUT); + node2_commitment_txid = node_txn[0].compute_txid(); - let payment_amt = 46000; // Dust amount - // In the previous code, these first four payments would succeed. 
- route_payment(&nodes[0], &[&nodes[1]], payment_amt); - route_payment(&nodes[0], &[&nodes[1]], payment_amt); - route_payment(&nodes[0], &[&nodes[1]], payment_amt); - route_payment(&nodes[0], &[&nodes[1]], payment_amt); + // Claim the payment on nodes[3], giving it knowledge of the preimage + claim_funds!(nodes[3], nodes[2], payment_preimage_1, payment_hash_1); + mine_transaction(&nodes[3], &node_txn[0]); + check_added_monitors(&nodes[3], 1); + check_preimage_claim(&nodes[3], &node_txn); + } + check_closed_broadcast!(nodes[3], true); + assert_eq!(nodes[2].node.list_channels().len(), 0); + assert_eq!(nodes[3].node.list_channels().len(), 1); + let node_c_reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }; + check_closed_event!(nodes[2], 1, node_c_reason, [node_d_id], 100000); + check_closed_event!(nodes[3], 1, ClosureReason::CommitmentTxConfirmed, [node_c_id], 100000); - // Then these next 5 would be interpreted by nodes[1] as violating the fee spike buffer. - route_payment(&nodes[0], &[&nodes[1]], payment_amt); - route_payment(&nodes[0], &[&nodes[1]], payment_amt); - route_payment(&nodes[0], &[&nodes[1]], payment_amt); - route_payment(&nodes[0], &[&nodes[1]], payment_amt); - route_payment(&nodes[0], &[&nodes[1]], payment_amt); + // Drop the ChannelMonitor for the previous channel to avoid it broadcasting transactions and + // confusing us in the following tests. + let chan_3_mon = nodes[3].chain_monitor.chain_monitor.remove_monitor(&chan_3.2); - // And this last payment previously resulted in nodes[1] closing on its inbound-channel - // counterparty, because it counted all the previous dust HTLCs against nodes[0]'s commitment - // transaction fee and therefore perceived this next payment as a channel reserve violation. - route_payment(&nodes[0], &[&nodes[1]], payment_amt); -} + // One pending HTLC to time out: + let (payment_preimage_2, payment_hash_2, ..) 
= + route_payment(&nodes[3], &[&nodes[4]], 3_000_000); + // CLTV expires at TEST_FINAL_CLTV + 1 (current height) + 1 (added in send_payment for + // buffer space). -#[xtest(feature = "_externalize_tests")] -pub fn test_chan_reserve_violation_inbound_htlc_inbound_chan() { - let chanmon_cfgs = create_chanmon_cfgs(3); - let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); - let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); - let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000); - let _ = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 100000, 95000000); + let (close_chan_update_1, close_chan_update_2) = { + connect_blocks(&nodes[3], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1); + let events = nodes[3].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 2); + let close_chan_update_1 = match events[1] { + MessageSendEvent::BroadcastChannelUpdate { ref msg } => msg.clone(), + _ => panic!("Unexpected event"), + }; + match events[0] { + MessageSendEvent::HandleError { + action: ErrorAction::DisconnectPeer { .. }, + node_id, + } => { + assert_eq!(node_id, node_e_id); + }, + _ => panic!("Unexpected event"), + } + check_added_monitors(&nodes[3], 1); - let feemsat = 239; - let total_routing_fee_msat = (nodes.len() - 2) as u64 * feemsat; - let chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2); - let feerate = get_feerate!(nodes[0], nodes[1], chan.2); - let channel_type_features = get_channel_type_features!(nodes[0], nodes[1], chan.2); + // Clear bumped claiming txn spending node 2 commitment tx. Bumped txn are generated after reaching some height timer. 
+ { + let mut node_txn = nodes[3].tx_broadcaster.txn_broadcasted.lock().unwrap(); + node_txn.retain(|tx| { + if tx.input[0].previous_output.txid == node2_commitment_txid { + false + } else { + true + } + }); + } - // Add a 2* and +1 for the fee spike reserve. - let commit_tx_fee_2_htlc = 2*commit_tx_fee_msat(feerate, 2 + 1, &channel_type_features); - let recv_value_1 = (chan_stat.value_to_self_msat - chan_stat.channel_reserve_msat - total_routing_fee_msat - commit_tx_fee_2_htlc)/2; - let amt_msat_1 = recv_value_1 + total_routing_fee_msat; + let node_txn = test_txn_broadcast(&nodes[3], &chan_4, None, HTLCType::TIMEOUT); - // Add a pending HTLC. - let (route_1, our_payment_hash_1, _, our_payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[2], amt_msat_1); - let payment_event_1 = { - nodes[0].node.send_payment_with_route(route_1.clone(), our_payment_hash_1, - RecipientOnionFields::secret_only(our_payment_secret_1), PaymentId(our_payment_hash_1.0)).unwrap(); - check_added_monitors!(nodes[0], 1); + // Claim the payment on nodes[4], giving it knowledge of the preimage + claim_funds!(nodes[4], nodes[3], payment_preimage_2, payment_hash_2); - let mut events = nodes[0].node.get_and_clear_pending_msg_events(); - assert_eq!(events.len(), 1); - SendEvent::from_event(events.remove(0)) - }; - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event_1.msgs[0]); - - // Attempt to trigger a channel reserve violation --> payment failure. 
- let commit_tx_fee_2_htlcs = commit_tx_fee_msat(feerate, 2, &channel_type_features); - let recv_value_2 = chan_stat.value_to_self_msat - amt_msat_1 - chan_stat.channel_reserve_msat - total_routing_fee_msat - commit_tx_fee_2_htlcs + 1; - let amt_msat_2 = recv_value_2 + total_routing_fee_msat; - let mut route_2 = route_1.clone(); - route_2.paths[0].hops.last_mut().unwrap().fee_msat = amt_msat_2; - - // Need to manually create the update_add_htlc message to go around the channel reserve check in send_htlc() - let secp_ctx = Secp256k1::new(); - let session_priv = SecretKey::from_slice(&[42; 32]).unwrap(); - let cur_height = nodes[0].node.best_block.read().unwrap().height + 1; - let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route_2.paths[0], &session_priv); - let recipient_onion_fields = RecipientOnionFields::spontaneous_empty(); - let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads( - &route_2.paths[0], recv_value_2, &recipient_onion_fields, cur_height, &None, None, None).unwrap(); - let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &our_payment_hash_1).unwrap(); - let msg = msgs::UpdateAddHTLC { - channel_id: chan.2, - htlc_id: 1, - amount_msat: htlc_msat + 1, - payment_hash: our_payment_hash_1, - cltv_expiry: htlc_cltv, - onion_routing_packet: onion_packet, - skimmed_fee_msat: None, - blinding_point: None, + connect_blocks(&nodes[4], TEST_FINAL_CLTV - CLTV_CLAIM_BUFFER + 2); + let events = nodes[4].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 2); + let close_chan_update_2 = match events[1] { + MessageSendEvent::BroadcastChannelUpdate { ref msg } => msg.clone(), + _ => panic!("Unexpected event"), + }; + match events[0] { + MessageSendEvent::HandleError { + action: ErrorAction::DisconnectPeer { .. 
}, + node_id, + } => { + assert_eq!(node_id, node_d_id); + }, + _ => panic!("Unexpected event"), + } + check_added_monitors(&nodes[4], 1); + test_txn_broadcast(&nodes[4], &chan_4, None, HTLCType::SUCCESS); + check_closed_event!(nodes[4], 1, ClosureReason::HTLCsTimedOut, [node_d_id], 100000); + + mine_transaction(&nodes[4], &node_txn[0]); + check_preimage_claim(&nodes[4], &node_txn); + (close_chan_update_1, close_chan_update_2) }; + let node_id_4 = node_e_id; + let node_id_3 = node_d_id; + nodes[3].gossip_sync.handle_channel_update(Some(node_id_4), &close_chan_update_2).unwrap(); + nodes[4].gossip_sync.handle_channel_update(Some(node_id_3), &close_chan_update_1).unwrap(); + assert_eq!(nodes[3].node.list_channels().len(), 0); + assert_eq!(nodes[4].node.list_channels().len(), 0); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &msg); - // Check that the payment failed and the channel is closed in response to the malicious UpdateAdd. - nodes[1].logger.assert_log_contains("lightning::ln::channelmanager", "Remote HTLC add would put them under remote reserve value", 3); - assert_eq!(nodes[1].node.list_channels().len(), 1); - let err_msg = check_closed_broadcast!(nodes[1], true).unwrap(); - assert_eq!(err_msg.data, "Remote HTLC add would put them under remote reserve value"); - check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Remote HTLC add would put them under remote reserve value".to_string() }, - [nodes[0].node.get_our_node_id()], 100000); + assert_eq!( + nodes[3].chain_monitor.chain_monitor.watch_channel(chan_3.2, chan_3_mon), + Ok(ChannelMonitorUpdateStatus::Completed) + ); + check_closed_event!(nodes[3], 1, ClosureReason::HTLCsTimedOut, [node_id_4], 100000); } #[xtest(feature = "_externalize_tests")] -pub fn test_inbound_outbound_capacity_is_not_zero() { - let chanmon_cfgs = create_chanmon_cfgs(2); +pub fn test_justice_tx_htlc_timeout() { + // Test justice txn built on revoked 
HTLC-Timeout tx, against both sides + let mut alice_config = test_default_channel_config(); + alice_config.channel_handshake_config.announce_for_forwarding = true; + alice_config.channel_handshake_limits.force_announced_channel_preference = false; + alice_config.channel_handshake_config.our_to_self_delay = 6 * 24 * 5; + let mut bob_config = test_default_channel_config(); + bob_config.channel_handshake_config.announce_for_forwarding = true; + bob_config.channel_handshake_limits.force_announced_channel_preference = false; + bob_config.channel_handshake_config.our_to_self_delay = 6 * 24 * 3; + let user_cfgs = [Some(alice_config), Some(bob_config)]; + let mut chanmon_cfgs = create_chanmon_cfgs(2); + chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true; + chanmon_cfgs[1].keys_manager.disable_revocation_policy_check = true; + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); - let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - let _ = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000); - let channels0 = node_chanmgrs[0].list_channels(); - let channels1 = node_chanmgrs[1].list_channels(); - let default_config = UserConfig::default(); - assert_eq!(channels0.len(), 1); - assert_eq!(channels1.len(), 1); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &user_cfgs); + let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); - let reserve = get_holder_selected_channel_reserve_satoshis(100_000, &default_config); - assert_eq!(channels0[0].inbound_capacity_msat, 95000000 - reserve*1000); - assert_eq!(channels1[0].outbound_capacity_msat, 95000000 - reserve*1000); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); - assert_eq!(channels0[0].outbound_capacity_msat, 100000 * 1000 - 95000000 - reserve*1000); - assert_eq!(channels1[0].inbound_capacity_msat, 100000 * 1000 - 95000000 - 
reserve*1000); -} + // Create some new channels: + let chan_5 = create_announced_chan_between_nodes(&nodes, 0, 1); + + // A pending HTLC which will be revoked: + let payment_preimage_3 = route_payment(&nodes[0], &[&nodes[1]], 3000000).0; + // Get the will-be-revoked local txn from nodes[0] + let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_5.2); + assert_eq!(revoked_local_txn.len(), 2); // First commitment tx, then HTLC tx + assert_eq!(revoked_local_txn[0].input.len(), 1); + assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_5.3.compute_txid()); + assert_eq!(revoked_local_txn[0].output.len(), 2); // Only HTLC and output back to 0 are present + assert_eq!(revoked_local_txn[1].input.len(), 1); + assert_eq!( + revoked_local_txn[1].input[0].previous_output.txid, + revoked_local_txn[0].compute_txid() + ); + assert_eq!( + revoked_local_txn[1].input[0].witness.last().unwrap().len(), + OFFERED_HTLC_SCRIPT_WEIGHT + ); // HTLC-Timeout + // Revoke the old state + claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_3); + + { + mine_transaction(&nodes[1], &revoked_local_txn[0]); + { + let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap(); + // The revoked HTLC output is not pinnable for another `TEST_FINAL_CLTV` blocks, and is + // thus claimed in the same transaction with the revoked to_self output. 
+ assert_eq!(node_txn.len(), 1); + assert_eq!(node_txn[0].input.len(), 2); + check_spends!(node_txn[0], revoked_local_txn[0]); + assert_ne!(node_txn[0].input[0].previous_output, node_txn[0].input[1].previous_output); + node_txn.clear(); + } + check_added_monitors(&nodes[1], 1); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [node_a_id], 100000); + test_txn_broadcast(&nodes[1], &chan_5, Some(revoked_local_txn[0].clone()), HTLCType::NONE); + + mine_transaction(&nodes[0], &revoked_local_txn[0]); + connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires -fn commit_tx_fee_msat(feerate: u32, num_htlcs: u64, channel_type_features: &ChannelTypeFeatures) -> u64 { - (commitment_tx_base_weight(channel_type_features) + num_htlcs * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate as u64 / 1000 * 1000 + // Verify broadcast of revoked HTLC-timeout + let node_txn = test_txn_broadcast( + &nodes[0], + &chan_5, + Some(revoked_local_txn[0].clone()), + HTLCType::TIMEOUT, + ); + check_added_monitors(&nodes[0], 1); + check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [node_b_id], 100000); + // Broadcast revoked HTLC-timeout on node 1 + mine_transaction(&nodes[1], &node_txn[1]); + test_revoked_htlc_claim_txn_broadcast( + &nodes[1], + node_txn[1].clone(), + revoked_local_txn[0].clone(), + ); + } + get_announce_close_broadcast_events(&nodes, 0, 1); + assert_eq!(nodes[0].node.list_channels().len(), 0); + assert_eq!(nodes[1].node.list_channels().len(), 0); } #[xtest(feature = "_externalize_tests")] -pub fn test_channel_reserve_holding_cell_htlcs() { - let chanmon_cfgs = create_chanmon_cfgs(3); - let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); - // When this test was written, the default base fee floated based on the HTLC count. - // It is now fixed, so we simply set the fee to the expected value here. 
- let mut config = test_default_channel_config(); - config.channel_config.forwarding_fee_base_msat = 239; - let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[Some(config.clone()), Some(config.clone()), Some(config.clone())]); - let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); - let chan_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 190000, 1001); - let chan_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 190000, 1001); - let chan_2_user_id = nodes[2].node.list_channels()[0].user_channel_id; - - let mut stat01 = get_channel_value_stat!(nodes[0], nodes[1], chan_1.2); - let mut stat11 = get_channel_value_stat!(nodes[1], nodes[0], chan_1.2); +pub fn test_justice_tx_htlc_success() { + // Test justice txn built on revoked HTLC-Success tx, against both sides + let mut alice_config = test_default_channel_config(); + alice_config.channel_handshake_config.announce_for_forwarding = true; + alice_config.channel_handshake_limits.force_announced_channel_preference = false; + alice_config.channel_handshake_config.our_to_self_delay = 6 * 24 * 5; + let mut bob_config = test_default_channel_config(); + bob_config.channel_handshake_config.announce_for_forwarding = true; + bob_config.channel_handshake_limits.force_announced_channel_preference = false; + bob_config.channel_handshake_config.our_to_self_delay = 6 * 24 * 3; + let user_cfgs = [Some(alice_config), Some(bob_config)]; + let mut chanmon_cfgs = create_chanmon_cfgs(2); + chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true; + chanmon_cfgs[1].keys_manager.disable_revocation_policy_check = true; - let mut stat12 = get_channel_value_stat!(nodes[1], nodes[2], chan_2.2); - let mut stat22 = get_channel_value_stat!(nodes[2], nodes[1], chan_2.2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &user_cfgs); + let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); - macro_rules! 
expect_forward { - ($node: expr) => {{ - let mut events = $node.node.get_and_clear_pending_msg_events(); - assert_eq!(events.len(), 1); - check_added_monitors!($node, 1); - let payment_event = SendEvent::from_event(events.remove(0)); - payment_event - }} - } + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); - let feemsat = 239; // set above - let total_fee_msat = (nodes.len() - 2) as u64 * feemsat; - let feerate = get_feerate!(nodes[0], nodes[1], chan_1.2); - let channel_type_features = get_channel_type_features!(nodes[0], nodes[1], chan_1.2); + // Create some new channels: + let chan_6 = create_announced_chan_between_nodes(&nodes, 0, 1); - let recv_value_0 = stat01.counterparty_max_htlc_value_in_flight_msat - total_fee_msat; + // A pending HTLC which will be revoked: + let payment_preimage_4 = route_payment(&nodes[0], &[&nodes[1]], 3000000).0; + // Get the will-be-revoked local txn from B + let revoked_local_txn = get_local_commitment_txn!(nodes[1], chan_6.2); + assert_eq!(revoked_local_txn.len(), 1); // Only commitment tx + assert_eq!(revoked_local_txn[0].input.len(), 1); + assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_6.3.compute_txid()); + assert_eq!(revoked_local_txn[0].output.len(), 2); // Only HTLC and output back to A are present - // attempt to send amt_msat > their_max_htlc_value_in_flight_msat + // Revoke the old state + claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_4); { - let payment_params = PaymentParameters::from_node_id(nodes[2].node.get_our_node_id(), TEST_FINAL_CLTV) - .with_bolt11_features(nodes[2].node.bolt11_invoice_features()).unwrap().with_max_channel_saturation_power_of_half(0); - let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], payment_params, recv_value_0); - route.paths[0].hops.last_mut().unwrap().fee_msat += 1; - assert!(route.paths[0].hops.iter().rev().skip(1).all(|h| h.fee_msat == feemsat)); - - 
unwrap_send_err!(nodes[0], nodes[0].node.send_payment_with_route(route, our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0) - ), true, APIError::ChannelUnavailable { .. }, {}); - assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - } + mine_transaction(&nodes[0], &revoked_local_txn[0]); + { + let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap(); + assert_eq!(node_txn.len(), 1); // ChannelMonitor: penalty tx + assert_eq!(node_txn[0].input.len(), 1); // We claim the received HTLC output - // channel reserve is bigger than their_max_htlc_value_in_flight_msat so loop to deplete - // nodes[0]'s wealth - loop { - let amt_msat = recv_value_0 + total_fee_msat; - // 3 for the 3 HTLCs that will be sent, 2* and +1 for the fee spike reserve. - // Also, ensure that each payment has enough to be over the dust limit to - // ensure it'll be included in each commit tx fee calculation. - let commit_tx_fee_all_htlcs = 2*commit_tx_fee_msat(feerate, 3 + 1, &channel_type_features); - let ensure_htlc_amounts_above_dust_buffer = 3 * (stat01.counterparty_dust_limit_msat + 1000); - if stat01.value_to_self_msat < stat01.channel_reserve_msat + commit_tx_fee_all_htlcs + ensure_htlc_amounts_above_dust_buffer + amt_msat { - break; + check_spends!(node_txn[0], revoked_local_txn[0]); + node_txn.swap_remove(0); } + check_added_monitors(&nodes[0], 1); + test_txn_broadcast(&nodes[0], &chan_6, Some(revoked_local_txn[0].clone()), HTLCType::NONE); - let payment_params = PaymentParameters::from_node_id(nodes[2].node.get_our_node_id(), TEST_FINAL_CLTV) - .with_bolt11_features(nodes[2].node.bolt11_invoice_features()).unwrap().with_max_channel_saturation_power_of_half(0); - let route = get_route!(nodes[0], payment_params, recv_value_0).unwrap(); - let (payment_preimage, ..) 
= send_along_route(&nodes[0], route, &[&nodes[1], &nodes[2]], recv_value_0); - claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage); - - let (stat01_, stat11_, stat12_, stat22_) = ( - get_channel_value_stat!(nodes[0], nodes[1], chan_1.2), - get_channel_value_stat!(nodes[1], nodes[0], chan_1.2), - get_channel_value_stat!(nodes[1], nodes[2], chan_2.2), - get_channel_value_stat!(nodes[2], nodes[1], chan_2.2), + mine_transaction(&nodes[1], &revoked_local_txn[0]); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [node_a_id], 100000); + let node_txn = test_txn_broadcast( + &nodes[1], + &chan_6, + Some(revoked_local_txn[0].clone()), + HTLCType::SUCCESS, + ); + check_added_monitors(&nodes[1], 1); + mine_transaction(&nodes[0], &node_txn[1]); + check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [node_b_id], 100000); + test_revoked_htlc_claim_txn_broadcast( + &nodes[0], + node_txn[1].clone(), + revoked_local_txn[0].clone(), ); - - assert_eq!(stat01_.value_to_self_msat, stat01.value_to_self_msat - amt_msat); - assert_eq!(stat11_.value_to_self_msat, stat11.value_to_self_msat + amt_msat); - assert_eq!(stat12_.value_to_self_msat, stat12.value_to_self_msat - (amt_msat - feemsat)); - assert_eq!(stat22_.value_to_self_msat, stat22.value_to_self_msat + (amt_msat - feemsat)); - stat01 = stat01_; stat11 = stat11_; stat12 = stat12_; stat22 = stat22_; - } - - // adding pending output. - // 2* and +1 HTLCs on the commit tx fee for the fee spike reserve. - // The reason we're dividing by two here is as follows: the dividend is the total outbound liquidity - // after fees, the channel reserve, and the fee spike buffer are removed. We eventually want to - // divide this quantity into 3 portions, that will each be sent in an HTLC. This allows us - // to test channel channel reserve policy at the edges of what amount is sendable, i.e. 
- // cases where 1 msat over X amount will cause a payment failure, but anything less than - // that can be sent successfully. So, dividing by two is a somewhat arbitrary way of getting - // the amount of the first of these aforementioned 3 payments. The reason we split into 3 payments - // is to test the behavior of the holding cell with respect to channel reserve and commit tx fee - // policy. - let commit_tx_fee_2_htlcs = 2*commit_tx_fee_msat(feerate, 2 + 1, &channel_type_features); - let recv_value_1 = (stat01.value_to_self_msat - stat01.channel_reserve_msat - total_fee_msat - commit_tx_fee_2_htlcs)/2; - let amt_msat_1 = recv_value_1 + total_fee_msat; - - let (route_1, our_payment_hash_1, our_payment_preimage_1, our_payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[2], recv_value_1); - let payment_event_1 = { - nodes[0].node.send_payment_with_route(route_1.clone(), our_payment_hash_1, - RecipientOnionFields::secret_only(our_payment_secret_1), PaymentId(our_payment_hash_1.0)).unwrap(); - check_added_monitors!(nodes[0], 1); - - let mut events = nodes[0].node.get_and_clear_pending_msg_events(); - assert_eq!(events.len(), 1); - SendEvent::from_event(events.remove(0)) - }; - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event_1.msgs[0]); - - // channel reserve test with htlc pending output > 0 - let recv_value_2 = stat01.value_to_self_msat - amt_msat_1 - stat01.channel_reserve_msat - total_fee_msat - commit_tx_fee_2_htlcs; - { - let mut route = route_1.clone(); - route.paths[0].hops.last_mut().unwrap().fee_msat = recv_value_2 + 1; - let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[2]); - unwrap_send_err!(nodes[0], nodes[0].node.send_payment_with_route(route, our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0) - ), true, APIError::ChannelUnavailable { .. 
}, {}); - assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - } - - // split the rest to test holding cell - let commit_tx_fee_3_htlcs = 2*commit_tx_fee_msat(feerate, 3 + 1, &channel_type_features); - let additional_htlc_cost_msat = commit_tx_fee_3_htlcs - commit_tx_fee_2_htlcs; - let recv_value_21 = recv_value_2/2 - additional_htlc_cost_msat/2; - let recv_value_22 = recv_value_2 - recv_value_21 - total_fee_msat - additional_htlc_cost_msat; - { - let stat = get_channel_value_stat!(nodes[0], nodes[1], chan_1.2); - assert_eq!(stat.value_to_self_msat - (stat.pending_outbound_htlcs_amount_msat + recv_value_21 + recv_value_22 + total_fee_msat + total_fee_msat + commit_tx_fee_3_htlcs), stat.channel_reserve_msat); } + get_announce_close_broadcast_events(&nodes, 0, 1); + assert_eq!(nodes[0].node.list_channels().len(), 0); + assert_eq!(nodes[1].node.list_channels().len(), 0); +} - // now see if they go through on both sides - let (route_21, our_payment_hash_21, our_payment_preimage_21, our_payment_secret_21) = get_route_and_payment_hash!(nodes[0], nodes[2], recv_value_21); - // but this will stuck in the holding cell - nodes[0].node.send_payment_with_route(route_21, our_payment_hash_21, - RecipientOnionFields::secret_only(our_payment_secret_21), PaymentId(our_payment_hash_21.0)).unwrap(); - check_added_monitors!(nodes[0], 0); - let events = nodes[0].node.get_and_clear_pending_events(); - assert_eq!(events.len(), 0); +#[xtest(feature = "_externalize_tests")] +pub fn revoked_output_claim() { + // Simple test to ensure a node will claim a revoked output when a stale remote commitment + // transaction is broadcast by its counterparty + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - // test with outbound holding cell amount > 0 - { - let (mut route, our_payment_hash, _, 
our_payment_secret) = - get_route_and_payment_hash!(nodes[0], nodes[2], recv_value_22); - route.paths[0].hops.last_mut().unwrap().fee_msat += 1; - unwrap_send_err!(nodes[0], nodes[0].node.send_payment_with_route(route, our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0) - ), true, APIError::ChannelUnavailable { .. }, {}); - assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - } + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); - let (route_22, our_payment_hash_22, our_payment_preimage_22, our_payment_secret_22) = get_route_and_payment_hash!(nodes[0], nodes[2], recv_value_22); - // this will also stuck in the holding cell - nodes[0].node.send_payment_with_route(route_22, our_payment_hash_22, - RecipientOnionFields::secret_only(our_payment_secret_22), PaymentId(our_payment_hash_22.0)).unwrap(); - check_added_monitors!(nodes[0], 0); - assert!(nodes[0].node.get_and_clear_pending_events().is_empty()); - assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); + let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1); + // node[0] is gonna to revoke an old state thus node[1] should be able to claim the revoked output + let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_1.2); + assert_eq!(revoked_local_txn.len(), 1); + // Only output is the full channel value back to nodes[0]: + assert_eq!(revoked_local_txn[0].output.len(), 1); + // Send a payment through, updating everyone's latest commitment txn + send_payment(&nodes[0], &[&nodes[1]], 5000000); - // flush the pending htlc - nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &payment_event_1.commitment_msg); - let (as_revoke_and_ack, as_commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); - check_added_monitors!(nodes[1], 1); + // Inform nodes[1] that nodes[0] broadcast a stale tx + 
mine_transaction(&nodes[1], &revoked_local_txn[0]); + check_added_monitors(&nodes[1], 1); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [node_a_id], 100000); + let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); + assert_eq!(node_txn.len(), 1); // ChannelMonitor: justice tx against revoked to_local output - // the pending htlc should be promoted to committed - nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &as_revoke_and_ack); - check_added_monitors!(nodes[0], 1); - let commitment_update_2 = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); + check_spends!(node_txn[0], revoked_local_txn[0]); - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &as_commitment_signed); - let bs_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); - // No commitment_signed so get_event_msg's assert(len == 1) passes - check_added_monitors!(nodes[0], 1); + // Inform nodes[0] that a watchtower cheated on its behalf, so it will force-close the chan + mine_transaction(&nodes[0], &revoked_local_txn[0]); + get_announce_close_broadcast_events(&nodes, 0, 1); + check_added_monitors(&nodes[0], 1); + check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [node_b_id], 100000); +} - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &bs_revoke_and_ack); - assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[1], 1); +#[xtest(feature = "_externalize_tests")] +pub fn test_forming_justice_tx_from_monitor_updates() { + do_test_forming_justice_tx_from_monitor_updates(true); + do_test_forming_justice_tx_from_monitor_updates(false); +} - expect_pending_htlcs_forwardable!(nodes[1]); +fn do_test_forming_justice_tx_from_monitor_updates(broadcast_initial_commitment: bool) { + // Simple test to make sure that the justice tx formed in 
WatchtowerPersister + // is properly formed and can be broadcasted/confirmed successfully in the event + // that a revoked commitment transaction is broadcasted + // (Similar to `revoked_output_claim` test but we get the justice tx + broadcast manually) + let chanmon_cfgs = create_chanmon_cfgs(2); + let destination_script0 = chanmon_cfgs[0].keys_manager.get_destination_script([0; 32]).unwrap(); + let destination_script1 = chanmon_cfgs[1].keys_manager.get_destination_script([0; 32]).unwrap(); + let persisters = vec![ + WatchtowerPersister::new(destination_script0), + WatchtowerPersister::new(destination_script1), + ]; + let node_cfgs = create_node_cfgs_with_persisters(2, &chanmon_cfgs, persisters.iter().collect()); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - let ref payment_event_11 = expect_forward!(nodes[1]); - nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &payment_event_11.msgs[0]); - commitment_signed_dance!(nodes[2], nodes[1], payment_event_11.commitment_msg, false); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); - expect_pending_htlcs_forwardable!(nodes[2]); - expect_payment_claimable!(nodes[2], our_payment_hash_1, our_payment_secret_1, recv_value_1); + let (_, _, channel_id, _) = create_announced_chan_between_nodes(&nodes, 0, 1); - // flush the htlcs in the holding cell - assert_eq!(commitment_update_2.update_add_htlcs.len(), 2); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &commitment_update_2.update_add_htlcs[0]); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &commitment_update_2.update_add_htlcs[1]); - commitment_signed_dance!(nodes[1], nodes[0], &commitment_update_2.commitment_signed, false); - expect_pending_htlcs_forwardable!(nodes[1]); + if !broadcast_initial_commitment { + // Send a payment to move the channel forward + 
send_payment(&nodes[0], &[&nodes[1]], 5_000_000); + } - let ref payment_event_3 = expect_forward!(nodes[1]); - assert_eq!(payment_event_3.msgs.len(), 2); - nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &payment_event_3.msgs[0]); - nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &payment_event_3.msgs[1]); + // node[0] is gonna to revoke an old state thus node[1] should be able to claim the revoked output. + // We'll keep this commitment transaction to broadcast once it's revoked. + let revoked_local_txn = get_local_commitment_txn!(nodes[0], channel_id); + assert_eq!(revoked_local_txn.len(), 1); + let revoked_commitment_tx = &revoked_local_txn[0]; - commitment_signed_dance!(nodes[2], nodes[1], &payment_event_3.commitment_msg, false); - expect_pending_htlcs_forwardable!(nodes[2]); + // Send another payment, now revoking the previous commitment tx + send_payment(&nodes[0], &[&nodes[1]], 5_000_000); - let events = nodes[2].node.get_and_clear_pending_events(); - assert_eq!(events.len(), 2); - match events[0] { - Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, ref via_channel_ids, .. } => { - assert_eq!(our_payment_hash_21, *payment_hash); - assert_eq!(recv_value_21, amount_msat); - assert_eq!(nodes[2].node.get_our_node_id(), receiver_node_id.unwrap()); - assert_eq!(*via_channel_ids, vec![(chan_2.2, Some(chan_2_user_id))]); - match &purpose { - PaymentPurpose::Bolt11InvoicePayment { payment_preimage, payment_secret, .. } => { - assert!(payment_preimage.is_none()); - assert_eq!(our_payment_secret_21, *payment_secret); - }, - _ => panic!("expected PaymentPurpose::Bolt11InvoicePayment") - } - }, - _ => panic!("Unexpected event"), - } - match events[1] { - Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, ref via_channel_ids, .. 
} => { - assert_eq!(our_payment_hash_22, *payment_hash); - assert_eq!(recv_value_22, amount_msat); - assert_eq!(nodes[2].node.get_our_node_id(), receiver_node_id.unwrap()); - assert_eq!(*via_channel_ids, vec![(chan_2.2, Some(chan_2_user_id))]); - match &purpose { - PaymentPurpose::Bolt11InvoicePayment { payment_preimage, payment_secret, .. } => { - assert!(payment_preimage.is_none()); - assert_eq!(our_payment_secret_22, *payment_secret); - }, - _ => panic!("expected PaymentPurpose::Bolt11InvoicePayment") - } - }, - _ => panic!("Unexpected event"), - } + let justice_tx = + persisters[1].justice_tx(channel_id, &revoked_commitment_tx.compute_txid()).unwrap(); + check_spends!(justice_tx, revoked_commitment_tx); - claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), our_payment_preimage_1); - claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), our_payment_preimage_21); - claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), our_payment_preimage_22); + mine_transactions(&nodes[1], &[revoked_commitment_tx, &justice_tx]); + mine_transactions(&nodes[0], &[revoked_commitment_tx, &justice_tx]); - let commit_tx_fee_0_htlcs = 2*commit_tx_fee_msat(feerate, 1, &channel_type_features); - let recv_value_3 = commit_tx_fee_2_htlcs - commit_tx_fee_0_htlcs - total_fee_msat; - send_payment(&nodes[0], &vec![&nodes[1], &nodes[2]][..], recv_value_3); + check_added_monitors(&nodes[1], 1); + let reason = ClosureReason::CommitmentTxConfirmed; + check_closed_event(&nodes[1], 1, reason, false, &[node_a_id], 100_000); + get_announce_close_broadcast_events(&nodes, 1, 0); - let commit_tx_fee_1_htlc = 2*commit_tx_fee_msat(feerate, 1 + 1, &channel_type_features); - let expected_value_to_self = stat01.value_to_self_msat - (recv_value_1 + total_fee_msat) - (recv_value_21 + total_fee_msat) - (recv_value_22 + total_fee_msat) - (recv_value_3 + total_fee_msat); - let stat0 = get_channel_value_stat!(nodes[0], nodes[1], chan_1.2); - assert_eq!(stat0.value_to_self_msat, expected_value_to_self); - 
assert_eq!(stat0.value_to_self_msat, stat0.channel_reserve_msat + commit_tx_fee_1_htlc); + check_added_monitors(&nodes[0], 1); + let reason = ClosureReason::CommitmentTxConfirmed; + check_closed_event(&nodes[0], 1, reason, false, &[node_b_id], 100_000); - let stat2 = get_channel_value_stat!(nodes[2], nodes[1], chan_2.2); - assert_eq!(stat2.value_to_self_msat, stat22.value_to_self_msat + recv_value_1 + recv_value_21 + recv_value_22 + recv_value_3); -} + // Check that the justice tx has sent the revoked output value to nodes[1] + let monitor = get_monitor!(nodes[1], channel_id); + let total_claimable_balance = + monitor.get_claimable_balances().iter().fold(0, |sum, balance| match balance { + channelmonitor::Balance::ClaimableAwaitingConfirmations { amount_satoshis, .. } => { + sum + amount_satoshis + }, + _ => panic!("Unexpected balance type"), + }); + // On the first commitment, node[1]'s balance was below dust so it didn't have an output + let node1_channel_balance = if broadcast_initial_commitment { + 0 + } else { + revoked_commitment_tx.output[0].value.to_sat() + }; + let expected_claimable_balance = node1_channel_balance + justice_tx.output[0].value.to_sat(); + assert_eq!(total_claimable_balance, expected_claimable_balance); +} #[xtest(feature = "_externalize_tests")] -pub fn channel_reserve_in_flight_removes() { - // In cases where one side claims an HTLC, it thinks it has additional available funds that it - // can send to its counterparty, but due to update ordering, the other side may not yet have - // considered those HTLCs fully removed. - // This tests that we don't count HTLCs which will not be included in the next remote - // commitment transaction towards the reserve value (as it implies no commitment transaction - // will be generated which violates the remote reserve value). - // This was broken previously, and discovered by the chanmon_fail_consistency fuzz test. 
- // To test this we: - // * route two HTLCs from A to B (note that, at a high level, this test is checking that, when - // you consider the values of both of these HTLCs, B may not send an HTLC back to A, but if - // you only consider the value of the first HTLC, it may not), - // * start routing a third HTLC from A to B, - // * claim the first two HTLCs (though B will generate an update_fulfill for one, and put - // the other claim in its holding cell, as it immediately goes into AwaitingRAA), - // * deliver the first fulfill from B - // * deliver the update_add and an RAA from A, resulting in B freeing the second holding cell - // claim, - // * deliver A's response CS and RAA. - // This results in A having the second HTLC in AwaitingRemovedRemoteRevoke, but B having - // removed it fully. B now has the push_msat plus the first two HTLCs in value. - // * Now B happily sends another HTLC, potentially violating its reserve value from A's point - // of view (if A counts the AwaitingRemovedRemoteRevoke HTLC). - let chanmon_cfgs = create_chanmon_cfgs(2); +pub fn claim_htlc_outputs() { + // Node revoked old state, htlcs haven't time out yet, claim them in shared justice tx + let mut chanmon_cfgs = create_chanmon_cfgs(2); + chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true; let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); - let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); - let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1); - - let b_chan_values = get_channel_value_stat!(nodes[1], nodes[0], chan_1.2); - // Route the first two HTLCs. - let payment_value_1 = b_chan_values.channel_reserve_msat - b_chan_values.value_to_self_msat - 10000; - let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], payment_value_1); - let (payment_preimage_2, payment_hash_2, ..) 
= route_payment(&nodes[0], &[&nodes[1]], 20_000); - - // Start routing the third HTLC (this is just used to get everyone in the right state). - let (route, payment_hash_3, payment_preimage_3, payment_secret_3) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000); - let send_1 = { - nodes[0].node.send_payment_with_route(route, payment_hash_3, - RecipientOnionFields::secret_only(payment_secret_3), PaymentId(payment_hash_3.0)).unwrap(); - check_added_monitors!(nodes[0], 1); - let mut events = nodes[0].node.get_and_clear_pending_msg_events(); - assert_eq!(events.len(), 1); - SendEvent::from_event(events.remove(0)) - }; - - // Now claim both of the first two HTLCs on B's end, putting B in AwaitingRAA and generating an - // initial fulfill/CS. - nodes[1].node.claim_funds(payment_preimage_1); - expect_payment_claimed!(nodes[1], payment_hash_1, payment_value_1); - check_added_monitors!(nodes[1], 1); - let bs_removes = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); - - // This claim goes in B's holding cell, allowing us to have a pending B->A RAA which does not - // remove the second HTLC when we send the HTLC back from B to A. 
- nodes[1].node.claim_funds(payment_preimage_2); - expect_payment_claimed!(nodes[1], payment_hash_2, 20_000); - check_added_monitors!(nodes[1], 1); - assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - - nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &bs_removes.update_fulfill_htlcs[0]); - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_removes.commitment_signed); - check_added_monitors!(nodes[0], 1); - let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); - expect_payment_sent(&nodes[0], payment_preimage_1, None, false, false); - - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &send_1.msgs[0]); - nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &send_1.commitment_msg); - check_added_monitors!(nodes[1], 1); - // B is already AwaitingRAA, so cant generate a CS here - let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); - - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_raa); - check_added_monitors!(nodes[1], 1); - let bs_cs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); - - nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_raa); - check_added_monitors!(nodes[0], 1); - let as_cs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); - - nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &as_cs.commitment_signed); - check_added_monitors!(nodes[1], 1); - let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); - - // The second HTLCis removed, but as A is in AwaitingRAA it can't generate a CS here, so the - // RAA that B generated above doesn't fully resolve the second HTLC from A's point of view. 
- // However, the RAA A generates here *does* fully resolve the HTLC from B's point of view (as A - // can no longer broadcast a commitment transaction with it and B has the preimage so can go - // on-chain as necessary). - nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &bs_cs.update_fulfill_htlcs[0]); - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_cs.commitment_signed); - check_added_monitors!(nodes[0], 1); - let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); - expect_payment_sent(&nodes[0], payment_preimage_2, None, false, false); - - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_raa); - check_added_monitors!(nodes[1], 1); - assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - - expect_pending_htlcs_forwardable!(nodes[1]); - expect_payment_claimable!(nodes[1], payment_hash_3, payment_secret_3, 100000); - - // Note that as this RAA was generated before the delivery of the update_fulfill it shouldn't - // resolve the second HTLC from A's point of view. - nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_raa); - check_added_monitors!(nodes[0], 1); - expect_payment_path_successful!(nodes[0]); - let as_cs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); - - // Now that B doesn't have the second RAA anymore, but A still does, send a payment from B back - // to A to ensure that A doesn't count the almost-removed HTLC in update_add processing. 
- let (route, payment_hash_4, payment_preimage_4, payment_secret_4) = get_route_and_payment_hash!(nodes[1], nodes[0], 10000); - let send_2 = { - nodes[1].node.send_payment_with_route(route, payment_hash_4, - RecipientOnionFields::secret_only(payment_secret_4), PaymentId(payment_hash_4.0)).unwrap(); - check_added_monitors!(nodes[1], 1); - let mut events = nodes[1].node.get_and_clear_pending_msg_events(); - assert_eq!(events.len(), 1); - SendEvent::from_event(events.remove(0)) - }; - - nodes[0].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &send_2.msgs[0]); - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &send_2.commitment_msg); - check_added_monitors!(nodes[0], 1); - let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - // Now just resolve all the outstanding messages/HTLCs for completeness... + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); - nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &as_cs.commitment_signed); - check_added_monitors!(nodes[1], 1); - let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); + // Create some new channel: + let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1); - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_raa); - check_added_monitors!(nodes[1], 1); + // Rebalance the network to generate htlc in the two directions + send_payment(&nodes[0], &[&nodes[1]], 8_000_000); + // node[0] is gonna to revoke an old state thus node[1] should be able to claim both offered/received HTLC outputs on top of commitment tx + let payment_preimage_1 = route_payment(&nodes[0], &[&nodes[1]], 3_000_000).0; + let (_payment_preimage_2, payment_hash_2, ..) 
= + route_payment(&nodes[1], &[&nodes[0]], 3_000_000); - nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_raa); - check_added_monitors!(nodes[0], 1); - expect_payment_path_successful!(nodes[0]); - let as_cs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); + // Get the will-be-revoked local txn from node[0] + let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_1.2); + assert_eq!(revoked_local_txn.len(), 2); // commitment tx + 1 HTLC-Timeout tx + assert_eq!(revoked_local_txn[0].input.len(), 1); + assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_1.3.compute_txid()); + assert_eq!(revoked_local_txn[1].input.len(), 1); + assert_eq!( + revoked_local_txn[1].input[0].previous_output.txid, + revoked_local_txn[0].compute_txid() + ); + assert_eq!( + revoked_local_txn[1].input[0].witness.last().unwrap().len(), + OFFERED_HTLC_SCRIPT_WEIGHT + ); // HTLC-Timeout + check_spends!(revoked_local_txn[1], revoked_local_txn[0]); - nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &as_cs.commitment_signed); - check_added_monitors!(nodes[1], 1); - let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); + // Revoke the old state. 
+ claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1); - nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_raa); - check_added_monitors!(nodes[0], 1); + { + mine_transaction(&nodes[0], &revoked_local_txn[0]); + check_added_monitors(&nodes[0], 1); + check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [node_b_id], 100000); + mine_transaction(&nodes[1], &revoked_local_txn[0]); + check_added_monitors(&nodes[1], 1); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [node_a_id], 100000); + connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); + assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); - expect_pending_htlcs_forwardable!(nodes[0]); - expect_payment_claimable!(nodes[0], payment_hash_4, payment_secret_4, 10000); + let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); + assert_eq!(node_txn.len(), 2); // ChannelMonitor: penalty txn - claim_payment(&nodes[1], &[&nodes[0]], payment_preimage_4); - claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_3); -} + // The ChannelMonitor should claim the accepted HTLC output separately from the offered + // HTLC and to_self outputs. 
+ let accepted_claim = node_txn.iter().filter(|tx| tx.input.len() == 1).next().unwrap(); + let offered_to_self_claim = + node_txn.iter().filter(|tx| tx.input.len() == 2).next().unwrap(); + check_spends!(accepted_claim, revoked_local_txn[0]); + check_spends!(offered_to_self_claim, revoked_local_txn[0]); + assert_eq!( + accepted_claim.input[0].witness.last().unwrap().len(), + ACCEPTED_HTLC_SCRIPT_WEIGHT + ); -enum PostFailBackAction { - TimeoutOnChain, - ClaimOnChain, - FailOffChain, - ClaimOffChain, -} + let mut witness_lens = BTreeSet::new(); + witness_lens.insert(offered_to_self_claim.input[0].witness.last().unwrap().len()); + witness_lens.insert(offered_to_self_claim.input[1].witness.last().unwrap().len()); + assert_eq!(witness_lens.len(), 2); + assert_eq!(*witness_lens.iter().skip(0).next().unwrap(), 77); // revoked to_local + assert_eq!(*witness_lens.iter().skip(1).next().unwrap(), OFFERED_HTLC_SCRIPT_WEIGHT); -#[test] -fn test_fail_back_before_backwards_timeout() { - do_test_fail_back_before_backwards_timeout(PostFailBackAction::TimeoutOnChain); - do_test_fail_back_before_backwards_timeout(PostFailBackAction::ClaimOnChain); - do_test_fail_back_before_backwards_timeout(PostFailBackAction::FailOffChain); - do_test_fail_back_before_backwards_timeout(PostFailBackAction::ClaimOffChain); + // Finally, mine the penalty transaction and check that we get an HTLC failure after + // ANTI_REORG_DELAY confirmations. 
+ mine_transaction(&nodes[1], accepted_claim); + connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); + expect_payment_failed!(nodes[1], payment_hash_2, false); + } + get_announce_close_broadcast_events(&nodes, 0, 1); + assert_eq!(nodes[0].node.list_channels().len(), 0); + assert_eq!(nodes[1].node.list_channels().len(), 0); } -fn do_test_fail_back_before_backwards_timeout(post_fail_back_action: PostFailBackAction) { - // Test that we fail an HTLC upstream if we are still waiting for confirmation downstream - // just before the upstream timeout expires +// Test that the HTLC package logic removes HTLCs from the package when they are claimed by the +// counterparty, even when the counterparty claims HTLCs from multiple packages in a single +// transaction. +// +// This is a regression test for https://github.com/lightningdevkit/rust-lightning/issues/3537. +#[xtest(feature = "_externalize_tests")] +pub fn test_multiple_package_conflicts() { let chanmon_cfgs = create_chanmon_cfgs(3); let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let mut user_cfg = test_default_channel_config(); + + // Anchor channels are required so that multiple HTLC-Successes can be aggregated into a single + // transaction. 
+ user_cfg.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true; + user_cfg.manually_accept_inbound_channels = true; + + let configs = [Some(user_cfg.clone()), Some(user_cfg.clone()), Some(user_cfg)]; + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &configs); let nodes = create_network(3, &node_cfgs, &node_chanmgrs); - for node in nodes.iter() { - *node.fee_estimator.sat_per_kw.lock().unwrap() = 2000; - } + let node_a_id = nodes[0].node.get_our_node_id(); let node_b_id = nodes[1].node.get_our_node_id(); let node_c_id = nodes[2].node.get_our_node_id(); - create_announced_chan_between_nodes(&nodes, 0, 1); - let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2); - - // Start every node on the same block height to make reasoning about timeouts easier - connect_blocks(&nodes[0], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[0].best_block_info().1); - connect_blocks(&nodes[1], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[1].best_block_info().1); - connect_blocks(&nodes[2], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[2].best_block_info().1); + // Since we're using anchor channels, make sure each node has a UTXO for paying fees. 
+ let coinbase_tx = Transaction { + version: Version::TWO, + lock_time: LockTime::ZERO, + input: vec![TxIn { ..Default::default() }], + output: vec![ + TxOut { + value: Amount::ONE_BTC, + script_pubkey: nodes[0].wallet_source.get_change_script().unwrap(), + }, + TxOut { + value: Amount::ONE_BTC, + script_pubkey: nodes[1].wallet_source.get_change_script().unwrap(), + }, + TxOut { + value: Amount::ONE_BTC, + script_pubkey: nodes[2].wallet_source.get_change_script().unwrap(), + }, + ], + }; + nodes[0].wallet_source.add_utxo( + bitcoin::OutPoint { txid: coinbase_tx.compute_txid(), vout: 0 }, + coinbase_tx.output[0].value, + ); + nodes[1].wallet_source.add_utxo( + bitcoin::OutPoint { txid: coinbase_tx.compute_txid(), vout: 1 }, + coinbase_tx.output[1].value, + ); + nodes[2].wallet_source.add_utxo( + bitcoin::OutPoint { txid: coinbase_tx.compute_txid(), vout: 2 }, + coinbase_tx.output[2].value, + ); - let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000); + // Create the network. + // 0 -- 1 -- 2 + // + // Payments will be routed from node 0 to node 2. Node 2 will force close and spend HTLCs from + // two of node 1's packages. We will then verify that node 1 correctly removes the conflicting + // HTLC spends from its packages. 
+ const CHAN_CAPACITY: u64 = 10_000_000; + create_announced_chan_between_nodes_with_value(&nodes, 0, 1, CHAN_CAPACITY, 0); + let (_, _, cid_1_2, funding_tx_1_2) = + create_announced_chan_between_nodes_with_value(&nodes, 1, 2, CHAN_CAPACITY, 0); - // Force close the B<->C channel by timing out the HTLC - let timeout_blocks = TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1; - connect_blocks(&nodes[1], timeout_blocks); - let node_1_txn = test_txn_broadcast(&nodes[1], &chan_2, None, HTLCType::TIMEOUT); - check_closed_event(&nodes[1], 1, ClosureReason::HTLCsTimedOut, false, &[node_c_id], 100_000); - check_closed_broadcast(&nodes[1], 1, true); - check_added_monitors(&nodes[1], 1); + // Ensure all nodes are at the same initial height. + let node_max_height = nodes.iter().map(|node| node.best_block_info().1).max().unwrap(); + for node in &nodes { + let blocks_to_mine = node_max_height - node.best_block_info().1; + if blocks_to_mine > 0 { + connect_blocks(node, blocks_to_mine); + } + } - // After the A<->B HTLC gets within LATENCY_GRACE_PERIOD_BLOCKS we will fail the HTLC to avoid - // the channel force-closing. Note that we already connected `TEST_FINAL_CLTV + - // LATENCY_GRACE_PERIOD_BLOCKS` blocks above, so we subtract that from the HTLC expiry (which - // is `TEST_FINAL_CLTV` + `MIN_CLTV_EXPIRY_DELTA`). - let upstream_timeout_blocks = MIN_CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS * 2; - connect_blocks(&nodes[1], upstream_timeout_blocks); + // Route HTLC 1. + let (preimage_1, payment_hash_1, ..) = + route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000); - // Connect blocks for nodes[0] to make sure they don't go on-chain - connect_blocks(&nodes[0], timeout_blocks + upstream_timeout_blocks); + // Route HTLCs 2 and 3, with CLTVs 1 higher than HTLC 1. The higher CLTVs will cause these + // HTLCs to be included in a different package than HTLC 1. 
+ connect_blocks(&nodes[0], 1); + connect_blocks(&nodes[1], 1); + connect_blocks(&nodes[2], 1); + let (preimage_2, payment_hash_2, ..) = + route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000); + route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 900_000_000); - // Check that nodes[1] fails the HTLC upstream - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], - vec![HTLCHandlingFailureType::Forward { - node_id: Some(nodes[2].node.get_our_node_id()), - channel_id: chan_2.2 - }]); - check_added_monitors!(nodes[1], 1); - let htlc_updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); - let msgs::CommitmentUpdate { update_fail_htlcs, commitment_signed, .. } = htlc_updates; + // Mine blocks until HTLC 1 times out in 1 block and HTLCs 2 and 3 time out in 2 blocks. + connect_blocks(&nodes[1], TEST_FINAL_CLTV - 1); - nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &update_fail_htlcs[0]); - commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false); - expect_payment_failed_conditions(&nodes[0], payment_hash, false, - PaymentFailedConditions::new().blamed_chan_closed(true)); + // Node 2 force closes, causing node 1 to group the HTLCs into the following packages: + // Package 1: HTLC 1 + // Package 2: HTLCs 2 and 3 + let node2_commit_tx = get_local_commitment_txn!(nodes[2], cid_1_2); + assert_eq!(node2_commit_tx.len(), 1); + let node2_commit_tx = &node2_commit_tx[0]; + check_spends!(node2_commit_tx, funding_tx_1_2); + mine_transaction(&nodes[1], node2_commit_tx); + check_closed_event( + &nodes[1], + 1, + ClosureReason::CommitmentTxConfirmed, + false, + &[node_c_id], + CHAN_CAPACITY, + ); + check_closed_broadcast!(nodes[1], true); + check_added_monitors(&nodes[1], 1); - // Make sure we handle possible duplicate fails or extra messages after failing back - match post_fail_back_action { - PostFailBackAction::TimeoutOnChain => { - // Confirm nodes[1]'s claim with timeout, make sure we don't fail 
upstream again - mine_transaction(&nodes[1], &node_1_txn[0]); // Commitment - mine_transaction(&nodes[1], &node_1_txn[1]); // HTLC timeout - connect_blocks(&nodes[1], ANTI_REORG_DELAY); - // Expect handling another fail back event, but the HTLC is already gone - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], - vec![HTLCHandlingFailureType::Forward { - node_id: Some(nodes[2].node.get_our_node_id()), - channel_id: chan_2.2 - }]); - assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - }, - PostFailBackAction::ClaimOnChain => { - nodes[2].node.claim_funds(payment_preimage); - expect_payment_claimed!(nodes[2], payment_hash, 3_000_000); - check_added_monitors!(nodes[2], 1); - get_htlc_update_msgs(&nodes[2], &nodes[1].node.get_our_node_id()); + // Node 1 should immediately claim package 1 but has to wait a block to claim package 2. + let timeout_tx = nodes[1].tx_broadcaster.txn_broadcast(); + assert_eq!(timeout_tx.len(), 1); + check_spends!(timeout_tx[0], node2_commit_tx); + assert_eq!(timeout_tx[0].input.len(), 1); - connect_blocks(&nodes[2], TEST_FINAL_CLTV - CLTV_CLAIM_BUFFER + 2); - let node_2_txn = test_txn_broadcast(&nodes[2], &chan_2, None, HTLCType::SUCCESS); - check_closed_broadcast!(nodes[2], true); - check_closed_event(&nodes[2], 1, ClosureReason::HTLCsTimedOut, false, &[node_b_id], 100_000); - check_added_monitors!(nodes[2], 1); - - mine_transaction(&nodes[1], &node_2_txn[0]); // Commitment - mine_transaction(&nodes[1], &node_2_txn[1]); // HTLC success - connect_blocks(&nodes[1], ANTI_REORG_DELAY); - assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - }, - PostFailBackAction::FailOffChain => { - nodes[2].node.fail_htlc_backwards(&payment_hash); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], - vec![HTLCHandlingFailureType::Receive { payment_hash }]); - check_added_monitors!(nodes[2], 1); - let commitment_update = get_htlc_update_msgs(&nodes[2], 
&nodes[1].node.get_our_node_id()); - let update_fail = commitment_update.update_fail_htlcs[0].clone(); - - nodes[1].node.handle_update_fail_htlc(nodes[2].node.get_our_node_id(), &update_fail); - let err_msg = get_err_msg(&nodes[1], &nodes[2].node.get_our_node_id()); - assert_eq!(err_msg.channel_id, chan_2.2); - assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - }, - PostFailBackAction::ClaimOffChain => { - nodes[2].node.claim_funds(payment_preimage); - expect_payment_claimed!(nodes[2], payment_hash, 3_000_000); - check_added_monitors!(nodes[2], 1); - let commitment_update = get_htlc_update_msgs(&nodes[2], &nodes[1].node.get_our_node_id()); - let update_fulfill = commitment_update.update_fulfill_htlcs[0].clone(); - - nodes[1].node.handle_update_fulfill_htlc(nodes[2].node.get_our_node_id(), &update_fulfill); - let err_msg = get_err_msg(&nodes[1], &nodes[2].node.get_our_node_id()); - assert_eq!(err_msg.channel_id, chan_2.2); - assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - }, - }; -} - -#[xtest(feature = "_externalize_tests")] -pub fn channel_monitor_network_test() { - // Simple test which builds a network of ChannelManagers, connects them to each other, and - // tests that ChannelMonitor is able to recover from various states. 
- let chanmon_cfgs = create_chanmon_cfgs(5); - let node_cfgs = create_node_cfgs(5, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(5, &node_cfgs, &[None, None, None, None, None]); - let nodes = create_network(5, &node_cfgs, &node_chanmgrs); - - // Create some initial channels - let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1); - let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2); - let chan_3 = create_announced_chan_between_nodes(&nodes, 2, 3); - let chan_4 = create_announced_chan_between_nodes(&nodes, 3, 4); - - // Make sure all nodes are at the same starting height - connect_blocks(&nodes[0], 4*CHAN_CONFIRM_DEPTH + 1 - nodes[0].best_block_info().1); - connect_blocks(&nodes[1], 4*CHAN_CONFIRM_DEPTH + 1 - nodes[1].best_block_info().1); - connect_blocks(&nodes[2], 4*CHAN_CONFIRM_DEPTH + 1 - nodes[2].best_block_info().1); - connect_blocks(&nodes[3], 4*CHAN_CONFIRM_DEPTH + 1 - nodes[3].best_block_info().1); - connect_blocks(&nodes[4], 4*CHAN_CONFIRM_DEPTH + 1 - nodes[4].best_block_info().1); - - // Rebalance the network a bit by relaying one payment through all the channels... 
- send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000); - send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000); - send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000); - send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000); - - // Simple case with no pending HTLCs: - let error_message = "Channel force-closed"; - nodes[1].node.force_close_broadcasting_latest_txn(&chan_1.2, &nodes[0].node.get_our_node_id(), error_message.to_string()).unwrap(); - check_added_monitors!(nodes[1], 1); - check_closed_broadcast!(nodes[1], true); - check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[0].node.get_our_node_id()], 100000); - { - let mut node_txn = test_txn_broadcast(&nodes[1], &chan_1, None, HTLCType::NONE); - assert_eq!(node_txn.len(), 1); - mine_transaction(&nodes[1], &node_txn[0]); - if nodes[1].connect_style.borrow().updates_best_block_first() { - let _ = nodes[1].tx_broadcaster.txn_broadcast(); - } - - mine_transaction(&nodes[0], &node_txn[0]); - check_added_monitors!(nodes[0], 1); - test_txn_broadcast(&nodes[0], &chan_1, Some(node_txn[0].clone()), HTLCType::NONE); - } - check_closed_broadcast!(nodes[0], true); - assert_eq!(nodes[0].node.list_channels().len(), 0); - assert_eq!(nodes[1].node.list_channels().len(), 1); - check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000); - - // One pending HTLC is discarded by the force-close: - let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[1], &[&nodes[2], &nodes[3]], 3_000_000); - - // Simple case of one pending HTLC to HTLC-Timeout (note that the HTLC-Timeout is not - // broadcasted until we reach the timelock time). 
- let error_message = "Channel force-closed"; - nodes[1].node.force_close_broadcasting_latest_txn(&chan_2.2, &nodes[2].node.get_our_node_id(), error_message.to_string()).unwrap(); - check_closed_broadcast!(nodes[1], true); - check_added_monitors!(nodes[1], 1); - { - let mut node_txn = test_txn_broadcast(&nodes[1], &chan_2, None, HTLCType::NONE); - connect_blocks(&nodes[1], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + MIN_CLTV_EXPIRY_DELTA as u32 + 1); - test_txn_broadcast(&nodes[1], &chan_2, None, HTLCType::TIMEOUT); - mine_transaction(&nodes[2], &node_txn[0]); - check_added_monitors!(nodes[2], 1); - test_txn_broadcast(&nodes[2], &chan_2, Some(node_txn[0].clone()), HTLCType::NONE); - } - check_closed_broadcast!(nodes[2], true); - assert_eq!(nodes[1].node.list_channels().len(), 0); - assert_eq!(nodes[2].node.list_channels().len(), 1); - check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[2].node.get_our_node_id()], 100000); - check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000); - - macro_rules! claim_funds { - ($node: expr, $prev_node: expr, $preimage: expr, $payment_hash: expr) => { - { - $node.node.claim_funds($preimage); - expect_payment_claimed!($node, $payment_hash, 3_000_000); - check_added_monitors!($node, 1); - - let events = $node.node.get_and_clear_pending_msg_events(); - assert_eq!(events.len(), 1); - match events[0] { - MessageSendEvent::UpdateHTLCs { ref node_id, channel_id: _, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, .. 
} } => { - assert!(update_add_htlcs.is_empty()); - assert!(update_fail_htlcs.is_empty()); - assert_eq!(*node_id, $prev_node.node.get_our_node_id()); - }, - _ => panic!("Unexpected event"), - }; - } - } - } - - // nodes[3] gets the preimage, but nodes[2] already disconnected, resulting in a nodes[2] - // HTLC-Timeout and a nodes[3] claim against it (+ its own announces) - let error_message = "Channel force-closed"; - nodes[2].node.force_close_broadcasting_latest_txn(&chan_3.2, &nodes[3].node.get_our_node_id(), error_message.to_string()).unwrap(); - check_added_monitors!(nodes[2], 1); - check_closed_broadcast!(nodes[2], true); - let node2_commitment_txid; - { - let node_txn = test_txn_broadcast(&nodes[2], &chan_3, None, HTLCType::NONE); - connect_blocks(&nodes[2], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS); - test_txn_broadcast(&nodes[2], &chan_3, None, HTLCType::TIMEOUT); - node2_commitment_txid = node_txn[0].compute_txid(); - - // Claim the payment on nodes[3], giving it knowledge of the preimage - claim_funds!(nodes[3], nodes[2], payment_preimage_1, payment_hash_1); - mine_transaction(&nodes[3], &node_txn[0]); - check_added_monitors!(nodes[3], 1); - check_preimage_claim(&nodes[3], &node_txn); - } - check_closed_broadcast!(nodes[3], true); - assert_eq!(nodes[2].node.list_channels().len(), 0); - assert_eq!(nodes[3].node.list_channels().len(), 1); - check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[3].node.get_our_node_id()], 100000); - check_closed_event!(nodes[3], 1, ClosureReason::CommitmentTxConfirmed, [nodes[2].node.get_our_node_id()], 100000); - - // Drop the ChannelMonitor for the previous channel to avoid it broadcasting transactions and - // confusing us in the following tests. - let chan_3_mon = nodes[3].chain_monitor.chain_monitor.remove_monitor(&chan_3.2); - - // One pending HTLC to time out: - let (payment_preimage_2, payment_hash_2, ..) 
= route_payment(&nodes[3], &[&nodes[4]], 3_000_000); - // CLTV expires at TEST_FINAL_CLTV + 1 (current height) + 1 (added in send_payment for - // buffer space). - - let (close_chan_update_1, close_chan_update_2) = { - connect_blocks(&nodes[3], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1); - let events = nodes[3].node.get_and_clear_pending_msg_events(); - assert_eq!(events.len(), 2); - let close_chan_update_1 = match events[1] { - MessageSendEvent::BroadcastChannelUpdate { ref msg } => { - msg.clone() - }, - _ => panic!("Unexpected event"), - }; - match events[0] { - MessageSendEvent::HandleError { action: ErrorAction::DisconnectPeer { .. }, node_id } => { - assert_eq!(node_id, nodes[4].node.get_our_node_id()); - }, - _ => panic!("Unexpected event"), - } - check_added_monitors!(nodes[3], 1); - - // Clear bumped claiming txn spending node 2 commitment tx. Bumped txn are generated after reaching some height timer. - { - let mut node_txn = nodes[3].tx_broadcaster.txn_broadcasted.lock().unwrap(); - node_txn.retain(|tx| { - if tx.input[0].previous_output.txid == node2_commitment_txid { - false - } else { true } - }); - } - - let node_txn = test_txn_broadcast(&nodes[3], &chan_4, None, HTLCType::TIMEOUT); - - // Claim the payment on nodes[4], giving it knowledge of the preimage - claim_funds!(nodes[4], nodes[3], payment_preimage_2, payment_hash_2); - - connect_blocks(&nodes[4], TEST_FINAL_CLTV - CLTV_CLAIM_BUFFER + 2); - let events = nodes[4].node.get_and_clear_pending_msg_events(); - assert_eq!(events.len(), 2); - let close_chan_update_2 = match events[1] { - MessageSendEvent::BroadcastChannelUpdate { ref msg } => { - msg.clone() - }, - _ => panic!("Unexpected event"), - }; - match events[0] { - MessageSendEvent::HandleError { action: ErrorAction::DisconnectPeer { .. 
}, node_id } => { - assert_eq!(node_id, nodes[3].node.get_our_node_id()); - }, - _ => panic!("Unexpected event"), - } - check_added_monitors!(nodes[4], 1); - test_txn_broadcast(&nodes[4], &chan_4, None, HTLCType::SUCCESS); - check_closed_event!(nodes[4], 1, ClosureReason::HTLCsTimedOut, [nodes[3].node.get_our_node_id()], 100000); - - mine_transaction(&nodes[4], &node_txn[0]); - check_preimage_claim(&nodes[4], &node_txn); - (close_chan_update_1, close_chan_update_2) - }; - let node_id_4 = nodes[4].node.get_our_node_id(); - let node_id_3 = nodes[3].node.get_our_node_id(); - nodes[3].gossip_sync.handle_channel_update(Some(node_id_4), &close_chan_update_2).unwrap(); - nodes[4].gossip_sync.handle_channel_update(Some(node_id_3), &close_chan_update_1).unwrap(); - assert_eq!(nodes[3].node.list_channels().len(), 0); - assert_eq!(nodes[4].node.list_channels().len(), 0); - - assert_eq!(nodes[3].chain_monitor.chain_monitor.watch_channel(chan_3.2, chan_3_mon), - Ok(ChannelMonitorUpdateStatus::Completed)); - check_closed_event!(nodes[3], 1, ClosureReason::HTLCsTimedOut, [node_id_4], 100000); -} - -#[xtest(feature = "_externalize_tests")] -pub fn test_justice_tx_htlc_timeout() { - // Test justice txn built on revoked HTLC-Timeout tx, against both sides - let mut alice_config = test_default_channel_config(); - alice_config.channel_handshake_config.announce_for_forwarding = true; - alice_config.channel_handshake_limits.force_announced_channel_preference = false; - alice_config.channel_handshake_config.our_to_self_delay = 6 * 24 * 5; - let mut bob_config = test_default_channel_config(); - bob_config.channel_handshake_config.announce_for_forwarding = true; - bob_config.channel_handshake_limits.force_announced_channel_preference = false; - bob_config.channel_handshake_config.our_to_self_delay = 6 * 24 * 3; - let user_cfgs = [Some(alice_config), Some(bob_config)]; - let mut chanmon_cfgs = create_chanmon_cfgs(2); - chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true; - 
chanmon_cfgs[1].keys_manager.disable_revocation_policy_check = true; - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &user_cfgs); - let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); - // Create some new channels: - let chan_5 = create_announced_chan_between_nodes(&nodes, 0, 1); - - // A pending HTLC which will be revoked: - let payment_preimage_3 = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0; - // Get the will-be-revoked local txn from nodes[0] - let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_5.2); - assert_eq!(revoked_local_txn.len(), 2); // First commitment tx, then HTLC tx - assert_eq!(revoked_local_txn[0].input.len(), 1); - assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_5.3.compute_txid()); - assert_eq!(revoked_local_txn[0].output.len(), 2); // Only HTLC and output back to 0 are present - assert_eq!(revoked_local_txn[1].input.len(), 1); - assert_eq!(revoked_local_txn[1].input[0].previous_output.txid, revoked_local_txn[0].compute_txid()); - assert_eq!(revoked_local_txn[1].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); // HTLC-Timeout - // Revoke the old state - claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_3); - - { - mine_transaction(&nodes[1], &revoked_local_txn[0]); - { - let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap(); - // The revoked HTLC output is not pinnable for another `TEST_FINAL_CLTV` blocks, and is - // thus claimed in the same transaction with the revoked to_self output. 
- assert_eq!(node_txn.len(), 1); - assert_eq!(node_txn[0].input.len(), 2); - check_spends!(node_txn[0], revoked_local_txn[0]); - assert_ne!(node_txn[0].input[0].previous_output, node_txn[0].input[1].previous_output); - node_txn.clear(); - } - check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000); - test_txn_broadcast(&nodes[1], &chan_5, Some(revoked_local_txn[0].clone()), HTLCType::NONE); - - mine_transaction(&nodes[0], &revoked_local_txn[0]); - connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires - // Verify broadcast of revoked HTLC-timeout - let node_txn = test_txn_broadcast(&nodes[0], &chan_5, Some(revoked_local_txn[0].clone()), HTLCType::TIMEOUT); - check_added_monitors!(nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000); - // Broadcast revoked HTLC-timeout on node 1 - mine_transaction(&nodes[1], &node_txn[1]); - test_revoked_htlc_claim_txn_broadcast(&nodes[1], node_txn[1].clone(), revoked_local_txn[0].clone()); - } - get_announce_close_broadcast_events(&nodes, 0, 1); - assert_eq!(nodes[0].node.list_channels().len(), 0); - assert_eq!(nodes[1].node.list_channels().len(), 0); -} - -#[xtest(feature = "_externalize_tests")] -pub fn test_justice_tx_htlc_success() { - // Test justice txn built on revoked HTLC-Success tx, against both sides - let mut alice_config = test_default_channel_config(); - alice_config.channel_handshake_config.announce_for_forwarding = true; - alice_config.channel_handshake_limits.force_announced_channel_preference = false; - alice_config.channel_handshake_config.our_to_self_delay = 6 * 24 * 5; - let mut bob_config = test_default_channel_config(); - bob_config.channel_handshake_config.announce_for_forwarding = true; - bob_config.channel_handshake_limits.force_announced_channel_preference = false; - 
bob_config.channel_handshake_config.our_to_self_delay = 6 * 24 * 3; - let user_cfgs = [Some(alice_config), Some(bob_config)]; - let mut chanmon_cfgs = create_chanmon_cfgs(2); - chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true; - chanmon_cfgs[1].keys_manager.disable_revocation_policy_check = true; - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &user_cfgs); - let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); - // Create some new channels: - let chan_6 = create_announced_chan_between_nodes(&nodes, 0, 1); - - // A pending HTLC which will be revoked: - let payment_preimage_4 = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0; - // Get the will-be-revoked local txn from B - let revoked_local_txn = get_local_commitment_txn!(nodes[1], chan_6.2); - assert_eq!(revoked_local_txn.len(), 1); // Only commitment tx - assert_eq!(revoked_local_txn[0].input.len(), 1); - assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_6.3.compute_txid()); - assert_eq!(revoked_local_txn[0].output.len(), 2); // Only HTLC and output back to A are present - // Revoke the old state - claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_4); - { - mine_transaction(&nodes[0], &revoked_local_txn[0]); - { - let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap(); - assert_eq!(node_txn.len(), 1); // ChannelMonitor: penalty tx - assert_eq!(node_txn[0].input.len(), 1); // We claim the received HTLC output - - check_spends!(node_txn[0], revoked_local_txn[0]); - node_txn.swap_remove(0); - } - check_added_monitors!(nodes[0], 1); - test_txn_broadcast(&nodes[0], &chan_6, Some(revoked_local_txn[0].clone()), HTLCType::NONE); - - mine_transaction(&nodes[1], &revoked_local_txn[0]); - check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000); - let node_txn = test_txn_broadcast(&nodes[1], &chan_6, 
Some(revoked_local_txn[0].clone()), HTLCType::SUCCESS); - check_added_monitors!(nodes[1], 1); - mine_transaction(&nodes[0], &node_txn[1]); - check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000); - test_revoked_htlc_claim_txn_broadcast(&nodes[0], node_txn[1].clone(), revoked_local_txn[0].clone()); - } - get_announce_close_broadcast_events(&nodes, 0, 1); - assert_eq!(nodes[0].node.list_channels().len(), 0); - assert_eq!(nodes[1].node.list_channels().len(), 0); -} - -#[xtest(feature = "_externalize_tests")] -pub fn revoked_output_claim() { - // Simple test to ensure a node will claim a revoked output when a stale remote commitment - // transaction is broadcast by its counterparty - let chanmon_cfgs = create_chanmon_cfgs(2); - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); - let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1); - // node[0] is gonna to revoke an old state thus node[1] should be able to claim the revoked output - let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_1.2); - assert_eq!(revoked_local_txn.len(), 1); - // Only output is the full channel value back to nodes[0]: - assert_eq!(revoked_local_txn[0].output.len(), 1); - // Send a payment through, updating everyone's latest commitment txn - send_payment(&nodes[0], &vec!(&nodes[1])[..], 5000000); - - // Inform nodes[1] that nodes[0] broadcast a stale tx - mine_transaction(&nodes[1], &revoked_local_txn[0]); - check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000); - let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); - assert_eq!(node_txn.len(), 1); // ChannelMonitor: justice tx against revoked to_local output - - check_spends!(node_txn[0], 
revoked_local_txn[0]); - - // Inform nodes[0] that a watchtower cheated on its behalf, so it will force-close the chan - mine_transaction(&nodes[0], &revoked_local_txn[0]); - get_announce_close_broadcast_events(&nodes, 0, 1); - check_added_monitors!(nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000); -} - -#[xtest(feature = "_externalize_tests")] -pub fn test_forming_justice_tx_from_monitor_updates() { - do_test_forming_justice_tx_from_monitor_updates(true); - do_test_forming_justice_tx_from_monitor_updates(false); -} - -fn do_test_forming_justice_tx_from_monitor_updates(broadcast_initial_commitment: bool) { - // Simple test to make sure that the justice tx formed in WatchtowerPersister - // is properly formed and can be broadcasted/confirmed successfully in the event - // that a revoked commitment transaction is broadcasted - // (Similar to `revoked_output_claim` test but we get the justice tx + broadcast manually) - let chanmon_cfgs = create_chanmon_cfgs(2); - let destination_script0 = chanmon_cfgs[0].keys_manager.get_destination_script([0; 32]).unwrap(); - let destination_script1 = chanmon_cfgs[1].keys_manager.get_destination_script([0; 32]).unwrap(); - let persisters = vec![WatchtowerPersister::new(destination_script0), - WatchtowerPersister::new(destination_script1)]; - let node_cfgs = create_node_cfgs_with_persisters(2, &chanmon_cfgs, persisters.iter().collect()); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); - let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - let (_, _, channel_id, _) = create_announced_chan_between_nodes(&nodes, 0, 1); - - if !broadcast_initial_commitment { - // Send a payment to move the channel forward - send_payment(&nodes[0], &vec!(&nodes[1])[..], 5_000_000); - } - - // node[0] is gonna to revoke an old state thus node[1] should be able to claim the revoked output. 
- // We'll keep this commitment transaction to broadcast once it's revoked. - let revoked_local_txn = get_local_commitment_txn!(nodes[0], channel_id); - assert_eq!(revoked_local_txn.len(), 1); - let revoked_commitment_tx = &revoked_local_txn[0]; - - // Send another payment, now revoking the previous commitment tx - send_payment(&nodes[0], &vec!(&nodes[1])[..], 5_000_000); - - let justice_tx = persisters[1].justice_tx(channel_id, &revoked_commitment_tx.compute_txid()).unwrap(); - check_spends!(justice_tx, revoked_commitment_tx); - - mine_transactions(&nodes[1], &[revoked_commitment_tx, &justice_tx]); - mine_transactions(&nodes[0], &[revoked_commitment_tx, &justice_tx]); - - check_added_monitors!(nodes[1], 1); - check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, false, - &[nodes[0].node.get_our_node_id()], 100_000); - get_announce_close_broadcast_events(&nodes, 1, 0); - - check_added_monitors!(nodes[0], 1); - check_closed_event(&nodes[0], 1, ClosureReason::CommitmentTxConfirmed, false, - &[nodes[1].node.get_our_node_id()], 100_000); - - // Check that the justice tx has sent the revoked output value to nodes[1] - let monitor = get_monitor!(nodes[1], channel_id); - let total_claimable_balance = monitor.get_claimable_balances().iter().fold(0, |sum, balance| { - match balance { - channelmonitor::Balance::ClaimableAwaitingConfirmations { amount_satoshis, .. 
} => sum + amount_satoshis, - _ => panic!("Unexpected balance type"), - } - }); - // On the first commitment, node[1]'s balance was below dust so it didn't have an output - let node1_channel_balance = if broadcast_initial_commitment { 0 } else { revoked_commitment_tx.output[0].value.to_sat() }; - let expected_claimable_balance = node1_channel_balance + justice_tx.output[0].value.to_sat(); - assert_eq!(total_claimable_balance, expected_claimable_balance); -} - - -#[xtest(feature = "_externalize_tests")] -pub fn claim_htlc_outputs() { - // Node revoked old state, htlcs haven't time out yet, claim them in shared justice tx - let mut chanmon_cfgs = create_chanmon_cfgs(2); - chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true; - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); - let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - - // Create some new channel: - let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1); - - // Rebalance the network to generate htlc in the two directions - send_payment(&nodes[0], &[&nodes[1]], 8_000_000); - // node[0] is gonna to revoke an old state thus node[1] should be able to claim both offered/received HTLC outputs on top of commitment tx - let payment_preimage_1 = route_payment(&nodes[0], &[&nodes[1]], 3_000_000).0; - let (_payment_preimage_2, payment_hash_2, ..) 
= route_payment(&nodes[1], &[&nodes[0]], 3_000_000); - - // Get the will-be-revoked local txn from node[0] - let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_1.2); - assert_eq!(revoked_local_txn.len(), 2); // commitment tx + 1 HTLC-Timeout tx - assert_eq!(revoked_local_txn[0].input.len(), 1); - assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_1.3.compute_txid()); - assert_eq!(revoked_local_txn[1].input.len(), 1); - assert_eq!(revoked_local_txn[1].input[0].previous_output.txid, revoked_local_txn[0].compute_txid()); - assert_eq!(revoked_local_txn[1].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); // HTLC-Timeout - check_spends!(revoked_local_txn[1], revoked_local_txn[0]); - - // Revoke the old state. - claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_1); - - { - mine_transaction(&nodes[0], &revoked_local_txn[0]); - check_added_monitors!(nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000); - mine_transaction(&nodes[1], &revoked_local_txn[0]); - check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000); - connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); - assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); - - let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); - assert_eq!(node_txn.len(), 2); // ChannelMonitor: penalty txn - - // The ChannelMonitor should claim the accepted HTLC output separately from the offered - // HTLC and to_self outputs. 
- let accepted_claim = node_txn.iter().filter(|tx| tx.input.len() == 1).next().unwrap(); - let offered_to_self_claim = node_txn.iter().filter(|tx| tx.input.len() == 2).next().unwrap(); - check_spends!(accepted_claim, revoked_local_txn[0]); - check_spends!(offered_to_self_claim, revoked_local_txn[0]); - assert_eq!(accepted_claim.input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT); - - let mut witness_lens = BTreeSet::new(); - witness_lens.insert(offered_to_self_claim.input[0].witness.last().unwrap().len()); - witness_lens.insert(offered_to_self_claim.input[1].witness.last().unwrap().len()); - assert_eq!(witness_lens.len(), 2); - assert_eq!(*witness_lens.iter().skip(0).next().unwrap(), 77); // revoked to_local - assert_eq!(*witness_lens.iter().skip(1).next().unwrap(), OFFERED_HTLC_SCRIPT_WEIGHT); - - // Finally, mine the penalty transaction and check that we get an HTLC failure after - // ANTI_REORG_DELAY confirmations. - mine_transaction(&nodes[1], accepted_claim); - connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); - expect_payment_failed!(nodes[1], payment_hash_2, false); - } - get_announce_close_broadcast_events(&nodes, 0, 1); - assert_eq!(nodes[0].node.list_channels().len(), 0); - assert_eq!(nodes[1].node.list_channels().len(), 0); -} - -// Test that the HTLC package logic removes HTLCs from the package when they are claimed by the -// counterparty, even when the counterparty claims HTLCs from multiple packages in a single -// transaction. -// -// This is a regression test for https://github.com/lightningdevkit/rust-lightning/issues/3537. -#[xtest(feature = "_externalize_tests")] -pub fn test_multiple_package_conflicts() { - let chanmon_cfgs = create_chanmon_cfgs(3); - let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); - let mut user_cfg = test_default_channel_config(); - - // Anchor channels are required so that multiple HTLC-Successes can be aggregated into a single - // transaction. 
- user_cfg.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true; - user_cfg.manually_accept_inbound_channels = true; - - let node_chanmgrs = - create_node_chanmgrs(3, &node_cfgs, &[Some(user_cfg.clone()), Some(user_cfg.clone()), Some(user_cfg)]); - let nodes = create_network(3, &node_cfgs, &node_chanmgrs); - - // Since we're using anchor channels, make sure each node has a UTXO for paying fees. - let coinbase_tx = Transaction { - version: Version::TWO, - lock_time: LockTime::ZERO, - input: vec![TxIn { ..Default::default() }], - output: vec![ - TxOut { - value: Amount::ONE_BTC, - script_pubkey: nodes[0].wallet_source.get_change_script().unwrap(), - }, - TxOut { - value: Amount::ONE_BTC, - script_pubkey: nodes[1].wallet_source.get_change_script().unwrap(), - }, - TxOut { - value: Amount::ONE_BTC, - script_pubkey: nodes[2].wallet_source.get_change_script().unwrap(), - }, - ], - }; - nodes[0].wallet_source.add_utxo( - bitcoin::OutPoint { txid: coinbase_tx.compute_txid(), vout: 0 }, - coinbase_tx.output[0].value, - ); - nodes[1].wallet_source.add_utxo( - bitcoin::OutPoint { txid: coinbase_tx.compute_txid(), vout: 1 }, - coinbase_tx.output[1].value, - ); - nodes[2].wallet_source.add_utxo( - bitcoin::OutPoint { txid: coinbase_tx.compute_txid(), vout: 2 }, - coinbase_tx.output[2].value, - ); - - // Create the network. - // 0 -- 1 -- 2 - // - // Payments will be routed from node 0 to node 2. Node 2 will force close and spend HTLCs from - // two of node 1's packages. We will then verify that node 1 correctly removes the conflicting - // HTLC spends from its packages. - const CHAN_CAPACITY: u64 = 10_000_000; - create_announced_chan_between_nodes_with_value(&nodes, 0, 1, CHAN_CAPACITY, 0); - let (_, _, cid_1_2, funding_tx_1_2) = - create_announced_chan_between_nodes_with_value(&nodes, 1, 2, CHAN_CAPACITY, 0); - - // Ensure all nodes are at the same initial height. 
- let node_max_height = nodes.iter().map(|node| node.best_block_info().1).max().unwrap(); - for node in &nodes { - let blocks_to_mine = node_max_height - node.best_block_info().1; - if blocks_to_mine > 0 { - connect_blocks(node, blocks_to_mine); - } - } - - // Route HTLC 1. - let (preimage_1, payment_hash_1, ..) = - route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000); - - // Route HTLCs 2 and 3, with CLTVs 1 higher than HTLC 1. The higher CLTVs will cause these - // HTLCs to be included in a different package than HTLC 1. - connect_blocks(&nodes[0], 1); - connect_blocks(&nodes[1], 1); - connect_blocks(&nodes[2], 1); - let (preimage_2, payment_hash_2, ..) = - route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000); - route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 900_000_000); - - // Mine blocks until HTLC 1 times out in 1 block and HTLCs 2 and 3 time out in 2 blocks. - connect_blocks(&nodes[1], TEST_FINAL_CLTV - 1); - - // Node 2 force closes, causing node 1 to group the HTLCs into the following packages: - // Package 1: HTLC 1 - // Package 2: HTLCs 2 and 3 - let node2_commit_tx = get_local_commitment_txn!(nodes[2], cid_1_2); - assert_eq!(node2_commit_tx.len(), 1); - let node2_commit_tx = &node2_commit_tx[0]; - check_spends!(node2_commit_tx, funding_tx_1_2); - mine_transaction(&nodes[1], node2_commit_tx); - check_closed_event( - &nodes[1], - 1, - ClosureReason::CommitmentTxConfirmed, - false, - &[nodes[2].node.get_our_node_id()], - CHAN_CAPACITY, - ); - check_closed_broadcast!(nodes[1], true); - check_added_monitors(&nodes[1], 1); - - // Node 1 should immediately claim package 1 but has to wait a block to claim package 2. - let timeout_tx = nodes[1].tx_broadcaster.txn_broadcast(); - assert_eq!(timeout_tx.len(), 1); - check_spends!(timeout_tx[0], node2_commit_tx); - assert_eq!(timeout_tx[0].input.len(), 1); - - // After one block, node 1 should also attempt to claim package 2. 
- connect_blocks(&nodes[1], 1); - let timeout_tx = nodes[1].tx_broadcaster.txn_broadcast(); - assert_eq!(timeout_tx.len(), 1); - check_spends!(timeout_tx[0], node2_commit_tx); - assert_eq!(timeout_tx[0].input.len(), 2); + // After one block, node 1 should also attempt to claim package 2. + connect_blocks(&nodes[1], 1); + let timeout_tx = nodes[1].tx_broadcaster.txn_broadcast(); + assert_eq!(timeout_tx.len(), 1); + check_spends!(timeout_tx[0], node2_commit_tx); + assert_eq!(timeout_tx[0].input.len(), 2); // Force node 2 to broadcast an aggregated HTLC-Success transaction spending HTLCs 1 and 2. // This will conflict with both of node 1's HTLC packages. @@ -3101,7 +1620,7 @@ pub fn test_multiple_package_conflicts() { 1, ClosureReason::CommitmentTxConfirmed, false, - &[nodes[1].node.get_our_node_id()], + &[node_b_id], CHAN_CAPACITY, ); check_closed_broadcast!(nodes[2], true); @@ -3146,21 +1665,15 @@ pub fn test_multiple_package_conflicts() { // // Because two update_fulfill_htlc messages are created at once, the commitment_signed_dance // macro doesn't work properly and we must process the first update_fulfill_htlc manually. 
- let updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); + let updates = get_htlc_update_msgs(&nodes[1], &node_a_id); assert_eq!(updates.update_fulfill_htlcs.len(), 1); - nodes[0].node.handle_update_fulfill_htlc( - nodes[1].node.get_our_node_id(), - &updates.update_fulfill_htlcs[0], - ); - nodes[0] - .node - .handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &updates.commitment_signed); + nodes[0].node.handle_update_fulfill_htlc(node_b_id, &updates.update_fulfill_htlcs[0]); + nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &updates.commitment_signed); check_added_monitors(&nodes[0], 1); - let (revoke_ack, commit_signed) = - get_revoke_commit_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &revoke_ack); - nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &commit_signed); + let (revoke_ack, commit_signed) = get_revoke_commit_msgs(&nodes[0], &node_b_id); + nodes[1].node.handle_revoke_and_ack(node_a_id, &revoke_ack); + nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &commit_signed); check_added_monitors(&nodes[1], 4); let events = nodes[1].node.get_and_clear_pending_msg_events(); @@ -3169,7 +1682,7 @@ pub fn test_multiple_package_conflicts() { MessageSendEvent::SendRevokeAndACK { node_id: _, msg } => msg, _ => panic!("Unexpected event"), }; - nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), revoke_ack); + nodes[0].node.handle_revoke_and_ack(node_b_id, revoke_ack); expect_payment_sent!(nodes[0], preimage_1); let updates = match &events[0] { @@ -3177,10 +1690,7 @@ pub fn test_multiple_package_conflicts() { _ => panic!("Unexpected event"), }; assert_eq!(updates.update_fulfill_htlcs.len(), 1); - nodes[0].node.handle_update_fulfill_htlc( - nodes[1].node.get_our_node_id(), - &updates.update_fulfill_htlcs[0], - ); + nodes[0].node.handle_update_fulfill_htlc(node_b_id, 
&updates.update_fulfill_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, false); expect_payment_sent!(nodes[0], preimage_2); @@ -3231,22 +1741,29 @@ pub fn test_htlc_on_chain_success() { let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); let nodes = create_network(3, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + // Create some initial channels let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1); let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2); // Ensure all nodes are at the same height - let node_max_height = nodes.iter().map(|node| node.blocks.lock().unwrap().len()).max().unwrap() as u32; + let node_max_height = + nodes.iter().map(|node| node.blocks.lock().unwrap().len()).max().unwrap() as u32; connect_blocks(&nodes[0], node_max_height - nodes[0].best_block_info().1); connect_blocks(&nodes[1], node_max_height - nodes[1].best_block_info().1); connect_blocks(&nodes[2], node_max_height - nodes[2].best_block_info().1); // Rebalance the network a bit by relaying one payment through all the channels... - send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000); - send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000); + send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 8000000); + send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 8000000); - let (our_payment_preimage, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000); - let (our_payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000); + let (our_payment_preimage, payment_hash_1, ..) = + route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000); + let (our_payment_preimage_2, payment_hash_2, ..) 
= + route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000); // Broadcast legit commitment tx from C on B's chain // Broadcast HTLC Success transaction by C on received output from C's commitment tx on B's chain @@ -3257,8 +1774,8 @@ pub fn test_htlc_on_chain_success() { expect_payment_claimed!(nodes[2], payment_hash_1, 3_000_000); nodes[2].node.claim_funds(our_payment_preimage_2); expect_payment_claimed!(nodes[2], payment_hash_2, 3_000_000); - check_added_monitors!(nodes[2], 2); - let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); + check_added_monitors(&nodes[2], 2); + let updates = get_htlc_update_msgs!(nodes[2], node_b_id); assert!(updates.update_add_htlcs.is_empty()); assert!(updates.update_fail_htlcs.is_empty()); assert!(updates.update_fail_malformed_htlcs.is_empty()); @@ -3266,21 +1783,28 @@ pub fn test_htlc_on_chain_success() { mine_transaction(&nodes[2], &commitment_tx[0]); check_closed_broadcast!(nodes[2], true); - check_added_monitors!(nodes[2], 1); - check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000); + check_added_monitors(&nodes[2], 1); + check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [node_b_id], 100000); let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelMonitor: 2 (2 * HTLC-Success tx) assert_eq!(node_txn.len(), 2); check_spends!(node_txn[0], commitment_tx[0]); check_spends!(node_txn[1], commitment_tx[0]); - assert_eq!(node_txn[0].input[0].witness.clone().last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT); - assert_eq!(node_txn[1].input[0].witness.clone().last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT); + assert_eq!( + node_txn[0].input[0].witness.clone().last().unwrap().len(), + ACCEPTED_HTLC_SCRIPT_WEIGHT + ); + assert_eq!( + node_txn[1].input[0].witness.clone().last().unwrap().len(), + ACCEPTED_HTLC_SCRIPT_WEIGHT + ); assert!(node_txn[0].output[0].script_pubkey.is_p2wsh()); // 
revokeable output assert!(node_txn[1].output[0].script_pubkey.is_p2wsh()); // revokeable output assert_eq!(node_txn[0].lock_time, LockTime::ZERO); assert_eq!(node_txn[1].lock_time, LockTime::ZERO); // Verify that B's ChannelManager is able to extract preimage from HTLC Success tx and pass it backward - connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![commitment_tx[0].clone(), node_txn[0].clone(), node_txn[1].clone()])); + let txn = vec![commitment_tx[0].clone(), node_txn[0].clone(), node_txn[1].clone()]; + connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, txn)); connect_blocks(&nodes[1], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires { let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap(); @@ -3291,13 +1815,18 @@ pub fn test_htlc_on_chain_success() { let forwarded_events = nodes[1].node.get_and_clear_pending_events(); assert_eq!(forwarded_events.len(), 3); match forwarded_events[0] { - Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {} + Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}, _ => panic!("Unexpected event"), } let chan_id = Some(chan_1.2); match forwarded_events[1] { - Event::PaymentForwarded { total_fee_earned_msat, prev_channel_id, claim_from_onchain_tx, - next_channel_id, outbound_amount_forwarded_msat, .. + Event::PaymentForwarded { + total_fee_earned_msat, + prev_channel_id, + claim_from_onchain_tx, + next_channel_id, + outbound_amount_forwarded_msat, + .. 
} => { assert_eq!(total_fee_earned_msat, Some(1000)); assert_eq!(prev_channel_id, chan_id); @@ -3305,11 +1834,16 @@ pub fn test_htlc_on_chain_success() { assert_eq!(next_channel_id, Some(chan_2.2)); assert_eq!(outbound_amount_forwarded_msat, Some(3000000)); }, - _ => panic!() + _ => panic!(), } match forwarded_events[2] { - Event::PaymentForwarded { total_fee_earned_msat, prev_channel_id, claim_from_onchain_tx, - next_channel_id, outbound_amount_forwarded_msat, .. + Event::PaymentForwarded { + total_fee_earned_msat, + prev_channel_id, + claim_from_onchain_tx, + next_channel_id, + outbound_amount_forwarded_msat, + .. } => { assert_eq!(total_fee_earned_msat, Some(1000)); assert_eq!(prev_channel_id, chan_id); @@ -3317,7 +1851,7 @@ pub fn test_htlc_on_chain_success() { assert_eq!(next_channel_id, Some(chan_2.2)); assert_eq!(outbound_amount_forwarded_msat, Some(3000000)); }, - _ => panic!() + _ => panic!(), } let mut events = nodes[1].node.get_and_clear_pending_msg_events(); { @@ -3329,21 +1863,32 @@ pub fn test_htlc_on_chain_success() { } assert_eq!(events.len(), 3); - let nodes_2_event = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events); - let nodes_0_event = remove_first_msg_event_to_node(&nodes[0].node.get_our_node_id(), &mut events); + let nodes_2_event = remove_first_msg_event_to_node(&node_c_id, &mut events); + let nodes_0_event = remove_first_msg_event_to_node(&node_a_id, &mut events); match nodes_2_event { - MessageSendEvent::HandleError { action: ErrorAction::DisconnectPeer { .. }, node_id: _ } => {}, + MessageSendEvent::HandleError { action: ErrorAction::DisconnectPeer { .. }, .. } => {}, _ => panic!("Unexpected event"), } match nodes_0_event { - MessageSendEvent::UpdateHTLCs { ref node_id, channel_id: _, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, .. 
} } => { + MessageSendEvent::UpdateHTLCs { + ref node_id, + updates: + msgs::CommitmentUpdate { + ref update_add_htlcs, + ref update_fail_htlcs, + ref update_fulfill_htlcs, + ref update_fail_malformed_htlcs, + .. + }, + .. + } => { assert!(update_add_htlcs.is_empty()); assert!(update_fail_htlcs.is_empty()); assert_eq!(update_fulfill_htlcs.len(), 1); assert!(update_fail_malformed_htlcs.is_empty()); - assert_eq!(nodes[0].node.get_our_node_id(), *node_id); + assert_eq!(node_a_id, *node_id); }, _ => panic!("Unexpected event"), }; @@ -3355,7 +1900,7 @@ pub fn test_htlc_on_chain_success() { } macro_rules! check_tx_local_broadcast { - ($node: expr, $htlc_offered: expr, $commitment_tx: expr) => { { + ($node: expr, $htlc_offered: expr, $commitment_tx: expr) => {{ let mut node_txn = $node.tx_broadcaster.txn_broadcasted.lock().unwrap(); // HTLC timeout claims for non-anchor channels are only aggregated when claimed from the // remote commitment transaction. @@ -3364,20 +1909,32 @@ pub fn test_htlc_on_chain_success() { for tx in node_txn.iter() { check_spends!(tx, $commitment_tx); assert_ne!(tx.lock_time, LockTime::ZERO); - assert_eq!(tx.input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); + assert_eq!( + tx.input[0].witness.last().unwrap().len(), + OFFERED_HTLC_SCRIPT_WEIGHT + ); assert!(tx.output[0].script_pubkey.is_p2wsh()); // revokeable output } - assert_ne!(node_txn[0].input[0].previous_output, node_txn[1].input[0].previous_output); + assert_ne!( + node_txn[0].input[0].previous_output, + node_txn[1].input[0].previous_output + ); } else { assert_eq!(node_txn.len(), 1); check_spends!(node_txn[0], $commitment_tx); assert_ne!(node_txn[0].lock_time, LockTime::ZERO); - assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT); + assert_eq!( + node_txn[0].input[0].witness.last().unwrap().len(), + ACCEPTED_HTLC_SCRIPT_WEIGHT + ); assert!(node_txn[0].output[0].script_pubkey.is_p2wpkh()); // direct payment - 
assert_ne!(node_txn[0].input[0].previous_output, node_txn[0].input[1].previous_output); + assert_ne!( + node_txn[0].input[0].previous_output, + node_txn[0].input[1].previous_output + ); } node_txn.clear(); - } } + }}; } // nodes[1] now broadcasts its own timeout-claim of the output that nodes[2] just claimed via success. check_tx_local_broadcast!(nodes[1], false, commitment_tx[0]); @@ -3388,25 +1945,24 @@ pub fn test_htlc_on_chain_success() { check_spends!(node_a_commitment_tx[0], chan_1.3); mine_transaction(&nodes[1], &node_a_commitment_tx[0]); check_closed_broadcast!(nodes[1], true); - check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000); + check_added_monitors(&nodes[1], 1); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [node_a_id], 100000); let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); assert!(node_txn.len() == 1 || node_txn.len() == 2); // HTLC-Success, RBF bump of above aggregated HTLC txn - let commitment_spend = - if node_txn.len() == 1 { + let commitment_spend = if node_txn.len() == 1 { + &node_txn[0] + } else { + // Certain `ConnectStyle`s will cause RBF bumps of the previous HTLC transaction to be broadcast. + // FullBlockViaListen + assert_ne!(node_txn[0].input[0].previous_output, node_txn[1].input[0].previous_output); + if node_txn[0].input[0].previous_output.txid == node_a_commitment_tx[0].compute_txid() { + check_spends!(node_txn[1], commitment_tx[0]); &node_txn[0] } else { - // Certain `ConnectStyle`s will cause RBF bumps of the previous HTLC transaction to be broadcast. 
- // FullBlockViaListen - assert_ne!(node_txn[0].input[0].previous_output, node_txn[1].input[0].previous_output); - if node_txn[0].input[0].previous_output.txid == node_a_commitment_tx[0].compute_txid() { - check_spends!(node_txn[1], commitment_tx[0]); - &node_txn[0] - } else { - check_spends!(node_txn[0], commitment_tx[0]); - &node_txn[1] - } - }; + check_spends!(node_txn[0], commitment_tx[0]); + &node_txn[1] + } + }; check_spends!(commitment_spend, node_a_commitment_tx[0]); assert_eq!(commitment_spend.input.len(), 2); @@ -3414,14 +1970,16 @@ pub fn test_htlc_on_chain_success() { assert_eq!(commitment_spend.input[1].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); assert_eq!(commitment_spend.lock_time.to_consensus_u32(), nodes[1].best_block_info().1); assert!(commitment_spend.output[0].script_pubkey.is_p2wpkh()); // direct payment + // We don't bother to check that B can claim the HTLC output on its commitment tx here as // we already checked the same situation with A. // Verify that A's ChannelManager is able to extract preimage from preimage tx and generate PaymentSent - connect_block(&nodes[0], &create_dummy_block(nodes[0].best_block_hash(), 42, vec![node_a_commitment_tx[0].clone(), commitment_spend.clone()])); + let txn = vec![node_a_commitment_tx[0].clone(), commitment_spend.clone()]; + connect_block(&nodes[0], &create_dummy_block(nodes[0].best_block_hash(), 42, txn)); connect_blocks(&nodes[0], TEST_FINAL_CLTV + MIN_CLTV_EXPIRY_DELTA as u32); // Confirm blocks until the HTLC expires check_closed_broadcast!(nodes[0], true); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let events = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events.len(), 5); let mut first_claimed = false; @@ -3457,6 +2015,11 @@ fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) { let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); let mut nodes = 
create_network(3, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + *nodes[0].connect_style.borrow_mut() = connect_style; *nodes[1].connect_style.borrow_mut() = connect_style; *nodes[2].connect_style.borrow_mut() = connect_style; @@ -3466,56 +2029,79 @@ fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) { let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2); // Rebalance the network a bit by relaying one payment thorugh all the channels... - send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000); - send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000); + send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 8000000); + send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 8000000); - let (_payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), 3000000); + let (_payment_preimage, payment_hash, ..) 
= + route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3000000); // Broadcast legit commitment tx from C on B's chain let commitment_tx = get_local_commitment_txn!(nodes[2], chan_2.2); check_spends!(commitment_tx[0], chan_2.3); nodes[2].node.fail_htlc_backwards(&payment_hash); - check_added_monitors!(nodes[2], 0); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCHandlingFailureType::Receive { payment_hash: payment_hash.clone() }]); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 0); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!( + nodes[2], + vec![HTLCHandlingFailureType::Receive { payment_hash: payment_hash.clone() }] + ); + check_added_monitors(&nodes[2], 1); let events = nodes[2].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); match events[0] { - MessageSendEvent::UpdateHTLCs { ref node_id, channel_id: _, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, .. } } => { + MessageSendEvent::UpdateHTLCs { + ref node_id, + updates: + msgs::CommitmentUpdate { + ref update_add_htlcs, + ref update_fulfill_htlcs, + ref update_fail_htlcs, + ref update_fail_malformed_htlcs, + .. + }, + .. 
+ } => { assert!(update_add_htlcs.is_empty()); assert!(!update_fail_htlcs.is_empty()); assert!(update_fulfill_htlcs.is_empty()); assert!(update_fail_malformed_htlcs.is_empty()); - assert_eq!(nodes[1].node.get_our_node_id(), *node_id); + assert_eq!(node_b_id, *node_id); }, _ => panic!("Unexpected event"), }; mine_transaction(&nodes[2], &commitment_tx[0]); check_closed_broadcast!(nodes[2], true); - check_added_monitors!(nodes[2], 1); - check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000); + check_added_monitors(&nodes[2], 1); + check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [node_b_id], 100000); let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); assert_eq!(node_txn.len(), 0); // Broadcast timeout transaction by B on received output from C's commitment tx on B's chain // Verify that B's ChannelManager is able to detect that HTLC is timeout by its own tx and react backward in consequence mine_transaction(&nodes[1], &commitment_tx[0]); - check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, false, - &[nodes[2].node.get_our_node_id()], 100000); - let htlc_expiry = get_monitor!(nodes[1], chan_2.2).get_claimable_balances().iter().filter_map(|bal| + let reason = ClosureReason::CommitmentTxConfirmed; + check_closed_event(&nodes[1], 1, reason, false, &[node_c_id], 100000); + let htlc_expiry = get_monitor!(nodes[1], chan_2.2) + .get_claimable_balances() + .iter() + .filter_map(|bal| { if let Balance::MaybeTimeoutClaimableHTLC { claimable_height, .. 
} = bal { Some(*claimable_height) } else { None } - ).next().unwrap(); + }) + .next() + .unwrap(); connect_blocks(&nodes[1], htlc_expiry - nodes[1].best_block_info().1); let timeout_tx = { let mut txn = nodes[1].tx_broadcaster.txn_broadcast(); assert_eq!(txn.len(), 1); txn.iter().for_each(|tx| check_spends!(tx, commitment_tx[0])); - assert_eq!(txn[0].clone().input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT); + assert_eq!( + txn[0].clone().input[0].witness.last().unwrap().len(), + ACCEPTED_HTLC_SCRIPT_WEIGHT + ); txn.remove(0) }; @@ -3526,22 +2112,36 @@ fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) { assert_eq!(nodes[1].tx_broadcaster.txn_broadcast().len(), 0); mine_transaction(&nodes[1], &timeout_tx); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); check_closed_broadcast!(nodes[1], true); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingFailureType::Forward { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]); - check_added_monitors!(nodes[1], 1); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!( + nodes[1], + vec![HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_2.2 }] + ); + check_added_monitors(&nodes[1], 1); let events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); match events[0] { - MessageSendEvent::UpdateHTLCs { ref node_id, channel_id: _, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, .. } } => { + MessageSendEvent::UpdateHTLCs { + ref node_id, + updates: + msgs::CommitmentUpdate { + ref update_add_htlcs, + ref update_fail_htlcs, + ref update_fulfill_htlcs, + ref update_fail_malformed_htlcs, + .. + }, + .. 
+ } => { assert!(update_add_htlcs.is_empty()); assert!(!update_fail_htlcs.is_empty()); assert!(update_fulfill_htlcs.is_empty()); assert!(update_fail_malformed_htlcs.is_empty()); - assert_eq!(nodes[0].node.get_our_node_id(), *node_id); + assert_eq!(node_a_id, *node_id); }, _ => panic!("Unexpected event"), }; @@ -3554,12 +2154,15 @@ fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) { connect_blocks(&nodes[0], TEST_FINAL_CLTV + MIN_CLTV_EXPIRY_DELTA as u32); // Confirm blocks until the HTLC expires check_closed_broadcast!(nodes[0], true); - check_added_monitors!(nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000); + check_added_monitors(&nodes[0], 1); + check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [node_b_id], 100000); let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // 1 timeout tx assert_eq!(node_txn.len(), 1); check_spends!(node_txn[0], commitment_tx[0]); - assert_eq!(node_txn[0].clone().input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT + 1); + assert_eq!( + node_txn[0].clone().input[0].witness.last().unwrap().len(), + ACCEPTED_HTLC_SCRIPT_WEIGHT + 1 + ); } #[xtest(feature = "_externalize_tests")] @@ -3579,11 +2182,16 @@ pub fn test_simple_commitment_revoked_fail_backward() { let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); let nodes = create_network(3, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + // Create some initial channels create_announced_chan_between_nodes(&nodes, 0, 1); let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2); - let (payment_preimage, _payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3000000); + let (payment_preimage, _payment_hash, ..) 
= + route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3000000); // Get the will-be-revoked local txn from nodes[2] let revoked_local_txn = get_local_commitment_txn!(nodes[2], chan_2.2); // Revoke the old state @@ -3592,32 +2200,50 @@ pub fn test_simple_commitment_revoked_fail_backward() { let (_, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3000000); mine_transaction(&nodes[1], &revoked_local_txn[0]); - check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[2].node.get_our_node_id()], 100000); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [node_c_id], 100000); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); check_closed_broadcast!(nodes[1], true); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingFailureType::Forward { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]); - check_added_monitors!(nodes[1], 1); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!( + nodes[1], + vec![HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_2.2 }] + ); + check_added_monitors(&nodes[1], 1); let events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); match events[0] { - MessageSendEvent::UpdateHTLCs { ref node_id, channel_id: _, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, ref commitment_signed, .. } } => { + MessageSendEvent::UpdateHTLCs { + ref node_id, + updates: + msgs::CommitmentUpdate { + ref update_add_htlcs, + ref update_fail_htlcs, + ref update_fulfill_htlcs, + ref update_fail_malformed_htlcs, + ref commitment_signed, + .. + }, + .. 
+ } => { assert!(update_add_htlcs.is_empty()); assert_eq!(update_fail_htlcs.len(), 1); assert!(update_fulfill_htlcs.is_empty()); assert!(update_fail_malformed_htlcs.is_empty()); - assert_eq!(nodes[0].node.get_our_node_id(), *node_id); + assert_eq!(node_a_id, *node_id); - nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &update_fail_htlcs[0]); + nodes[0].node.handle_update_fail_htlc(node_b_id, &update_fail_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false, true); - expect_payment_failed_with_update!(nodes[0], payment_hash, false, chan_2.0.contents.short_channel_id, true); + let scid = chan_2.0.contents.short_channel_id; + expect_payment_failed_with_update!(nodes[0], payment_hash, false, scid, true); }, _ => panic!("Unexpected event"), } } -fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use_dust: bool, no_to_remote: bool) { +fn do_test_commitment_revoked_fail_backward_exhaustive( + deliver_bs_raa: bool, use_dust: bool, no_to_remote: bool, +) { // Test that if our counterparty broadcasts a revoked commitment transaction we fail all // pending HTLCs on that channel backwards even if the HTLCs aren't present in our latest // commitment transaction anymore. @@ -3638,11 +2264,17 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + // Create some initial channels create_announced_chan_between_nodes(&nodes, 0, 1); let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2); - let (payment_preimage, _payment_hash, ..) 
= route_payment(&nodes[0], &[&nodes[1], &nodes[2]], if no_to_remote { 10_000 } else { 3_000_000 }); + let amt = if no_to_remote { 10_000 } else { 3_000_000 }; + let (payment_preimage, _payment_hash, ..) = + route_payment(&nodes[0], &[&nodes[1], &nodes[2]], amt); // Get the will-be-revoked local txn from nodes[2] let revoked_local_txn = get_local_commitment_txn!(nodes[2], chan_2.2); assert_eq!(revoked_local_txn[0].output.len(), if no_to_remote { 1 } else { 2 }); @@ -3652,85 +2284,102 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use let value = if use_dust { // The dust limit applied to HTLC outputs considers the fee of the HTLC transaction as // well, so HTLCs at exactly the dust limit will not be included in commitment txn. - nodes[2].node.per_peer_state.read().unwrap().get(&nodes[1].node.get_our_node_id()) - .unwrap().lock().unwrap().channel_by_id.get(&chan_2.2).unwrap().context().holder_dust_limit_satoshis * 1000 - } else { 3000000 }; + let per_peer_state_lock; + let mut peer_state_lock; + let chan = + get_channel_ref!(nodes[2], nodes[1], per_peer_state_lock, peer_state_lock, chan_2.2); + chan.context().holder_dust_limit_satoshis * 1000 + } else { + 3000000 + }; let (_, first_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value); let (_, second_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value); let (_, third_payment_hash, ..) 
= route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value); nodes[2].node.fail_htlc_backwards(&first_payment_hash); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCHandlingFailureType::Receive { payment_hash: first_payment_hash }]); - check_added_monitors!(nodes[2], 1); - let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!( + nodes[2], + vec![HTLCHandlingFailureType::Receive { payment_hash: first_payment_hash }] + ); + check_added_monitors(&nodes[2], 1); + let updates = get_htlc_update_msgs!(nodes[2], node_b_id); assert!(updates.update_add_htlcs.is_empty()); assert!(updates.update_fulfill_htlcs.is_empty()); assert!(updates.update_fail_malformed_htlcs.is_empty()); assert_eq!(updates.update_fail_htlcs.len(), 1); assert!(updates.update_fee.is_none()); - nodes[1].node.handle_update_fail_htlc(nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]); - let bs_raa = commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false, true, false, true); + nodes[1].node.handle_update_fail_htlc(node_c_id, &updates.update_fail_htlcs[0]); + let cs = updates.commitment_signed; + let bs_raa = commitment_signed_dance!(nodes[1], nodes[2], cs, false, true, false, true); // Drop the last RAA from 3 -> 2 nodes[2].node.fail_htlc_backwards(&second_payment_hash); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCHandlingFailureType::Receive { payment_hash: second_payment_hash }]); - check_added_monitors!(nodes[2], 1); - let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!( + nodes[2], + vec![HTLCHandlingFailureType::Receive { payment_hash: second_payment_hash }] + ); + check_added_monitors(&nodes[2], 1); + let updates = get_htlc_update_msgs!(nodes[2], node_b_id); assert!(updates.update_add_htlcs.is_empty()); 
assert!(updates.update_fulfill_htlcs.is_empty()); assert!(updates.update_fail_malformed_htlcs.is_empty()); assert_eq!(updates.update_fail_htlcs.len(), 1); assert!(updates.update_fee.is_none()); - nodes[1].node.handle_update_fail_htlc(nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]); - nodes[1].node.handle_commitment_signed_batch_test(nodes[2].node.get_our_node_id(), &updates.commitment_signed); - check_added_monitors!(nodes[1], 1); + nodes[1].node.handle_update_fail_htlc(node_c_id, &updates.update_fail_htlcs[0]); + nodes[1].node.handle_commitment_signed_batch_test(node_c_id, &updates.commitment_signed); + check_added_monitors(&nodes[1], 1); // Note that nodes[1] is in AwaitingRAA, so won't send a CS - let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id()); - nodes[2].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &as_raa); - check_added_monitors!(nodes[2], 1); + let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_c_id); + nodes[2].node.handle_revoke_and_ack(node_b_id, &as_raa); + check_added_monitors(&nodes[2], 1); nodes[2].node.fail_htlc_backwards(&third_payment_hash); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCHandlingFailureType::Receive { payment_hash: third_payment_hash }]); - check_added_monitors!(nodes[2], 1); - let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!( + nodes[2], + vec![HTLCHandlingFailureType::Receive { payment_hash: third_payment_hash }] + ); + check_added_monitors(&nodes[2], 1); + let updates = get_htlc_update_msgs!(nodes[2], node_b_id); assert!(updates.update_add_htlcs.is_empty()); assert!(updates.update_fulfill_htlcs.is_empty()); assert!(updates.update_fail_malformed_htlcs.is_empty()); assert_eq!(updates.update_fail_htlcs.len(), 1); assert!(updates.update_fee.is_none()); - 
nodes[1].node.handle_update_fail_htlc(nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]); + nodes[1].node.handle_update_fail_htlc(node_c_id, &updates.update_fail_htlcs[0]); // At this point first_payment_hash has dropped out of the latest two commitment // transactions that nodes[1] is tracking... - nodes[1].node.handle_commitment_signed_batch_test(nodes[2].node.get_our_node_id(), &updates.commitment_signed); - check_added_monitors!(nodes[1], 1); + nodes[1].node.handle_commitment_signed_batch_test(node_c_id, &updates.commitment_signed); + check_added_monitors(&nodes[1], 1); // Note that nodes[1] is (still) in AwaitingRAA, so won't send a CS - let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id()); - nodes[2].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &as_raa); - check_added_monitors!(nodes[2], 1); + let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_c_id); + nodes[2].node.handle_revoke_and_ack(node_b_id, &as_raa); + check_added_monitors(&nodes[2], 1); // Add a fourth HTLC, this one will get sequestered away in nodes[1]'s holding cell waiting // on nodes[2]'s RAA. 
- let (route, fourth_payment_hash, _, fourth_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[2], 1000000); - nodes[1].node.send_payment_with_route(route, fourth_payment_hash, - RecipientOnionFields::secret_only(fourth_payment_secret), PaymentId(fourth_payment_hash.0)).unwrap(); + let (route, fourth_payment_hash, _, fourth_payment_secret) = + get_route_and_payment_hash!(nodes[1], nodes[2], 1000000); + let onion = RecipientOnionFields::secret_only(fourth_payment_secret); + let id = PaymentId(fourth_payment_hash.0); + nodes[1].node.send_payment_with_route(route, fourth_payment_hash, onion, id).unwrap(); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); if deliver_bs_raa { - nodes[1].node.handle_revoke_and_ack(nodes[2].node.get_our_node_id(), &bs_raa); + nodes[1].node.handle_revoke_and_ack(node_c_id, &bs_raa); // One monitor for the new revocation preimage, no second on as we won't generate a new // commitment transaction for nodes[0] until process_pending_htlc_forwards(). - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let events = nodes[1].node.get_and_clear_pending_events(); assert_eq!(events.len(), 2); match events[0] { - Event::HTLCHandlingFailed { .. } => { }, + Event::HTLCHandlingFailed { .. } => {}, _ => panic!("Unexpected event"), } match events[1] { - Event::PendingHTLCsForwardable { .. } => { }, + Event::PendingHTLCsForwardable { .. 
} => {}, _ => panic!("Unexpected event"), }; // Deliberately don't process the pending fail-back so they all fail back at once after @@ -3741,7 +2390,7 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); mine_transaction(&nodes[1], &revoked_local_txn[0]); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); let events = nodes[1].node.get_and_clear_pending_events(); @@ -3760,16 +2409,27 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use ))); nodes[1].node.process_pending_htlc_forwards(); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let mut events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), if deliver_bs_raa { 4 } else { 3 }); if deliver_bs_raa { - let nodes_2_event = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events); + let nodes_2_event = remove_first_msg_event_to_node(&node_c_id, &mut events); match nodes_2_event { - MessageSendEvent::UpdateHTLCs { ref node_id, channel_id: _, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, .. } } => { - assert_eq!(nodes[2].node.get_our_node_id(), *node_id); + MessageSendEvent::UpdateHTLCs { + ref node_id, + updates: + msgs::CommitmentUpdate { + ref update_add_htlcs, + ref update_fail_htlcs, + ref update_fulfill_htlcs, + ref update_fail_malformed_htlcs, + .. + }, + .. 
+ } => { + assert_eq!(node_c_id, *node_id); assert_eq!(update_add_htlcs.len(), 1); assert!(update_fulfill_htlcs.is_empty()); assert!(update_fail_htlcs.is_empty()); @@ -3779,27 +2439,46 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use } } - let nodes_2_event = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events); + let nodes_2_event = remove_first_msg_event_to_node(&node_c_id, &mut events); match nodes_2_event { - MessageSendEvent::HandleError { action: ErrorAction::DisconnectPeer { msg: Some(msgs::ErrorMessage { channel_id, ref data }) }, node_id: _ } => { + MessageSendEvent::HandleError { + action: + ErrorAction::DisconnectPeer { msg: Some(msgs::ErrorMessage { channel_id, ref data }) }, + .. + } => { assert_eq!(channel_id, chan_2.2); - assert_eq!(data.as_str(), "Channel closed because commitment or closing transaction was confirmed on chain."); + assert_eq!( + data.as_str(), + "Channel closed because commitment or closing transaction was confirmed on chain." + ); }, _ => panic!("Unexpected event"), } - let nodes_0_event = remove_first_msg_event_to_node(&nodes[0].node.get_our_node_id(), &mut events); + let nodes_0_event = remove_first_msg_event_to_node(&node_a_id, &mut events); match nodes_0_event { - MessageSendEvent::UpdateHTLCs { ref node_id, channel_id: _, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, ref commitment_signed, .. } } => { + MessageSendEvent::UpdateHTLCs { + ref node_id, + updates: + msgs::CommitmentUpdate { + ref update_add_htlcs, + ref update_fail_htlcs, + ref update_fulfill_htlcs, + ref update_fail_malformed_htlcs, + ref commitment_signed, + .. + }, + .. 
+ } => { assert!(update_add_htlcs.is_empty()); assert_eq!(update_fail_htlcs.len(), 3); assert!(update_fulfill_htlcs.is_empty()); assert!(update_fail_malformed_htlcs.is_empty()); - assert_eq!(nodes[0].node.get_our_node_id(), *node_id); + assert_eq!(node_a_id, *node_id); - nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &update_fail_htlcs[0]); - nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &update_fail_htlcs[1]); - nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &update_fail_htlcs[2]); + nodes[0].node.handle_update_fail_htlc(node_b_id, &update_fail_htlcs[0]); + nodes[0].node.handle_update_fail_htlc(node_b_id, &update_fail_htlcs[1]); + nodes[0].node.handle_update_fail_htlc(node_b_id, &update_fail_htlcs[2]); commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false, true); @@ -3811,7 +2490,10 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use // If we delivered B's RAA we got an unknown preimage error, not something // that we should update our routing table for. if !deliver_bs_raa { - if let PathFailure::OnPath { network_update: Some(_) } = failure { } else { panic!("Unexpected path failure") } + if let PathFailure::OnPath { network_update: Some(_) } = failure { + } else { + panic!("Unexpected path failure") + } } }, _ => panic!("Unexpected event"), @@ -3823,7 +2505,11 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use _ => panic!("Unexpected event"), } match events[2] { - Event::PaymentPathFailed { ref payment_hash, failure: PathFailure::OnPath { network_update: Some(_) }, .. } => { + Event::PaymentPathFailed { + ref payment_hash, + failure: PathFailure::OnPath { network_update: Some(_) }, + .. 
+ } => { assert!(failed_htlcs.insert(payment_hash.0)); }, _ => panic!("Unexpected event"), @@ -3835,7 +2521,11 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use _ => panic!("Unexpected event"), } match events[4] { - Event::PaymentPathFailed { ref payment_hash, failure: PathFailure::OnPath { network_update: Some(_) }, .. } => { + Event::PaymentPathFailed { + ref payment_hash, + failure: PathFailure::OnPath { network_update: Some(_) }, + .. + } => { assert!(failed_htlcs.insert(payment_hash.0)); }, _ => panic!("Unexpected event"), @@ -3883,46 +2573,65 @@ pub fn fail_backward_pending_htlc_upon_channel_failure() { let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_b_id = nodes[1].node.get_our_node_id(); + let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 500_000_000); // Alice -> Bob: Route a payment but without Bob sending revoke_and_ack. 
{ - let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 50_000); - nodes[0].node.send_payment_with_route(route, payment_hash, RecipientOnionFields::secret_only(payment_secret), - PaymentId(payment_hash.0)).unwrap(); - check_added_monitors!(nodes[0], 1); + let (route, payment_hash, _, payment_secret) = + get_route_and_payment_hash!(nodes[0], nodes[1], 50_000); + let onion = RecipientOnionFields::secret_only(payment_secret); + let id = PaymentId(payment_hash.0); + nodes[0].node.send_payment_with_route(route, payment_hash, onion, id).unwrap(); + check_added_monitors(&nodes[0], 1); let payment_event = { let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); SendEvent::from_event(events.remove(0)) }; - assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id()); + assert_eq!(payment_event.node_id, node_b_id); assert_eq!(payment_event.msgs.len(), 1); } // Alice -> Bob: Route another payment but now Alice waits for Bob's earlier revoke_and_ack. - let (route, failed_payment_hash, _, failed_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 50_000); + let (route, failed_payment_hash, _, failed_payment_secret) = + get_route_and_payment_hash!(nodes[0], nodes[1], 50_000); { - nodes[0].node.send_payment_with_route(route, failed_payment_hash, - RecipientOnionFields::secret_only(failed_payment_secret), PaymentId(failed_payment_hash.0)).unwrap(); - check_added_monitors!(nodes[0], 0); + let onion = RecipientOnionFields::secret_only(failed_payment_secret); + let id = PaymentId(failed_payment_hash.0); + nodes[0].node.send_payment_with_route(route, failed_payment_hash, onion, id).unwrap(); + check_added_monitors(&nodes[0], 0); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); } // Alice <- Bob: Send a malformed update_add_htlc so Alice fails the channel. 
{ - let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 50_000); + let (route, payment_hash, _, payment_secret) = + get_route_and_payment_hash!(nodes[1], nodes[0], 50_000); let secp_ctx = Secp256k1::new(); let session_priv = SecretKey::from_slice(&[42; 32]).unwrap(); let current_height = nodes[1].node.best_block.read().unwrap().height + 1; let recipient_onion_fields = RecipientOnionFields::secret_only(payment_secret); let (onion_payloads, _amount_msat, cltv_expiry) = onion_utils::build_onion_payloads( - &route.paths[0], 50_000, &recipient_onion_fields, current_height, &None, None, None).unwrap(); - let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route.paths[0], &session_priv); - let onion_routing_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash).unwrap(); + &route.paths[0], + 50_000, + &recipient_onion_fields, + current_height, + &None, + None, + None, + ) + .unwrap(); + let onion_keys = + onion_utils::construct_onion_keys(&secp_ctx, &route.paths[0], &session_priv); + let onion_routing_packet = + onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash) + .unwrap(); // Send a 0-msat update_add_htlc to fail the channel. 
let update_add_htlc = msgs::UpdateAddHTLC { @@ -3935,7 +2644,7 @@ pub fn fail_backward_pending_htlc_upon_channel_failure() { skimmed_fee_msat: None, blinding_point: None, }; - nodes[0].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &update_add_htlc); + nodes[0].node.handle_update_add_htlc(node_b_id, &update_add_htlc); } let events = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events.len(), 3); @@ -3959,7 +2668,7 @@ pub fn fail_backward_pending_htlc_upon_channel_failure() { _ => panic!("Unexpected event {:?}", events[1]), } check_closed_broadcast!(nodes[0], true); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); } #[xtest(feature = "_externalize_tests")] @@ -3970,6 +2679,10 @@ pub fn test_htlc_ignore_latest_remote_commitment() { let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + if *nodes[1].connect_style.borrow() == ConnectStyle::FullBlockViaListen { // We rely on the ability to connect a block redundantly, which isn't allowed via // `chain::Listen`, so we never run the test if we randomly get assigned that @@ -3979,11 +2692,19 @@ pub fn test_htlc_ignore_latest_remote_commitment() { let funding_tx = create_announced_chan_between_nodes(&nodes, 0, 1).3; let error_message = "Channel force-closed"; route_payment(&nodes[0], &[&nodes[1]], 10000000); - nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap(); + nodes[0] + .node + .force_close_broadcasting_latest_txn( + &nodes[0].node.list_channels()[0].channel_id, + &node_b_id, + error_message.to_string(), + ) + .unwrap(); connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1); 
check_closed_broadcast!(nodes[0], true); - check_added_monitors!(nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[1].node.get_our_node_id()], 100000); + check_added_monitors(&nodes[0], 1); + let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }; + check_closed_event!(nodes[0], 1, reason, [node_b_id], 100000); let node_txn = nodes[0].tx_broadcaster.unique_txn_broadcast(); assert_eq!(node_txn.len(), 2); @@ -3993,8 +2714,8 @@ pub fn test_htlc_ignore_latest_remote_commitment() { let block = create_dummy_block(nodes[1].best_block_hash(), 42, vec![node_txn[0].clone()]); connect_block(&nodes[1], &block); check_closed_broadcast!(nodes[1], true); - check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000); + check_added_monitors(&nodes[1], 1); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [node_a_id], 100000); // Duplicate the connect_block call since this may happen due to other listeners // registering new transactions @@ -4008,22 +2729,29 @@ pub fn test_force_close_fail_back() { let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + create_announced_chan_between_nodes(&nodes, 0, 1); create_announced_chan_between_nodes(&nodes, 1, 2); - let (route, our_payment_hash, our_payment_preimage, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], 1000000); + let (route, our_payment_hash, our_payment_preimage, our_payment_secret) = + get_route_and_payment_hash!(nodes[0], nodes[2], 1000000); let mut payment_event = { - 
nodes[0].node.send_payment_with_route(route, our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap(); - check_added_monitors!(nodes[0], 1); + let onion = RecipientOnionFields::secret_only(our_payment_secret); + let id = PaymentId(our_payment_hash.0); + nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id).unwrap(); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); SendEvent::from_event(events.remove(0)) }; - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); + nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); expect_pending_htlcs_forwardable!(nodes[1]); @@ -4033,21 +2761,26 @@ pub fn test_force_close_fail_back() { payment_event = SendEvent::from_event(events_2.remove(0)); assert_eq!(payment_event.msgs.len(), 1); - check_added_monitors!(nodes[1], 1); - nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &payment_event.msgs[0]); - nodes[2].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &payment_event.commitment_msg); - check_added_monitors!(nodes[2], 1); - let (_, _) = get_revoke_commit_msgs!(nodes[2], nodes[1].node.get_our_node_id()); + check_added_monitors(&nodes[1], 1); + nodes[2].node.handle_update_add_htlc(node_b_id, &payment_event.msgs[0]); + nodes[2].node.handle_commitment_signed_batch_test(node_b_id, &payment_event.commitment_msg); + check_added_monitors(&nodes[2], 1); + let (_, _) = get_revoke_commit_msgs!(nodes[2], node_b_id); // nodes[2] now has the latest commitment transaction, but hasn't revoked its previous // state or updated nodes[1]' state. Now force-close and broadcast that commitment/HTLC // transaction and ensure nodes[1] doesn't fail-backwards (this was originally a bug!). 
let error_message = "Channel force-closed"; let channel_id = payment_event.commitment_msg[0].channel_id; - nodes[2].node.force_close_broadcasting_latest_txn(&channel_id, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap(); + nodes[2] + .node + .force_close_broadcasting_latest_txn(&channel_id, &node_b_id, error_message.to_string()) + .unwrap(); check_closed_broadcast!(nodes[2], true); - check_added_monitors!(nodes[2], 1); - check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[1].node.get_our_node_id()], 100000); + check_added_monitors(&nodes[2], 1); + let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }; + check_closed_event!(nodes[2], 1, reason, [node_b_id], 100000); + let commitment_tx = { let mut node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap(); // Note that we don't bother broadcasting the HTLC-Success transaction here as we don't @@ -4061,20 +2794,25 @@ pub fn test_force_close_fail_back() { // Note no UpdateHTLCs event here from nodes[1] to nodes[0]! check_closed_broadcast!(nodes[1], true); - check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[2].node.get_our_node_id()], 100000); + check_added_monitors(&nodes[1], 1); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [node_c_id], 100000); // Now check that if we add the preimage to ChannelMonitor it broadcasts our HTLC-Success.. 
{ - get_monitor!(nodes[2], channel_id) - .provide_payment_preimage_unsafe_legacy( - &our_payment_hash, &our_payment_preimage, &node_cfgs[2].tx_broadcaster, - &LowerBoundedFeeEstimator::new(node_cfgs[2].fee_estimator), &node_cfgs[2].logger - ); + get_monitor!(nodes[2], channel_id).provide_payment_preimage_unsafe_legacy( + &our_payment_hash, + &our_payment_preimage, + &node_cfgs[2].tx_broadcaster, + &LowerBoundedFeeEstimator::new(node_cfgs[2].fee_estimator), + &node_cfgs[2].logger, + ); } mine_transaction(&nodes[2], &commitment_tx); let mut node_txn = nodes[2].tx_broadcaster.txn_broadcast(); - assert_eq!(node_txn.len(), if nodes[2].connect_style.borrow().updates_best_block_first() { 2 } else { 1 }); + assert_eq!( + node_txn.len(), + if nodes[2].connect_style.borrow().updates_best_block_first() { 2 } else { 1 } + ); let htlc_tx = node_txn.pop().unwrap(); assert_eq!(htlc_tx.input.len(), 1); assert_eq!(htlc_tx.input[0].previous_output.txid, commitment_tx.compute_txid()); @@ -4095,19 +2833,23 @@ pub fn test_dup_events_on_peer_disconnect() { let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + create_announced_chan_between_nodes(&nodes, 0, 1); let (payment_preimage, payment_hash, ..) 
= route_payment(&nodes[0], &[&nodes[1]], 1_000_000); nodes[1].node.claim_funds(payment_preimage); expect_payment_claimed!(nodes[1], payment_hash, 1_000_000); - check_added_monitors!(nodes[1], 1); - let claim_msgs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); - nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &claim_msgs.update_fulfill_htlcs[0]); + check_added_monitors(&nodes[1], 1); + let claim_msgs = get_htlc_update_msgs!(nodes[1], node_a_id); + nodes[0].node.handle_update_fulfill_htlc(node_b_id, &claim_msgs.update_fulfill_htlcs[0]); expect_payment_sent(&nodes[0], payment_preimage, None, false, false); - nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); - nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id()); + nodes[0].node.peer_disconnected(node_b_id); + nodes[1].node.peer_disconnected(node_a_id); let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]); reconnect_args.pending_htlc_claims.0 = 1; @@ -4124,20 +2866,29 @@ pub fn test_peer_disconnected_before_funding_broadcasted() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + // Open a channel between `nodes[0]` and `nodes[1]`, for which the funding transaction is never // broadcasted, even though it's created by `nodes[0]`. 
- let expected_temporary_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1_000_000, 500_000_000, 42, None, None).unwrap(); - let open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); - nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &open_channel); - let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()); - nodes[0].node.handle_accept_channel(nodes[1].node.get_our_node_id(), &accept_channel); - - let (temporary_channel_id, tx, _funding_output) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 1_000_000, 42); + let expected_temporary_channel_id = + nodes[0].node.create_channel(node_b_id, 1_000_000, 500_000_000, 42, None, None).unwrap(); + let open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); + nodes[1].node.handle_open_channel(node_a_id, &open_channel); + let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id); + nodes[0].node.handle_accept_channel(node_b_id, &accept_channel); + + let (temporary_channel_id, tx, _funding_output) = + create_funding_transaction(&nodes[0], &node_b_id, 1_000_000, 42); assert_eq!(temporary_channel_id, expected_temporary_channel_id); - assert!(nodes[0].node.funding_transaction_generated(temporary_channel_id, nodes[1].node.get_our_node_id(), tx.clone()).is_ok()); + assert!(nodes[0] + .node + .funding_transaction_generated(temporary_channel_id, node_b_id, tx.clone()) + .is_ok()); - let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()); + let funding_created_msg = + get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, node_b_id); assert_eq!(funding_created_msg.temporary_channel_id, expected_temporary_channel_id); // Even though the funding transaction is created by `nodes[0]`, the `FundingCreated` msg is @@ 
-4148,8 +2899,8 @@ pub fn test_peer_disconnected_before_funding_broadcasted() { } // The peers disconnect before the funding is broadcasted. - nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); - nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id()); + nodes[0].node.peer_disconnected(node_b_id); + nodes[1].node.peer_disconnected(node_a_id); // The time for peers to reconnect expires. for _ in 0..UNFUNDED_CHANNEL_AGE_LIMIT_TICKS { @@ -4159,10 +2910,8 @@ pub fn test_peer_disconnected_before_funding_broadcasted() { // Ensure that the channel is closed with `ClosureReason::DisconnectedPeer` and a // `DiscardFunding` event when the peers are disconnected and do not reconnect before the // funding transaction is broadcasted. - check_closed_event!(&nodes[0], 2, ClosureReason::DisconnectedPeer, true - , [nodes[1].node.get_our_node_id()], 1000000); - check_closed_event!(&nodes[1], 1, ClosureReason::DisconnectedPeer, false - , [nodes[0].node.get_our_node_id()], 1000000); + check_closed_event!(&nodes[0], 2, ClosureReason::DisconnectedPeer, true, [node_b_id], 1000000); + check_closed_event!(&nodes[1], 1, ClosureReason::DisconnectedPeer, false, [node_a_id], 1000000); } #[xtest(feature = "_externalize_tests")] @@ -4172,35 +2921,40 @@ pub fn test_simple_peer_disconnect() { let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); let nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + create_announced_chan_between_nodes(&nodes, 0, 1); create_announced_chan_between_nodes(&nodes, 1, 2); - nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); - nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id()); + nodes[0].node.peer_disconnected(node_b_id); + nodes[1].node.peer_disconnected(node_a_id); let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]); 
reconnect_args.send_channel_ready = (true, true); reconnect_nodes(reconnect_args); - let payment_preimage_1 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0; - let payment_hash_2 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).1; - fail_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_hash_2); - claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_preimage_1); + let payment_preimage_1 = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000).0; + let payment_hash_2 = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000).1; + fail_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_hash_2); + claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage_1); - nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); - nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id()); + nodes[0].node.peer_disconnected(node_b_id); + nodes[1].node.peer_disconnected(node_a_id); reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1])); - let (payment_preimage_3, payment_hash_3, ..) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000); - let payment_preimage_4 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0; - let payment_hash_5 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).1; - let payment_hash_6 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).1; + let (payment_preimage_3, payment_hash_3, ..) 
= + route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000); + let payment_preimage_4 = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000).0; + let payment_hash_5 = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000).1; + let payment_hash_6 = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000).1; - nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); - nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id()); + nodes[0].node.peer_disconnected(node_b_id); + nodes[1].node.peer_disconnected(node_a_id); claim_payment_along_route( ClaimAlongRouteArgs::new(&nodes[0], &[&[&nodes[1], &nodes[2]]], payment_preimage_3) - .skip_last(true) + .skip_last(true), ); fail_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], true, payment_hash_5); @@ -4238,8 +2992,8 @@ pub fn test_simple_peer_disconnect() { } check_added_monitors(&nodes[0], 1); - claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_preimage_4); - fail_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_hash_6); + claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage_4); + fail_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_hash_6); } fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken_lnd: bool) { @@ -4249,9 +3003,13 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let mut as_channel_ready = None; let channel_id = if messages_delivered == 0 { - let (channel_ready, chan_id, _) = create_chan_between_nodes_with_value_a(&nodes[0], &nodes[1], 100000, 10001); + let (channel_ready, chan_id, _) = + create_chan_between_nodes_with_value_a(&nodes[0], &nodes[1], 100000, 10001); as_channel_ready = Some(channel_ready); // nodes[1] doesn't receive the 
channel_ready message (it'll be re-sent on reconnect) // Note that we store it so that if we're running with `simulate_broken_lnd` we can deliver @@ -4262,51 +3020,59 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken }; let user_channel_id = nodes[1].node.list_channels()[0].user_channel_id; - let (route, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], 1_000_000); + let (route, payment_hash_1, payment_preimage_1, payment_secret_1) = + get_route_and_payment_hash!(nodes[0], nodes[1], 1_000_000); let payment_event = { - nodes[0].node.send_payment_with_route(route, payment_hash_1, - RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0)).unwrap(); - check_added_monitors!(nodes[0], 1); + let onion = RecipientOnionFields::secret_only(payment_secret_1); + let id = PaymentId(payment_hash_1.0); + nodes[0].node.send_payment_with_route(route, payment_hash_1, onion, id).unwrap(); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); SendEvent::from_event(events.remove(0)) }; - assert_eq!(nodes[1].node.get_our_node_id(), payment_event.node_id); + assert_eq!(node_b_id, payment_event.node_id); if messages_delivered < 2 { // Drop the payment_event messages, and let them get re-generated in reconnect_nodes! 
} else { - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); + nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); if messages_delivered >= 3 { - nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &payment_event.commitment_msg); - check_added_monitors!(nodes[1], 1); - let (bs_revoke_and_ack, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + nodes[1] + .node + .handle_commitment_signed_batch_test(node_a_id, &payment_event.commitment_msg); + check_added_monitors(&nodes[1], 1); + let (bs_revoke_and_ack, bs_commitment_signed) = + get_revoke_commit_msgs!(nodes[1], node_a_id); if messages_delivered >= 4 { - nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_revoke_and_ack); + nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_revoke_and_ack); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); if messages_delivered >= 5 { - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_commitment_signed); - let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); + nodes[0] + .node + .handle_commitment_signed_batch_test(node_b_id, &bs_commitment_signed); + let as_revoke_and_ack = + get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); // No commitment_signed so get_event_msg's assert(len == 1) passes - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); if messages_delivered >= 6 { - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_revoke_and_ack); + nodes[1].node.handle_revoke_and_ack(node_a_id, &as_revoke_and_ack); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); } } } } } 
- nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); - nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id()); + nodes[0].node.peer_disconnected(node_b_id); + nodes[1].node.peer_disconnected(node_a_id); if messages_delivered < 3 { if simulate_broken_lnd { // lnd has a long-standing bug where they send a channel_ready prior to a @@ -4317,7 +3083,7 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken // in `reconnect_nodes` but we currently don't fail based on that. // // See-also - nodes[1].node.handle_channel_ready(nodes[0].node.get_our_node_id(), &as_channel_ready.as_ref().unwrap().0); + nodes[1].node.handle_channel_ready(node_a_id, &as_channel_ready.as_ref().unwrap().0); } // Even if the channel_ready messages get exchanged, as long as nothing further was // received on either side, both sides will need to resend them. @@ -4350,23 +3116,23 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken if messages_delivered == 0 { assert_eq!(events_1.len(), 2); match events_1[0] { - Event::ChannelReady { .. } => { }, + Event::ChannelReady { .. } => {}, _ => panic!("Unexpected event"), }; match events_1[1] { - Event::PendingHTLCsForwardable { .. } => { }, + Event::PendingHTLCsForwardable { .. } => {}, _ => panic!("Unexpected event"), }; } else { assert_eq!(events_1.len(), 1); match events_1[0] { - Event::PendingHTLCsForwardable { .. } => { }, + Event::PendingHTLCsForwardable { .. 
} => {}, _ => panic!("Unexpected event"), }; } - nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); - nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id()); + nodes[0].node.peer_disconnected(node_b_id); + nodes[1].node.peer_disconnected(node_a_id); reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1])); nodes[1].node.process_pending_htlc_forwards(); @@ -4374,31 +3140,40 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken let events_2 = nodes[1].node.get_and_clear_pending_events(); assert_eq!(events_2.len(), 1); match events_2[0] { - Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, ref via_channel_ids, .. } => { + Event::PaymentClaimable { + ref payment_hash, + ref purpose, + amount_msat, + receiver_node_id, + ref via_channel_ids, + .. + } => { assert_eq!(payment_hash_1, *payment_hash); assert_eq!(amount_msat, 1_000_000); - assert_eq!(receiver_node_id.unwrap(), nodes[1].node.get_our_node_id()); + assert_eq!(receiver_node_id.unwrap(), node_b_id); assert_eq!(*via_channel_ids, vec![(channel_id, Some(user_channel_id))]); match &purpose { - PaymentPurpose::Bolt11InvoicePayment { payment_preimage, payment_secret, .. } => { + PaymentPurpose::Bolt11InvoicePayment { + payment_preimage, payment_secret, .. 
+ } => { assert!(payment_preimage.is_none()); assert_eq!(payment_secret_1, *payment_secret); }, - _ => panic!("expected PaymentPurpose::Bolt11InvoicePayment") + _ => panic!("expected PaymentPurpose::Bolt11InvoicePayment"), } }, _ => panic!("Unexpected event"), } nodes[1].node.claim_funds(payment_preimage_1); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000); let events_3 = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events_3.len(), 1); let (update_fulfill_htlc, commitment_signed) = match events_3[0] { MessageSendEvent::UpdateHTLCs { ref node_id, channel_id: _, ref updates } => { - assert_eq!(*node_id, nodes[0].node.get_our_node_id()); + assert_eq!(*node_id, node_a_id); assert!(updates.update_add_htlcs.is_empty()); assert!(updates.update_fail_htlcs.is_empty()); assert_eq!(updates.update_fulfill_htlcs.len(), 1); @@ -4410,7 +3185,7 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken }; if messages_delivered >= 1 { - nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &update_fulfill_htlc); + nodes[0].node.handle_update_fulfill_htlc(node_b_id, &update_fulfill_htlc); let events_4 = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events_4.len(), 1); @@ -4423,33 +3198,37 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken } if messages_delivered >= 2 { - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &commitment_signed); - check_added_monitors!(nodes[0], 1); - let (as_revoke_and_ack, as_commitment_signed) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id()); + nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &commitment_signed); + check_added_monitors(&nodes[0], 1); + let (as_revoke_and_ack, as_commitment_signed) = + get_revoke_commit_msgs!(nodes[0], node_b_id); if messages_delivered >= 3 { - 
nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_revoke_and_ack); + nodes[1].node.handle_revoke_and_ack(node_a_id, &as_revoke_and_ack); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); if messages_delivered >= 4 { - nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &as_commitment_signed); - let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); + nodes[1] + .node + .handle_commitment_signed_batch_test(node_a_id, &as_commitment_signed); + let bs_revoke_and_ack = + get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); // No commitment_signed so get_event_msg's assert(len == 1) passes - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); if messages_delivered >= 5 { - nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_revoke_and_ack); + nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_revoke_and_ack); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); } } } } } - nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); - nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id()); + nodes[0].node.peer_disconnected(node_b_id); + nodes[1].node.peer_disconnected(node_a_id); if messages_delivered < 2 { let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]); reconnect_args.pending_htlc_claims.0 = 1; @@ -4484,8 +3263,8 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken expect_payment_path_successful!(nodes[0]); } if messages_delivered <= 5 { - nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); - nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id()); + nodes[0].node.peer_disconnected(node_b_id); + 
nodes[1].node.peer_disconnected(node_a_id); } reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1])); @@ -4525,6 +3304,10 @@ pub fn test_channel_ready_without_best_block_updated() { let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + *nodes[0].connect_style.borrow_mut() = ConnectStyle::BestBlockFirstSkippingBlocks; let funding_tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 1_000_000, 0); @@ -4537,8 +3320,8 @@ pub fn test_channel_ready_without_best_block_updated() { nodes[0].node.transactions_confirmed(&conf_block_header, &conf_txn[..], conf_height); // Ensure nodes[0] generates a channel_ready after the transactions_confirmed - let as_channel_ready = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id()); - nodes[1].node.handle_channel_ready(nodes[0].node.get_our_node_id(), &as_channel_ready); + let as_channel_ready = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, node_b_id); + nodes[1].node.handle_channel_ready(node_a_id, &as_channel_ready); } #[xtest(feature = "_externalize_tests")] @@ -4548,6 +3331,9 @@ pub fn test_channel_monitor_skipping_block_when_channel_manager_is_leading() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + // Let channel_manager get ahead of chain_monitor by 1 block. // This is to emulate race-condition where newly added channel_monitor skips processing 1 block, // in case where client calls block_connect on channel_manager first and then on chain_monitor. 
@@ -4569,8 +3355,8 @@ pub fn test_channel_monitor_skipping_block_when_channel_manager_is_leading() { connect_blocks(&nodes[0], CHAN_CONFIRM_DEPTH); // Ensure nodes[0] generates a channel_ready after the transactions_confirmed - let as_channel_ready = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id()); - nodes[1].node.handle_channel_ready(nodes[0].node.get_our_node_id(), &as_channel_ready); + let as_channel_ready = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, node_b_id); + nodes[1].node.handle_channel_ready(node_a_id, &as_channel_ready); } #[xtest(feature = "_externalize_tests")] @@ -4580,6 +3366,9 @@ pub fn test_channel_monitor_skipping_block_when_channel_manager_is_lagging() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + // Let chain_monitor get ahead of channel_manager by 1 block. // This is to emulate race-condition where newly added channel_monitor skips processing 1 block, // in case where client calls block_connect on chain_monitor first and then on channel_manager. 
@@ -4604,8 +3393,8 @@ pub fn test_channel_monitor_skipping_block_when_channel_manager_is_lagging() { connect_blocks(&nodes[0], CHAN_CONFIRM_DEPTH); // Ensure nodes[0] generates a channel_ready after the transactions_confirmed - let as_channel_ready = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id()); - nodes[1].node.handle_channel_ready(nodes[0].node.get_our_node_id(), &as_channel_ready); + let as_channel_ready = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, node_b_id); + nodes[1].node.handle_channel_ready(node_a_id, &as_channel_ready); } #[xtest(feature = "_externalize_tests")] @@ -4616,15 +3405,22 @@ pub fn test_drop_messages_peer_disconnect_dual_htlc() { let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + create_announced_chan_between_nodes(&nodes, 0, 1); - let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000); + let (payment_preimage_1, payment_hash_1, ..) 
= + route_payment(&nodes[0], &[&nodes[1]], 1_000_000); // Now try to send a second payment which will fail to send - let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); - nodes[0].node.send_payment_with_route(route, payment_hash_2, - RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap(); - check_added_monitors!(nodes[0], 1); + let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = + get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); + let onion = RecipientOnionFields::secret_only(payment_secret_2); + let id = PaymentId(payment_hash_2.0); + nodes[0].node.send_payment_with_route(route, payment_hash_2, onion, id).unwrap(); + check_added_monitors(&nodes[0], 1); let events_1 = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events_1.len(), 1); @@ -4635,20 +3431,32 @@ pub fn test_drop_messages_peer_disconnect_dual_htlc() { nodes[1].node.claim_funds(payment_preimage_1); expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let events_2 = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events_2.len(), 1); match events_2[0] { - MessageSendEvent::UpdateHTLCs { ref node_id, channel_id: _, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => { - assert_eq!(*node_id, nodes[0].node.get_our_node_id()); + MessageSendEvent::UpdateHTLCs { + ref node_id, + updates: + msgs::CommitmentUpdate { + ref update_add_htlcs, + ref update_fulfill_htlcs, + ref update_fail_htlcs, + ref update_fail_malformed_htlcs, + ref update_fee, + ref commitment_signed, + }, + .. 
+ } => { + assert_eq!(*node_id, node_a_id); assert!(update_add_htlcs.is_empty()); assert_eq!(update_fulfill_htlcs.len(), 1); assert!(update_fail_htlcs.is_empty()); assert!(update_fail_malformed_htlcs.is_empty()); assert!(update_fee.is_none()); - nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &update_fulfill_htlcs[0]); + nodes[0].node.handle_update_fulfill_htlc(node_b_id, &update_fulfill_htlcs[0]); let events_3 = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events_3.len(), 1); match events_3[0] { @@ -4659,31 +3467,33 @@ pub fn test_drop_messages_peer_disconnect_dual_htlc() { _ => panic!("Unexpected event"), } - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), commitment_signed); - let _ = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); + nodes[0].node.handle_commitment_signed_batch_test(node_b_id, commitment_signed); + let _ = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); // No commitment_signed so get_event_msg's assert(len == 1) passes - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); }, _ => panic!("Unexpected event"), } - nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); - nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id()); + nodes[0].node.peer_disconnected(node_b_id); + nodes[1].node.peer_disconnected(node_a_id); + + let init_msg = msgs::Init { + features: nodes[1].node.init_features(), + networks: None, + remote_network_address: None, + }; - nodes[0].node.peer_connected(nodes[1].node.get_our_node_id(), &msgs::Init { - features: nodes[1].node.init_features(), networks: None, remote_network_address: None - }, true).unwrap(); + nodes[0].node.peer_connected(node_b_id, &init_msg, true).unwrap(); let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]); assert_eq!(reestablish_1.len(), 1); - nodes[1].node.peer_connected(nodes[0].node.get_our_node_id(), 
&msgs::Init { - features: nodes[0].node.init_features(), networks: None, remote_network_address: None - }, false).unwrap(); + nodes[1].node.peer_connected(node_a_id, &init_msg, false).unwrap(); let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]); assert_eq!(reestablish_2.len(), 1); - nodes[0].node.handle_channel_reestablish(nodes[1].node.get_our_node_id(), &reestablish_2[0]); + nodes[0].node.handle_channel_reestablish(node_b_id, &reestablish_2[0]); let as_resp = handle_chan_reestablish_msgs!(nodes[0], nodes[1]); - nodes[1].node.handle_channel_reestablish(nodes[0].node.get_our_node_id(), &reestablish_1[0]); + nodes[1].node.handle_channel_reestablish(node_a_id, &reestablish_1[0]); let bs_resp = handle_chan_reestablish_msgs!(nodes[1], nodes[0]); assert!(as_resp.0.is_none()); @@ -4699,43 +3509,54 @@ pub fn test_drop_messages_peer_disconnect_dual_htlc() { assert!(as_resp.2.as_ref().unwrap().update_fail_htlcs.is_empty()); assert!(as_resp.2.as_ref().unwrap().update_fail_malformed_htlcs.is_empty()); assert!(as_resp.2.as_ref().unwrap().update_fee.is_none()); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &as_resp.2.as_ref().unwrap().update_add_htlcs[0]); - nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &as_resp.2.as_ref().unwrap().commitment_signed); - let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); + nodes[1] + .node + .handle_update_add_htlc(node_a_id, &as_resp.2.as_ref().unwrap().update_add_htlcs[0]); + nodes[1].node.handle_commitment_signed_batch_test( + node_a_id, + &as_resp.2.as_ref().unwrap().commitment_signed, + ); + let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); // No commitment_signed so get_event_msg's assert(len == 1) passes - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), 
as_resp.1.as_ref().unwrap()); - let bs_second_commitment_signed = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + nodes[1].node.handle_revoke_and_ack(node_a_id, as_resp.1.as_ref().unwrap()); + let bs_second_commitment_signed = get_htlc_update_msgs!(nodes[1], node_a_id); assert!(bs_second_commitment_signed.update_add_htlcs.is_empty()); assert!(bs_second_commitment_signed.update_fulfill_htlcs.is_empty()); assert!(bs_second_commitment_signed.update_fail_htlcs.is_empty()); assert!(bs_second_commitment_signed.update_fail_malformed_htlcs.is_empty()); assert!(bs_second_commitment_signed.update_fee.is_none()); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); - nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_revoke_and_ack); - let as_commitment_signed = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); + nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_revoke_and_ack); + let as_commitment_signed = get_htlc_update_msgs!(nodes[0], node_b_id); assert!(as_commitment_signed.update_add_htlcs.is_empty()); assert!(as_commitment_signed.update_fulfill_htlcs.is_empty()); assert!(as_commitment_signed.update_fail_htlcs.is_empty()); assert!(as_commitment_signed.update_fail_malformed_htlcs.is_empty()); assert!(as_commitment_signed.update_fee.is_none()); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_second_commitment_signed.commitment_signed); - let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); + nodes[0].node.handle_commitment_signed_batch_test( + node_b_id, + &bs_second_commitment_signed.commitment_signed, + ); + let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); // No commitment_signed so get_event_msg's assert(len == 1) passes - check_added_monitors!(nodes[0], 1); + 
check_added_monitors(&nodes[0], 1); - nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &as_commitment_signed.commitment_signed); - let bs_second_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); + nodes[1] + .node + .handle_commitment_signed_batch_test(node_a_id, &as_commitment_signed.commitment_signed); + let bs_second_revoke_and_ack = + get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); // No commitment_signed so get_event_msg's assert(len == 1) passes - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_revoke_and_ack); + nodes[1].node.handle_revoke_and_ack(node_a_id, &as_revoke_and_ack); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_pending_htlcs_forwardable!(nodes[1]); @@ -4745,19 +3566,21 @@ pub fn test_drop_messages_peer_disconnect_dual_htlc() { Event::PaymentClaimable { ref payment_hash, ref purpose, .. } => { assert_eq!(payment_hash_2, *payment_hash); match &purpose { - PaymentPurpose::Bolt11InvoicePayment { payment_preimage, payment_secret, .. } => { + PaymentPurpose::Bolt11InvoicePayment { + payment_preimage, payment_secret, .. 
+ } => { assert!(payment_preimage.is_none()); assert_eq!(payment_secret_2, *payment_secret); }, - _ => panic!("expected PaymentPurpose::Bolt11InvoicePayment") + _ => panic!("expected PaymentPurpose::Bolt11InvoicePayment"), } }, _ => panic!("Unexpected event"), } - nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_second_revoke_and_ack); + nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_second_revoke_and_ack); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); expect_payment_path_successful!(nodes[0]); claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2); @@ -4771,25 +3594,52 @@ fn do_test_htlc_timeout(send_partial_mpp: bool) { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + create_announced_chan_between_nodes(&nodes, 0, 1); let our_payment_hash = if send_partial_mpp { - let (route, our_payment_hash, _, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[1], 100000); + let (route, our_payment_hash, _, payment_secret) = + get_route_and_payment_hash!(&nodes[0], nodes[1], 100000); // Use the utility function send_payment_along_path to send the payment with MPP data which // indicates there are more HTLCs coming. let cur_height = CHAN_CONFIRM_DEPTH + 1; // route_payment calls send_payment, which adds 1 to the current height. So we do the same here to match. 
let payment_id = PaymentId([42; 32]); - let session_privs = nodes[0].node.test_add_new_pending_payment(our_payment_hash, - RecipientOnionFields::secret_only(payment_secret), payment_id, &route).unwrap(); - nodes[0].node.test_send_payment_along_path(&route.paths[0], &our_payment_hash, - RecipientOnionFields::secret_only(payment_secret), 200_000, cur_height, payment_id, - &None, session_privs[0]).unwrap(); - check_added_monitors!(nodes[0], 1); + let onion = RecipientOnionFields::secret_only(payment_secret); + let session_privs = nodes[0] + .node + .test_add_new_pending_payment(our_payment_hash, onion, payment_id, &route) + .unwrap(); + + nodes[0] + .node + .test_send_payment_along_path( + &route.paths[0], + &our_payment_hash, + RecipientOnionFields::secret_only(payment_secret), + 200_000, + cur_height, + payment_id, + &None, + session_privs[0], + ) + .unwrap(); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); // Now do the relevant commitment_signed/RAA dances along the path, noting that the final // hop should *not* yet generate any PaymentClaimable event(s). 
- pass_along_path(&nodes[0], &[&nodes[1]], 100000, our_payment_hash, Some(payment_secret), events.drain(..).next().unwrap(), false, None); + pass_along_path( + &nodes[0], + &[&nodes[1]], + 100000, + our_payment_hash, + Some(payment_secret), + events.drain(..).next().unwrap(), + false, + None, + ); our_payment_hash } else { route_payment(&nodes[0], &[&nodes[1]], 100000).1 @@ -4798,28 +3648,33 @@ fn do_test_htlc_timeout(send_partial_mpp: bool) { let mut block = create_dummy_block(nodes[0].best_block_hash(), 42, Vec::new()); connect_block(&nodes[0], &block); connect_block(&nodes[1], &block); - let block_count = TEST_FINAL_CLTV + CHAN_CONFIRM_DEPTH + 2 - CLTV_CLAIM_BUFFER - LATENCY_GRACE_PERIOD_BLOCKS; + let block_count = + TEST_FINAL_CLTV + CHAN_CONFIRM_DEPTH + 2 - CLTV_CLAIM_BUFFER - LATENCY_GRACE_PERIOD_BLOCKS; for _ in CHAN_CONFIRM_DEPTH + 2..block_count { block.header.prev_blockhash = block.block_hash(); connect_block(&nodes[0], &block); connect_block(&nodes[1], &block); } - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingFailureType::Receive { payment_hash: our_payment_hash }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!( + nodes[1], + vec![HTLCHandlingFailureType::Receive { payment_hash: our_payment_hash }] + ); - check_added_monitors!(nodes[1], 1); - let htlc_timeout_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + check_added_monitors(&nodes[1], 1); + let htlc_timeout_updates = get_htlc_update_msgs!(nodes[1], node_a_id); assert!(htlc_timeout_updates.update_add_htlcs.is_empty()); assert_eq!(htlc_timeout_updates.update_fail_htlcs.len(), 1); assert!(htlc_timeout_updates.update_fail_malformed_htlcs.is_empty()); assert!(htlc_timeout_updates.update_fee.is_none()); - nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &htlc_timeout_updates.update_fail_htlcs[0]); + nodes[0].node.handle_update_fail_htlc(node_b_id, &htlc_timeout_updates.update_fail_htlcs[0]); 
commitment_signed_dance!(nodes[0], nodes[1], htlc_timeout_updates.commitment_signed, false); // 100_000 msat as u64, followed by the height at which we failed back above let mut expected_failure_data = (100_000 as u64).to_be_bytes().to_vec(); expected_failure_data.extend_from_slice(&(block_count - 1).to_be_bytes()); - expect_payment_failed!(nodes[0], our_payment_hash, true, LocalHTLCFailureReason::IncorrectPaymentDetails, &expected_failure_data[..]); + let reason = LocalHTLCFailureReason::IncorrectPaymentDetails; + expect_payment_failed!(nodes[0], our_payment_hash, true, reason, &expected_failure_data[..]); } #[xtest(feature = "_externalize_tests")] @@ -4834,34 +3689,45 @@ fn do_test_holding_cell_htlc_add_timeouts(forwarded_htlc: bool) { let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + create_announced_chan_between_nodes(&nodes, 0, 1); let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2); // Make sure all nodes are at the same starting height - connect_blocks(&nodes[0], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[0].best_block_info().1); - connect_blocks(&nodes[1], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[1].best_block_info().1); - connect_blocks(&nodes[2], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[2].best_block_info().1); + connect_blocks(&nodes[0], 2 * CHAN_CONFIRM_DEPTH + 1 - nodes[0].best_block_info().1); + connect_blocks(&nodes[1], 2 * CHAN_CONFIRM_DEPTH + 1 - nodes[1].best_block_info().1); + connect_blocks(&nodes[2], 2 * CHAN_CONFIRM_DEPTH + 1 - nodes[2].best_block_info().1); // Route a first payment to get the 1 -> 2 channel in awaiting_raa... 
- let (route, first_payment_hash, _, first_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[2], 100000); - nodes[1].node.send_payment_with_route(route, first_payment_hash, - RecipientOnionFields::secret_only(first_payment_secret), PaymentId(first_payment_hash.0)).unwrap(); + let (route, first_payment_hash, _, first_payment_secret) = + get_route_and_payment_hash!(nodes[1], nodes[2], 100000); + let onion = RecipientOnionFields::secret_only(first_payment_secret); + let id = PaymentId(first_payment_hash.0); + nodes[1].node.send_payment_with_route(route, first_payment_hash, onion, id).unwrap(); assert_eq!(nodes[1].node.get_and_clear_pending_msg_events().len(), 1); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); // Now attempt to route a second payment, which should be placed in the holding cell let sending_node = if forwarded_htlc { &nodes[0] } else { &nodes[1] }; - let (route, second_payment_hash, _, second_payment_secret) = get_route_and_payment_hash!(sending_node, nodes[2], 100000); - sending_node.node.send_payment_with_route(route, second_payment_hash, - RecipientOnionFields::secret_only(second_payment_secret), PaymentId(second_payment_hash.0)).unwrap(); + let (route, second_payment_hash, _, second_payment_secret) = + get_route_and_payment_hash!(sending_node, nodes[2], 100000); + let onion = RecipientOnionFields::secret_only(second_payment_secret); + let id = PaymentId(second_payment_hash.0); + sending_node.node.send_payment_with_route(route, second_payment_hash, onion, id).unwrap(); + if forwarded_htlc { - check_added_monitors!(nodes[0], 1); - let payment_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0)); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); + check_added_monitors(&nodes[0], 1); + let payment_event = + SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0)); + 
nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); expect_pending_htlcs_forwardable!(nodes[1]); } - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); connect_blocks(&nodes[1], TEST_FINAL_CLTV - LATENCY_GRACE_PERIOD_BLOCKS); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); @@ -4869,18 +3735,24 @@ fn do_test_holding_cell_htlc_add_timeouts(forwarded_htlc: bool) { connect_blocks(&nodes[1], 1); if forwarded_htlc { - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingFailureType::Forward { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]); - check_added_monitors!(nodes[1], 1); + let fail_type = + HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_2.2 }; + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [fail_type]); + check_added_monitors(&nodes[1], 1); let fail_commit = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(fail_commit.len(), 1); match fail_commit[0] { - MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fail_htlcs, ref commitment_signed, .. }, .. } => { - nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &update_fail_htlcs[0]); + MessageSendEvent::UpdateHTLCs { + updates: msgs::CommitmentUpdate { ref update_fail_htlcs, ref commitment_signed, .. }, + .. 
+ } => { + nodes[0].node.handle_update_fail_htlc(node_b_id, &update_fail_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, true, true); }, _ => unreachable!(), } - expect_payment_failed_with_update!(nodes[0], second_payment_hash, false, chan_2.0.contents.short_channel_id, false); + let scid = chan_2.0.contents.short_channel_id; + expect_payment_failed_with_update!(nodes[0], second_payment_hash, false, scid, false); } else { expect_payment_failed!(nodes[1], second_payment_hash, false); } @@ -4893,31 +3765,46 @@ pub fn test_holding_cell_htlc_add_timeouts() { } macro_rules! check_spendable_outputs { - ($node: expr, $keysinterface: expr) => { - { - let mut events = $node.chain_monitor.chain_monitor.get_and_clear_pending_events(); - let mut txn = Vec::new(); - let mut all_outputs = Vec::new(); - let secp_ctx = Secp256k1::new(); - for event in events.drain(..) { - match event { - Event::SpendableOutputs { mut outputs, channel_id: _ } => { - for outp in outputs.drain(..) { - txn.push($keysinterface.backing.spend_spendable_outputs(&[&outp], Vec::new(), Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(), 253, None, &secp_ctx).unwrap()); - all_outputs.push(outp); - } - }, - _ => panic!("Unexpected event"), - }; - } - if all_outputs.len() > 1 { - if let Ok(tx) = $keysinterface.backing.spend_spendable_outputs(&all_outputs.iter().map(|a| a).collect::>(), Vec::new(), Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(), 253, None, &secp_ctx) { - txn.push(tx); - } + ($node: expr, $keysinterface: expr) => {{ + let mut events = $node.chain_monitor.chain_monitor.get_and_clear_pending_events(); + let mut txn = Vec::new(); + let mut all_outputs = Vec::new(); + let secp_ctx = Secp256k1::new(); + for event in events.drain(..) { + match event { + Event::SpendableOutputs { mut outputs, channel_id: _ } => { + for outp in outputs.drain(..) 
{ + let script = + Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(); + let tx = $keysinterface.backing.spend_spendable_outputs( + &[&outp], + Vec::new(), + script, + 253, + None, + &secp_ctx, + ); + txn.push(tx.unwrap()); + all_outputs.push(outp); + } + }, + _ => panic!("Unexpected event"), + }; + } + if all_outputs.len() > 1 { + if let Ok(tx) = $keysinterface.backing.spend_spendable_outputs( + &all_outputs.iter().map(|a| a).collect::>(), + Vec::new(), + Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(), + 253, + None, + &secp_ctx, + ) { + txn.push(tx); } - txn } - } + txn + }}; } #[xtest(feature = "_externalize_tests")] @@ -4928,12 +3815,16 @@ pub fn test_claim_sizeable_push_msat() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 98_000_000); - let error_message = "Channel force-closed"; - nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[0].node.get_our_node_id(), error_message.to_string()).unwrap(); + let err = "Channel force-closed".to_string(); + nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &node_a_id, err).unwrap(); check_closed_broadcast!(nodes[1], true); - check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[0].node.get_our_node_id()], 100000); + check_added_monitors(&nodes[1], 1); + let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }; + check_closed_event!(nodes[1], 1, reason, [node_a_id], 100000); + let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); assert_eq!(node_txn.len(), 1); check_spends!(node_txn[0], chan.3); @@ -4957,13 +3848,18 @@ pub fn test_claim_on_remote_sizeable_push_msat() { let node_cfgs = 
create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - let error_message = "Channel force-closed"; + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + + let err = "Channel force-closed".to_string(); let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 98_000_000); - nodes[0].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap(); + nodes[0].node.force_close_broadcasting_latest_txn(&chan.2, &node_b_id, err).unwrap(); check_closed_broadcast!(nodes[0], true); - check_added_monitors!(nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[1].node.get_our_node_id()], 100000); + check_added_monitors(&nodes[0], 1); + let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }; + check_closed_event!(nodes[0], 1, reason, [node_b_id], 100000); let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); assert_eq!(node_txn.len(), 1); @@ -4972,8 +3868,8 @@ pub fn test_claim_on_remote_sizeable_push_msat() { mine_transaction(&nodes[1], &node_txn[0]); check_closed_broadcast!(nodes[1], true); - check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000); + check_added_monitors(&nodes[1], 1); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [node_a_id], 100000); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager); @@ -4991,17 +3887,19 @@ pub fn test_claim_on_remote_revoked_sizeable_push_msat() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, 
&node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 59000000); - let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0; + let payment_preimage = route_payment(&nodes[0], &[&nodes[1]], 3000000).0; let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan.2); assert_eq!(revoked_local_txn[0].input.len(), 1); assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan.3.compute_txid()); - claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage); + claim_payment(&nodes[0], &[&nodes[1]], payment_preimage); mine_transaction(&nodes[1], &revoked_local_txn[0]); check_closed_broadcast!(nodes[1], true); - check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000); + check_added_monitors(&nodes[1], 1); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [node_a_id], 100000); let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); mine_transaction(&nodes[1], &node_txn[0]); @@ -5021,6 +3919,8 @@ pub fn test_static_spendable_outputs_preimage_tx() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + // Create some initial channels let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1); @@ -5033,9 +3933,9 @@ pub fn test_static_spendable_outputs_preimage_tx() { // Settle A's commitment tx on B's chain nodes[1].node.claim_funds(payment_preimage); expect_payment_claimed!(nodes[1], payment_hash, 3_000_000); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); mine_transaction(&nodes[1], &commitment_tx[0]); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let events = nodes[1].node.get_and_clear_pending_msg_events(); 
match events[0] { MessageSendEvent::UpdateHTLCs { .. } => {}, @@ -5053,7 +3953,7 @@ pub fn test_static_spendable_outputs_preimage_tx() { assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); mine_transaction(&nodes[1], &node_txn[0]); - check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [node_a_id], 100000); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager); @@ -5068,13 +3968,15 @@ pub fn test_static_spendable_outputs_timeout_tx() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + // Create some initial channels let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1); // Rebalance the network a bit by relaying one payment through all the channels ... - send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000); + send_payment(&nodes[0], &[&nodes[1]], 8000000); - let (_, our_payment_hash, ..) = route_payment(&nodes[1], &vec!(&nodes[0])[..], 3_000_000); + let (_, our_payment_hash, ..) = route_payment(&nodes[1], &[&nodes[0]], 3_000_000); let commitment_tx = get_local_commitment_txn!(nodes[0], chan_1.2); assert_eq!(commitment_tx[0].input.len(), 1); @@ -5082,7 +3984,7 @@ pub fn test_static_spendable_outputs_timeout_tx() { // Settle A's commitment tx on B' chain mine_transaction(&nodes[1], &commitment_tx[0]); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let events = nodes[1].node.get_and_clear_pending_msg_events(); match events[1] { MessageSendEvent::BroadcastChannelUpdate { .. 
} => {}, @@ -5093,11 +3995,11 @@ pub fn test_static_spendable_outputs_timeout_tx() { // Check B's monitor was able to send back output descriptor event for timeout tx on A's commitment tx let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); assert_eq!(node_txn.len(), 1); // ChannelMonitor: timeout tx - check_spends!(node_txn[0], commitment_tx[0].clone()); + check_spends!(node_txn[0], commitment_tx[0].clone()); assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT); mine_transaction(&nodes[1], &node_txn[0]); - check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [node_a_id], 100000); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); expect_payment_failed!(nodes[1], our_payment_hash, false); @@ -5114,24 +4016,29 @@ fn do_test_static_spendable_outputs_justice_tx_revoked_commitment_tx(split_tx: b let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + // Create some initial channels let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1); - let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0; + let payment_preimage = route_payment(&nodes[0], &[&nodes[1]], 3000000).0; let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_1.2); assert_eq!(revoked_local_txn[0].input.len(), 1); assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_1.3.compute_txid()); - claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage); + claim_payment(&nodes[0], &[&nodes[1]], payment_preimage); if split_tx { - connect_blocks(&nodes[1], TEST_FINAL_CLTV - COUNTERPARTY_CLAIMABLE_WITHIN_BLOCKS_PINNABLE + 1); + connect_blocks( + &nodes[1], + TEST_FINAL_CLTV - COUNTERPARTY_CLAIMABLE_WITHIN_BLOCKS_PINNABLE + 1, + 
); } mine_transaction(&nodes[1], &revoked_local_txn[0]); check_closed_broadcast!(nodes[1], true); - check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000); + check_added_monitors(&nodes[1], 1); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [node_a_id], 100000); // If the HTLC expires in more than COUNTERPARTY_CLAIMABLE_WITHIN_BLOCKS_PINNABLE blocks, we'll // claim both the revoked and HTLC outputs in one transaction, otherwise we'll split them as we @@ -5169,27 +4076,33 @@ pub fn test_static_spendable_outputs_justice_tx_revoked_htlc_timeout_tx() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + // Create some initial channels let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1); - let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0; + let payment_preimage = route_payment(&nodes[0], &[&nodes[1]], 3000000).0; let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_1.2); assert_eq!(revoked_local_txn[0].input.len(), 1); assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_1.3.compute_txid()); - claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage); + claim_payment(&nodes[0], &[&nodes[1]], payment_preimage); // A will generate HTLC-Timeout from revoked commitment tx mine_transaction(&nodes[0], &revoked_local_txn[0]); check_closed_broadcast!(nodes[0], true); - check_added_monitors!(nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000); + check_added_monitors(&nodes[0], 1); + check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [node_b_id], 100000); connect_blocks(&nodes[0], TEST_FINAL_CLTV); // 
Confirm blocks until the HTLC expires let revoked_htlc_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); assert_eq!(revoked_htlc_txn.len(), 1); assert_eq!(revoked_htlc_txn[0].input.len(), 1); - assert_eq!(revoked_htlc_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); + assert_eq!( + revoked_htlc_txn[0].input[0].witness.last().unwrap().len(), + OFFERED_HTLC_SCRIPT_WEIGHT + ); check_spends!(revoked_htlc_txn[0], revoked_local_txn[0]); assert_ne!(revoked_htlc_txn[0].lock_time, LockTime::ZERO); // HTLC-Timeout @@ -5198,10 +4111,11 @@ pub fn test_static_spendable_outputs_justice_tx_revoked_htlc_timeout_tx() { // locktime. connect_blocks(&nodes[1], TEST_FINAL_CLTV); // B will generate justice tx from A's revoked commitment/HTLC tx - connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![revoked_local_txn[0].clone(), revoked_htlc_txn[0].clone()])); + let txn = vec![revoked_local_txn[0].clone(), revoked_htlc_txn[0].clone()]; + connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, txn)); check_closed_broadcast!(nodes[1], true); - check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000); + check_added_monitors(&nodes[1], 1); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [node_a_id], 100000); // There will be 2 justice transactions: // - One on the unpinnable, revoked to_self output on the commitment transaction and on @@ -5239,10 +4153,13 @@ pub fn test_static_spendable_outputs_justice_tx_revoked_htlc_success_tx() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + // Create some initial channels let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1); - let 
payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0; + let payment_preimage = route_payment(&nodes[0], &[&nodes[1]], 3000000).0; let revoked_local_txn = get_local_commitment_txn!(nodes[1], chan_1.2); assert_eq!(revoked_local_txn[0].input.len(), 1); assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_1.3.compute_txid()); @@ -5250,18 +4167,21 @@ pub fn test_static_spendable_outputs_justice_tx_revoked_htlc_success_tx() { // The to-be-revoked commitment tx should have one HTLC and one to_remote output assert_eq!(revoked_local_txn[0].output.len(), 2); - claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage); + claim_payment(&nodes[0], &[&nodes[1]], payment_preimage); // B will generate HTLC-Success from revoked commitment tx mine_transaction(&nodes[1], &revoked_local_txn[0]); check_closed_broadcast!(nodes[1], true); - check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000); + check_added_monitors(&nodes[1], 1); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [node_a_id], 100000); let revoked_htlc_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); assert_eq!(revoked_htlc_txn.len(), 1); assert_eq!(revoked_htlc_txn[0].input.len(), 1); - assert_eq!(revoked_htlc_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT); + assert_eq!( + revoked_htlc_txn[0].input[0].witness.last().unwrap().len(), + ACCEPTED_HTLC_SCRIPT_WEIGHT + ); check_spends!(revoked_htlc_txn[0], revoked_local_txn[0]); // Check that the unspent (of two) outputs on revoked_local_txn[0] is a P2WPKH: @@ -5269,10 +4189,11 @@ pub fn test_static_spendable_outputs_justice_tx_revoked_htlc_success_tx() { assert_eq!(revoked_local_txn[0].output[unspent_local_txn_output].script_pubkey.len(), 2 + 20); // P2WPKH // A will generate justice tx from B's revoked commitment/HTLC tx - connect_block(&nodes[0], 
&create_dummy_block(nodes[0].best_block_hash(), 42, vec![revoked_local_txn[0].clone(), revoked_htlc_txn[0].clone()])); + let txn = vec![revoked_local_txn[0].clone(), revoked_htlc_txn[0].clone()]; + connect_block(&nodes[0], &create_dummy_block(nodes[0].best_block_hash(), 42, txn)); check_closed_broadcast!(nodes[0], true); - check_added_monitors!(nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000); + check_added_monitors(&nodes[0], 1); + check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [node_b_id], 100000); // There will be 2 justice transactions, one on the revoked HTLC output on the commitment // transaction, and one on the revoked to_self output on the HTLC-success transaction. @@ -5317,27 +4238,33 @@ pub fn test_onchain_to_onchain_claim() { let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); let nodes = create_network(3, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + // Create some initial channels let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1); let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2); // Ensure all nodes are at the same height - let node_max_height = nodes.iter().map(|node| node.blocks.lock().unwrap().len()).max().unwrap() as u32; + let node_max_height = + nodes.iter().map(|node| node.blocks.lock().unwrap().len()).max().unwrap() as u32; connect_blocks(&nodes[0], node_max_height - nodes[0].best_block_info().1); connect_blocks(&nodes[1], node_max_height - nodes[1].best_block_info().1); connect_blocks(&nodes[2], node_max_height - nodes[2].best_block_info().1); // Rebalance the network a bit by relaying one payment through all the channels ... 
- send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000); - send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000); + send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 8000000); + send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 8000000); - let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000); + let (payment_preimage, payment_hash, ..) = + route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000); let commitment_tx = get_local_commitment_txn!(nodes[2], chan_2.2); check_spends!(commitment_tx[0], chan_2.3); nodes[2].node.claim_funds(payment_preimage); expect_payment_claimed!(nodes[2], payment_hash, 3_000_000); - check_added_monitors!(nodes[2], 1); - let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); + check_added_monitors(&nodes[2], 1); + let updates = get_htlc_update_msgs!(nodes[2], node_b_id); assert!(updates.update_add_htlcs.is_empty()); assert!(updates.update_fail_htlcs.is_empty()); assert_eq!(updates.update_fulfill_htlcs.len(), 1); @@ -5345,28 +4272,37 @@ pub fn test_onchain_to_onchain_claim() { mine_transaction(&nodes[2], &commitment_tx[0]); check_closed_broadcast!(nodes[2], true); - check_added_monitors!(nodes[2], 1); - check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000); + check_added_monitors(&nodes[2], 1); + check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [node_b_id], 100000); let c_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelMonitor: 1 (HTLC-Success tx) assert_eq!(c_txn.len(), 1); check_spends!(c_txn[0], commitment_tx[0]); - assert_eq!(c_txn[0].input[0].witness.clone().last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT); + assert_eq!( + c_txn[0].input[0].witness.clone().last().unwrap().len(), + ACCEPTED_HTLC_SCRIPT_WEIGHT + ); assert!(c_txn[0].output[0].script_pubkey.is_p2wsh()); // revokeable output assert_eq!(c_txn[0].lock_time, 
LockTime::ZERO); // Success tx // So we broadcast C's commitment tx and HTLC-Success on B's chain, we should successfully be able to extract preimage and update downstream monitor - connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![commitment_tx[0].clone(), c_txn[0].clone()])); - check_added_monitors!(nodes[1], 1); + let txn = vec![commitment_tx[0].clone(), c_txn[0].clone()]; + connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, txn)); + check_added_monitors(&nodes[1], 1); let events = nodes[1].node.get_and_clear_pending_events(); assert_eq!(events.len(), 2); match events[0] { - Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {} + Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}, _ => panic!("Unexpected event"), } match events[1] { - Event::PaymentForwarded { total_fee_earned_msat, prev_channel_id, claim_from_onchain_tx, - next_channel_id, outbound_amount_forwarded_msat, .. + Event::PaymentForwarded { + total_fee_earned_msat, + prev_channel_id, + claim_from_onchain_tx, + next_channel_id, + outbound_amount_forwarded_msat, + .. 
} => { assert_eq!(total_fee_earned_msat, Some(1000)); assert_eq!(prev_channel_id, Some(chan_1.2)); @@ -5376,24 +4312,38 @@ pub fn test_onchain_to_onchain_claim() { }, _ => panic!("Unexpected event"), } - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let mut msg_events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(msg_events.len(), 3); - let nodes_2_event = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut msg_events); - let nodes_0_event = remove_first_msg_event_to_node(&nodes[0].node.get_our_node_id(), &mut msg_events); + let nodes_2_event = remove_first_msg_event_to_node(&node_c_id, &mut msg_events); + let nodes_0_event = remove_first_msg_event_to_node(&node_a_id, &mut msg_events); match nodes_2_event { - MessageSendEvent::HandleError { action: ErrorAction::DisconnectPeer { .. }, node_id: _ } => {}, + MessageSendEvent::HandleError { + action: ErrorAction::DisconnectPeer { .. }, + node_id: _, + } => {}, _ => panic!("Unexpected event"), } match nodes_0_event { - MessageSendEvent::UpdateHTLCs { ref node_id, channel_id: _, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, .. } } => { + MessageSendEvent::UpdateHTLCs { + ref node_id, + updates: + msgs::CommitmentUpdate { + ref update_add_htlcs, + ref update_fulfill_htlcs, + ref update_fail_htlcs, + ref update_fail_malformed_htlcs, + .. + }, + .. 
+ } => { assert!(update_add_htlcs.is_empty()); assert!(update_fail_htlcs.is_empty()); assert_eq!(update_fulfill_htlcs.len(), 1); assert!(update_fail_malformed_htlcs.is_empty()); - assert_eq!(nodes[0].node.get_our_node_id(), *node_id); + assert_eq!(node_a_id, *node_id); }, _ => panic!("Unexpected event"), }; @@ -5407,7 +4357,7 @@ pub fn test_onchain_to_onchain_claim() { // Broadcast A's commitment tx on B's chain to see if we are able to claim inbound HTLC with our HTLC-Success tx let commitment_tx = get_local_commitment_txn!(nodes[0], chan_1.2); mine_transaction(&nodes[1], &commitment_tx[0]); - check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [node_a_id], 100000); let b_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelMonitor: HTLC-Success tx assert_eq!(b_txn.len(), 1); @@ -5417,7 +4367,7 @@ pub fn test_onchain_to_onchain_claim() { assert_eq!(b_txn[0].lock_time.to_consensus_u32(), nodes[1].best_block_info().1); // Success tx check_closed_broadcast!(nodes[1], true); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); } #[xtest(feature = "_externalize_tests")] @@ -5432,30 +4382,47 @@ pub fn test_duplicate_payment_hash_one_failure_one_success() { // It is now fixed, so we simply set the fee to the expected value here. 
let mut config = test_default_channel_config(); config.channel_config.forwarding_fee_base_msat = 196; - let node_chanmgrs = create_node_chanmgrs(5, &node_cfgs, - &[Some(config.clone()), Some(config.clone()), Some(config.clone()), Some(config.clone()), Some(config.clone())]); + + let configs = [ + Some(config.clone()), + Some(config.clone()), + Some(config.clone()), + Some(config.clone()), + Some(config.clone()), + ]; + let node_chanmgrs = create_node_chanmgrs(5, &node_cfgs, &configs); let mut nodes = create_network(5, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + let node_e_id = nodes[4].node.get_our_node_id(); + // Create the required channels and route one HTLC from A to D and another from A to E. create_announced_chan_between_nodes(&nodes, 0, 1); let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2); create_announced_chan_between_nodes(&nodes, 2, 3); create_announced_chan_between_nodes(&nodes, 2, 4); - let node_max_height = nodes.iter().map(|node| node.blocks.lock().unwrap().len()).max().unwrap() as u32; + let node_max_height = + nodes.iter().map(|node| node.blocks.lock().unwrap().len()).max().unwrap() as u32; connect_blocks(&nodes[0], node_max_height * 2 - nodes[0].best_block_info().1); connect_blocks(&nodes[1], node_max_height * 2 - nodes[1].best_block_info().1); connect_blocks(&nodes[2], node_max_height * 2 - nodes[2].best_block_info().1); connect_blocks(&nodes[3], node_max_height * 2 - nodes[3].best_block_info().1); connect_blocks(&nodes[4], node_max_height * 2 - nodes[4].best_block_info().1); - let (our_payment_preimage, duplicate_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2], &nodes[3]], 900_000); + let (our_payment_preimage, dup_payment_hash, ..) 
= + route_payment(&nodes[0], &[&nodes[1], &nodes[2], &nodes[3]], 900_000); - let payment_secret = nodes[4].node.create_inbound_payment_for_hash(duplicate_payment_hash, None, 7200, None).unwrap(); - let payment_params = PaymentParameters::from_node_id(nodes[4].node.get_our_node_id(), TEST_FINAL_CLTV) - .with_bolt11_features(nodes[4].node.bolt11_invoice_features()).unwrap(); + let payment_secret = + nodes[4].node.create_inbound_payment_for_hash(dup_payment_hash, None, 7200, None).unwrap(); + let payment_params = PaymentParameters::from_node_id(node_e_id, TEST_FINAL_CLTV) + .with_bolt11_features(nodes[4].node.bolt11_invoice_features()) + .unwrap(); let (route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[4], payment_params, 800_000); - send_along_route_with_secret(&nodes[0], route, &[&[&nodes[1], &nodes[2], &nodes[4]]], 800_000, duplicate_payment_hash, payment_secret); + let path: &[&[_]] = &[&[&nodes[1], &nodes[2], &nodes[4]]]; + send_along_route_with_secret(&nodes[0], route, path, 800_000, dup_payment_hash, payment_secret); // Now mine C's commitment transaction on node B and mine enough blocks to get the HTLC timeout // transaction (which we'll split in two so that we can resolve the HTLCs differently). @@ -5466,8 +4433,8 @@ pub fn test_duplicate_payment_hash_one_failure_one_success() { mine_transaction(&nodes[1], &commitment_txn[0]); check_closed_broadcast!(nodes[1], true); - check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[2].node.get_our_node_id()], 100000); + check_added_monitors(&nodes[1], 1); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [node_c_id], 100000); // Confirm blocks until both HTLCs expire and get a transaction which times out one HTLC. 
connect_blocks(&nodes[1], TEST_FINAL_CLTV + config.channel_config.cltv_expiry_delta as u32); @@ -5486,10 +4453,12 @@ pub fn test_duplicate_payment_hash_one_failure_one_success() { assert_eq!(tx.input[1].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT + 1); // Split the HTLC claim transaction into two, one for each HTLC. - if commitment_txn[0].output[tx.input[1].previous_output.vout as usize].value.to_sat() < 850 { + if commitment_txn[0].output[tx.input[1].previous_output.vout as usize].value.to_sat() < 850 + { tx.input.remove(1); } - if commitment_txn[0].output[tx.input[0].previous_output.vout as usize].value.to_sat() < 850 { + if commitment_txn[0].output[tx.input[0].previous_output.vout as usize].value.to_sat() < 850 + { tx.input.remove(0); } assert_eq!(tx.input.len(), 1); @@ -5498,1806 +4467,1437 @@ pub fn test_duplicate_payment_hash_one_failure_one_success() { // Now give node E the payment preimage and pass it back to C. nodes[4].node.claim_funds(our_payment_preimage); - expect_payment_claimed!(nodes[4], duplicate_payment_hash, 800_000); - check_added_monitors!(nodes[4], 1); - let updates = get_htlc_update_msgs!(nodes[4], nodes[2].node.get_our_node_id()); - nodes[2].node.handle_update_fulfill_htlc(nodes[4].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]); - let _cs_updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); + expect_payment_claimed!(nodes[4], dup_payment_hash, 800_000); + check_added_monitors(&nodes[4], 1); + let updates = get_htlc_update_msgs!(nodes[4], node_c_id); + nodes[2].node.handle_update_fulfill_htlc(node_e_id, &updates.update_fulfill_htlcs[0]); + let _cs_updates = get_htlc_update_msgs!(nodes[2], node_b_id); expect_payment_forwarded!(nodes[2], nodes[1], nodes[4], Some(196), false, false); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); commitment_signed_dance!(nodes[2], nodes[4], &updates.commitment_signed, false); - // Mine the commitment transaction on node C and get the 
HTLC success transactions it will - // generate (note that the ChannelMonitor doesn't differentiate between HTLCs once it has the - // preimage). - mine_transaction(&nodes[2], &commitment_txn[0]); - check_added_monitors!(nodes[2], 1); - check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000); - check_closed_broadcast(&nodes[2], 1, true); - - let htlc_success_txn: Vec<_> = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); - assert_eq!(htlc_success_txn.len(), 2); // ChannelMonitor: HTLC-Success txn (*2 due to 2-HTLC outputs) - check_spends!(htlc_success_txn[0], commitment_txn[0]); - check_spends!(htlc_success_txn[1], commitment_txn[0]); - assert_eq!(htlc_success_txn[0].input.len(), 1); - // Note that the witness script lengths are one longer than our constant as the CLTV value went - // to two bytes rather than one. - assert_eq!(htlc_success_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT + 1); - assert_eq!(htlc_success_txn[1].input.len(), 1); - assert_eq!(htlc_success_txn[1].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT + 1); - assert_ne!(htlc_success_txn[0].input[0].previous_output, htlc_success_txn[1].input[0].previous_output); - - let htlc_success_tx_to_confirm = - if htlc_success_txn[0].input[0].previous_output == htlc_timeout_tx.input[0].previous_output { - &htlc_success_txn[1] - } else { - &htlc_success_txn[0] - }; - assert_ne!(htlc_success_tx_to_confirm.input[0].previous_output, htlc_timeout_tx.input[0].previous_output); - - // Mine the HTLC timeout transaction on node B. 
- mine_transaction(&nodes[1], &htlc_timeout_tx); - connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingFailureType::Forward { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]); - let htlc_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); - assert!(htlc_updates.update_add_htlcs.is_empty()); - assert_eq!(htlc_updates.update_fail_htlcs.len(), 1); - let first_htlc_id = htlc_updates.update_fail_htlcs[0].htlc_id; - assert!(htlc_updates.update_fulfill_htlcs.is_empty()); - assert!(htlc_updates.update_fail_malformed_htlcs.is_empty()); - check_added_monitors!(nodes[1], 1); - - nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &htlc_updates.update_fail_htlcs[0]); - assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - commitment_signed_dance!(nodes[0], nodes[1], &htlc_updates.commitment_signed, false, true); - expect_payment_failed_with_update!(nodes[0], duplicate_payment_hash, false, chan_2.0.contents.short_channel_id, true); - - // Finally, give node B the HTLC success transaction and ensure it extracts the preimage to - // provide to node A. 
- mine_transaction(&nodes[1], htlc_success_tx_to_confirm); - expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(392), true, true); - let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); - assert!(updates.update_add_htlcs.is_empty()); - assert!(updates.update_fail_htlcs.is_empty()); - assert_eq!(updates.update_fulfill_htlcs.len(), 1); - assert_ne!(updates.update_fulfill_htlcs[0].htlc_id, first_htlc_id); - assert!(updates.update_fail_malformed_htlcs.is_empty()); - check_added_monitors!(nodes[1], 1); - - nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]); - commitment_signed_dance!(nodes[0], nodes[1], &updates.commitment_signed, false); - expect_payment_sent(&nodes[0], our_payment_preimage, None, true, true); -} - -#[xtest(feature = "_externalize_tests")] -pub fn test_dynamic_spendable_outputs_local_htlc_success_tx() { - let chanmon_cfgs = create_chanmon_cfgs(2); - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); - let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - - // Create some initial channels - let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1); - - let (payment_preimage, payment_hash, ..) 
= route_payment(&nodes[0], &[&nodes[1]], 9_000_000); - let local_txn = get_local_commitment_txn!(nodes[1], chan_1.2); - assert_eq!(local_txn.len(), 1); - assert_eq!(local_txn[0].input.len(), 1); - check_spends!(local_txn[0], chan_1.3); - - // Give B knowledge of preimage to be able to generate a local HTLC-Success Tx - nodes[1].node.claim_funds(payment_preimage); - expect_payment_claimed!(nodes[1], payment_hash, 9_000_000); - check_added_monitors!(nodes[1], 1); - - mine_transaction(&nodes[1], &local_txn[0]); - check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000); - let events = nodes[1].node.get_and_clear_pending_msg_events(); - match events[0] { - MessageSendEvent::UpdateHTLCs { .. } => {}, - _ => panic!("Unexpected event"), - } - match events[2] { - MessageSendEvent::BroadcastChannelUpdate { .. } => {}, - _ => panic!("Unexepected event"), - } - let node_tx = { - let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap(); - assert_eq!(node_txn.len(), 1); - assert_eq!(node_txn[0].input.len(), 1); - assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT); - check_spends!(node_txn[0], local_txn[0]); - node_txn[0].clone() - }; - - mine_transaction(&nodes[1], &node_tx); - connect_blocks(&nodes[1], BREAKDOWN_TIMEOUT as u32 - 1); - - // Verify that B is able to spend its own HTLC-Success tx thanks to spendable output event given back by its ChannelMonitor - let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager); - assert_eq!(spend_txn.len(), 1); - assert_eq!(spend_txn[0].input.len(), 1); - check_spends!(spend_txn[0], node_tx); - assert_eq!(spend_txn[0].input[0].sequence.0, BREAKDOWN_TIMEOUT as u32); -} - -fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, announce_latest: bool) { - // Test that we fail backwards the full set of HTLCs we need to when remote broadcasts an - // 
unrevoked commitment transaction. - // This includes HTLCs which were below the dust threshold as well as HTLCs which were awaiting - // a remote RAA before they could be failed backwards (and combinations thereof). - // We also test duplicate-hash HTLCs by adding two nodes on each side of the target nodes which - // use the same payment hashes. - // Thus, we use a six-node network: - // - // A \ / E - // - C - D - - // B / \ F - // And test where C fails back to A/B when D announces its latest commitment transaction - let chanmon_cfgs = create_chanmon_cfgs(6); - let node_cfgs = create_node_cfgs(6, &chanmon_cfgs); - // When this test was written, the default base fee floated based on the HTLC count. - // It is now fixed, so we simply set the fee to the expected value here. - let mut config = test_default_channel_config(); - config.channel_config.forwarding_fee_base_msat = 196; - let node_chanmgrs = create_node_chanmgrs(6, &node_cfgs, - &[Some(config.clone()), Some(config.clone()), Some(config.clone()), Some(config.clone()), Some(config.clone()), Some(config.clone())]); - let nodes = create_network(6, &node_cfgs, &node_chanmgrs); - - let _chan_0_2 = create_announced_chan_between_nodes(&nodes, 0, 2); - let _chan_1_2 = create_announced_chan_between_nodes(&nodes, 1, 2); - let chan_2_3 = create_announced_chan_between_nodes(&nodes, 2, 3); - let chan_3_4 = create_announced_chan_between_nodes(&nodes, 3, 4); - let chan_3_5 = create_announced_chan_between_nodes(&nodes, 3, 5); - - // Rebalance and check output sanity... 
- send_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 500000); - send_payment(&nodes[1], &[&nodes[2], &nodes[3], &nodes[5]], 500000); - assert_eq!(get_local_commitment_txn!(nodes[3], chan_2_3.2)[0].output.len(), 2); - - let ds_dust_limit = nodes[3].node.per_peer_state.read().unwrap().get(&nodes[2].node.get_our_node_id()) - .unwrap().lock().unwrap().channel_by_id.get(&chan_2_3.2).unwrap().context().holder_dust_limit_satoshis; - // 0th HTLC: - let (_, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], ds_dust_limit*1000); // not added < dust limit + HTLC tx fee - // 1st HTLC: - let (_, payment_hash_2, ..) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], ds_dust_limit*1000); // not added < dust limit + HTLC tx fee - let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[5], ds_dust_limit*1000); - // 2nd HTLC: - send_along_route_with_secret(&nodes[1], route.clone(), &[&[&nodes[2], &nodes[3], &nodes[5]]], ds_dust_limit*1000, payment_hash_1, nodes[5].node.create_inbound_payment_for_hash(payment_hash_1, None, 7200, None).unwrap()); // not added < dust limit + HTLC tx fee - // 3rd HTLC: - send_along_route_with_secret(&nodes[1], route, &[&[&nodes[2], &nodes[3], &nodes[5]]], ds_dust_limit*1000, payment_hash_2, nodes[5].node.create_inbound_payment_for_hash(payment_hash_2, None, 7200, None).unwrap()); // not added < dust limit + HTLC tx fee - // 4th HTLC: - let (_, payment_hash_3, ..) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 1000000); - // 5th HTLC: - let (_, payment_hash_4, ..) 
= route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 1000000); - let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[5], 1000000); - // 6th HTLC: - send_along_route_with_secret(&nodes[1], route.clone(), &[&[&nodes[2], &nodes[3], &nodes[5]]], 1000000, payment_hash_3, nodes[5].node.create_inbound_payment_for_hash(payment_hash_3, None, 7200, None).unwrap()); - // 7th HTLC: - send_along_route_with_secret(&nodes[1], route, &[&[&nodes[2], &nodes[3], &nodes[5]]], 1000000, payment_hash_4, nodes[5].node.create_inbound_payment_for_hash(payment_hash_4, None, 7200, None).unwrap()); - - // 8th HTLC: - let (_, payment_hash_5, ..) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 1000000); - // 9th HTLC: - let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[5], ds_dust_limit*1000); - send_along_route_with_secret(&nodes[1], route, &[&[&nodes[2], &nodes[3], &nodes[5]]], ds_dust_limit*1000, payment_hash_5, nodes[5].node.create_inbound_payment_for_hash(payment_hash_5, None, 7200, None).unwrap()); // not added < dust limit + HTLC tx fee - - // 10th HTLC: - let (_, payment_hash_6, ..) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], ds_dust_limit*1000); // not added < dust limit + HTLC tx fee - // 11th HTLC: - let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[5], 1000000); - send_along_route_with_secret(&nodes[1], route, &[&[&nodes[2], &nodes[3], &nodes[5]]], 1000000, payment_hash_6, nodes[5].node.create_inbound_payment_for_hash(payment_hash_6, None, 7200, None).unwrap()); - - // Double-check that six of the new HTLC were added - // We now have six HTLCs pending over the dust limit and six HTLCs under the dust limit (ie, - // with to_local and to_remote outputs, 8 outputs and 6 HTLCs not included). 
- assert_eq!(get_local_commitment_txn!(nodes[3], chan_2_3.2).len(), 1); - assert_eq!(get_local_commitment_txn!(nodes[3], chan_2_3.2)[0].output.len(), 8); - - // Now fail back three of the over-dust-limit and three of the under-dust-limit payments in one go. - // Fail 0th below-dust, 4th above-dust, 8th above-dust, 10th below-dust HTLCs - nodes[4].node.fail_htlc_backwards(&payment_hash_1); - nodes[4].node.fail_htlc_backwards(&payment_hash_3); - nodes[4].node.fail_htlc_backwards(&payment_hash_5); - nodes[4].node.fail_htlc_backwards(&payment_hash_6); - check_added_monitors!(nodes[4], 0); - - let failed_destinations = vec![ - HTLCHandlingFailureType::Receive { payment_hash: payment_hash_1 }, - HTLCHandlingFailureType::Receive { payment_hash: payment_hash_3 }, - HTLCHandlingFailureType::Receive { payment_hash: payment_hash_5 }, - HTLCHandlingFailureType::Receive { payment_hash: payment_hash_6 }, - ]; - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[4], failed_destinations); - check_added_monitors!(nodes[4], 1); - - let four_removes = get_htlc_update_msgs!(nodes[4], nodes[3].node.get_our_node_id()); - nodes[3].node.handle_update_fail_htlc(nodes[4].node.get_our_node_id(), &four_removes.update_fail_htlcs[0]); - nodes[3].node.handle_update_fail_htlc(nodes[4].node.get_our_node_id(), &four_removes.update_fail_htlcs[1]); - nodes[3].node.handle_update_fail_htlc(nodes[4].node.get_our_node_id(), &four_removes.update_fail_htlcs[2]); - nodes[3].node.handle_update_fail_htlc(nodes[4].node.get_our_node_id(), &four_removes.update_fail_htlcs[3]); - commitment_signed_dance!(nodes[3], nodes[4], four_removes.commitment_signed, false); - - // Fail 3rd below-dust and 7th above-dust HTLCs - nodes[5].node.fail_htlc_backwards(&payment_hash_2); - nodes[5].node.fail_htlc_backwards(&payment_hash_4); - check_added_monitors!(nodes[5], 0); - - let failed_destinations_2 = vec![ - HTLCHandlingFailureType::Receive { payment_hash: payment_hash_2 }, - HTLCHandlingFailureType::Receive { 
payment_hash: payment_hash_4 }, - ]; - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[5], failed_destinations_2); - check_added_monitors!(nodes[5], 1); - - let two_removes = get_htlc_update_msgs!(nodes[5], nodes[3].node.get_our_node_id()); - nodes[3].node.handle_update_fail_htlc(nodes[5].node.get_our_node_id(), &two_removes.update_fail_htlcs[0]); - nodes[3].node.handle_update_fail_htlc(nodes[5].node.get_our_node_id(), &two_removes.update_fail_htlcs[1]); - commitment_signed_dance!(nodes[3], nodes[5], two_removes.commitment_signed, false); - - let ds_prev_commitment_tx = get_local_commitment_txn!(nodes[3], chan_2_3.2); - - // After 4 and 2 removes respectively above in nodes[4] and nodes[5], nodes[3] should receive 6 PaymentForwardedFailed events - let failed_destinations_3 = vec![ - HTLCHandlingFailureType::Forward { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 }, - HTLCHandlingFailureType::Forward { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 }, - HTLCHandlingFailureType::Forward { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 }, - HTLCHandlingFailureType::Forward { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 }, - HTLCHandlingFailureType::Forward { node_id: Some(nodes[5].node.get_our_node_id()), channel_id: chan_3_5.2 }, - HTLCHandlingFailureType::Forward { node_id: Some(nodes[5].node.get_our_node_id()), channel_id: chan_3_5.2 }, - ]; - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[3], failed_destinations_3); - check_added_monitors!(nodes[3], 1); - let six_removes = get_htlc_update_msgs!(nodes[3], nodes[2].node.get_our_node_id()); - nodes[2].node.handle_update_fail_htlc(nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[0]); - nodes[2].node.handle_update_fail_htlc(nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[1]); - nodes[2].node.handle_update_fail_htlc(nodes[3].node.get_our_node_id(), 
&six_removes.update_fail_htlcs[2]); - nodes[2].node.handle_update_fail_htlc(nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[3]); - nodes[2].node.handle_update_fail_htlc(nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[4]); - nodes[2].node.handle_update_fail_htlc(nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[5]); - if deliver_last_raa { - commitment_signed_dance!(nodes[2], nodes[3], six_removes.commitment_signed, false); - } else { - let _cs_last_raa = commitment_signed_dance!(nodes[2], nodes[3], six_removes.commitment_signed, false, true, false, true); - } - - // D's latest commitment transaction now contains 1st + 2nd + 9th HTLCs (implicitly, they're - // below the dust limit) and the 5th + 6th + 11th HTLCs. It has failed back the 0th, 3rd, 4th, - // 7th, 8th, and 10th, but as we haven't yet delivered the final RAA to C, the fails haven't - // propagated back to A/B yet (and D has two unrevoked commitment transactions). - // - // We now broadcast the latest commitment transaction, which *should* result in failures for - // the 0th, 1st, 2nd, 3rd, 4th, 7th, 8th, 9th, and 10th HTLCs, ie all the below-dust HTLCs and - // the non-broadcast above-dust HTLCs. - // - // Alternatively, we may broadcast the previous commitment transaction, which should only - // result in failures for the below-dust HTLCs, ie the 0th, 1st, 2nd, 3rd, 9th, and 10th HTLCs. - let ds_last_commitment_tx = get_local_commitment_txn!(nodes[3], chan_2_3.2); - - if announce_latest { - mine_transaction(&nodes[2], &ds_last_commitment_tx[0]); - } else { - mine_transaction(&nodes[2], &ds_prev_commitment_tx[0]); - } - let events = nodes[2].node.get_and_clear_pending_events(); - let close_event = if deliver_last_raa { - assert_eq!(events.len(), 2 + 6); - events.last().clone().unwrap() - } else { - assert_eq!(events.len(), 1); - events.last().clone().unwrap() - }; - match close_event { - Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. 
} => {} - _ => panic!("Unexpected event"), - } - - connect_blocks(&nodes[2], ANTI_REORG_DELAY - 1); - check_closed_broadcast!(nodes[2], true); - if deliver_last_raa { - expect_pending_htlcs_forwardable_from_events!(nodes[2], events[1..2], true); - - let expected_destinations: Vec = repeat(HTLCHandlingFailureType::Forward { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }).take(3).collect(); - expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), expected_destinations); - } else { - let expected_destinations: Vec = if announce_latest { - repeat(HTLCHandlingFailureType::Forward { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }).take(9).collect() - } else { - repeat(HTLCHandlingFailureType::Forward { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }).take(6).collect() - }; - - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], expected_destinations); - } - check_added_monitors!(nodes[2], 3); - - let cs_msgs = nodes[2].node.get_and_clear_pending_msg_events(); - assert_eq!(cs_msgs.len(), 2); - let mut a_done = false; - for msg in cs_msgs { - match msg { - MessageSendEvent::UpdateHTLCs { ref node_id, channel_id: _, ref updates } => { - // Both under-dust HTLCs and the one above-dust HTLC that we had already failed - // should be failed-backwards here. 
- let target = if *node_id == nodes[0].node.get_our_node_id() { - // If announce_latest, expect 0th, 1st, 4th, 8th, 10th HTLCs, else only 0th, 1st, 10th below-dust HTLCs - for htlc in &updates.update_fail_htlcs { - assert!(htlc.htlc_id == 1 || htlc.htlc_id == 2 || htlc.htlc_id == 6 || if announce_latest { htlc.htlc_id == 3 || htlc.htlc_id == 5 } else { false }); - } - assert_eq!(updates.update_fail_htlcs.len(), if announce_latest { 5 } else { 3 }); - assert!(!a_done); - a_done = true; - &nodes[0] - } else { - // If announce_latest, expect 2nd, 3rd, 7th, 9th HTLCs, else only 2nd, 3rd, 9th below-dust HTLCs - for htlc in &updates.update_fail_htlcs { - assert!(htlc.htlc_id == 1 || htlc.htlc_id == 2 || htlc.htlc_id == 5 || if announce_latest { htlc.htlc_id == 4 } else { false }); - } - assert_eq!(*node_id, nodes[1].node.get_our_node_id()); - assert_eq!(updates.update_fail_htlcs.len(), if announce_latest { 4 } else { 3 }); - &nodes[1] - }; - target.node.handle_update_fail_htlc(nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]); - target.node.handle_update_fail_htlc(nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[1]); - target.node.handle_update_fail_htlc(nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[2]); - if announce_latest { - target.node.handle_update_fail_htlc(nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[3]); - if *node_id == nodes[0].node.get_our_node_id() { - target.node.handle_update_fail_htlc(nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[4]); - } - } - commitment_signed_dance!(target, nodes[2], updates.commitment_signed, false, true); - }, - _ => panic!("Unexpected event"), - } - } - - let as_events = nodes[0].node.get_and_clear_pending_events(); - assert_eq!(as_events.len(), if announce_latest { 10 } else { 6 }); - let mut as_faileds = new_hash_set(); - let mut as_updates = 0; - for event in as_events.iter() { - if let &Event::PaymentPathFailed { ref payment_hash, ref payment_failed_permanently, 
ref failure, .. } = event { - assert!(as_faileds.insert(*payment_hash)); - if *payment_hash != payment_hash_2 { - assert_eq!(*payment_failed_permanently, deliver_last_raa); - } else { - assert!(!payment_failed_permanently); - } - if let PathFailure::OnPath { network_update: Some(_) } = failure { - as_updates += 1; - } - } else if let &Event::PaymentFailed { .. } = event { - } else { panic!("Unexpected event"); } - } - assert!(as_faileds.contains(&payment_hash_1)); - assert!(as_faileds.contains(&payment_hash_2)); - if announce_latest { - assert!(as_faileds.contains(&payment_hash_3)); - assert!(as_faileds.contains(&payment_hash_5)); - } - assert!(as_faileds.contains(&payment_hash_6)); + // Mine the commitment transaction on node C and get the HTLC success transactions it will + // generate (note that the ChannelMonitor doesn't differentiate between HTLCs once it has the + // preimage). + mine_transaction(&nodes[2], &commitment_txn[0]); + check_added_monitors(&nodes[2], 1); + check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [node_b_id], 100000); + check_closed_broadcast(&nodes[2], 1, true); - let bs_events = nodes[1].node.get_and_clear_pending_events(); - assert_eq!(bs_events.len(), if announce_latest { 8 } else { 6 }); - let mut bs_faileds = new_hash_set(); - let mut bs_updates = 0; - for event in bs_events.iter() { - if let &Event::PaymentPathFailed { ref payment_hash, ref payment_failed_permanently, ref failure, .. } = event { - assert!(bs_faileds.insert(*payment_hash)); - if *payment_hash != payment_hash_1 && *payment_hash != payment_hash_5 { - assert_eq!(*payment_failed_permanently, deliver_last_raa); - } else { - assert!(!payment_failed_permanently); - } - if let PathFailure::OnPath { network_update: Some(_) } = failure { - bs_updates += 1; - } - } else if let &Event::PaymentFailed { .. 
} = event { - } else { panic!("Unexpected event"); } - } - assert!(bs_faileds.contains(&payment_hash_1)); - assert!(bs_faileds.contains(&payment_hash_2)); - if announce_latest { - assert!(bs_faileds.contains(&payment_hash_4)); - } - assert!(bs_faileds.contains(&payment_hash_5)); + let htlc_success_txn: Vec<_> = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); + assert_eq!(htlc_success_txn.len(), 2); // ChannelMonitor: HTLC-Success txn (*2 due to 2-HTLC outputs) + check_spends!(htlc_success_txn[0], commitment_txn[0]); + check_spends!(htlc_success_txn[1], commitment_txn[0]); + assert_eq!(htlc_success_txn[0].input.len(), 1); + // Note that the witness script lengths are one longer than our constant as the CLTV value went + // to two bytes rather than one. + assert_eq!( + htlc_success_txn[0].input[0].witness.last().unwrap().len(), + ACCEPTED_HTLC_SCRIPT_WEIGHT + 1 + ); + assert_eq!(htlc_success_txn[1].input.len(), 1); + assert_eq!( + htlc_success_txn[1].input[0].witness.last().unwrap().len(), + ACCEPTED_HTLC_SCRIPT_WEIGHT + 1 + ); + assert_ne!( + htlc_success_txn[0].input[0].previous_output, + htlc_success_txn[1].input[0].previous_output + ); - // For each HTLC which was not failed-back by normal process (ie deliver_last_raa), we should - // get a NetworkUpdate. A should have gotten 4 HTLCs which were failed-back due to - // unknown-preimage-etc, B should have gotten 2. Thus, in the - // announce_latest && deliver_last_raa case, we should have 5-4=1 and 4-2=2 NetworkUpdates. 
- assert_eq!(as_updates, if deliver_last_raa { 1 } else if !announce_latest { 3 } else { 5 }); - assert_eq!(bs_updates, if deliver_last_raa { 2 } else if !announce_latest { 3 } else { 4 }); -} + let htlc_success_tx_to_confirm = if htlc_success_txn[0].input[0].previous_output + == htlc_timeout_tx.input[0].previous_output + { + &htlc_success_txn[1] + } else { + &htlc_success_txn[0] + }; + assert_ne!( + htlc_success_tx_to_confirm.input[0].previous_output, + htlc_timeout_tx.input[0].previous_output + ); -#[xtest(feature = "_externalize_tests")] -pub fn test_fail_backwards_latest_remote_announce_a() { - do_test_fail_backwards_unrevoked_remote_announce(false, true); -} + // Mine the HTLC timeout transaction on node B. + mine_transaction(&nodes[1], &htlc_timeout_tx); + connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!( + nodes[1], + vec![HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_2.2 }] + ); + let htlc_updates = get_htlc_update_msgs!(nodes[1], node_a_id); + assert!(htlc_updates.update_add_htlcs.is_empty()); + assert_eq!(htlc_updates.update_fail_htlcs.len(), 1); + let first_htlc_id = htlc_updates.update_fail_htlcs[0].htlc_id; + assert!(htlc_updates.update_fulfill_htlcs.is_empty()); + assert!(htlc_updates.update_fail_malformed_htlcs.is_empty()); + check_added_monitors(&nodes[1], 1); -#[xtest(feature = "_externalize_tests")] -pub fn test_fail_backwards_latest_remote_announce_b() { - do_test_fail_backwards_unrevoked_remote_announce(true, true); -} + nodes[0].node.handle_update_fail_htlc(node_b_id, &htlc_updates.update_fail_htlcs[0]); + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); + commitment_signed_dance!(nodes[0], nodes[1], &htlc_updates.commitment_signed, false, true); + let failing_scid = chan_2.0.contents.short_channel_id; + expect_payment_failed_with_update!(nodes[0], dup_payment_hash, false, failing_scid, true); -#[xtest(feature = 
"_externalize_tests")] -pub fn test_fail_backwards_previous_remote_announce() { - do_test_fail_backwards_unrevoked_remote_announce(false, false); - // Note that true, true doesn't make sense as it implies we announce a revoked state, which is - // tested for in test_commitment_revoked_fail_backward_exhaustive() + // Finally, give node B the HTLC success transaction and ensure it extracts the preimage to + // provide to node A. + mine_transaction(&nodes[1], htlc_success_tx_to_confirm); + expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(392), true, true); + let updates = get_htlc_update_msgs!(nodes[1], node_a_id); + assert!(updates.update_add_htlcs.is_empty()); + assert!(updates.update_fail_htlcs.is_empty()); + assert_eq!(updates.update_fulfill_htlcs.len(), 1); + assert_ne!(updates.update_fulfill_htlcs[0].htlc_id, first_htlc_id); + assert!(updates.update_fail_malformed_htlcs.is_empty()); + check_added_monitors(&nodes[1], 1); + + nodes[0].node.handle_update_fulfill_htlc(node_b_id, &updates.update_fulfill_htlcs[0]); + commitment_signed_dance!(nodes[0], nodes[1], &updates.commitment_signed, false); + expect_payment_sent(&nodes[0], our_payment_preimage, None, true, true); } #[xtest(feature = "_externalize_tests")] -pub fn test_dynamic_spendable_outputs_local_htlc_timeout_tx() { +pub fn test_dynamic_spendable_outputs_local_htlc_success_tx() { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + // Create some initial channels let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1); - let (_, our_payment_hash, ..) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 9000000); - let local_txn = get_local_commitment_txn!(nodes[0], chan_1.2); + let (payment_preimage, payment_hash, ..) 
= route_payment(&nodes[0], &[&nodes[1]], 9_000_000); + let local_txn = get_local_commitment_txn!(nodes[1], chan_1.2); + assert_eq!(local_txn.len(), 1); assert_eq!(local_txn[0].input.len(), 1); check_spends!(local_txn[0], chan_1.3); - // Timeout HTLC on A's chain and so it can generate a HTLC-Timeout tx - mine_transaction(&nodes[0], &local_txn[0]); - check_closed_broadcast!(nodes[0], true); - check_added_monitors!(nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000); - connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires + // Give B knowledge of preimage to be able to generate a local HTLC-Success Tx + nodes[1].node.claim_funds(payment_preimage); + expect_payment_claimed!(nodes[1], payment_hash, 9_000_000); + check_added_monitors(&nodes[1], 1); - let htlc_timeout = { - let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap(); + mine_transaction(&nodes[1], &local_txn[0]); + check_added_monitors(&nodes[1], 1); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [node_a_id], 100000); + let events = nodes[1].node.get_and_clear_pending_msg_events(); + match events[0] { + MessageSendEvent::UpdateHTLCs { .. } => {}, + _ => panic!("Unexpected event"), + } + match events[2] { + MessageSendEvent::BroadcastChannelUpdate { .. 
} => {}, + _ => panic!("Unexpected event"), + let node_tx = { + let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap(); assert_eq!(node_txn.len(), 1); assert_eq!(node_txn[0].input.len(), 1); - assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); + assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT); check_spends!(node_txn[0], local_txn[0]); node_txn[0].clone() }; - mine_transaction(&nodes[0], &htlc_timeout); - connect_blocks(&nodes[0], BREAKDOWN_TIMEOUT as u32 - 1); - expect_payment_failed!(nodes[0], our_payment_hash, false); + mine_transaction(&nodes[1], &node_tx); + connect_blocks(&nodes[1], BREAKDOWN_TIMEOUT as u32 - 1); - // Verify that A is able to spend its own HTLC-Timeout tx thanks to spendable output event given back by its ChannelMonitor - let spend_txn = check_spendable_outputs!(nodes[0], node_cfgs[0].keys_manager); - assert_eq!(spend_txn.len(), 3); - check_spends!(spend_txn[0], local_txn[0]); - assert_eq!(spend_txn[1].input.len(), 1); - check_spends!(spend_txn[1], htlc_timeout); - assert_eq!(spend_txn[1].input[0].sequence.0, BREAKDOWN_TIMEOUT as u32); - assert_eq!(spend_txn[2].input.len(), 2); - check_spends!(spend_txn[2], local_txn[0], htlc_timeout); - assert!(spend_txn[2].input[0].sequence.0 == BREAKDOWN_TIMEOUT as u32 || - spend_txn[2].input[1].sequence.0 == BREAKDOWN_TIMEOUT as u32); + // Verify that B is able to spend its own HTLC-Success tx thanks to spendable output event given back by its ChannelMonitor + let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager); + assert_eq!(spend_txn.len(), 1); + assert_eq!(spend_txn[0].input.len(), 1); + check_spends!(spend_txn[0], node_tx); + assert_eq!(spend_txn[0].input[0].sequence.0, BREAKDOWN_TIMEOUT as u32); } -#[xtest(feature = "_externalize_tests")] -pub fn test_key_derivation_params() { - // This test is a copy of test_dynamic_spendable_outputs_local_htlc_timeout_tx, with a key - // 
manager rotation to test that `channel_keys_id` returned in - // [`SpendableOutputDescriptor::DelayedPaymentOutput`] let us re-derive the channel key set to - // then derive a `delayed_payment_key`. - - let chanmon_cfgs = create_chanmon_cfgs(3); - - // We manually create the node configuration to backup the seed. - let seed = [42; 32]; - let keys_manager = test_utils::TestKeysInterface::new(&seed, Network::Testnet); - let chain_monitor = test_utils::TestChainMonitor::new(Some(&chanmon_cfgs[0].chain_source), &chanmon_cfgs[0].tx_broadcaster, &chanmon_cfgs[0].logger, &chanmon_cfgs[0].fee_estimator, &chanmon_cfgs[0].persister, &keys_manager); - let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, &chanmon_cfgs[0].logger)); - let scorer = RwLock::new(test_utils::TestScorer::new()); - let router = test_utils::TestRouter::new(network_graph.clone(), &chanmon_cfgs[0].logger, &scorer); - let message_router = test_utils::TestMessageRouter::new(network_graph.clone(), &keys_manager); - let node = NodeCfg { chain_source: &chanmon_cfgs[0].chain_source, logger: &chanmon_cfgs[0].logger, tx_broadcaster: &chanmon_cfgs[0].tx_broadcaster, fee_estimator: &chanmon_cfgs[0].fee_estimator, router, message_router, chain_monitor, keys_manager: &keys_manager, network_graph, node_seed: seed, override_init_features: alloc::rc::Rc::new(core::cell::RefCell::new(None)) }; - let mut node_cfgs = create_node_cfgs(3, &chanmon_cfgs); - node_cfgs.remove(0); - node_cfgs.insert(0, node); - - let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); - let nodes = create_network(3, &node_cfgs, &node_chanmgrs); - - // Create some initial channels - // Create a dummy channel to advance index by one and thus test re-derivation correctness - // for node 0 - let chan_0 = create_announced_chan_between_nodes(&nodes, 0, 2); - let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1); - assert_ne!(chan_0.3.output[0].script_pubkey, chan_1.3.output[0].script_pubkey); +fn 
do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, announce_latest: bool) { + // Test that we fail backwards the full set of HTLCs we need to when remote broadcasts an + // unrevoked commitment transaction. + // This includes HTLCs which were below the dust threshold as well as HTLCs which were awaiting + // a remote RAA before they could be failed backwards (and combinations thereof). + // We also test duplicate-hash HTLCs by adding two nodes on each side of the target nodes which + // use the same payment hashes. + // Thus, we use a six-node network: + // + // A \ / E + // - C - D - + // B / \ F + // And test where C fails back to A/B when D announces its latest commitment transaction + let chanmon_cfgs = create_chanmon_cfgs(6); + let node_cfgs = create_node_cfgs(6, &chanmon_cfgs); + // When this test was written, the default base fee floated based on the HTLC count. + // It is now fixed, so we simply set the fee to the expected value here. + let mut config = test_default_channel_config(); + config.channel_config.forwarding_fee_base_msat = 196; - // Ensure all nodes are at the same height - let node_max_height = nodes.iter().map(|node| node.blocks.lock().unwrap().len()).max().unwrap() as u32; - connect_blocks(&nodes[0], node_max_height - nodes[0].best_block_info().1); - connect_blocks(&nodes[1], node_max_height - nodes[1].best_block_info().1); - connect_blocks(&nodes[2], node_max_height - nodes[2].best_block_info().1); + let configs = [ + Some(config.clone()), + Some(config.clone()), + Some(config.clone()), + Some(config.clone()), + Some(config.clone()), + Some(config.clone()), + ]; + let node_chanmgrs = create_node_chanmgrs(6, &node_cfgs, &configs); + let nodes = create_network(6, &node_cfgs, &node_chanmgrs); - let (_, our_payment_hash, ..) 
= route_payment(&nodes[0], &vec!(&nodes[1])[..], 9000000); - let local_txn_0 = get_local_commitment_txn!(nodes[0], chan_0.2); - let local_txn_1 = get_local_commitment_txn!(nodes[0], chan_1.2); - assert_eq!(local_txn_1[0].input.len(), 1); - check_spends!(local_txn_1[0], chan_1.3); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + let node_d_id = nodes[3].node.get_our_node_id(); + let node_e_id = nodes[4].node.get_our_node_id(); + let node_f_id = nodes[5].node.get_our_node_id(); - // We check funding pubkey are unique - let (from_0_funding_key_0, from_0_funding_key_1) = (PublicKey::from_slice(&local_txn_0[0].input[0].witness.to_vec()[3][2..35]), PublicKey::from_slice(&local_txn_0[0].input[0].witness.to_vec()[3][36..69])); - let (from_1_funding_key_0, from_1_funding_key_1) = (PublicKey::from_slice(&local_txn_1[0].input[0].witness.to_vec()[3][2..35]), PublicKey::from_slice(&local_txn_1[0].input[0].witness.to_vec()[3][36..69])); - if from_0_funding_key_0 == from_1_funding_key_0 - || from_0_funding_key_0 == from_1_funding_key_1 - || from_0_funding_key_1 == from_1_funding_key_0 - || from_0_funding_key_1 == from_1_funding_key_1 { - panic!("Funding pubkeys aren't unique"); - } + let _chan_0_2 = create_announced_chan_between_nodes(&nodes, 0, 2); + let _chan_1_2 = create_announced_chan_between_nodes(&nodes, 1, 2); + let chan_2_3 = create_announced_chan_between_nodes(&nodes, 2, 3); + let chan_3_4 = create_announced_chan_between_nodes(&nodes, 3, 4); + let chan_3_5 = create_announced_chan_between_nodes(&nodes, 3, 5); - // Timeout HTLC on A's chain and so it can generate a HTLC-Timeout tx - mine_transaction(&nodes[0], &local_txn_1[0]); - connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires - check_closed_broadcast!(nodes[0], true); - check_added_monitors!(nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, 
[nodes[1].node.get_our_node_id()], 100000); + // Rebalance and check output sanity... + send_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 500000); + send_payment(&nodes[1], &[&nodes[2], &nodes[3], &nodes[5]], 500000); + assert_eq!(get_local_commitment_txn!(nodes[3], chan_2_3.2)[0].output.len(), 2); - let htlc_timeout = { - let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap(); - assert_eq!(node_txn.len(), 1); - assert_eq!(node_txn[0].input.len(), 1); - assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); - check_spends!(node_txn[0], local_txn_1[0]); - node_txn[0].clone() + let ds_dust_limit = { + let per_peer_state_lock; + let mut peer_state_lock; + let chan = + get_channel_ref!(nodes[3], nodes[2], per_peer_state_lock, peer_state_lock, chan_2_3.2); + chan.context().holder_dust_limit_satoshis }; - mine_transaction(&nodes[0], &htlc_timeout); - connect_blocks(&nodes[0], BREAKDOWN_TIMEOUT as u32 - 1); - expect_payment_failed!(nodes[0], our_payment_hash, false); - - // Verify that A is able to spend its own HTLC-Timeout tx thanks to spendable output event given back by its ChannelMonitor - let new_keys_manager = test_utils::TestKeysInterface::new(&seed, Network::Testnet); - let spend_txn = check_spendable_outputs!(nodes[0], new_keys_manager); - assert_eq!(spend_txn.len(), 3); - check_spends!(spend_txn[0], local_txn_1[0]); - assert_eq!(spend_txn[1].input.len(), 1); - check_spends!(spend_txn[1], htlc_timeout); - assert_eq!(spend_txn[1].input[0].sequence.0, BREAKDOWN_TIMEOUT as u32); - assert_eq!(spend_txn[2].input.len(), 2); - check_spends!(spend_txn[2], local_txn_1[0], htlc_timeout); - assert!(spend_txn[2].input[0].sequence.0 == BREAKDOWN_TIMEOUT as u32 || - spend_txn[2].input[1].sequence.0 == BREAKDOWN_TIMEOUT as u32); -} - -#[xtest(feature = "_externalize_tests")] -pub fn test_static_output_closing_tx() { - let chanmon_cfgs = create_chanmon_cfgs(2); - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - 
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); - let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - - let chan = create_announced_chan_between_nodes(&nodes, 0, 1); + // 0th HTLC (not added - smaller than dust limit + HTLC tx fee): + let path_4: &[_] = &[&nodes[2], &nodes[3], &nodes[4]]; + let (_, hash_1, ..) = route_payment(&nodes[0], path_4, ds_dust_limit * 1000); - send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000); - let closing_tx = close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true).2; + // 1st HTLC (not added - smaller than dust limit + HTLC tx fee): + let (_, hash_2, ..) = route_payment(&nodes[0], path_4, ds_dust_limit * 1000); + let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[5], ds_dust_limit * 1000); - mine_transaction(&nodes[0], &closing_tx); - check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000); - connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1); + // 2nd HTLC (not added - smaller than dust limit + HTLC tx fee): + let path_5: &[&[_]] = &[&[&nodes[2], &nodes[3], &nodes[5]]]; + let secret = nodes[5].node.create_inbound_payment_for_hash(hash_1, None, 7200, None).unwrap(); + let route_2 = route.clone(); + send_along_route_with_secret(&nodes[1], route_2, path_5, ds_dust_limit * 1000, hash_1, secret); - let spend_txn = check_spendable_outputs!(nodes[0], node_cfgs[0].keys_manager); - assert_eq!(spend_txn.len(), 1); - check_spends!(spend_txn[0], closing_tx); + // 3rd HTLC (not added - smaller than dust limit + HTLC tx fee): + let secret = nodes[5].node.create_inbound_payment_for_hash(hash_2, None, 7200, None).unwrap(); + send_along_route_with_secret(&nodes[1], route, path_5, ds_dust_limit * 1000, hash_2, secret); - mine_transaction(&nodes[1], &closing_tx); - check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 100000); - connect_blocks(&nodes[1], 
ANTI_REORG_DELAY - 1); + // 4th HTLC: + let (_, hash_3, ..) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 1000000); - let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager); - assert_eq!(spend_txn.len(), 1); - check_spends!(spend_txn[0], closing_tx); -} + // 5th HTLC: + let (_, hash_4, ..) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 1000000); + let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[5], 1000000); -fn do_htlc_claim_local_commitment_only(use_dust: bool) { - let chanmon_cfgs = create_chanmon_cfgs(2); - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); - let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - let chan = create_announced_chan_between_nodes(&nodes, 0, 1); + // 6th HTLC: + let secret = nodes[5].node.create_inbound_payment_for_hash(hash_3, None, 7200, None).unwrap(); + send_along_route_with_secret(&nodes[1], route.clone(), path_5, 1000000, hash_3, secret); - let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], if use_dust { 50000 } else { 3_000_000 }); + // 7th HTLC: + let secret = nodes[5].node.create_inbound_payment_for_hash(hash_4, None, 7200, None).unwrap(); + send_along_route_with_secret(&nodes[1], route, path_5, 1000000, hash_4, secret); - // Claim the payment, but don't deliver A's commitment_signed, resulting in the HTLC only being - // present in B's local commitment transaction, but none of A's commitment transactions. - nodes[1].node.claim_funds(payment_preimage); - check_added_monitors!(nodes[1], 1); - expect_payment_claimed!(nodes[1], payment_hash, if use_dust { 50000 } else { 3_000_000 }); + // 8th HTLC: + let (_, hash_5, ..) 
= route_payment(&nodes[0], path_4, 1000000); - let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); - nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]); - expect_payment_sent(&nodes[0], payment_preimage, None, false, false); + // 9th HTLC (not added - smaller than dust limit + HTLC tx fee): + let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[5], ds_dust_limit * 1000); + let secret = nodes[5].node.create_inbound_payment_for_hash(hash_5, None, 7200, None).unwrap(); + send_along_route_with_secret(&nodes[1], route, path_5, ds_dust_limit * 1000, hash_5, secret); - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_updates.commitment_signed); - check_added_monitors!(nodes[0], 1); - let as_updates = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id()); - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_updates.0); - check_added_monitors!(nodes[1], 1); + // 10th HTLC (not added - smaller than dust limit + HTLC tx fee): + let (_, hash_6, ..) 
= route_payment(&nodes[0], path_4, ds_dust_limit * 1000); - let starting_block = nodes[1].best_block_info(); - let mut block = create_dummy_block(starting_block.0, 42, Vec::new()); - for _ in starting_block.1 + 1..TEST_FINAL_CLTV - CLTV_CLAIM_BUFFER + starting_block.1 + 2 { - connect_block(&nodes[1], &block); - block.header.prev_blockhash = block.block_hash(); - } - test_txn_broadcast(&nodes[1], &chan, None, if use_dust { HTLCType::NONE } else { HTLCType::SUCCESS }); - check_closed_broadcast!(nodes[1], true); - check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::HTLCsTimedOut, [nodes[0].node.get_our_node_id()], 100000); -} + // 11th HTLC: + let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[5], 1000000); + let secret = nodes[5].node.create_inbound_payment_for_hash(hash_6, None, 7200, None).unwrap(); + send_along_route_with_secret(&nodes[1], route, path_5, 1000000, hash_6, secret); -fn do_htlc_claim_current_remote_commitment_only(use_dust: bool) { - let chanmon_cfgs = create_chanmon_cfgs(2); - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); - let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); - let chan = create_announced_chan_between_nodes(&nodes, 0, 1); + // Double-check that six of the new HTLC were added + // We now have six HTLCs pending over the dust limit and six HTLCs under the dust limit (ie, + // with to_local and to_remote outputs, 8 outputs and 6 HTLCs not included). 
+ assert_eq!(get_local_commitment_txn!(nodes[3], chan_2_3.2).len(), 1); + assert_eq!(get_local_commitment_txn!(nodes[3], chan_2_3.2)[0].output.len(), 8); - let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], if use_dust { 50000 } else { 3000000 }); - nodes[0].node.send_payment_with_route(route, payment_hash, - RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap(); - check_added_monitors!(nodes[0], 1); + // Now fail back three of the over-dust-limit and three of the under-dust-limit payments in one go. + // Fail 0th below-dust, 4th above-dust, 8th above-dust, 10th below-dust HTLCs + nodes[4].node.fail_htlc_backwards(&hash_1); + nodes[4].node.fail_htlc_backwards(&hash_3); + nodes[4].node.fail_htlc_backwards(&hash_5); + nodes[4].node.fail_htlc_backwards(&hash_6); + check_added_monitors(&nodes[4], 0); - let _as_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); + let failed_destinations = vec![ + HTLCHandlingFailureType::Receive { payment_hash: hash_1 }, + HTLCHandlingFailureType::Receive { payment_hash: hash_3 }, + HTLCHandlingFailureType::Receive { payment_hash: hash_5 }, + HTLCHandlingFailureType::Receive { payment_hash: hash_6 }, + ]; + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[4], failed_destinations); + check_added_monitors(&nodes[4], 1); - // As far as A is concerned, the HTLC is now present only in the latest remote commitment - // transaction, however it is not in A's latest local commitment, so we can just broadcast that - // to "time out" the HTLC. 
+ let four_removes = get_htlc_update_msgs!(nodes[4], node_d_id); + nodes[3].node.handle_update_fail_htlc(node_e_id, &four_removes.update_fail_htlcs[0]); + nodes[3].node.handle_update_fail_htlc(node_e_id, &four_removes.update_fail_htlcs[1]); + nodes[3].node.handle_update_fail_htlc(node_e_id, &four_removes.update_fail_htlcs[2]); + nodes[3].node.handle_update_fail_htlc(node_e_id, &four_removes.update_fail_htlcs[3]); + commitment_signed_dance!(nodes[3], nodes[4], four_removes.commitment_signed, false); - let starting_block = nodes[1].best_block_info(); - let mut block = create_dummy_block(starting_block.0, 42, Vec::new()); + // Fail 3rd below-dust and 7th above-dust HTLCs + nodes[5].node.fail_htlc_backwards(&hash_2); + nodes[5].node.fail_htlc_backwards(&hash_4); + check_added_monitors(&nodes[5], 0); - for _ in starting_block.1 + 1..TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + starting_block.1 + 2 { - connect_block(&nodes[0], &block); - block.header.prev_blockhash = block.block_hash(); - } - test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE); - check_closed_broadcast!(nodes[0], true); - check_added_monitors!(nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::HTLCsTimedOut, [nodes[1].node.get_our_node_id()], 100000); -} + let failed_destinations_2 = vec![ + HTLCHandlingFailureType::Receive { payment_hash: hash_2 }, + HTLCHandlingFailureType::Receive { payment_hash: hash_4 }, + ]; + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[5], failed_destinations_2); + check_added_monitors(&nodes[5], 1); -fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no_close: bool) { - let chanmon_cfgs = create_chanmon_cfgs(3); - let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); - let nodes = create_network(3, &node_cfgs, &node_chanmgrs); - let chan = create_announced_chan_between_nodes(&nodes, 0, 1); + let two_removes = 
get_htlc_update_msgs!(nodes[5], node_d_id); + nodes[3].node.handle_update_fail_htlc(node_f_id, &two_removes.update_fail_htlcs[0]); + nodes[3].node.handle_update_fail_htlc(node_f_id, &two_removes.update_fail_htlcs[1]); + commitment_signed_dance!(nodes[3], nodes[5], two_removes.commitment_signed, false); - // Fail the payment, but don't deliver A's final RAA, resulting in the HTLC only being present - // in B's previous (unrevoked) commitment transaction, but none of A's commitment transactions. - // Also optionally test that we *don't* fail the channel in case the commitment transaction was - // actually revoked. - let htlc_value = if use_dust { 50000 } else { 3000000 }; - let (_, our_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], htlc_value); - nodes[1].node.fail_htlc_backwards(&our_payment_hash); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingFailureType::Receive { payment_hash: our_payment_hash }]); - check_added_monitors!(nodes[1], 1); - - let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); - nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &bs_updates.update_fail_htlcs[0]); - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_updates.commitment_signed); - check_added_monitors!(nodes[0], 1); - let as_updates = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id()); - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_updates.0); - check_added_monitors!(nodes[1], 1); - nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &as_updates.1); - check_added_monitors!(nodes[1], 1); - let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); + let ds_prev_commitment_tx = get_local_commitment_txn!(nodes[3], chan_2_3.2); - if check_revoke_no_close { - 
nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_revoke_and_ack); - check_added_monitors!(nodes[0], 1); + // After 4 and 2 removes respectively above in nodes[4] and nodes[5], nodes[3] should receive 6 PaymentForwardedFailed events + let failed_destinations_3 = vec![ + HTLCHandlingFailureType::Forward { node_id: Some(node_e_id), channel_id: chan_3_4.2 }, + HTLCHandlingFailureType::Forward { node_id: Some(node_e_id), channel_id: chan_3_4.2 }, + HTLCHandlingFailureType::Forward { node_id: Some(node_e_id), channel_id: chan_3_4.2 }, + HTLCHandlingFailureType::Forward { node_id: Some(node_e_id), channel_id: chan_3_4.2 }, + HTLCHandlingFailureType::Forward { node_id: Some(node_f_id), channel_id: chan_3_5.2 }, + HTLCHandlingFailureType::Forward { node_id: Some(node_f_id), channel_id: chan_3_5.2 }, + ]; + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[3], failed_destinations_3); + check_added_monitors(&nodes[3], 1); + let six_removes = get_htlc_update_msgs!(nodes[3], node_c_id); + nodes[2].node.handle_update_fail_htlc(node_d_id, &six_removes.update_fail_htlcs[0]); + nodes[2].node.handle_update_fail_htlc(node_d_id, &six_removes.update_fail_htlcs[1]); + nodes[2].node.handle_update_fail_htlc(node_d_id, &six_removes.update_fail_htlcs[2]); + nodes[2].node.handle_update_fail_htlc(node_d_id, &six_removes.update_fail_htlcs[3]); + nodes[2].node.handle_update_fail_htlc(node_d_id, &six_removes.update_fail_htlcs[4]); + nodes[2].node.handle_update_fail_htlc(node_d_id, &six_removes.update_fail_htlcs[5]); + if deliver_last_raa { + commitment_signed_dance!(nodes[2], nodes[3], six_removes.commitment_signed, false); + } else { + let cs = six_removes.commitment_signed; + commitment_signed_dance!(nodes[2], nodes[3], cs, false, true, false, true); } - let starting_block = nodes[1].best_block_info(); - let mut block = create_dummy_block(starting_block.0, 42, Vec::new()); - for _ in starting_block.1 + 1..TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 
CHAN_CONFIRM_DEPTH + 2 { - connect_block(&nodes[0], &block); - block.header.prev_blockhash = block.block_hash(); + // D's latest commitment transaction now contains 1st + 2nd + 9th HTLCs (implicitly, they're + // below the dust limit) and the 5th + 6th + 11th HTLCs. It has failed back the 0th, 3rd, 4th, + // 7th, 8th, and 10th, but as we haven't yet delivered the final RAA to C, the fails haven't + // propagated back to A/B yet (and D has two unrevoked commitment transactions). + // + // We now broadcast the latest commitment transaction, which *should* result in failures for + // the 0th, 1st, 2nd, 3rd, 4th, 7th, 8th, 9th, and 10th HTLCs, ie all the below-dust HTLCs and + // the non-broadcast above-dust HTLCs. + // + // Alternatively, we may broadcast the previous commitment transaction, which should only + // result in failures for the below-dust HTLCs, ie the 0th, 1st, 2nd, 3rd, 9th, and 10th HTLCs. + let ds_last_commitment_tx = get_local_commitment_txn!(nodes[3], chan_2_3.2); + + if announce_latest { + mine_transaction(&nodes[2], &ds_last_commitment_tx[0]); + } else { + mine_transaction(&nodes[2], &ds_prev_commitment_tx[0]); } - if !check_revoke_no_close { - test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE); - check_closed_broadcast!(nodes[0], true); - check_added_monitors!(nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::HTLCsTimedOut, [nodes[1].node.get_our_node_id()], 100000); + let events = nodes[2].node.get_and_clear_pending_events(); + let close_event = if deliver_last_raa { + assert_eq!(events.len(), 2 + 6); + events.last().clone().unwrap() } else { - expect_payment_failed!(nodes[0], our_payment_hash, true); + assert_eq!(events.len(), 1); + events.last().clone().unwrap() + }; + match close_event { + Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}, + _ => panic!("Unexpected event"), } -} -// Test that we close channels on-chain when broadcastable HTLCs reach their timeout window. 
-// There are only a few cases to test here: -// * its not really normative behavior, but we test that below-dust HTLCs "included" in -// broadcastable commitment transactions result in channel closure, -// * its included in an unrevoked-but-previous remote commitment transaction, -// * its included in the latest remote or local commitment transactions. -// We test each of the three possible commitment transactions individually and use both dust and -// non-dust HTLCs. -// Note that we don't bother testing both outbound and inbound HTLC failures for each case, and we -// assume they are handled the same across all six cases, as both outbound and inbound failures are -// tested for at least one of the cases in other tests. -#[xtest(feature = "_externalize_tests")] -pub fn htlc_claim_single_commitment_only_a() { - do_htlc_claim_local_commitment_only(true); - do_htlc_claim_local_commitment_only(false); + connect_blocks(&nodes[2], ANTI_REORG_DELAY - 1); + check_closed_broadcast!(nodes[2], true); + if deliver_last_raa { + expect_pending_htlcs_forwardable_from_events!(nodes[2], events[1..2], true); - do_htlc_claim_current_remote_commitment_only(true); - do_htlc_claim_current_remote_commitment_only(false); -} + let expected_destinations: Vec = + repeat(HTLCHandlingFailureType::Forward { + node_id: Some(node_d_id), + channel_id: chan_2_3.2, + }) + .take(3) + .collect(); + expect_htlc_handling_failed_destinations!( + nodes[2].node.get_and_clear_pending_events(), + expected_destinations + ); + } else { + let expected_destinations: Vec = if announce_latest { + repeat(HTLCHandlingFailureType::Forward { + node_id: Some(node_d_id), + channel_id: chan_2_3.2, + }) + .take(9) + .collect() + } else { + repeat(HTLCHandlingFailureType::Forward { + node_id: Some(node_d_id), + channel_id: chan_2_3.2, + }) + .take(6) + .collect() + }; -#[xtest(feature = "_externalize_tests")] -pub fn htlc_claim_single_commitment_only_b() { - do_htlc_claim_previous_remote_commitment_only(true, false); - 
do_htlc_claim_previous_remote_commitment_only(false, false); - do_htlc_claim_previous_remote_commitment_only(true, true); - do_htlc_claim_previous_remote_commitment_only(false, true); -} + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], expected_destinations); + } + check_added_monitors(&nodes[2], 3); -#[xtest(feature = "_externalize_tests")] -#[should_panic] -pub fn bolt2_open_channel_sending_node_checks_part1() { //This test needs to be on its own as we are catching a panic - let chanmon_cfgs = create_chanmon_cfgs(2); - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); - let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - // Force duplicate randomness for every get-random call - for node in nodes.iter() { - *node.keys_manager.override_random_bytes.lock().unwrap() = Some([0; 32]); + let cs_msgs = nodes[2].node.get_and_clear_pending_msg_events(); + assert_eq!(cs_msgs.len(), 2); + let mut a_done = false; + for msg in cs_msgs { + match msg { + MessageSendEvent::UpdateHTLCs { ref node_id, ref updates, .. } => { + // Both under-dust HTLCs and the one above-dust HTLC that we had already failed + // should be failed-backwards here. 
+ let target = if *node_id == node_a_id { + // If announce_latest, expect 0th, 1st, 4th, 8th, 10th HTLCs, else only 0th, 1st, 10th below-dust HTLCs + for htlc in &updates.update_fail_htlcs { + assert!( + htlc.htlc_id == 1 + || htlc.htlc_id == 2 || htlc.htlc_id == 6 + || if announce_latest { + htlc.htlc_id == 3 || htlc.htlc_id == 5 + } else { + false + } + ); + } + assert_eq!( + updates.update_fail_htlcs.len(), + if announce_latest { 5 } else { 3 } + ); + assert!(!a_done); + a_done = true; + &nodes[0] + } else { + // If announce_latest, expect 2nd, 3rd, 7th, 9th HTLCs, else only 2nd, 3rd, 9th below-dust HTLCs + for htlc in &updates.update_fail_htlcs { + assert!( + htlc.htlc_id == 1 + || htlc.htlc_id == 2 || htlc.htlc_id == 5 + || if announce_latest { htlc.htlc_id == 4 } else { false } + ); + } + assert_eq!(*node_id, node_b_id); + assert_eq!( + updates.update_fail_htlcs.len(), + if announce_latest { 4 } else { 3 } + ); + &nodes[1] + }; + target.node.handle_update_fail_htlc(node_c_id, &updates.update_fail_htlcs[0]); + target.node.handle_update_fail_htlc(node_c_id, &updates.update_fail_htlcs[1]); + target.node.handle_update_fail_htlc(node_c_id, &updates.update_fail_htlcs[2]); + if announce_latest { + target.node.handle_update_fail_htlc(node_c_id, &updates.update_fail_htlcs[3]); + if *node_id == node_a_id { + target + .node + .handle_update_fail_htlc(node_c_id, &updates.update_fail_htlcs[4]); + } + } + commitment_signed_dance!(target, nodes[2], updates.commitment_signed, false, true); + }, + _ => panic!("Unexpected event"), + } } - // BOLT #2 spec: Sending node must ensure temporary_channel_id is unique from any other channel ID with the same peer. 
- let channel_value_satoshis=10000; - let push_msat=10001; - nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None, None).unwrap(); - let node0_to_1_send_open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); - nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &node0_to_1_send_open_channel); - get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()); + let as_events = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(as_events.len(), if announce_latest { 10 } else { 6 }); + let mut as_faileds = new_hash_set(); + let mut as_updates = 0; + for event in as_events.iter() { + if let &Event::PaymentPathFailed { + ref payment_hash, + ref payment_failed_permanently, + ref failure, + .. + } = event + { + assert!(as_faileds.insert(*payment_hash)); + if *payment_hash != hash_2 { + assert_eq!(*payment_failed_permanently, deliver_last_raa); + } else { + assert!(!payment_failed_permanently); + } + if let PathFailure::OnPath { network_update: Some(_) } = failure { + as_updates += 1; + } + } else if let &Event::PaymentFailed { .. } = event { + } else { + panic!("Unexpected event"); + } + } + assert!(as_faileds.contains(&hash_1)); + assert!(as_faileds.contains(&hash_2)); + if announce_latest { + assert!(as_faileds.contains(&hash_3)); + assert!(as_faileds.contains(&hash_5)); + } + assert!(as_faileds.contains(&hash_6)); - // Create a second channel with the same random values. This used to panic due to a colliding - // channel_id, but now panics due to a colliding outbound SCID alias. 
- assert!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None, None).is_err()); + let bs_events = nodes[1].node.get_and_clear_pending_events(); + assert_eq!(bs_events.len(), if announce_latest { 8 } else { 6 }); + let mut bs_faileds = new_hash_set(); + let mut bs_updates = 0; + for event in bs_events.iter() { + if let &Event::PaymentPathFailed { + ref payment_hash, + ref payment_failed_permanently, + ref failure, + .. + } = event + { + assert!(bs_faileds.insert(*payment_hash)); + if *payment_hash != hash_1 && *payment_hash != hash_5 { + assert_eq!(*payment_failed_permanently, deliver_last_raa); + } else { + assert!(!payment_failed_permanently); + } + if let PathFailure::OnPath { network_update: Some(_) } = failure { + bs_updates += 1; + } + } else if let &Event::PaymentFailed { .. } = event { + } else { + panic!("Unexpected event"); + } + } + assert!(bs_faileds.contains(&hash_1)); + assert!(bs_faileds.contains(&hash_2)); + if announce_latest { + assert!(bs_faileds.contains(&hash_4)); + } + assert!(bs_faileds.contains(&hash_5)); + + // For each HTLC which was not failed-back by normal process (ie deliver_last_raa), we should + // get a NetworkUpdate. A should have gotten 4 HTLCs which were failed-back due to + // unknown-preimage-etc, B should have gotten 2. Thus, in the + // announce_latest && deliver_last_raa case, we should have 5-4=1 and 4-2=2 NetworkUpdates. 
+ assert_eq!( + as_updates, + if deliver_last_raa { + 1 + } else if !announce_latest { + 3 + } else { + 5 + } + ); + assert_eq!( + bs_updates, + if deliver_last_raa { + 2 + } else if !announce_latest { + 3 + } else { + 4 + } + ); } #[xtest(feature = "_externalize_tests")] -pub fn bolt2_open_channel_sending_node_checks_part2() { - let chanmon_cfgs = create_chanmon_cfgs(2); - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); - let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - - // BOLT #2 spec: Sending node must set funding_satoshis to less than 2^24 satoshis - let channel_value_satoshis=2^24; - let push_msat=10001; - assert!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None, None).is_err()); - - // BOLT #2 spec: Sending node must set push_msat to equal or less than 1000 * funding_satoshis - let channel_value_satoshis=10000; - // Test when push_msat is equal to 1000 * funding_satoshis. 
- let push_msat=1000*channel_value_satoshis+1; - assert!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None, None).is_err()); - - // BOLT #2 spec: Sending node must set set channel_reserve_satoshis greater than or equal to dust_limit_satoshis - let channel_value_satoshis=10000; - let push_msat=10001; - assert!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None, None).is_ok()); //Create a valid channel - let node0_to_1_send_open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); - assert!(node0_to_1_send_open_channel.channel_reserve_satoshis>=node0_to_1_send_open_channel.common_fields.dust_limit_satoshis); - - // BOLT #2 spec: Sending node must set undefined bits in channel_flags to 0 - // Only the least-significant bit of channel_flags is currently defined resulting in channel_flags only having one of two possible states 0 or 1 - assert!(node0_to_1_send_open_channel.common_fields.channel_flags<=1); - - // BOLT #2 spec: Sending node should set to_self_delay sufficient to ensure the sender can irreversibly spend a commitment transaction output, in case of misbehaviour by the receiver. - assert!(BREAKDOWN_TIMEOUT>0); - assert!(node0_to_1_send_open_channel.common_fields.to_self_delay==BREAKDOWN_TIMEOUT); - - // BOLT #2 spec: Sending node must ensure the chain_hash value identifies the chain it wishes to open the channel within. - let chain_hash = ChainHash::using_genesis_block(Network::Testnet); - assert_eq!(node0_to_1_send_open_channel.common_fields.chain_hash, chain_hash); - - // BOLT #2 spec: Sending node must set funding_pubkey, revocation_basepoint, htlc_basepoint, payment_basepoint, and delayed_payment_basepoint to valid DER-encoded, compressed, secp256k1 pubkeys. 
- assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.common_fields.funding_pubkey.serialize()).is_ok()); - assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.common_fields.revocation_basepoint.serialize()).is_ok()); - assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.common_fields.htlc_basepoint.serialize()).is_ok()); - assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.common_fields.payment_basepoint.serialize()).is_ok()); - assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.common_fields.delayed_payment_basepoint.serialize()).is_ok()); +pub fn test_fail_backwards_latest_remote_announce_a() { + do_test_fail_backwards_unrevoked_remote_announce(false, true); } #[xtest(feature = "_externalize_tests")] -pub fn bolt2_open_channel_sane_dust_limit() { - let chanmon_cfgs = create_chanmon_cfgs(2); - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); - let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - - let channel_value_satoshis=1000000; - let push_msat=10001; - nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None, None).unwrap(); - let mut node0_to_1_send_open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); - node0_to_1_send_open_channel.common_fields.dust_limit_satoshis = 547; - node0_to_1_send_open_channel.channel_reserve_satoshis = 100001; - - nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &node0_to_1_send_open_channel); - let events = nodes[1].node.get_and_clear_pending_msg_events(); - let err_msg = match events[0] { - MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id: _ } => { - msg.clone() - }, - _ => panic!("Unexpected event"), - }; - assert_eq!(err_msg.data, "dust_limit_satoshis (547) is greater than the implementation limit (546)"); +pub fn 
test_fail_backwards_latest_remote_announce_b() { + do_test_fail_backwards_unrevoked_remote_announce(true, true); } -// Test that if we fail to send an HTLC that is being freed from the holding cell, and the HTLC -// originated from our node, its failure is surfaced to the user. We trigger this failure to -// free the HTLC by increasing our fee while the HTLC is in the holding cell such that the HTLC -// is no longer affordable once it's freed. #[xtest(feature = "_externalize_tests")] -pub fn test_fail_holding_cell_htlc_upon_free() { - let chanmon_cfgs = create_chanmon_cfgs(2); - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); - let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); - let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000); - - // First nodes[0] generates an update_fee, setting the channel's - // pending_update_fee. - { - let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); - *feerate_lock += 20; - } - nodes[0].node.timer_tick_occurred(); - check_added_monitors!(nodes[0], 1); - - let events = nodes[0].node.get_and_clear_pending_msg_events(); - assert_eq!(events.len(), 1); - let (update_msg, commitment_signed) = match events[0] { - MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => { - (update_fee.as_ref(), commitment_signed) - }, - _ => panic!("Unexpected event"), - }; - - nodes[1].node.handle_update_fee(nodes[0].node.get_our_node_id(), update_msg.unwrap()); - - let mut chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2); - let channel_reserve = chan_stat.channel_reserve_msat; - let feerate = get_feerate!(nodes[0], nodes[1], chan.2); - let channel_type_features = get_channel_type_features!(nodes[0], nodes[1], chan.2); - - // 2* and +1 HTLCs on the commit tx fee calculation for the fee spike reserve. 
- let max_can_send = 5000000 - channel_reserve - 2*commit_tx_fee_msat(feerate, 1 + 1, &channel_type_features); - let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], max_can_send); - - // Send a payment which passes reserve checks but gets stuck in the holding cell. - nodes[0].node.send_payment_with_route(route.clone(), our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap(); - chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2); - assert_eq!(chan_stat.holding_cell_outbound_amount_msat, max_can_send); - - // Flush the pending fee update. - nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), commitment_signed); - let (as_revoke_and_ack, _) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); - check_added_monitors!(nodes[1], 1); - nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &as_revoke_and_ack); - check_added_monitors!(nodes[0], 1); - - // Upon receipt of the RAA, there will be an attempt to resend the holding cell - // HTLC, but now that the fee has been raised the payment will now fail, causing - // us to surface its failure to the user. - chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2); - assert_eq!(chan_stat.holding_cell_outbound_amount_msat, 0); - nodes[0].logger.assert_log("lightning::ln::channel", format!("Freeing holding cell with 1 HTLC updates in channel {}", chan.2), 1); - - // Check that the payment failed to be sent out. - let events = nodes[0].node.get_and_clear_pending_events(); - assert_eq!(events.len(), 2); - match &events[0] { - &Event::PaymentPathFailed { ref payment_id, ref payment_hash, ref payment_failed_permanently, failure: PathFailure::OnPath { network_update: None }, ref short_channel_id, .. 
} => { - assert_eq!(PaymentId(our_payment_hash.0), *payment_id.as_ref().unwrap()); - assert_eq!(our_payment_hash.clone(), *payment_hash); - assert_eq!(*payment_failed_permanently, false); - assert_eq!(*short_channel_id, Some(route.paths[0].hops[0].short_channel_id)); - }, - _ => panic!("Unexpected event"), - } - match &events[1] { - &Event::PaymentFailed { ref payment_hash, .. } => { - assert_eq!(Some(our_payment_hash), *payment_hash); - }, - _ => panic!("Unexpected event"), - } +pub fn test_fail_backwards_previous_remote_announce() { + do_test_fail_backwards_unrevoked_remote_announce(false, false); + // Note that true, true doesn't make sense as it implies we announce a revoked state, which is + // tested for in test_commitment_revoked_fail_backward_exhaustive() } -// Test that if multiple HTLCs are released from the holding cell and one is -// valid but the other is no longer valid upon release, the valid HTLC can be -// successfully completed while the other one fails as expected. #[xtest(feature = "_externalize_tests")] -pub fn test_free_and_fail_holding_cell_htlcs() { +pub fn test_dynamic_spendable_outputs_local_htlc_timeout_tx() { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); - let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); - let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000); - - // First nodes[0] generates an update_fee, setting the channel's - // pending_update_fee. 
- { - let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); - *feerate_lock += 200; - } - nodes[0].node.timer_tick_occurred(); - check_added_monitors!(nodes[0], 1); - - let events = nodes[0].node.get_and_clear_pending_msg_events(); - assert_eq!(events.len(), 1); - let (update_msg, commitment_signed) = match events[0] { - MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => { - (update_fee.as_ref(), commitment_signed) - }, - _ => panic!("Unexpected event"), - }; - - nodes[1].node.handle_update_fee(nodes[0].node.get_our_node_id(), update_msg.unwrap()); - - let mut chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2); - let channel_reserve = chan_stat.channel_reserve_msat; - let feerate = get_feerate!(nodes[0], nodes[1], chan.2); - let channel_type_features = get_channel_type_features!(nodes[0], nodes[1], chan.2); - - // 2* and +1 HTLCs on the commit tx fee calculation for the fee spike reserve. - let amt_1 = 20000; - let amt_2 = 5000000 - channel_reserve - 2*commit_tx_fee_msat(feerate, 2 + 1, &channel_type_features) - amt_1; - let (route_1, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], amt_1); - let (route_2, payment_hash_2, _, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], amt_2); - - // Send 2 payments which pass reserve checks but get stuck in the holding cell. 
- nodes[0].node.send_payment_with_route(route_1, payment_hash_1, - RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0)).unwrap(); - chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2); - assert_eq!(chan_stat.holding_cell_outbound_amount_msat, amt_1); - let payment_id_2 = PaymentId(nodes[0].keys_manager.get_secure_random_bytes()); - nodes[0].node.send_payment_with_route(route_2.clone(), payment_hash_2, - RecipientOnionFields::secret_only(payment_secret_2), payment_id_2).unwrap(); - chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2); - assert_eq!(chan_stat.holding_cell_outbound_amount_msat, amt_1 + amt_2); - - // Flush the pending fee update. - nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), commitment_signed); - let (revoke_and_ack, commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); - check_added_monitors!(nodes[1], 1); - nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &revoke_and_ack); - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &commitment_signed); - check_added_monitors!(nodes[0], 2); - - // Upon receipt of the RAA, there will be an attempt to resend the holding cell HTLCs, - // but now that the fee has been raised the second payment will now fail, causing us - // to surface its failure to the user. The first payment should succeed. - chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2); - assert_eq!(chan_stat.holding_cell_outbound_amount_msat, 0); - nodes[0].logger.assert_log("lightning::ln::channel", format!("Freeing holding cell with 2 HTLC updates in channel {}", chan.2), 1); - - // Check that the second payment failed to be sent out. 
- let events = nodes[0].node.get_and_clear_pending_events(); - assert_eq!(events.len(), 2); - match &events[0] { - &Event::PaymentPathFailed { ref payment_id, ref payment_hash, ref payment_failed_permanently, failure: PathFailure::OnPath { network_update: None }, ref short_channel_id, .. } => { - assert_eq!(payment_id_2, *payment_id.as_ref().unwrap()); - assert_eq!(payment_hash_2.clone(), *payment_hash); - assert_eq!(*payment_failed_permanently, false); - assert_eq!(*short_channel_id, Some(route_2.paths[0].hops[0].short_channel_id)); - }, - _ => panic!("Unexpected event"), - } - match &events[1] { - &Event::PaymentFailed { ref payment_hash, .. } => { - assert_eq!(Some(payment_hash_2), *payment_hash); - }, - _ => panic!("Unexpected event"), - } + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - // Complete the first payment and the RAA from the fee update. - let (payment_event, send_raa_event) = { - let mut msgs = nodes[0].node.get_and_clear_pending_msg_events(); - assert_eq!(msgs.len(), 2); - (SendEvent::from_event(msgs.remove(0)), msgs.remove(0)) - }; - let raa = match send_raa_event { - MessageSendEvent::SendRevokeAndACK { msg, .. } => msg, - _ => panic!("Unexpected event"), - }; - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &raa); - check_added_monitors!(nodes[1], 1); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); - commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); - let events = nodes[1].node.get_and_clear_pending_events(); - assert_eq!(events.len(), 1); - match events[0] { - Event::PendingHTLCsForwardable { .. } => {}, - _ => panic!("Unexpected event"), - } - nodes[1].node.process_pending_htlc_forwards(); - let events = nodes[1].node.get_and_clear_pending_events(); - assert_eq!(events.len(), 1); - match events[0] { - Event::PaymentClaimable { .. 
} => {}, - _ => panic!("Unexpected event"), - } - nodes[1].node.claim_funds(payment_preimage_1); - check_added_monitors!(nodes[1], 1); - expect_payment_claimed!(nodes[1], payment_hash_1, amt_1); + let node_b_id = nodes[1].node.get_our_node_id(); - let update_msgs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); - nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &update_msgs.update_fulfill_htlcs[0]); - commitment_signed_dance!(nodes[0], nodes[1], update_msgs.commitment_signed, false, true); - expect_payment_sent!(nodes[0], payment_preimage_1); -} + // Create some initial channels + let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1); -// Test that if we fail to forward an HTLC that is being freed from the holding cell that the -// HTLC is failed backwards. We trigger this failure to forward the freed HTLC by increasing -// our fee while the HTLC is in the holding cell such that the HTLC is no longer affordable -// once it's freed. -#[xtest(feature = "_externalize_tests")] -pub fn test_fail_holding_cell_htlc_upon_free_multihop() { - let chanmon_cfgs = create_chanmon_cfgs(3); - let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); - // Avoid having to include routing fees in calculations - let mut config = test_default_channel_config(); - config.channel_config.forwarding_fee_base_msat = 0; - config.channel_config.forwarding_fee_proportional_millionths = 0; - let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[Some(config.clone()), Some(config.clone()), Some(config.clone())]); - let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); - let chan_0_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000); - let chan_1_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 100000, 95000000); + let (_, our_payment_hash, ..) 
= route_payment(&nodes[0], &[&nodes[1]], 9000000); + let local_txn = get_local_commitment_txn!(nodes[0], chan_1.2); + assert_eq!(local_txn[0].input.len(), 1); + check_spends!(local_txn[0], chan_1.3); - // First nodes[1] generates an update_fee, setting the channel's - // pending_update_fee. - { - let mut feerate_lock = chanmon_cfgs[1].fee_estimator.sat_per_kw.lock().unwrap(); - *feerate_lock += 20; - } - nodes[1].node.timer_tick_occurred(); - check_added_monitors!(nodes[1], 1); + // Timeout HTLC on A's chain and so it can generate a HTLC-Timeout tx + mine_transaction(&nodes[0], &local_txn[0]); + check_closed_broadcast!(nodes[0], true); + check_added_monitors(&nodes[0], 1); + check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [node_b_id], 100000); + connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires - let events = nodes[1].node.get_and_clear_pending_msg_events(); - assert_eq!(events.len(), 1); - let (update_msg, commitment_signed) = match events[0] { - MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. 
} => { - (update_fee.as_ref(), commitment_signed) - }, - _ => panic!("Unexpected event"), + let htlc_timeout = { + let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap(); + assert_eq!(node_txn.len(), 1); + assert_eq!(node_txn[0].input.len(), 1); + assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); + check_spends!(node_txn[0], local_txn[0]); + node_txn[0].clone() }; - nodes[2].node.handle_update_fee(nodes[1].node.get_our_node_id(), update_msg.unwrap()); + mine_transaction(&nodes[0], &htlc_timeout); + connect_blocks(&nodes[0], BREAKDOWN_TIMEOUT as u32 - 1); + expect_payment_failed!(nodes[0], our_payment_hash, false); - let mut chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan_0_1.2); - let channel_reserve = chan_stat.channel_reserve_msat; - let feerate = get_feerate!(nodes[0], nodes[1], chan_0_1.2); - let channel_type_features = get_channel_type_features!(nodes[0], nodes[1], chan_0_1.2); + // Verify that A is able to spend its own HTLC-Timeout tx thanks to spendable output event given back by its ChannelMonitor + let spend_txn = check_spendable_outputs!(nodes[0], node_cfgs[0].keys_manager); + assert_eq!(spend_txn.len(), 3); + check_spends!(spend_txn[0], local_txn[0]); + assert_eq!(spend_txn[1].input.len(), 1); + check_spends!(spend_txn[1], htlc_timeout); + assert_eq!(spend_txn[1].input[0].sequence.0, BREAKDOWN_TIMEOUT as u32); + assert_eq!(spend_txn[2].input.len(), 2); + check_spends!(spend_txn[2], local_txn[0], htlc_timeout); + assert!( + spend_txn[2].input[0].sequence.0 == BREAKDOWN_TIMEOUT as u32 + || spend_txn[2].input[1].sequence.0 == BREAKDOWN_TIMEOUT as u32 + ); +} - // Send a payment which passes reserve checks but gets stuck in the holding cell. 
- let max_can_send = 5000000 - channel_reserve - 2*commit_tx_fee_msat(feerate, 1 + 1, &channel_type_features); - let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], max_can_send); - let payment_event = { - nodes[0].node.send_payment_with_route(route, our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap(); - check_added_monitors!(nodes[0], 1); +#[xtest(feature = "_externalize_tests")] +pub fn test_key_derivation_params() { + // This test is a copy of test_dynamic_spendable_outputs_local_htlc_timeout_tx, with a key + // manager rotation to test that `channel_keys_id` returned in + // [`SpendableOutputDescriptor::DelayedPaymentOutput`] let us re-derive the channel key set to + // then derive a `delayed_payment_key`. - let mut events = nodes[0].node.get_and_clear_pending_msg_events(); - assert_eq!(events.len(), 1); + let chanmon_cfgs = create_chanmon_cfgs(3); - SendEvent::from_event(events.remove(0)) + // We manually create the node configuration to backup the seed. 
+ let seed = [42; 32]; + let keys_manager = test_utils::TestKeysInterface::new(&seed, Network::Testnet); + let chain_monitor = test_utils::TestChainMonitor::new( + Some(&chanmon_cfgs[0].chain_source), + &chanmon_cfgs[0].tx_broadcaster, + &chanmon_cfgs[0].logger, + &chanmon_cfgs[0].fee_estimator, + &chanmon_cfgs[0].persister, + &keys_manager, + ); + let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, &chanmon_cfgs[0].logger)); + let scorer = RwLock::new(test_utils::TestScorer::new()); + let router = + test_utils::TestRouter::new(network_graph.clone(), &chanmon_cfgs[0].logger, &scorer); + let message_router = test_utils::TestMessageRouter::new(network_graph.clone(), &keys_manager); + let node = NodeCfg { + chain_source: &chanmon_cfgs[0].chain_source, + logger: &chanmon_cfgs[0].logger, + tx_broadcaster: &chanmon_cfgs[0].tx_broadcaster, + fee_estimator: &chanmon_cfgs[0].fee_estimator, + router, + message_router, + chain_monitor, + keys_manager: &keys_manager, + network_graph, + node_seed: seed, + override_init_features: alloc::rc::Rc::new(core::cell::RefCell::new(None)), }; - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); - check_added_monitors!(nodes[1], 0); - commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); - expect_pending_htlcs_forwardable!(nodes[1]); + let mut node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + node_cfgs.remove(0); + node_cfgs.insert(0, node); - chan_stat = get_channel_value_stat!(nodes[1], nodes[2], chan_1_2.2); - assert_eq!(chan_stat.holding_cell_outbound_amount_msat, max_can_send); + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let nodes = create_network(3, &node_cfgs, &node_chanmgrs); - // Flush the pending fee update. 
- nodes[2].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), commitment_signed); - let (raa, commitment_signed) = get_revoke_commit_msgs!(nodes[2], nodes[1].node.get_our_node_id()); - check_added_monitors!(nodes[2], 1); - nodes[1].node.handle_revoke_and_ack(nodes[2].node.get_our_node_id(), &raa); - nodes[1].node.handle_commitment_signed_batch_test(nodes[2].node.get_our_node_id(), &commitment_signed); - check_added_monitors!(nodes[1], 2); + let node_b_id = nodes[1].node.get_our_node_id(); - // A final RAA message is generated to finalize the fee update. - let events = nodes[1].node.get_and_clear_pending_msg_events(); - assert_eq!(events.len(), 1); + // Create some initial channels + // Create a dummy channel to advance index by one and thus test re-derivation correctness + // for node 0 + let chan_0 = create_announced_chan_between_nodes(&nodes, 0, 2); + let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1); + assert_ne!(chan_0.3.output[0].script_pubkey, chan_1.3.output[0].script_pubkey); - let raa_msg = match &events[0] { - &MessageSendEvent::SendRevokeAndACK { ref msg, .. } => { - msg.clone() - }, - _ => panic!("Unexpected event"), - }; + // Ensure all nodes are at the same height + let node_max_height = + nodes.iter().map(|node| node.blocks.lock().unwrap().len()).max().unwrap() as u32; + connect_blocks(&nodes[0], node_max_height - nodes[0].best_block_info().1); + connect_blocks(&nodes[1], node_max_height - nodes[1].best_block_info().1); + connect_blocks(&nodes[2], node_max_height - nodes[2].best_block_info().1); - nodes[2].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &raa_msg); - check_added_monitors!(nodes[2], 1); - assert!(nodes[2].node.get_and_clear_pending_msg_events().is_empty()); + let (_, our_payment_hash, ..) 
= route_payment(&nodes[0], &[&nodes[1]], 9000000); + let local_txn_0 = get_local_commitment_txn!(nodes[0], chan_0.2); + let local_txn_1 = get_local_commitment_txn!(nodes[0], chan_1.2); + assert_eq!(local_txn_1[0].input.len(), 1); + check_spends!(local_txn_1[0], chan_1.3); - // nodes[1]'s ChannelManager will now signal that we have HTLC forwards to process. - let process_htlc_forwards_event = nodes[1].node.get_and_clear_pending_events(); - assert_eq!(process_htlc_forwards_event.len(), 2); - match &process_htlc_forwards_event[1] { - &Event::PendingHTLCsForwardable { .. } => {}, - _ => panic!("Unexpected event"), + // We check funding pubkey are unique + let (from_0_funding_key_0, from_0_funding_key_1) = ( + PublicKey::from_slice(&local_txn_0[0].input[0].witness.to_vec()[3][2..35]), + PublicKey::from_slice(&local_txn_0[0].input[0].witness.to_vec()[3][36..69]), + ); + let (from_1_funding_key_0, from_1_funding_key_1) = ( + PublicKey::from_slice(&local_txn_1[0].input[0].witness.to_vec()[3][2..35]), + PublicKey::from_slice(&local_txn_1[0].input[0].witness.to_vec()[3][36..69]), + ); + if from_0_funding_key_0 == from_1_funding_key_0 + || from_0_funding_key_0 == from_1_funding_key_1 + || from_0_funding_key_1 == from_1_funding_key_0 + || from_0_funding_key_1 == from_1_funding_key_1 + { + panic!("Funding pubkeys aren't unique"); } - // In response, we call ChannelManager's process_pending_htlc_forwards - nodes[1].node.process_pending_htlc_forwards(); - check_added_monitors!(nodes[1], 1); + // Timeout HTLC on A's chain and so it can generate a HTLC-Timeout tx + mine_transaction(&nodes[0], &local_txn_1[0]); + connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires + check_closed_broadcast!(nodes[0], true); + check_added_monitors(&nodes[0], 1); + check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [node_b_id], 100000); - // This causes the HTLC to be failed backwards. 
- let fail_event = nodes[1].node.get_and_clear_pending_msg_events(); - assert_eq!(fail_event.len(), 1); - let (fail_msg, commitment_signed) = match &fail_event[0] { - &MessageSendEvent::UpdateHTLCs { ref updates, .. } => { - assert_eq!(updates.update_add_htlcs.len(), 0); - assert_eq!(updates.update_fulfill_htlcs.len(), 0); - assert_eq!(updates.update_fail_malformed_htlcs.len(), 0); - assert_eq!(updates.update_fail_htlcs.len(), 1); - (updates.update_fail_htlcs[0].clone(), updates.commitment_signed.clone()) - }, - _ => panic!("Unexpected event"), + let htlc_timeout = { + let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap(); + assert_eq!(node_txn.len(), 1); + assert_eq!(node_txn[0].input.len(), 1); + assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); + check_spends!(node_txn[0], local_txn_1[0]); + node_txn[0].clone() }; - // Pass the failure messages back to nodes[0]. - nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &fail_msg); - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &commitment_signed); + mine_transaction(&nodes[0], &htlc_timeout); + connect_blocks(&nodes[0], BREAKDOWN_TIMEOUT as u32 - 1); + expect_payment_failed!(nodes[0], our_payment_hash, false); - // Complete the HTLC failure+removal process. - let (raa, commitment_signed) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id()); - check_added_monitors!(nodes[0], 1); - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &raa); - nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &commitment_signed); - check_added_monitors!(nodes[1], 2); - let final_raa_event = nodes[1].node.get_and_clear_pending_msg_events(); - assert_eq!(final_raa_event.len(), 1); - let raa = match &final_raa_event[0] { - &MessageSendEvent::SendRevokeAndACK { ref msg, .. 
} => msg.clone(), - _ => panic!("Unexpected event"), - }; - nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &raa); - expect_payment_failed_with_update!(nodes[0], our_payment_hash, false, chan_1_2.0.contents.short_channel_id, false); - check_added_monitors!(nodes[0], 1); + // Verify that A is able to spend its own HTLC-Timeout tx thanks to spendable output event given back by its ChannelMonitor + let new_keys_manager = test_utils::TestKeysInterface::new(&seed, Network::Testnet); + let spend_txn = check_spendable_outputs!(nodes[0], new_keys_manager); + assert_eq!(spend_txn.len(), 3); + check_spends!(spend_txn[0], local_txn_1[0]); + assert_eq!(spend_txn[1].input.len(), 1); + check_spends!(spend_txn[1], htlc_timeout); + assert_eq!(spend_txn[1].input[0].sequence.0, BREAKDOWN_TIMEOUT as u32); + assert_eq!(spend_txn[2].input.len(), 2); + check_spends!(spend_txn[2], local_txn_1[0], htlc_timeout); + assert!( + spend_txn[2].input[0].sequence.0 == BREAKDOWN_TIMEOUT as u32 + || spend_txn[2].input[1].sequence.0 == BREAKDOWN_TIMEOUT as u32 + ); } #[xtest(feature = "_externalize_tests")] -pub fn test_payment_route_reaching_same_channel_twice() { - //A route should not go through the same channel twice - //It is enforced when constructing a route. 
+pub fn test_static_output_closing_tx() { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); - let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); - let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 0); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), 0) - .with_bolt11_features(nodes[1].node.bolt11_invoice_features()).unwrap(); - let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], payment_params, 100000000); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); - // Extend the path by itself, essentially simulating route going through same channel twice - let cloned_hops = route.paths[0].hops.clone(); - route.paths[0].hops.extend_from_slice(&cloned_hops); + let chan = create_announced_chan_between_nodes(&nodes, 0, 1); - unwrap_send_err!(nodes[0], nodes[0].node.send_payment_with_route(route, our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0) - ), false, APIError::InvalidRoute { ref err }, - assert_eq!(err, &"Path went through the same channel twice")); - assert!(nodes[0].node.list_recent_payments().is_empty()); -} + send_payment(&nodes[0], &[&nodes[1]], 8000000); + let closing_tx = close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true).2; -// BOLT 2 Requirements for the Sender when constructing and sending an update_add_htlc message. -// BOLT 2 Requirement: MUST NOT offer amount_msat it cannot pay for in the remote commitment transaction at the current feerate_per_kw (see "Updating Fees") while maintaining its channel reserve. 
-//TODO: I don't believe this is explicitly enforced when sending an HTLC but as the Fee aspect of the BOLT specs is in flux leaving this as a TODO. + mine_transaction(&nodes[0], &closing_tx); + let reason = ClosureReason::CounterpartyInitiatedCooperativeClosure; + check_closed_event!(nodes[0], 1, reason, [node_b_id], 100000); + connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1); -#[xtest(feature = "_externalize_tests")] -pub fn test_update_add_htlc_bolt2_sender_value_below_minimum_msat() { - //BOLT2 Requirement: MUST NOT offer amount_msat below the receiving node's htlc_minimum_msat (same validation check catches both of these) - let chanmon_cfgs = create_chanmon_cfgs(2); - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); - let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); - let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000); + let spend_txn = check_spendable_outputs!(nodes[0], node_cfgs[0].keys_manager); + assert_eq!(spend_txn.len(), 1); + check_spends!(spend_txn[0], closing_tx); - let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000); - route.paths[0].hops[0].fee_msat = 100; + mine_transaction(&nodes[1], &closing_tx); + let reason = ClosureReason::LocallyInitiatedCooperativeClosure; + check_closed_event!(nodes[1], 1, reason, [node_a_id], 100000); + connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); - unwrap_send_err!(nodes[0], nodes[0].node.send_payment_with_route(route, our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0) - ), true, APIError::ChannelUnavailable { .. 
}, {}); - assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); + let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager); + assert_eq!(spend_txn.len(), 1); + check_spends!(spend_txn[0], closing_tx); } -#[xtest(feature = "_externalize_tests")] -pub fn test_update_add_htlc_bolt2_sender_zero_value_msat() { - //BOLT2 Requirement: MUST offer amount_msat greater than 0. +fn do_htlc_claim_local_commitment_only(use_dust: bool) { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); - let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); - let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000); - route.paths[0].hops[0].fee_msat = 0; - unwrap_send_err!(nodes[0], nodes[0].node.send_payment_with_route(route, our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)), - true, APIError::ChannelUnavailable { ref err }, - assert_eq!(err, "Cannot send 0-msat HTLC")); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); - assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - nodes[0].logger.assert_log_contains("lightning::ln::channelmanager", "Cannot send 0-msat HTLC", 2); + let chan = create_announced_chan_between_nodes(&nodes, 0, 1); + + let (payment_preimage, payment_hash, ..) = + route_payment(&nodes[0], &[&nodes[1]], if use_dust { 50000 } else { 3_000_000 }); + + // Claim the payment, but don't deliver A's commitment_signed, resulting in the HTLC only being + // present in B's local commitment transaction, but none of A's commitment transactions. 
+ nodes[1].node.claim_funds(payment_preimage); + check_added_monitors(&nodes[1], 1); + expect_payment_claimed!(nodes[1], payment_hash, if use_dust { 50000 } else { 3_000_000 }); + + let bs_updates = get_htlc_update_msgs!(nodes[1], node_a_id); + nodes[0].node.handle_update_fulfill_htlc(node_b_id, &bs_updates.update_fulfill_htlcs[0]); + expect_payment_sent(&nodes[0], payment_preimage, None, false, false); + + nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_updates.commitment_signed); + check_added_monitors(&nodes[0], 1); + let as_updates = get_revoke_commit_msgs!(nodes[0], node_b_id); + nodes[1].node.handle_revoke_and_ack(node_a_id, &as_updates.0); + check_added_monitors(&nodes[1], 1); + + let starting_block = nodes[1].best_block_info(); + let mut block = create_dummy_block(starting_block.0, 42, Vec::new()); + for _ in starting_block.1 + 1..TEST_FINAL_CLTV - CLTV_CLAIM_BUFFER + starting_block.1 + 2 { + connect_block(&nodes[1], &block); + block.header.prev_blockhash = block.block_hash(); + } + let htlc_type = if use_dust { HTLCType::NONE } else { HTLCType::SUCCESS }; + test_txn_broadcast(&nodes[1], &chan, None, htlc_type); + check_closed_broadcast!(nodes[1], true); + check_added_monitors(&nodes[1], 1); + check_closed_event!(nodes[1], 1, ClosureReason::HTLCsTimedOut, [node_a_id], 100000); } -#[xtest(feature = "_externalize_tests")] -pub fn test_update_add_htlc_bolt2_receiver_zero_value_msat() { - //BOLT2 Requirement: MUST offer amount_msat greater than 0. 
+fn do_htlc_claim_current_remote_commitment_only(use_dust: bool) { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); - let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000); - let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000); - nodes[0].node.send_payment_with_route(route, our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap(); - check_added_monitors!(nodes[0], 1); - let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); - updates.update_add_htlcs[0].amount_msat = 0; + let node_b_id = nodes[1].node.get_our_node_id(); + + let chan = create_announced_chan_between_nodes(&nodes, 0, 1); + + let (route, payment_hash, _, payment_secret) = + get_route_and_payment_hash!(nodes[0], nodes[1], if use_dust { 50000 } else { 3000000 }); + let onion = RecipientOnionFields::secret_only(payment_secret); + let id = PaymentId(payment_hash.0); + nodes[0].node.send_payment_with_route(route, payment_hash, onion, id).unwrap(); + check_added_monitors(&nodes[0], 1); + + let _as_update = get_htlc_update_msgs!(nodes[0], node_b_id); + + // As far as A is concerned, the HTLC is now present only in the latest remote commitment + // transaction, however it is not in A's latest local commitment, so we can just broadcast that + // to "time out" the HTLC. 
+ + let starting_block = nodes[1].best_block_info(); + let mut block = create_dummy_block(starting_block.0, 42, Vec::new()); + + for _ in + starting_block.1 + 1..TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + starting_block.1 + 2 + { + connect_block(&nodes[0], &block); + block.header.prev_blockhash = block.block_hash(); + } + test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE); + check_closed_broadcast!(nodes[0], true); + check_added_monitors(&nodes[0], 1); + check_closed_event!(nodes[0], 1, ClosureReason::HTLCsTimedOut, [node_b_id], 100000); +} + +fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no_close: bool) { + let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + + let chan = create_announced_chan_between_nodes(&nodes, 0, 1); + + // Fail the payment, but don't deliver A's final RAA, resulting in the HTLC only being present + // in B's previous (unrevoked) commitment transaction, but none of A's commitment transactions. + // Also optionally test that we *don't* fail the channel in case the commitment transaction was + // actually revoked. + let htlc_value = if use_dust { 50000 } else { 3000000 }; + let (_, our_payment_hash, ..) 
= route_payment(&nodes[0], &[&nodes[1]], htlc_value); + nodes[1].node.fail_htlc_backwards(&our_payment_hash); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!( + nodes[1], + vec![HTLCHandlingFailureType::Receive { payment_hash: our_payment_hash }] + ); + check_added_monitors(&nodes[1], 1); + + let bs_updates = get_htlc_update_msgs!(nodes[1], node_a_id); + nodes[0].node.handle_update_fail_htlc(node_b_id, &bs_updates.update_fail_htlcs[0]); + nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_updates.commitment_signed); + check_added_monitors(&nodes[0], 1); + let as_updates = get_revoke_commit_msgs!(nodes[0], node_b_id); + nodes[1].node.handle_revoke_and_ack(node_a_id, &as_updates.0); + check_added_monitors(&nodes[1], 1); + nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &as_updates.1); + check_added_monitors(&nodes[1], 1); + let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); + + if check_revoke_no_close { + nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_revoke_and_ack); + check_added_monitors(&nodes[0], 1); + } - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); - nodes[1].logger.assert_log_contains("lightning::ln::channelmanager", "Remote side tried to send a 0-msat HTLC", 3); - check_closed_broadcast!(nodes[1], true).unwrap(); - check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Remote side tried to send a 0-msat HTLC".to_string() }, - [nodes[0].node.get_our_node_id()], 100000); + let starting_block = nodes[1].best_block_info(); + let mut block = create_dummy_block(starting_block.0, 42, Vec::new()); + for _ in + starting_block.1 + 1..TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + CHAN_CONFIRM_DEPTH + 2 + { + connect_block(&nodes[0], &block); + block.header.prev_blockhash = block.block_hash(); + } + if !check_revoke_no_close { + test_txn_broadcast(&nodes[0], &chan, 
None, HTLCType::NONE); + check_closed_broadcast!(nodes[0], true); + check_added_monitors(&nodes[0], 1); + check_closed_event!(nodes[0], 1, ClosureReason::HTLCsTimedOut, [node_b_id], 100000); + } else { + expect_payment_failed!(nodes[0], our_payment_hash, true); + } } +// Test that we close channels on-chain when broadcastable HTLCs reach their timeout window. +// There are only a few cases to test here: +// * its not really normative behavior, but we test that below-dust HTLCs "included" in +// broadcastable commitment transactions result in channel closure, +// * its included in an unrevoked-but-previous remote commitment transaction, +// * its included in the latest remote or local commitment transactions. +// We test each of the three possible commitment transactions individually and use both dust and +// non-dust HTLCs. +// Note that we don't bother testing both outbound and inbound HTLC failures for each case, and we +// assume they are handled the same across all six cases, as both outbound and inbound failures are +// tested for at least one of the cases in other tests. #[xtest(feature = "_externalize_tests")] -pub fn test_update_add_htlc_bolt2_sender_cltv_expiry_too_high() { - //BOLT 2 Requirement: MUST set cltv_expiry less than 500000000. - //It is enforced when constructing a route. 
- let chanmon_cfgs = create_chanmon_cfgs(2); - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); - let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); - let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 0); +pub fn htlc_claim_single_commitment_only_a() { + do_htlc_claim_local_commitment_only(true); + do_htlc_claim_local_commitment_only(false); + + do_htlc_claim_current_remote_commitment_only(true); + do_htlc_claim_current_remote_commitment_only(false); +} - let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), 0) - .with_bolt11_features(nodes[1].node.bolt11_invoice_features()).unwrap(); - let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], payment_params, 100000000); - route.paths[0].hops.last_mut().unwrap().cltv_expiry_delta = 500000001; - unwrap_send_err!(nodes[0], nodes[0].node.send_payment_with_route(route, our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0) - ), true, APIError::InvalidRoute { ref err }, - assert_eq!(err, &"Channel CLTV overflowed?")); +#[xtest(feature = "_externalize_tests")] +pub fn htlc_claim_single_commitment_only_b() { + do_htlc_claim_previous_remote_commitment_only(true, false); + do_htlc_claim_previous_remote_commitment_only(false, false); + do_htlc_claim_previous_remote_commitment_only(true, true); + do_htlc_claim_previous_remote_commitment_only(false, true); } #[xtest(feature = "_externalize_tests")] -pub fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_num_and_htlc_id_increment() { - //BOLT 2 Requirement: if result would be offering more than the remote's max_accepted_htlcs HTLCs, in the remote commitment transaction: MUST NOT add an HTLC. - //BOLT 2 Requirement: for the first HTLC it offers MUST set id to 0. 
- //BOLT 2 Requirement: MUST increase the value of id by 1 for each successive offer. +#[should_panic] +pub fn bolt2_open_channel_sending_node_checks_part1() { + //This test needs to be on its own as we are catching a panic let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); - let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); - let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 0); - let max_accepted_htlcs = nodes[1].node.per_peer_state.read().unwrap().get(&nodes[0].node.get_our_node_id()) - .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().context().counterparty_max_accepted_htlcs as u64; - - // Fetch a route in advance as we will be unable to once we're unable to send. - let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000); - for i in 0..max_accepted_htlcs { - let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000); - let payment_event = { - nodes[0].node.send_payment_with_route(route, our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap(); - check_added_monitors!(nodes[0], 1); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - let mut events = nodes[0].node.get_and_clear_pending_msg_events(); - assert_eq!(events.len(), 1); - if let MessageSendEvent::UpdateHTLCs { node_id: _, channel_id: _, updates: msgs::CommitmentUpdate{ update_add_htlcs: ref htlcs, .. 
}, } = events[0] { - assert_eq!(htlcs[0].htlc_id, i); - } else { - assert!(false); - } - SendEvent::from_event(events.remove(0)) - }; - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); - check_added_monitors!(nodes[1], 0); - commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); - expect_pending_htlcs_forwardable!(nodes[1]); - expect_payment_claimable!(nodes[1], our_payment_hash, our_payment_secret, 100000); + // Force duplicate randomness for every get-random call + for node in nodes.iter() { + *node.keys_manager.override_random_bytes.lock().unwrap() = Some([0; 32]); } - unwrap_send_err!(nodes[0], nodes[0].node.send_payment_with_route(route, our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0) - ), true, APIError::ChannelUnavailable { .. }, {}); - assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); + // BOLT #2 spec: Sending node must ensure temporary_channel_id is unique from any other channel ID with the same peer. + let channel_value_satoshis = 10000; + let push_msat = 10001; + nodes[0] + .node + .create_channel(node_b_id, channel_value_satoshis, push_msat, 42, None, None) + .unwrap(); + let node0_to_1_send_open_channel = + get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); + nodes[1].node.handle_open_channel(node_a_id, &node0_to_1_send_open_channel); + get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id); + + // Create a second channel with the same random values. This used to panic due to a colliding + // channel_id, but now panics due to a colliding outbound SCID alias. 
+ assert!(nodes[0] + .node + .create_channel(node_b_id, channel_value_satoshis, push_msat, 42, None, None) + .is_err()); } #[xtest(feature = "_externalize_tests")] -pub fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_value_in_flight() { - //BOLT 2 Requirement: if the sum of total offered HTLCs would exceed the remote's max_htlc_value_in_flight_msat: MUST NOT add an HTLC. +pub fn bolt2_open_channel_sending_node_checks_part2() { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); - let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); - let channel_value = 100000; - let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, channel_value, 0); - let max_in_flight = get_channel_value_stat!(nodes[0], nodes[1], chan.2).counterparty_max_htlc_value_in_flight_msat; - - send_payment(&nodes[0], &vec!(&nodes[1])[..], max_in_flight); - - let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], max_in_flight); - // Manually create a route over our max in flight (which our router normally automatically - // limits us to. - route.paths[0].hops[0].fee_msat = max_in_flight + 1; - unwrap_send_err!(nodes[0], nodes[0].node.send_payment_with_route(route, our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0) - ), true, APIError::ChannelUnavailable { .. }, {}); - assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - send_payment(&nodes[0], &[&nodes[1]], max_in_flight); -} + let node_b_id = nodes[1].node.get_our_node_id(); -// BOLT 2 Requirements for the Receiver when handling an update_add_htlc message. 
-#[xtest(feature = "_externalize_tests")] -pub fn test_update_add_htlc_bolt2_receiver_check_amount_received_more_than_min() { - //BOLT2 Requirement: receiving an amount_msat equal to 0, OR less than its own htlc_minimum_msat -> SHOULD fail the channel. - let chanmon_cfgs = create_chanmon_cfgs(2); - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); - let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); - let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000); - let htlc_minimum_msat: u64; - { - let per_peer_state = nodes[0].node.per_peer_state.read().unwrap(); - let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap(); - let channel = chan_lock.channel_by_id.get(&chan.2).unwrap(); - htlc_minimum_msat = channel.context().get_holder_htlc_minimum_msat(); - } - - let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], htlc_minimum_msat); - nodes[0].node.send_payment_with_route(route, our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap(); - check_added_monitors!(nodes[0], 1); - let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); - updates.update_add_htlcs[0].amount_msat = htlc_minimum_msat-1; - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); - assert!(nodes[1].node.list_channels().is_empty()); - let err_msg = check_closed_broadcast!(nodes[1], true).unwrap(); - assert!(regex::Regex::new(r"Remote side tried to send less than our minimum HTLC value\. Lower limit: \(\d+\)\. 
Actual: \(\d+\)").unwrap().is_match(err_msg.data.as_str())); - check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[0].node.get_our_node_id()], 100000); -} + // BOLT #2 spec: Sending node must set push_msat to equal or less than 1000 * funding_satoshis + let channel_value_satoshis = 10000; + // Test when push_msat is equal to 1000 * funding_satoshis. + let push_msat = 1000 * channel_value_satoshis + 1; + assert!(nodes[0] + .node + .create_channel(node_b_id, channel_value_satoshis, push_msat, 42, None, None) + .is_err()); -#[xtest(feature = "_externalize_tests")] -pub fn test_update_add_htlc_bolt2_receiver_sender_can_afford_amount_sent() { - //BOLT2 Requirement: receiving an amount_msat that the sending node cannot afford at the current feerate_per_kw (while maintaining its channel reserve): SHOULD fail the channel - let chanmon_cfgs = create_chanmon_cfgs(2); - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); - let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); - let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000); + nodes[0].node.create_channel(node_b_id, 100_000, 0, 42, None, None).unwrap(); - let chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2); - let channel_reserve = chan_stat.channel_reserve_msat; - let feerate = get_feerate!(nodes[0], nodes[1], chan.2); - let channel_type_features = get_channel_type_features!(nodes[0], nodes[1], chan.2); - // The 2* and +1 are for the fee spike reserve. 
- let commit_tx_fee_outbound = 2 * commit_tx_fee_msat(feerate, 1 + 1, &channel_type_features); - - let max_can_send = 5000000 - channel_reserve - commit_tx_fee_outbound; - let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], max_can_send); - nodes[0].node.send_payment_with_route(route, our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap(); - check_added_monitors!(nodes[0], 1); - let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); - - // Even though channel-initiator senders are required to respect the fee_spike_reserve, - // at this time channel-initiatee receivers are not required to enforce that senders - // respect the fee_spike_reserve. - updates.update_add_htlcs[0].amount_msat = max_can_send + commit_tx_fee_outbound + 1; - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); + let node0_to_1_send_open_channel = + get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); - assert!(nodes[1].node.list_channels().is_empty()); - let err_msg = check_closed_broadcast!(nodes[1], true).unwrap(); - assert_eq!(err_msg.data, "Remote HTLC add would put them under remote reserve value"); - check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[0].node.get_our_node_id()], 100000); + // BOLT #2 spec: Sending node should set to_self_delay sufficient to ensure the sender can irreversibly spend a commitment transaction output, in case of misbehaviour by the receiver. + assert!(BREAKDOWN_TIMEOUT > 0); + assert!(node0_to_1_send_open_channel.common_fields.to_self_delay == BREAKDOWN_TIMEOUT); + + // BOLT #2 spec: Sending node must ensure the chain_hash value identifies the chain it wishes to open the channel within. 
+ let chain_hash = ChainHash::using_genesis_block(Network::Testnet); + assert_eq!(node0_to_1_send_open_channel.common_fields.chain_hash, chain_hash); } #[xtest(feature = "_externalize_tests")] -pub fn test_update_add_htlc_bolt2_receiver_check_max_htlc_limit() { - //BOLT 2 Requirement: if a sending node adds more than its max_accepted_htlcs HTLCs to its local commitment transaction: SHOULD fail the channel - //BOLT 2 Requirement: MUST allow multiple HTLCs with the same payment_hash. +pub fn bolt2_open_channel_sane_dust_limit() { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); - let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); - let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - let send_amt = 3999999; - let (mut route, our_payment_hash, _, our_payment_secret) = - get_route_and_payment_hash!(nodes[0], nodes[1], 1000); - route.paths[0].hops[0].fee_msat = send_amt; - let session_priv = SecretKey::from_slice(&[42; 32]).unwrap(); - let cur_height = nodes[0].node.best_block.read().unwrap().height + 1; - let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::signing_only(), &route.paths[0], &session_priv); - let recipient_onion_fields = RecipientOnionFields::secret_only(our_payment_secret); - let (onion_payloads, _htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads( - &route.paths[0], send_amt, &recipient_onion_fields, cur_height, &None, None, None).unwrap(); - let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &our_payment_hash).unwrap(); - - let mut msg = msgs::UpdateAddHTLC { - channel_id: chan.2, - htlc_id: 0, - amount_msat: 1000, - payment_hash: our_payment_hash, - cltv_expiry: htlc_cltv, - onion_routing_packet: onion_packet.clone(), - skimmed_fee_msat: None, - blinding_point: None, 
- }; + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); - for i in 0..50 { - msg.htlc_id = i as u64; - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &msg); - } - msg.htlc_id = (50) as u64; - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &msg); + let value_sats = 1000000; + let push_msat = 10001; + nodes[0].node.create_channel(node_b_id, value_sats, push_msat, 42, None, None).unwrap(); + let mut node0_to_1_send_open_channel = + get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); + node0_to_1_send_open_channel.common_fields.dust_limit_satoshis = 547; + node0_to_1_send_open_channel.channel_reserve_satoshis = 100001; - assert!(nodes[1].node.list_channels().is_empty()); - let err_msg = check_closed_broadcast!(nodes[1], true).unwrap(); - assert!(regex::Regex::new(r"Remote tried to push more than our max accepted HTLCs \(\d+\)").unwrap().is_match(err_msg.data.as_str())); - check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[0].node.get_our_node_id()], 100000); + nodes[1].node.handle_open_channel(node_a_id, &node0_to_1_send_open_channel); + let events = nodes[1].node.get_and_clear_pending_msg_events(); + let err_msg = match events[0] { + MessageSendEvent::HandleError { + action: ErrorAction::SendErrorMessage { ref msg }, .. + } => msg.clone(), + _ => panic!("Unexpected event"), + }; + assert_eq!( + err_msg.data, + "dust_limit_satoshis (547) is greater than the implementation limit (546)" + ); } +// Test that if we fail to send an HTLC that is being freed from the holding cell, and the HTLC +// originated from our node, its failure is surfaced to the user. We trigger this failure to +// free the HTLC by increasing our fee while the HTLC is in the holding cell such that the HTLC +// is no longer affordable once it's freed. 
#[xtest(feature = "_externalize_tests")] -pub fn test_update_add_htlc_bolt2_receiver_check_max_in_flight_msat() { - //OR adds more than its max_htlc_value_in_flight_msat worth of offered HTLCs to its local commitment transaction: SHOULD fail the channel +pub fn test_fail_holding_cell_htlc_upon_free() { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); - let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000); - let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); - nodes[0].node.send_payment_with_route(route, our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap(); - check_added_monitors!(nodes[0], 1); - let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); - updates.update_add_htlcs[0].amount_msat = get_channel_value_stat!(nodes[1], nodes[0], chan.2).counterparty_max_htlc_value_in_flight_msat + 1; - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); - assert!(nodes[1].node.list_channels().is_empty()); - let err_msg = check_closed_broadcast!(nodes[1], true).unwrap(); - assert!(regex::Regex::new("Remote HTLC add would put them over our max HTLC value").unwrap().is_match(err_msg.data.as_str())); - check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[0].node.get_our_node_id()], 1000000); -} + let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000); -#[xtest(feature = "_externalize_tests")] -pub fn test_update_add_htlc_bolt2_receiver_check_cltv_expiry() 
{ - //BOLT2 Requirement: if sending node sets cltv_expiry to greater or equal to 500000000: SHOULD fail the channel. - let chanmon_cfgs = create_chanmon_cfgs(2); - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); - let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + // First nodes[0] generates an update_fee, setting the channel's + // pending_update_fee. + { + let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); + *feerate_lock += 20; + } + nodes[0].node.timer_tick_occurred(); + check_added_monitors(&nodes[0], 1); - create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000); - let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); - nodes[0].node.send_payment_with_route(route, our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap(); - check_added_monitors!(nodes[0], 1); - let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); - updates.update_add_htlcs[0].cltv_expiry = 500000000; - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); + let events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + let (update_msg, commitment_signed) = match events[0] { + MessageSendEvent::UpdateHTLCs { + updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, + .. 
+ } => (update_fee.as_ref(), commitment_signed), + _ => panic!("Unexpected event"), + }; - assert!(nodes[1].node.list_channels().is_empty()); - let err_msg = check_closed_broadcast!(nodes[1], true).unwrap(); - assert_eq!(err_msg.data,"Remote provided CLTV expiry in seconds instead of block height"); - check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[0].node.get_our_node_id()], 100000); -} + nodes[1].node.handle_update_fee(node_a_id, update_msg.unwrap()); -#[xtest(feature = "_externalize_tests")] -pub fn test_update_add_htlc_bolt2_receiver_check_repeated_id_ignore() { - //BOLT 2 requirement: if the sender did not previously acknowledge the commitment of that HTLC: MUST ignore a repeated id value after a reconnection. - // We test this by first testing that that repeated HTLCs pass commitment signature checks - // after disconnect and that non-sequential htlc_ids result in a channel failure. - let chanmon_cfgs = create_chanmon_cfgs(2); - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); - let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let mut chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2); + let channel_reserve = chan_stat.channel_reserve_msat; + let feerate = get_feerate!(nodes[0], nodes[1], chan.2); + let channel_type_features = get_channel_type_features!(nodes[0], nodes[1], chan.2); - create_announced_chan_between_nodes(&nodes, 0, 1); - let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); - nodes[0].node.send_payment_with_route(route, our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap(); - check_added_monitors!(nodes[0], 1); - let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); - 
nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); - - //Disconnect and Reconnect - nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); - nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id()); - nodes[0].node.peer_connected(nodes[1].node.get_our_node_id(), &msgs::Init { - features: nodes[1].node.init_features(), networks: None, remote_network_address: None - }, true).unwrap(); - let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]); - assert_eq!(reestablish_1.len(), 1); - nodes[1].node.peer_connected(nodes[0].node.get_our_node_id(), &msgs::Init { - features: nodes[0].node.init_features(), networks: None, remote_network_address: None - }, false).unwrap(); - let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]); - assert_eq!(reestablish_2.len(), 1); - nodes[0].node.handle_channel_reestablish(nodes[1].node.get_our_node_id(), &reestablish_2[0]); - handle_chan_reestablish_msgs!(nodes[0], nodes[1]); - nodes[1].node.handle_channel_reestablish(nodes[0].node.get_our_node_id(), &reestablish_1[0]); - handle_chan_reestablish_msgs!(nodes[1], nodes[0]); + // 2* and +1 HTLCs on the commit tx fee calculation for the fee spike reserve. 
+ let max_can_send = + 5000000 - channel_reserve - 2 * commit_tx_fee_msat(feerate, 1 + 1, &channel_type_features); + let (route, our_payment_hash, _, our_payment_secret) = + get_route_and_payment_hash!(nodes[0], nodes[1], max_can_send); - //Resend HTLC - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); - assert_eq!(updates.commitment_signed.len(), 1); - assert_eq!(updates.commitment_signed[0].htlc_signatures.len(), 1); - nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &updates.commitment_signed); - check_added_monitors!(nodes[1], 1); - let _bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + // Send a payment which passes reserve checks but gets stuck in the holding cell. + let onion = RecipientOnionFields::secret_only(our_payment_secret); + let id = PaymentId(our_payment_hash.0); + nodes[0].node.send_payment_with_route(route.clone(), our_payment_hash, onion, id).unwrap(); + chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2); + assert_eq!(chan_stat.holding_cell_outbound_amount_msat, max_can_send); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); + // Flush the pending fee update. 
+ nodes[1].node.handle_commitment_signed_batch_test(node_a_id, commitment_signed); + let (as_revoke_and_ack, _) = get_revoke_commit_msgs!(nodes[1], node_a_id); + check_added_monitors(&nodes[1], 1); + nodes[0].node.handle_revoke_and_ack(node_b_id, &as_revoke_and_ack); + check_added_monitors(&nodes[0], 1); - assert!(nodes[1].node.list_channels().is_empty()); - let err_msg = check_closed_broadcast!(nodes[1], true).unwrap(); - assert!(regex::Regex::new(r"Remote skipped HTLC ID \(skipped ID: \d+\)").unwrap().is_match(err_msg.data.as_str())); - check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[0].node.get_our_node_id()], 100000); + // Upon receipt of the RAA, there will be an attempt to resend the holding cell + // HTLC, but now that the fee has been raised the payment will now fail, causing + // us to surface its failure to the user. + chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2); + assert_eq!(chan_stat.holding_cell_outbound_amount_msat, 0); + nodes[0].logger.assert_log( + "lightning::ln::channel", + format!("Freeing holding cell with 1 HTLC updates in channel {}", chan.2), + 1, + ); + + // Check that the payment failed to be sent out. + let events = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 2); + match &events[0] { + &Event::PaymentPathFailed { + ref payment_id, + ref payment_hash, + ref payment_failed_permanently, + failure: PathFailure::OnPath { network_update: None }, + ref short_channel_id, + .. + } => { + assert_eq!(PaymentId(our_payment_hash.0), *payment_id.as_ref().unwrap()); + assert_eq!(our_payment_hash.clone(), *payment_hash); + assert_eq!(*payment_failed_permanently, false); + assert_eq!(*short_channel_id, Some(route.paths[0].hops[0].short_channel_id)); + }, + _ => panic!("Unexpected event"), + } + match &events[1] { + &Event::PaymentFailed { ref payment_hash, .. 
} => { + assert_eq!(Some(our_payment_hash), *payment_hash); + }, + _ => panic!("Unexpected event"), + } } +// Test that if multiple HTLCs are released from the holding cell and one is +// valid but the other is no longer valid upon release, the valid HTLC can be +// successfully completed while the other one fails as expected. #[xtest(feature = "_externalize_tests")] -pub fn test_update_fulfill_htlc_bolt2_update_fulfill_htlc_before_commitment() { - //BOLT 2 Requirement: until the corresponding HTLC is irrevocably committed in both sides' commitment transactions: MUST NOT send an update_fulfill_htlc, update_fail_htlc, or update_fail_malformed_htlc. - +pub fn test_free_and_fail_holding_cell_htlcs() { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); - let chan = create_announced_chan_between_nodes(&nodes, 0, 1); - let (route, our_payment_hash, our_payment_preimage, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); - nodes[0].node.send_payment_with_route(route, our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap(); - - check_added_monitors!(nodes[0], 1); - let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); - - let update_msg = msgs::UpdateFulfillHTLC{ - channel_id: chan.2, - htlc_id: 0, - payment_preimage: our_payment_preimage, - }; - - nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &update_msg); - assert!(nodes[0].node.list_channels().is_empty()); - let err_msg = check_closed_broadcast!(nodes[0], true).unwrap(); - assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been 
committed").unwrap().is_match(err_msg.data.as_str())); - check_added_monitors!(nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[1].node.get_our_node_id()], 100000); -} + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); -#[xtest(feature = "_externalize_tests")] -pub fn test_update_fulfill_htlc_bolt2_update_fail_htlc_before_commitment() { - //BOLT 2 Requirement: until the corresponding HTLC is irrevocably committed in both sides' commitment transactions: MUST NOT send an update_fulfill_htlc, update_fail_htlc, or update_fail_malformed_htlc. + let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000); - let chanmon_cfgs = create_chanmon_cfgs(2); - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); - let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); - let chan = create_announced_chan_between_nodes(&nodes, 0, 1); + // First nodes[0] generates an update_fee, setting the channel's + // pending_update_fee. 
+ { + let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); + *feerate_lock += 200; + } + nodes[0].node.timer_tick_occurred(); + check_added_monitors(&nodes[0], 1); - let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); - nodes[0].node.send_payment_with_route(route, our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap(); - check_added_monitors!(nodes[0], 1); - let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); - - let update_msg = msgs::UpdateFailHTLC{ - channel_id: chan.2, - htlc_id: 0, - reason: Vec::new(), - attribution_data: Some(AttributionData::new()) + let events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + let (update_msg, commitment_signed) = match events[0] { + MessageSendEvent::UpdateHTLCs { + updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, + .. 
+ } => (update_fee.as_ref(), commitment_signed), + _ => panic!("Unexpected event"), }; - nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &update_msg); + nodes[1].node.handle_update_fee(node_a_id, update_msg.unwrap()); - assert!(nodes[0].node.list_channels().is_empty()); - let err_msg = check_closed_broadcast!(nodes[0], true).unwrap(); - assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str())); - check_added_monitors!(nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[1].node.get_our_node_id()], 100000); -} + let mut chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2); + let channel_reserve = chan_stat.channel_reserve_msat; + let feerate = get_feerate!(nodes[0], nodes[1], chan.2); + let channel_type_features = get_channel_type_features!(nodes[0], nodes[1], chan.2); -#[xtest(feature = "_externalize_tests")] -pub fn test_update_fulfill_htlc_bolt2_update_fail_malformed_htlc_before_commitment() { - //BOLT 2 Requirement: until the corresponding HTLC is irrevocably committed in both sides' commitment transactions: MUST NOT send an update_fulfill_htlc, update_fail_htlc, or update_fail_malformed_htlc. + // 2* and +1 HTLCs on the commit tx fee calculation for the fee spike reserve. 
+ let amt_1 = 20000; + let amt_2 = 5000000 + - channel_reserve + - 2 * commit_tx_fee_msat(feerate, 2 + 1, &channel_type_features) + - amt_1; + let (route_1, payment_hash_1, payment_preimage_1, payment_secret_1) = + get_route_and_payment_hash!(nodes[0], nodes[1], amt_1); + let (route_2, payment_hash_2, _, payment_secret_2) = + get_route_and_payment_hash!(nodes[0], nodes[1], amt_2); - let chanmon_cfgs = create_chanmon_cfgs(2); - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); - let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); - let chan = create_announced_chan_between_nodes(&nodes, 0, 1); + // Send 2 payments which pass reserve checks but get stuck in the holding cell. + let onion = RecipientOnionFields::secret_only(payment_secret_1); + let id_1 = PaymentId(payment_hash_1.0); + nodes[0].node.send_payment_with_route(route_1, payment_hash_1, onion, id_1).unwrap(); + chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2); + assert_eq!(chan_stat.holding_cell_outbound_amount_msat, amt_1); - let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); - nodes[0].node.send_payment_with_route(route, our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap(); - check_added_monitors!(nodes[0], 1); - let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); - let update_msg = msgs::UpdateFailMalformedHTLC{ - channel_id: chan.2, - htlc_id: 0, - sha256_of_onion: [1; 32], - failure_code: 0x8000, - }; + let id_2 = PaymentId(nodes[0].keys_manager.get_secure_random_bytes()); + let onion = RecipientOnionFields::secret_only(payment_secret_2); + nodes[0].node.send_payment_with_route(route_2.clone(), payment_hash_2, onion, id_2).unwrap(); + chan_stat = 
get_channel_value_stat!(nodes[0], nodes[1], chan.2); + assert_eq!(chan_stat.holding_cell_outbound_amount_msat, amt_1 + amt_2); + + // Flush the pending fee update. + nodes[1].node.handle_commitment_signed_batch_test(node_a_id, commitment_signed); + let (revoke_and_ack, commitment_signed) = get_revoke_commit_msgs!(nodes[1], node_a_id); + check_added_monitors(&nodes[1], 1); + nodes[0].node.handle_revoke_and_ack(node_b_id, &revoke_and_ack); + nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &commitment_signed); + check_added_monitors(&nodes[0], 2); + + // Upon receipt of the RAA, there will be an attempt to resend the holding cell HTLCs, + // but now that the fee has been raised the second payment will now fail, causing us + // to surface its failure to the user. The first payment should succeed. + chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2); + assert_eq!(chan_stat.holding_cell_outbound_amount_msat, 0); + nodes[0].logger.assert_log( + "lightning::ln::channel", + format!("Freeing holding cell with 2 HTLC updates in channel {}", chan.2), + 1, + ); + + // Check that the second payment failed to be sent out. + let events = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 2); + match &events[0] { + &Event::PaymentPathFailed { + ref payment_id, + ref payment_hash, + ref payment_failed_permanently, + failure: PathFailure::OnPath { network_update: None }, + ref short_channel_id, + .. + } => { + assert_eq!(id_2, *payment_id.as_ref().unwrap()); + assert_eq!(payment_hash_2.clone(), *payment_hash); + assert_eq!(*payment_failed_permanently, false); + assert_eq!(*short_channel_id, Some(route_2.paths[0].hops[0].short_channel_id)); + }, + _ => panic!("Unexpected event"), + } + match &events[1] { + &Event::PaymentFailed { ref payment_hash, .. 
} => { + assert_eq!(Some(payment_hash_2), *payment_hash); + }, + _ => panic!("Unexpected event"), + } - nodes[0].node.handle_update_fail_malformed_htlc(nodes[1].node.get_our_node_id(), &update_msg); + // Complete the first payment and the RAA from the fee update. + let (payment_event, send_raa_event) = { + let mut msgs = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(msgs.len(), 2); + (SendEvent::from_event(msgs.remove(0)), msgs.remove(0)) + }; + let raa = match send_raa_event { + MessageSendEvent::SendRevokeAndACK { msg, .. } => msg, + _ => panic!("Unexpected event"), + }; + nodes[1].node.handle_revoke_and_ack(node_a_id, &raa); + check_added_monitors(&nodes[1], 1); + nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); + commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); + let events = nodes[1].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + match events[0] { + Event::PendingHTLCsForwardable { .. } => {}, + _ => panic!("Unexpected event"), + } + nodes[1].node.process_pending_htlc_forwards(); + let events = nodes[1].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + match events[0] { + Event::PaymentClaimable { .. 
} => {}, + _ => panic!("Unexpected event"), + } + nodes[1].node.claim_funds(payment_preimage_1); + check_added_monitors(&nodes[1], 1); + expect_payment_claimed!(nodes[1], payment_hash_1, amt_1); - assert!(nodes[0].node.list_channels().is_empty()); - let err_msg = check_closed_broadcast!(nodes[0], true).unwrap(); - assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str())); - check_added_monitors!(nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[1].node.get_our_node_id()], 100000); + let update_msgs = get_htlc_update_msgs!(nodes[1], node_a_id); + nodes[0].node.handle_update_fulfill_htlc(node_b_id, &update_msgs.update_fulfill_htlcs[0]); + commitment_signed_dance!(nodes[0], nodes[1], update_msgs.commitment_signed, false, true); + expect_payment_sent!(nodes[0], payment_preimage_1); } +// Test that if we fail to forward an HTLC that is being freed from the holding cell that the +// HTLC is failed backwards. We trigger this failure to forward the freed HTLC by increasing +// our fee while the HTLC is in the holding cell such that the HTLC is no longer affordable +// once it's freed. #[xtest(feature = "_externalize_tests")] -pub fn test_update_fulfill_htlc_bolt2_incorrect_htlc_id() { - //BOLT 2 Requirement: A receiving node: if the id does not correspond to an HTLC in its current commitment transaction MUST fail the channel. 
+pub fn test_fail_holding_cell_htlc_upon_free_multihop() { + let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + // Avoid having to include routing fees in calculations + let mut config = test_default_channel_config(); + config.channel_config.forwarding_fee_base_msat = 0; + config.channel_config.forwarding_fee_proportional_millionths = 0; + let node_chanmgrs = create_node_chanmgrs( + 3, + &node_cfgs, + &[Some(config.clone()), Some(config.clone()), Some(config.clone())], + ); + let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); - let chanmon_cfgs = create_chanmon_cfgs(2); - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); - let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - create_announced_chan_between_nodes(&nodes, 0, 1); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); - let (our_payment_preimage, our_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 100_000); + let chan_0_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000); + let chan_1_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 100000, 95000000); - nodes[1].node.claim_funds(our_payment_preimage); - check_added_monitors!(nodes[1], 1); - expect_payment_claimed!(nodes[1], our_payment_hash, 100_000); + // First nodes[1] generates an update_fee, setting the channel's + // pending_update_fee. 
+ { + let mut feerate_lock = chanmon_cfgs[1].fee_estimator.sat_per_kw.lock().unwrap(); + *feerate_lock += 20; + } + nodes[1].node.timer_tick_occurred(); + check_added_monitors(&nodes[1], 1); let events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); - let mut update_fulfill_msg: msgs::UpdateFulfillHTLC = { - match events[0] { - MessageSendEvent::UpdateHTLCs { node_id: _, channel_id: _, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, .. } } => { - assert!(update_add_htlcs.is_empty()); - assert_eq!(update_fulfill_htlcs.len(), 1); - assert!(update_fail_htlcs.is_empty()); - assert!(update_fail_malformed_htlcs.is_empty()); - assert!(update_fee.is_none()); - update_fulfill_htlcs[0].clone() - }, - _ => panic!("Unexpected event"), - } + let (update_msg, commitment_signed) = match events[0] { + MessageSendEvent::UpdateHTLCs { + updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, + .. 
+ } => (update_fee.as_ref(), commitment_signed), + _ => panic!("Unexpected event"), }; - update_fulfill_msg.htlc_id = 1; + nodes[2].node.handle_update_fee(node_b_id, update_msg.unwrap()); - nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &update_fulfill_msg); + let mut chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan_0_1.2); + let channel_reserve = chan_stat.channel_reserve_msat; + let feerate = get_feerate!(nodes[0], nodes[1], chan_0_1.2); + let channel_type_features = get_channel_type_features!(nodes[0], nodes[1], chan_0_1.2); - assert!(nodes[0].node.list_channels().is_empty()); - let err_msg = check_closed_broadcast!(nodes[0], true).unwrap(); - assert_eq!(err_msg.data, "Remote tried to fulfill/fail an HTLC we couldn't find"); - check_added_monitors!(nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[1].node.get_our_node_id()], 100000); -} + // Send a payment which passes reserve checks but gets stuck in the holding cell. + let max_can_send = + 5000000 - channel_reserve - 2 * commit_tx_fee_msat(feerate, 1 + 1, &channel_type_features); + let (route, our_payment_hash, _, our_payment_secret) = + get_route_and_payment_hash!(nodes[0], nodes[2], max_can_send); + let payment_event = { + let onion = RecipientOnionFields::secret_only(our_payment_secret); + let id = PaymentId(our_payment_hash.0); + nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id).unwrap(); + check_added_monitors(&nodes[0], 1); -#[xtest(feature = "_externalize_tests")] -pub fn test_update_fulfill_htlc_bolt2_wrong_preimage() { - //BOLT 2 Requirement: A receiving node: if the payment_preimage value in update_fulfill_htlc doesn't SHA256 hash to the corresponding HTLC payment_hash MUST fail the channel. 
+ let mut events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); - let chanmon_cfgs = create_chanmon_cfgs(2); - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); - let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - create_announced_chan_between_nodes(&nodes, 0, 1); + SendEvent::from_event(events.remove(0)) + }; + nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); + check_added_monitors(&nodes[1], 0); + commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); + expect_pending_htlcs_forwardable!(nodes[1]); - let (our_payment_preimage, our_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 100_000); + chan_stat = get_channel_value_stat!(nodes[1], nodes[2], chan_1_2.2); + assert_eq!(chan_stat.holding_cell_outbound_amount_msat, max_can_send); - nodes[1].node.claim_funds(our_payment_preimage); - check_added_monitors!(nodes[1], 1); - expect_payment_claimed!(nodes[1], our_payment_hash, 100_000); + // Flush the pending fee update. + nodes[2].node.handle_commitment_signed_batch_test(node_b_id, commitment_signed); + let (raa, commitment_signed) = get_revoke_commit_msgs!(nodes[2], node_b_id); + check_added_monitors(&nodes[2], 1); + nodes[1].node.handle_revoke_and_ack(node_c_id, &raa); + nodes[1].node.handle_commitment_signed_batch_test(node_c_id, &commitment_signed); + check_added_monitors(&nodes[1], 2); + // A final RAA message is generated to finalize the fee update. let events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); - let mut update_fulfill_msg: msgs::UpdateFulfillHTLC = { - match events[0] { - MessageSendEvent::UpdateHTLCs { node_id: _, channel_id: _, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, .. 
} } => { - assert!(update_add_htlcs.is_empty()); - assert_eq!(update_fulfill_htlcs.len(), 1); - assert!(update_fail_htlcs.is_empty()); - assert!(update_fail_malformed_htlcs.is_empty()); - assert!(update_fee.is_none()); - update_fulfill_htlcs[0].clone() - }, - _ => panic!("Unexpected event"), - } - }; - - update_fulfill_msg.payment_preimage = PaymentPreimage([1; 32]); - - nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &update_fulfill_msg); - - assert!(nodes[0].node.list_channels().is_empty()); - let err_msg = check_closed_broadcast!(nodes[0], true).unwrap(); - assert!(regex::Regex::new(r"Remote tried to fulfill HTLC \(\d+\) with an incorrect preimage").unwrap().is_match(err_msg.data.as_str())); - check_added_monitors!(nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[1].node.get_our_node_id()], 100000); -} - -#[xtest(feature = "_externalize_tests")] -pub fn test_update_fulfill_htlc_bolt2_missing_badonion_bit_for_malformed_htlc_message() { - //BOLT 2 Requirement: A receiving node: if the BADONION bit in failure_code is not set for update_fail_malformed_htlc MUST fail the channel. - let chanmon_cfgs = create_chanmon_cfgs(2); - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); - let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); - create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000); + let raa_msg = match &events[0] { + &MessageSendEvent::SendRevokeAndACK { ref msg, .. 
} => msg.clone(), + _ => panic!("Unexpected event"), + }; - let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); - nodes[0].node.send_payment_with_route(route, our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap(); - check_added_monitors!(nodes[0], 1); + nodes[2].node.handle_revoke_and_ack(node_b_id, &raa_msg); + check_added_monitors(&nodes[2], 1); + assert!(nodes[2].node.get_and_clear_pending_msg_events().is_empty()); - let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); - updates.update_add_htlcs[0].onion_routing_packet.version = 1; //Produce a malformed HTLC message + // nodes[1]'s ChannelManager will now signal that we have HTLC forwards to process. + let process_htlc_forwards_event = nodes[1].node.get_and_clear_pending_events(); + assert_eq!(process_htlc_forwards_event.len(), 2); + match &process_htlc_forwards_event[1] { + &Event::PendingHTLCsForwardable { .. 
} => {}, + _ => panic!("Unexpected event"), + } - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); - check_added_monitors!(nodes[1], 0); - commitment_signed_dance!(nodes[1], nodes[0], updates.commitment_signed, false, true); - expect_pending_htlcs_forwardable!(nodes[1]); - expect_htlc_handling_failed_destinations!(nodes[1].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::InvalidOnion]); + // In response, we call ChannelManager's process_pending_htlc_forwards + nodes[1].node.process_pending_htlc_forwards(); check_added_monitors(&nodes[1], 1); - let events = nodes[1].node.get_and_clear_pending_msg_events(); - - let mut update_msg: msgs::UpdateFailMalformedHTLC = { - match events[0] { - MessageSendEvent::UpdateHTLCs { node_id: _, channel_id: _, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, .. } } => { - assert!(update_add_htlcs.is_empty()); - assert!(update_fulfill_htlcs.is_empty()); - assert!(update_fail_htlcs.is_empty()); - assert_eq!(update_fail_malformed_htlcs.len(), 1); - assert!(update_fee.is_none()); - update_fail_malformed_htlcs[0].clone() - }, - _ => panic!("Unexpected event"), - } + // This causes the HTLC to be failed backwards. + let fail_event = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(fail_event.len(), 1); + let (fail_msg, commitment_signed) = match &fail_event[0] { + &MessageSendEvent::UpdateHTLCs { ref updates, .. 
} => { + assert_eq!(updates.update_add_htlcs.len(), 0); + assert_eq!(updates.update_fulfill_htlcs.len(), 0); + assert_eq!(updates.update_fail_malformed_htlcs.len(), 0); + assert_eq!(updates.update_fail_htlcs.len(), 1); + (updates.update_fail_htlcs[0].clone(), updates.commitment_signed.clone()) + }, + _ => panic!("Unexpected event"), }; - update_msg.failure_code &= !0x8000; - nodes[0].node.handle_update_fail_malformed_htlc(nodes[1].node.get_our_node_id(), &update_msg); - assert!(nodes[0].node.list_channels().is_empty()); - let err_msg = check_closed_broadcast!(nodes[0], true).unwrap(); - assert_eq!(err_msg.data, "Got update_fail_malformed_htlc with BADONION not set"); - check_added_monitors!(nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[1].node.get_our_node_id()], 1000000); + // Pass the failure messages back to nodes[0]. + nodes[0].node.handle_update_fail_htlc(node_b_id, &fail_msg); + nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &commitment_signed); + + // Complete the HTLC failure+removal process. + let (raa, commitment_signed) = get_revoke_commit_msgs!(nodes[0], node_b_id); + check_added_monitors(&nodes[0], 1); + nodes[1].node.handle_revoke_and_ack(node_a_id, &raa); + nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &commitment_signed); + check_added_monitors(&nodes[1], 2); + let final_raa_event = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(final_raa_event.len(), 1); + let raa = match &final_raa_event[0] { + &MessageSendEvent::SendRevokeAndACK { ref msg, .. 
} => msg.clone(), + _ => panic!("Unexpected event"), + }; + nodes[0].node.handle_revoke_and_ack(node_b_id, &raa); + expect_payment_failed_with_update!( + nodes[0], + our_payment_hash, + false, + chan_1_2.0.contents.short_channel_id, + false + ); + check_added_monitors(&nodes[0], 1); } #[xtest(feature = "_externalize_tests")] @@ -7309,44 +5909,65 @@ pub fn test_update_fulfill_htlc_bolt2_after_malformed_htlc_message_must_forward_ let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000); let chan_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1000000, 1000000); - let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], 100000); + let (route, our_payment_hash, _, our_payment_secret) = + get_route_and_payment_hash!(nodes[0], nodes[2], 100000); //First hop let mut payment_event = { - nodes[0].node.send_payment_with_route(route, our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap(); - check_added_monitors!(nodes[0], 1); + let onion = RecipientOnionFields::secret_only(our_payment_secret); + let id = PaymentId(our_payment_hash.0); + nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id).unwrap(); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); SendEvent::from_event(events.remove(0)) }; - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); - check_added_monitors!(nodes[1], 0); + nodes[1].node.handle_update_add_htlc(node_a_id, 
&payment_event.msgs[0]); + check_added_monitors(&nodes[1], 0); commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); expect_pending_htlcs_forwardable!(nodes[1]); let mut events_2 = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events_2.len(), 1); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); payment_event = SendEvent::from_event(events_2.remove(0)); assert_eq!(payment_event.msgs.len(), 1); //Second Hop payment_event.msgs[0].onion_routing_packet.version = 1; //Produce a malformed HTLC message - nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &payment_event.msgs[0]); - check_added_monitors!(nodes[2], 0); + nodes[2].node.handle_update_add_htlc(node_b_id, &payment_event.msgs[0]); + check_added_monitors(&nodes[2], 0); commitment_signed_dance!(nodes[2], nodes[1], payment_event.commitment_msg, false, true); expect_pending_htlcs_forwardable!(nodes[2]); - expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::InvalidOnion]); + expect_htlc_handling_failed_destinations!( + nodes[2].node.get_and_clear_pending_events(), + &[HTLCHandlingFailureType::InvalidOnion] + ); check_added_monitors(&nodes[2], 1); let events_3 = nodes[2].node.get_and_clear_pending_msg_events(); assert_eq!(events_3.len(), 1); - let update_msg : (msgs::UpdateFailMalformedHTLC, Vec) = { + let update_msg: (msgs::UpdateFailMalformedHTLC, Vec) = { match events_3[0] { - MessageSendEvent::UpdateHTLCs { node_id: _, channel_id: _, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => { + MessageSendEvent::UpdateHTLCs { + updates: + msgs::CommitmentUpdate { + ref update_add_htlcs, + ref update_fulfill_htlcs, + ref update_fail_htlcs, + ref update_fail_malformed_htlcs, + ref update_fee, + ref commitment_signed, + }, + .. 
+ } => { assert!(update_add_htlcs.is_empty()); assert!(update_fulfill_htlcs.is_empty()); assert!(update_fail_htlcs.is_empty()); @@ -7358,17 +5979,31 @@ pub fn test_update_fulfill_htlc_bolt2_after_malformed_htlc_message_must_forward_ } }; - nodes[1].node.handle_update_fail_malformed_htlc(nodes[2].node.get_our_node_id(), &update_msg.0); + nodes[1].node.handle_update_fail_malformed_htlc(node_c_id, &update_msg.0); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); commitment_signed_dance!(nodes[1], nodes[2], update_msg.1, false, true); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingFailureType::Forward { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!( + nodes[1], + vec![HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_2.2 }] + ); let events_4 = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events_4.len(), 1); //Confirm that handlinge the update_malformed_htlc message produces an update_fail_htlc message to be forwarded back along the route match events_4[0] { - MessageSendEvent::UpdateHTLCs { node_id: _, channel_id: _, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, .. } } => { + MessageSendEvent::UpdateHTLCs { + updates: + msgs::CommitmentUpdate { + ref update_add_htlcs, + ref update_fulfill_htlcs, + ref update_fail_htlcs, + ref update_fail_malformed_htlcs, + ref update_fee, + .. + }, + .. 
+ } => { assert!(update_add_htlcs.is_empty()); assert!(update_fulfill_htlcs.is_empty()); assert_eq!(update_fail_htlcs.len(), 1); @@ -7378,7 +6013,7 @@ pub fn test_update_fulfill_htlc_bolt2_after_malformed_htlc_message_must_forward_ _ => panic!("Unexpected event"), }; - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); } #[xtest(feature = "_externalize_tests")] @@ -7387,33 +6022,43 @@ pub fn test_channel_failed_after_message_with_badonion_node_perm_bits_set() { let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + create_announced_chan_between_nodes(&nodes, 0, 1); let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2); - let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], 100_000); + let (route, our_payment_hash, _, our_payment_secret) = + get_route_and_payment_hash!(nodes[0], nodes[2], 100_000); // First hop let mut payment_event = { - nodes[0].node.send_payment_with_route(route, our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap(); - check_added_monitors!(nodes[0], 1); + let onion = RecipientOnionFields::secret_only(our_payment_secret); + let id = PaymentId(our_payment_hash.0); + nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id).unwrap(); + check_added_monitors(&nodes[0], 1); SendEvent::from_node(&nodes[0]) }; - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); + nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); expect_pending_htlcs_forwardable!(nodes[1]); - 
check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); payment_event = SendEvent::from_node(&nodes[1]); assert_eq!(payment_event.msgs.len(), 1); // Second Hop payment_event.msgs[0].onion_routing_packet.version = 1; // Trigger an invalid_onion_version error - nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &payment_event.msgs[0]); - check_added_monitors!(nodes[2], 0); + nodes[2].node.handle_update_add_htlc(node_b_id, &payment_event.msgs[0]); + check_added_monitors(&nodes[2], 0); commitment_signed_dance!(nodes[2], nodes[1], payment_event.commitment_msg, false, true); expect_pending_htlcs_forwardable!(nodes[2]); - expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::InvalidOnion]); + expect_htlc_handling_failed_destinations!( + nodes[2].node.get_and_clear_pending_events(), + &[HTLCHandlingFailureType::InvalidOnion] + ); check_added_monitors(&nodes[2], 1); let events_3 = nodes[2].node.get_and_clear_pending_msg_events(); @@ -7424,22 +6069,23 @@ pub fn test_channel_failed_after_message_with_badonion_node_perm_bits_set() { // Set the NODE bit (BADONION and PERM already set in invalid_onion_version error) update_msg.failure_code |= 0x2000; - nodes[1].node.handle_update_fail_malformed_htlc(nodes[2].node.get_our_node_id(), &update_msg); + nodes[1].node.handle_update_fail_malformed_htlc(node_c_id, &update_msg); commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false, true); }, _ => panic!("Unexpected event"), } - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], - vec![HTLCHandlingFailureType::Forward { - node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!( + nodes[1], + vec![HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_2.2 }] + ); let events_4 = nodes[1].node.get_and_clear_pending_msg_events(); 
assert_eq!(events_4.len(), 1); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); match events_4[0] { MessageSendEvent::UpdateHTLCs { ref updates, .. } => { - nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]); + nodes[0].node.handle_update_fail_htlc(node_b_id, &updates.update_fail_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, false, true); }, _ => panic!("Unexpected event"), @@ -7451,11 +6097,18 @@ pub fn test_channel_failed_after_message_with_badonion_node_perm_bits_set() { // Expect a PaymentPathFailed event with a ChannelFailure network update for the channel between // the node originating the error to its next hop. match events_5[0] { - Event::PaymentPathFailed { error_code, failure: PathFailure::OnPath { network_update: Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent }) }, .. + Event::PaymentPathFailed { + error_code, + failure: + PathFailure::OnPath { + network_update: + Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent }), + }, + .. 
} => { assert_eq!(short_channel_id, chan_2.0.contents.short_channel_id); assert!(is_permanent); - assert_eq!(error_code, Some(0x8000|0x4000|0x2000|4)); + assert_eq!(error_code, Some(0x8000 | 0x4000 | 0x2000 | 4)); }, _ => panic!("Unexpected event"), } @@ -7479,14 +6132,23 @@ fn do_test_failure_delay_dust_htlc_local_commitment(announce_latest: bool) { let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - let chan =create_announced_chan_between_nodes(&nodes, 0, 1); - let bs_dust_limit = nodes[1].node.per_peer_state.read().unwrap().get(&nodes[0].node.get_our_node_id()) - .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().context().holder_dust_limit_satoshis; + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + + let chan = create_announced_chan_between_nodes(&nodes, 0, 1); + + let bs_dust_limit = { + let per_peer_state_lock; + let mut peer_state_lock; + let chan = + get_channel_ref!(nodes[1], nodes[0], per_peer_state_lock, peer_state_lock, chan.2); + chan.context().holder_dust_limit_satoshis + }; // We route 2 dust-HTLCs between A and B - let (_, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000); - let (_, payment_hash_2, ..) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000); + let (_, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit * 1000); + let (_, payment_hash_2, ..) 
= route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit * 1000); route_payment(&nodes[0], &[&nodes[1]], 1000000); // Cache one local commitment tx as previous @@ -7494,14 +6156,17 @@ fn do_test_failure_delay_dust_htlc_local_commitment(announce_latest: bool) { // Fail one HTLC to prune it in the will-be-latest-local commitment tx nodes[1].node.fail_htlc_backwards(&payment_hash_2); - check_added_monitors!(nodes[1], 0); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingFailureType::Receive { payment_hash: payment_hash_2 }]); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 0); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!( + nodes[1], + vec![HTLCHandlingFailureType::Receive { payment_hash: payment_hash_2 }] + ); + check_added_monitors(&nodes[1], 1); - let remove = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); - nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &remove.update_fail_htlcs[0]); - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &remove.commitment_signed); - check_added_monitors!(nodes[0], 1); + let remove = get_htlc_update_msgs!(nodes[1], node_a_id); + nodes[0].node.handle_update_fail_htlc(node_b_id, &remove.update_fail_htlcs[0]); + nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &remove.commitment_signed); + check_added_monitors(&nodes[0], 1); // Cache one local commitment tx as lastest let as_last_commitment_tx = get_local_commitment_txn!(nodes[0], chan.2); @@ -7509,13 +6174,13 @@ fn do_test_failure_delay_dust_htlc_local_commitment(announce_latest: bool) { let events = nodes[0].node.get_and_clear_pending_msg_events(); match events[0] { MessageSendEvent::SendRevokeAndACK { node_id, .. } => { - assert_eq!(node_id, nodes[1].node.get_our_node_id()); + assert_eq!(node_id, node_b_id); }, _ => panic!("Unexpected event"), } match events[1] { MessageSendEvent::UpdateHTLCs { node_id, .. 
} => { - assert_eq!(node_id, nodes[1].node.get_our_node_id()); + assert_eq!(node_id, node_b_id); }, _ => panic!("Unexpected event"), } @@ -7529,8 +6194,8 @@ fn do_test_failure_delay_dust_htlc_local_commitment(announce_latest: bool) { } check_closed_broadcast!(nodes[0], true); - check_added_monitors!(nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000); + check_added_monitors(&nodes[0], 1); + check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [node_b_id], 100000); assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0); connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1); @@ -7548,7 +6213,7 @@ fn do_test_failure_delay_dust_htlc_local_commitment(announce_latest: bool) { assert_eq!(payment_hash, payment_hash_2); } }, - Event::PaymentFailed { .. } => {} + Event::PaymentFailed { .. } => {}, _ => panic!("Unexpected event"), } } @@ -7572,12 +6237,21 @@ fn do_test_sweep_outbound_htlc_failure_update(revoked: bool, local: bool) { let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); let nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + let node_b_id = nodes[1].node.get_our_node_id(); + let chan = create_announced_chan_between_nodes(&nodes, 0, 1); - let bs_dust_limit = nodes[1].node.per_peer_state.read().unwrap().get(&nodes[0].node.get_our_node_id()) - .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().context().holder_dust_limit_satoshis; + let bs_dust_limit = { + let per_peer_state_lock; + let mut peer_state_lock; + let chan = + get_channel_ref!(nodes[1], nodes[0], per_peer_state_lock, peer_state_lock, chan.2); + chan.context().holder_dust_limit_satoshis + }; - let (_payment_preimage_1, dust_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000); + let (_payment_preimage_1, dust_hash, ..) 
= + route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit * 1000); let (_payment_preimage_2, non_dust_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 1000000); let as_commitment_tx = get_local_commitment_txn!(nodes[0], chan.2); @@ -7586,23 +6260,26 @@ fn do_test_sweep_outbound_htlc_failure_update(revoked: bool, local: bool) { // We revoked bs_commitment_tx if revoked { let (payment_preimage_3, ..) = route_payment(&nodes[0], &[&nodes[1]], 1000000); - claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_3); + claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_3); } let mut timeout_tx = Vec::new(); if local { // We fail dust-HTLC 1 by broadcast of local commitment tx mine_transaction(&nodes[0], &as_commitment_tx[0]); - check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000); + check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [node_b_id], 100000); connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1); expect_payment_failed!(nodes[0], dust_hash, false); connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS - ANTI_REORG_DELAY); check_closed_broadcast!(nodes[0], true); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0); timeout_tx.push(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap()[0].clone()); - assert_eq!(timeout_tx[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); + assert_eq!( + timeout_tx[0].input[0].witness.last().unwrap().len(), + OFFERED_HTLC_SCRIPT_WEIGHT + ); // We fail non-dust-HTLC 2 by broadcast of local HTLC-timeout tx on local commitment tx assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0); mine_transaction(&nodes[0], &timeout_tx[0]); @@ -7612,20 +6289,29 @@ fn do_test_sweep_outbound_htlc_failure_update(revoked: bool, local: bool) { // We fail dust-HTLC 1 by broadcast of remote commitment tx. 
If revoked, fail also non-dust HTLC mine_transaction(&nodes[0], &bs_commitment_tx[0]); check_closed_broadcast!(nodes[0], true); - check_added_monitors!(nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000); + check_added_monitors(&nodes[0], 1); + check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [node_b_id], 100000); assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0); connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires - timeout_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().drain(..) - .filter(|tx| tx.input[0].previous_output.txid == bs_commitment_tx[0].compute_txid()).collect(); + timeout_tx = nodes[0] + .tx_broadcaster + .txn_broadcasted + .lock() + .unwrap() + .drain(..) + .filter(|tx| tx.input[0].previous_output.txid == bs_commitment_tx[0].compute_txid()) + .collect(); check_spends!(timeout_tx[0], bs_commitment_tx[0]); // For both a revoked or non-revoked commitment transaction, after ANTI_REORG_DELAY the // dust HTLC should have been failed. 
expect_payment_failed!(nodes[0], dust_hash, false); if !revoked { - assert_eq!(timeout_tx[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT); + assert_eq!( + timeout_tx[0].input[0].witness.last().unwrap().len(), + ACCEPTED_HTLC_SCRIPT_WEIGHT + ); } else { assert_eq!(timeout_tx[0].lock_time.to_consensus_u32(), 11); } @@ -7657,62 +6343,120 @@ pub fn test_user_configurable_csv_delay() { let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &user_cfgs); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let logger = TestLogger::new(); // We test config.our_to_self > BREAKDOWN_TIMEOUT is enforced in OutboundV1Channel::new() - if let Err(error) = OutboundV1Channel::new(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator::new(253)), - &nodes[0].keys_manager, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &nodes[1].node.init_features(), 1000000, 1000000, 0, - &low_our_to_self_config, 0, 42, None, &logger) - { + if let Err(error) = OutboundV1Channel::new( + &LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator::new(253)), + &nodes[0].keys_manager, + &nodes[0].keys_manager, + node_b_id, + &nodes[1].node.init_features(), + 1000000, + 1000000, + 0, + &low_our_to_self_config, + 0, + 42, + None, + &logger, + ) { match error { - APIError::APIMisuseError { err } => { assert!(regex::Regex::new(r"Configured with an unreasonable our_to_self_delay \(\d+\) putting user funds at risks").unwrap().is_match(err.as_str())); }, + APIError::APIMisuseError { err } => { + assert!(regex::Regex::new( + r"Configured with an unreasonable our_to_self_delay \(\d+\) putting user funds at risks" + ) + .unwrap() + .is_match(err.as_str())); + }, _ => panic!("Unexpected event"), } - } else { assert!(false) } + } else { + assert!(false) + } // We test config.our_to_self > BREAKDOWN_TIMEOUT is 
enforced in InboundV1Channel::new() - nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 1000000, 1000000, 42, None, None).unwrap(); - let mut open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id()); + nodes[1].node.create_channel(node_a_id, 1000000, 1000000, 42, None, None).unwrap(); + let mut open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, node_a_id); open_channel.common_fields.to_self_delay = 200; - if let Err(error) = InboundV1Channel::new(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator::new(253)), - &nodes[0].keys_manager, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &nodes[0].node.channel_type_features(), &nodes[1].node.init_features(), &open_channel, 0, - &low_our_to_self_config, 0, &nodes[0].logger, /*is_0conf=*/false) - { + if let Err(error) = InboundV1Channel::new( + &LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator::new(253)), + &nodes[0].keys_manager, + &nodes[0].keys_manager, + node_b_id, + &nodes[0].node.channel_type_features(), + &nodes[1].node.init_features(), + &open_channel, + 0, + &low_our_to_self_config, + 0, + &nodes[0].logger, + /*is_0conf=*/ false, + ) { match error { ChannelError::Close((err, _)) => { - let regex = regex::Regex::new(r"Configured with an unreasonable our_to_self_delay \(\d+\) putting user funds at risks").unwrap(); + let regex = regex::Regex::new( + r"Configured with an unreasonable our_to_self_delay \(\d+\) putting user funds at risks", + ) + .unwrap(); assert!(regex.is_match(err.as_str())); }, _ => panic!("Unexpected event"), } - } else { assert!(false); } + } else { + assert!(false); + } // We test msg.to_self_delay <= config.their_to_self_delay is enforced in Chanel::accept_channel() - nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1000000, 1000000, 42, None, None).unwrap(); - nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], 
MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id())); - let mut accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()); + nodes[0].node.create_channel(node_b_id, 1000000, 1000000, 42, None, None).unwrap(); + let open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); + nodes[1].node.handle_open_channel(node_a_id, &open_channel); + + let mut accept_channel = + get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id); accept_channel.common_fields.to_self_delay = 200; - nodes[0].node.handle_accept_channel(nodes[1].node.get_our_node_id(), &accept_channel); + nodes[0].node.handle_accept_channel(node_b_id, &accept_channel); let reason_msg; - if let MessageSendEvent::HandleError { ref action, .. } = nodes[0].node.get_and_clear_pending_msg_events()[0] { + if let MessageSendEvent::HandleError { ref action, .. } = + nodes[0].node.get_and_clear_pending_msg_events()[0] + { match action { &ErrorAction::SendErrorMessage { ref msg } => { assert!(regex::Regex::new(r"They wanted our payments to be delayed by a needlessly long period\. Upper limit: \d+\. 
Actual: \d+").unwrap().is_match(msg.data.as_str())); reason_msg = msg.data.clone(); }, - _ => { panic!(); } + _ => { + panic!(); + }, } - } else { panic!(); } - check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: reason_msg }, [nodes[1].node.get_our_node_id()], 1000000); + } else { + panic!(); + } + let reason = ClosureReason::ProcessingError { err: reason_msg }; + check_closed_event!(nodes[0], 1, reason, [node_b_id], 1000000); // We test msg.to_self_delay <= config.their_to_self_delay is enforced in InboundV1Channel::new() - nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 1000000, 1000000, 42, None, None).unwrap(); - let mut open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id()); + nodes[1].node.create_channel(node_a_id, 1000000, 1000000, 42, None, None).unwrap(); + let mut open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, node_a_id); open_channel.common_fields.to_self_delay = 200; - if let Err(error) = InboundV1Channel::new(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator::new(253)), - &nodes[0].keys_manager, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &nodes[0].node.channel_type_features(), &nodes[1].node.init_features(), &open_channel, 0, - &high_their_to_self_config, 0, &nodes[0].logger, /*is_0conf=*/false) - { + if let Err(error) = InboundV1Channel::new( + &LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator::new(253)), + &nodes[0].keys_manager, + &nodes[0].keys_manager, + node_b_id, + &nodes[0].node.channel_type_features(), + &nodes[1].node.init_features(), + &open_channel, + 0, + &high_their_to_self_config, + 0, + &nodes[0].logger, + /*is_0conf=*/ false, + ) { match error { ChannelError::Close((err, _)) => { let regex = regex::Regex::new(r"They wanted our payments to be delayed by a needlessly long period\. Upper limit: \d+\. 
Actual: \d+").unwrap(); @@ -7720,7 +6464,9 @@ pub fn test_user_configurable_csv_delay() { }, _ => panic!("Unexpected event"), } - } else { assert!(false); } + } else { + assert!(false); + } } #[xtest(feature = "_externalize_tests")] @@ -7734,32 +6480,53 @@ pub fn test_check_htlc_underpaying() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + // Create some initial channels create_announced_chan_between_nodes(&nodes, 0, 1); let scorer = test_utils::TestScorer::new(); let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes(); - let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV) - .with_bolt11_features(nodes[1].node.bolt11_invoice_features()).unwrap(); + let payment_params = PaymentParameters::from_node_id(node_b_id, TEST_FINAL_CLTV) + .with_bolt11_features(nodes[1].node.bolt11_invoice_features()) + .unwrap(); let route_params = RouteParameters::from_payment_params_and_value(payment_params, 10_000); - let route = get_route(&nodes[0].node.get_our_node_id(), &route_params, &nodes[0].network_graph.read_only(), - None, nodes[0].logger, &scorer, &Default::default(), &random_seed_bytes).unwrap(); + let route = get_route( + &node_a_id, + &route_params, + &nodes[0].network_graph.read_only(), + None, + nodes[0].logger, + &scorer, + &Default::default(), + &random_seed_bytes, + ) + .unwrap(); + let (_, our_payment_hash, _) = get_payment_preimage_hash!(nodes[0]); - let our_payment_secret = nodes[1].node.create_inbound_payment_for_hash(our_payment_hash, Some(100_000), 7200, None).unwrap(); - nodes[0].node.send_payment_with_route(route, our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap(); - check_added_monitors!(nodes[0], 1); + let our_payment_secret = nodes[1] 
+ .node + .create_inbound_payment_for_hash(our_payment_hash, Some(100_000), 7200, None) + .unwrap(); + let onion = RecipientOnionFields::secret_only(our_payment_secret); + let id = PaymentId(our_payment_hash.0); + nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id).unwrap(); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let mut payment_event = SendEvent::from_event(events.pop().unwrap()); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); + nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); // Note that we first have to wait a random delay before processing the receipt of the HTLC, // and then will wait a second random delay before failing the HTLC back: expect_pending_htlcs_forwardable!(nodes[1]); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingFailureType::Receive { payment_hash: our_payment_hash }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!( + nodes[1], + vec![HTLCHandlingFailureType::Receive { payment_hash: our_payment_hash }] + ); // Node 3 is expecting payment of 100_000 but received 10_000, // it should fail htlc like we didn't know the preimage. 
@@ -7768,7 +6535,18 @@ pub fn test_check_htlc_underpaying() { let events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let (update_fail_htlc, commitment_signed) = match events[0] { - MessageSendEvent::UpdateHTLCs { node_id: _, channel_id: _, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => { + MessageSendEvent::UpdateHTLCs { + updates: + msgs::CommitmentUpdate { + ref update_add_htlcs, + ref update_fulfill_htlcs, + ref update_fail_htlcs, + ref update_fail_malformed_htlcs, + ref update_fee, + ref commitment_signed, + }, + .. + } => { assert!(update_add_htlcs.is_empty()); assert!(update_fulfill_htlcs.is_empty()); assert_eq!(update_fail_htlcs.len(), 1); @@ -7778,15 +6556,16 @@ pub fn test_check_htlc_underpaying() { }, _ => panic!("Unexpected event"), }; - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); - nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &update_fail_htlc); + nodes[0].node.handle_update_fail_htlc(node_b_id, &update_fail_htlc); commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false, true); // 10_000 msat as u64, followed by a height of CHAN_CONFIRM_DEPTH as u32 let mut expected_failure_data = (10_000 as u64).to_be_bytes().to_vec(); expected_failure_data.extend_from_slice(&CHAN_CONFIRM_DEPTH.to_be_bytes()); - expect_payment_failed!(nodes[0], our_payment_hash, true, LocalHTLCFailureReason::IncorrectPaymentDetails, &expected_failure_data[..]); + let reason = LocalHTLCFailureReason::IncorrectPaymentDetails; + expect_payment_failed!(nodes[0], our_payment_hash, true, reason, &expected_failure_data[..]); } #[xtest(feature = "_externalize_tests")] @@ -7799,6 +6578,9 @@ pub fn test_announce_disable_channels() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, 
&node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + // Connect a dummy node for proper future events broadcasting connect_dummy_node(&nodes[0]); @@ -7807,8 +6589,8 @@ pub fn test_announce_disable_channels() { create_announced_chan_between_nodes(&nodes, 0, 1); // Disconnect peers - nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); - nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id()); + nodes[0].node.peer_disconnected(node_b_id); + nodes[1].node.peer_disconnected(node_a_id); for _ in 0..DISABLE_GOSSIP_TICKS + 1 { nodes[0].node.timer_tick_occurred(); @@ -7819,9 +6601,12 @@ pub fn test_announce_disable_channels() { for e in msg_events { match e { MessageSendEvent::BroadcastChannelUpdate { ref msg } => { - assert_eq!(msg.contents.channel_flags & (1<<1), 1<<1); // The "channel disabled" bit should be set - // Check that each channel gets updated exactly once - if chans_disabled.insert(msg.contents.short_channel_id, msg.contents.timestamp).is_some() { + assert_eq!(msg.contents.channel_flags & (1 << 1), 1 << 1); // The "channel disabled" bit should be set + // Check that each channel gets updated exactly once + if chans_disabled + .insert(msg.contents.short_channel_id, msg.contents.timestamp) + .is_some() + { panic!("Generated ChannelUpdate for wrong chan!"); } }, @@ -7829,31 +6614,32 @@ pub fn test_announce_disable_channels() { } } // Reconnect peers - nodes[0].node.peer_connected(nodes[1].node.get_our_node_id(), &msgs::Init { - features: nodes[1].node.init_features(), networks: None, remote_network_address: None - }, true).unwrap(); + let init_msg = msgs::Init { + features: nodes[1].node.init_features(), + networks: None, + remote_network_address: None, + }; + nodes[0].node.peer_connected(node_b_id, &init_msg, true).unwrap(); let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]); assert_eq!(reestablish_1.len(), 3); - 
nodes[1].node.peer_connected(nodes[0].node.get_our_node_id(), &msgs::Init { - features: nodes[0].node.init_features(), networks: None, remote_network_address: None - }, false).unwrap(); + nodes[1].node.peer_connected(node_a_id, &init_msg, false).unwrap(); let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]); assert_eq!(reestablish_2.len(), 3); // Reestablish chan_1 - nodes[0].node.handle_channel_reestablish(nodes[1].node.get_our_node_id(), &reestablish_2[0]); + nodes[0].node.handle_channel_reestablish(node_b_id, &reestablish_2[0]); handle_chan_reestablish_msgs!(nodes[0], nodes[1]); - nodes[1].node.handle_channel_reestablish(nodes[0].node.get_our_node_id(), &reestablish_1[0]); + nodes[1].node.handle_channel_reestablish(node_a_id, &reestablish_1[0]); handle_chan_reestablish_msgs!(nodes[1], nodes[0]); // Reestablish chan_2 - nodes[0].node.handle_channel_reestablish(nodes[1].node.get_our_node_id(), &reestablish_2[1]); + nodes[0].node.handle_channel_reestablish(node_b_id, &reestablish_2[1]); handle_chan_reestablish_msgs!(nodes[0], nodes[1]); - nodes[1].node.handle_channel_reestablish(nodes[0].node.get_our_node_id(), &reestablish_1[1]); + nodes[1].node.handle_channel_reestablish(node_a_id, &reestablish_1[1]); handle_chan_reestablish_msgs!(nodes[1], nodes[0]); // Reestablish chan_3 - nodes[0].node.handle_channel_reestablish(nodes[1].node.get_our_node_id(), &reestablish_2[2]); + nodes[0].node.handle_channel_reestablish(node_b_id, &reestablish_2[2]); handle_chan_reestablish_msgs!(nodes[0], nodes[1]); - nodes[1].node.handle_channel_reestablish(nodes[0].node.get_our_node_id(), &reestablish_1[2]); + nodes[1].node.handle_channel_reestablish(node_a_id, &reestablish_1[2]); handle_chan_reestablish_msgs!(nodes[1], nodes[0]); for _ in 0..ENABLE_GOSSIP_TICKS { @@ -7866,7 +6652,7 @@ pub fn test_announce_disable_channels() { for e in msg_events { match e { MessageSendEvent::BroadcastChannelUpdate { ref msg } => { - assert_eq!(msg.contents.channel_flags & (1<<1), 0); // 
The "channel disabled" bit should be off + assert_eq!(msg.contents.channel_flags & (1 << 1), 0); // The "channel disabled" bit should be off match chans_disabled.remove(&msg.contents.short_channel_id) { // Each update should have a higher timestamp than the previous one, replacing // the old one. @@ -7891,13 +6677,16 @@ pub fn test_bump_penalty_txn_on_revoked_commitment() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000); - let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0; - let payment_params = PaymentParameters::from_node_id(nodes[0].node.get_our_node_id(), TEST_FINAL_CLTV) - .with_bolt11_features(nodes[0].node.bolt11_invoice_features()).unwrap(); - let (route,_, _, _) = get_route_and_payment_hash!(nodes[1], nodes[0], payment_params, 3000000); - send_along_route(&nodes[1], route, &vec!(&nodes[0])[..], 3000000); + let payment_preimage = route_payment(&nodes[0], &[&nodes[1]], 3000000).0; + let payment_params = PaymentParameters::from_node_id(node_a_id, TEST_FINAL_CLTV) + .with_bolt11_features(nodes[0].node.bolt11_invoice_features()) + .unwrap(); + let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[0], payment_params, 3000000); + send_along_route(&nodes[1], route, &[&nodes[0]], 3000000); let revoked_txn = get_local_commitment_txn!(nodes[0], chan.2); // Revoked commitment txn with 4 outputs : to_local, to_remote, 1 outgoing HTLC, 1 incoming HTLC @@ -7909,9 +6698,9 @@ pub fn test_bump_penalty_txn_on_revoked_commitment() { let header_114 = connect_blocks(&nodes[1], 14); // Actually revoke tx by claiming a HTLC - claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage); + claim_payment(&nodes[0], &[&nodes[1]], payment_preimage); connect_block(&nodes[1], &create_dummy_block(header_114, 42, 
vec![revoked_txn[0].clone()])); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); macro_rules! check_broadcasted_txn { ($penalty_txids:ident, $fee_rates:ident) => { @@ -7929,8 +6718,15 @@ pub fn test_bump_penalty_txn_on_revoked_commitment() { assert!(tx.input.len() == 1 || tx.input.len() == 2); assert_eq!(tx.output.len(), 1); check_spends!(tx, revoked_txn[0]); - let total_input: u64 = tx.input.iter().map(|i| revoked_txn[0].output[i.previous_output.vout as usize].value.to_sat()).sum(); - let fee_rate: u64 = (total_input - tx.output[0].value.to_sat()) * 1000 / tx.weight().to_wu(); + let total_input: u64 = tx + .input + .iter() + .map(|i| { + revoked_txn[0].output[i.previous_output.vout as usize].value.to_sat() + }) + .sum(); + let fee_rate: u64 = + (total_input - tx.output[0].value.to_sat()) * 1000 / tx.weight().to_wu(); assert_ne!(fee_rate, 0); for input in &tx.input { $fee_rates.insert(input.previous_output, fee_rate); @@ -7941,7 +6737,7 @@ pub fn test_bump_penalty_txn_on_revoked_commitment() { assert_eq!($penalty_txids.len(), 3); node_txn.clear(); } - } + }; } // One or more justice tx should have been broadcast, check it. 
@@ -7987,20 +6783,44 @@ pub fn test_bump_penalty_txn_on_revoked_htlcs() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000); // Lock HTLC in both directions (using a slightly lower CLTV delay to provide timely RBF bumps) - let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), 50).with_bolt11_features(nodes[1].node.bolt11_invoice_features()).unwrap(); + let payment_params = PaymentParameters::from_node_id(node_b_id, 50) + .with_bolt11_features(nodes[1].node.bolt11_invoice_features()) + .unwrap(); let scorer = test_utils::TestScorer::new(); let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes(); let route_params = RouteParameters::from_payment_params_and_value(payment_params, 3_000_000); - let route = get_route(&nodes[0].node.get_our_node_id(), &route_params, &nodes[0].network_graph.read_only(), None, - nodes[0].logger, &scorer, &Default::default(), &random_seed_bytes).unwrap(); + let route = get_route( + &node_a_id, + &route_params, + &nodes[0].network_graph.read_only(), + None, + nodes[0].logger, + &scorer, + &Default::default(), + &random_seed_bytes, + ) + .unwrap(); let payment_preimage = send_along_route(&nodes[0], route, &[&nodes[1]], 3_000_000).0; - let payment_params = PaymentParameters::from_node_id(nodes[0].node.get_our_node_id(), 50) - .with_bolt11_features(nodes[0].node.bolt11_invoice_features()).unwrap(); + let payment_params = PaymentParameters::from_node_id(node_a_id, 50) + .with_bolt11_features(nodes[0].node.bolt11_invoice_features()) + .unwrap(); let route_params = RouteParameters::from_payment_params_and_value(payment_params, 3_000_000); - let route = get_route(&nodes[1].node.get_our_node_id(), &route_params, 
&nodes[1].network_graph.read_only(), None, - nodes[0].logger, &scorer, &Default::default(), &random_seed_bytes).unwrap(); + let route = get_route( + &node_b_id, + &route_params, + &nodes[1].network_graph.read_only(), + None, + nodes[0].logger, + &scorer, + &Default::default(), + &random_seed_bytes, + ) + .unwrap(); let failed_payment_hash = send_along_route(&nodes[1], route, &[&nodes[0]], 3_000_000).1; let revoked_local_txn = get_local_commitment_txn!(nodes[1], chan.2); @@ -8008,13 +6828,16 @@ pub fn test_bump_penalty_txn_on_revoked_htlcs() { assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan.3.compute_txid()); // Revoke local commitment tx - claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage); + claim_payment(&nodes[0], &[&nodes[1]], payment_preimage); // B will generate both revoked HTLC-timeout/HTLC-preimage txn from revoked commitment tx - connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![revoked_local_txn[0].clone()])); + connect_block( + &nodes[1], + &create_dummy_block(nodes[1].best_block_hash(), 42, vec![revoked_local_txn[0].clone()]), + ); check_closed_broadcast!(nodes[1], true); - check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 1000000); + check_added_monitors(&nodes[1], 1); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [node_a_id], 1000000); connect_blocks(&nodes[1], 50); // Confirm blocks until the HTLC expires (note CLTV was explicitly 50 above) let revoked_htlc_txn = { @@ -8037,12 +6860,19 @@ pub fn test_bump_penalty_txn_on_revoked_htlcs() { let hash_128 = connect_blocks(&nodes[0], 40); let block_11 = create_dummy_block(hash_128, 42, vec![revoked_local_txn[0].clone()]); connect_block(&nodes[0], &block_11); - let block_129 = create_dummy_block(block_11.block_hash(), 42, vec![revoked_htlc_txn[0].clone(), revoked_htlc_txn[1].clone()]); + let block_129 = create_dummy_block( 
+ block_11.block_hash(), + 42, + vec![revoked_htlc_txn[0].clone(), revoked_htlc_txn[1].clone()], + ); connect_block(&nodes[0], &block_129); let events = nodes[0].node.get_and_clear_pending_events(); - expect_pending_htlcs_forwardable_conditions(events[0..2].to_vec(), &[HTLCHandlingFailureType::Receive { payment_hash: failed_payment_hash }]); + expect_pending_htlcs_forwardable_conditions( + events[0..2].to_vec(), + &[HTLCHandlingFailureType::Receive { payment_hash: failed_payment_hash }], + ); match events.last().unwrap() { - Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {} + Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}, _ => panic!("Unexpected event"), } let first; @@ -8072,8 +6902,14 @@ pub fn test_bump_penalty_txn_on_revoked_htlcs() { assert_ne!(node_txn[0].input[0].previous_output, node_txn[1].input[1].previous_output); assert_ne!(node_txn[1].input[0].previous_output, node_txn[1].input[1].previous_output); - assert_eq!(node_txn[1].input[0].previous_output, revoked_htlc_txn[1].input[0].previous_output); - assert_eq!(node_txn[1].input[1].previous_output, revoked_htlc_txn[0].input[0].previous_output); + assert_eq!( + node_txn[1].input[0].previous_output, + revoked_htlc_txn[1].input[0].previous_output + ); + assert_eq!( + node_txn[1].input[1].previous_output, + revoked_htlc_txn[0].input[0].previous_output + ); // node_txn[3] spends the revoked outputs from the revoked_htlc_txn (which only have one // output, checked above). 
@@ -8083,7 +6919,8 @@ pub fn test_bump_penalty_txn_on_revoked_htlcs() { first = node_txn[2].compute_txid(); // Store both feerates for later comparison - let fee_1 = revoked_htlc_txn[0].output[0].value + revoked_htlc_txn[1].output[0].value - node_txn[2].output[0].value; + let fee_1 = revoked_htlc_txn[0].output[0].value + revoked_htlc_txn[1].output[0].value + - node_txn[2].output[0].value; feerate_1 = fee_1 * 1000 / node_txn[2].weight().to_wu(); penalty_txn = vec![node_txn[0].clone()]; node_txn.clear(); @@ -8107,7 +6944,8 @@ pub fn test_bump_penalty_txn_on_revoked_htlcs() { check_spends!(node_txn[0], revoked_htlc_txn[0], revoked_htlc_txn[1]); // Verify bumped tx is different and 25% bump heuristic assert_ne!(first, node_txn[0].compute_txid()); - let fee_2 = revoked_htlc_txn[0].output[0].value + revoked_htlc_txn[1].output[0].value - node_txn[0].output[0].value; + let fee_2 = revoked_htlc_txn[0].output[0].value + revoked_htlc_txn[1].output[0].value + - node_txn[0].output[0].value; let feerate_2 = fee_2 * 1000 / node_txn[0].weight().to_wu(); assert!(feerate_2 * 100 > feerate_1 * 125); let txn = vec![node_txn[0].clone()]; @@ -8129,7 +6967,7 @@ pub fn test_bump_penalty_txn_on_revoked_htlcs() { node_txn.clear(); } check_closed_broadcast!(nodes[0], true); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); } #[xtest(feature = "_externalize_tests")] @@ -8152,8 +6990,9 @@ pub fn test_bump_penalty_txn_on_remote_commitment() { let htlc_value_b_msats = 583_000; let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000); - let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], htlc_value_a_msats); - route_payment(&nodes[1], &vec!(&nodes[0])[..], htlc_value_b_msats); + let (payment_preimage, payment_hash, ..) 
= + route_payment(&nodes[0], &[&nodes[1]], htlc_value_a_msats); + route_payment(&nodes[1], &[&nodes[0]], htlc_value_b_msats); // Remote commitment txn with 4 outputs : to_local, to_remote, 1 outgoing HTLC, 1 incoming HTLC let remote_txn = get_local_commitment_txn!(nodes[0], chan.2); @@ -8165,8 +7004,9 @@ pub fn test_bump_penalty_txn_on_remote_commitment() { nodes[1].node.claim_funds(payment_preimage); expect_payment_claimed!(nodes[1], payment_hash, htlc_value_a_msats); mine_transaction(&nodes[1], &remote_txn[0]); - check_added_monitors!(nodes[1], 2); + check_added_monitors(&nodes[1], 2); connect_blocks(&nodes[1], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires + // depending on the block connection style, node 1 may have broadcast either 3 or 10 txs remote_txn @@ -8192,14 +7032,16 @@ pub fn test_bump_penalty_txn_on_remote_commitment() { preimage = node_txn[0].compute_txid(); let index = node_txn[0].input[0].previous_output.vout; - let fee = remote_txn[0].output[index as usize].value.to_sat() - node_txn[0].output[0].value.to_sat(); + let fee = remote_txn[0].output[index as usize].value.to_sat() + - node_txn[0].output[0].value.to_sat(); feerate_preimage = fee * 1000 / node_txn[0].weight().to_wu(); - let (preimage_bump_tx, timeout_tx) = if node_txn[2].input[0].previous_output == node_txn[0].input[0].previous_output { - (node_txn[2].clone(), node_txn[1].clone()) - } else { - (node_txn[1].clone(), node_txn[2].clone()) - }; + let (preimage_bump_tx, timeout_tx) = + if node_txn[2].input[0].previous_output == node_txn[0].input[0].previous_output { + (node_txn[2].clone(), node_txn[1].clone()) + } else { + (node_txn[1].clone(), node_txn[2].clone()) + }; preimage_bump = preimage_bump_tx; check_spends!(preimage_bump, remote_txn[0]); @@ -8207,7 +7049,8 @@ pub fn test_bump_penalty_txn_on_remote_commitment() { timeout = timeout_tx.compute_txid(); let index = timeout_tx.input[0].previous_output.vout; - let fee = remote_txn[0].output[index as usize].value.to_sat() - 
timeout_tx.output[0].value.to_sat(); + let fee = remote_txn[0].output[index as usize].value.to_sat() + - timeout_tx.output[0].value.to_sat(); feerate_timeout = fee * 1000 / timeout_tx.weight().to_wu(); node_txn.clear(); @@ -8226,13 +7069,15 @@ pub fn test_bump_penalty_txn_on_remote_commitment() { check_spends!(preimage_bump, remote_txn[0]); let index = preimage_bump.input[0].previous_output.vout; - let fee = remote_txn[0].output[index as usize].value.to_sat() - preimage_bump.output[0].value.to_sat(); + let fee = remote_txn[0].output[index as usize].value.to_sat() + - preimage_bump.output[0].value.to_sat(); let new_feerate = fee * 1000 / preimage_bump.weight().to_wu(); assert!(new_feerate * 100 > feerate_timeout * 125); assert_ne!(timeout, preimage_bump.compute_txid()); let index = node_txn[0].input[0].previous_output.vout; - let fee = remote_txn[0].output[index as usize].value.to_sat() - node_txn[0].output[0].value.to_sat(); + let fee = remote_txn[0].output[index as usize].value.to_sat() + - node_txn[0].output[0].value.to_sat(); let new_feerate = fee * 1000 / node_txn[0].weight().to_wu(); assert!(new_feerate * 100 > feerate_preimage * 125); assert_ne!(preimage, node_txn[0].compute_txid()); @@ -8258,43 +7103,53 @@ pub fn test_counterparty_raa_skip_no_crash() { let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2; let per_commitment_secret; let next_per_commitment_point; { let per_peer_state = nodes[0].node.per_peer_state.read().unwrap(); - let mut guard = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap(); - let keys = guard.channel_by_id.get(&channel_id).and_then(Channel::as_funded).unwrap() - .get_signer(); + let mut guard = 
per_peer_state.get(&node_b_id).unwrap().lock().unwrap(); + let keys = + guard.channel_by_id.get(&channel_id).and_then(Channel::as_funded).unwrap().get_signer(); const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1; // Make signer believe we got a counterparty signature, so that it allows the revocation keys.as_ecdsa().unwrap().get_enforcement_state().last_holder_commitment -= 1; - per_commitment_secret = keys.as_ref().release_commitment_secret(INITIAL_COMMITMENT_NUMBER).unwrap(); + per_commitment_secret = + keys.as_ref().release_commitment_secret(INITIAL_COMMITMENT_NUMBER).unwrap(); // Must revoke without gaps keys.as_ecdsa().unwrap().get_enforcement_state().last_holder_commitment -= 1; keys.as_ref().release_commitment_secret(INITIAL_COMMITMENT_NUMBER - 1).unwrap(); keys.as_ecdsa().unwrap().get_enforcement_state().last_holder_commitment -= 1; - next_per_commitment_point = PublicKey::from_secret_key(&Secp256k1::new(), - &SecretKey::from_slice(&keys.as_ref().release_commitment_secret(INITIAL_COMMITMENT_NUMBER - 2).unwrap()).unwrap()); + let sec = keys.as_ref().release_commitment_secret(INITIAL_COMMITMENT_NUMBER - 2).unwrap(); + let key = SecretKey::from_slice(&sec).unwrap(); + next_per_commitment_point = PublicKey::from_secret_key(&Secp256k1::new(), &key); } - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), - &msgs::RevokeAndACK { - channel_id, - per_commitment_secret, - next_per_commitment_point, - #[cfg(taproot)] - next_local_nonce: None, - }); - assert_eq!(check_closed_broadcast!(nodes[1], true).unwrap().data, "Received an unexpected revoke_and_ack"); - check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Received an unexpected revoke_and_ack".to_string() } - , [nodes[0].node.get_our_node_id()], 100000); + let raa = msgs::RevokeAndACK { + channel_id, + per_commitment_secret, + next_per_commitment_point, + #[cfg(taproot)] + next_local_nonce: None, + }; + 
nodes[1].node.handle_revoke_and_ack(node_a_id, &raa); + assert_eq!( + check_closed_broadcast!(nodes[1], true).unwrap().data, + "Received an unexpected revoke_and_ack" + ); + check_added_monitors(&nodes[1], 1); + let reason = + ClosureReason::ProcessingError { err: "Received an unexpected revoke_and_ack".to_string() }; + check_closed_event!(nodes[1], 1, reason, [node_a_id], 100000); } #[xtest(feature = "_externalize_tests")] @@ -8307,27 +7162,32 @@ pub fn test_bump_txn_sanitize_tracking_maps() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let node_b_id = nodes[1].node.get_our_node_id(); + let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000); // Lock HTLC in both directions - let (payment_preimage_1, ..) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 9_000_000); - let (_, payment_hash_2, ..) = route_payment(&nodes[1], &vec!(&nodes[0])[..], 9_000_000); + let (payment_preimage_1, ..) = route_payment(&nodes[0], &[&nodes[1]], 9_000_000); + let (_, payment_hash_2, ..) 
= route_payment(&nodes[1], &[&nodes[0]], 9_000_000); let revoked_local_txn = get_local_commitment_txn!(nodes[1], chan.2); assert_eq!(revoked_local_txn[0].input.len(), 1); assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan.3.compute_txid()); // Revoke local commitment tx - claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_1); + claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1); // Broadcast set of revoked txn on A connect_blocks(&nodes[0], TEST_FINAL_CLTV + 2 - CHAN_CONFIRM_DEPTH); - expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[0], vec![HTLCHandlingFailureType::Receive { payment_hash: payment_hash_2 }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!( + nodes[0], + vec![HTLCHandlingFailureType::Receive { payment_hash: payment_hash_2 }] + ); assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 0); mine_transaction(&nodes[0], &revoked_local_txn[0]); check_closed_broadcast!(nodes[0], true); - check_added_monitors!(nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 1000000); + check_added_monitors(&nodes[0], 1); + check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [node_b_id], 1000000); let penalty_txn = { let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap(); assert_eq!(node_txn.len(), 2); //ChannelMonitor: justice txn * 2 @@ -8360,7 +7220,10 @@ pub fn test_channel_conf_timeout() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - let _funding_tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 1_000_000, 100_000); + let node_a_id = nodes[0].node.get_our_node_id(); + + let _funding_tx = + create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 1_000_000, 100_000); // The outbound node should wait forever for confirmation: // This matches 
`channel::FUNDING_CONF_DEADLINE_BLOCKS` and BOLT 2's suggested timeout, thus is @@ -8370,18 +7233,24 @@ pub fn test_channel_conf_timeout() { // The inbound node should fail the channel after exactly 2016 blocks connect_blocks(&nodes[1], 2015); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); connect_blocks(&nodes[1], 1); - check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::FundingTimedOut, [nodes[0].node.get_our_node_id()], 1000000); + check_added_monitors(&nodes[1], 1); + check_closed_event!(nodes[1], 1, ClosureReason::FundingTimedOut, [node_a_id], 1000000); let close_ev = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(close_ev.len(), 1); match close_ev[0] { - MessageSendEvent::HandleError { action: ErrorAction::DisconnectPeer { ref msg }, ref node_id } => { - assert_eq!(*node_id, nodes[0].node.get_our_node_id()); - assert_eq!(msg.as_ref().unwrap().data, "Channel closed because funding transaction failed to confirm within 2016 blocks"); + MessageSendEvent::HandleError { + action: ErrorAction::DisconnectPeer { ref msg }, + ref node_id, + } => { + assert_eq!(*node_id, node_a_id); + assert_eq!( + msg.as_ref().unwrap().data, + "Channel closed because funding transaction failed to confirm within 2016 blocks" + ); }, _ => panic!("Unexpected event"), } @@ -8394,14 +7263,19 @@ pub fn test_override_channel_config() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let node_b_id = nodes[1].node.get_our_node_id(); + // Node0 initiates a channel to node1 using the override config. 
let mut override_config = UserConfig::default(); override_config.channel_handshake_config.our_to_self_delay = 200; - nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 16_000_000, 12_000_000, 42, None, Some(override_config)).unwrap(); + nodes[0] + .node + .create_channel(node_b_id, 16_000_000, 12_000_000, 42, None, Some(override_config)) + .unwrap(); // Assert the channel created by node0 is using the override config. - let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); + let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); assert_eq!(res.common_fields.channel_flags, 0); assert_eq!(res.common_fields.to_self_delay, 200); } @@ -8415,12 +7289,18 @@ pub fn test_override_0msat_htlc_minimum() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(zero_config.clone())]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 16_000_000, 12_000_000, 42, None, Some(zero_config)).unwrap(); - let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + + nodes[0] + .node + .create_channel(node_b_id, 16_000_000, 12_000_000, 42, None, Some(zero_config)) + .unwrap(); + let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); assert_eq!(res.common_fields.htlc_minimum_msat, 1); - nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &res); - let res = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()); + nodes[1].node.handle_open_channel(node_a_id, &res); + let res = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id); assert_eq!(res.common_fields.htlc_minimum_msat, 1); } @@ -8434,20 +7314,34 @@ pub fn test_channel_update_has_correct_htlc_maximum_msat() { let 
mut config_30_percent = UserConfig::default(); config_30_percent.channel_handshake_config.announce_for_forwarding = true; - config_30_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 30; + config_30_percent + .channel_handshake_config + .max_inbound_htlc_value_in_flight_percent_of_channel = 30; let mut config_50_percent = UserConfig::default(); config_50_percent.channel_handshake_config.announce_for_forwarding = true; - config_50_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 50; + config_50_percent + .channel_handshake_config + .max_inbound_htlc_value_in_flight_percent_of_channel = 50; let mut config_95_percent = UserConfig::default(); config_95_percent.channel_handshake_config.announce_for_forwarding = true; - config_95_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 95; + config_95_percent + .channel_handshake_config + .max_inbound_htlc_value_in_flight_percent_of_channel = 95; let mut config_100_percent = UserConfig::default(); config_100_percent.channel_handshake_config.announce_for_forwarding = true; - config_100_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 100; + config_100_percent + .channel_handshake_config + .max_inbound_htlc_value_in_flight_percent_of_channel = 100; let chanmon_cfgs = create_chanmon_cfgs(4); let node_cfgs = create_node_cfgs(4, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[Some(config_30_percent), Some(config_50_percent), Some(config_95_percent), Some(config_100_percent)]); + let configs = [ + Some(config_30_percent), + Some(config_50_percent), + Some(config_95_percent), + Some(config_100_percent), + ]; + let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &configs); let nodes = create_network(4, &node_cfgs, &node_chanmgrs); let channel_value_satoshis = 100000; @@ -8456,8 +7350,10 @@ pub fn test_channel_update_has_correct_htlc_maximum_msat() { let 
channel_value_50_percent_msat = (channel_value_msat as f64 * 0.5) as u64; let channel_value_90_percent_msat = (channel_value_msat as f64 * 0.9) as u64; - let (node_0_chan_update, node_1_chan_update, _, _) = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, channel_value_satoshis, 10001); - let (node_2_chan_update, node_3_chan_update, _, _) = create_announced_chan_between_nodes_with_value(&nodes, 2, 3, channel_value_satoshis, 10001); + let (node_0_chan_update, node_1_chan_update, _, _) = + create_announced_chan_between_nodes_with_value(&nodes, 0, 1, channel_value_satoshis, 10001); + let (node_2_chan_update, node_3_chan_update, _, _) = + create_announced_chan_between_nodes_with_value(&nodes, 2, 3, channel_value_satoshis, 10001); // Assert that `node[0]`'s `ChannelUpdate` is capped at 50 percent of the `channel_value`, as // that's the value of `node[1]`'s `holder_max_htlc_value_in_flight_msat`. @@ -8484,13 +7380,20 @@ pub fn test_manually_accept_inbound_channel_request() { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(manually_accept_conf.clone())]); + let node_chanmgrs = + create_node_chanmgrs(2, &node_cfgs, &[None, Some(manually_accept_conf.clone())]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, Some(manually_accept_conf)).unwrap(); - let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + + nodes[0] + .node + .create_channel(node_b_id, 100000, 10001, 42, None, Some(manually_accept_conf)) + .unwrap(); + let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); - nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &res); + 
nodes[1].node.handle_open_channel(node_a_id, &res); // Assert that `nodes[1]` has no `MessageSendEvent::SendAcceptChannel` in `msg_events` before // accepting the inbound channel request. @@ -8517,8 +7420,12 @@ pub fn test_manually_accept_inbound_channel_request() { let events = nodes[1].node.get_and_clear_pending_events(); match events[0] { Event::OpenChannelRequest { temporary_channel_id, .. } => { - nodes[1].node.accept_inbound_channel(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 23, Some(config_overrides)).unwrap(); - } + let config = Some(config_overrides); + nodes[1] + .node + .accept_inbound_channel(&temporary_channel_id, &node_a_id, 23, config) + .unwrap(); + }, _ => panic!("Unexpected event"), } @@ -8528,30 +7435,34 @@ pub fn test_manually_accept_inbound_channel_request() { let ref accept_channel: AcceptChannel; match accept_msg_ev[0] { MessageSendEvent::SendAcceptChannel { ref node_id, ref msg } => { - assert_eq!(*node_id, nodes[0].node.get_our_node_id()); + assert_eq!(*node_id, node_a_id); // Assert overriden handshake parameter. assert_eq!(msg.common_fields.max_accepted_htlcs, 3); accept_channel = msg; - } + }, _ => panic!("Unexpected event"), } // Continue channel opening process until channel update messages are sent. 
- nodes[0].node.handle_accept_channel(nodes[1].node.get_our_node_id(), &accept_channel); - let (temporary_channel_id, tx, funding_outpoint) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100_000, 42); - nodes[0].node.unsafe_manual_funding_transaction_generated(temporary_channel_id, nodes[1].node.get_our_node_id(), funding_outpoint).unwrap(); - check_added_monitors!(nodes[0], 0); - - let funding_created = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()); - nodes[1].node.handle_funding_created(nodes[0].node.get_our_node_id(), &funding_created); - check_added_monitors!(nodes[1], 1); - expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id()); - - let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()); - nodes[0].node.handle_funding_signed(nodes[1].node.get_our_node_id(), &funding_signed); - check_added_monitors!(nodes[0], 1); + nodes[0].node.handle_accept_channel(node_b_id, &accept_channel); + let (temp_channel_id, tx, funding_outpoint) = + create_funding_transaction(&nodes[0], &node_b_id, 100_000, 42); + nodes[0] + .node + .unsafe_manual_funding_transaction_generated(temp_channel_id, node_b_id, funding_outpoint) + .unwrap(); + check_added_monitors(&nodes[0], 0); + + let funding_created = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, node_b_id); + nodes[1].node.handle_funding_created(node_a_id, &funding_created); + check_added_monitors(&nodes[1], 1); + expect_channel_pending_event(&nodes[1], &node_a_id); + + let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, node_a_id); + nodes[0].node.handle_funding_signed(node_b_id, &funding_signed); + check_added_monitors(&nodes[0], 1); let events = &nodes[0].node.get_and_clear_pending_events(); assert_eq!(events.len(), 2); match &events[0] { @@ -8563,7 +7474,7 @@ pub fn test_manually_accept_inbound_channel_request() { }; match 
&events[1] { crate::events::Event::ChannelPending { counterparty_node_id, .. } => { - assert_eq!(*&nodes[1].node.get_our_node_id(), *counterparty_node_id); + assert_eq!(*&node_b_id, *counterparty_node_id); }, _ => panic!("Unexpected event"), }; @@ -8571,19 +7482,19 @@ pub fn test_manually_accept_inbound_channel_request() { mine_transaction(&nodes[0], &tx); mine_transaction(&nodes[1], &tx); - let as_channel_ready = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReady, nodes[0].node.get_our_node_id()); - nodes[1].node.handle_channel_ready(nodes[0].node.get_our_node_id(), &as_channel_ready); - let as_channel_ready = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id()); - nodes[0].node.handle_channel_ready(nodes[1].node.get_our_node_id(), &as_channel_ready); + let as_channel_ready = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReady, node_a_id); + nodes[1].node.handle_channel_ready(node_a_id, &as_channel_ready); + let as_channel_ready = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, node_b_id); + nodes[0].node.handle_channel_ready(node_b_id, &as_channel_ready); - expect_channel_ready_event(&nodes[0], &nodes[1].node.get_our_node_id()); - expect_channel_ready_event(&nodes[1], &nodes[0].node.get_our_node_id()); + expect_channel_ready_event(&nodes[0], &node_b_id); + expect_channel_ready_event(&nodes[1], &node_a_id); // Assert that the overriden base fee surfaces in the channel update. 
- let channel_update = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id()); + let channel_update = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, node_a_id); assert_eq!(channel_update.contents.fee_base_msat, 555); - get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id()); + get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, node_b_id); } #[xtest(feature = "_externalize_tests")] @@ -8592,23 +7503,33 @@ pub fn test_manually_reject_inbound_channel_request() { manually_accept_conf.manually_accept_inbound_channels = true; let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(manually_accept_conf.clone())]); + let node_chanmgrs = + create_node_chanmgrs(2, &node_cfgs, &[None, Some(manually_accept_conf.clone())]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, Some(manually_accept_conf)).unwrap(); - let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + + nodes[0] + .node + .create_channel(node_b_id, 100000, 10001, 42, None, Some(manually_accept_conf)) + .unwrap(); + let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); - nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &res); + nodes[1].node.handle_open_channel(node_a_id, &res); // Assert that `nodes[1]` has no `MessageSendEvent::SendAcceptChannel` in `msg_events` before // rejecting the inbound channel request. 
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - let error_message = "Channel force-closed"; + let err = "Channel force-closed".to_string(); let events = nodes[1].node.get_and_clear_pending_events(); match events[0] { Event::OpenChannelRequest { temporary_channel_id, .. } => { - nodes[1].node.force_close_broadcasting_latest_txn(&temporary_channel_id, &nodes[0].node.get_our_node_id(), error_message.to_string()).unwrap(); - } + nodes[1] + .node + .force_close_broadcasting_latest_txn(&temporary_channel_id, &node_a_id, err) + .unwrap(); + }, _ => panic!("Unexpected event"), } @@ -8617,8 +7538,8 @@ pub fn test_manually_reject_inbound_channel_request() { match close_msg_ev[0] { MessageSendEvent::HandleError { ref node_id, .. } => { - assert_eq!(*node_id, nodes[0].node.get_our_node_id()); - } + assert_eq!(*node_id, node_a_id); + }, _ => panic!("Unexpected event"), } @@ -8632,13 +7553,20 @@ pub fn test_can_not_accept_inbound_channel_twice() { manually_accept_conf.manually_accept_inbound_channels = true; let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(manually_accept_conf.clone())]); + let node_chanmgrs = + create_node_chanmgrs(2, &node_cfgs, &[None, Some(manually_accept_conf.clone())]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, Some(manually_accept_conf)).unwrap(); - let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + + nodes[0] + .node + .create_channel(node_b_id, 100000, 10001, 42, None, Some(manually_accept_conf)) + .unwrap(); + let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); - 
nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &res); + nodes[1].node.handle_open_channel(node_a_id, &res); // Assert that `nodes[1]` has no `MessageSendEvent::SendAcceptChannel` in `msg_events` before // accepting the inbound channel request. @@ -8647,8 +7575,12 @@ pub fn test_can_not_accept_inbound_channel_twice() { let events = nodes[1].node.get_and_clear_pending_events(); match events[0] { Event::OpenChannelRequest { temporary_channel_id, .. } => { - nodes[1].node.accept_inbound_channel(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 0, None).unwrap(); - let api_res = nodes[1].node.accept_inbound_channel(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 0, None); + nodes[1] + .node + .accept_inbound_channel(&temporary_channel_id, &node_a_id, 0, None) + .unwrap(); + let api_res = + nodes[1].node.accept_inbound_channel(&temporary_channel_id, &node_a_id, 0, None); match api_res { Err(APIError::APIMisuseError { err }) => { assert_eq!(err, "No such channel awaiting to be accepted."); @@ -8656,7 +7588,7 @@ pub fn test_can_not_accept_inbound_channel_twice() { Ok(_) => panic!("Channel shouldn't be possible to be accepted twice"), Err(e) => panic!("Unexpected Error {:?}", e), } - } + }, _ => panic!("Unexpected event"), } @@ -8666,8 +7598,8 @@ pub fn test_can_not_accept_inbound_channel_twice() { match accept_msg_ev[0] { MessageSendEvent::SendAcceptChannel { ref node_id, .. 
} => { - assert_eq!(*node_id, nodes[0].node.get_our_node_id()); - } + assert_eq!(*node_id, node_a_id); + }, _ => panic!("Unexpected event"), } } @@ -8679,8 +7611,10 @@ pub fn test_can_not_accept_unknown_inbound_channel() { let node_chanmgr = create_node_chanmgrs(2, &node_cfg, &[None, None]); let nodes = create_network(2, &node_cfg, &node_chanmgr); + let node_b_id = nodes[1].node.get_our_node_id(); + let unknown_channel_id = ChannelId::new_zero(); - let api_res = nodes[0].node.accept_inbound_channel(&unknown_channel_id, &nodes[1].node.get_our_node_id(), 0, None); + let api_res = nodes[0].node.accept_inbound_channel(&unknown_channel_id, &node_b_id, 0, None); match api_res { Err(APIError::APIMisuseError { err }) => { assert_eq!(err, "No such channel awaiting to be accepted."); @@ -8703,6 +7637,10 @@ pub fn test_onion_value_mpp_set_calculation() { let node_chanmgrs = create_node_chanmgrs(node_count, &node_cfgs, &vec![None; node_count]); let mut nodes = create_network(node_count, &node_cfgs, &node_chanmgrs); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + let node_d_id = nodes[3].node.get_our_node_id(); + let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id; let chan_2_id = create_announced_chan_between_nodes(&nodes, 0, 2).0.contents.short_channel_id; let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3).0.contents.short_channel_id; @@ -8710,72 +7648,97 @@ pub fn test_onion_value_mpp_set_calculation() { let total_msat = 100_000; let expected_paths: &[&[&Node]] = &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]]; - let (mut route, our_payment_hash, our_payment_preimage, our_payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[3], total_msat); + let (mut route, hash, preimage, secret) = + get_route_and_payment_hash!(&nodes[0], nodes[3], total_msat); let sample_path = route.paths.pop().unwrap(); let mut path_1 = sample_path.clone(); - 
path_1.hops[0].pubkey = nodes[1].node.get_our_node_id(); + path_1.hops[0].pubkey = node_b_id; path_1.hops[0].short_channel_id = chan_1_id; - path_1.hops[1].pubkey = nodes[3].node.get_our_node_id(); + path_1.hops[1].pubkey = node_d_id; path_1.hops[1].short_channel_id = chan_3_id; path_1.hops[1].fee_msat = 100_000; route.paths.push(path_1); let mut path_2 = sample_path.clone(); - path_2.hops[0].pubkey = nodes[2].node.get_our_node_id(); + path_2.hops[0].pubkey = node_c_id; path_2.hops[0].short_channel_id = chan_2_id; - path_2.hops[1].pubkey = nodes[3].node.get_our_node_id(); + path_2.hops[1].pubkey = node_d_id; path_2.hops[1].short_channel_id = chan_4_id; path_2.hops[1].fee_msat = 1_000; route.paths.push(path_2); // Send payment - let payment_id = PaymentId(nodes[0].keys_manager.backing.get_secure_random_bytes()); - let onion_session_privs = nodes[0].node.test_add_new_pending_payment(our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), payment_id, &route).unwrap(); - nodes[0].node.test_send_payment_internal(&route, our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), None, payment_id, Some(total_msat), onion_session_privs).unwrap(); - check_added_monitors!(nodes[0], expected_paths.len()); + let id = PaymentId(nodes[0].keys_manager.backing.get_secure_random_bytes()); + let onion = RecipientOnionFields::secret_only(secret); + let onion_session_privs = + nodes[0].node.test_add_new_pending_payment(hash, onion.clone(), id, &route).unwrap(); + let amt = Some(total_msat); + nodes[0] + .node + .test_send_payment_internal(&route, hash, onion, None, id, amt, onion_session_privs) + .unwrap(); + check_added_monitors(&nodes[0], expected_paths.len()); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), expected_paths.len()); // First path - let ev = remove_first_msg_event_to_node(&expected_paths[0][0].node.get_our_node_id(), &mut events); + let ev = + 
remove_first_msg_event_to_node(&expected_paths[0][0].node.get_our_node_id(), &mut events); let mut payment_event = SendEvent::from_event(ev); let mut prev_node = &nodes[0]; for (idx, &node) in expected_paths[0].iter().enumerate() { assert_eq!(node.node.get_our_node_id(), payment_event.node_id); - if idx == 0 { // routing node + if idx == 0 { + // routing node let session_priv = [3; 32]; let height = nodes[0].best_block_info().1; let session_priv = SecretKey::from_slice(&session_priv).unwrap(); - let mut onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route.paths[0], &session_priv); - let recipient_onion_fields = RecipientOnionFields::secret_only(our_payment_secret); - let (mut onion_payloads, _, _) = onion_utils::build_onion_payloads(&route.paths[0], 100_000, - &recipient_onion_fields, height + 1, &None, None, None).unwrap(); + let mut onion_keys = onion_utils::construct_onion_keys( + &Secp256k1::new(), + &route.paths[0], + &session_priv, + ); + let recipient_onion_fields = RecipientOnionFields::secret_only(secret); + let (mut onion_payloads, _, _) = onion_utils::build_onion_payloads( + &route.paths[0], + 100_000, + &recipient_onion_fields, + height + 1, + &None, + None, + None, + ) + .unwrap(); // Edit amt_to_forward to simulate the sender having set // the final amount and the routing node taking less fee if let msgs::OutboundOnionPayload::Receive { - ref mut sender_intended_htlc_amt_msat, .. - } = onion_payloads[1] { + ref mut sender_intended_htlc_amt_msat, + .. 
+ } = onion_payloads[1] + { *sender_intended_htlc_amt_msat = 99_000; - } else { panic!() } - let new_onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &our_payment_hash).unwrap(); + } else { + panic!() + } + let new_onion_packet = + onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &hash) + .unwrap(); payment_event.msgs[0].onion_routing_packet = new_onion_packet; } node.node.handle_update_add_htlc(prev_node.node.get_our_node_id(), &payment_event.msgs[0]); - check_added_monitors!(node, 0); + check_added_monitors(&node, 0); commitment_signed_dance!(node, prev_node, payment_event.commitment_msg, false); expect_pending_htlcs_forwardable!(node); if idx == 0 { let mut events_2 = node.node.get_and_clear_pending_msg_events(); assert_eq!(events_2.len(), 1); - check_added_monitors!(node, 1); + check_added_monitors(&node, 1); payment_event = SendEvent::from_event(events_2.remove(0)); assert_eq!(payment_event.msgs.len(), 1); } else { @@ -8787,16 +7750,14 @@ pub fn test_onion_value_mpp_set_calculation() { } // Second path - let ev = remove_first_msg_event_to_node(&expected_paths[1][0].node.get_our_node_id(), &mut events); - pass_along_path(&nodes[0], expected_paths[1], 101_000, our_payment_hash.clone(), Some(our_payment_secret), ev, true, None); + let ev = + remove_first_msg_event_to_node(&expected_paths[1][0].node.get_our_node_id(), &mut events); + pass_along_path(&nodes[0], expected_paths[1], 101_000, hash, Some(secret), ev, true, None); - claim_payment_along_route( - ClaimAlongRouteArgs::new(&nodes[0], expected_paths, our_payment_preimage) - ); + claim_payment_along_route(ClaimAlongRouteArgs::new(&nodes[0], expected_paths, preimage)); } fn do_test_overshoot_mpp(msat_amounts: &[u64], total_msat: u64) { - let routing_node_count = msat_amounts.len(); let node_count = routing_node_count + 2; @@ -8814,18 +7775,23 @@ fn do_test_overshoot_mpp(msat_amounts: &[u64], total_msat: u64) { let mut dst_chan_ids = 
Vec::with_capacity(routing_node_count); for i in 0..routing_node_count { let routing_node = 2 + i; - let src_chan_id = create_announced_chan_between_nodes(&nodes, src_idx, routing_node).0.contents.short_channel_id; + let src_chan = create_announced_chan_between_nodes(&nodes, src_idx, routing_node); + let src_chan_id = src_chan.0.contents.short_channel_id; src_chan_ids.push(src_chan_id); - let dst_chan_id = create_announced_chan_between_nodes(&nodes, routing_node, dst_idx).0.contents.short_channel_id; + + let dst_chan = create_announced_chan_between_nodes(&nodes, routing_node, dst_idx); + let dst_chan_id = dst_chan.0.contents.short_channel_id; dst_chan_ids.push(dst_chan_id); let path = vec![&nodes[routing_node], &nodes[dst_idx]]; expected_paths.push(path); } - let expected_paths: Vec<&[&Node]> = expected_paths.iter().map(|route| route.as_slice()).collect(); + let expected_paths: Vec<&[&Node]> = + expected_paths.iter().map(|route| route.as_slice()).collect(); // Create a route for each amount let example_amount = 100000; - let (mut route, our_payment_hash, our_payment_preimage, our_payment_secret) = get_route_and_payment_hash!(&nodes[src_idx], nodes[dst_idx], example_amount); + let (mut route, hash, preimage, secret) = + get_route_and_payment_hash!(&nodes[src_idx], nodes[dst_idx], example_amount); let sample_path = route.paths.pop().unwrap(); for i in 0..routing_node_count { let routing_node = 2 + i; @@ -8839,28 +7805,42 @@ fn do_test_overshoot_mpp(msat_amounts: &[u64], total_msat: u64) { } // Send payment with manually set total_msat - let payment_id = PaymentId(nodes[src_idx].keys_manager.backing.get_secure_random_bytes()); - let onion_session_privs = nodes[src_idx].node.test_add_new_pending_payment(our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), payment_id, &route).unwrap(); - nodes[src_idx].node.test_send_payment_internal(&route, our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), None, payment_id, 
Some(total_msat), onion_session_privs).unwrap(); - check_added_monitors!(nodes[src_idx], expected_paths.len()); + let id = PaymentId(nodes[src_idx].keys_manager.backing.get_secure_random_bytes()); + let onion = RecipientOnionFields::secret_only(secret); + let onion_session_privs = + nodes[src_idx].node.test_add_new_pending_payment(hash, onion, id, &route).unwrap(); + let onion = RecipientOnionFields::secret_only(secret); + let amt = Some(total_msat); + nodes[src_idx] + .node + .test_send_payment_internal(&route, hash, onion, None, id, amt, onion_session_privs) + .unwrap(); + check_added_monitors(&nodes[src_idx], expected_paths.len()); let mut events = nodes[src_idx].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), expected_paths.len()); let mut amount_received = 0; for (path_idx, expected_path) in expected_paths.iter().enumerate() { - let ev = remove_first_msg_event_to_node(&expected_path[0].node.get_our_node_id(), &mut events); + let ev = + remove_first_msg_event_to_node(&expected_path[0].node.get_our_node_id(), &mut events); let current_path_amount = msat_amounts[path_idx]; amount_received += current_path_amount; - let became_claimable_now = amount_received >= total_msat && amount_received - current_path_amount < total_msat; - pass_along_path(&nodes[src_idx], expected_path, amount_received, our_payment_hash.clone(), Some(our_payment_secret), ev, became_claimable_now, None); + let became_claimable_now = + amount_received >= total_msat && amount_received - current_path_amount < total_msat; + pass_along_path( + &nodes[src_idx], + expected_path, + amount_received, + hash.clone(), + Some(secret), + ev, + became_claimable_now, + None, + ); } - claim_payment_along_route( - ClaimAlongRouteArgs::new(&nodes[src_idx], &expected_paths, our_payment_preimage) - ); + claim_payment_along_route(ClaimAlongRouteArgs::new(&nodes[src_idx], &expected_paths, preimage)); } #[xtest(feature = "_externalize_tests")] @@ -8877,24 +7857,27 @@ pub fn test_simple_mpp() { let 
node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]); let nodes = create_network(4, &node_cfgs, &node_chanmgrs); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id; let chan_2_id = create_announced_chan_between_nodes(&nodes, 0, 2).0.contents.short_channel_id; let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3).0.contents.short_channel_id; let chan_4_id = create_announced_chan_between_nodes(&nodes, 2, 3).0.contents.short_channel_id; - let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[3], 100000); + let (mut route, payment_hash, payment_preimage, payment_secret) = + get_route_and_payment_hash!(&nodes[0], nodes[3], 100000); let path = route.paths[0].clone(); route.paths.push(path); - route.paths[0].hops[0].pubkey = nodes[1].node.get_our_node_id(); + route.paths[0].hops[0].pubkey = node_b_id; route.paths[0].hops[0].short_channel_id = chan_1_id; route.paths[0].hops[1].short_channel_id = chan_3_id; - route.paths[1].hops[0].pubkey = nodes[2].node.get_our_node_id(); + route.paths[1].hops[0].pubkey = node_c_id; route.paths[1].hops[0].short_channel_id = chan_2_id; route.paths[1].hops[1].short_channel_id = chan_4_id; - send_along_route_with_secret(&nodes[0], route, &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], 200_000, payment_hash, payment_secret); - claim_payment_along_route( - ClaimAlongRouteArgs::new(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], payment_preimage) - ); + let paths: &[&[_]] = &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]]; + send_along_route_with_secret(&nodes[0], route, paths, 200_000, payment_hash, payment_secret); + claim_payment_along_route(ClaimAlongRouteArgs::new(&nodes[0], paths, payment_preimage)); } #[xtest(feature = "_externalize_tests")] @@ -8905,17 +7888,22 @@ pub fn 
test_preimage_storage() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id; { - let (payment_hash, payment_secret) = nodes[1].node.create_inbound_payment(Some(100_000), 7200, None).unwrap(); + let (payment_hash, payment_secret) = + nodes[1].node.create_inbound_payment(Some(100_000), 7200, None).unwrap(); let (route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000); - nodes[0].node.send_payment_with_route(route, payment_hash, - RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap(); - check_added_monitors!(nodes[0], 1); + let onion = RecipientOnionFields::secret_only(payment_secret); + let id = PaymentId(payment_hash.0); + nodes[0].node.send_payment_with_route(route, payment_hash, onion, id).unwrap(); + + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); let mut payment_event = SendEvent::from_event(events.pop().unwrap()); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); + nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); } // Note that after leaving the above scope we have no knowledge of any arguments or return @@ -8924,13 +7912,11 @@ pub fn test_preimage_storage() { let events = nodes[1].node.get_and_clear_pending_events(); assert_eq!(events.len(), 1); match events[0] { - Event::PaymentClaimable { ref purpose, .. } => { - match &purpose { - PaymentPurpose::Bolt11InvoicePayment { payment_preimage, .. } => { - claim_payment(&nodes[0], &[&nodes[1]], payment_preimage.unwrap()); - }, - _ => panic!("expected PaymentPurpose::Bolt11InvoicePayment") - } + Event::PaymentClaimable { ref purpose, .. 
} => match &purpose { + PaymentPurpose::Bolt11InvoicePayment { payment_preimage, .. } => { + claim_payment(&nodes[0], &[&nodes[1]], payment_preimage.unwrap()); + }, + _ => panic!("expected PaymentPurpose::Bolt11InvoicePayment"), }, _ => panic!("Unexpected event"), } @@ -8944,62 +7930,74 @@ pub fn test_bad_secret_hash() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id; - let random_payment_hash = PaymentHash([42; 32]); - let random_payment_secret = PaymentSecret([43; 32]); - let (our_payment_hash, our_payment_secret) = nodes[1].node.create_inbound_payment(Some(100_000), 2, None).unwrap(); + let random_hash = PaymentHash([42; 32]); + let random_secret = PaymentSecret([43; 32]); + let (our_payment_hash, our_payment_secret) = + nodes[1].node.create_inbound_payment(Some(100_000), 2, None).unwrap(); let (route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000); // All the below cases should end up being handled exactly identically, so we macro the // resulting events. macro_rules! 
handle_unknown_invalid_payment_data { ($payment_hash: expr) => { - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); let payment_event = SendEvent::from_event(events.pop().unwrap()); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); + nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); // We have to forward pending HTLCs once to process the receipt of the HTLC and then // again to process the pending backwards-failure of the HTLC expect_pending_htlcs_forwardable!(nodes[1]); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingFailureType::Receive{ payment_hash: $payment_hash }]); - check_added_monitors!(nodes[1], 1); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!( + nodes[1], + vec![HTLCHandlingFailureType::Receive { payment_hash: $payment_hash }] + ); + check_added_monitors(&nodes[1], 1); // We should fail the payment back let mut events = nodes[1].node.get_and_clear_pending_msg_events(); match events.pop().unwrap() { - MessageSendEvent::UpdateHTLCs { node_id: _, channel_id: _, updates: msgs::CommitmentUpdate { update_fail_htlcs, commitment_signed, .. } } => { - nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &update_fail_htlcs[0]); + MessageSendEvent::UpdateHTLCs { + node_id: _, + channel_id: _, + updates: msgs::CommitmentUpdate { update_fail_htlcs, commitment_signed, .. 
}, + } => { + nodes[0].node.handle_update_fail_htlc(node_b_id, &update_fail_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false); }, _ => panic!("Unexpected event"), } - } + }; } - let expected_error_code = LocalHTLCFailureReason::IncorrectPaymentDetails; + let expected_err_code = LocalHTLCFailureReason::IncorrectPaymentDetails; // Error data is the HTLC value (100,000) and current block height - let expected_error_data = [0, 0, 0, 0, 0, 1, 0x86, 0xa0, 0, 0, 0, CHAN_CONFIRM_DEPTH as u8]; + let expected_err_data = [0, 0, 0, 0, 0, 1, 0x86, 0xa0, 0, 0, 0, CHAN_CONFIRM_DEPTH as u8]; // Send a payment with the right payment hash but the wrong payment secret - nodes[0].node.send_payment_with_route(route.clone(), our_payment_hash, - RecipientOnionFields::secret_only(random_payment_secret), PaymentId(our_payment_hash.0)).unwrap(); + let onion = RecipientOnionFields::secret_only(random_secret); + let id = PaymentId(our_payment_hash.0); + nodes[0].node.send_payment_with_route(route.clone(), our_payment_hash, onion, id).unwrap(); handle_unknown_invalid_payment_data!(our_payment_hash); - expect_payment_failed!(nodes[0], our_payment_hash, true, expected_error_code, expected_error_data); + expect_payment_failed!(nodes[0], our_payment_hash, true, expected_err_code, expected_err_data); // Send a payment with a random payment hash, but the right payment secret - nodes[0].node.send_payment_with_route(route.clone(), random_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), PaymentId(random_payment_hash.0)).unwrap(); - handle_unknown_invalid_payment_data!(random_payment_hash); - expect_payment_failed!(nodes[0], random_payment_hash, true, expected_error_code, expected_error_data); + let onion = RecipientOnionFields::secret_only(our_payment_secret); + nodes[0].node.send_payment_with_route(route.clone(), random_hash, onion, id).unwrap(); + handle_unknown_invalid_payment_data!(random_hash); + expect_payment_failed!(nodes[0], random_hash, 
true, expected_err_code, expected_err_data); // Send a payment with a random payment hash and random payment secret - nodes[0].node.send_payment_with_route(route, random_payment_hash, - RecipientOnionFields::secret_only(random_payment_secret), PaymentId(random_payment_hash.0)).unwrap(); - handle_unknown_invalid_payment_data!(random_payment_hash); - expect_payment_failed!(nodes[0], random_payment_hash, true, expected_error_code, expected_error_data); + let onion = RecipientOnionFields::secret_only(random_secret); + nodes[0].node.send_payment_with_route(route, random_hash, onion, id).unwrap(); + handle_unknown_invalid_payment_data!(random_hash); + expect_payment_failed!(nodes[0], random_hash, true, expected_err_code, expected_err_data); } #[xtest(feature = "_externalize_tests")] @@ -9018,11 +8016,14 @@ pub fn test_update_err_monitor_lockdown() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + // Create some initial channel let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1); // Rebalance the network to generate htlc in the two directions - send_payment(&nodes[0], &vec!(&nodes[1])[..], 10_000_000); + send_payment(&nodes[0], &[&nodes[1]], 10_000_000); // Route a HTLC from node 0 to node 1 (but don't settle) let (preimage, payment_hash, ..) 
= route_payment(&nodes[0], &[&nodes[1]], 9_000_000); @@ -9034,13 +8035,28 @@ pub fn test_update_err_monitor_lockdown() { let watchtower = { let new_monitor = { let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(chan_1.2).unwrap(); - let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor)>::read( - &mut io::Cursor::new(&monitor.encode()), (nodes[0].keys_manager, nodes[0].keys_manager)).unwrap().1; + let new_monitor = + <(BlockHash, channelmonitor::ChannelMonitor)>::read( + &mut io::Cursor::new(&monitor.encode()), + (nodes[0].keys_manager, nodes[0].keys_manager), + ) + .unwrap() + .1; assert!(new_monitor == *monitor); new_monitor }; - let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &chanmon_cfgs[0].tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager); - assert_eq!(watchtower.watch_channel(chan_1.2, new_monitor), Ok(ChannelMonitorUpdateStatus::Completed)); + let watchtower = test_utils::TestChainMonitor::new( + Some(&chain_source), + &chanmon_cfgs[0].tx_broadcaster, + &logger, + &chanmon_cfgs[0].fee_estimator, + &persister, + &node_cfgs[0].keys_manager, + ); + assert_eq!( + watchtower.watch_channel(chan_1.2, new_monitor), + Ok(ChannelMonitorUpdateStatus::Completed) + ); watchtower }; let block = create_dummy_block(BlockHash::all_zeros(), 42, Vec::new()); @@ -9051,27 +8067,39 @@ pub fn test_update_err_monitor_lockdown() { // Try to update ChannelMonitor nodes[1].node.claim_funds(preimage); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_payment_claimed!(nodes[1], payment_hash, 9_000_000); - let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + let updates = get_htlc_update_msgs!(nodes[1], node_a_id); assert_eq!(updates.update_fulfill_htlcs.len(), 1); - nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]); + nodes[0].node.handle_update_fulfill_htlc(node_b_id, 
&updates.update_fulfill_htlcs[0]); { - let mut node_0_per_peer_lock; - let mut node_0_peer_state_lock; - if let Some(channel) = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1.2).as_funded_mut() { + let mut per_peer_lock; + let mut peer_state_lock; + let chan_ref = + get_channel_ref!(nodes[0], nodes[1], per_peer_lock, peer_state_lock, chan_1.2); + if let Some(channel) = chan_ref.as_funded_mut() { assert_eq!(updates.commitment_signed.len(), 1); - if let Ok(Some(update)) = channel.commitment_signed(&updates.commitment_signed[0], &node_cfgs[0].logger) { - assert_eq!(watchtower.chain_monitor.update_channel(chan_1.2, &update), ChannelMonitorUpdateStatus::InProgress); - assert_eq!(nodes[0].chain_monitor.update_channel(chan_1.2, &update), ChannelMonitorUpdateStatus::Completed); - } else { assert!(false); } + if let Ok(Some(update)) = + channel.commitment_signed(&updates.commitment_signed[0], &node_cfgs[0].logger) + { + assert_eq!( + watchtower.chain_monitor.update_channel(chan_1.2, &update), + ChannelMonitorUpdateStatus::InProgress + ); + assert_eq!( + nodes[0].chain_monitor.update_channel(chan_1.2, &update), + ChannelMonitorUpdateStatus::Completed + ); + } else { + assert!(false); + } } else { assert!(false); } } // Our local monitor is in-sync and hasn't processed yet timeout - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let events = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events.len(), 1); } @@ -9088,39 +8116,59 @@ pub fn test_concurrent_monitor_claim() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + // Create some initial channel let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1); // Rebalance the network to generate htlc in the two directions - send_payment(&nodes[0], 
&vec!(&nodes[1])[..], 10_000_000); + send_payment(&nodes[0], &[&nodes[1]], 10_000_000); // Route a HTLC from node 0 to node 1 (but don't settle) - route_payment(&nodes[0], &vec!(&nodes[1])[..], 9_000_000).0; + route_payment(&nodes[0], &[&nodes[1]], 9_000_000).0; // Copy ChainMonitor to simulate watchtower Alice and update block height her ChannelMonitor timeout HTLC onchain let chain_source = test_utils::TestChainSource::new(Network::Testnet); let logger = test_utils::TestLogger::with_id(format!("node {}", "Alice")); let persister = test_utils::TestPersister::new(); - let alice_broadcaster = test_utils::TestBroadcaster::with_blocks( - Arc::new(Mutex::new(nodes[0].blocks.lock().unwrap().clone())), - ); + let alice_broadcaster = test_utils::TestBroadcaster::with_blocks(Arc::new(Mutex::new( + nodes[0].blocks.lock().unwrap().clone(), + ))); let watchtower_alice = { let new_monitor = { let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(chan_1.2).unwrap(); - let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor)>::read( - &mut io::Cursor::new(&monitor.encode()), (nodes[0].keys_manager, nodes[0].keys_manager)).unwrap().1; + let new_monitor = + <(BlockHash, channelmonitor::ChannelMonitor)>::read( + &mut io::Cursor::new(&monitor.encode()), + (nodes[0].keys_manager, nodes[0].keys_manager), + ) + .unwrap() + .1; assert!(new_monitor == *monitor); new_monitor }; - let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &alice_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager); - assert_eq!(watchtower.watch_channel(chan_1.2, new_monitor), Ok(ChannelMonitorUpdateStatus::Completed)); + let watchtower = test_utils::TestChainMonitor::new( + Some(&chain_source), + &alice_broadcaster, + &logger, + &chanmon_cfgs[0].fee_estimator, + &persister, + &node_cfgs[0].keys_manager, + ); + assert_eq!( + watchtower.watch_channel(chan_1.2, new_monitor), + Ok(ChannelMonitorUpdateStatus::Completed) + ); watchtower }; let 
block = create_dummy_block(BlockHash::all_zeros(), 42, Vec::new()); // Make Alice aware of enough blocks that it doesn't think we're violating transaction lock time // requirements here. - const HTLC_TIMEOUT_BROADCAST: u32 = CHAN_CONFIRM_DEPTH + 1 + TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS; - alice_broadcaster.blocks.lock().unwrap().resize((HTLC_TIMEOUT_BROADCAST) as usize, (block.clone(), HTLC_TIMEOUT_BROADCAST)); + const HTLC_TIMEOUT_BROADCAST: u32 = + CHAN_CONFIRM_DEPTH + 1 + TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS; + let next_block = (block.clone(), HTLC_TIMEOUT_BROADCAST); + alice_broadcaster.blocks.lock().unwrap().resize((HTLC_TIMEOUT_BROADCAST) as usize, next_block); watchtower_alice.chain_monitor.block_connected(&block, HTLC_TIMEOUT_BROADCAST); // Watchtower Alice should have broadcast a commitment/HTLC-timeout @@ -9135,50 +8183,87 @@ pub fn test_concurrent_monitor_claim() { let chain_source = test_utils::TestChainSource::new(Network::Testnet); let logger = test_utils::TestLogger::with_id(format!("node {}", "Bob")); let persister = test_utils::TestPersister::new(); - let bob_broadcaster = test_utils::TestBroadcaster::with_blocks(Arc::clone(&alice_broadcaster.blocks)); + let bob_broadcaster = + test_utils::TestBroadcaster::with_blocks(Arc::clone(&alice_broadcaster.blocks)); let watchtower_bob = { let new_monitor = { let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(chan_1.2).unwrap(); - let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor)>::read( - &mut io::Cursor::new(&monitor.encode()), (nodes[0].keys_manager, nodes[0].keys_manager)).unwrap().1; + let new_monitor = + <(BlockHash, channelmonitor::ChannelMonitor)>::read( + &mut io::Cursor::new(&monitor.encode()), + (nodes[0].keys_manager, nodes[0].keys_manager), + ) + .unwrap() + .1; assert!(new_monitor == *monitor); new_monitor }; - let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &bob_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, 
&node_cfgs[0].keys_manager); - assert_eq!(watchtower.watch_channel(chan_1.2, new_monitor), Ok(ChannelMonitorUpdateStatus::Completed)); + let watchtower = test_utils::TestChainMonitor::new( + Some(&chain_source), + &bob_broadcaster, + &logger, + &chanmon_cfgs[0].fee_estimator, + &persister, + &node_cfgs[0].keys_manager, + ); + assert_eq!( + watchtower.watch_channel(chan_1.2, new_monitor), + Ok(ChannelMonitorUpdateStatus::Completed) + ); watchtower }; - watchtower_bob.chain_monitor.block_connected(&create_dummy_block(BlockHash::all_zeros(), 42, Vec::new()), HTLC_TIMEOUT_BROADCAST - 1); + let block = create_dummy_block(BlockHash::all_zeros(), 42, Vec::new()); + watchtower_bob.chain_monitor.block_connected(&block, HTLC_TIMEOUT_BROADCAST - 1); // Route another payment to generate another update with still previous HTLC pending - let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 3000000); - nodes[1].node.send_payment_with_route(route, payment_hash, - RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap(); - check_added_monitors!(nodes[1], 1); + let (route, payment_hash, _, payment_secret) = + get_route_and_payment_hash!(nodes[1], nodes[0], 3000000); + let onion = RecipientOnionFields::secret_only(payment_secret); + let id = PaymentId(payment_hash.0); + nodes[1].node.send_payment_with_route(route, payment_hash, onion, id).unwrap(); + check_added_monitors(&nodes[1], 1); - let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + let updates = get_htlc_update_msgs!(nodes[1], node_a_id); assert_eq!(updates.update_add_htlcs.len(), 1); - nodes[0].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &updates.update_add_htlcs[0]); + nodes[0].node.handle_update_add_htlc(node_b_id, &updates.update_add_htlcs[0]); { - let mut node_0_per_peer_lock; - let mut node_0_peer_state_lock; - if let Some(channel) = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, 
node_0_peer_state_lock, chan_1.2).as_funded_mut() { + let mut per_peer_lock; + let mut peer_state_lock; + let chan_ref = + get_channel_ref!(nodes[0], nodes[1], per_peer_lock, peer_state_lock, chan_1.2); + if let Some(channel) = chan_ref.as_funded_mut() { assert_eq!(updates.commitment_signed.len(), 1); - if let Ok(Some(update)) = channel.commitment_signed(&updates.commitment_signed[0], &node_cfgs[0].logger) { + if let Ok(Some(update)) = + channel.commitment_signed(&updates.commitment_signed[0], &node_cfgs[0].logger) + { // Watchtower Alice should already have seen the block and reject the update - assert_eq!(watchtower_alice.chain_monitor.update_channel(chan_1.2, &update), ChannelMonitorUpdateStatus::InProgress); - assert_eq!(watchtower_bob.chain_monitor.update_channel(chan_1.2, &update), ChannelMonitorUpdateStatus::Completed); - assert_eq!(nodes[0].chain_monitor.update_channel(chan_1.2, &update), ChannelMonitorUpdateStatus::Completed); - } else { assert!(false); } + assert_eq!( + watchtower_alice.chain_monitor.update_channel(chan_1.2, &update), + ChannelMonitorUpdateStatus::InProgress + ); + assert_eq!( + watchtower_bob.chain_monitor.update_channel(chan_1.2, &update), + ChannelMonitorUpdateStatus::Completed + ); + assert_eq!( + nodes[0].chain_monitor.update_channel(chan_1.2, &update), + ChannelMonitorUpdateStatus::Completed + ); + } else { + assert!(false); + } } else { assert!(false); } } // Our local monitor is in-sync and hasn't processed yet timeout - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); //// Provide one more block to watchtower Bob, expect broadcast of commitment and HTLC-Timeout - watchtower_bob.chain_monitor.block_connected(&create_dummy_block(BlockHash::all_zeros(), 42, Vec::new()), HTLC_TIMEOUT_BROADCAST); + watchtower_bob.chain_monitor.block_connected( + &create_dummy_block(BlockHash::all_zeros(), 42, Vec::new()), + HTLC_TIMEOUT_BROADCAST, + ); // Watchtower Bob should have broadcast a commitment/HTLC-timeout let 
bob_state_y; @@ -9192,9 +8277,11 @@ pub fn test_concurrent_monitor_claim() { let height = HTLC_TIMEOUT_BROADCAST + 1; connect_blocks(&nodes[0], height - nodes[0].best_block_info().1); check_closed_broadcast(&nodes[0], 1, true); - check_closed_event!(&nodes[0], 1, ClosureReason::HTLCsTimedOut, false, - [nodes[1].node.get_our_node_id()], 100000); - watchtower_alice.chain_monitor.block_connected(&create_dummy_block(BlockHash::all_zeros(), 42, vec![bob_state_y.clone()]), height); + check_closed_event!(&nodes[0], 1, ClosureReason::HTLCsTimedOut, false, [node_b_id], 100000); + watchtower_alice.chain_monitor.block_connected( + &create_dummy_block(BlockHash::all_zeros(), 42, vec![bob_state_y.clone()]), + height, + ); check_added_monitors(&nodes[0], 1); { let htlc_txn = alice_broadcaster.txn_broadcast(); @@ -9221,25 +8308,36 @@ pub fn test_pre_lockin_no_chan_closed_update() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + // Create an initial channel - nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, None).unwrap(); - let mut open_chan_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); - nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &open_chan_msg); - let accept_chan_msg = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()); - nodes[0].node.handle_accept_channel(nodes[1].node.get_our_node_id(), &accept_chan_msg); + nodes[0].node.create_channel(node_b_id, 100000, 10001, 42, None, None).unwrap(); + let mut open_chan_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); + nodes[1].node.handle_open_channel(node_a_id, &open_chan_msg); + let accept_chan_msg = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, 
node_a_id); + nodes[0].node.handle_accept_channel(node_b_id, &accept_chan_msg); // Move the first channel through the funding flow... - let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42); + let (temp_channel_id, tx, _) = create_funding_transaction(&nodes[0], &node_b_id, 100000, 42); - nodes[0].node.funding_transaction_generated(temporary_channel_id, nodes[1].node.get_our_node_id(), tx.clone()).unwrap(); - check_added_monitors!(nodes[0], 0); + nodes[0].node.funding_transaction_generated(temp_channel_id, node_b_id, tx.clone()).unwrap(); + check_added_monitors(&nodes[0], 0); + + let funding_created_msg = + get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, node_b_id); + let channel_id = ChannelId::v1_from_funding_outpoint(crate::chain::transaction::OutPoint { + txid: funding_created_msg.funding_txid, + index: funding_created_msg.funding_output_index, + }); + + let err_msg = msgs::ErrorMessage { channel_id, data: "Hi".to_owned() }; + nodes[0].node.handle_error(node_b_id, &err_msg); - let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()); - let channel_id = ChannelId::v1_from_funding_outpoint(crate::chain::transaction::OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index }); - nodes[0].node.handle_error(nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id, data: "Hi".to_owned() }); assert!(nodes[0].chain_monitor.added_monitors.lock().unwrap().is_empty()); - check_closed_event!(nodes[0], 2, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString("Hi".to_string()) }, true, - [nodes[1].node.get_our_node_id()], 100000); + let reason = + ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString("Hi".to_string()) }; + check_closed_event!(nodes[0], 2, reason, true, [node_b_id], 100000); } #[xtest(feature = "_externalize_tests")] @@ -9256,11 +8354,13 @@ 
pub fn test_htlc_no_detection() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let node_b_id = nodes[1].node.get_our_node_id(); + // Create some initial channels let chan_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001); - send_payment(&nodes[0], &vec!(&nodes[1])[..], 1_000_000); - let (_, our_payment_hash, ..) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 2_000_000); + send_payment(&nodes[0], &[&nodes[1]], 1_000_000); + let (_, our_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 2_000_000); let local_txn = get_local_commitment_txn!(nodes[0], chan_1.2); assert_eq!(local_txn[0].input.len(), 1); assert_eq!(local_txn[0].output.len(), 3); @@ -9271,10 +8371,14 @@ pub fn test_htlc_no_detection() { connect_block(&nodes[0], &block); // We deliberately connect the local tx twice as this should provoke a failure calling // this test before #653 fix. - chain::Listen::block_connected(&nodes[0].chain_monitor.chain_monitor, &block, nodes[0].best_block_info().1 + 1); + chain::Listen::block_connected( + &nodes[0].chain_monitor.chain_monitor, + &block, + nodes[0].best_block_info().1 + 1, + ); check_closed_broadcast!(nodes[0], true); - check_added_monitors!(nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000); + check_added_monitors(&nodes[0], 1); + check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [node_b_id], 100000); connect_blocks(&nodes[0], TEST_FINAL_CLTV); let htlc_timeout = { @@ -9286,12 +8390,17 @@ pub fn test_htlc_no_detection() { node_txn[0].clone() }; - connect_block(&nodes[0], &create_dummy_block(nodes[0].best_block_hash(), 42, vec![htlc_timeout.clone()])); + connect_block( + &nodes[0], + &create_dummy_block(nodes[0].best_block_hash(), 42, vec![htlc_timeout.clone()]), + ); connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1); 
expect_payment_failed!(nodes[0], our_payment_hash, false); } -fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain_before_fulfill: bool) { +fn do_test_onchain_htlc_settlement_after_close( + broadcast_alice: bool, go_onchain_before_fulfill: bool, +) { // If we route an HTLC, then learn the HTLC's preimage after the upstream channel has been // force-closed, we must claim that HTLC on-chain. (Given an HTLC forwarded from Alice --> Bob --> // Carol, Alice would be the upstream node, and Carol the downstream.) @@ -9310,13 +8419,18 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); let nodes = create_network(3, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + // Create some initial channels let chan_ab = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001); create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 100000, 10001); // Steps (1) and (2): // Send an HTLC Alice --> Bob --> Carol, but Carol doesn't settle the HTLC back. - let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000); + let (payment_preimage, payment_hash, ..) = + route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000); // Check that Alice's commitment transaction now contains an output for this HTLC. 
let alice_txn = get_local_commitment_txn!(nodes[0], chan_ab.2); @@ -9337,21 +8451,31 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain force_closing_node = 1; counterparty_node = 0; } - let error_message = "Channel force-closed"; - nodes[force_closing_node].node.force_close_broadcasting_latest_txn(&chan_ab.2, &nodes[counterparty_node].node.get_our_node_id(), error_message.to_string()).unwrap(); + let err = "Channel force-closed".to_string(); + let counterparty_node_id = nodes[counterparty_node].node.get_our_node_id(); + nodes[force_closing_node] + .node + .force_close_broadcasting_latest_txn(&chan_ab.2, &counterparty_node_id, err) + .unwrap(); check_closed_broadcast!(nodes[force_closing_node], true); - check_added_monitors!(nodes[force_closing_node], 1); - check_closed_event!(nodes[force_closing_node], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[counterparty_node].node.get_our_node_id()], 100000); + check_added_monitors(&nodes[force_closing_node], 1); + let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }; + check_closed_event!(nodes[force_closing_node], 1, reason, [counterparty_node_id], 100000); + if go_onchain_before_fulfill { let txn_to_broadcast = match broadcast_alice { true => alice_txn.clone(), - false => get_local_commitment_txn!(nodes[1], chan_ab.2) + false => get_local_commitment_txn!(nodes[1], chan_ab.2), }; - connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![txn_to_broadcast[0].clone()])); + connect_block( + &nodes[1], + &create_dummy_block(nodes[1].best_block_hash(), 42, vec![txn_to_broadcast[0].clone()]), + ); if broadcast_alice { check_closed_broadcast!(nodes[1], true); - check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000); + check_added_monitors(&nodes[1], 1); + let reason = ClosureReason::CommitmentTxConfirmed; + 
check_closed_event!(nodes[1], 1, reason, [node_a_id], 100000); } } @@ -9359,80 +8483,87 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain // Carol then claims the funds and sends an update_fulfill message to Bob, and they go through the // process of removing the HTLC from their commitment transactions. nodes[2].node.claim_funds(payment_preimage); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); expect_payment_claimed!(nodes[2], payment_hash, 3_000_000); - let carol_updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); + let carol_updates = get_htlc_update_msgs!(nodes[2], node_b_id); assert!(carol_updates.update_add_htlcs.is_empty()); assert!(carol_updates.update_fail_htlcs.is_empty()); assert!(carol_updates.update_fail_malformed_htlcs.is_empty()); assert!(carol_updates.update_fee.is_none()); assert_eq!(carol_updates.update_fulfill_htlcs.len(), 1); - nodes[1].node.handle_update_fulfill_htlc(nodes[2].node.get_our_node_id(), &carol_updates.update_fulfill_htlcs[0]); + nodes[1].node.handle_update_fulfill_htlc(node_c_id, &carol_updates.update_fulfill_htlcs[0]); let went_onchain = go_onchain_before_fulfill || force_closing_node == 1; - expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], if went_onchain { None } else { Some(1000) }, went_onchain, false); + let fee = if went_onchain { None } else { Some(1000) }; + expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], fee, went_onchain, false); // If Alice broadcasted but Bob doesn't know yet, here he prepares to tell her about the preimage. if !go_onchain_before_fulfill && broadcast_alice { let events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); match events[0] { MessageSendEvent::UpdateHTLCs { ref node_id, .. 
} => { - assert_eq!(*node_id, nodes[0].node.get_our_node_id()); + assert_eq!(*node_id, node_a_id); }, _ => panic!("Unexpected event"), }; } - nodes[1].node.handle_commitment_signed_batch_test(nodes[2].node.get_our_node_id(), &carol_updates.commitment_signed); + nodes[1].node.handle_commitment_signed_batch_test(node_c_id, &carol_updates.commitment_signed); // One monitor update for the preimage to update the Bob<->Alice channel, one monitor update // Carol<->Bob's updated commitment transaction info. - check_added_monitors!(nodes[1], 2); + check_added_monitors(&nodes[1], 2); let events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 2); let bob_revocation = match events[0] { MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => { - assert_eq!(*node_id, nodes[2].node.get_our_node_id()); + assert_eq!(*node_id, node_c_id); (*msg).clone() }, _ => panic!("Unexpected event"), }; let bob_updates = match events[1] { MessageSendEvent::UpdateHTLCs { ref node_id, channel_id: _, ref updates } => { - assert_eq!(*node_id, nodes[2].node.get_our_node_id()); + assert_eq!(*node_id, node_c_id); (*updates).clone() }, _ => panic!("Unexpected event"), }; - nodes[2].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bob_revocation); - check_added_monitors!(nodes[2], 1); - nodes[2].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bob_updates.commitment_signed); - check_added_monitors!(nodes[2], 1); + nodes[2].node.handle_revoke_and_ack(node_b_id, &bob_revocation); + check_added_monitors(&nodes[2], 1); + nodes[2].node.handle_commitment_signed_batch_test(node_b_id, &bob_updates.commitment_signed); + check_added_monitors(&nodes[2], 1); let events = nodes[2].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let carol_revocation = match events[0] { MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => { - assert_eq!(*node_id, nodes[1].node.get_our_node_id()); + assert_eq!(*node_id, node_b_id); 
(*msg).clone() }, _ => panic!("Unexpected event"), }; - nodes[1].node.handle_revoke_and_ack(nodes[2].node.get_our_node_id(), &carol_revocation); - check_added_monitors!(nodes[1], 1); + nodes[1].node.handle_revoke_and_ack(node_c_id, &carol_revocation); + check_added_monitors(&nodes[1], 1); // If this test requires the force-closed channel to not be on-chain until after the fulfill, // here's where we put said channel's commitment tx on-chain. let mut txn_to_broadcast = alice_txn.clone(); - if !broadcast_alice { txn_to_broadcast = get_local_commitment_txn!(nodes[1], chan_ab.2); } + if !broadcast_alice { + txn_to_broadcast = get_local_commitment_txn!(nodes[1], chan_ab.2); + } if !go_onchain_before_fulfill { - connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![txn_to_broadcast[0].clone()])); + connect_block( + &nodes[1], + &create_dummy_block(nodes[1].best_block_hash(), 42, vec![txn_to_broadcast[0].clone()]), + ); // If Bob was the one to force-close, he will have already passed these checks earlier. 
if broadcast_alice { check_closed_broadcast!(nodes[1], true); - check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000); + check_added_monitors(&nodes[1], 1); + let reason = ClosureReason::CommitmentTxConfirmed; + check_closed_event!(nodes[1], 1, reason, [node_a_id], 100000); } let mut bob_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap(); if broadcast_alice { @@ -9461,7 +8592,10 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain check_spends!(bob_txn[0], txn_to_broadcast[0]); assert_eq!(bob_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); } else { - assert_eq!(bob_txn.len(), if nodes[1].connect_style.borrow().updates_best_block_first() { 3 } else { 2 }); + assert_eq!( + bob_txn.len(), + if nodes[1].connect_style.borrow().updates_best_block_first() { 3 } else { 2 } + ); let htlc_tx = bob_txn.pop().unwrap(); check_spends!(htlc_tx, txn_to_broadcast[0]); assert_eq!(htlc_tx.input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT + 1); @@ -9485,41 +8619,54 @@ pub fn test_duplicate_temporary_channel_id_from_different_peers() { let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); let nodes = create_network(3, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + // Create an first channel channel - nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 100000, 10001, 42, None, None).unwrap(); - let mut open_chan_msg_chan_1_0 = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id()); + nodes[1].node.create_channel(node_a_id, 100000, 10001, 42, None, None).unwrap(); + let mut open_chan_msg_chan_1_0 = + get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, node_a_id); // Create an second channel - 
nodes[2].node.create_channel(nodes[0].node.get_our_node_id(), 100000, 10001, 43, None, None).unwrap(); - let mut open_chan_msg_chan_2_0 = get_event_msg!(nodes[2], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id()); + nodes[2].node.create_channel(node_a_id, 100000, 10001, 43, None, None).unwrap(); + let mut open_chan_msg_chan_2_0 = + get_event_msg!(nodes[2], MessageSendEvent::SendOpenChannel, node_a_id); // Modify the `OpenChannel` from `nodes[2]` to `nodes[0]` to ensure that it uses the same // `temporary_channel_id` as the `OpenChannel` from nodes[1] to nodes[0]. - open_chan_msg_chan_2_0.common_fields.temporary_channel_id = open_chan_msg_chan_1_0.common_fields.temporary_channel_id; + open_chan_msg_chan_2_0.common_fields.temporary_channel_id = + open_chan_msg_chan_1_0.common_fields.temporary_channel_id; // Assert that `nodes[0]` can accept both `OpenChannel` requests, even though they use the same // `temporary_channel_id` as they are from different peers. - nodes[0].node.handle_open_channel(nodes[1].node.get_our_node_id(), &open_chan_msg_chan_1_0); + nodes[0].node.handle_open_channel(node_b_id, &open_chan_msg_chan_1_0); { let events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); match &events[0] { MessageSendEvent::SendAcceptChannel { node_id, msg } => { - assert_eq!(node_id, &nodes[1].node.get_our_node_id()); - assert_eq!(msg.common_fields.temporary_channel_id, open_chan_msg_chan_1_0.common_fields.temporary_channel_id); + assert_eq!(node_id, &node_b_id); + assert_eq!( + msg.common_fields.temporary_channel_id, + open_chan_msg_chan_1_0.common_fields.temporary_channel_id + ); }, _ => panic!("Unexpected event"), } } - nodes[0].node.handle_open_channel(nodes[2].node.get_our_node_id(), &open_chan_msg_chan_2_0); + nodes[0].node.handle_open_channel(node_c_id, &open_chan_msg_chan_2_0); { let events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); match &events[0] { 
MessageSendEvent::SendAcceptChannel { node_id, msg } => { - assert_eq!(node_id, &nodes[2].node.get_our_node_id()); - assert_eq!(msg.common_fields.temporary_channel_id, open_chan_msg_chan_1_0.common_fields.temporary_channel_id); + assert_eq!(node_id, &node_c_id); + assert_eq!( + msg.common_fields.temporary_channel_id, + open_chan_msg_chan_1_0.common_fields.temporary_channel_id + ); }, _ => panic!("Unexpected event"), } @@ -9542,34 +8689,45 @@ pub fn test_peer_funding_sidechannel() { let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); let nodes = create_network(3, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let temp_chan_id_ab = exchange_open_accept_chan(&nodes[0], &nodes[1], 1_000_000, 0); let temp_chan_id_ca = exchange_open_accept_chan(&nodes[1], &nodes[0], 1_000_000, 0); - let (_, tx, funding_output) = - create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 1_000_000, 42); + let (_, tx, funding_output) = create_funding_transaction(&nodes[0], &node_b_id, 1_000_000, 42); let cs_funding_events = nodes[1].node.get_and_clear_pending_events(); assert_eq!(cs_funding_events.len(), 1); match cs_funding_events[0] { - Event::FundingGenerationReady { .. } => {} + Event::FundingGenerationReady { .. 
} => {}, _ => panic!("Unexpected event {:?}", cs_funding_events), } - nodes[1].node.funding_transaction_generated_unchecked(temp_chan_id_ca, nodes[0].node.get_our_node_id(), tx.clone(), funding_output.index).unwrap(); - let funding_created_msg = get_event_msg!(nodes[1], MessageSendEvent::SendFundingCreated, nodes[0].node.get_our_node_id()); - nodes[0].node.handle_funding_created(nodes[1].node.get_our_node_id(), &funding_created_msg); - get_event_msg!(nodes[0], MessageSendEvent::SendFundingSigned, nodes[1].node.get_our_node_id()); - expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id()); - check_added_monitors!(nodes[0], 1); + let output_idx = funding_output.index; + nodes[1] + .node + .funding_transaction_generated_unchecked(temp_chan_id_ca, node_a_id, tx.clone(), output_idx) + .unwrap(); + let funding_created_msg = + get_event_msg!(nodes[1], MessageSendEvent::SendFundingCreated, node_a_id); + nodes[0].node.handle_funding_created(node_b_id, &funding_created_msg); + get_event_msg!(nodes[0], MessageSendEvent::SendFundingSigned, node_b_id); + expect_channel_pending_event(&nodes[0], &node_b_id); + check_added_monitors(&nodes[0], 1); - let res = nodes[0].node.funding_transaction_generated(temp_chan_id_ab, nodes[1].node.get_our_node_id(), tx.clone()); + let res = nodes[0].node.funding_transaction_generated(temp_chan_id_ab, node_b_id, tx); let err_msg = format!("{:?}", res.unwrap_err()); assert!(err_msg.contains("An existing channel using ID")); assert!(err_msg.contains("is open with peer")); + let channel_id = ChannelId::v1_from_funding_outpoint(funding_output); - let reason = ClosureReason::ProcessingError { err: format!("An existing channel using ID {} is open with peer {}", channel_id, nodes[1].node.get_our_node_id()), }; - check_closed_events(&nodes[0], &[ExpectedCloseEvent::from_id_reason(temp_chan_id_ab, true, reason)]); - get_err_msg(&nodes[0], &nodes[1].node.get_our_node_id()); + let err = + format!("An existing channel using ID {} is open with 
peer {}", channel_id, node_b_id); + let reason = ClosureReason::ProcessingError { err }; + let close_event = ExpectedCloseEvent::from_id_reason(temp_chan_id_ab, true, reason); + check_closed_events(&nodes[0], &[close_event]); + get_err_msg(&nodes[0], &node_b_id); } #[xtest(feature = "_externalize_tests")] @@ -9584,10 +8742,13 @@ pub fn test_duplicate_conflicting_funding_from_second_peer() { let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]); let nodes = create_network(4, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let temp_chan_id = exchange_open_accept_chan(&nodes[0], &nodes[1], 1_000_000, 0); let (_, tx, funding_outpoint) = - create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 1_000_000, 42); + create_funding_transaction(&nodes[0], &node_b_id, 1_000_000, 42); let real_chan_id = ChannelId::v1_from_funding_outpoint(funding_outpoint); // Now that we have a funding outpoint, create a dummy `ChannelMonitor` and insert it into @@ -9596,21 +8757,25 @@ pub fn test_duplicate_conflicting_funding_from_second_peer() { let dummy_monitor = get_monitor!(nodes[2], dummy_chan_id).clone(); nodes[0].chain_monitor.chain_monitor.watch_channel(real_chan_id, dummy_monitor).unwrap(); - nodes[0].node.funding_transaction_generated(temp_chan_id, nodes[1].node.get_our_node_id(), tx.clone()).unwrap(); + nodes[0].node.funding_transaction_generated(temp_chan_id, node_b_id, tx.clone()).unwrap(); - let mut funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()); - nodes[1].node.handle_funding_created(nodes[0].node.get_our_node_id(), &funding_created_msg); - let funding_signed_msg = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()); - check_added_monitors!(nodes[1], 1); - expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id()); + let 
mut funding_created_msg = + get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, node_b_id); + nodes[1].node.handle_funding_created(node_a_id, &funding_created_msg); + let funding_signed_msg = + get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, node_a_id); + check_added_monitors(&nodes[1], 1); + expect_channel_pending_event(&nodes[1], &node_a_id); - nodes[0].node.handle_funding_signed(nodes[1].node.get_our_node_id(), &funding_signed_msg); + nodes[0].node.handle_funding_signed(node_b_id, &funding_signed_msg); // At this point, the channel should be closed, after having generated one monitor write (the // watch_channel call which failed), but zero monitor updates. - check_added_monitors!(nodes[0], 1); - get_err_msg(&nodes[0], &nodes[1].node.get_our_node_id()); - let err_reason = ClosureReason::ProcessingError { err: "Channel ID was a duplicate".to_owned() }; - check_closed_events(&nodes[0], &[ExpectedCloseEvent::from_id_reason(temp_chan_id, true, err_reason)]); + check_added_monitors(&nodes[0], 1); + get_err_msg(&nodes[0], &node_b_id); + + let reason = ClosureReason::ProcessingError { err: "Channel ID was a duplicate".to_owned() }; + let close_event = ExpectedCloseEvent::from_id_reason(temp_chan_id, true, reason); + check_closed_events(&nodes[0], &[close_event]); } #[xtest(feature = "_externalize_tests")] @@ -9625,31 +8790,37 @@ pub fn test_duplicate_funding_err_in_funding() { let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); let nodes = create_network(3, &node_cfgs, &node_chanmgrs); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + let (_, _, _, real_channel_id, funding_tx) = create_chan_between_nodes(&nodes[0], &nodes[1]); - let real_chan_funding_txo = chain::transaction::OutPoint { txid: funding_tx.compute_txid(), index: 0 }; + let real_chan_funding_txo = + chain::transaction::OutPoint { txid: funding_tx.compute_txid(), index: 0 }; 
assert_eq!(ChannelId::v1_from_funding_outpoint(real_chan_funding_txo), real_channel_id); - nodes[2].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap(); - let mut open_chan_msg = get_event_msg!(nodes[2], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); + nodes[2].node.create_channel(node_b_id, 100_000, 0, 42, None, None).unwrap(); + let mut open_chan_msg = get_event_msg!(nodes[2], MessageSendEvent::SendOpenChannel, node_b_id); let node_c_temp_chan_id = open_chan_msg.common_fields.temporary_channel_id; open_chan_msg.common_fields.temporary_channel_id = real_channel_id; - nodes[1].node.handle_open_channel(nodes[2].node.get_our_node_id(), &open_chan_msg); - let mut accept_chan_msg = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[2].node.get_our_node_id()); + nodes[1].node.handle_open_channel(node_c_id, &open_chan_msg); + let mut accept_chan_msg = + get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_c_id); accept_chan_msg.common_fields.temporary_channel_id = node_c_temp_chan_id; - nodes[2].node.handle_accept_channel(nodes[1].node.get_our_node_id(), &accept_chan_msg); + nodes[2].node.handle_accept_channel(node_b_id, &accept_chan_msg); // Now that we have a second channel with the same funding txo, send a bogus funding message // and let nodes[1] remove the inbound channel. 
- let (_, funding_tx, _) = create_funding_transaction(&nodes[2], &nodes[1].node.get_our_node_id(), 100_000, 42); + let (_, fund_tx, _) = create_funding_transaction(&nodes[2], &node_b_id, 100_000, 42); - nodes[2].node.funding_transaction_generated(node_c_temp_chan_id, nodes[1].node.get_our_node_id(), funding_tx).unwrap(); + nodes[2].node.funding_transaction_generated(node_c_temp_chan_id, node_b_id, fund_tx).unwrap(); - let mut funding_created_msg = get_event_msg!(nodes[2], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()); + let mut funding_created_msg = + get_event_msg!(nodes[2], MessageSendEvent::SendFundingCreated, node_b_id); funding_created_msg.temporary_channel_id = real_channel_id; // Make the signature invalid by changing the funding output funding_created_msg.funding_output_index += 10; - nodes[1].node.handle_funding_created(nodes[2].node.get_our_node_id(), &funding_created_msg); - get_err_msg(&nodes[1], &nodes[2].node.get_our_node_id()); + nodes[1].node.handle_funding_created(node_c_id, &funding_created_msg); + get_err_msg(&nodes[1], &node_c_id); let err = "Invalid funding_created signature from peer".to_owned(); let reason = ClosureReason::ProcessingError { err }; let expected_closing = ExpectedCloseEvent::from_id_reason(real_channel_id, false, reason); @@ -9670,54 +8841,69 @@ pub fn test_duplicate_chan_id() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + // Create an initial channel - nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, None).unwrap(); - let mut open_chan_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); - nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &open_chan_msg); - 
nodes[0].node.handle_accept_channel(nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id())); + nodes[0].node.create_channel(node_b_id, 100000, 10001, 42, None, None).unwrap(); + let mut open_chan_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); + nodes[1].node.handle_open_channel(node_a_id, &open_chan_msg); + nodes[0].node.handle_accept_channel( + node_b_id, + &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id), + ); // Try to create a second channel with the same temporary_channel_id as the first and check // that it is rejected. - nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &open_chan_msg); + nodes[1].node.handle_open_channel(node_a_id, &open_chan_msg); { let events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); match events[0] { - MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id } => { + MessageSendEvent::HandleError { + action: ErrorAction::SendErrorMessage { ref msg }, + node_id, + } => { // Technically, at this point, nodes[1] would be justified in thinking both the // first (valid) and second (invalid) channels are closed, given they both have // the same non-temporary channel_id. However, currently we do not, so we just // move forward with it. assert_eq!(msg.channel_id, open_chan_msg.common_fields.temporary_channel_id); - assert_eq!(node_id, nodes[0].node.get_our_node_id()); + assert_eq!(node_id, node_a_id); }, _ => panic!("Unexpected event"), } } // Move the first channel through the funding flow... 
- let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42); + let (temp_channel_id, tx, _) = create_funding_transaction(&nodes[0], &node_b_id, 100000, 42); - nodes[0].node.funding_transaction_generated(temporary_channel_id, nodes[1].node.get_our_node_id(), tx.clone()).unwrap(); - check_added_monitors!(nodes[0], 0); + nodes[0].node.funding_transaction_generated(temp_channel_id, node_b_id, tx.clone()).unwrap(); + check_added_monitors(&nodes[0], 0); - let mut funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()); + let mut funding_created_msg = + get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, node_b_id); let channel_id = ChannelId::v1_from_funding_txid( - funding_created_msg.funding_txid.as_byte_array(), funding_created_msg.funding_output_index + funding_created_msg.funding_txid.as_byte_array(), + funding_created_msg.funding_output_index, ); - nodes[1].node.handle_funding_created(nodes[0].node.get_our_node_id(), &funding_created_msg); + nodes[1].node.handle_funding_created(node_a_id, &funding_created_msg); { let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap(); assert_eq!(added_monitors.len(), 1); assert_eq!(added_monitors[0].0, channel_id); added_monitors.clear(); } - expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id()); + expect_channel_pending_event(&nodes[1], &node_a_id); - let funding_signed_msg = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()); + let funding_signed_msg = + get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, node_a_id); - let funding_outpoint = crate::chain::transaction::OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index }; + let funding_outpoint = crate::chain::transaction::OutPoint { + txid: funding_created_msg.funding_txid, + index: 
funding_created_msg.funding_output_index, + }; let channel_id = ChannelId::v1_from_funding_outpoint(funding_outpoint); // Now we have the first channel past funding_created (ie it has a txid-based channel_id, not a @@ -9727,65 +8913,80 @@ pub fn test_duplicate_chan_id() { // Technically this is allowed by the spec, but we don't support it and there's little reason // to. Still, it shouldn't cause any other issues. open_chan_msg.common_fields.temporary_channel_id = channel_id; - nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &open_chan_msg); + nodes[1].node.handle_open_channel(node_a_id, &open_chan_msg); { let events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); match events[0] { - MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id } => { + MessageSendEvent::HandleError { + action: ErrorAction::SendErrorMessage { ref msg }, + node_id, + } => { // Technically, at this point, nodes[1] would be justified in thinking both // channels are closed, but currently we do not, so we just move forward with it. assert_eq!(msg.channel_id, open_chan_msg.common_fields.temporary_channel_id); - assert_eq!(node_id, nodes[0].node.get_our_node_id()); + assert_eq!(node_id, node_a_id); }, _ => panic!("Unexpected event"), } } // Now try to create a second channel which has a duplicate funding output. 
- nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, None).unwrap(); - let open_chan_2_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); - nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &open_chan_2_msg); - nodes[0].node.handle_accept_channel(nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id())); - create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42); // Get and check the FundingGenerationReady event + nodes[0].node.create_channel(node_b_id, 100000, 10001, 42, None, None).unwrap(); + let open_chan_2_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); + nodes[1].node.handle_open_channel(node_a_id, &open_chan_2_msg); + nodes[0].node.handle_accept_channel( + node_b_id, + &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id), + ); + create_funding_transaction(&nodes[0], &node_b_id, 100000, 42); // Get and check the FundingGenerationReady event let funding_created = { let per_peer_state = nodes[0].node.per_peer_state.read().unwrap(); - let mut a_peer_state = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap(); + let mut a_peer_state = per_peer_state.get(&node_b_id).unwrap().lock().unwrap(); // Once we call `get_funding_created` the channel has a duplicate channel_id as // another channel in the ChannelManager - an invalid state. Thus, we'd panic later when we // try to create another channel. Instead, we drop the channel entirely here (leaving the // channelmanager in a possibly nonsense state instead). 
- let mut channel = a_peer_state.channel_by_id.remove(&open_chan_2_msg.common_fields.temporary_channel_id).unwrap(); + let chan_id = open_chan_2_msg.common_fields.temporary_channel_id; + let mut channel = a_peer_state.channel_by_id.remove(&chan_id).unwrap(); + if let Some(mut chan) = channel.as_unfunded_outbound_v1_mut() { let logger = test_utils::TestLogger::new(); - chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap() + chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger) + .map_err(|_| ()) + .unwrap() } else { panic!("Unexpected Channel phase") - }.unwrap() + } + .unwrap() }; - check_added_monitors!(nodes[0], 0); - nodes[1].node.handle_funding_created(nodes[0].node.get_our_node_id(), &funding_created); + check_added_monitors(&nodes[0], 0); + nodes[1].node.handle_funding_created(node_a_id, &funding_created); // At this point we'll look up if the channel_id is present and immediately fail the channel // without trying to persist the `ChannelMonitor`. - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); - check_closed_events(&nodes[1], &[ - ExpectedCloseEvent::from_id_reason(funding_created.temporary_channel_id, false, ClosureReason::ProcessingError { - err: "Already had channel with the new channel_id".to_owned() - }) - ]); + let reason = ClosureReason::ProcessingError { + err: "Already had channel with the new channel_id".to_owned(), + }; + let close_event = + ExpectedCloseEvent::from_id_reason(funding_created.temporary_channel_id, false, reason); + check_closed_events(&nodes[1], &[close_event]); // ...still, nodes[1] will reject the duplicate channel. 
{ let events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); match events[0] { - MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id } => { + MessageSendEvent::HandleError { + action: ErrorAction::SendErrorMessage { ref msg }, + node_id, + } => { // Technically, at this point, nodes[1] would be justified in thinking both // channels are closed, but currently we do not, so we just move forward with it. assert_eq!(msg.channel_id, channel_id); - assert_eq!(node_id, nodes[0].node.get_our_node_id()); + assert_eq!(node_id, node_a_id); }, _ => panic!("Unexpected event"), } @@ -9793,22 +8994,24 @@ pub fn test_duplicate_chan_id() { // finally, finish creating the original channel and send a payment over it to make sure // everything is functional. - nodes[0].node.handle_funding_signed(nodes[1].node.get_our_node_id(), &funding_signed_msg); + nodes[0].node.handle_funding_signed(node_b_id, &funding_signed_msg); { let mut added_monitors = nodes[0].chain_monitor.added_monitors.lock().unwrap(); assert_eq!(added_monitors.len(), 1); assert_eq!(added_monitors[0].0, channel_id); added_monitors.clear(); } - expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id()); + expect_channel_pending_event(&nodes[0], &node_b_id); let events_4 = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events_4.len(), 0); assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1); assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap()[0], tx); - let (channel_ready, _) = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &tx); - let (announcement, as_update, bs_update) = create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &channel_ready); + let (channel_ready, _) = + create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &tx); + let (announcement, as_update, bs_update) = + create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], 
&channel_ready); update_nodes_with_chan_announce(&nodes, 0, 1, &announcement, &as_update, &bs_update); send_payment(&nodes[0], &[&nodes[1]], 8000000); @@ -9826,6 +9029,8 @@ pub fn test_error_chans_closed() { let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); let nodes = create_network(3, &node_cfgs, &node_chanmgrs); + let node_b_id = nodes[1].node.get_our_node_id(); + // Create some initial channels let chan_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001); let chan_2 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001); @@ -9836,26 +9041,47 @@ pub fn test_error_chans_closed() { assert_eq!(nodes[2].node.list_usable_channels().len(), 1); // Closing a channel from a different peer has no effect - nodes[0].node.handle_error(nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id: chan_3.2, data: "ERR".to_owned() }); + nodes[0].node.handle_error( + node_b_id, + &msgs::ErrorMessage { channel_id: chan_3.2, data: "ERR".to_owned() }, + ); assert_eq!(nodes[0].node.list_usable_channels().len(), 3); // Closing one channel doesn't impact others - nodes[0].node.handle_error(nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id: chan_2.2, data: "ERR".to_owned() }); - check_added_monitors!(nodes[0], 1); + nodes[0].node.handle_error( + node_b_id, + &msgs::ErrorMessage { channel_id: chan_2.2, data: "ERR".to_owned() }, + ); + check_added_monitors(&nodes[0], 1); check_closed_broadcast!(nodes[0], false); - check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString("ERR".to_string()) }, - [nodes[1].node.get_our_node_id()], 100000); + + let reason = + ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString("ERR".to_string()) }; + check_closed_event!(nodes[0], 1, reason, [node_b_id], 100000); + assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0).len(), 1); 
assert_eq!(nodes[0].node.list_usable_channels().len(), 2); - assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_1.2 || nodes[0].node.list_usable_channels()[1].channel_id == chan_1.2); - assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_3.2 || nodes[0].node.list_usable_channels()[1].channel_id == chan_3.2); + assert!( + nodes[0].node.list_usable_channels()[0].channel_id == chan_1.2 + || nodes[0].node.list_usable_channels()[1].channel_id == chan_1.2 + ); + assert!( + nodes[0].node.list_usable_channels()[0].channel_id == chan_3.2 + || nodes[0].node.list_usable_channels()[1].channel_id == chan_3.2 + ); // A null channel ID should close all channels let _chan_4 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001); - nodes[0].node.handle_error(nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id: ChannelId::new_zero(), data: "ERR".to_owned() }); - check_added_monitors!(nodes[0], 2); - check_closed_event!(nodes[0], 2, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString("ERR".to_string()) }, - [nodes[1].node.get_our_node_id(); 2], 100000); + nodes[0].node.handle_error( + node_b_id, + &msgs::ErrorMessage { channel_id: ChannelId::new_zero(), data: "ERR".to_owned() }, + ); + check_added_monitors(&nodes[0], 2); + + let reason = + ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString("ERR".to_string()) }; + check_closed_event!(nodes[0], 2, reason, [node_b_id; 2], 100000); + let events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 2); match events[0] { @@ -9875,7 +9101,7 @@ pub fn test_error_chans_closed() { assert_eq!(nodes[0].node.list_usable_channels().len(), 1); assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_3.2); - nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); + nodes[0].node.peer_disconnected(node_b_id); assert_eq!(nodes[0].node.list_usable_channels().len(), 1); 
assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_3.2); } @@ -9897,31 +9123,51 @@ pub fn test_invalid_funding_tx() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 10_000, 42, None, None).unwrap(); - nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id())); - nodes[0].node.handle_accept_channel(nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id())); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + + nodes[0].node.create_channel(node_b_id, 100_000, 10_000, 42, None, None).unwrap(); + nodes[1].node.handle_open_channel( + node_a_id, + &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id), + ); + nodes[0].node.handle_accept_channel( + node_b_id, + &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id), + ); - let (temporary_channel_id, mut tx, _) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100_000, 42); + let (temporary_channel_id, mut tx, _) = + create_funding_transaction(&nodes[0], &node_b_id, 100_000, 42); // Create a witness program which can be spent by a 4-empty-stack-elements witness and which is // 136 bytes long. This matches our "accepted HTLC preimage spend" matching, previously causing // a panic as we'd try to extract a 32 byte preimage from a witness element without checking // its length. 
- let mut wit_program: Vec = channelmonitor::deliberately_bogus_accepted_htlc_witness_program(); + let mut wit_program: Vec = + channelmonitor::deliberately_bogus_accepted_htlc_witness_program(); let wit_program_script: ScriptBuf = wit_program.into(); for output in tx.output.iter_mut() { // Make the confirmed funding transaction have a bogus script_pubkey output.script_pubkey = ScriptBuf::new_p2wsh(&wit_program_script.wscript_hash()); } - nodes[0].node.funding_transaction_generated_unchecked(temporary_channel_id, nodes[1].node.get_our_node_id(), tx.clone(), 0).unwrap(); - nodes[1].node.handle_funding_created(nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id())); - check_added_monitors!(nodes[1], 1); - expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id()); + nodes[0] + .node + .funding_transaction_generated_unchecked(temporary_channel_id, node_b_id, tx.clone(), 0) + .unwrap(); + nodes[1].node.handle_funding_created( + node_a_id, + &get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, node_b_id), + ); + check_added_monitors(&nodes[1], 1); + expect_channel_pending_event(&nodes[1], &node_a_id); - nodes[0].node.handle_funding_signed(nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id())); - check_added_monitors!(nodes[0], 1); - expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id()); + nodes[0].node.handle_funding_signed( + node_b_id, + &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, node_a_id), + ); + check_added_monitors(&nodes[0], 1); + expect_channel_pending_event(&nodes[0], &node_b_id); let events_1 = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events_1.len(), 0); @@ -9932,37 +9178,48 @@ pub fn test_invalid_funding_tx() { let expected_err = "funding tx had wrong script/value or output index"; confirm_transaction_at(&nodes[1], &tx, 1); - 
check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: expected_err.to_string() }, - [nodes[0].node.get_our_node_id()], 100000); - check_added_monitors!(nodes[1], 1); + + let reason = ClosureReason::ProcessingError { err: expected_err.to_string() }; + check_closed_event!(nodes[1], 1, reason, [node_a_id], 100000); + + check_added_monitors(&nodes[1], 1); let events_2 = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events_2.len(), 1); if let MessageSendEvent::HandleError { node_id, action } = &events_2[0] { - assert_eq!(*node_id, nodes[0].node.get_our_node_id()); + assert_eq!(*node_id, node_a_id); if let msgs::ErrorAction::DisconnectPeer { msg } = action { - assert_eq!(msg.as_ref().unwrap().data, "Channel closed because of an exception: ".to_owned() + expected_err); - } else { panic!(); } - } else { panic!(); } + assert_eq!( + msg.as_ref().unwrap().data, + "Channel closed because of an exception: ".to_owned() + expected_err + ); + } else { + panic!(); + } + } else { + panic!(); + } assert_eq!(nodes[1].node.list_channels().len(), 0); // Now confirm a spend of the (bogus) funding transaction. As long as the witness is 5 elements // long the ChannelMonitor will try to read 32 bytes from the second-to-last element, panicing // as its not 32 bytes long. 
let mut spend_tx = Transaction { - version: Version::TWO, lock_time: LockTime::ZERO, - input: tx.output.iter().enumerate().map(|(idx, _)| TxIn { - previous_output: BitcoinOutPoint { - txid: tx.compute_txid(), - vout: idx as u32, - }, - script_sig: ScriptBuf::new(), - sequence: Sequence::ENABLE_RBF_NO_LOCKTIME, - witness: Witness::from_slice(&channelmonitor::deliberately_bogus_accepted_htlc_witness()) - }).collect(), - output: vec![TxOut { - value: Amount::from_sat(1000), - script_pubkey: ScriptBuf::new(), - }] + version: Version::TWO, + lock_time: LockTime::ZERO, + input: tx + .output + .iter() + .enumerate() + .map(|(idx, _)| TxIn { + previous_output: BitcoinOutPoint { txid: tx.compute_txid(), vout: idx as u32 }, + script_sig: ScriptBuf::new(), + sequence: Sequence::ENABLE_RBF_NO_LOCKTIME, + witness: Witness::from_slice( + &channelmonitor::deliberately_bogus_accepted_htlc_witness(), + ), + }) + .collect(), + output: vec![TxOut { value: Amount::from_sat(1000), script_pubkey: ScriptBuf::new() }], }; check_spends!(spend_tx, tx); mine_transaction(&nodes[1], &spend_tx); @@ -9982,31 +9239,35 @@ pub fn test_coinbase_funding_tx() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, None).unwrap(); - let open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + + nodes[0].node.create_channel(node_b_id, 100000, 10001, 42, None, None).unwrap(); + let open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); - nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &open_channel); - let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()); + 
nodes[1].node.handle_open_channel(node_a_id, &open_channel); + let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id); - nodes[0].node.handle_accept_channel(nodes[1].node.get_our_node_id(), &accept_channel); + nodes[0].node.handle_accept_channel(node_b_id, &accept_channel); // Create the coinbase funding transaction. - let (temporary_channel_id, tx, _) = create_coinbase_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42); + let (channel_id, tx, _) = + create_coinbase_funding_transaction(&nodes[0], &node_b_id, 100000, 42); - nodes[0].node.funding_transaction_generated(temporary_channel_id, nodes[1].node.get_our_node_id(), tx.clone()).unwrap(); - check_added_monitors!(nodes[0], 0); - let funding_created = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()); + nodes[0].node.funding_transaction_generated(channel_id, node_b_id, tx.clone()).unwrap(); + check_added_monitors(&nodes[0], 0); + let funding_created = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, node_b_id); - nodes[1].node.handle_funding_created(nodes[0].node.get_our_node_id(), &funding_created); - check_added_monitors!(nodes[1], 1); - expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id()); + nodes[1].node.handle_funding_created(node_a_id, &funding_created); + check_added_monitors(&nodes[1], 1); + expect_channel_pending_event(&nodes[1], &node_a_id); - let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()); + let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, node_a_id); - nodes[0].node.handle_funding_signed(nodes[1].node.get_our_node_id(), &funding_signed); - check_added_monitors!(nodes[0], 1); + nodes[0].node.handle_funding_signed(node_b_id, &funding_signed); + check_added_monitors(&nodes[0], 1); - expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id()); + 
expect_channel_pending_event(&nodes[0], &node_b_id); assert!(nodes[0].node.get_and_clear_pending_events().is_empty()); // Starting at height 0, we "confirm" the coinbase at height 1. @@ -10018,13 +9279,16 @@ pub fn test_coinbase_funding_tx() { // Now connect one more block which results in 100 confirmations of the coinbase transaction. connect_blocks(&nodes[0], 1); // There should now be a `channel_ready` which can be handled. - let _ = &nodes[1].node.handle_channel_ready(nodes[0].node.get_our_node_id(), &get_event_msg!(&nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id())); + let _ = &nodes[1].node.handle_channel_ready( + node_a_id, + &get_event_msg!(&nodes[0], MessageSendEvent::SendChannelReady, node_b_id), + ); confirm_transaction_at(&nodes[1], &tx, 1); connect_blocks(&nodes[1], COINBASE_MATURITY - 2); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); connect_blocks(&nodes[1], 1); - expect_channel_ready_event(&nodes[1], &nodes[0].node.get_our_node_id()); + expect_channel_ready_event(&nodes[1], &node_a_id); create_chan_between_nodes_with_value_confirm_second(&nodes[0], &nodes[1]); } @@ -10049,20 +9313,26 @@ fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_t let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + *nodes[0].connect_style.borrow_mut() = ConnectStyle::BestBlockFirstSkippingBlocks; + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); let node_c_id = nodes[2].node.get_our_node_id(); create_announced_chan_between_nodes(&nodes, 0, 1); let (chan_announce, _, channel_id, _) = create_announced_chan_between_nodes(&nodes, 1, 2); let (_, payment_hash, ..) 
= route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000); - nodes[1].node.peer_disconnected(nodes[2].node.get_our_node_id()); - nodes[2].node.peer_disconnected(nodes[1].node.get_our_node_id()); - let error_message = "Channel force-closed"; - nodes[1].node.force_close_broadcasting_latest_txn(&channel_id, &nodes[2].node.get_our_node_id(), error_message.to_string()).unwrap(); + nodes[1].node.peer_disconnected(node_c_id); + nodes[2].node.peer_disconnected(node_b_id); + + let err = "Channel force-closed".to_string(); + nodes[1].node.force_close_broadcasting_latest_txn(&channel_id, &node_c_id, err).unwrap(); + check_closed_broadcast!(nodes[1], true); - check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[2].node.get_our_node_id()], 100000); - check_added_monitors!(nodes[1], 1); + let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }; + check_closed_event!(nodes[1], 1, reason, [node_c_id], 100000); + check_added_monitors(&nodes[1], 1); let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); assert_eq!(node_txn.len(), 1); @@ -10071,7 +9341,10 @@ fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_t connect_blocks(&nodes[1], TEST_FINAL_CLTV - LATENCY_GRACE_PERIOD_BLOCKS); } nodes[1].chain_monitor.chain_monitor.transactions_confirmed( - &nodes[1].get_block_header(conf_height), &[(0, &node_txn[0])], conf_height); + &nodes[1].get_block_header(conf_height), + &[(0, &node_txn[0])], + conf_height, + ); if test_height_before_timelock { // If we confirmed the close transaction, but timelocks have not yet expired, we should not // generate any events or broadcast any transactions @@ -10092,31 +9365,44 @@ fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_t // should immediately fail-backwards the HTLC to the previous hop, without waiting for an // additional block built on top of the current chain. 
nodes[1].chain_monitor.chain_monitor.transactions_confirmed( - &nodes[1].get_block_header(conf_height + 1), &[(0, htlc_tx)], conf_height + 1); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingFailureType::Forward { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: channel_id }]); - check_added_monitors!(nodes[1], 1); + &nodes[1].get_block_header(conf_height + 1), + &[(0, htlc_tx)], + conf_height + 1, + ); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!( + nodes[1], + vec![HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id }] + ); + check_added_monitors(&nodes[1], 1); - let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + let updates = get_htlc_update_msgs!(nodes[1], node_a_id); assert!(updates.update_add_htlcs.is_empty()); assert!(updates.update_fulfill_htlcs.is_empty()); assert_eq!(updates.update_fail_htlcs.len(), 1); assert!(updates.update_fail_malformed_htlcs.is_empty()); assert!(updates.update_fee.is_none()); - nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]); + nodes[0].node.handle_update_fail_htlc(node_b_id, &updates.update_fail_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, true, true); - expect_payment_failed_with_update!(nodes[0], payment_hash, false, chan_announce.contents.short_channel_id, true); + + let failed_scid = chan_announce.contents.short_channel_id; + expect_payment_failed_with_update!(nodes[0], payment_hash, false, failed_scid, true); // We should also generate a SpendableOutputs event with the to_self output (once the // timelock is up). 
- connect_blocks(&nodes[1], (BREAKDOWN_TIMEOUT as u32) - TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS - 1); + connect_blocks( + &nodes[1], + (BREAKDOWN_TIMEOUT as u32) - TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS - 1, + ); let descriptor_spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager); assert_eq!(descriptor_spend_txn.len(), 1); // When the HTLC times out on the A<->B edge, the B<->C channel will fail the HTLC back to // avoid the A<->B channel closing (even though it already has). This will generate a // spurious HTLCHandlingFailed event. - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], - vec![HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!( + nodes[1], + vec![HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id }] + ); } } @@ -10132,22 +9418,28 @@ fn do_test_dup_htlc_second_rejected(test_for_second_fail_panic: bool) { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001); - let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV) - .with_bolt11_features(nodes[1].node.bolt11_invoice_features()).unwrap(); + let payment_params = PaymentParameters::from_node_id(node_b_id, TEST_FINAL_CLTV) + .with_bolt11_features(nodes[1].node.bolt11_invoice_features()) + .unwrap(); let route = get_route!(nodes[0], payment_params, 10_000).unwrap(); - let (our_payment_preimage, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(&nodes[1]); + let (our_payment_preimage, our_payment_hash, our_payment_secret) = + get_payment_preimage_hash!(&nodes[1]); { - nodes[0].node.send_payment_with_route(route.clone(), 
our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap(); - check_added_monitors!(nodes[0], 1); + let onion = RecipientOnionFields::secret_only(our_payment_secret); + let id = PaymentId(our_payment_hash.0); + nodes[0].node.send_payment_with_route(route.clone(), our_payment_hash, onion, id).unwrap(); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let mut payment_event = SendEvent::from_event(events.pop().unwrap()); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); + nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); } expect_pending_htlcs_forwardable!(nodes[1]); @@ -10155,13 +9447,14 @@ fn do_test_dup_htlc_second_rejected(test_for_second_fail_panic: bool) { { // Note that we use a different PaymentId here to allow us to duplicativly pay - nodes[0].node.send_payment_with_route(route, our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_secret.0)).unwrap(); - check_added_monitors!(nodes[0], 1); + let onion = RecipientOnionFields::secret_only(our_payment_secret); + let id = PaymentId(our_payment_secret.0); + nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id).unwrap(); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let mut payment_event = SendEvent::from_event(events.pop().unwrap()); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); + nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); // At this point, nodes[1] would notice it has too much value for the payment. 
It will // assume the second is a privacy attack (no longer particularly relevant @@ -10180,34 +9473,53 @@ fn do_test_dup_htlc_second_rejected(test_for_second_fail_panic: bool) { HTLCHandlingFailureType::Receive { payment_hash: our_payment_hash }, HTLCHandlingFailureType::Receive { payment_hash: our_payment_hash }, ]; - expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], expected_destinations); + expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!( + nodes[1], + expected_destinations + ); nodes[1].node.process_pending_htlc_forwards(); - check_added_monitors!(nodes[1], 1); - let fail_updates_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + check_added_monitors(&nodes[1], 1); + let fail_updates_1 = get_htlc_update_msgs!(nodes[1], node_a_id); assert_eq!(fail_updates_1.update_fail_htlcs.len(), 2); - nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &fail_updates_1.update_fail_htlcs[0]); - nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &fail_updates_1.update_fail_htlcs[1]); + nodes[0].node.handle_update_fail_htlc(node_b_id, &fail_updates_1.update_fail_htlcs[0]); + nodes[0].node.handle_update_fail_htlc(node_b_id, &fail_updates_1.update_fail_htlcs[1]); commitment_signed_dance!(nodes[0], nodes[1], fail_updates_1.commitment_signed, false); let failure_events = nodes[0].node.get_and_clear_pending_events(); assert_eq!(failure_events.len(), 4); - if let Event::PaymentPathFailed { .. } = failure_events[0] {} else { panic!(); } - if let Event::PaymentFailed { .. } = failure_events[1] {} else { panic!(); } - if let Event::PaymentPathFailed { .. } = failure_events[2] {} else { panic!(); } - if let Event::PaymentFailed { .. } = failure_events[3] {} else { panic!(); } + if let Event::PaymentPathFailed { .. } = failure_events[0] { + } else { + panic!(); + } + if let Event::PaymentFailed { .. } = failure_events[1] { + } else { + panic!(); + } + if let Event::PaymentPathFailed { .. 
} = failure_events[2] { + } else { + panic!(); + } + if let Event::PaymentFailed { .. } = failure_events[3] { + } else { + panic!(); + } } else { // Let the second HTLC fail and claim the first - expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCHandlingFailureType::Receive { payment_hash: our_payment_hash }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!( + nodes[1], + vec![HTLCHandlingFailureType::Receive { payment_hash: our_payment_hash }] + ); nodes[1].node.process_pending_htlc_forwards(); - check_added_monitors!(nodes[1], 1); - let fail_updates_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); - nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &fail_updates_1.update_fail_htlcs[0]); + check_added_monitors(&nodes[1], 1); + let fail_updates_1 = get_htlc_update_msgs!(nodes[1], node_a_id); + nodes[0].node.handle_update_fail_htlc(node_b_id, &fail_updates_1.update_fail_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[1], fail_updates_1.commitment_signed, false); - expect_payment_failed_conditions(&nodes[0], our_payment_hash, true, PaymentFailedConditions::new()); + let conditions = PaymentFailedConditions::new(); + expect_payment_failed_conditions(&nodes[0], our_payment_hash, true, conditions); claim_payment(&nodes[0], &[&nodes[1]], our_payment_preimage); } @@ -10238,67 +9550,87 @@ pub fn test_inconsistent_mpp_params() { let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]); let nodes = create_network(4, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + let node_d_id = nodes[3].node.get_our_node_id(); + create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 0); create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 100_000, 0); create_announced_chan_between_nodes_with_value(&nodes, 1, 
3, 100_000, 0); - let chan_2_3 =create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 100_000, 0); + let chan_2_3 = create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 100_000, 0); - let payment_params = PaymentParameters::from_node_id(nodes[3].node.get_our_node_id(), TEST_FINAL_CLTV) - .with_bolt11_features(nodes[3].node.bolt11_invoice_features()).unwrap(); + let payment_params = PaymentParameters::from_node_id(node_d_id, TEST_FINAL_CLTV) + .with_bolt11_features(nodes[3].node.bolt11_invoice_features()) + .unwrap(); let mut route = get_route!(nodes[0], payment_params, 15_000_000).unwrap(); assert_eq!(route.paths.len(), 2); route.paths.sort_by(|path_a, _| { // Sort the path so that the path through nodes[1] comes first - if path_a.hops[0].pubkey == nodes[1].node.get_our_node_id() { - core::cmp::Ordering::Less } else { core::cmp::Ordering::Greater } + if path_a.hops[0].pubkey == node_b_id { + core::cmp::Ordering::Less + } else { + core::cmp::Ordering::Greater + } }); - let (our_payment_preimage, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(&nodes[3]); + let (preimage, hash, secret) = get_payment_preimage_hash!(&nodes[3]); let cur_height = nodes[0].best_block_info().1; - let payment_id = PaymentId([42; 32]); + let id = PaymentId([42; 32]); let session_privs = { // We create a fake route here so that we start with three pending HTLCs, which we'll // ultimately have, just not right away. 
let mut dup_route = route.clone(); dup_route.paths.push(route.paths[1].clone()); - nodes[0].node.test_add_new_pending_payment(our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), payment_id, &dup_route).unwrap() + let onion = RecipientOnionFields::secret_only(secret); + nodes[0].node.test_add_new_pending_payment(hash, onion, id, &dup_route).unwrap() }; - nodes[0].node.test_send_payment_along_path(&route.paths[0], &our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), 15_000_000, cur_height, payment_id, - &None, session_privs[0]).unwrap(); - check_added_monitors!(nodes[0], 1); + let onion = RecipientOnionFields::secret_only(secret); + let path_a = &route.paths[0]; + let real_amt = 15_000_000; + let priv_a = session_privs[0]; + nodes[0] + .node + .test_send_payment_along_path(path_a, &hash, onion, real_amt, cur_height, id, &None, priv_a) + .unwrap(); + check_added_monitors(&nodes[0], 1); - { - let mut events = nodes[0].node.get_and_clear_pending_msg_events(); - assert_eq!(events.len(), 1); - pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 15_000_000, our_payment_hash, Some(our_payment_secret), events.pop().unwrap(), false, None); - } + let mut events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + let path_a = &[&nodes[1], &nodes[3]]; + let event = events.pop().unwrap(); + pass_along_path(&nodes[0], path_a, real_amt, hash, Some(secret), event, false, None); assert!(nodes[3].node.get_and_clear_pending_events().is_empty()); - nodes[0].node.test_send_payment_along_path(&route.paths[1], &our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), 14_000_000, cur_height, payment_id, &None, session_privs[1]).unwrap(); - check_added_monitors!(nodes[0], 1); + let path_b = &route.paths[1]; + let onion = RecipientOnionFields::secret_only(secret); + let amt_b = 14_000_000; + let priv_b = session_privs[1]; + nodes[0] + .node + .test_send_payment_along_path(path_b, &hash, onion, 
amt_b, cur_height, id, &None, priv_b) + .unwrap(); + check_added_monitors(&nodes[0], 1); { let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let payment_event = SendEvent::from_event(events.pop().unwrap()); - nodes[2].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); + nodes[2].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); commitment_signed_dance!(nodes[2], nodes[0], payment_event.commitment_msg, false); expect_pending_htlcs_forwardable!(nodes[2]); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); let mut events = nodes[2].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let payment_event = SendEvent::from_event(events.pop().unwrap()); - nodes[3].node.handle_update_add_htlc(nodes[2].node.get_our_node_id(), &payment_event.msgs[0]); - check_added_monitors!(nodes[3], 0); + nodes[3].node.handle_update_add_htlc(node_c_id, &payment_event.msgs[0]); + check_added_monitors(&nodes[3], 0); commitment_signed_dance!(nodes[3], nodes[2], payment_event.commitment_msg, true, true); // At this point, nodes[3] should notice the two HTLCs don't contain the same total payment @@ -10307,37 +9639,46 @@ pub fn test_inconsistent_mpp_params() { } expect_pending_htlcs_forwardable_ignore!(nodes[3]); nodes[3].node.process_pending_htlc_forwards(); - expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[3], vec![HTLCHandlingFailureType::Receive { payment_hash: our_payment_hash }]); + let fail_type = HTLCHandlingFailureType::Receive { payment_hash: hash }; + expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[3], [fail_type]); nodes[3].node.process_pending_htlc_forwards(); - check_added_monitors!(nodes[3], 1); + check_added_monitors(&nodes[3], 1); - let fail_updates_1 = get_htlc_update_msgs!(nodes[3], nodes[2].node.get_our_node_id()); - nodes[2].node.handle_update_fail_htlc(nodes[3].node.get_our_node_id(), 
&fail_updates_1.update_fail_htlcs[0]); + let fail_updates_1 = get_htlc_update_msgs!(nodes[3], node_c_id); + nodes[2].node.handle_update_fail_htlc(node_d_id, &fail_updates_1.update_fail_htlcs[0]); commitment_signed_dance!(nodes[2], nodes[3], fail_updates_1.commitment_signed, false); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCHandlingFailureType::Forward { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }]); - check_added_monitors!(nodes[2], 1); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!( + nodes[2], + vec![HTLCHandlingFailureType::Forward { node_id: Some(node_d_id), channel_id: chan_2_3.2 }] + ); + check_added_monitors(&nodes[2], 1); - let fail_updates_2 = get_htlc_update_msgs!(nodes[2], nodes[0].node.get_our_node_id()); - nodes[0].node.handle_update_fail_htlc(nodes[2].node.get_our_node_id(), &fail_updates_2.update_fail_htlcs[0]); + let fail_updates_2 = get_htlc_update_msgs!(nodes[2], node_a_id); + nodes[0].node.handle_update_fail_htlc(node_c_id, &fail_updates_2.update_fail_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[2], fail_updates_2.commitment_signed, false); - expect_payment_failed_conditions(&nodes[0], our_payment_hash, true, PaymentFailedConditions::new().mpp_parts_remain()); + let conditions = PaymentFailedConditions::new().mpp_parts_remain(); + expect_payment_failed_conditions(&nodes[0], hash, true, conditions); - nodes[0].node.test_send_payment_along_path(&route.paths[1], &our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), 15_000_000, cur_height, payment_id, - &None, session_privs[2]).unwrap(); - check_added_monitors!(nodes[0], 1); + let onion = RecipientOnionFields::secret_only(secret); + let path_b = &route.paths[1]; + let priv_c = session_privs[2]; + nodes[0] + .node + .test_send_payment_along_path(path_b, &hash, onion, real_amt, cur_height, id, &None, priv_c) + .unwrap(); + check_added_monitors(&nodes[0], 1); let mut events = 
nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); - pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 15_000_000, our_payment_hash, Some(our_payment_secret), events.pop().unwrap(), true, None); + let event = events.pop().unwrap(); + let path_b = &[&nodes[2], &nodes[3]]; + pass_along_path(&nodes[0], path_b, real_amt, hash, Some(secret), event, true, None); - do_claim_payment_along_route( - ClaimAlongRouteArgs::new(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], our_payment_preimage) - ); - expect_payment_sent(&nodes[0], our_payment_preimage, Some(None), true, true); + do_claim_payment_along_route(ClaimAlongRouteArgs::new(&nodes[0], &[path_a, path_b], preimage)); + expect_payment_sent(&nodes[0], preimage, Some(None), true, true); } #[xtest(feature = "_externalize_tests")] @@ -10351,20 +9692,27 @@ pub fn test_double_partial_claim() { let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]); let nodes = create_network(4, &node_cfgs, &node_chanmgrs); + let node_b_id = nodes[1].node.get_our_node_id(); + create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 0); create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 100_000, 0); create_announced_chan_between_nodes_with_value(&nodes, 1, 3, 100_000, 0); create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 100_000, 0); - let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[3], 15_000_000); + let (mut route, payment_hash, payment_preimage, secret) = + get_route_and_payment_hash!(nodes[0], nodes[3], 15_000_000); assert_eq!(route.paths.len(), 2); route.paths.sort_by(|path_a, _| { // Sort the path so that the path through nodes[1] comes first - if path_a.hops[0].pubkey == nodes[1].node.get_our_node_id() { - core::cmp::Ordering::Less } else { core::cmp::Ordering::Greater } + if path_a.hops[0].pubkey == node_b_id { + core::cmp::Ordering::Less + } else { + 
core::cmp::Ordering::Greater + } }); - send_along_route_with_secret(&nodes[0], route.clone(), &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], 15_000_000, payment_hash, payment_secret); + let paths: &[&[_]] = &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]]; + send_along_route_with_secret(&nodes[0], route.clone(), paths, 15_000_000, payment_hash, secret); // nodes[3] has now received a PaymentClaimable event...which it will take some (exorbitant) // amount of time to respond to. @@ -10378,22 +9726,25 @@ pub fn test_double_partial_claim() { ]; expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[3], failed_destinations); - pass_failed_payment_back(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_hash, PaymentFailureReason::RecipientRejected); + let reason = PaymentFailureReason::RecipientRejected; + pass_failed_payment_back(&nodes[0], paths, false, payment_hash, reason); // nodes[1] now retries one of the two paths... - nodes[0].node.send_payment_with_route(route, payment_hash, - RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap(); - check_added_monitors!(nodes[0], 2); + let onion = RecipientOnionFields::secret_only(secret); + let id = PaymentId(payment_hash.0); + nodes[0].node.send_payment_with_route(route, payment_hash, onion, id).unwrap(); + check_added_monitors(&nodes[0], 2); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 2); - let node_1_msgs = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &mut events); - pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 15_000_000, payment_hash, Some(payment_secret), node_1_msgs, false, None); + let msgs = remove_first_msg_event_to_node(&node_b_id, &mut events); + let path = &[&nodes[1], &nodes[3]]; + pass_along_path(&nodes[0], path, 15_000_000, payment_hash, Some(secret), msgs, false, None); // At this point nodes[3] has received one half of the payment, and the user goes to 
handle // that PaymentClaimable event they got hours ago and never handled...we should refuse to claim. nodes[3].node.claim_funds(payment_preimage); - check_added_monitors!(nodes[3], 0); + check_added_monitors(&nodes[3], 0); assert!(nodes[3].node.get_and_clear_pending_msg_events().is_empty()); } @@ -10408,7 +9759,10 @@ enum ExposureEvent { AtUpdateFeeOutbound, } -fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_event: ExposureEvent, on_holder_tx: bool, multiplier_dust_limit: bool, apply_excess_fee: bool) { +fn do_test_max_dust_htlc_exposure( + dust_outbound_balance: bool, exposure_breach_event: ExposureEvent, on_holder_tx: bool, + multiplier_dust_limit: bool, apply_excess_fee: bool, +) { // Test that we properly reject dust HTLC violating our `max_dust_htlc_exposure_msat` // policy. // @@ -10427,19 +9781,25 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e // We hard-code the feerate values here but they're re-calculated furter down and asserted. // If the values ever change below these constants should simply be updated. 
const AT_FEE_OUTBOUND_HTLCS: u64 = 20; - let nondust_htlc_count_in_limit = - if exposure_breach_event == ExposureEvent::AtUpdateFeeOutbound { + let nondust_htlc_count_in_limit = if exposure_breach_event == ExposureEvent::AtUpdateFeeOutbound + { AT_FEE_OUTBOUND_HTLCS - } else { 0 }; + } else { + 0 + }; let initial_feerate = if apply_excess_fee { 253 * 2 } else { 253 }; let expected_dust_buffer_feerate = initial_feerate + 2530; - let mut commitment_tx_cost_msat = commit_tx_fee_msat(initial_feerate - 253, nondust_htlc_count_in_limit, &ChannelTypeFeatures::empty()); - commitment_tx_cost_msat += - if on_holder_tx { - htlc_success_tx_weight(&ChannelTypeFeatures::empty()) - } else { - htlc_timeout_tx_weight(&ChannelTypeFeatures::empty()) - } * (initial_feerate as u64 - 253) * nondust_htlc_count_in_limit; + let mut commitment_tx_cost_msat = commit_tx_fee_msat( + initial_feerate - 253, + nondust_htlc_count_in_limit, + &ChannelTypeFeatures::empty(), + ); + commitment_tx_cost_msat += if on_holder_tx { + htlc_success_tx_weight(&ChannelTypeFeatures::empty()) + } else { + htlc_timeout_tx_weight(&ChannelTypeFeatures::empty()) + } * (initial_feerate as u64 - 253) + * nondust_htlc_count_in_limit; { let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); *feerate_lock = initial_feerate; @@ -10449,30 +9809,36 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e // to get roughly the same initial value as the default setting when this test was // originally written. 
MaxDustHTLCExposure::FeeRateMultiplier((5_000_000 + commitment_tx_cost_msat) / 253) - } else { MaxDustHTLCExposure::FixedLimitMsat(5_000_000 + commitment_tx_cost_msat) }; + } else { + MaxDustHTLCExposure::FixedLimitMsat(5_000_000 + commitment_tx_cost_msat) + }; let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config), None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); - nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1_000_000, 500_000_000, 42, None, None).unwrap(); - let mut open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + + nodes[0].node.create_channel(node_b_id, 1_000_000, 500_000_000, 42, None, None).unwrap(); + let mut open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); open_channel.common_fields.max_htlc_value_in_flight_msat = 50_000_000; open_channel.common_fields.max_accepted_htlcs = 60; if on_holder_tx { open_channel.common_fields.dust_limit_satoshis = 546; } - nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &open_channel); - let mut accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()); - nodes[0].node.handle_accept_channel(nodes[1].node.get_our_node_id(), &accept_channel); + nodes[1].node.handle_open_channel(node_a_id, &open_channel); + let mut accept_channel = + get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id); + nodes[0].node.handle_accept_channel(node_b_id, &accept_channel); let channel_type_features = ChannelTypeFeatures::only_static_remote_key(); - let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 1_000_000, 42); + let (chan_id, tx, _) = create_funding_transaction(&nodes[0], &node_b_id, 1_000_000, 
42); if on_holder_tx { - let mut node_0_per_peer_lock; - let mut node_0_peer_state_lock; - let channel = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, temporary_channel_id); + let mut per_peer_lock; + let mut peer_state_lock; + let channel = get_channel_ref!(nodes[0], nodes[1], per_peer_lock, peer_state_lock, chan_id); if let Some(mut chan) = channel.as_unfunded_outbound_v1_mut() { chan.context.holder_dust_limit_satoshis = 546; } else { @@ -10480,17 +9846,25 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e } } - nodes[0].node.funding_transaction_generated(temporary_channel_id, nodes[1].node.get_our_node_id(), tx.clone()).unwrap(); - nodes[1].node.handle_funding_created(nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id())); - check_added_monitors!(nodes[1], 1); - expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id()); + nodes[0].node.funding_transaction_generated(chan_id, node_b_id, tx.clone()).unwrap(); + nodes[1].node.handle_funding_created( + node_a_id, + &get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, node_b_id), + ); + check_added_monitors(&nodes[1], 1); + expect_channel_pending_event(&nodes[1], &node_a_id); - nodes[0].node.handle_funding_signed(nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id())); - check_added_monitors!(nodes[0], 1); - expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id()); + nodes[0].node.handle_funding_signed( + node_b_id, + &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, node_a_id), + ); + check_added_monitors(&nodes[0], 1); + expect_channel_pending_event(&nodes[0], &node_b_id); - let (channel_ready, channel_id) = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &tx); - let (announcement, as_update, bs_update) = 
create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &channel_ready); + let (channel_ready, channel_id) = + create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &tx); + let (announcement, as_update, bs_update) = + create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &channel_ready); update_nodes_with_chan_announce(&nodes, 0, 1, &announcement, &as_update, &bs_update); { @@ -10504,26 +9878,41 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e let (dust_buffer_feerate, max_dust_htlc_exposure_msat) = { let per_peer_state = nodes[0].node.per_peer_state.read().unwrap(); - let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap(); + let chan_lock = per_peer_state.get(&node_b_id).unwrap().lock().unwrap(); let chan = chan_lock.channel_by_id.get(&channel_id).unwrap(); - (chan.context().get_dust_buffer_feerate(None) as u64, - chan.context().get_max_dust_htlc_exposure_msat(253)) + ( + chan.context().get_dust_buffer_feerate(None) as u64, + chan.context().get_max_dust_htlc_exposure_msat(253), + ) }; assert_eq!(dust_buffer_feerate, expected_dust_buffer_feerate as u64); - let dust_outbound_htlc_on_holder_tx_msat: u64 = (dust_buffer_feerate * htlc_timeout_tx_weight(&channel_type_features) / 1000 + open_channel.common_fields.dust_limit_satoshis - 1) * 1000; - let dust_outbound_htlc_on_holder_tx: u64 = max_dust_htlc_exposure_msat / dust_outbound_htlc_on_holder_tx_msat; + let dust_outbound_htlc_on_holder_tx_msat: u64 = + (dust_buffer_feerate * htlc_timeout_tx_weight(&channel_type_features) / 1000 + + open_channel.common_fields.dust_limit_satoshis + - 1) * 1000; + let dust_outbound_htlc_on_holder_tx: u64 = + max_dust_htlc_exposure_msat / dust_outbound_htlc_on_holder_tx_msat; // Substract 3 sats for multiplier and 2 sats for fixed limit to make sure we are 50% below the dust limit. // This is to make sure we fully use the dust limit. 
If we don't, we could end up with `dust_ibd_htlc_on_holder_tx` being 1 // while `max_dust_htlc_exposure_msat` is not equal to `dust_outbound_htlc_on_holder_tx_msat`. - let dust_inbound_htlc_on_holder_tx_msat: u64 = (dust_buffer_feerate * htlc_success_tx_weight(&channel_type_features) / 1000 + open_channel.common_fields.dust_limit_satoshis - if multiplier_dust_limit { 3 } else { 2 }) * 1000; - let dust_inbound_htlc_on_holder_tx: u64 = max_dust_htlc_exposure_msat / dust_inbound_htlc_on_holder_tx_msat; + let dust_inbound_htlc_on_holder_tx_msat: u64 = + (dust_buffer_feerate * htlc_success_tx_weight(&channel_type_features) / 1000 + + open_channel.common_fields.dust_limit_satoshis + - if multiplier_dust_limit { 3 } else { 2 }) + * 1000; + let dust_inbound_htlc_on_holder_tx: u64 = + max_dust_htlc_exposure_msat / dust_inbound_htlc_on_holder_tx_msat; // This test was written with a fixed dust value here, which we retain, but assert that it is, // indeed, dust on both transactions. let dust_htlc_on_counterparty_tx: u64 = 4; let dust_htlc_on_counterparty_tx_msat: u64 = 1_250_000; - let calcd_dust_htlc_on_counterparty_tx_msat: u64 = (dust_buffer_feerate * htlc_timeout_tx_weight(&channel_type_features) / 1000 + open_channel.common_fields.dust_limit_satoshis - if multiplier_dust_limit { 3 } else { 2 }) * 1000; + let calcd_dust_htlc_on_counterparty_tx_msat: u64 = + (dust_buffer_feerate * htlc_timeout_tx_weight(&channel_type_features) / 1000 + + open_channel.common_fields.dust_limit_satoshis + - if multiplier_dust_limit { 3 } else { 2 }) + * 1000; assert!(dust_htlc_on_counterparty_tx_msat < dust_inbound_htlc_on_holder_tx_msat); assert!(dust_htlc_on_counterparty_tx_msat < calcd_dust_htlc_on_counterparty_tx_msat); @@ -10560,35 +9949,55 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e } if exposure_breach_event == ExposureEvent::AtHTLCForward { - route.paths[0].hops.last_mut().unwrap().fee_msat = - if on_holder_tx { 
dust_outbound_htlc_on_holder_tx_msat } else { dust_htlc_on_counterparty_tx_msat + 1 }; + route.paths[0].hops.last_mut().unwrap().fee_msat = if on_holder_tx { + dust_outbound_htlc_on_holder_tx_msat + } else { + dust_htlc_on_counterparty_tx_msat + 1 + }; // With default dust exposure: 5000 sats if on_holder_tx { - unwrap_send_err!(nodes[0], nodes[0].node.send_payment_with_route(route, payment_hash, - RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0) - ), true, APIError::ChannelUnavailable { .. }, {}); + let onion = RecipientOnionFields::secret_only(payment_secret); + let id = PaymentId(payment_hash.0); + let res = nodes[0].node.send_payment_with_route(route, payment_hash, onion, id); + unwrap_send_err!(nodes[0], res, true, APIError::ChannelUnavailable { .. }, {}); } else { - unwrap_send_err!(nodes[0], nodes[0].node.send_payment_with_route(route, payment_hash, - RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0) - ), true, APIError::ChannelUnavailable { .. }, {}); + let onion = RecipientOnionFields::secret_only(payment_secret); + let id = PaymentId(payment_hash.0); + let res = nodes[0].node.send_payment_with_route(route, payment_hash, onion, id); + unwrap_send_err!(nodes[0], res, true, APIError::ChannelUnavailable { .. 
}, {}); } } else if exposure_breach_event == ExposureEvent::AtHTLCReception { - let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], if on_holder_tx { dust_inbound_htlc_on_holder_tx_msat } else { dust_htlc_on_counterparty_tx_msat + 4 }); - nodes[1].node.send_payment_with_route(route, payment_hash, - RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap(); - check_added_monitors!(nodes[1], 1); + let amount_msats = if on_holder_tx { + dust_inbound_htlc_on_holder_tx_msat + } else { + dust_htlc_on_counterparty_tx_msat + 4 + }; + let (route, payment_hash, _, payment_secret) = + get_route_and_payment_hash!(nodes[1], nodes[0], amount_msats); + + let onion = RecipientOnionFields::secret_only(payment_secret); + let id = PaymentId(payment_hash.0); + nodes[1].node.send_payment_with_route(route, payment_hash, onion, id).unwrap(); + check_added_monitors(&nodes[1], 1); + let mut events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let payment_event = SendEvent::from_event(events.remove(0)); - nodes[0].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &payment_event.msgs[0]); + nodes[0].node.handle_update_add_htlc(node_b_id, &payment_event.msgs[0]); commitment_signed_dance!(nodes[0], nodes[1], payment_event.commitment_msg, false); expect_pending_htlcs_forwardable!(nodes[0]); - expect_htlc_handling_failed_destinations!(nodes[0].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::Receive { payment_hash }]); + expect_htlc_handling_failed_destinations!( + nodes[0].node.get_and_clear_pending_events(), + &[HTLCHandlingFailureType::Receive { payment_hash }] + ); // With default dust exposure: 5000 sats if on_holder_tx { // Outbound dust balance: 6399 sats - let dust_inbound_overflow = dust_inbound_htlc_on_holder_tx_msat * (dust_inbound_htlc_on_holder_tx + 1); - let dust_outbound_overflow = dust_outbound_htlc_on_holder_tx_msat * 
dust_outbound_htlc_on_holder_tx + dust_inbound_htlc_on_holder_tx_msat; + let dust_inbound_overflow = + dust_inbound_htlc_on_holder_tx_msat * (dust_inbound_htlc_on_holder_tx + 1); + let dust_outbound_overflow = dust_outbound_htlc_on_holder_tx_msat + * dust_outbound_htlc_on_holder_tx + + dust_inbound_htlc_on_holder_tx_msat; nodes[0].logger.assert_log("lightning::ln::channel", format!("Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx", if dust_outbound_balance { dust_outbound_overflow } else { dust_inbound_overflow }, max_dust_htlc_exposure_msat), 1); } else { // Outbound dust balance: 5200 sats @@ -10603,17 +10012,22 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e // we need to add a lot of HTLCs that will become dust at the new feerate // to cross the threshold. for _ in 0..AT_FEE_OUTBOUND_HTLCS { - let (_, payment_hash, payment_secret) = get_payment_preimage_hash(&nodes[1], Some(1_000), None); - nodes[0].node.send_payment_with_route(route.clone(), payment_hash, - RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap(); + let (_, hash, payment_secret) = get_payment_preimage_hash(&nodes[1], Some(1_000), None); + let onion = RecipientOnionFields::secret_only(payment_secret); + let id = PaymentId(hash.0); + nodes[0].node.send_payment_with_route(route.clone(), hash, onion, id).unwrap(); } { let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); *feerate_lock = *feerate_lock * 10; } nodes[0].node.timer_tick_occurred(); - check_added_monitors!(nodes[0], 1); - nodes[0].logger.assert_log_contains("lightning::ln::channel", "Cannot afford to send new feerate at 2530 without infringing max dust htlc exposure", 1); + check_added_monitors(&nodes[0], 1); + nodes[0].logger.assert_log_contains( + "lightning::ln::channel", + "Cannot afford to send new feerate at 2530 without infringing max dust htlc exposure", + 1, + ); } let _ = 
nodes[0].node.get_and_clear_pending_msg_events(); @@ -10621,24 +10035,98 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e added_monitors.clear(); } -fn do_test_max_dust_htlc_exposure_by_threshold_type(multiplier_dust_limit: bool, apply_excess_fee: bool) { - do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCForward, true, multiplier_dust_limit, apply_excess_fee); - do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCForward, true, multiplier_dust_limit, apply_excess_fee); - do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCReception, true, multiplier_dust_limit, apply_excess_fee); - do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCReception, false, multiplier_dust_limit, apply_excess_fee); - do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCForward, false, multiplier_dust_limit, apply_excess_fee); - do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCReception, false, multiplier_dust_limit, apply_excess_fee); - do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCReception, true, multiplier_dust_limit, apply_excess_fee); - do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCForward, false, multiplier_dust_limit, apply_excess_fee); +fn do_test_max_dust_htlc_exposure_by_threshold_type( + multiplier_dust_limit: bool, apply_excess_fee: bool, +) { + do_test_max_dust_htlc_exposure( + true, + ExposureEvent::AtHTLCForward, + true, + multiplier_dust_limit, + apply_excess_fee, + ); + do_test_max_dust_htlc_exposure( + false, + ExposureEvent::AtHTLCForward, + true, + multiplier_dust_limit, + apply_excess_fee, + ); + do_test_max_dust_htlc_exposure( + false, + ExposureEvent::AtHTLCReception, + true, + multiplier_dust_limit, + apply_excess_fee, + ); + do_test_max_dust_htlc_exposure( + false, + ExposureEvent::AtHTLCReception, + false, + multiplier_dust_limit, + apply_excess_fee, + ); + do_test_max_dust_htlc_exposure( + true, + ExposureEvent::AtHTLCForward, + false, + 
multiplier_dust_limit, + apply_excess_fee, + ); + do_test_max_dust_htlc_exposure( + true, + ExposureEvent::AtHTLCReception, + false, + multiplier_dust_limit, + apply_excess_fee, + ); + do_test_max_dust_htlc_exposure( + true, + ExposureEvent::AtHTLCReception, + true, + multiplier_dust_limit, + apply_excess_fee, + ); + do_test_max_dust_htlc_exposure( + false, + ExposureEvent::AtHTLCForward, + false, + multiplier_dust_limit, + apply_excess_fee, + ); if !multiplier_dust_limit && !apply_excess_fee { // Because non-dust HTLC transaction fees are included in the dust exposure, trying to // increase the fee to hit a higher dust exposure with a // `MaxDustHTLCExposure::FeeRateMultiplier` is no longer super practical, so we skip these // in the `multiplier_dust_limit` case. - do_test_max_dust_htlc_exposure(true, ExposureEvent::AtUpdateFeeOutbound, true, multiplier_dust_limit, apply_excess_fee); - do_test_max_dust_htlc_exposure(true, ExposureEvent::AtUpdateFeeOutbound, false, multiplier_dust_limit, apply_excess_fee); - do_test_max_dust_htlc_exposure(false, ExposureEvent::AtUpdateFeeOutbound, false, multiplier_dust_limit, apply_excess_fee); - do_test_max_dust_htlc_exposure(false, ExposureEvent::AtUpdateFeeOutbound, true, multiplier_dust_limit, apply_excess_fee); + do_test_max_dust_htlc_exposure( + true, + ExposureEvent::AtUpdateFeeOutbound, + true, + multiplier_dust_limit, + apply_excess_fee, + ); + do_test_max_dust_htlc_exposure( + true, + ExposureEvent::AtUpdateFeeOutbound, + false, + multiplier_dust_limit, + apply_excess_fee, + ); + do_test_max_dust_htlc_exposure( + false, + ExposureEvent::AtUpdateFeeOutbound, + false, + multiplier_dust_limit, + apply_excess_fee, + ); + do_test_max_dust_htlc_exposure( + false, + ExposureEvent::AtUpdateFeeOutbound, + true, + multiplier_dust_limit, + apply_excess_fee, + ); } } @@ -10669,8 +10157,7 @@ pub fn test_nondust_htlc_excess_fees_are_dust() { let mut config = test_default_channel_config(); // Set the dust limit to the default value - 
config.channel_config.max_dust_htlc_exposure = - MaxDustHTLCExposure::FeeRateMultiplier(10_000); + config.channel_config.max_dust_htlc_exposure = MaxDustHTLCExposure::FeeRateMultiplier(10_000); // Make sure the HTLC limits don't get in the way let chan_ty = ChannelTypeFeatures::only_static_remote_key(); config.channel_handshake_limits.min_max_accepted_htlcs = chan_utils::max_htlcs(&chan_ty); @@ -10678,11 +10165,20 @@ pub fn test_nondust_htlc_excess_fees_are_dust() { config.channel_handshake_config.our_htlc_minimum_msat = 1; config.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 100; - let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[Some(config.clone()), Some(config.clone()), Some(config)]); + let node_chanmgrs = create_node_chanmgrs( + 3, + &node_cfgs, + &[Some(config.clone()), Some(config.clone()), Some(config)], + ); let nodes = create_network(3, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + // Leave enough on the funder side to let it pay the mining fees for a commit tx with tons of htlcs - let chan_id_1 = create_announced_chan_between_nodes_with_value(&nodes, 1, 0, 1_000_000, 750_000_000).2; + let chan_id_1 = + create_announced_chan_between_nodes_with_value(&nodes, 1, 0, 1_000_000, 750_000_000).2; // First get the channel one HTLC_VALUE HTLC away from the dust limit by sending dust HTLCs // repeatedly until we run out of space. 
@@ -10692,18 +10188,27 @@ pub fn test_nondust_htlc_excess_fees_are_dust() { while nodes[0].node.list_channels()[0].next_outbound_htlc_minimum_msat == 0 { route_payment(&nodes[0], &[&nodes[1]], HTLC_VALUE); } - assert_ne!(nodes[0].node.list_channels()[0].next_outbound_htlc_limit_msat, 0, - "We don't want to run out of ability to send because of some non-dust limit"); - assert!(nodes[0].node.list_channels()[0].pending_outbound_htlcs.len() < 10, - "We should be able to fill our dust limit without too many HTLCs"); + assert_ne!( + nodes[0].node.list_channels()[0].next_outbound_htlc_limit_msat, + 0, + "We don't want to run out of ability to send because of some non-dust limit" + ); + assert!( + nodes[0].node.list_channels()[0].pending_outbound_htlcs.len() < 10, + "We should be able to fill our dust limit without too many HTLCs" + ); let dust_limit = nodes[0].node.list_channels()[0].next_outbound_htlc_minimum_msat; claim_payment(&nodes[0], &[&nodes[1]], payment_preimage); - assert_ne!(nodes[0].node.list_channels()[0].next_outbound_htlc_minimum_msat, 0, - "Make sure we are able to send once we clear one HTLC"); + assert_ne!( + nodes[0].node.list_channels()[0].next_outbound_htlc_minimum_msat, + 0, + "Make sure we are able to send once we clear one HTLC" + ); // Skip the router complaint when node 0 will attempt to pay node 1 - let (route_0_1, payment_hash_0_1, _, payment_secret_0_1) = get_route_and_payment_hash!(nodes[0], nodes[1], dust_limit * 2); + let (route_0_1, payment_hash_0_1, _, payment_secret_0_1) = + get_route_and_payment_hash!(nodes[0], nodes[1], dust_limit * 2); assert_eq!(nodes[0].node.list_channels().len(), 1); assert_eq!(nodes[1].node.list_channels().len(), 1); @@ -10715,8 +10220,10 @@ pub fn test_nondust_htlc_excess_fees_are_dust() { let commitment_tx_per_htlc_cost = htlc_success_tx_weight(&ChannelTypeFeatures::empty()) * EXCESS_FEERATE as u64; let max_htlcs_remaining = dust_limit * 2 / commitment_tx_per_htlc_cost; - assert!(max_htlcs_remaining < 
chan_utils::max_htlcs(&chan_ty).into(), - "We should be able to fill our dust limit without too many HTLCs"); + assert!( + max_htlcs_remaining < chan_utils::max_htlcs(&chan_ty).into(), + "We should be able to fill our dust limit without too many HTLCs" + ); for i in 0..max_htlcs_remaining + 1 { assert_ne!(i, max_htlcs_remaining); if nodes[0].node.list_channels()[0].next_outbound_htlc_limit_msat <= dust_limit { @@ -10734,30 +10241,35 @@ pub fn test_nondust_htlc_excess_fees_are_dust() { assert_eq!(nodes[1].node.list_channels()[0].pending_outbound_htlcs.len(), 0); // Send an additional non-dust htlc from 1 to 0, and check the complaint - let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], dust_limit * 2); - nodes[1].node.send_payment_with_route(route, payment_hash, - RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap(); - check_added_monitors!(nodes[1], 1); + let (route, payment_hash, _, payment_secret) = + get_route_and_payment_hash!(nodes[1], nodes[0], dust_limit * 2); + let onion = RecipientOnionFields::secret_only(payment_secret); + let id = PaymentId(payment_hash.0); + nodes[1].node.send_payment_with_route(route, payment_hash, onion, id).unwrap(); + check_added_monitors(&nodes[1], 1); let mut events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let payment_event = SendEvent::from_event(events.remove(0)); - nodes[0].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &payment_event.msgs[0]); + nodes[0].node.handle_update_add_htlc(node_b_id, &payment_event.msgs[0]); commitment_signed_dance!(nodes[0], nodes[1], payment_event.commitment_msg, false); expect_pending_htlcs_forwardable!(nodes[0]); - expect_htlc_handling_failed_destinations!(nodes[0].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::Receive { payment_hash }]); + expect_htlc_handling_failed_destinations!( + nodes[0].node.get_and_clear_pending_events(), + 
&[HTLCHandlingFailureType::Receive { payment_hash }] + ); nodes[0].logger.assert_log("lightning::ln::channel", format!("Cannot accept value that would put our total dust exposure at {} over the limit {} on counterparty commitment tx", 2535000, 2530000), 1); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); // Clear the failed htlc - let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); + let updates = get_htlc_update_msgs!(nodes[0], node_b_id); assert!(updates.update_add_htlcs.is_empty()); assert!(updates.update_fulfill_htlcs.is_empty()); assert_eq!(updates.update_fail_htlcs.len(), 1); assert!(updates.update_fail_malformed_htlcs.is_empty()); assert!(updates.update_fee.is_none()); - nodes[1].node.handle_update_fail_htlc(nodes[0].node.get_our_node_id(), &updates.update_fail_htlcs[0]); + nodes[1].node.handle_update_fail_htlc(node_a_id, &updates.update_fail_htlcs[0]); commitment_signed_dance!(nodes[1], nodes[0], updates.commitment_signed, false); expect_payment_failed!(nodes[1], payment_hash, false); @@ -10767,9 +10279,10 @@ pub fn test_nondust_htlc_excess_fees_are_dust() { assert_eq!(nodes[1].node.list_channels()[0].pending_outbound_htlcs.len(), 0); // Send an additional non-dust htlc from 0 to 1 using the pre-calculated route above, and check the immediate complaint - unwrap_send_err!(nodes[0], nodes[0].node.send_payment_with_route(route_0_1, payment_hash_0_1, - RecipientOnionFields::secret_only(payment_secret_0_1), PaymentId(payment_hash_0_1.0) - ), true, APIError::ChannelUnavailable { .. }, {}); + let onion = RecipientOnionFields::secret_only(payment_secret_0_1); + let id = PaymentId(payment_hash_0_1.0); + let res = nodes[0].node.send_payment_with_route(route_0_1, payment_hash_0_1, onion, id); + unwrap_send_err!(nodes[0], res, true, APIError::ChannelUnavailable { .. 
}, {}); nodes[0].logger.assert_log("lightning::ln::outbound_payment", format!("Failed to send along path due to error: Channel unavailable: Cannot send more than our next-HTLC maximum - {} msat", 2325000), 1); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); @@ -10789,21 +10302,22 @@ pub fn test_nondust_htlc_excess_fees_are_dust() { check_added_monitors(&nodes[2], 1); let send = SendEvent::from_node(&nodes[2]); - nodes[0].node.handle_update_add_htlc(nodes[2].node.get_our_node_id(), &send.msgs[0]); + nodes[0].node.handle_update_add_htlc(node_c_id, &send.msgs[0]); commitment_signed_dance!(nodes[0], nodes[2], send.commitment_msg, false, true); expect_pending_htlcs_forwardable!(nodes[0]); check_added_monitors(&nodes[0], 1); - let node_id_1 = nodes[1].node.get_our_node_id(); + let node_id_1 = node_b_id; expect_htlc_handling_failed_destinations!( nodes[0].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::Forward { node_id: Some(node_id_1), channel_id: chan_id_1 }] ); - let fail = get_htlc_update_msgs(&nodes[0], &nodes[2].node.get_our_node_id()); - nodes[2].node.handle_update_fail_htlc(nodes[0].node.get_our_node_id(), &fail.update_fail_htlcs[0]); + let fail = get_htlc_update_msgs(&nodes[0], &node_c_id); + nodes[2].node.handle_update_fail_htlc(node_a_id, &fail.update_fail_htlcs[0]); commitment_signed_dance!(nodes[2], nodes[0], fail.commitment_signed, false); - expect_payment_failed_conditions(&nodes[2], payment_hash, false, PaymentFailedConditions::new()); + let conditions = PaymentFailedConditions::new(); + expect_payment_failed_conditions(&nodes[2], payment_hash, false, conditions); } fn do_test_nondust_htlc_fees_dust_exposure_delta(features: ChannelTypeFeatures) { @@ -10844,9 +10358,12 @@ fn do_test_nondust_htlc_fees_dust_exposure_delta(features: ChannelTypeFeatures) // Set `expected_dust_exposure_msat` to match the calculation in `FundedChannel::can_accept_incoming_htlc` // only_static_remote_key: 500_492 + 22 * (724 + 172) / 1000 * 
1000 + 22 * 663 / 1000 * 1000 = 533_492 // anchors_zero_htlc_fee: 500_492 + 22 * (1_124 + 172) / 1000 * 1000 = 528_492 - let mut expected_dust_exposure_msat = BASE_DUST_EXPOSURE_MSAT + EXCESS_FEERATE * (commitment_tx_base_weight(&features) + COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000 * 1000; + let mut expected_dust_exposure_msat = BASE_DUST_EXPOSURE_MSAT + + EXCESS_FEERATE * (commitment_tx_base_weight(&features) + COMMITMENT_TX_WEIGHT_PER_HTLC) + / 1000 * 1000; if features == ChannelTypeFeatures::only_static_remote_key() { - expected_dust_exposure_msat += EXCESS_FEERATE * htlc_timeout_tx_weight(&features) / 1000 * 1000; + expected_dust_exposure_msat += + EXCESS_FEERATE * htlc_timeout_tx_weight(&features) / 1000 * 1000; assert_eq!(expected_dust_exposure_msat, 533_492); } else { assert_eq!(expected_dust_exposure_msat, 528_492); @@ -10861,23 +10378,29 @@ fn do_test_nondust_htlc_fees_dust_exposure_delta(features: ChannelTypeFeatures) // Set node 1's max dust htlc exposure to 1msat below `expected_dust_exposure_msat` let mut fixed_limit_config = default_config.clone(); - fixed_limit_config.channel_config.max_dust_htlc_exposure = MaxDustHTLCExposure::FixedLimitMsat(expected_dust_exposure_msat - 1); + fixed_limit_config.channel_config.max_dust_htlc_exposure = + MaxDustHTLCExposure::FixedLimitMsat(expected_dust_exposure_msat - 1); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(default_config), Some(fixed_limit_config)]); + let node_chanmgrs = + create_node_chanmgrs(2, &node_cfgs, &[Some(default_config), Some(fixed_limit_config)]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let chan_id = create_chan_between_nodes_with_value(&nodes[0], &nodes[1], 100_000, 50_000_000).3; let node_1_dust_buffer_feerate = { let per_peer_state = nodes[1].node.per_peer_state.read().unwrap(); - let chan_lock = 
per_peer_state.get(&nodes[0].node.get_our_node_id()).unwrap().lock().unwrap(); + let chan_lock = per_peer_state.get(&node_a_id).unwrap().lock().unwrap(); let chan = chan_lock.channel_by_id.get(&chan_id).unwrap(); chan.context().get_dust_buffer_feerate(None) as u64 }; // Skip the router complaint when node 1 will attempt to pay node 0 - let (route_1_0, payment_hash_1_0, _, payment_secret_1_0) = get_route_and_payment_hash!(nodes[1], nodes[0], NON_DUST_HTLC_MSAT); + let (route_1_0, payment_hash_1_0, _, payment_secret_1_0) = + get_route_and_payment_hash!(nodes[1], nodes[0], NON_DUST_HTLC_MSAT); // Bring node 1's dust htlc exposure up to `BASE_DUST_EXPOSURE_MSAT` for _ in 0..DUST_HTLC_COUNT { @@ -10893,30 +10416,35 @@ fn do_test_nondust_htlc_fees_dust_exposure_delta(features: ChannelTypeFeatures) assert_eq!(nodes[1].node.list_channels()[0].pending_inbound_htlcs.len(), DUST_HTLC_COUNT); // Send an additional non-dust htlc from 0 to 1, and check the complaint - let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], NON_DUST_HTLC_MSAT); - nodes[0].node.send_payment_with_route(route, payment_hash, - RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap(); - check_added_monitors!(nodes[0], 1); + let (route, payment_hash, _, payment_secret) = + get_route_and_payment_hash!(nodes[0], nodes[1], NON_DUST_HTLC_MSAT); + let onion = RecipientOnionFields::secret_only(payment_secret); + let id = PaymentId(payment_hash.0); + nodes[0].node.send_payment_with_route(route, payment_hash, onion, id).unwrap(); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let payment_event = SendEvent::from_event(events.remove(0)); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); + nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); commitment_signed_dance!(nodes[1], nodes[0], 
payment_event.commitment_msg, false); expect_pending_htlcs_forwardable!(nodes[1]); - expect_htlc_handling_failed_destinations!(nodes[1].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::Receive { payment_hash }]); + expect_htlc_handling_failed_destinations!( + nodes[1].node.get_and_clear_pending_events(), + &[HTLCHandlingFailureType::Receive { payment_hash }] + ); nodes[1].logger.assert_log("lightning::ln::channel", format!("Cannot accept value that would put our total dust exposure at {} over the limit {} on counterparty commitment tx", expected_dust_exposure_msat, expected_dust_exposure_msat - 1), 1); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); // Clear the failed htlc - let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + let updates = get_htlc_update_msgs!(nodes[1], node_a_id); assert!(updates.update_add_htlcs.is_empty()); assert!(updates.update_fulfill_htlcs.is_empty()); assert_eq!(updates.update_fail_htlcs.len(), 1); assert!(updates.update_fail_malformed_htlcs.is_empty()); assert!(updates.update_fee.is_none()); - nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]); + nodes[0].node.handle_update_fail_htlc(node_b_id, &updates.update_fail_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, false); expect_payment_failed!(nodes[0], payment_hash, false); @@ -10929,10 +10457,13 @@ fn do_test_nondust_htlc_fees_dust_exposure_delta(features: ChannelTypeFeatures) assert_eq!(nodes[1].node.list_channels()[0].pending_inbound_htlcs.len(), DUST_HTLC_COUNT); // Set node 1's max dust htlc exposure equal to the `expected_dust_exposure_msat` - nodes[1].node.update_partial_channel_config(&nodes[0].node.get_our_node_id(), &[chan_id], &ChannelConfigUpdate { - max_dust_htlc_exposure_msat: Some(MaxDustHTLCExposure::FixedLimitMsat(expected_dust_exposure_msat)), + let config = ChannelConfigUpdate { + max_dust_htlc_exposure_msat: 
Some(MaxDustHTLCExposure::FixedLimitMsat( + expected_dust_exposure_msat, + )), ..ChannelConfigUpdate::default() - }).unwrap(); + }; + nodes[1].node.update_partial_channel_config(&node_a_id, &[chan_id], &config).unwrap(); // Check a successful payment send_payment(&nodes[0], &[&nodes[1]], NON_DUST_HTLC_MSAT); @@ -10948,24 +10479,34 @@ fn do_test_nondust_htlc_fees_dust_exposure_delta(features: ChannelTypeFeatures) // The `expected_dust_exposure_msat` for the outbound htlc changes in the non-anchor case, as the htlc success and timeout transactions have different weights // only_static_remote_key: 500_492 + 22 * (724 + 172) / 1000 * 1000 + 22 * 703 / 1000 * 1000 = 534_492 if features == ChannelTypeFeatures::only_static_remote_key() { - expected_dust_exposure_msat = BASE_DUST_EXPOSURE_MSAT + EXCESS_FEERATE * (commitment_tx_base_weight(&features) + COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000 * 1000 + EXCESS_FEERATE * htlc_success_tx_weight(&features) / 1000 * 1000; + expected_dust_exposure_msat = BASE_DUST_EXPOSURE_MSAT + + EXCESS_FEERATE + * (commitment_tx_base_weight(&features) + COMMITMENT_TX_WEIGHT_PER_HTLC) + / 1000 * 1000 + + EXCESS_FEERATE * htlc_success_tx_weight(&features) / 1000 * 1000; assert_eq!(expected_dust_exposure_msat, 534_492); } else { assert_eq!(expected_dust_exposure_msat, 528_492); } // Set node 1's max dust htlc exposure to 1msat below `expected_dust_exposure_msat` - nodes[1].node.update_partial_channel_config(&nodes[0].node.get_our_node_id(), &[chan_id], &ChannelConfigUpdate { - max_dust_htlc_exposure_msat: Some(MaxDustHTLCExposure::FixedLimitMsat(expected_dust_exposure_msat - 1)), + let update = ChannelConfigUpdate { + max_dust_htlc_exposure_msat: Some(MaxDustHTLCExposure::FixedLimitMsat( + expected_dust_exposure_msat - 1, + )), ..ChannelConfigUpdate::default() - }).unwrap(); + }; + nodes[1].node.update_partial_channel_config(&node_a_id, &[chan_id], &update).unwrap(); // Send an additional non-dust htlc from 1 to 0 using the pre-calculated route 
above, and check the immediate complaint - unwrap_send_err!(nodes[1], nodes[1].node.send_payment_with_route(route_1_0, payment_hash_1_0, - RecipientOnionFields::secret_only(payment_secret_1_0), PaymentId(payment_hash_1_0.0) - ), true, APIError::ChannelUnavailable { .. }, {}); + let onion = RecipientOnionFields::secret_only(payment_secret_1_0); + let id = PaymentId(payment_hash_1_0.0); + let res = nodes[1].node.send_payment_with_route(route_1_0, payment_hash_1_0, onion, id); + unwrap_send_err!(nodes[1], res, true, APIError::ChannelUnavailable { .. }, {}); + let dust_limit = if features == ChannelTypeFeatures::only_static_remote_key() { - MIN_CHAN_DUST_LIMIT_SATOSHIS * 1000 + htlc_success_tx_weight(&features) * node_1_dust_buffer_feerate / 1000 * 1000 + MIN_CHAN_DUST_LIMIT_SATOSHIS * 1000 + + htlc_success_tx_weight(&features) * node_1_dust_buffer_feerate / 1000 * 1000 } else { MIN_CHAN_DUST_LIMIT_SATOSHIS * 1000 }; @@ -10982,10 +10523,13 @@ fn do_test_nondust_htlc_fees_dust_exposure_delta(features: ChannelTypeFeatures) assert_eq!(nodes[1].node.list_channels()[0].pending_inbound_htlcs.len(), DUST_HTLC_COUNT); // Set node 1's max dust htlc exposure equal to `expected_dust_exposure_msat` - nodes[1].node.update_partial_channel_config(&nodes[0].node.get_our_node_id(), &[chan_id], &ChannelConfigUpdate { - max_dust_htlc_exposure_msat: Some(MaxDustHTLCExposure::FixedLimitMsat(expected_dust_exposure_msat)), + let update = ChannelConfigUpdate { + max_dust_htlc_exposure_msat: Some(MaxDustHTLCExposure::FixedLimitMsat( + expected_dust_exposure_msat, + )), ..ChannelConfigUpdate::default() - }).unwrap(); + }; + nodes[1].node.update_partial_channel_config(&node_a_id, &[chan_id], &update).unwrap(); // Check a successful payment send_payment(&nodes[1], &[&nodes[0]], NON_DUST_HTLC_MSAT); @@ -11002,7 +10546,9 @@ fn do_test_nondust_htlc_fees_dust_exposure_delta(features: ChannelTypeFeatures) #[test] fn test_nondust_htlc_fees_dust_exposure_delta() { 
do_test_nondust_htlc_fees_dust_exposure_delta(ChannelTypeFeatures::only_static_remote_key()); - do_test_nondust_htlc_fees_dust_exposure_delta(ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies()); + do_test_nondust_htlc_fees_dust_exposure_delta( + ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies(), + ); } #[xtest(feature = "_externalize_tests")] @@ -11012,37 +10558,56 @@ pub fn test_non_final_funding_tx() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap(); - let open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); - nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &open_channel_message); - let accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()); - nodes[0].node.handle_accept_channel(nodes[1].node.get_our_node_id(), &accept_channel_message); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + + let temp_channel_id = + nodes[0].node.create_channel(node_b_id, 100_000, 0, 42, None, None).unwrap(); + let open_channel_message = + get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); + nodes[1].node.handle_open_channel(node_a_id, &open_channel_message); + let accept_channel_message = + get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id); + nodes[0].node.handle_accept_channel(node_b_id, &accept_channel_message); let best_height = nodes[0].node.best_block.read().unwrap().height; let chan_id = *nodes[0].network_chan_count.borrow(); let events = nodes[0].node.get_and_clear_pending_events(); - let input = TxIn { previous_output: BitcoinOutPoint::null(), script_sig: bitcoin::ScriptBuf::new(), sequence: Sequence(1), 
witness: Witness::from_slice(&[&[1]]) }; + let input = TxIn { + previous_output: BitcoinOutPoint::null(), + script_sig: bitcoin::ScriptBuf::new(), + sequence: Sequence(1), + witness: Witness::from_slice(&[&[1]]), + }; assert_eq!(events.len(), 1); let mut tx = match events[0] { Event::FundingGenerationReady { ref channel_value_satoshis, ref output_script, .. } => { // Timelock the transaction _beyond_ the best client height + 1. - Transaction { version: Version(chan_id as i32), lock_time: LockTime::from_height(best_height + 2).unwrap(), input: vec![input], output: vec![TxOut { - value: Amount::from_sat(*channel_value_satoshis), script_pubkey: output_script.clone(), - }]} + Transaction { + version: Version(chan_id as i32), + lock_time: LockTime::from_height(best_height + 2).unwrap(), + input: vec![input], + output: vec![TxOut { + value: Amount::from_sat(*channel_value_satoshis), + script_pubkey: output_script.clone(), + }], + } }, _ => panic!("Unexpected event"), }; // Transaction should fail as it's evaluated as non-final for propagation. 
- match nodes[0].node.funding_transaction_generated(temp_channel_id, nodes[1].node.get_our_node_id(), tx.clone()) { + match nodes[0].node.funding_transaction_generated(temp_channel_id, node_b_id, tx.clone()) { Err(APIError::APIMisuseError { err }) => { assert_eq!(format!("Funding transaction absolute timelock is non-final"), err); }, - _ => panic!() + _ => panic!(), } let err = "Error in transaction funding: Misuse error: Funding transaction absolute timelock is non-final".to_owned(); - check_closed_events(&nodes[0], &[ExpectedCloseEvent::from_id_reason(temp_channel_id, false, ClosureReason::ProcessingError { err })]); - assert_eq!(get_err_msg(&nodes[0], &nodes[1].node.get_our_node_id()).data, "Failed to fund channel"); + let reason = ClosureReason::ProcessingError { err }; + let event = ExpectedCloseEvent::from_id_reason(temp_channel_id, false, reason); + check_closed_events(&nodes[0], &[event]); + assert_eq!(get_err_msg(&nodes[0], &node_b_id).data, "Failed to fund channel"); } #[xtest(feature = "_externalize_tests")] @@ -11052,159 +10617,106 @@ pub fn test_non_final_funding_tx_within_headroom() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap(); - let open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); - nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &open_channel_message); - let accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()); - nodes[0].node.handle_accept_channel(nodes[1].node.get_our_node_id(), &accept_channel_message); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + + let temp_channel_id = + nodes[0].node.create_channel(node_b_id, 100_000, 0, 42, 
None, None).unwrap(); + let open_channel_message = + get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); + nodes[1].node.handle_open_channel(node_a_id, &open_channel_message); + let accept_channel_message = + get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id); + nodes[0].node.handle_accept_channel(node_b_id, &accept_channel_message); let best_height = nodes[0].node.best_block.read().unwrap().height; let chan_id = *nodes[0].network_chan_count.borrow(); let events = nodes[0].node.get_and_clear_pending_events(); - let input = TxIn { previous_output: BitcoinOutPoint::null(), script_sig: bitcoin::ScriptBuf::new(), sequence: Sequence(1), witness: Witness::from_slice(&[[1]]) }; + let input = TxIn { + previous_output: BitcoinOutPoint::null(), + script_sig: bitcoin::ScriptBuf::new(), + sequence: Sequence(1), + witness: Witness::from_slice(&[[1]]), + }; assert_eq!(events.len(), 1); let mut tx = match events[0] { Event::FundingGenerationReady { ref channel_value_satoshis, ref output_script, .. } => { // Timelock the transaction within a +1 headroom from the best block. - Transaction { version: Version(chan_id as i32), lock_time: LockTime::from_consensus(best_height + 1), input: vec![input], output: vec![TxOut { - value: Amount::from_sat(*channel_value_satoshis), script_pubkey: output_script.clone(), - }]} + Transaction { + version: Version(chan_id as i32), + lock_time: LockTime::from_consensus(best_height + 1), + input: vec![input], + output: vec![TxOut { + value: Amount::from_sat(*channel_value_satoshis), + script_pubkey: output_script.clone(), + }], + } }, _ => panic!("Unexpected event"), }; // Transaction should be accepted if it's in a +1 headroom from best block. 
- assert!(nodes[0].node.funding_transaction_generated(temp_channel_id, nodes[1].node.get_our_node_id(), tx.clone()).is_ok()); - get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()); + nodes[0].node.funding_transaction_generated(temp_channel_id, node_b_id, tx.clone()).unwrap(); + get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, node_b_id); } -#[xtest(feature = "_externalize_tests")] -pub fn accept_busted_but_better_fee() { - // If a peer sends us a fee update that is too low, but higher than our previous channel - // feerate, we should accept it. In the future we may want to consider closing the channel - // later, but for now we only accept the update. +fn do_payment_with_custom_min_final_cltv_expiry(valid_delta: bool, use_user_hash: bool) { let mut chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - create_chan_between_nodes(&nodes[0], &nodes[1]); - - // Set nodes[1] to expect 5,000 sat/kW. - { - let mut feerate_lock = chanmon_cfgs[1].fee_estimator.sat_per_kw.lock().unwrap(); - *feerate_lock = 5000; - } - - // If nodes[0] increases their feerate, even if its not enough, nodes[1] should accept it. - { - let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); - *feerate_lock = 1000; - } - nodes[0].node.timer_tick_occurred(); - check_added_monitors!(nodes[0], 1); - - let events = nodes[0].node.get_and_clear_pending_msg_events(); - assert_eq!(events.len(), 1); - match events[0] { - MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. 
} => { - nodes[1].node.handle_update_fee(nodes[0].node.get_our_node_id(), update_fee.as_ref().unwrap()); - commitment_signed_dance!(nodes[1], nodes[0], commitment_signed, false); - }, - _ => panic!("Unexpected event"), - }; - - // If nodes[0] increases their feerate further, even if its not enough, nodes[1] should accept - // it. - { - let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); - *feerate_lock = 2000; - } - nodes[0].node.timer_tick_occurred(); - check_added_monitors!(nodes[0], 1); - - let events = nodes[0].node.get_and_clear_pending_msg_events(); - assert_eq!(events.len(), 1); - match events[0] { - MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => { - nodes[1].node.handle_update_fee(nodes[0].node.get_our_node_id(), update_fee.as_ref().unwrap()); - commitment_signed_dance!(nodes[1], nodes[0], commitment_signed, false); - }, - _ => panic!("Unexpected event"), - }; - - // However, if nodes[0] decreases their feerate, nodes[1] should reject it and close the - // channel. - { - let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); - *feerate_lock = 1000; - } - nodes[0].node.timer_tick_occurred(); - check_added_monitors!(nodes[0], 1); - - let events = nodes[0].node.get_and_clear_pending_msg_events(); - assert_eq!(events.len(), 1); - match events[0] { - MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, .. }, .. 
} => { - nodes[1].node.handle_update_fee(nodes[0].node.get_our_node_id(), update_fee.as_ref().unwrap()); - check_closed_event!(nodes[1], 1, ClosureReason::PeerFeerateTooLow { - peer_feerate_sat_per_kw: 1000, required_feerate_sat_per_kw: 5000, - }, [nodes[0].node.get_our_node_id()], 100000); - check_closed_broadcast!(nodes[1], true); - check_added_monitors!(nodes[1], 1); - }, - _ => panic!("Unexpected event"), - }; -} + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); -fn do_payment_with_custom_min_final_cltv_expiry(valid_delta: bool, use_user_hash: bool) { - let mut chanmon_cfgs = create_chanmon_cfgs(2); - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); - let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - let min_final_cltv_expiry_delta = 120; - let final_cltv_expiry_delta = if valid_delta { min_final_cltv_expiry_delta + 2 } else { - min_final_cltv_expiry_delta - 2 }; + let min_cltv_expiry_delta = 120; + let final_cltv_expiry_delta = + if valid_delta { min_cltv_expiry_delta + 2 } else { min_cltv_expiry_delta - 2 }; let recv_value = 100_000; create_chan_between_nodes(&nodes[0], &nodes[1]); - let payment_parameters = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), final_cltv_expiry_delta as u32); - let (payment_hash, payment_preimage, payment_secret) = if use_user_hash { - let (payment_preimage, payment_hash, payment_secret) = get_payment_preimage_hash!(nodes[1], - Some(recv_value), Some(min_final_cltv_expiry_delta)); - (payment_hash, payment_preimage, payment_secret) + let payment_parameters = + PaymentParameters::from_node_id(node_b_id, final_cltv_expiry_delta as u32); + let (hash, payment_preimage, secret) = if use_user_hash { + let (payment_preimage, hash, secret) = + get_payment_preimage_hash!(nodes[1], Some(recv_value), Some(min_cltv_expiry_delta)); + (hash, payment_preimage, secret) } else { - let 
(payment_hash, payment_secret) = nodes[1].node.create_inbound_payment(Some(recv_value), 7200, Some(min_final_cltv_expiry_delta)).unwrap(); - (payment_hash, nodes[1].node.get_payment_preimage(payment_hash, payment_secret).unwrap(), payment_secret) + let (hash, secret) = nodes[1] + .node + .create_inbound_payment(Some(recv_value), 7200, Some(min_cltv_expiry_delta)) + .unwrap(); + (hash, nodes[1].node.get_payment_preimage(hash, secret).unwrap(), secret) }; let route = get_route!(nodes[0], payment_parameters, recv_value).unwrap(); - nodes[0].node.send_payment_with_route(route, payment_hash, - RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap(); - check_added_monitors!(nodes[0], 1); + let onion = RecipientOnionFields::secret_only(secret); + nodes[0].node.send_payment_with_route(route, hash, onion, PaymentId(hash.0)).unwrap(); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let mut payment_event = SendEvent::from_event(events.pop().unwrap()); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); + nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); expect_pending_htlcs_forwardable!(nodes[1]); if valid_delta { - expect_payment_claimable!(nodes[1], payment_hash, payment_secret, recv_value, if use_user_hash { - None } else { Some(payment_preimage) }, nodes[1].node.get_our_node_id()); + let preimage = if use_user_hash { None } else { Some(payment_preimage) }; + expect_payment_claimable!(nodes[1], hash, secret, recv_value, preimage, node_b_id); - claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage); + claim_payment(&nodes[0], &[&nodes[1]], payment_preimage); } else { - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingFailureType::Receive { payment_hash }]); + let 
fail_type = HTLCHandlingFailureType::Receive { payment_hash: hash }; + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [fail_type]); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); - let fail_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); - nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &fail_updates.update_fail_htlcs[0]); + let fail_updates = get_htlc_update_msgs!(nodes[1], node_a_id); + nodes[0].node.handle_update_fail_htlc(node_b_id, &fail_updates.update_fail_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[1], fail_updates.commitment_signed, false, true); - expect_payment_failed!(nodes[0], payment_hash, true); + expect_payment_failed!(nodes[0], hash, true); } } @@ -11225,19 +10737,23 @@ pub fn test_disconnects_peer_awaiting_response_ticks() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + // Asserts a disconnect event is queued to the user. let check_disconnect_event = |node: &Node, should_disconnect: bool| { - let disconnect_event = node.node.get_and_clear_pending_msg_events().iter().find_map(|event| - if let MessageSendEvent::HandleError { action, .. } = event { - if let msgs::ErrorAction::DisconnectPeerWithWarning { .. } = action { - Some(()) + let disconnect_event = + node.node.get_and_clear_pending_msg_events().iter().find_map(|event| { + if let MessageSendEvent::HandleError { action, .. } = event { + if let msgs::ErrorAction::DisconnectPeerWithWarning { .. 
} = action { + Some(()) + } else { + None + } } else { None } - } else { - None - } - ); + }); assert_eq!(disconnect_event.is_some(), should_disconnect); }; @@ -11267,59 +10783,70 @@ pub fn test_disconnects_peer_awaiting_response_ticks() { // We'll start by performing a fee update with Alice (nodes[0]) on the channel. *nodes[0].fee_estimator.sat_per_kw.lock().unwrap() *= 2; nodes[0].node.timer_tick_occurred(); - check_added_monitors!(&nodes[0], 1); - let alice_fee_update = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); - nodes[1].node.handle_update_fee(nodes[0].node.get_our_node_id(), alice_fee_update.update_fee.as_ref().unwrap()); - nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &alice_fee_update.commitment_signed); - check_added_monitors!(&nodes[1], 1); + check_added_monitors(&&nodes[0], 1); + let alice_fee_update = get_htlc_update_msgs(&nodes[0], &node_b_id); + nodes[1].node.handle_update_fee(node_a_id, alice_fee_update.update_fee.as_ref().unwrap()); + nodes[1] + .node + .handle_commitment_signed_batch_test(node_a_id, &alice_fee_update.commitment_signed); + check_added_monitors(&&nodes[1], 1); // This will prompt Bob (nodes[1]) to respond with his `CommitmentSigned` and `RevokeAndACK`. 
- let (bob_revoke_and_ack, bob_commitment_signed) = get_revoke_commit_msgs!(&nodes[1], nodes[0].node.get_our_node_id()); - nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bob_revoke_and_ack); - check_added_monitors!(&nodes[0], 1); - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bob_commitment_signed); + let (bob_revoke_and_ack, bob_commitment_signed) = get_revoke_commit_msgs!(&nodes[1], node_a_id); + nodes[0].node.handle_revoke_and_ack(node_b_id, &bob_revoke_and_ack); + check_added_monitors(&&nodes[0], 1); + nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bob_commitment_signed); check_added_monitors(&nodes[0], 1); // Alice then needs to send her final `RevokeAndACK` to complete the commitment dance. We // pretend Bob hasn't received the message and check whether he'll disconnect Alice after // reaching `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`. - let alice_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); + let alice_revoke_and_ack = + get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); check_disconnect(&nodes[1]); // Now, we'll reconnect them to test awaiting a `ChannelReestablish` message. // // Note that since the commitment dance didn't complete above, Alice is expected to resend her // final `RevokeAndACK` to Bob to complete it. 
- nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); - nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id()); + nodes[0].node.peer_disconnected(node_b_id); + nodes[1].node.peer_disconnected(node_a_id); let bob_init = msgs::Init { - features: nodes[1].node.init_features(), networks: None, remote_network_address: None + features: nodes[1].node.init_features(), + networks: None, + remote_network_address: None, }; - nodes[0].node.peer_connected(nodes[1].node.get_our_node_id(), &bob_init, true).unwrap(); + nodes[0].node.peer_connected(node_b_id, &bob_init, true).unwrap(); let alice_init = msgs::Init { - features: nodes[0].node.init_features(), networks: None, remote_network_address: None + features: nodes[0].node.init_features(), + networks: None, + remote_network_address: None, }; - nodes[1].node.peer_connected(nodes[0].node.get_our_node_id(), &alice_init, true).unwrap(); + nodes[1].node.peer_connected(node_a_id, &alice_init, true).unwrap(); // Upon reconnection, Alice sends her `ChannelReestablish` to Bob. Alice, however, hasn't // received Bob's yet, so she should disconnect him after reaching // `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`. - let alice_channel_reestablish = get_event_msg!( - nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id() - ); - nodes[1].node.handle_channel_reestablish(nodes[0].node.get_our_node_id(), &alice_channel_reestablish); + let alice_channel_reestablish = + get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, node_b_id); + nodes[1].node.handle_channel_reestablish(node_a_id, &alice_channel_reestablish); check_disconnect(&nodes[0]); // Bob now sends his `ChannelReestablish` to Alice to resume the channel and consider it "live". 
- let bob_channel_reestablish = nodes[1].node.get_and_clear_pending_msg_events().iter().find_map(|event| - if let MessageSendEvent::SendChannelReestablish { node_id, msg } = event { - assert_eq!(*node_id, nodes[0].node.get_our_node_id()); - Some(msg.clone()) - } else { - None - } - ).unwrap(); - nodes[0].node.handle_channel_reestablish(nodes[1].node.get_our_node_id(), &bob_channel_reestablish); + let bob_channel_reestablish = nodes[1] + .node + .get_and_clear_pending_msg_events() + .iter() + .find_map(|event| { + if let MessageSendEvent::SendChannelReestablish { node_id, msg } = event { + assert_eq!(*node_id, node_a_id); + Some(msg.clone()) + } else { + None + } + }) + .unwrap(); + nodes[0].node.handle_channel_reestablish(node_b_id, &bob_channel_reestablish); // Sanity check that Alice won't disconnect Bob since she's no longer waiting for any messages. for _ in 0..DISCONNECT_PEER_AWAITING_RESPONSE_TICKS { @@ -11332,7 +10859,7 @@ pub fn test_disconnects_peer_awaiting_response_ticks() { check_disconnect(&nodes[1]); // Finally, have Bob process the last message. 
- nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &alice_revoke_and_ack); + nodes[1].node.handle_revoke_and_ack(node_a_id, &alice_revoke_and_ack); check_added_monitors(&nodes[1], 1); // At this point, neither node should attempt to disconnect each other, since they aren't @@ -11352,11 +10879,17 @@ pub fn test_remove_expired_outbound_unfunded_channels() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap(); - let open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); - nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &open_channel_message); - let accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()); - nodes[0].node.handle_accept_channel(nodes[1].node.get_our_node_id(), &accept_channel_message); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + + let temp_channel_id = + nodes[0].node.create_channel(node_b_id, 100_000, 0, 42, None, None).unwrap(); + let open_channel_message = + get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); + nodes[1].node.handle_open_channel(node_a_id, &open_channel_message); + let accept_channel_message = + get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id); + nodes[0].node.handle_accept_channel(node_b_id, &accept_channel_message); let events = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events.len(), 1); @@ -11368,7 +10901,7 @@ pub fn test_remove_expired_outbound_unfunded_channels() { // Asserts the outbound channel has been removed from a nodes[0]'s peer state map. 
let check_outbound_channel_existence = |should_exist: bool| { let per_peer_state = nodes[0].node.per_peer_state.read().unwrap(); - let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap(); + let chan_lock = per_peer_state.get(&node_b_id).unwrap().lock().unwrap(); assert_eq!(chan_lock.channel_by_id.contains_key(&temp_channel_id), should_exist); }; @@ -11388,12 +10921,19 @@ pub fn test_remove_expired_outbound_unfunded_channels() { let msg_events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(msg_events.len(), 1); match msg_events[0] { - MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id: _ } => { - assert_eq!(msg.data, "Force-closing pending channel due to timeout awaiting establishment handshake"); + MessageSendEvent::HandleError { + action: ErrorAction::SendErrorMessage { ref msg }, + node_id: _, + } => { + assert_eq!( + msg.data, + "Force-closing pending channel due to timeout awaiting establishment handshake" + ); }, _ => panic!("Unexpected event"), } - check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) }, false, &[nodes[1].node.get_our_node_id()], 100000); + let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) }; + check_closed_event(&nodes[0], 1, reason, false, &[node_b_id], 100000); } #[xtest(feature = "_externalize_tests")] @@ -11403,11 +10943,17 @@ pub fn test_remove_expired_inbound_unfunded_channels() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap(); - let open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); - nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &open_channel_message); - let 
accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()); - nodes[0].node.handle_accept_channel(nodes[1].node.get_our_node_id(), &accept_channel_message); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + + let temp_channel_id = + nodes[0].node.create_channel(node_b_id, 100_000, 0, 42, None, None).unwrap(); + let open_channel_message = + get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); + nodes[1].node.handle_open_channel(node_a_id, &open_channel_message); + let accept_channel_message = + get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id); + nodes[0].node.handle_accept_channel(node_b_id, &accept_channel_message); let events = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events.len(), 1); @@ -11419,7 +10965,7 @@ pub fn test_remove_expired_inbound_unfunded_channels() { // Asserts the inbound channel has been removed from a nodes[1]'s peer state map. 
let check_inbound_channel_existence = |should_exist: bool| { let per_peer_state = nodes[1].node.per_peer_state.read().unwrap(); - let chan_lock = per_peer_state.get(&nodes[0].node.get_our_node_id()).unwrap().lock().unwrap(); + let chan_lock = per_peer_state.get(&node_a_id).unwrap().lock().unwrap(); assert_eq!(chan_lock.channel_by_id.contains_key(&temp_channel_id), should_exist); }; @@ -11439,12 +10985,19 @@ pub fn test_remove_expired_inbound_unfunded_channels() { let msg_events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(msg_events.len(), 1); match msg_events[0] { - MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id: _ } => { - assert_eq!(msg.data, "Force-closing pending channel due to timeout awaiting establishment handshake"); + MessageSendEvent::HandleError { + action: ErrorAction::SendErrorMessage { ref msg }, + node_id: _, + } => { + assert_eq!( + msg.data, + "Force-closing pending channel due to timeout awaiting establishment handshake" + ); }, _ => panic!("Unexpected event"), } - check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) }, false, &[nodes[0].node.get_our_node_id()], 100000); + let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) }; + check_closed_event(&nodes[1], 1, reason, false, &[node_a_id], 100000); } #[xtest(feature = "_externalize_tests")] @@ -11455,15 +11008,19 @@ pub fn test_channel_close_when_not_timely_accepted() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + // Simulate peer-disconnects mid-handshake // The channel is initiated from the node 0 side, // but the nodes disconnect before node 1 could send accept channel - let create_chan_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 
10001, 42, None, None).unwrap(); - let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); + let create_chan_id = + nodes[0].node.create_channel(node_b_id, 100000, 10001, 42, None, None).unwrap(); + let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); assert_eq!(open_channel_msg.common_fields.temporary_channel_id, create_chan_id); - nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); - nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id()); + nodes[0].node.peer_disconnected(node_b_id); + nodes[1].node.peer_disconnected(node_a_id); // Make sure that we have not removed the OutboundV1Channel from node[0] immediately. assert_eq!(nodes[0].node.list_channels().len(), 1); @@ -11478,7 +11035,8 @@ pub fn test_channel_close_when_not_timely_accepted() { // Since we disconnected from peer and did not connect back within time, // we should have forced-closed the channel by now. 
- check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) }, [nodes[1].node.get_our_node_id()], 100000); + let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) }; + check_closed_event!(nodes[0], 1, reason, [node_b_id], 100000); assert_eq!(nodes[0].node.list_channels().len(), 0); { @@ -11498,15 +11056,19 @@ pub fn test_rebroadcast_open_channel_when_reconnect_mid_handshake() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + // Simulate peer-disconnects mid-handshake // The channel is initiated from the node 0 side, // but the nodes disconnect before node 1 could send accept channel - let create_chan_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, None).unwrap(); - let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); + let create_chan_id = + nodes[0].node.create_channel(node_b_id, 100000, 10001, 42, None, None).unwrap(); + let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); assert_eq!(open_channel_msg.common_fields.temporary_channel_id, create_chan_id); - nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); - nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id()); + nodes[0].node.peer_disconnected(node_b_id); + nodes[1].node.peer_disconnected(node_a_id); // Make sure that we have not removed the OutboundV1Channel from node[0] immediately. 
assert_eq!(nodes[0].node.list_channels().len(), 1); @@ -11515,12 +11077,13 @@ pub fn test_rebroadcast_open_channel_when_reconnect_mid_handshake() { assert_eq!(nodes[1].node.list_channels().len(), 0); // The peers now reconnect - nodes[0].node.peer_connected(nodes[1].node.get_our_node_id(), &msgs::Init { - features: nodes[1].node.init_features(), networks: None, remote_network_address: None - }, true).unwrap(); - nodes[1].node.peer_connected(nodes[0].node.get_our_node_id(), &msgs::Init { - features: nodes[0].node.init_features(), networks: None, remote_network_address: None - }, false).unwrap(); + let init_msg = msgs::Init { + features: nodes[0].node.init_features(), + networks: None, + remote_network_address: None, + }; + nodes[0].node.peer_connected(node_b_id, &init_msg, true).unwrap(); + nodes[1].node.peer_connected(node_a_id, &init_msg, false).unwrap(); // Make sure the SendOpenChannel message is added to node_0 pending message events let msg_events = nodes[0].node.get_and_clear_pending_msg_events(); @@ -11543,53 +11106,63 @@ fn do_test_multi_post_event_actions(do_reload: bool) { let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); let (persister, chain_monitor); let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); - let nodes_0_deserialized; + let node_a_reload; let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let chan_id = create_announced_chan_between_nodes(&nodes, 0, 1).2; let chan_id_2 = create_announced_chan_between_nodes(&nodes, 0, 2).2; send_payment(&nodes[0], &[&nodes[1]], 1_000_000); send_payment(&nodes[0], &[&nodes[2]], 1_000_000); - let (our_payment_preimage, our_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000); - let (payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[0], &[&nodes[2]], 1_000_000); + let (our_payment_preimage, our_payment_hash, ..) 
= + route_payment(&nodes[0], &[&nodes[1]], 1_000_000); + let (payment_preimage_2, payment_hash_2, ..) = + route_payment(&nodes[0], &[&nodes[2]], 1_000_000); nodes[1].node.claim_funds(our_payment_preimage); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_payment_claimed!(nodes[1], our_payment_hash, 1_000_000); nodes[2].node.claim_funds(payment_preimage_2); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); expect_payment_claimed!(nodes[2], payment_hash_2, 1_000_000); for dest in &[1, 2] { - let htlc_fulfill_updates = get_htlc_update_msgs!(nodes[*dest], nodes[0].node.get_our_node_id()); - nodes[0].node.handle_update_fulfill_htlc(nodes[*dest].node.get_our_node_id(), &htlc_fulfill_updates.update_fulfill_htlcs[0]); - commitment_signed_dance!(nodes[0], nodes[*dest], htlc_fulfill_updates.commitment_signed, false); + let htlc_fulfill = get_htlc_update_msgs!(nodes[*dest], node_a_id); + let dest_node_id = nodes[*dest].node.get_our_node_id(); + nodes[0] + .node + .handle_update_fulfill_htlc(dest_node_id, &htlc_fulfill.update_fulfill_htlcs[0]); + commitment_signed_dance!(nodes[0], nodes[*dest], htlc_fulfill.commitment_signed, false); check_added_monitors(&nodes[0], 0); } let (route, payment_hash_3, _, payment_secret_3) = get_route_and_payment_hash!(nodes[1], nodes[0], 100_000); let payment_id = PaymentId(payment_hash_3.0); - nodes[1].node.send_payment_with_route(route, payment_hash_3, - RecipientOnionFields::secret_only(payment_secret_3), payment_id).unwrap(); + let onion = RecipientOnionFields::secret_only(payment_secret_3); + nodes[1].node.send_payment_with_route(route, payment_hash_3, onion, payment_id).unwrap(); check_added_monitors(&nodes[1], 1); let send_event = SendEvent::from_node(&nodes[1]); - nodes[0].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &send_event.msgs[0]); - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &send_event.commitment_msg); + 
nodes[0].node.handle_update_add_htlc(node_b_id, &send_event.msgs[0]); + nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &send_event.commitment_msg); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); if do_reload { - let nodes_0_serialized = nodes[0].node.encode(); + let node_ser = nodes[0].node.encode(); let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode(); let chan_1_monitor_serialized = get_monitor!(nodes[0], chan_id_2).encode(); - reload_node!(nodes[0], test_default_channel_config(), &nodes_0_serialized, &[&chan_0_monitor_serialized, &chan_1_monitor_serialized], persister, chain_monitor, nodes_0_deserialized); + let mons = [&chan_0_monitor_serialized[..], &chan_1_monitor_serialized[..]]; + let config = test_default_channel_config(); + reload_node!(nodes[0], config, &node_ser, &mons, persister, chain_monitor, node_a_reload); - nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id()); - nodes[2].node.peer_disconnected(nodes[0].node.get_our_node_id()); + nodes[1].node.peer_disconnected(node_a_id); + nodes[2].node.peer_disconnected(node_a_id); reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1])); reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[2])); @@ -11599,16 +11172,26 @@ fn do_test_multi_post_event_actions(do_reload: bool) { assert_eq!(events.len(), 4); if let Event::PaymentSent { payment_preimage, .. } = events[0] { assert!(payment_preimage == our_payment_preimage || payment_preimage == payment_preimage_2); - } else { panic!(); } + } else { + panic!(); + } if let Event::PaymentSent { payment_preimage, .. } = events[1] { assert!(payment_preimage == our_payment_preimage || payment_preimage == payment_preimage_2); - } else { panic!(); } - if let Event::PaymentPathSuccessful { .. } = events[2] {} else { panic!(); } - if let Event::PaymentPathSuccessful { .. } = events[3] {} else { panic!(); } + } else { + panic!(); + } + if let Event::PaymentPathSuccessful { .. 
} = events[2] { + } else { + panic!(); + } + if let Event::PaymentPathSuccessful { .. } = events[3] { + } else { + panic!(); + } // After the events are processed, the ChannelMonitorUpdates will be released and, upon their // completion, we'll respond to nodes[1] with an RAA + CS. - get_revoke_commit_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); + get_revoke_commit_msgs(&nodes[0], &node_b_id); check_added_monitors(&nodes[0], 3); } @@ -11625,32 +11208,38 @@ pub fn test_batch_channel_open() { let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); let nodes = create_network(3, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + // Initiate channel opening and create the batch channel funding transaction. - let (tx, funding_created_msgs) = create_batch_channel_funding(&nodes[0], &[ - (&nodes[1], 100_000, 0, 42, None), - (&nodes[2], 200_000, 0, 43, None), - ]); + let (tx, funding_created_msgs) = create_batch_channel_funding( + &nodes[0], + &[(&nodes[1], 100_000, 0, 42, None), (&nodes[2], 200_000, 0, 43, None)], + ); // Go through the funding_created and funding_signed flow with node 1. 
- nodes[1].node.handle_funding_created(nodes[0].node.get_our_node_id(), &funding_created_msgs[0]); + nodes[1].node.handle_funding_created(node_a_id, &funding_created_msgs[0]); check_added_monitors(&nodes[1], 1); - expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id()); + expect_channel_pending_event(&nodes[1], &node_a_id); - let funding_signed_msg = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()); - nodes[0].node.handle_funding_signed(nodes[1].node.get_our_node_id(), &funding_signed_msg); + let funding_signed_msg = + get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, node_a_id); + nodes[0].node.handle_funding_signed(node_b_id, &funding_signed_msg); check_added_monitors(&nodes[0], 1); // The transaction should not have been broadcast before all channels are ready. assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 0); // Go through the funding_created and funding_signed flow with node 2. - nodes[2].node.handle_funding_created(nodes[0].node.get_our_node_id(), &funding_created_msgs[1]); + nodes[2].node.handle_funding_created(node_a_id, &funding_created_msgs[1]); check_added_monitors(&nodes[2], 1); - expect_channel_pending_event(&nodes[2], &nodes[0].node.get_our_node_id()); + expect_channel_pending_event(&nodes[2], &node_a_id); - let funding_signed_msg = get_event_msg!(nodes[2], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()); + let funding_signed_msg = + get_event_msg!(nodes[2], MessageSendEvent::SendFundingSigned, node_a_id); chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); - nodes[0].node.handle_funding_signed(nodes[2].node.get_our_node_id(), &funding_signed_msg); + nodes[0].node.handle_funding_signed(node_c_id, &funding_signed_msg); check_added_monitors(&nodes[0], 1); // The transaction should not have been broadcast before persisting all monitors has been @@ -11659,9 +11248,9 @@ pub fn test_batch_channel_open() 
{ assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0); // Complete the persistence of the monitor. - nodes[0].chain_monitor.complete_sole_pending_chan_update( - &ChannelId::v1_from_funding_outpoint(OutPoint { txid: tx.compute_txid(), index: 1 }) - ); + nodes[0].chain_monitor.complete_sole_pending_chan_update(&ChannelId::v1_from_funding_outpoint( + OutPoint { txid: tx.compute_txid(), index: 1 }, + )); let events = nodes[0].node.get_and_clear_pending_events(); // The transaction should only have been broadcast now. @@ -11675,14 +11264,14 @@ pub fn test_batch_channel_open() { crate::events::Event::ChannelPending { ref counterparty_node_id, .. - } if counterparty_node_id == &nodes[1].node.get_our_node_id(), + } if counterparty_node_id == &node_b_id, ))); assert!(events.iter().any(|e| matches!( *e, crate::events::Event::ChannelPending { ref counterparty_node_id, .. - } if counterparty_node_id == &nodes[2].node.get_our_node_id(), + } if counterparty_node_id == &node_c_id, ))); } @@ -11695,19 +11284,23 @@ pub fn test_close_in_funding_batch() { let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); let nodes = create_network(3, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + // Initiate channel opening and create the batch channel funding transaction. - let (tx, funding_created_msgs) = create_batch_channel_funding(&nodes[0], &[ - (&nodes[1], 100_000, 0, 42, None), - (&nodes[2], 200_000, 0, 43, None), - ]); + let (tx, funding_created_msgs) = create_batch_channel_funding( + &nodes[0], + &[(&nodes[1], 100_000, 0, 42, None), (&nodes[2], 200_000, 0, 43, None)], + ); // Go through the funding_created and funding_signed flow with node 1. 
- nodes[1].node.handle_funding_created(nodes[0].node.get_our_node_id(), &funding_created_msgs[0]); + nodes[1].node.handle_funding_created(node_a_id, &funding_created_msgs[0]); check_added_monitors(&nodes[1], 1); - expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id()); + expect_channel_pending_event(&nodes[1], &node_a_id); - let funding_signed_msg = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()); - nodes[0].node.handle_funding_signed(nodes[1].node.get_our_node_id(), &funding_signed_msg); + let funding_signed_msg = + get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, node_a_id); + nodes[0].node.handle_funding_signed(node_b_id, &funding_signed_msg); check_added_monitors(&nodes[0], 1); // The transaction should not have been broadcast before all channels are ready. @@ -11718,8 +11311,8 @@ pub fn test_close_in_funding_batch() { let funding_txo_2 = OutPoint { txid: tx.compute_txid(), index: 1 }; let channel_id_1 = ChannelId::v1_from_funding_outpoint(funding_txo_1); let channel_id_2 = ChannelId::v1_from_funding_outpoint(funding_txo_2); - let error_message = "Channel force-closed"; - nodes[0].node.force_close_broadcasting_latest_txn(&channel_id_1, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap(); + let err = "Channel force-closed".to_string(); + nodes[0].node.force_close_broadcasting_latest_txn(&channel_id_1, &node_b_id, err).unwrap(); // The monitor should become closed. check_added_monitors(&nodes[0], 1); @@ -11728,7 +11321,10 @@ pub fn test_close_in_funding_batch() { let monitor_updates_1 = monitor_updates.get(&channel_id_1).unwrap(); assert_eq!(monitor_updates_1.len(), 1); assert_eq!(monitor_updates_1[0].updates.len(), 1); - assert!(matches!(monitor_updates_1[0].updates[0], ChannelMonitorUpdateStep::ChannelForceClosed { .. })); + assert!(matches!( + monitor_updates_1[0].updates[0], + ChannelMonitorUpdateStep::ChannelForceClosed { .. 
} + )); } let msg_events = nodes[0].node.get_and_clear_pending_msg_events(); @@ -11747,22 +11343,25 @@ pub fn test_close_in_funding_batch() { } // All channels in the batch should close immediately. - check_closed_events(&nodes[0], &[ - ExpectedCloseEvent { - channel_id: Some(channel_id_1), - discard_funding: true, - channel_funding_txo: Some(funding_txo_1), - user_channel_id: Some(42), - ..Default::default() - }, - ExpectedCloseEvent { - channel_id: Some(channel_id_2), - discard_funding: true, - channel_funding_txo: Some(funding_txo_2), - user_channel_id: Some(43), - ..Default::default() - }, - ]); + check_closed_events( + &nodes[0], + &[ + ExpectedCloseEvent { + channel_id: Some(channel_id_1), + discard_funding: true, + channel_funding_txo: Some(funding_txo_1), + user_channel_id: Some(42), + ..Default::default() + }, + ExpectedCloseEvent { + channel_id: Some(channel_id_2), + discard_funding: true, + channel_funding_txo: Some(funding_txo_2), + user_channel_id: Some(43), + ..Default::default() + }, + ], + ); // Ensure the channels don't exist anymore. assert!(nodes[0].node.list_channels().is_empty()); @@ -11775,29 +11374,35 @@ pub fn test_batch_funding_close_after_funding_signed() { let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); let nodes = create_network(3, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + // Initiate channel opening and create the batch channel funding transaction. - let (tx, funding_created_msgs) = create_batch_channel_funding(&nodes[0], &[ - (&nodes[1], 100_000, 0, 42, None), - (&nodes[2], 200_000, 0, 43, None), - ]); + let (tx, funding_created_msgs) = create_batch_channel_funding( + &nodes[0], + &[(&nodes[1], 100_000, 0, 42, None), (&nodes[2], 200_000, 0, 43, None)], + ); // Go through the funding_created and funding_signed flow with node 1. 
- nodes[1].node.handle_funding_created(nodes[0].node.get_our_node_id(), &funding_created_msgs[0]); + nodes[1].node.handle_funding_created(node_a_id, &funding_created_msgs[0]); check_added_monitors(&nodes[1], 1); - expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id()); + expect_channel_pending_event(&nodes[1], &node_a_id); - let funding_signed_msg = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()); - nodes[0].node.handle_funding_signed(nodes[1].node.get_our_node_id(), &funding_signed_msg); + let funding_signed_msg = + get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, node_a_id); + nodes[0].node.handle_funding_signed(node_b_id, &funding_signed_msg); check_added_monitors(&nodes[0], 1); // Go through the funding_created and funding_signed flow with node 2. - nodes[2].node.handle_funding_created(nodes[0].node.get_our_node_id(), &funding_created_msgs[1]); + nodes[2].node.handle_funding_created(node_a_id, &funding_created_msgs[1]); check_added_monitors(&nodes[2], 1); - expect_channel_pending_event(&nodes[2], &nodes[0].node.get_our_node_id()); + expect_channel_pending_event(&nodes[2], &node_a_id); - let funding_signed_msg = get_event_msg!(nodes[2], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()); + let funding_signed_msg = + get_event_msg!(nodes[2], MessageSendEvent::SendFundingSigned, node_a_id); chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); - nodes[0].node.handle_funding_signed(nodes[2].node.get_our_node_id(), &funding_signed_msg); + nodes[0].node.handle_funding_signed(node_c_id, &funding_signed_msg); check_added_monitors(&nodes[0], 1); // The transaction should not have been broadcast before all channels are ready. 
@@ -11808,19 +11413,25 @@ pub fn test_batch_funding_close_after_funding_signed() { let funding_txo_2 = OutPoint { txid: tx.compute_txid(), index: 1 }; let channel_id_1 = ChannelId::v1_from_funding_outpoint(funding_txo_1); let channel_id_2 = ChannelId::v1_from_funding_outpoint(funding_txo_2); - let error_message = "Channel force-closed"; - nodes[0].node.force_close_broadcasting_latest_txn(&channel_id_1, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap(); + let err = "Channel force-closed".to_string(); + nodes[0].node.force_close_broadcasting_latest_txn(&channel_id_1, &node_b_id, err).unwrap(); check_added_monitors(&nodes[0], 2); { let mut monitor_updates = nodes[0].chain_monitor.monitor_updates.lock().unwrap(); let monitor_updates_1 = monitor_updates.get(&channel_id_1).unwrap(); assert_eq!(monitor_updates_1.len(), 1); assert_eq!(monitor_updates_1[0].updates.len(), 1); - assert!(matches!(monitor_updates_1[0].updates[0], ChannelMonitorUpdateStep::ChannelForceClosed { .. })); + assert!(matches!( + monitor_updates_1[0].updates[0], + ChannelMonitorUpdateStep::ChannelForceClosed { .. } + )); let monitor_updates_2 = monitor_updates.get(&channel_id_2).unwrap(); assert_eq!(monitor_updates_2.len(), 1); assert_eq!(monitor_updates_2[0].updates.len(), 1); - assert!(matches!(monitor_updates_2[0].updates[0], ChannelMonitorUpdateStep::ChannelForceClosed { .. })); + assert!(matches!( + monitor_updates_2[0].updates[0], + ChannelMonitorUpdateStep::ChannelForceClosed { .. } + )); } let msg_events = nodes[0].node.get_and_clear_pending_msg_events(); match msg_events[0] { @@ -11838,22 +11449,25 @@ pub fn test_batch_funding_close_after_funding_signed() { } // All channels in the batch should close immediately. 
- check_closed_events(&nodes[0], &[ - ExpectedCloseEvent { - channel_id: Some(channel_id_1), - discard_funding: true, - channel_funding_txo: Some(funding_txo_1), - user_channel_id: Some(42), - ..Default::default() - }, - ExpectedCloseEvent { - channel_id: Some(channel_id_2), - discard_funding: true, - channel_funding_txo: Some(funding_txo_2), - user_channel_id: Some(43), - ..Default::default() - }, - ]); + check_closed_events( + &nodes[0], + &[ + ExpectedCloseEvent { + channel_id: Some(channel_id_1), + discard_funding: true, + channel_funding_txo: Some(funding_txo_1), + user_channel_id: Some(42), + ..Default::default() + }, + ExpectedCloseEvent { + channel_id: Some(channel_id_2), + discard_funding: true, + channel_funding_txo: Some(funding_txo_2), + user_channel_id: Some(43), + ..Default::default() + }, + ], + ); // Ensure the channels don't exist anymore. assert!(nodes[0].node.list_channels().is_empty()); @@ -11866,30 +11480,41 @@ fn do_test_funding_and_commitment_tx_confirm_same_block(confirm_remote_commitmen let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let mut min_depth_1_block_cfg = test_default_channel_config(); min_depth_1_block_cfg.channel_handshake_config.minimum_depth = 1; - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(min_depth_1_block_cfg.clone()), Some(min_depth_1_block_cfg)]); + let node_chanmgrs = create_node_chanmgrs( + 2, + &node_cfgs, + &[Some(min_depth_1_block_cfg.clone()), Some(min_depth_1_block_cfg)], + ); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let funding_tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 1_000_000, 0); - let chan_id = ChannelId::v1_from_funding_outpoint(chain::transaction::OutPoint { txid: funding_tx.compute_txid(), index: 0 }); + let chan_id = ChannelId::v1_from_funding_outpoint(chain::transaction::OutPoint { + txid: funding_tx.compute_txid(), + index: 0, + }); assert_eq!(nodes[0].node.list_channels().len(), 1); assert_eq!(nodes[1].node.list_channels().len(), 
1); - let (closing_node, other_node) = if confirm_remote_commitment { - (&nodes[1], &nodes[0]) - } else { - (&nodes[0], &nodes[1]) - }; - let error_message = "Channel force-closed"; - closing_node.node.force_close_broadcasting_latest_txn(&chan_id, &other_node.node.get_our_node_id(), error_message.to_string()).unwrap(); + let (closing_node, other_node) = + if confirm_remote_commitment { (&nodes[1], &nodes[0]) } else { (&nodes[0], &nodes[1]) }; + let closing_node_id = closing_node.node.get_our_node_id(); + let other_node_id = other_node.node.get_our_node_id(); + + let err = "Channel force-closed".to_string(); + closing_node.node.force_close_broadcasting_latest_txn(&chan_id, &other_node_id, err).unwrap(); let mut msg_events = closing_node.node.get_and_clear_pending_msg_events(); assert_eq!(msg_events.len(), 1); match msg_events.pop().unwrap() { - MessageSendEvent::HandleError { action: msgs::ErrorAction::SendErrorMessage { .. }, .. } => {}, + MessageSendEvent::HandleError { + action: msgs::ErrorAction::SendErrorMessage { .. }, + .. 
+ } => {}, _ => panic!("Unexpected event"), } check_added_monitors(closing_node, 1); - check_closed_event(closing_node, 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, false, &[other_node.node.get_our_node_id()], 1_000_000); + let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }; + check_closed_event(closing_node, 1, reason, false, &[other_node_id], 1_000_000); let commitment_tx = { let mut txn = closing_node.tx_broadcaster.txn_broadcast(); @@ -11904,7 +11529,8 @@ fn do_test_funding_and_commitment_tx_confirm_same_block(confirm_remote_commitmen check_closed_broadcast(other_node, 1, true); check_added_monitors(other_node, 1); - check_closed_event(other_node, 1, ClosureReason::CommitmentTxConfirmed, false, &[closing_node.node.get_our_node_id()], 1_000_000); + let reason = ClosureReason::CommitmentTxConfirmed; + check_closed_event(other_node, 1, reason, false, &[closing_node_id], 1_000_000); assert!(nodes[0].node.list_channels().is_empty()); assert!(nodes[1].node.list_channels().is_empty()); @@ -11931,22 +11557,28 @@ pub fn test_accept_inbound_channel_errors_queued() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config0), Some(config1)]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap(); - let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + + nodes[0].node.create_channel(node_b_id, 100_000, 0, 42, None, None).unwrap(); + let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); - nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &open_channel_msg); + nodes[1].node.handle_open_channel(node_a_id, &open_channel_msg); let events = 
nodes[1].node.get_and_clear_pending_events(); match events[0] { Event::OpenChannelRequest { temporary_channel_id, .. } => { - match nodes[1].node.accept_inbound_channel(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 23, None) { + match nodes[1].node.accept_inbound_channel(&temporary_channel_id, &node_a_id, 23, None) + { Err(APIError::ChannelUnavailable { err: _ }) => (), _ => panic!(), } - } + }, _ => panic!("Unexpected event"), } - assert_eq!(get_err_msg(&nodes[1], &nodes[0].node.get_our_node_id()).channel_id, - open_channel_msg.common_fields.temporary_channel_id); + assert_eq!( + get_err_msg(&nodes[1], &node_a_id).channel_id, + open_channel_msg.common_fields.temporary_channel_id + ); } #[xtest(feature = "_externalize_tests")] @@ -11958,25 +11590,32 @@ pub fn test_manual_funding_abandon() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(cfg.clone()), Some(cfg)]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - assert!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).is_ok()); - let open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + + assert!(nodes[0].node.create_channel(node_b_id, 100_000, 0, 42, None, None).is_ok()); + let open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); - nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &open_channel); - let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()); + nodes[1].node.handle_open_channel(node_a_id, &open_channel); + let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id); - nodes[0].node.handle_accept_channel(nodes[1].node.get_our_node_id(), &accept_channel); - let (temporary_channel_id, _tx, funding_outpoint) = 
create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100_000, 42); - nodes[0].node.unsafe_manual_funding_transaction_generated(temporary_channel_id, nodes[1].node.get_our_node_id(), funding_outpoint).unwrap(); - check_added_monitors!(nodes[0], 0); + nodes[0].node.handle_accept_channel(node_b_id, &accept_channel); + let (temp_channel_id, _tx, funding_outpoint) = + create_funding_transaction(&nodes[0], &node_b_id, 100_000, 42); + nodes[0] + .node + .unsafe_manual_funding_transaction_generated(temp_channel_id, node_b_id, funding_outpoint) + .unwrap(); + check_added_monitors(&nodes[0], 0); - let funding_created = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()); - nodes[1].node.handle_funding_created(nodes[0].node.get_our_node_id(), &funding_created); - check_added_monitors!(nodes[1], 1); - expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id()); + let funding_created = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, node_b_id); + nodes[1].node.handle_funding_created(node_a_id, &funding_created); + check_added_monitors(&nodes[1], 1); + expect_channel_pending_event(&nodes[1], &node_a_id); - let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()); + let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, node_a_id); let err = msgs::ErrorMessage { channel_id: funding_signed.channel_id, data: "".to_string() }; - nodes[0].node.handle_error(nodes[1].node.get_our_node_id(), &err); + nodes[0].node.handle_error(node_b_id, &err); let close_events = nodes[0].node.get_and_clear_pending_events(); assert_eq!(close_events.len(), 2); @@ -11986,7 +11625,7 @@ pub fn test_manual_funding_abandon() { assert_eq!(*channel_id, err.channel_id); assert_eq!(*outpoint, funding_outpoint); true - } + }, _ => false, })); } @@ -12000,25 +11639,32 @@ pub fn test_funding_signed_event() { let node_chanmgrs = 
create_node_chanmgrs(2, &node_cfgs, &[Some(cfg.clone()), Some(cfg)]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - assert!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).is_ok()); - let open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + + assert!(nodes[0].node.create_channel(node_b_id, 100_000, 0, 42, None, None).is_ok()); + let open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); - nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &open_channel); - let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()); + nodes[1].node.handle_open_channel(node_a_id, &open_channel); + let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id); - nodes[0].node.handle_accept_channel(nodes[1].node.get_our_node_id(), &accept_channel); - let (temporary_channel_id, tx, funding_outpoint) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100_000, 42); - nodes[0].node.unsafe_manual_funding_transaction_generated(temporary_channel_id, nodes[1].node.get_our_node_id(), funding_outpoint).unwrap(); - check_added_monitors!(nodes[0], 0); + nodes[0].node.handle_accept_channel(node_b_id, &accept_channel); + let (temp_channel_id, tx, funding_outpoint) = + create_funding_transaction(&nodes[0], &node_b_id, 100_000, 42); + nodes[0] + .node + .unsafe_manual_funding_transaction_generated(temp_channel_id, node_b_id, funding_outpoint) + .unwrap(); + check_added_monitors(&nodes[0], 0); - let funding_created = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()); - nodes[1].node.handle_funding_created(nodes[0].node.get_our_node_id(), &funding_created); - 
check_added_monitors!(nodes[1], 1); - expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id()); + let funding_created = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, node_b_id); + nodes[1].node.handle_funding_created(node_a_id, &funding_created); + check_added_monitors(&nodes[1], 1); + expect_channel_pending_event(&nodes[1], &node_a_id); - let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()); - nodes[0].node.handle_funding_signed(nodes[1].node.get_our_node_id(), &funding_signed); - check_added_monitors!(nodes[0], 1); + let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, node_a_id); + nodes[0].node.handle_funding_signed(node_b_id, &funding_signed); + check_added_monitors(&nodes[0], 1); let events = &nodes[0].node.get_and_clear_pending_events(); assert_eq!(events.len(), 2); match &events[0] { @@ -12030,7 +11676,7 @@ pub fn test_funding_signed_event() { }; match &events[1] { crate::events::Event::ChannelPending { counterparty_node_id, .. 
} => { - assert_eq!(*&nodes[1].node.get_our_node_id(), *counterparty_node_id); + assert_eq!(*&node_b_id, *counterparty_node_id); }, _ => panic!("Unexpected event"), }; @@ -12038,13 +11684,13 @@ pub fn test_funding_signed_event() { mine_transaction(&nodes[0], &tx); mine_transaction(&nodes[1], &tx); - let as_channel_ready = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReady, nodes[0].node.get_our_node_id()); - nodes[1].node.handle_channel_ready(nodes[0].node.get_our_node_id(), &as_channel_ready); - let as_channel_ready = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id()); - nodes[0].node.handle_channel_ready(nodes[1].node.get_our_node_id(), &as_channel_ready); + let as_channel_ready = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReady, node_a_id); + nodes[1].node.handle_channel_ready(node_a_id, &as_channel_ready); + let as_channel_ready = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, node_b_id); + nodes[0].node.handle_channel_ready(node_b_id, &as_channel_ready); - expect_channel_ready_event(&nodes[0], &nodes[1].node.get_our_node_id()); - expect_channel_ready_event(&nodes[1], &nodes[0].node.get_our_node_id()); + expect_channel_ready_event(&nodes[0], &node_b_id); + expect_channel_ready_event(&nodes[1], &node_a_id); nodes[0].node.get_and_clear_pending_msg_events(); nodes[1].node.get_and_clear_pending_msg_events(); } diff --git a/lightning/src/ln/htlc_reserve_unit_tests.rs b/lightning/src/ln/htlc_reserve_unit_tests.rs new file mode 100644 index 00000000000..aee764682a2 --- /dev/null +++ b/lightning/src/ln/htlc_reserve_unit_tests.rs @@ -0,0 +1,2080 @@ +//! Various unit tests covering HTLC handling as well as tests covering channel reserve tracking. 
+ +use crate::events::{ClosureReason, Event, HTLCHandlingFailureType, PaymentPurpose}; +use crate::ln::chan_utils::{ + self, commitment_tx_base_weight, htlc_success_tx_weight, CommitmentTransaction, + COMMITMENT_TX_WEIGHT_PER_HTLC, +}; +use crate::ln::channel::{ + get_holder_selected_channel_reserve_satoshis, Channel, FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, + MIN_AFFORDABLE_HTLC_COUNT, +}; +use crate::ln::channelmanager::{PaymentId, RAACommitmentOrder}; +use crate::ln::functional_test_utils::*; +use crate::ln::msgs::{self, BaseMessageHandler, ChannelMessageHandler, MessageSendEvent}; +use crate::ln::onion_utils::{self, AttributionData}; +use crate::ln::outbound_payment::RecipientOnionFields; +use crate::routing::router::PaymentParameters; +use crate::sign::ecdsa::EcdsaChannelSigner; +use crate::types::features::ChannelTypeFeatures; +use crate::types::payment::PaymentPreimage; +use crate::util::config::UserConfig; +use crate::util::errors::APIError; + +use lightning_macros::xtest; + +use bitcoin::secp256k1::{Secp256k1, SecretKey}; + +fn do_test_counterparty_no_reserve(send_from_initiator: bool) { + // A peer providing a channel_reserve_satoshis of 0 (or less than our dust limit) is insecure, + // but only for them. Because some LSPs do it with some level of trust of the clients (for a + // substantial UX improvement), we explicitly allow it. Because it's unlikely to happen often + // in normal testing, we test it explicitly here. 
+ let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + + let default_config = UserConfig::default(); + + // Have node0 initiate a channel to node1 with aforementioned parameters + let mut push_amt = 100_000_000; + let feerate_per_kw = 253; + let channel_type_features = ChannelTypeFeatures::only_static_remote_key(); + push_amt -= feerate_per_kw as u64 + * (commitment_tx_base_weight(&channel_type_features) + 4 * COMMITMENT_TX_WEIGHT_PER_HTLC) + / 1000 * 1000; + push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000; + + let push = if send_from_initiator { 0 } else { push_amt }; + let temp_channel_id = + nodes[0].node.create_channel(node_b_id, 100_000, push, 42, None, None).unwrap(); + let mut open_channel_message = + get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); + if !send_from_initiator { + open_channel_message.channel_reserve_satoshis = 0; + open_channel_message.common_fields.max_htlc_value_in_flight_msat = 100_000_000; + } + nodes[1].node.handle_open_channel(node_a_id, &open_channel_message); + + // Extract the channel accept message from node1 to node0 + let mut accept_channel_message = + get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id); + if send_from_initiator { + accept_channel_message.channel_reserve_satoshis = 0; + accept_channel_message.common_fields.max_htlc_value_in_flight_msat = 100_000_000; + } + nodes[0].node.handle_accept_channel(node_b_id, &accept_channel_message); + { + let sender_node = if send_from_initiator { &nodes[1] } else { &nodes[0] }; + let counterparty_node = if send_from_initiator { &nodes[0] } else { &nodes[1] }; + let mut sender_node_per_peer_lock; + let mut 
sender_node_peer_state_lock; + + let channel = get_channel_ref!( + sender_node, + counterparty_node, + sender_node_per_peer_lock, + sender_node_peer_state_lock, + temp_channel_id + ); + assert!(channel.is_unfunded_v1()); + channel.funding_mut().holder_selected_channel_reserve_satoshis = 0; + channel.context_mut().holder_max_htlc_value_in_flight_msat = 100_000_000; + } + + let funding_tx = sign_funding_transaction(&nodes[0], &nodes[1], 100_000, temp_channel_id); + let funding_msgs = + create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &funding_tx); + create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &funding_msgs.0); + + // nodes[0] should now be able to send the full balance to nodes[1], violating nodes[1]'s + // security model if it ever tries to send funds back to nodes[0] (but that's not our problem). + if send_from_initiator { + send_payment( + &nodes[0], + &[&nodes[1]], + 100_000_000 + // Note that for outbound channels we have to consider the commitment tx fee and the + // "fee spike buffer", which is currently a multiple of the total commitment tx fee as + // well as an additional HTLC. + - FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE * commit_tx_fee_msat(feerate_per_kw, 2, &channel_type_features), + ); + } else { + send_payment(&nodes[1], &[&nodes[0]], push_amt); + } +} + +#[xtest(feature = "_externalize_tests")] +pub fn test_counterparty_no_reserve() { + do_test_counterparty_no_reserve(true); + do_test_counterparty_no_reserve(false); +} + +#[xtest(feature = "_externalize_tests")] +pub fn test_channel_reserve_holding_cell_htlcs() { + let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + // When this test was written, the default base fee floated based on the HTLC count. + // It is now fixed, so we simply set the fee to the expected value here. 
+ let mut config = test_default_channel_config(); + config.channel_config.forwarding_fee_base_msat = 239; + + let configs = [Some(config.clone()), Some(config.clone()), Some(config.clone())]; + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &configs); + let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + + let chan_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 190000, 1001); + let chan_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 190000, 1001); + let chan_2_user_id = nodes[2].node.list_channels()[0].user_channel_id; + + let mut stat01 = get_channel_value_stat!(nodes[0], nodes[1], chan_1.2); + let mut stat11 = get_channel_value_stat!(nodes[1], nodes[0], chan_1.2); + + let mut stat12 = get_channel_value_stat!(nodes[1], nodes[2], chan_2.2); + let mut stat22 = get_channel_value_stat!(nodes[2], nodes[1], chan_2.2); + + macro_rules! 
expect_forward { + ($node: expr) => {{ + let mut events = $node.node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + check_added_monitors(&$node, 1); + let payment_event = SendEvent::from_event(events.remove(0)); + payment_event + }}; + } + + let feemsat = 239; // set above + let total_fee_msat = (nodes.len() - 2) as u64 * feemsat; + let feerate = get_feerate!(nodes[0], nodes[1], chan_1.2); + let channel_type_features = get_channel_type_features!(nodes[0], nodes[1], chan_1.2); + + let recv_value_0 = stat01.counterparty_max_htlc_value_in_flight_msat - total_fee_msat; + + // attempt to send amt_msat > their_max_htlc_value_in_flight_msat + { + let payment_params = PaymentParameters::from_node_id(node_c_id, TEST_FINAL_CLTV) + .with_bolt11_features(nodes[2].node.bolt11_invoice_features()) + .unwrap() + .with_max_channel_saturation_power_of_half(0); + let (mut route, our_payment_hash, _, our_payment_secret) = + get_route_and_payment_hash!(nodes[0], nodes[2], payment_params, recv_value_0); + route.paths[0].hops.last_mut().unwrap().fee_msat += 1; + assert!(route.paths[0].hops.iter().rev().skip(1).all(|h| h.fee_msat == feemsat)); + + let onion = RecipientOnionFields::secret_only(our_payment_secret); + let id = PaymentId(our_payment_hash.0); + let res = nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id); + unwrap_send_err!(nodes[0], res, true, APIError::ChannelUnavailable { .. }, {}); + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); + } + + // channel reserve is bigger than their_max_htlc_value_in_flight_msat so loop to deplete + // nodes[0]'s wealth + loop { + let amt_msat = recv_value_0 + total_fee_msat; + // 3 for the 3 HTLCs that will be sent, 2* and +1 for the fee spike reserve. + // Also, ensure that each payment has enough to be over the dust limit to + // ensure it'll be included in each commit tx fee calculation. 
+ let commit_tx_fee_all_htlcs = + 2 * commit_tx_fee_msat(feerate, 3 + 1, &channel_type_features); + let ensure_htlc_amounts_above_dust_buffer = + 3 * (stat01.counterparty_dust_limit_msat + 1000); + if stat01.value_to_self_msat + < stat01.channel_reserve_msat + + commit_tx_fee_all_htlcs + + ensure_htlc_amounts_above_dust_buffer + + amt_msat + { + break; + } + + let payment_params = PaymentParameters::from_node_id(node_c_id, TEST_FINAL_CLTV) + .with_bolt11_features(nodes[2].node.bolt11_invoice_features()) + .unwrap() + .with_max_channel_saturation_power_of_half(0); + let route = get_route!(nodes[0], payment_params, recv_value_0).unwrap(); + let (payment_preimage, ..) = + send_along_route(&nodes[0], route, &[&nodes[1], &nodes[2]], recv_value_0); + claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage); + + let (stat01_, stat11_, stat12_, stat22_) = ( + get_channel_value_stat!(nodes[0], nodes[1], chan_1.2), + get_channel_value_stat!(nodes[1], nodes[0], chan_1.2), + get_channel_value_stat!(nodes[1], nodes[2], chan_2.2), + get_channel_value_stat!(nodes[2], nodes[1], chan_2.2), + ); + + assert_eq!(stat01_.value_to_self_msat, stat01.value_to_self_msat - amt_msat); + assert_eq!(stat11_.value_to_self_msat, stat11.value_to_self_msat + amt_msat); + assert_eq!(stat12_.value_to_self_msat, stat12.value_to_self_msat - (amt_msat - feemsat)); + assert_eq!(stat22_.value_to_self_msat, stat22.value_to_self_msat + (amt_msat - feemsat)); + stat01 = stat01_; + stat11 = stat11_; + stat12 = stat12_; + stat22 = stat22_; + } + + // adding pending output. + // 2* and +1 HTLCs on the commit tx fee for the fee spike reserve. + // The reason we're dividing by two here is as follows: the dividend is the total outbound liquidity + // after fees, the channel reserve, and the fee spike buffer are removed. We eventually want to + // divide this quantity into 3 portions, that will each be sent in an HTLC. 
This allows us + // to test channel channel reserve policy at the edges of what amount is sendable, i.e. + // cases where 1 msat over X amount will cause a payment failure, but anything less than + // that can be sent successfully. So, dividing by two is a somewhat arbitrary way of getting + // the amount of the first of these aforementioned 3 payments. The reason we split into 3 payments + // is to test the behavior of the holding cell with respect to channel reserve and commit tx fee + // policy. + let commit_tx_fee_2_htlcs = 2 * commit_tx_fee_msat(feerate, 2 + 1, &channel_type_features); + let recv_value_1 = (stat01.value_to_self_msat + - stat01.channel_reserve_msat + - total_fee_msat + - commit_tx_fee_2_htlcs) + / 2; + let amt_msat_1 = recv_value_1 + total_fee_msat; + + let (route_1, our_payment_hash_1, our_payment_preimage_1, our_payment_secret_1) = + get_route_and_payment_hash!(nodes[0], nodes[2], recv_value_1); + let payment_event_1 = { + let route = route_1.clone(); + let onion = RecipientOnionFields::secret_only(our_payment_secret_1); + let id = PaymentId(our_payment_hash_1.0); + nodes[0].node.send_payment_with_route(route, our_payment_hash_1, onion, id).unwrap(); + check_added_monitors(&nodes[0], 1); + + let mut events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + SendEvent::from_event(events.remove(0)) + }; + nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event_1.msgs[0]); + + // channel reserve test with htlc pending output > 0 + let recv_value_2 = stat01.value_to_self_msat + - amt_msat_1 + - stat01.channel_reserve_msat + - total_fee_msat + - commit_tx_fee_2_htlcs; + { + let mut route = route_1.clone(); + route.paths[0].hops.last_mut().unwrap().fee_msat = recv_value_2 + 1; + let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[2]); + let onion = RecipientOnionFields::secret_only(our_payment_secret); + let id = PaymentId(our_payment_hash.0); + let res = 
nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id); + unwrap_send_err!(nodes[0], res, true, APIError::ChannelUnavailable { .. }, {}); + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); + } + + // split the rest to test holding cell + let commit_tx_fee_3_htlcs = 2 * commit_tx_fee_msat(feerate, 3 + 1, &channel_type_features); + let additional_htlc_cost_msat = commit_tx_fee_3_htlcs - commit_tx_fee_2_htlcs; + let recv_value_21 = recv_value_2 / 2 - additional_htlc_cost_msat / 2; + let recv_value_22 = recv_value_2 - recv_value_21 - total_fee_msat - additional_htlc_cost_msat; + { + let stat = get_channel_value_stat!(nodes[0], nodes[1], chan_1.2); + assert_eq!( + stat.value_to_self_msat + - (stat.pending_outbound_htlcs_amount_msat + + recv_value_21 + recv_value_22 + + total_fee_msat + total_fee_msat + + commit_tx_fee_3_htlcs), + stat.channel_reserve_msat + ); + } + + // now see if they go through on both sides + let (route_21, our_payment_hash_21, our_payment_preimage_21, our_payment_secret_21) = + get_route_and_payment_hash!(nodes[0], nodes[2], recv_value_21); + // but this will stuck in the holding cell + let onion = RecipientOnionFields::secret_only(our_payment_secret_21); + let id = PaymentId(our_payment_hash_21.0); + nodes[0].node.send_payment_with_route(route_21, our_payment_hash_21, onion, id).unwrap(); + check_added_monitors(&nodes[0], 0); + let events = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 0); + + // test with outbound holding cell amount > 0 + { + let (mut route, our_payment_hash, _, our_payment_secret) = + get_route_and_payment_hash!(nodes[0], nodes[2], recv_value_22); + route.paths[0].hops.last_mut().unwrap().fee_msat += 1; + let onion = RecipientOnionFields::secret_only(our_payment_secret); + let id = PaymentId(our_payment_hash.0); + let res = nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id); + unwrap_send_err!(nodes[0], res, true, 
APIError::ChannelUnavailable { .. }, {}); + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); + } + + let (route_22, our_payment_hash_22, our_payment_preimage_22, our_payment_secret_22) = + get_route_and_payment_hash!(nodes[0], nodes[2], recv_value_22); + // this will also stuck in the holding cell + let onion = RecipientOnionFields::secret_only(our_payment_secret_22); + let id = PaymentId(our_payment_hash_22.0); + nodes[0].node.send_payment_with_route(route_22, our_payment_hash_22, onion, id).unwrap(); + check_added_monitors(&nodes[0], 0); + assert!(nodes[0].node.get_and_clear_pending_events().is_empty()); + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); + + // flush the pending htlc + nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &payment_event_1.commitment_msg); + let (as_revoke_and_ack, as_commitment_signed) = get_revoke_commit_msgs!(nodes[1], node_a_id); + check_added_monitors(&nodes[1], 1); + + // the pending htlc should be promoted to committed + nodes[0].node.handle_revoke_and_ack(node_b_id, &as_revoke_and_ack); + check_added_monitors(&nodes[0], 1); + let commitment_update_2 = get_htlc_update_msgs!(nodes[0], node_b_id); + + nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &as_commitment_signed); + let bs_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); + // No commitment_signed so get_event_msg's assert(len == 1) passes + check_added_monitors(&nodes[0], 1); + + nodes[1].node.handle_revoke_and_ack(node_a_id, &bs_revoke_and_ack); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + check_added_monitors(&nodes[1], 1); + + expect_pending_htlcs_forwardable!(nodes[1]); + + let ref payment_event_11 = expect_forward!(nodes[1]); + nodes[2].node.handle_update_add_htlc(node_b_id, &payment_event_11.msgs[0]); + commitment_signed_dance!(nodes[2], nodes[1], payment_event_11.commitment_msg, false); + + 
expect_pending_htlcs_forwardable!(nodes[2]); + expect_payment_claimable!(nodes[2], our_payment_hash_1, our_payment_secret_1, recv_value_1); + + // flush the htlcs in the holding cell + assert_eq!(commitment_update_2.update_add_htlcs.len(), 2); + nodes[1].node.handle_update_add_htlc(node_a_id, &commitment_update_2.update_add_htlcs[0]); + nodes[1].node.handle_update_add_htlc(node_a_id, &commitment_update_2.update_add_htlcs[1]); + commitment_signed_dance!(nodes[1], nodes[0], &commitment_update_2.commitment_signed, false); + expect_pending_htlcs_forwardable!(nodes[1]); + + let ref payment_event_3 = expect_forward!(nodes[1]); + assert_eq!(payment_event_3.msgs.len(), 2); + nodes[2].node.handle_update_add_htlc(node_b_id, &payment_event_3.msgs[0]); + nodes[2].node.handle_update_add_htlc(node_b_id, &payment_event_3.msgs[1]); + + commitment_signed_dance!(nodes[2], nodes[1], &payment_event_3.commitment_msg, false); + expect_pending_htlcs_forwardable!(nodes[2]); + + let events = nodes[2].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 2); + match events[0] { + Event::PaymentClaimable { + ref payment_hash, + ref purpose, + amount_msat, + receiver_node_id, + ref via_channel_ids, + .. + } => { + assert_eq!(our_payment_hash_21, *payment_hash); + assert_eq!(recv_value_21, amount_msat); + assert_eq!(node_c_id, receiver_node_id.unwrap()); + assert_eq!(*via_channel_ids, vec![(chan_2.2, Some(chan_2_user_id))]); + match &purpose { + PaymentPurpose::Bolt11InvoicePayment { + payment_preimage, payment_secret, .. + } => { + assert!(payment_preimage.is_none()); + assert_eq!(our_payment_secret_21, *payment_secret); + }, + _ => panic!("expected PaymentPurpose::Bolt11InvoicePayment"), + } + }, + _ => panic!("Unexpected event"), + } + match events[1] { + Event::PaymentClaimable { + ref payment_hash, + ref purpose, + amount_msat, + receiver_node_id, + ref via_channel_ids, + .. 
+ } => { + assert_eq!(our_payment_hash_22, *payment_hash); + assert_eq!(recv_value_22, amount_msat); + assert_eq!(node_c_id, receiver_node_id.unwrap()); + assert_eq!(*via_channel_ids, vec![(chan_2.2, Some(chan_2_user_id))]); + match &purpose { + PaymentPurpose::Bolt11InvoicePayment { + payment_preimage, payment_secret, .. + } => { + assert!(payment_preimage.is_none()); + assert_eq!(our_payment_secret_22, *payment_secret); + }, + _ => panic!("expected PaymentPurpose::Bolt11InvoicePayment"), + } + }, + _ => panic!("Unexpected event"), + } + + claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], our_payment_preimage_1); + claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], our_payment_preimage_21); + claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], our_payment_preimage_22); + + let commit_tx_fee_0_htlcs = 2 * commit_tx_fee_msat(feerate, 1, &channel_type_features); + let recv_value_3 = commit_tx_fee_2_htlcs - commit_tx_fee_0_htlcs - total_fee_msat; + send_payment(&nodes[0], &[&nodes[1], &nodes[2]], recv_value_3); + + let commit_tx_fee_1_htlc = 2 * commit_tx_fee_msat(feerate, 1 + 1, &channel_type_features); + let expected_value_to_self = stat01.value_to_self_msat + - (recv_value_1 + total_fee_msat) + - (recv_value_21 + total_fee_msat) + - (recv_value_22 + total_fee_msat) + - (recv_value_3 + total_fee_msat); + let stat0 = get_channel_value_stat!(nodes[0], nodes[1], chan_1.2); + assert_eq!(stat0.value_to_self_msat, expected_value_to_self); + assert_eq!(stat0.value_to_self_msat, stat0.channel_reserve_msat + commit_tx_fee_1_htlc); + + let stat2 = get_channel_value_stat!(nodes[2], nodes[1], chan_2.2); + assert_eq!( + stat2.value_to_self_msat, + stat22.value_to_self_msat + recv_value_1 + recv_value_21 + recv_value_22 + recv_value_3 + ); +} + +#[xtest(feature = "_externalize_tests")] +pub fn channel_reserve_in_flight_removes() { + // In cases where one side claims an HTLC, it thinks it has additional available funds that it + // can send to its counterparty, but due to update 
ordering, the other side may not yet have + // considered those HTLCs fully removed. + // This tests that we don't count HTLCs which will not be included in the next remote + // commitment transaction towards the reserve value (as it implies no commitment transaction + // will be generated which violates the remote reserve value). + // This was broken previously, and discovered by the chanmon_fail_consistency fuzz test. + // To test this we: + // * route two HTLCs from A to B (note that, at a high level, this test is checking that, when + // you consider the values of both of these HTLCs, B may not send an HTLC back to A, but if + // you only consider the value of the first HTLC, it may not), + // * start routing a third HTLC from A to B, + // * claim the first two HTLCs (though B will generate an update_fulfill for one, and put + // the other claim in its holding cell, as it immediately goes into AwaitingRAA), + // * deliver the first fulfill from B + // * deliver the update_add and an RAA from A, resulting in B freeing the second holding cell + // claim, + // * deliver A's response CS and RAA. + // This results in A having the second HTLC in AwaitingRemovedRemoteRevoke, but B having + // removed it fully. B now has the push_msat plus the first two HTLCs in value. + // * Now B happily sends another HTLC, potentially violating its reserve value from A's point + // of view (if A counts the AwaitingRemovedRemoteRevoke HTLC). + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + + let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1); + + let b_chan_values = get_channel_value_stat!(nodes[1], nodes[0], chan_1.2); + // Route the first two HTLCs. 
+ let payment_value_1 = + b_chan_values.channel_reserve_msat - b_chan_values.value_to_self_msat - 10000; + let (payment_preimage_1, payment_hash_1, ..) = + route_payment(&nodes[0], &[&nodes[1]], payment_value_1); + let (payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[0], &[&nodes[1]], 20_000); + + // Start routing the third HTLC (this is just used to get everyone in the right state). + let (route, payment_hash_3, payment_preimage_3, payment_secret_3) = + get_route_and_payment_hash!(nodes[0], nodes[1], 100000); + let send_1 = { + let onion = RecipientOnionFields::secret_only(payment_secret_3); + let id = PaymentId(payment_hash_3.0); + nodes[0].node.send_payment_with_route(route, payment_hash_3, onion, id).unwrap(); + check_added_monitors(&nodes[0], 1); + let mut events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + SendEvent::from_event(events.remove(0)) + }; + + // Now claim both of the first two HTLCs on B's end, putting B in AwaitingRAA and generating an + // initial fulfill/CS. + nodes[1].node.claim_funds(payment_preimage_1); + expect_payment_claimed!(nodes[1], payment_hash_1, payment_value_1); + check_added_monitors(&nodes[1], 1); + let bs_removes = get_htlc_update_msgs!(nodes[1], node_a_id); + + // This claim goes in B's holding cell, allowing us to have a pending B->A RAA which does not + // remove the second HTLC when we send the HTLC back from B to A. 
+ nodes[1].node.claim_funds(payment_preimage_2); + expect_payment_claimed!(nodes[1], payment_hash_2, 20_000); + check_added_monitors(&nodes[1], 1); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + + nodes[0].node.handle_update_fulfill_htlc(node_b_id, &bs_removes.update_fulfill_htlcs[0]); + nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_removes.commitment_signed); + check_added_monitors(&nodes[0], 1); + let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); + expect_payment_sent(&nodes[0], payment_preimage_1, None, false, false); + + nodes[1].node.handle_update_add_htlc(node_a_id, &send_1.msgs[0]); + nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &send_1.commitment_msg); + check_added_monitors(&nodes[1], 1); + // B is already AwaitingRAA, so cant generate a CS here + let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); + + nodes[1].node.handle_revoke_and_ack(node_a_id, &as_raa); + check_added_monitors(&nodes[1], 1); + let bs_cs = get_htlc_update_msgs!(nodes[1], node_a_id); + + nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_raa); + check_added_monitors(&nodes[0], 1); + let as_cs = get_htlc_update_msgs!(nodes[0], node_b_id); + + nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &as_cs.commitment_signed); + check_added_monitors(&nodes[1], 1); + let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); + + // The second HTLCis removed, but as A is in AwaitingRAA it can't generate a CS here, so the + // RAA that B generated above doesn't fully resolve the second HTLC from A's point of view. + // However, the RAA A generates here *does* fully resolve the HTLC from B's point of view (as A + // can no longer broadcast a commitment transaction with it and B has the preimage so can go + // on-chain as necessary). 
+ nodes[0].node.handle_update_fulfill_htlc(node_b_id, &bs_cs.update_fulfill_htlcs[0]); + nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_cs.commitment_signed); + check_added_monitors(&nodes[0], 1); + let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); + expect_payment_sent(&nodes[0], payment_preimage_2, None, false, false); + + nodes[1].node.handle_revoke_and_ack(node_a_id, &as_raa); + check_added_monitors(&nodes[1], 1); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + + expect_pending_htlcs_forwardable!(nodes[1]); + expect_payment_claimable!(nodes[1], payment_hash_3, payment_secret_3, 100000); + + // Note that as this RAA was generated before the delivery of the update_fulfill it shouldn't + // resolve the second HTLC from A's point of view. + nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_raa); + check_added_monitors(&nodes[0], 1); + expect_payment_path_successful!(nodes[0]); + let as_cs = get_htlc_update_msgs!(nodes[0], node_b_id); + + // Now that B doesn't have the second RAA anymore, but A still does, send a payment from B back + // to A to ensure that A doesn't count the almost-removed HTLC in update_add processing. 
+ let (route, payment_hash_4, payment_preimage_4, payment_secret_4) = + get_route_and_payment_hash!(nodes[1], nodes[0], 10000); + let send_2 = { + let onion = RecipientOnionFields::secret_only(payment_secret_4); + let id = PaymentId(payment_hash_4.0); + nodes[1].node.send_payment_with_route(route, payment_hash_4, onion, id).unwrap(); + check_added_monitors(&nodes[1], 1); + let mut events = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + SendEvent::from_event(events.remove(0)) + }; + + nodes[0].node.handle_update_add_htlc(node_b_id, &send_2.msgs[0]); + nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &send_2.commitment_msg); + check_added_monitors(&nodes[0], 1); + let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); + + // Now just resolve all the outstanding messages/HTLCs for completeness... + + nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &as_cs.commitment_signed); + check_added_monitors(&nodes[1], 1); + let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); + + nodes[1].node.handle_revoke_and_ack(node_a_id, &as_raa); + check_added_monitors(&nodes[1], 1); + + nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_raa); + check_added_monitors(&nodes[0], 1); + expect_payment_path_successful!(nodes[0]); + let as_cs = get_htlc_update_msgs!(nodes[0], node_b_id); + + nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &as_cs.commitment_signed); + check_added_monitors(&nodes[1], 1); + let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); + + nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_raa); + check_added_monitors(&nodes[0], 1); + + expect_pending_htlcs_forwardable!(nodes[0]); + expect_payment_claimable!(nodes[0], payment_hash_4, payment_secret_4, 10000); + + claim_payment(&nodes[1], &[&nodes[0]], payment_preimage_4); + claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_3); +} + +#[xtest(feature = 
"_externalize_tests")] +pub fn holding_cell_htlc_counting() { + // Tests that HTLCs in the holding cell count towards the pending HTLC limits on outbound HTLCs + // to ensure we don't end up with HTLCs sitting around in our holding cell for several + // commitment dance rounds. + let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + + create_announced_chan_between_nodes(&nodes, 0, 1); + let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2); + + // Fetch a route in advance as we will be unable to once we're unable to send. + let (route, payment_hash_1, _, payment_secret_1) = + get_route_and_payment_hash!(nodes[1], nodes[2], 100000); + + let mut payments = Vec::new(); + for _ in 0..50 { + let (route, payment_hash, payment_preimage, payment_secret) = + get_route_and_payment_hash!(nodes[1], nodes[2], 100000); + let onion = RecipientOnionFields::secret_only(payment_secret); + let id = PaymentId(payment_hash.0); + nodes[1].node.send_payment_with_route(route, payment_hash, onion, id).unwrap(); + payments.push((payment_preimage, payment_hash)); + } + check_added_monitors(&nodes[1], 1); + + let mut events = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + let initial_payment_event = SendEvent::from_event(events.pop().unwrap()); + assert_eq!(initial_payment_event.node_id, node_c_id); + + // There is now one HTLC in an outbound commitment transaction and (OUR_MAX_HTLCS - 1) HTLCs in + // the holding cell waiting on B's RAA to send. At this point we should not be able to add + // another HTLC. 
+ { + let onion = RecipientOnionFields::secret_only(payment_secret_1); + let id = PaymentId(payment_hash_1.0); + let res = nodes[1].node.send_payment_with_route(route, payment_hash_1, onion, id); + unwrap_send_err!(nodes[1], res, true, APIError::ChannelUnavailable { .. }, {}); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + } + + // This should also be true if we try to forward a payment. + let (route, payment_hash_2, _, payment_secret_2) = + get_route_and_payment_hash!(nodes[0], nodes[2], 100000); + let onion = RecipientOnionFields::secret_only(payment_secret_2); + let id = PaymentId(payment_hash_2.0); + nodes[0].node.send_payment_with_route(route, payment_hash_2, onion, id).unwrap(); + check_added_monitors(&nodes[0], 1); + + let mut events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + let payment_event = SendEvent::from_event(events.pop().unwrap()); + assert_eq!(payment_event.node_id, node_b_id); + + nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); + commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); + // We have to forward pending HTLCs twice - once tries to forward the payment forward (and + // fails), the second will process the resulting failure and fail the HTLC backward. 
+ expect_pending_htlcs_forwardable!(nodes[1]); + let fail = HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_2.2 }; + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![fail]); + check_added_monitors(&nodes[1], 1); + + let bs_fail_updates = get_htlc_update_msgs!(nodes[1], node_a_id); + nodes[0].node.handle_update_fail_htlc(node_b_id, &bs_fail_updates.update_fail_htlcs[0]); + commitment_signed_dance!(nodes[0], nodes[1], bs_fail_updates.commitment_signed, false, true); + + let failing_scid = chan_2.0.contents.short_channel_id; + expect_payment_failed_with_update!(nodes[0], payment_hash_2, false, failing_scid, false); + + // Now forward all the pending HTLCs and claim them back + nodes[2].node.handle_update_add_htlc(node_b_id, &initial_payment_event.msgs[0]); + nodes[2] + .node + .handle_commitment_signed_batch_test(node_b_id, &initial_payment_event.commitment_msg); + check_added_monitors(&nodes[2], 1); + + let (bs_revoke_and_ack, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[2], node_b_id); + nodes[1].node.handle_revoke_and_ack(node_c_id, &bs_revoke_and_ack); + check_added_monitors(&nodes[1], 1); + let as_updates = get_htlc_update_msgs!(nodes[1], node_c_id); + + nodes[1].node.handle_commitment_signed_batch_test(node_c_id, &bs_commitment_signed); + check_added_monitors(&nodes[1], 1); + let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_c_id); + + for ref update in as_updates.update_add_htlcs.iter() { + nodes[2].node.handle_update_add_htlc(node_b_id, update); + } + nodes[2].node.handle_commitment_signed_batch_test(node_b_id, &as_updates.commitment_signed); + check_added_monitors(&nodes[2], 1); + nodes[2].node.handle_revoke_and_ack(node_b_id, &as_raa); + check_added_monitors(&nodes[2], 1); + let (bs_revoke_and_ack, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[2], node_b_id); + + nodes[1].node.handle_revoke_and_ack(node_c_id, &bs_revoke_and_ack); + 
check_added_monitors(&nodes[1], 1); + nodes[1].node.handle_commitment_signed_batch_test(node_c_id, &bs_commitment_signed); + check_added_monitors(&nodes[1], 1); + let as_final_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_c_id); + + nodes[2].node.handle_revoke_and_ack(node_b_id, &as_final_raa); + check_added_monitors(&nodes[2], 1); + + expect_pending_htlcs_forwardable!(nodes[2]); + + let events = nodes[2].node.get_and_clear_pending_events(); + assert_eq!(events.len(), payments.len()); + for (event, &(_, ref hash)) in events.iter().zip(payments.iter()) { + match event { + &Event::PaymentClaimable { ref payment_hash, .. } => { + assert_eq!(*payment_hash, *hash); + }, + _ => panic!("Unexpected event"), + }; + } + + for (preimage, _) in payments.drain(..) { + claim_payment(&nodes[1], &[&nodes[2]], preimage); + } + + send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000); +} + +#[xtest(feature = "_externalize_tests")] +pub fn test_basic_channel_reserve() { + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000); + + let chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2); + let channel_reserve = chan_stat.channel_reserve_msat; + + // The 2* and +1 are for the fee spike reserve. 
+ let commit_tx_fee = 2 * commit_tx_fee_msat( + get_feerate!(nodes[0], nodes[1], chan.2), + 1 + 1, + &get_channel_type_features!(nodes[0], nodes[1], chan.2), + ); + let max_can_send = 5000000 - channel_reserve - commit_tx_fee; + let (mut route, our_payment_hash, _, our_payment_secret) = + get_route_and_payment_hash!(nodes[0], nodes[1], max_can_send); + route.paths[0].hops.last_mut().unwrap().fee_msat += 1; + let onion = RecipientOnionFields::secret_only(our_payment_secret); + let id = PaymentId(our_payment_hash.0); + let err = nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id); + unwrap_send_err!(nodes[0], err, true, APIError::ChannelUnavailable { .. }, {}); + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); + + send_payment(&nodes[0], &vec![&nodes[1]], max_can_send); +} + +#[xtest(feature = "_externalize_tests")] +pub fn test_fee_spike_violation_fails_htlc() { + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + + let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000); + + let (mut route, payment_hash, _, payment_secret) = + get_route_and_payment_hash!(nodes[0], nodes[1], 3460000); + route.paths[0].hops[0].fee_msat += 1; + // Need to manually create the update_add_htlc message to go around the channel reserve check in send_htlc() + let secp_ctx = Secp256k1::new(); + let session_priv = SecretKey::from_slice(&[42; 32]).expect("RNG is bad!"); + + let cur_height = nodes[1].node.best_block.read().unwrap().height + 1; + + let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route.paths[0], &session_priv); + let recipient_onion_fields = RecipientOnionFields::secret_only(payment_secret); + let 
(onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads( + &route.paths[0], + 3460001, + &recipient_onion_fields, + cur_height, + &None, + None, + None, + ) + .unwrap(); + let onion_packet = + onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash) + .unwrap(); + let msg = msgs::UpdateAddHTLC { + channel_id: chan.2, + htlc_id: 0, + amount_msat: htlc_msat, + payment_hash, + cltv_expiry: htlc_cltv, + onion_routing_packet: onion_packet, + skimmed_fee_msat: None, + blinding_point: None, + }; + + nodes[1].node.handle_update_add_htlc(node_a_id, &msg); + + // Now manually create the commitment_signed message corresponding to the update_add + // nodes[0] just sent. In the code for construction of this message, "local" refers + // to the sender of the message, and "remote" refers to the receiver. + + let feerate_per_kw = get_feerate!(nodes[0], nodes[1], chan.2); + + const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1; + + let (local_secret, next_local_point) = { + let per_peer_state = nodes[0].node.per_peer_state.read().unwrap(); + let chan_lock = per_peer_state.get(&node_b_id).unwrap().lock().unwrap(); + let local_chan = chan_lock.channel_by_id.get(&chan.2).and_then(Channel::as_funded).unwrap(); + let chan_signer = local_chan.get_signer(); + // Make the signer believe we validated another commitment, so we can release the secret + chan_signer.as_ecdsa().unwrap().get_enforcement_state().last_holder_commitment -= 1; + + ( + chan_signer.as_ref().release_commitment_secret(INITIAL_COMMITMENT_NUMBER).unwrap(), + chan_signer + .as_ref() + .get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 2, &secp_ctx) + .unwrap(), + ) + }; + let remote_point = { + let per_peer_lock; + let mut peer_state_lock; + + let channel = get_channel_ref!(nodes[1], nodes[0], per_peer_lock, peer_state_lock, chan.2); + let chan_signer = channel.as_funded().unwrap().get_signer(); + chan_signer + .as_ref() + 
.get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &secp_ctx) + .unwrap() + }; + + // Build the remote commitment transaction so we can sign it, and then later use the + // signature for the commitment_signed message. + let local_chan_balance = 1313; + + let accepted_htlc_info = chan_utils::HTLCOutputInCommitment { + offered: false, + amount_msat: 3460001, + cltv_expiry: htlc_cltv, + payment_hash, + transaction_output_index: Some(1), + }; + + let commitment_number = INITIAL_COMMITMENT_NUMBER - 1; + + let res = { + let per_peer_lock; + let mut peer_state_lock; + + let channel = get_channel_ref!(nodes[0], nodes[1], per_peer_lock, peer_state_lock, chan.2); + let chan_signer = channel.as_funded().unwrap().get_signer(); + + let commitment_tx = CommitmentTransaction::new( + commitment_number, + &remote_point, + 95000, + local_chan_balance, + feerate_per_kw, + vec![accepted_htlc_info], + &channel.funding().channel_transaction_parameters.as_counterparty_broadcastable(), + &secp_ctx, + ); + let params = &channel.funding().channel_transaction_parameters; + chan_signer + .as_ecdsa() + .unwrap() + .sign_counterparty_commitment(params, &commitment_tx, Vec::new(), Vec::new(), &secp_ctx) + .unwrap() + }; + + let commit_signed_msg = msgs::CommitmentSigned { + channel_id: chan.2, + signature: res.0, + htlc_signatures: res.1, + batch: None, + #[cfg(taproot)] + partial_signature_with_nonce: None, + }; + + // Send the commitment_signed message to the nodes[1]. + nodes[1].node.handle_commitment_signed(node_a_id, &commit_signed_msg); + let _ = nodes[1].node.get_and_clear_pending_msg_events(); + + // Send the RAA to nodes[1]. 
+ let raa_msg = msgs::RevokeAndACK { + channel_id: chan.2, + per_commitment_secret: local_secret, + next_per_commitment_point: next_local_point, + #[cfg(taproot)] + next_local_nonce: None, + }; + nodes[1].node.handle_revoke_and_ack(node_a_id, &raa_msg); + expect_pending_htlcs_forwardable!(nodes[1]); + expect_htlc_handling_failed_destinations!( + nodes[1].node.get_and_clear_pending_events(), + &[HTLCHandlingFailureType::Receive { payment_hash }] + ); + + let events = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + // Make sure the HTLC failed in the way we expect. + match events[0] { + MessageSendEvent::UpdateHTLCs { + updates: msgs::CommitmentUpdate { ref update_fail_htlcs, .. }, + .. + } => { + assert_eq!(update_fail_htlcs.len(), 1); + update_fail_htlcs[0].clone() + }, + _ => panic!("Unexpected event"), + }; + nodes[1].logger.assert_log("lightning::ln::channel", + format!("Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", raa_msg.channel_id), 1); + + check_added_monitors(&nodes[1], 3); +} + +#[xtest(feature = "_externalize_tests")] +pub fn test_chan_reserve_violation_outbound_htlc_inbound_chan() { + let mut chanmon_cfgs = create_chanmon_cfgs(2); + // Set the fee rate for the channel very high, to the point where the fundee + // sending any above-dust amount would result in a channel reserve violation. + // In this test we check that we would be prevented from sending an HTLC in + // this situation. 
+ let feerate_per_kw = *chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let default_config = UserConfig::default(); + let channel_type_features = ChannelTypeFeatures::only_static_remote_key(); + + let mut push_amt = 100_000_000; + push_amt -= commit_tx_fee_msat( + feerate_per_kw, + MIN_AFFORDABLE_HTLC_COUNT as u64, + &channel_type_features, + ); + + push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000; + + let _ = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, push_amt); + + // Fetch a route in advance as we will be unable to once we're unable to send. + let (route, our_payment_hash, _, our_payment_secret) = + get_route_and_payment_hash!(nodes[1], nodes[0], 1_000_000); + // Sending exactly enough to hit the reserve amount should be accepted + for _ in 0..MIN_AFFORDABLE_HTLC_COUNT { + route_payment(&nodes[1], &[&nodes[0]], 1_000_000); + } + + // However one more HTLC should be significantly over the reserve amount and fail. + let onion = RecipientOnionFields::secret_only(our_payment_secret); + let id = PaymentId(our_payment_hash.0); + let res = nodes[1].node.send_payment_with_route(route, our_payment_hash, onion, id); + unwrap_send_err!(nodes[1], res, true, APIError::ChannelUnavailable { .. 
}, {}); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); +} + +#[xtest(feature = "_externalize_tests")] +pub fn test_chan_reserve_violation_inbound_htlc_outbound_channel() { + let mut chanmon_cfgs = create_chanmon_cfgs(2); + let feerate_per_kw = *chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_b_id = nodes[1].node.get_our_node_id(); + + let default_config = UserConfig::default(); + let channel_type_features = ChannelTypeFeatures::only_static_remote_key(); + + // Set nodes[0]'s balance such that they will consider any above-dust received HTLC to be a + // channel reserve violation (so their balance is channel reserve (1000 sats) + commitment + // transaction fee with 0 HTLCs (183 sats)). + let mut push_amt = 100_000_000; + push_amt -= commit_tx_fee_msat( + feerate_per_kw, + MIN_AFFORDABLE_HTLC_COUNT as u64, + &channel_type_features, + ); + push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000; + let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, push_amt); + + // Send four HTLCs to cover the initial push_msat buffer we're required to include + for _ in 0..MIN_AFFORDABLE_HTLC_COUNT { + route_payment(&nodes[1], &[&nodes[0]], 1_000_000); + } + + let (mut route, payment_hash, _, payment_secret) = + get_route_and_payment_hash!(nodes[1], nodes[0], 1000); + route.paths[0].hops[0].fee_msat = 700_000; + // Need to manually create the update_add_htlc message to go around the channel reserve check in send_htlc() + let secp_ctx = Secp256k1::new(); + let session_priv = SecretKey::from_slice(&[42; 32]).unwrap(); + let cur_height = nodes[1].node.best_block.read().unwrap().height + 1; + let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route.paths[0], &session_priv); + 
let recipient_onion_fields = RecipientOnionFields::secret_only(payment_secret); + let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads( + &route.paths[0], + 700_000, + &recipient_onion_fields, + cur_height, + &None, + None, + None, + ) + .unwrap(); + let onion_packet = + onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash) + .unwrap(); + let msg = msgs::UpdateAddHTLC { + channel_id: chan.2, + htlc_id: MIN_AFFORDABLE_HTLC_COUNT as u64, + amount_msat: htlc_msat, + payment_hash, + cltv_expiry: htlc_cltv, + onion_routing_packet: onion_packet, + skimmed_fee_msat: None, + blinding_point: None, + }; + + nodes[0].node.handle_update_add_htlc(node_b_id, &msg); + // Check that the payment failed and the channel is closed in response to the malicious UpdateAdd. + nodes[0].logger.assert_log_contains("lightning::ln::channelmanager", "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value", 3); + assert_eq!(nodes[0].node.list_channels().len(), 0); + let err_msg = check_closed_broadcast!(nodes[0], true).unwrap(); + assert_eq!(err_msg.data, "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value"); + check_added_monitors(&nodes[0], 1); + check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_string() }, + [node_b_id], 100000); +} + +#[xtest(feature = "_externalize_tests")] +pub fn test_chan_reserve_dust_inbound_htlcs_outbound_chan() { + // Test that if we receive many dust HTLCs over an outbound channel, they don't count when + // calculating our commitment transaction fee (this was previously broken). 
+ let mut chanmon_cfgs = create_chanmon_cfgs(2); + let feerate_per_kw = *chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); + + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None, None]); + let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let default_config = UserConfig::default(); + let channel_type_features = ChannelTypeFeatures::only_static_remote_key(); + + // Set nodes[0]'s balance such that they will consider any above-dust received HTLC to be a + // channel reserve violation (so their balance is channel reserve (1000 sats) + commitment + // transaction fee with 0 HTLCs (183 sats)). + let mut push_amt = 100_000_000; + push_amt -= commit_tx_fee_msat( + feerate_per_kw, + MIN_AFFORDABLE_HTLC_COUNT as u64, + &channel_type_features, + ); + push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000; + create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, push_amt); + + let dust_amt = crate::ln::channel::MIN_CHAN_DUST_LIMIT_SATOSHIS * 1000 + + feerate_per_kw as u64 * htlc_success_tx_weight(&channel_type_features) / 1000 * 1000 + - 1; + // In the previous code, routing this dust payment would cause nodes[0] to perceive a channel + // reserve violation even though it's a dust HTLC and therefore shouldn't count towards the + // commitment transaction fee. + route_payment(&nodes[1], &[&nodes[0]], dust_amt); + + // Send four HTLCs to cover the initial push_msat buffer we're required to include + for _ in 0..MIN_AFFORDABLE_HTLC_COUNT { + route_payment(&nodes[1], &[&nodes[0]], 1_000_000); + } + + // One more than the dust amt should fail, however. 
+	let (mut route, our_payment_hash, _, our_payment_secret) =
+		get_route_and_payment_hash!(nodes[1], nodes[0], dust_amt);
+	route.paths[0].hops[0].fee_msat += 1;
+	let onion = RecipientOnionFields::secret_only(our_payment_secret);
+	let id = PaymentId(our_payment_hash.0);
+	let res = nodes[1].node.send_payment_with_route(route, our_payment_hash, onion, id);
+	unwrap_send_err!(nodes[1], res, true, APIError::ChannelUnavailable { .. }, {});
+}
+
+#[xtest(feature = "_externalize_tests")]
+pub fn test_chan_reserve_dust_inbound_htlcs_inbound_chan() {
+	// Test that if we receive many dust HTLCs over an inbound channel, they don't count when
+	// calculating our counterparty's commitment transaction fee (this was previously broken).
+	let chanmon_cfgs = create_chanmon_cfgs(2);
+	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+	// NOTE(review): three config entries are passed for a two-node network; the trailing `None`
+	// appears unused — consider trimming to `&[None, None]` for consistency with sibling tests.
+	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None, None]);
+	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+	create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 98000000);
+
+	let payment_amt = 46000; // Dust amount
+
+	// In the previous code, these first four payments would succeed.
+	route_payment(&nodes[0], &[&nodes[1]], payment_amt);
+	route_payment(&nodes[0], &[&nodes[1]], payment_amt);
+	route_payment(&nodes[0], &[&nodes[1]], payment_amt);
+	route_payment(&nodes[0], &[&nodes[1]], payment_amt);
+
+	// Then these next 5 would be interpreted by nodes[1] as violating the fee spike buffer.
+ route_payment(&nodes[0], &[&nodes[1]], payment_amt); + route_payment(&nodes[0], &[&nodes[1]], payment_amt); + route_payment(&nodes[0], &[&nodes[1]], payment_amt); + route_payment(&nodes[0], &[&nodes[1]], payment_amt); + route_payment(&nodes[0], &[&nodes[1]], payment_amt); + + // And this last payment previously resulted in nodes[1] closing on its inbound-channel + // counterparty, because it counted all the previous dust HTLCs against nodes[0]'s commitment + // transaction fee and therefore perceived this next payment as a channel reserve violation. + route_payment(&nodes[0], &[&nodes[1]], payment_amt); +} + +#[xtest(feature = "_externalize_tests")] +pub fn test_chan_reserve_violation_inbound_htlc_inbound_chan() { + let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + + let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000); + let _ = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 100000, 95000000); + + let feemsat = 239; + let total_routing_fee_msat = (nodes.len() - 2) as u64 * feemsat; + let chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2); + let feerate = get_feerate!(nodes[0], nodes[1], chan.2); + let channel_type_features = get_channel_type_features!(nodes[0], nodes[1], chan.2); + + // Add a 2* and +1 for the fee spike reserve. + let commit_tx_fee_2_htlc = 2 * commit_tx_fee_msat(feerate, 2 + 1, &channel_type_features); + let recv_value_1 = (chan_stat.value_to_self_msat + - chan_stat.channel_reserve_msat + - total_routing_fee_msat + - commit_tx_fee_2_htlc) + / 2; + let amt_msat_1 = recv_value_1 + total_routing_fee_msat; + + // Add a pending HTLC. 
+ let (route_1, our_payment_hash_1, _, our_payment_secret_1) = + get_route_and_payment_hash!(nodes[0], nodes[2], amt_msat_1); + let payment_event_1 = { + let onion = RecipientOnionFields::secret_only(our_payment_secret_1); + let id = PaymentId(our_payment_hash_1.0); + let route = route_1.clone(); + nodes[0].node.send_payment_with_route(route, our_payment_hash_1, onion, id).unwrap(); + check_added_monitors(&nodes[0], 1); + + let mut events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + SendEvent::from_event(events.remove(0)) + }; + nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event_1.msgs[0]); + + // Attempt to trigger a channel reserve violation --> payment failure. + let commit_tx_fee_2_htlcs = commit_tx_fee_msat(feerate, 2, &channel_type_features); + let recv_value_2 = chan_stat.value_to_self_msat + - amt_msat_1 + - chan_stat.channel_reserve_msat + - total_routing_fee_msat + - commit_tx_fee_2_htlcs + + 1; + let amt_msat_2 = recv_value_2 + total_routing_fee_msat; + let mut route_2 = route_1.clone(); + route_2.paths[0].hops.last_mut().unwrap().fee_msat = amt_msat_2; + + // Need to manually create the update_add_htlc message to go around the channel reserve check in send_htlc() + let secp_ctx = Secp256k1::new(); + let session_priv = SecretKey::from_slice(&[42; 32]).unwrap(); + let cur_height = nodes[0].node.best_block.read().unwrap().height + 1; + let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route_2.paths[0], &session_priv); + let recipient_onion_fields = RecipientOnionFields::spontaneous_empty(); + let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads( + &route_2.paths[0], + recv_value_2, + &recipient_onion_fields, + cur_height, + &None, + None, + None, + ) + .unwrap(); + let onion_packet = onion_utils::construct_onion_packet( + onion_payloads, + onion_keys, + [0; 32], + &our_payment_hash_1, + ) + .unwrap(); + let msg = msgs::UpdateAddHTLC { + channel_id: chan.2, + htlc_id: 
1, + amount_msat: htlc_msat + 1, + payment_hash: our_payment_hash_1, + cltv_expiry: htlc_cltv, + onion_routing_packet: onion_packet, + skimmed_fee_msat: None, + blinding_point: None, + }; + + nodes[1].node.handle_update_add_htlc(node_a_id, &msg); + // Check that the payment failed and the channel is closed in response to the malicious UpdateAdd. + nodes[1].logger.assert_log_contains( + "lightning::ln::channelmanager", + "Remote HTLC add would put them under remote reserve value", + 3, + ); + assert_eq!(nodes[1].node.list_channels().len(), 1); + let err_msg = check_closed_broadcast!(nodes[1], true).unwrap(); + assert_eq!(err_msg.data, "Remote HTLC add would put them under remote reserve value"); + check_added_monitors(&nodes[1], 1); + let reason = ClosureReason::ProcessingError { err: err_msg.data.clone() }; + check_closed_event!(nodes[1], 1, reason, [node_a_id], 100000); +} + +#[xtest(feature = "_externalize_tests")] +pub fn test_payment_route_reaching_same_channel_twice() { + //A route should not go through the same channel twice + //It is enforced when constructing a route. 
+	let chanmon_cfgs = create_chanmon_cfgs(2);
+	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+	let node_b_id = nodes[1].node.get_our_node_id();
+
+	let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 0);
+
+	let payment_params = PaymentParameters::from_node_id(node_b_id, 0)
+		.with_bolt11_features(nodes[1].node.bolt11_invoice_features())
+		.unwrap();
+	let (mut route, our_payment_hash, _, our_payment_secret) =
+		get_route_and_payment_hash!(nodes[0], nodes[1], payment_params, 100000000);
+
+	// Extend the path by itself, essentially simulating route going through same channel twice
+	let cloned_hops = route.paths[0].hops.clone();
+	route.paths[0].hops.extend_from_slice(&cloned_hops);
+
+	// The duplicated channel is rejected up-front as an InvalidRoute error, so no payment state
+	// is ever recorded (checked via list_recent_payments below).
+	unwrap_send_err!(nodes[0], nodes[0].node.send_payment_with_route(route, our_payment_hash,
+		RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
+	), false, APIError::InvalidRoute { ref err },
+		assert_eq!(err, &"Path went through the same channel twice"));
+	assert!(nodes[0].node.list_recent_payments().is_empty());
+}
+
+// BOLT 2 Requirements for the Sender when constructing and sending an update_add_htlc message.
+// BOLT 2 Requirement: MUST NOT offer amount_msat it cannot pay for in the remote commitment transaction at the current feerate_per_kw (see "Updating Fees") while maintaining its channel reserve.
+//TODO: I don't believe this is explicitly enforced when sending an HTLC but as the Fee aspect of the BOLT specs is in flux leaving this as a TODO.
+
+#[xtest(feature = "_externalize_tests")]
+pub fn test_update_add_htlc_bolt2_sender_value_below_minimum_msat() {
+	//BOLT2 Requirement: MUST NOT offer amount_msat below the receiving node's htlc_minimum_msat (same validation check catches both of these)
+	let chanmon_cfgs = create_chanmon_cfgs(2);
+	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+	let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
+
+	let (mut route, our_payment_hash, _, our_payment_secret) =
+		get_route_and_payment_hash!(nodes[0], nodes[1], 100000);
+	// Shrink the hop amount to 100 msat — below the receiver's htlc_minimum_msat — so the local
+	// send-side check rejects the payment without emitting any messages (asserted below).
+	route.paths[0].hops[0].fee_msat = 100;
+
+	let onion = RecipientOnionFields::secret_only(our_payment_secret);
+	let id = PaymentId(our_payment_hash.0);
+	let res = nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id);
+	unwrap_send_err!(nodes[0], res, true, APIError::ChannelUnavailable { .. }, {});
+	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+}
+
+#[xtest(feature = "_externalize_tests")]
+pub fn test_update_add_htlc_bolt2_sender_zero_value_msat() {
+	//BOLT2 Requirement: MUST offer amount_msat greater than 0.
+	let chanmon_cfgs = create_chanmon_cfgs(2);
+	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+	let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
+
+	let (mut route, our_payment_hash, _, our_payment_secret) =
+		get_route_and_payment_hash!(nodes[0], nodes[1], 100000);
+	// Force a 0-msat HTLC; the sender must refuse it locally before anything reaches the peer
+	// (no pending message events, asserted below).
+	route.paths[0].hops[0].fee_msat = 0;
+	let onion = RecipientOnionFields::secret_only(our_payment_secret);
+	let id = PaymentId(our_payment_hash.0);
+	let res = nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id);
+	unwrap_send_err!(nodes[0], res,
+		true, APIError::ChannelUnavailable { ref err },
+		assert_eq!(err, "Cannot send 0-msat HTLC"));
+
+	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+	nodes[0].logger.assert_log_contains(
+		"lightning::ln::channelmanager",
+		"Cannot send 0-msat HTLC",
+		2,
+	);
+}
+
+#[xtest(feature = "_externalize_tests")]
+pub fn test_update_add_htlc_bolt2_receiver_zero_value_msat() {
+	//BOLT2 Requirement: MUST offer amount_msat greater than 0.
+ let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + + let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000); + + let (route, our_payment_hash, _, our_payment_secret) = + get_route_and_payment_hash!(nodes[0], nodes[1], 100000); + let onion = RecipientOnionFields::secret_only(our_payment_secret); + let id = PaymentId(our_payment_hash.0); + nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id).unwrap(); + check_added_monitors(&nodes[0], 1); + let mut updates = get_htlc_update_msgs!(nodes[0], node_b_id); + updates.update_add_htlcs[0].amount_msat = 0; + + nodes[1].node.handle_update_add_htlc(node_a_id, &updates.update_add_htlcs[0]); + nodes[1].logger.assert_log_contains( + "lightning::ln::channelmanager", + "Remote side tried to send a 0-msat HTLC", + 3, + ); + check_closed_broadcast!(nodes[1], true).unwrap(); + check_added_monitors(&nodes[1], 1); + let reason = ClosureReason::ProcessingError { + err: "Remote side tried to send a 0-msat HTLC".to_string(), + }; + check_closed_event!(nodes[1], 1, reason, [node_a_id], 100000); +} + +#[xtest(feature = "_externalize_tests")] +pub fn test_update_add_htlc_bolt2_sender_cltv_expiry_too_high() { + //BOLT 2 Requirement: MUST set cltv_expiry less than 500000000. + //It is enforced when constructing a route. 
+ let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_b_id = nodes[1].node.get_our_node_id(); + + let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 0); + + let payment_params = PaymentParameters::from_node_id(node_b_id, 0) + .with_bolt11_features(nodes[1].node.bolt11_invoice_features()) + .unwrap(); + let (mut route, our_payment_hash, _, our_payment_secret) = + get_route_and_payment_hash!(nodes[0], nodes[1], payment_params, 100000000); + route.paths[0].hops.last_mut().unwrap().cltv_expiry_delta = 500000001; + + let onion = RecipientOnionFields::secret_only(our_payment_secret); + let id = PaymentId(our_payment_hash.0); + let res = nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id); + unwrap_send_err!(nodes[0], res, true, APIError::InvalidRoute { ref err }, + assert_eq!(err, &"Channel CLTV overflowed?")); +} + +#[xtest(feature = "_externalize_tests")] +pub fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_num_and_htlc_id_increment() { + //BOLT 2 Requirement: if result would be offering more than the remote's max_accepted_htlcs HTLCs, in the remote commitment transaction: MUST NOT add an HTLC. + //BOLT 2 Requirement: for the first HTLC it offers MUST set id to 0. + //BOLT 2 Requirement: MUST increase the value of id by 1 for each successive offer. 
+ let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + + let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 0); + let max_accepted_htlcs = { + let per_peer_lock; + let mut peer_state_lock; + + let channel = get_channel_ref!(nodes[1], nodes[0], per_peer_lock, peer_state_lock, chan.2); + channel.context().counterparty_max_accepted_htlcs as u64 + }; + + // Fetch a route in advance as we will be unable to once we're unable to send. + let (route, our_payment_hash, _, our_payment_secret) = + get_route_and_payment_hash!(nodes[0], nodes[1], 100000); + for i in 0..max_accepted_htlcs { + let (route, our_payment_hash, _, our_payment_secret) = + get_route_and_payment_hash!(nodes[0], nodes[1], 100000); + let payment_event = { + let onion = RecipientOnionFields::secret_only(our_payment_secret); + let id = PaymentId(our_payment_hash.0); + nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id).unwrap(); + check_added_monitors(&nodes[0], 1); + + let mut events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + if let MessageSendEvent::UpdateHTLCs { + updates: msgs::CommitmentUpdate { update_add_htlcs: ref htlcs, .. }, + .. 
+		} = events[0]
+		{
+			assert_eq!(htlcs[0].htlc_id, i);
+		} else {
+			assert!(false);
+		}
+		SendEvent::from_event(events.remove(0))
+	};
+	nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]);
+	check_added_monitors(&nodes[1], 0);
+	commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
+
+	expect_pending_htlcs_forwardable!(nodes[1]);
+	expect_payment_claimable!(nodes[1], our_payment_hash, our_payment_secret, 100000);
+	}
+	let onion = RecipientOnionFields::secret_only(our_payment_secret);
+	let id = PaymentId(our_payment_hash.0);
+	let res = nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id);
+	unwrap_send_err!(nodes[0], res, true, APIError::ChannelUnavailable { .. }, {});
+
+	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+}
+
+#[xtest(feature = "_externalize_tests")]
+pub fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_value_in_flight() {
+	//BOLT 2 Requirement: if the sum of total offered HTLCs would exceed the remote's max_htlc_value_in_flight_msat: MUST NOT add an HTLC.
+	let chanmon_cfgs = create_chanmon_cfgs(2);
+	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+	let channel_value = 100000;
+	let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, channel_value, 0);
+	let max_in_flight = get_channel_value_stat!(nodes[0], nodes[1], chan.2)
+		.counterparty_max_htlc_value_in_flight_msat;
+
+	send_payment(&nodes[0], &[&nodes[1]], max_in_flight);
+
+	let (mut route, our_payment_hash, _, our_payment_secret) =
+		get_route_and_payment_hash!(nodes[0], nodes[1], max_in_flight);
+	// Manually create a route over our max in flight (which our router normally automatically
+	// limits us to).
+ route.paths[0].hops[0].fee_msat = max_in_flight + 1; + let onion = RecipientOnionFields::secret_only(our_payment_secret); + let id = PaymentId(our_payment_hash.0); + let res = nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id); + unwrap_send_err!(nodes[0], res, true, APIError::ChannelUnavailable { .. }, {}); + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); + + send_payment(&nodes[0], &[&nodes[1]], max_in_flight); +} + +// BOLT 2 Requirements for the Receiver when handling an update_add_htlc message. +#[xtest(feature = "_externalize_tests")] +pub fn test_update_add_htlc_bolt2_receiver_check_amount_received_more_than_min() { + //BOLT2 Requirement: receiving an amount_msat equal to 0, OR less than its own htlc_minimum_msat -> SHOULD fail the channel. + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + + let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000); + let htlc_minimum_msat = { + let per_peer_lock; + let mut peer_state_lock; + + let channel = get_channel_ref!(nodes[0], nodes[1], per_peer_lock, peer_state_lock, chan.2); + channel.context().get_holder_htlc_minimum_msat() + }; + + let (route, our_payment_hash, _, our_payment_secret) = + get_route_and_payment_hash!(nodes[0], nodes[1], htlc_minimum_msat); + let onion = RecipientOnionFields::secret_only(our_payment_secret); + let id = PaymentId(our_payment_hash.0); + nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id).unwrap(); + check_added_monitors(&nodes[0], 1); + let mut updates = get_htlc_update_msgs!(nodes[0], node_b_id); + updates.update_add_htlcs[0].amount_msat = htlc_minimum_msat - 1; + 
nodes[1].node.handle_update_add_htlc(node_a_id, &updates.update_add_htlcs[0]); + assert!(nodes[1].node.list_channels().is_empty()); + let err_msg = check_closed_broadcast!(nodes[1], true).unwrap(); + assert!(regex::Regex::new(r"Remote side tried to send less than our minimum HTLC value\. Lower limit: \(\d+\)\. Actual: \(\d+\)").unwrap().is_match(err_msg.data.as_str())); + check_added_monitors(&nodes[1], 1); + let reason = ClosureReason::ProcessingError { err: err_msg.data }; + check_closed_event!(nodes[1], 1, reason, [node_a_id], 100000); +} + +#[xtest(feature = "_externalize_tests")] +pub fn test_update_add_htlc_bolt2_receiver_sender_can_afford_amount_sent() { + //BOLT2 Requirement: receiving an amount_msat that the sending node cannot afford at the current feerate_per_kw (while maintaining its channel reserve): SHOULD fail the channel + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + + let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000); + + let chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2); + let channel_reserve = chan_stat.channel_reserve_msat; + let feerate = get_feerate!(nodes[0], nodes[1], chan.2); + let channel_type_features = get_channel_type_features!(nodes[0], nodes[1], chan.2); + // The 2* and +1 are for the fee spike reserve. 
+ let commit_tx_fee_outbound = 2 * commit_tx_fee_msat(feerate, 1 + 1, &channel_type_features); + + let max_can_send = 5000000 - channel_reserve - commit_tx_fee_outbound; + let (route, our_payment_hash, _, our_payment_secret) = + get_route_and_payment_hash!(nodes[0], nodes[1], max_can_send); + let onion = RecipientOnionFields::secret_only(our_payment_secret); + let id = PaymentId(our_payment_hash.0); + nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id).unwrap(); + check_added_monitors(&nodes[0], 1); + let mut updates = get_htlc_update_msgs!(nodes[0], node_b_id); + + // Even though channel-initiator senders are required to respect the fee_spike_reserve, + // at this time channel-initiatee receivers are not required to enforce that senders + // respect the fee_spike_reserve. + updates.update_add_htlcs[0].amount_msat = max_can_send + commit_tx_fee_outbound + 1; + nodes[1].node.handle_update_add_htlc(node_a_id, &updates.update_add_htlcs[0]); + + assert!(nodes[1].node.list_channels().is_empty()); + let err_msg = check_closed_broadcast!(nodes[1], true).unwrap(); + assert_eq!(err_msg.data, "Remote HTLC add would put them under remote reserve value"); + check_added_monitors(&nodes[1], 1); + let reason = ClosureReason::ProcessingError { err: err_msg.data }; + check_closed_event!(nodes[1], 1, reason, [node_a_id], 100000); +} + +#[xtest(feature = "_externalize_tests")] +pub fn test_update_add_htlc_bolt2_receiver_check_max_htlc_limit() { + //BOLT 2 Requirement: if a sending node adds more than its max_accepted_htlcs HTLCs to its local commitment transaction: SHOULD fail the channel + //BOLT 2 Requirement: MUST allow multiple HTLCs with the same payment_hash. 
+ let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + + let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000); + + let send_amt = 3999999; + let (mut route, our_payment_hash, _, our_payment_secret) = + get_route_and_payment_hash!(nodes[0], nodes[1], 1000); + route.paths[0].hops[0].fee_msat = send_amt; + let session_priv = SecretKey::from_slice(&[42; 32]).unwrap(); + let cur_height = nodes[0].node.best_block.read().unwrap().height + 1; + let onion_keys = onion_utils::construct_onion_keys( + &Secp256k1::signing_only(), + &route.paths[0], + &session_priv, + ); + let recipient_onion_fields = RecipientOnionFields::secret_only(our_payment_secret); + let (onion_payloads, _htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads( + &route.paths[0], + send_amt, + &recipient_onion_fields, + cur_height, + &None, + None, + None, + ) + .unwrap(); + let onion_packet = + onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &our_payment_hash) + .unwrap(); + + let mut msg = msgs::UpdateAddHTLC { + channel_id: chan.2, + htlc_id: 0, + amount_msat: 1000, + payment_hash: our_payment_hash, + cltv_expiry: htlc_cltv, + onion_routing_packet: onion_packet.clone(), + skimmed_fee_msat: None, + blinding_point: None, + }; + + for i in 0..50 { + msg.htlc_id = i as u64; + nodes[1].node.handle_update_add_htlc(node_a_id, &msg); + } + msg.htlc_id = (50) as u64; + nodes[1].node.handle_update_add_htlc(node_a_id, &msg); + + assert!(nodes[1].node.list_channels().is_empty()); + let err_msg = check_closed_broadcast!(nodes[1], true).unwrap(); + assert!(regex::Regex::new(r"Remote tried to push more than our max accepted HTLCs \(\d+\)") + .unwrap() + .is_match(err_msg.data.as_str())); + check_added_monitors(&nodes[1], 
1); + let reason = ClosureReason::ProcessingError { err: err_msg.data }; + check_closed_event!(nodes[1], 1, reason, [node_a_id], 100000); +} + +#[xtest(feature = "_externalize_tests")] +pub fn test_update_add_htlc_bolt2_receiver_check_max_in_flight_msat() { + //OR adds more than its max_htlc_value_in_flight_msat worth of offered HTLCs to its local commitment transaction: SHOULD fail the channel + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + + let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000); + + let (route, our_payment_hash, _, our_payment_secret) = + get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); + let onion = RecipientOnionFields::secret_only(our_payment_secret); + let id = PaymentId(our_payment_hash.0); + nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id).unwrap(); + check_added_monitors(&nodes[0], 1); + let mut updates = get_htlc_update_msgs!(nodes[0], node_b_id); + updates.update_add_htlcs[0].amount_msat = get_channel_value_stat!(nodes[1], nodes[0], chan.2) + .counterparty_max_htlc_value_in_flight_msat + + 1; + nodes[1].node.handle_update_add_htlc(node_a_id, &updates.update_add_htlcs[0]); + + assert!(nodes[1].node.list_channels().is_empty()); + let err_msg = check_closed_broadcast!(nodes[1], true).unwrap(); + assert!(regex::Regex::new("Remote HTLC add would put them over our max HTLC value") + .unwrap() + .is_match(err_msg.data.as_str())); + check_added_monitors(&nodes[1], 1); + let reason = ClosureReason::ProcessingError { err: err_msg.data }; + check_closed_event!(nodes[1], 1, reason, [node_a_id], 1000000); +} + +#[xtest(feature = "_externalize_tests")] +pub fn 
test_update_add_htlc_bolt2_receiver_check_cltv_expiry() {
+	//BOLT2 Requirement: if sending node sets cltv_expiry to greater or equal to 500000000: SHOULD fail the channel.
+	let chanmon_cfgs = create_chanmon_cfgs(2);
+	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+	let node_a_id = nodes[0].node.get_our_node_id();
+	let node_b_id = nodes[1].node.get_our_node_id();
+
+	create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
+	let (route, our_payment_hash, _, our_payment_secret) =
+		get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
+	let reason = RecipientOnionFields::secret_only(our_payment_secret);
+	let id = PaymentId(our_payment_hash.0);
+	nodes[0].node.send_payment_with_route(route, our_payment_hash, reason, id).unwrap();
+	check_added_monitors(&nodes[0], 1);
+	let mut updates = get_htlc_update_msgs!(nodes[0], node_b_id);
+	updates.update_add_htlcs[0].cltv_expiry = 500000000;
+	nodes[1].node.handle_update_add_htlc(node_a_id, &updates.update_add_htlcs[0]);
+
+	assert!(nodes[1].node.list_channels().is_empty());
+	let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
+	assert_eq!(err_msg.data, "Remote provided CLTV expiry in seconds instead of block height");
+	check_added_monitors(&nodes[1], 1);
+	let reason = ClosureReason::ProcessingError { err: err_msg.data };
+	check_closed_event!(nodes[1], 1, reason, [node_a_id], 100000);
+}
+
+#[xtest(feature = "_externalize_tests")]
+pub fn test_update_add_htlc_bolt2_receiver_check_repeated_id_ignore() {
+	//BOLT 2 requirement: if the sender did not previously acknowledge the commitment of that HTLC: MUST ignore a repeated id value after a reconnection.
+	// We test this by first testing that repeated HTLCs pass commitment signature checks
+	// after disconnect and that non-sequential htlc_ids result in a channel failure.
+ let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + + create_announced_chan_between_nodes(&nodes, 0, 1); + let (route, our_payment_hash, _, our_payment_secret) = + get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); + let onion = RecipientOnionFields::secret_only(our_payment_secret); + let id = PaymentId(our_payment_hash.0); + nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id).unwrap(); + + check_added_monitors(&nodes[0], 1); + let updates = get_htlc_update_msgs!(nodes[0], node_b_id); + nodes[1].node.handle_update_add_htlc(node_a_id, &updates.update_add_htlcs[0]); + + //Disconnect and Reconnect + nodes[0].node.peer_disconnected(node_b_id); + nodes[1].node.peer_disconnected(node_a_id); + + let init_msg = msgs::Init { + features: nodes[0].node.init_features(), + networks: None, + remote_network_address: None, + }; + nodes[0].node.peer_connected(node_b_id, &init_msg, true).unwrap(); + let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]); + assert_eq!(reestablish_1.len(), 1); + + nodes[1].node.peer_connected(node_a_id, &init_msg, false).unwrap(); + let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]); + assert_eq!(reestablish_2.len(), 1); + + nodes[0].node.handle_channel_reestablish(node_b_id, &reestablish_2[0]); + handle_chan_reestablish_msgs!(nodes[0], nodes[1]); + nodes[1].node.handle_channel_reestablish(node_a_id, &reestablish_1[0]); + handle_chan_reestablish_msgs!(nodes[1], nodes[0]); + + //Resend HTLC + nodes[1].node.handle_update_add_htlc(node_a_id, &updates.update_add_htlcs[0]); + assert_eq!(updates.commitment_signed.len(), 1); + assert_eq!(updates.commitment_signed[0].htlc_signatures.len(), 1); + 
nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &updates.commitment_signed); + check_added_monitors(&nodes[1], 1); + let _bs_responses = get_revoke_commit_msgs!(nodes[1], node_a_id); + + nodes[1].node.handle_update_add_htlc(node_a_id, &updates.update_add_htlcs[0]); + + assert!(nodes[1].node.list_channels().is_empty()); + let err_msg = check_closed_broadcast!(nodes[1], true).unwrap(); + assert!(regex::Regex::new(r"Remote skipped HTLC ID \(skipped ID: \d+\)") + .unwrap() + .is_match(err_msg.data.as_str())); + check_added_monitors(&nodes[1], 1); + let reason = ClosureReason::ProcessingError { err: err_msg.data }; + check_closed_event!(nodes[1], 1, reason, [node_a_id], 100000); +} + +#[xtest(feature = "_externalize_tests")] +pub fn test_update_fulfill_htlc_bolt2_update_fulfill_htlc_before_commitment() { + //BOLT 2 Requirement: until the corresponding HTLC is irrevocably committed in both sides' commitment transactions: MUST NOT send an update_fulfill_htlc, update_fail_htlc, or update_fail_malformed_htlc. 
+ + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + + let chan = create_announced_chan_between_nodes(&nodes, 0, 1); + let (route, our_payment_hash, our_payment_preimage, our_payment_secret) = + get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); + let onion = RecipientOnionFields::secret_only(our_payment_secret); + let id = PaymentId(our_payment_hash.0); + nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id).unwrap(); + + check_added_monitors(&nodes[0], 1); + let updates = get_htlc_update_msgs!(nodes[0], node_b_id); + nodes[1].node.handle_update_add_htlc(node_a_id, &updates.update_add_htlcs[0]); + + let update_msg = msgs::UpdateFulfillHTLC { + channel_id: chan.2, + htlc_id: 0, + payment_preimage: our_payment_preimage, + }; + + nodes[0].node.handle_update_fulfill_htlc(node_b_id, &update_msg); + + assert!(nodes[0].node.list_channels().is_empty()); + let err_msg = check_closed_broadcast!(nodes[0], true).unwrap(); + assert!(regex::Regex::new( + r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed" + ) + .unwrap() + .is_match(err_msg.data.as_str())); + check_added_monitors(&nodes[0], 1); + let reason = ClosureReason::ProcessingError { err: err_msg.data }; + check_closed_event!(nodes[0], 1, reason, [node_b_id], 100000); +} + +#[xtest(feature = "_externalize_tests")] +pub fn test_update_fulfill_htlc_bolt2_update_fail_htlc_before_commitment() { + //BOLT 2 Requirement: until the corresponding HTLC is irrevocably committed in both sides' commitment transactions: MUST NOT send an update_fulfill_htlc, update_fail_htlc, or update_fail_malformed_htlc. 
+ + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + + let chan = create_announced_chan_between_nodes(&nodes, 0, 1); + + let (route, our_payment_hash, _, our_payment_secret) = + get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); + let onion = RecipientOnionFields::secret_only(our_payment_secret); + let id = PaymentId(our_payment_hash.0); + nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id).unwrap(); + check_added_monitors(&nodes[0], 1); + let updates = get_htlc_update_msgs!(nodes[0], node_b_id); + nodes[1].node.handle_update_add_htlc(node_a_id, &updates.update_add_htlcs[0]); + + let update_msg = msgs::UpdateFailHTLC { + channel_id: chan.2, + htlc_id: 0, + reason: Vec::new(), + attribution_data: Some(AttributionData::new()), + }; + + nodes[0].node.handle_update_fail_htlc(node_b_id, &update_msg); + + assert!(nodes[0].node.list_channels().is_empty()); + let err_msg = check_closed_broadcast!(nodes[0], true).unwrap(); + assert!(regex::Regex::new( + r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed" + ) + .unwrap() + .is_match(err_msg.data.as_str())); + check_added_monitors(&nodes[0], 1); + let reason = ClosureReason::ProcessingError { err: err_msg.data }; + check_closed_event!(nodes[0], 1, reason, [node_b_id], 100000); +} + +#[xtest(feature = "_externalize_tests")] +pub fn test_update_fulfill_htlc_bolt2_update_fail_malformed_htlc_before_commitment() { + //BOLT 2 Requirement: until the corresponding HTLC is irrevocably committed in both sides' commitment transactions: MUST NOT send an update_fulfill_htlc, update_fail_htlc, or update_fail_malformed_htlc. 
+ + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + + let chan = create_announced_chan_between_nodes(&nodes, 0, 1); + + let (route, our_payment_hash, _, our_payment_secret) = + get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); + let onion = RecipientOnionFields::secret_only(our_payment_secret); + let id = PaymentId(our_payment_hash.0); + nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id).unwrap(); + check_added_monitors(&nodes[0], 1); + let updates = get_htlc_update_msgs!(nodes[0], node_b_id); + nodes[1].node.handle_update_add_htlc(node_a_id, &updates.update_add_htlcs[0]); + let update_msg = msgs::UpdateFailMalformedHTLC { + channel_id: chan.2, + htlc_id: 0, + sha256_of_onion: [1; 32], + failure_code: 0x8000, + }; + + nodes[0].node.handle_update_fail_malformed_htlc(node_b_id, &update_msg); + + assert!(nodes[0].node.list_channels().is_empty()); + let err_msg = check_closed_broadcast!(nodes[0], true).unwrap(); + assert!(regex::Regex::new( + r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed" + ) + .unwrap() + .is_match(err_msg.data.as_str())); + check_added_monitors(&nodes[0], 1); + let reason = ClosureReason::ProcessingError { err: err_msg.data }; + check_closed_event!(nodes[0], 1, reason, [node_b_id], 100000); +} + +#[xtest(feature = "_externalize_tests")] +pub fn test_update_fulfill_htlc_bolt2_incorrect_htlc_id() { + //BOLT 2 Requirement: A receiving node: if the id does not correspond to an HTLC in its current commitment transaction MUST fail the channel. 
+ + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_b_id = nodes[1].node.get_our_node_id(); + + create_announced_chan_between_nodes(&nodes, 0, 1); + + let (our_payment_preimage, our_payment_hash, ..) = + route_payment(&nodes[0], &[&nodes[1]], 100_000); + + nodes[1].node.claim_funds(our_payment_preimage); + check_added_monitors(&nodes[1], 1); + expect_payment_claimed!(nodes[1], our_payment_hash, 100_000); + + let events = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + let mut update_fulfill_msg: msgs::UpdateFulfillHTLC = { + match events[0] { + MessageSendEvent::UpdateHTLCs { + updates: + msgs::CommitmentUpdate { + ref update_add_htlcs, + ref update_fulfill_htlcs, + ref update_fail_htlcs, + ref update_fail_malformed_htlcs, + ref update_fee, + .. + }, + .. + } => { + assert!(update_add_htlcs.is_empty()); + assert_eq!(update_fulfill_htlcs.len(), 1); + assert!(update_fail_htlcs.is_empty()); + assert!(update_fail_malformed_htlcs.is_empty()); + assert!(update_fee.is_none()); + update_fulfill_htlcs[0].clone() + }, + _ => panic!("Unexpected event"), + } + }; + + update_fulfill_msg.htlc_id = 1; + + nodes[0].node.handle_update_fulfill_htlc(node_b_id, &update_fulfill_msg); + + assert!(nodes[0].node.list_channels().is_empty()); + let err_msg = check_closed_broadcast!(nodes[0], true).unwrap(); + assert_eq!(err_msg.data, "Remote tried to fulfill/fail an HTLC we couldn't find"); + check_added_monitors(&nodes[0], 1); + let reason = ClosureReason::ProcessingError { err: err_msg.data }; + check_closed_event!(nodes[0], 1, reason, [node_b_id], 100000); +} + +#[xtest(feature = "_externalize_tests")] +pub fn test_update_fulfill_htlc_bolt2_wrong_preimage() { + //BOLT 2 Requirement: A receiving node: if the payment_preimage value in update_fulfill_htlc doesn't 
SHA256 hash to the corresponding HTLC payment_hash MUST fail the channel. + + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_b_id = nodes[1].node.get_our_node_id(); + + create_announced_chan_between_nodes(&nodes, 0, 1); + + let (our_payment_preimage, our_payment_hash, ..) = + route_payment(&nodes[0], &[&nodes[1]], 100_000); + + nodes[1].node.claim_funds(our_payment_preimage); + check_added_monitors(&nodes[1], 1); + expect_payment_claimed!(nodes[1], our_payment_hash, 100_000); + + let events = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + let mut update_fulfill_msg: msgs::UpdateFulfillHTLC = { + match events[0] { + MessageSendEvent::UpdateHTLCs { + updates: + msgs::CommitmentUpdate { + ref update_add_htlcs, + ref update_fulfill_htlcs, + ref update_fail_htlcs, + ref update_fail_malformed_htlcs, + ref update_fee, + .. + }, + .. 
+ } => { + assert!(update_add_htlcs.is_empty()); + assert_eq!(update_fulfill_htlcs.len(), 1); + assert!(update_fail_htlcs.is_empty()); + assert!(update_fail_malformed_htlcs.is_empty()); + assert!(update_fee.is_none()); + update_fulfill_htlcs[0].clone() + }, + _ => panic!("Unexpected event"), + } + }; + + update_fulfill_msg.payment_preimage = PaymentPreimage([1; 32]); + + nodes[0].node.handle_update_fulfill_htlc(node_b_id, &update_fulfill_msg); + + assert!(nodes[0].node.list_channels().is_empty()); + let err_msg = check_closed_broadcast!(nodes[0], true).unwrap(); + assert!(regex::Regex::new(r"Remote tried to fulfill HTLC \(\d+\) with an incorrect preimage") + .unwrap() + .is_match(err_msg.data.as_str())); + check_added_monitors(&nodes[0], 1); + let reason = ClosureReason::ProcessingError { err: err_msg.data }; + check_closed_event!(nodes[0], 1, reason, [node_b_id], 100000); +} + +#[xtest(feature = "_externalize_tests")] +pub fn test_update_fulfill_htlc_bolt2_missing_badonion_bit_for_malformed_htlc_message() { + //BOLT 2 Requirement: A receiving node: if the BADONION bit in failure_code is not set for update_fail_malformed_htlc MUST fail the channel. 
+ + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + + create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000); + + let (route, our_payment_hash, _, our_payment_secret) = + get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); + let onion = RecipientOnionFields::secret_only(our_payment_secret); + let id = PaymentId(our_payment_hash.0); + nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id).unwrap(); + check_added_monitors(&nodes[0], 1); + + let mut updates = get_htlc_update_msgs!(nodes[0], node_b_id); + updates.update_add_htlcs[0].onion_routing_packet.version = 1; //Produce a malformed HTLC message + + nodes[1].node.handle_update_add_htlc(node_a_id, &updates.update_add_htlcs[0]); + check_added_monitors(&nodes[1], 0); + commitment_signed_dance!(nodes[1], nodes[0], updates.commitment_signed, false, true); + expect_pending_htlcs_forwardable!(nodes[1]); + expect_htlc_handling_failed_destinations!( + nodes[1].node.get_and_clear_pending_events(), + &[HTLCHandlingFailureType::InvalidOnion] + ); + check_added_monitors(&nodes[1], 1); + + let events = nodes[1].node.get_and_clear_pending_msg_events(); + + let mut update_msg: msgs::UpdateFailMalformedHTLC = { + match events[0] { + MessageSendEvent::UpdateHTLCs { + updates: + msgs::CommitmentUpdate { + ref update_add_htlcs, + ref update_fulfill_htlcs, + ref update_fail_htlcs, + ref update_fail_malformed_htlcs, + ref update_fee, + .. + }, + .. 
+ } => { + assert!(update_add_htlcs.is_empty()); + assert!(update_fulfill_htlcs.is_empty()); + assert!(update_fail_htlcs.is_empty()); + assert_eq!(update_fail_malformed_htlcs.len(), 1); + assert!(update_fee.is_none()); + update_fail_malformed_htlcs[0].clone() + }, + _ => panic!("Unexpected event"), + } + }; + update_msg.failure_code &= !0x8000; + nodes[0].node.handle_update_fail_malformed_htlc(node_b_id, &update_msg); + + assert!(nodes[0].node.list_channels().is_empty()); + let err_msg = check_closed_broadcast!(nodes[0], true).unwrap(); + assert_eq!(err_msg.data, "Got update_fail_malformed_htlc with BADONION not set"); + check_added_monitors(&nodes[0], 1); + let reason = ClosureReason::ProcessingError { err: err_msg.data }; + check_closed_event!(nodes[0], 1, reason, [node_b_id], 1000000); +} diff --git a/lightning/src/ln/mod.rs b/lightning/src/ln/mod.rs index aa2d8c668ba..7b4795962b5 100644 --- a/lightning/src/ln/mod.rs +++ b/lightning/src/ln/mod.rs @@ -69,6 +69,12 @@ mod async_payments_tests; #[cfg(any(test, feature = "_externalize_tests"))] #[allow(unused_mut)] pub mod functional_tests; +#[cfg(any(test, feature = "_externalize_tests"))] +#[allow(unused_mut)] +pub mod htlc_reserve_unit_tests; +#[cfg(any(test, feature = "_externalize_tests"))] +#[allow(unused_mut)] +pub mod update_fee_tests; #[cfg(all(test, splicing))] #[allow(unused_mut)] mod splicing_tests; diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs index a2cc6e2774a..b185e739578 100644 --- a/lightning/src/ln/payment_tests.rs +++ b/lightning/src/ln/payment_tests.rs @@ -11,31 +11,46 @@ //! serialization ordering between ChannelManager/ChannelMonitors and ensuring we can still retry //! payments thereafter. 
+use crate::chain::channelmonitor::{ + ANTI_REORG_DELAY, HTLC_FAIL_BACK_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, +}; use crate::chain::{ChannelMonitorUpdateStatus, Confirm, Listen}; -use crate::chain::channelmonitor::{ANTI_REORG_DELAY, HTLC_FAIL_BACK_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS}; -use crate::sign::EntropySource; -use crate::events::{ClosureReason, Event, HTLCHandlingFailureType, PathFailure, PaymentFailureReason, PaymentPurpose}; -use crate::ln::channel::{EXPIRE_PREV_CONFIG_TICKS, get_holder_selected_channel_reserve_satoshis, ANCHOR_OUTPUT_VALUE_SATOSHI}; -use crate::ln::channelmanager::{BREAKDOWN_TIMEOUT, MPP_TIMEOUT_TICKS, MIN_CLTV_EXPIRY_DELTA, PaymentId, RecentPaymentDetails, RecipientOnionFields, HTLCForwardInfo, PendingHTLCRouting, PendingAddHTLCInfo}; -use crate::types::features::{Bolt11InvoiceFeatures, ChannelTypeFeatures}; -use crate::ln::msgs; -use crate::ln::types::ChannelId; -use crate::types::payment::{PaymentHash, PaymentSecret, PaymentPreimage}; +use crate::events::{ + ClosureReason, Event, HTLCHandlingFailureType, PathFailure, PaymentFailureReason, + PaymentPurpose, +}; use crate::ln::chan_utils; +use crate::ln::channel::{ + get_holder_selected_channel_reserve_satoshis, ANCHOR_OUTPUT_VALUE_SATOSHI, + EXPIRE_PREV_CONFIG_TICKS, +}; +use crate::ln::channelmanager::{ + HTLCForwardInfo, PaymentId, PendingAddHTLCInfo, PendingHTLCRouting, RecentPaymentDetails, + RecipientOnionFields, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, MPP_TIMEOUT_TICKS, +}; +use crate::ln::msgs; use crate::ln::msgs::{BaseMessageHandler, ChannelMessageHandler, MessageSendEvent}; use crate::ln::onion_utils::{self, LocalHTLCFailureReason}; -use crate::ln::outbound_payment::{IDEMPOTENCY_TIMEOUT_TICKS, ProbeSendFailure, Retry, RetryableSendFailure}; +use crate::ln::outbound_payment::{ + ProbeSendFailure, Retry, RetryableSendFailure, IDEMPOTENCY_TIMEOUT_TICKS, +}; +use crate::ln::types::ChannelId; use crate::routing::gossip::{EffectiveCapacity, RoutingFees}; -use 
crate::routing::router::{get_route, Path, PaymentParameters, Route, Router, RouteHint, RouteHintHop, RouteHop, RouteParameters}; +use crate::routing::router::{ + get_route, Path, PaymentParameters, Route, RouteHint, RouteHintHop, RouteHop, RouteParameters, + Router, +}; use crate::routing::scoring::ChannelUsage; -use crate::util::config::UserConfig; -use crate::util::test_utils; +use crate::sign::EntropySource; +use crate::types::features::{Bolt11InvoiceFeatures, ChannelTypeFeatures}; +use crate::types::payment::{PaymentHash, PaymentPreimage, PaymentSecret}; use crate::util::errors::APIError; use crate::util::ser::Writeable; use crate::util::string::UntrustedString; +use crate::util::test_utils; -use bitcoin::hashes::Hash; use bitcoin::hashes::sha256::Hash as Sha256; +use bitcoin::hashes::Hash; use bitcoin::network::Network; use bitcoin::secp256k1::{Secp256k1, SecretKey}; @@ -45,10 +60,12 @@ use crate::ln::functional_test_utils; use crate::ln::functional_test_utils::*; use crate::routing::gossip::NodeId; +use core::cmp::Ordering; + #[cfg(feature = "std")] use { crate::util::time::Instant as TestTime, - std::time::{SystemTime, Instant, Duration}, + std::time::{Duration, Instant, SystemTime}, }; #[test] @@ -58,22 +75,27 @@ fn mpp_failure() { let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]); let nodes = create_network(4, &node_cfgs, &node_chanmgrs); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id; let chan_2_id = create_announced_chan_between_nodes(&nodes, 0, 2).0.contents.short_channel_id; let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3).0.contents.short_channel_id; let chan_4_id = create_announced_chan_between_nodes(&nodes, 2, 3).0.contents.short_channel_id; - let (mut route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[3], 100000); + 
let (mut route, payment_hash, _, payment_secret) = + get_route_and_payment_hash!(&nodes[0], nodes[3], 100000); let path = route.paths[0].clone(); route.paths.push(path); - route.paths[0].hops[0].pubkey = nodes[1].node.get_our_node_id(); + route.paths[0].hops[0].pubkey = node_b_id; route.paths[0].hops[0].short_channel_id = chan_1_id; route.paths[0].hops[1].short_channel_id = chan_3_id; - route.paths[1].hops[0].pubkey = nodes[2].node.get_our_node_id(); + route.paths[1].hops[0].pubkey = node_c_id; route.paths[1].hops[0].short_channel_id = chan_2_id; route.paths[1].hops[1].short_channel_id = chan_4_id; - send_along_route_with_secret(&nodes[0], route, &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], 200_000, payment_hash, payment_secret); - fail_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_hash); + let paths: &[&[_]] = &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]]; + send_along_route_with_secret(&nodes[0], route, paths, 200_000, payment_hash, payment_secret); + fail_payment_along_route(&nodes[0], paths, false, payment_hash); } #[test] @@ -83,88 +105,102 @@ fn mpp_retry() { let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]); let nodes = create_network(4, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + let node_d_id = nodes[3].node.get_our_node_id(); + let (chan_1_update, _, _, _) = create_announced_chan_between_nodes(&nodes, 0, 1); let (chan_2_update, _, _, _) = create_announced_chan_between_nodes(&nodes, 0, 2); let (chan_3_update, _, _, _) = create_announced_chan_between_nodes(&nodes, 1, 3); let (chan_4_update, _, chan_4_id, _) = create_announced_chan_between_nodes(&nodes, 3, 2); + // Rebalance - send_payment(&nodes[3], &vec!(&nodes[2])[..], 1_500_000); + send_payment(&nodes[3], &[&nodes[2]], 1_500_000); let amt_msat = 1_000_000; - let 
max_total_routing_fee_msat = 50_000; - let payment_params = PaymentParameters::from_node_id(nodes[3].node.get_our_node_id(), TEST_FINAL_CLTV) - .with_bolt11_features(nodes[3].node.bolt11_invoice_features()).unwrap(); - let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!( - nodes[0], nodes[3], payment_params, amt_msat, Some(max_total_routing_fee_msat)); + let max_fee = 50_000; + let payment_params = PaymentParameters::from_node_id(node_d_id, TEST_FINAL_CLTV) + .with_bolt11_features(nodes[3].node.bolt11_invoice_features()) + .unwrap(); + let (mut route, hash, preimage, pay_secret) = + get_route_and_payment_hash!(nodes[0], nodes[3], payment_params, amt_msat, Some(max_fee)); let path = route.paths[0].clone(); route.paths.push(path); - route.paths[0].hops[0].pubkey = nodes[1].node.get_our_node_id(); + route.paths[0].hops[0].pubkey = node_b_id; route.paths[0].hops[0].short_channel_id = chan_1_update.contents.short_channel_id; route.paths[0].hops[1].short_channel_id = chan_3_update.contents.short_channel_id; - route.paths[1].hops[0].pubkey = nodes[2].node.get_our_node_id(); + route.paths[1].hops[0].pubkey = node_c_id; route.paths[1].hops[0].short_channel_id = chan_2_update.contents.short_channel_id; route.paths[1].hops[1].short_channel_id = chan_4_update.contents.short_channel_id; // Initiate the MPP payment. 
- let payment_id = PaymentId(payment_hash.0); + let id = PaymentId(hash.0); let mut route_params = route.route_params.clone().unwrap(); nodes[0].router.expect_find_route(route_params.clone(), Ok(route.clone())); - nodes[0].node.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret), - payment_id, route_params.clone(), Retry::Attempts(1)).unwrap(); + let onion = RecipientOnionFields::secret_only(pay_secret); + let retry = Retry::Attempts(1); + nodes[0].node.send_payment(hash, onion, id, route_params.clone(), retry).unwrap(); check_added_monitors!(nodes[0], 2); // one monitor per path let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 2); // Pass half of the payment along the success path. - let success_path_msgs = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &mut events); - pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 2_000_000, payment_hash, Some(payment_secret), success_path_msgs, false, None); + let init_msgs = remove_first_msg_event_to_node(&node_b_id, &mut events); + let path = &[&nodes[1], &nodes[3]]; + pass_along_path(&nodes[0], path, 2_000_000, hash, Some(pay_secret), init_msgs, false, None); // Add the HTLC along the first hop. - let fail_path_msgs_1 = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events); - let send_event = SendEvent::from_event(fail_path_msgs_1); - nodes[2].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &send_event.msgs[0]); + let second_msgs = remove_first_msg_event_to_node(&node_c_id, &mut events); + let send_event = SendEvent::from_event(second_msgs); + nodes[2].node.handle_update_add_htlc(node_a_id, &send_event.msgs[0]); commitment_signed_dance!(nodes[2], nodes[0], &send_event.commitment_msg, false); // Attempt to forward the payment and complete the 2nd path's failure. 
expect_pending_htlcs_forwardable!(&nodes[2]); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[2], vec![HTLCHandlingFailureType::Forward { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_4_id }]); - let htlc_updates = get_htlc_update_msgs!(nodes[2], nodes[0].node.get_our_node_id()); + let fail = HTLCHandlingFailureType::Forward { node_id: Some(node_d_id), channel_id: chan_4_id }; + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[2], [fail]); + let htlc_updates = get_htlc_update_msgs!(nodes[2], node_a_id); assert!(htlc_updates.update_add_htlcs.is_empty()); assert_eq!(htlc_updates.update_fail_htlcs.len(), 1); assert!(htlc_updates.update_fulfill_htlcs.is_empty()); assert!(htlc_updates.update_fail_malformed_htlcs.is_empty()); check_added_monitors!(nodes[2], 1); - nodes[0].node.handle_update_fail_htlc(nodes[2].node.get_our_node_id(), &htlc_updates.update_fail_htlcs[0]); + nodes[0].node.handle_update_fail_htlc(node_c_id, &htlc_updates.update_fail_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[2], htlc_updates.commitment_signed, false); let mut events = nodes[0].node.get_and_clear_pending_events(); match events[1] { Event::PendingHTLCsForwardable { .. } => {}, - _ => panic!("Unexpected event") + _ => panic!("Unexpected event"), } events.remove(1); - expect_payment_failed_conditions_event(events, payment_hash, false, PaymentFailedConditions::new().mpp_parts_remain()); + + let conditions = PaymentFailedConditions::new().mpp_parts_remain(); + expect_payment_failed_conditions_event(events, hash, false, conditions); // Rebalance the channel so the second half of the payment can succeed. - send_payment(&nodes[3], &vec!(&nodes[2])[..], 1_500_000); + send_payment(&nodes[3], &[&nodes[2]], 1_500_000); // Retry the second half of the payment and make sure it succeeds. 
route.paths.remove(0); route_params.final_value_msat = 1_000_000; - route_params.payment_params.previously_failed_channels.push(chan_4_update.contents.short_channel_id); + let chan_4_scid = chan_4_update.contents.short_channel_id; + route_params.payment_params.previously_failed_channels.push(chan_4_scid); // Check the remaining max total routing fee for the second attempt is 50_000 - 1_000 msat fee // used by the first path - route_params.max_total_routing_fee_msat = Some(max_total_routing_fee_msat - 1_000); + route_params.max_total_routing_fee_msat = Some(max_fee - 1_000); route.route_params = Some(route_params.clone()); nodes[0].router.expect_find_route(route_params, Ok(route)); nodes[0].node.process_pending_htlc_forwards(); check_added_monitors!(nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); - pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 2_000_000, payment_hash, Some(payment_secret), events.pop().unwrap(), true, None); - claim_payment_along_route( - ClaimAlongRouteArgs::new(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], payment_preimage) - ); + let event = events.pop().unwrap(); + let last_path = &[&nodes[2], &nodes[3]]; + pass_along_path(&nodes[0], last_path, 2_000_000, hash, Some(pay_secret), event, true, None); + let claim_paths: &[&[_]] = &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]]; + claim_payment_along_route(ClaimAlongRouteArgs::new(&nodes[0], claim_paths, preimage)); } #[test] @@ -175,28 +211,40 @@ fn mpp_retry_overpay() { // in the first attempt. 
let chanmon_cfgs = create_chanmon_cfgs(4); let node_cfgs = create_node_cfgs(4, &chanmon_cfgs); + let mut user_config = test_default_channel_config(); user_config.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 100; - let mut limited_config_1 = user_config.clone(); - limited_config_1.channel_handshake_config.our_htlc_minimum_msat = 35_000_000; - let mut limited_config_2 = user_config.clone(); - limited_config_2.channel_handshake_config.our_htlc_minimum_msat = 34_500_000; - let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, - &[Some(user_config.clone()), Some(limited_config_1), Some(limited_config_2), Some(user_config)]); + let mut limited_1 = user_config.clone(); + limited_1.channel_handshake_config.our_htlc_minimum_msat = 35_000_000; + let mut limited_2 = user_config.clone(); + limited_2.channel_handshake_config.our_htlc_minimum_msat = 34_500_000; + let configs = [Some(user_config.clone()), Some(limited_1), Some(limited_2), Some(user_config)]; + + let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &configs); let nodes = create_network(4, &node_cfgs, &node_chanmgrs); - let (chan_1_update, _, _, _) = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 40_000, 0); - let (chan_2_update, _, _, _) = create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 40_000, 0); - let (_chan_3_update, _, _, _) = create_announced_chan_between_nodes_with_value(&nodes, 1, 3, 40_000, 0); - let (chan_4_update, _, chan_4_id, _) = create_announced_chan_between_nodes_with_value(&nodes, 3, 2, 40_000, 0); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + let node_d_id = nodes[3].node.get_our_node_id(); + + let (chan_1_update, _, _, _) = + create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 40_000, 0); + let (chan_2_update, _, _, _) = + create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 40_000, 0); + let 
(_chan_3_update, _, _, _) = + create_announced_chan_between_nodes_with_value(&nodes, 1, 3, 40_000, 0); + let (chan_4_update, _, chan_4_id, _) = + create_announced_chan_between_nodes_with_value(&nodes, 3, 2, 40_000, 0); let amt_msat = 70_000_000; - let max_total_routing_fee_msat = Some(1_000_000); + let max_fee = Some(1_000_000); - let payment_params = PaymentParameters::from_node_id(nodes[3].node.get_our_node_id(), TEST_FINAL_CLTV) - .with_bolt11_features(nodes[3].node.bolt11_invoice_features()).unwrap(); - let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!( - nodes[0], nodes[3], payment_params, amt_msat, max_total_routing_fee_msat); + let payment_params = PaymentParameters::from_node_id(node_d_id, TEST_FINAL_CLTV) + .with_bolt11_features(nodes[3].node.bolt11_invoice_features()) + .unwrap(); + let (mut route, hash, payment_preimage, pay_secret) = + get_route_and_payment_hash!(nodes[0], nodes[3], payment_params, amt_msat, max_fee); // Check we overpay on the second path which we're about to fail. assert_eq!(chan_1_update.contents.fee_proportional_millionths, 0); @@ -209,54 +257,52 @@ fn mpp_retry_overpay() { let total_overpaid_amount = overpaid_amount_1 + overpaid_amount_2; // Initiate the payment. 
- let payment_id = PaymentId(payment_hash.0); + let id = PaymentId(hash.0); let mut route_params = route.route_params.clone().unwrap(); nodes[0].router.expect_find_route(route_params.clone(), Ok(route.clone())); - nodes[0].node.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret), - payment_id, route_params.clone(), Retry::Attempts(1)).unwrap(); + let onion = RecipientOnionFields::secret_only(pay_secret); + let retry = Retry::Attempts(1); + nodes[0].node.send_payment(hash, onion, id, route_params.clone(), retry).unwrap(); check_added_monitors!(nodes[0], 2); // one monitor per path let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 2); // Pass half of the payment along the success path. - let success_path_msgs = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &mut events); - pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], amt_msat, payment_hash, - Some(payment_secret), success_path_msgs, false, None); + let init_msgs = remove_first_msg_event_to_node(&node_b_id, &mut events); + let path = &[&nodes[1], &nodes[3]]; + pass_along_path(&nodes[0], path, amt_msat, hash, Some(pay_secret), init_msgs, false, None); // Add the HTLC along the first hop. - let fail_path_msgs_1 = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events); + let fail_path_msgs_1 = remove_first_msg_event_to_node(&node_c_id, &mut events); let send_event = SendEvent::from_event(fail_path_msgs_1); - nodes[2].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &send_event.msgs[0]); + nodes[2].node.handle_update_add_htlc(node_a_id, &send_event.msgs[0]); commitment_signed_dance!(nodes[2], nodes[0], &send_event.commitment_msg, false); // Attempt to forward the payment and complete the 2nd path's failure. 
expect_pending_htlcs_forwardable!(&nodes[2]); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[2], - vec![HTLCHandlingFailureType::Forward { - node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_4_id - }] - ); - let htlc_updates = get_htlc_update_msgs!(nodes[2], nodes[0].node.get_our_node_id()); + let fail = HTLCHandlingFailureType::Forward { node_id: Some(node_d_id), channel_id: chan_4_id }; + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[2], [fail]); + + let htlc_updates = get_htlc_update_msgs!(nodes[2], node_a_id); assert!(htlc_updates.update_add_htlcs.is_empty()); assert_eq!(htlc_updates.update_fail_htlcs.len(), 1); assert!(htlc_updates.update_fulfill_htlcs.is_empty()); assert!(htlc_updates.update_fail_malformed_htlcs.is_empty()); check_added_monitors!(nodes[2], 1); - nodes[0].node.handle_update_fail_htlc(nodes[2].node.get_our_node_id(), - &htlc_updates.update_fail_htlcs[0]); + nodes[0].node.handle_update_fail_htlc(node_c_id, &htlc_updates.update_fail_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[2], htlc_updates.commitment_signed, false); let mut events = nodes[0].node.get_and_clear_pending_events(); match events[1] { Event::PendingHTLCsForwardable { .. } => {}, - _ => panic!("Unexpected event") + _ => panic!("Unexpected event"), } events.remove(1); - expect_payment_failed_conditions_event(events, payment_hash, false, - PaymentFailedConditions::new().mpp_parts_remain()); + let fail_conditions = PaymentFailedConditions::new().mpp_parts_remain(); + expect_payment_failed_conditions_event(events, hash, false, fail_conditions); // Rebalance the channel so the second half of the payment can succeed. - send_payment(&nodes[3], &vec!(&nodes[2])[..], 38_000_000); + send_payment(&nodes[3], &[&nodes[2]], 38_000_000); // Retry the second half of the payment and make sure it succeeds. 
let first_path_value = route.paths[0].final_value_msat(); @@ -264,7 +310,8 @@ fn mpp_retry_overpay() { route.paths.remove(0); route_params.final_value_msat -= first_path_value; - route_params.payment_params.previously_failed_channels.push(chan_4_update.contents.short_channel_id); + let chan_4_scid = chan_4_update.contents.short_channel_id; + route_params.payment_params.previously_failed_channels.push(chan_4_scid); // Check the remaining max total routing fee for the second attempt accounts only for 1_000 msat // base fee, but not for overpaid value of the first try. route_params.max_total_routing_fee_msat.as_mut().map(|m| *m -= 1000); @@ -276,8 +323,9 @@ fn mpp_retry_overpay() { check_added_monitors!(nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); - pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], amt_msat, payment_hash, - Some(payment_secret), events.pop().unwrap(), true, None); + let event = events.pop().unwrap(); + let path = &[&nodes[2], &nodes[3]]; + pass_along_path(&nodes[0], path, amt_msat, hash, Some(pay_secret), event, true, None); // Can't use claim_payment_along_route as it doesn't support overpayment, so we break out the // individual steps here. 
@@ -296,31 +344,38 @@ fn do_mpp_receive_timeout(send_partial_mpp: bool) { let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]); let nodes = create_network(4, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + let node_d_id = nodes[3].node.get_our_node_id(); + let (chan_1_update, _, _, _) = create_announced_chan_between_nodes(&nodes, 0, 1); let (chan_2_update, _, _, _) = create_announced_chan_between_nodes(&nodes, 0, 2); let (chan_3_update, _, chan_3_id, _) = create_announced_chan_between_nodes(&nodes, 1, 3); let (chan_4_update, _, _, _) = create_announced_chan_between_nodes(&nodes, 2, 3); - let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[3], 100_000); + let (mut route, hash, payment_preimage, pay_secret) = + get_route_and_payment_hash!(nodes[0], nodes[3], 100_000); let path = route.paths[0].clone(); route.paths.push(path); - route.paths[0].hops[0].pubkey = nodes[1].node.get_our_node_id(); + route.paths[0].hops[0].pubkey = node_b_id; route.paths[0].hops[0].short_channel_id = chan_1_update.contents.short_channel_id; route.paths[0].hops[1].short_channel_id = chan_3_update.contents.short_channel_id; - route.paths[1].hops[0].pubkey = nodes[2].node.get_our_node_id(); + route.paths[1].hops[0].pubkey = node_c_id; route.paths[1].hops[0].short_channel_id = chan_2_update.contents.short_channel_id; route.paths[1].hops[1].short_channel_id = chan_4_update.contents.short_channel_id; // Initiate the MPP payment. 
- nodes[0].node.send_payment_with_route(route, payment_hash, - RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap(); + let onion = RecipientOnionFields::secret_only(pay_secret); + nodes[0].node.send_payment_with_route(route, hash, onion, PaymentId(hash.0)).unwrap(); check_added_monitors!(nodes[0], 2); // one monitor per path let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 2); // Pass half of the payment along the first path. - let node_1_msgs = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &mut events); - pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 200_000, payment_hash, Some(payment_secret), node_1_msgs, false, None); + let node_1_msgs = remove_first_msg_event_to_node(&node_b_id, &mut events); + let path = &[&nodes[1], &nodes[3]]; + pass_along_path(&nodes[0], path, 200_000, hash, Some(pay_secret), node_1_msgs, false, None); if send_partial_mpp { // Time out the partial MPP @@ -329,35 +384,45 @@ fn do_mpp_receive_timeout(send_partial_mpp: bool) { } // Failed HTLC from node 3 -> 1 - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[3], vec![HTLCHandlingFailureType::Receive { payment_hash }]); - let htlc_fail_updates_3_1 = get_htlc_update_msgs!(nodes[3], nodes[1].node.get_our_node_id()); - assert_eq!(htlc_fail_updates_3_1.update_fail_htlcs.len(), 1); - nodes[1].node.handle_update_fail_htlc(nodes[3].node.get_our_node_id(), &htlc_fail_updates_3_1.update_fail_htlcs[0]); + let fail = HTLCHandlingFailureType::Receive { payment_hash: hash }; + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[3], [fail]); + + let htlc_fail_updates = get_htlc_update_msgs!(nodes[3], node_b_id); + assert_eq!(htlc_fail_updates.update_fail_htlcs.len(), 1); + nodes[1].node.handle_update_fail_htlc(node_d_id, &htlc_fail_updates.update_fail_htlcs[0]); check_added_monitors!(nodes[3], 1); - commitment_signed_dance!(nodes[1], nodes[3], 
htlc_fail_updates_3_1.commitment_signed, false); + + commitment_signed_dance!(nodes[1], nodes[3], htlc_fail_updates.commitment_signed, false); // Failed HTLC from node 1 -> 0 - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingFailureType::Forward { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_3_id }]); - let htlc_fail_updates_1_0 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); - assert_eq!(htlc_fail_updates_1_0.update_fail_htlcs.len(), 1); - nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &htlc_fail_updates_1_0.update_fail_htlcs[0]); + let fail_type = + HTLCHandlingFailureType::Forward { node_id: Some(node_d_id), channel_id: chan_3_id }; + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [fail_type]); + + let htlc_fail_updates = get_htlc_update_msgs!(nodes[1], node_a_id); + assert_eq!(htlc_fail_updates.update_fail_htlcs.len(), 1); + nodes[0].node.handle_update_fail_htlc(node_b_id, &htlc_fail_updates.update_fail_htlcs[0]); check_added_monitors!(nodes[1], 1); - commitment_signed_dance!(nodes[0], nodes[1], htlc_fail_updates_1_0.commitment_signed, false); + commitment_signed_dance!(nodes[0], nodes[1], htlc_fail_updates.commitment_signed, false); - expect_payment_failed_conditions(&nodes[0], payment_hash, false, PaymentFailedConditions::new().mpp_parts_remain().expected_htlc_error_data(LocalHTLCFailureReason::MPPTimeout, &[][..])); + let mut conditions = PaymentFailedConditions::new() + .mpp_parts_remain() + .expected_htlc_error_data(LocalHTLCFailureReason::MPPTimeout, &[][..]); + expect_payment_failed_conditions(&nodes[0], hash, false, conditions); } else { // Pass half of the payment along the second path. 
- let node_2_msgs = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events); - pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 200_000, payment_hash, Some(payment_secret), node_2_msgs, true, None); + let node_2_msgs = remove_first_msg_event_to_node(&node_c_id, &mut events); + let path = &[&nodes[2], &nodes[3]]; + let secret = Some(pay_secret); + pass_along_path(&nodes[0], path, 200_000, hash, secret, node_2_msgs, true, None); // Even after MPP_TIMEOUT_TICKS we should not timeout the MPP if we have all the parts for _ in 0..MPP_TIMEOUT_TICKS { nodes[3].node.timer_tick_occurred(); } - claim_payment_along_route( - ClaimAlongRouteArgs::new(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], payment_preimage) - ); + let full_path: &[&[_]] = &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]]; + claim_payment_along_route(ClaimAlongRouteArgs::new(&nodes[0], full_path, payment_preimage)); } } @@ -379,25 +444,30 @@ fn do_test_keysend_payments(public_node: bool) { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + if public_node { create_announced_chan_between_nodes(&nodes, 0, 1); } else { create_chan_between_nodes(&nodes[0], &nodes[1]); } - let payee_pubkey = nodes[1].node.get_our_node_id(); let route_params = RouteParameters::from_payment_params_and_value( - PaymentParameters::for_keysend(payee_pubkey, 40, false), 10000); + PaymentParameters::for_keysend(node_b_id, 40, false), + 10000, + ); { - let test_preimage = PaymentPreimage([42; 32]); - nodes[0].node.send_spontaneous_payment( - Some(test_preimage), RecipientOnionFields::spontaneous_empty(), PaymentId(test_preimage.0), - route_params, Retry::Attempts(1) - ).unwrap(); + let preimage = Some(PaymentPreimage([42; 32])); + let onion = RecipientOnionFields::spontaneous_empty(); + let retry = 
Retry::Attempts(1); + let id = PaymentId([42; 32]); + nodes[0].node.send_spontaneous_payment(preimage, onion, id, route_params, retry).unwrap(); } + check_added_monitors!(nodes[0], 1); let send_event = SendEvent::from_node(&nodes[0]); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &send_event.msgs[0]); + nodes[1].node.handle_update_add_htlc(node_a_id, &send_event.msgs[0]); do_commitment_signed_dance(&nodes[1], &nodes[0], &send_event.commitment_msg, false, false); expect_pending_htlcs_forwardable!(nodes[1]); // Previously, a refactor caused us to stop including the payment preimage in the onion which @@ -406,9 +476,13 @@ fn do_test_keysend_payments(public_node: bool) { // extracting it from the onion nodes[1] received. let event = nodes[1].node.get_and_clear_pending_events(); assert_eq!(event.len(), 1); - if let Event::PaymentClaimable { purpose: PaymentPurpose::SpontaneousPayment(preimage), .. } = event[0] { - claim_payment(&nodes[0], &[&nodes[1]], preimage); - } else { panic!(); } + if let Event::PaymentClaimable { purpose, .. 
} = &event[0] { + if let PaymentPurpose::SpontaneousPayment(preimage) = purpose { + claim_payment(&nodes[0], &[&nodes[1]], *preimage); + } + } else { + panic!(); + } } #[test] @@ -418,38 +492,40 @@ fn test_mpp_keysend() { let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]); let nodes = create_network(4, &node_cfgs, &node_chanmgrs); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + let node_d_id = nodes[3].node.get_our_node_id(); + create_announced_chan_between_nodes(&nodes, 0, 1); create_announced_chan_between_nodes(&nodes, 0, 2); create_announced_chan_between_nodes(&nodes, 1, 3); create_announced_chan_between_nodes(&nodes, 2, 3); - let payee_pubkey = nodes[3].node.get_our_node_id(); let recv_value = 15_000_000; let route_params = RouteParameters::from_payment_params_and_value( - PaymentParameters::for_keysend(payee_pubkey, 40, true), recv_value); - - let payment_preimage = PaymentPreimage([42; 32]); - let payment_secret = PaymentSecret(payment_preimage.0); - let payment_hash = nodes[0].node.send_spontaneous_payment( - Some(payment_preimage), RecipientOnionFields::secret_only(payment_secret), - PaymentId(payment_preimage.0), route_params, Retry::Attempts(0) - ).unwrap(); + PaymentParameters::for_keysend(node_d_id, 40, true), + recv_value, + ); + + let preimage = Some(PaymentPreimage([42; 32])); + let secret = PaymentSecret([42; 32]); + let onion = RecipientOnionFields::secret_only(secret); + let retry = Retry::Attempts(0); + let id = PaymentId([42; 32]); + let hash = + nodes[0].node.send_spontaneous_payment(preimage, onion, id, route_params, retry).unwrap(); check_added_monitors!(nodes[0], 2); - let expected_route: &[&[&Node]] = &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]]; + let route: &[&[&Node]] = &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]]; let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 2); - let ev = 
remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &mut events); - pass_along_path(&nodes[0], expected_route[0], recv_value, payment_hash.clone(), - Some(payment_secret), ev.clone(), false, Some(payment_preimage)); + let ev = remove_first_msg_event_to_node(&node_b_id, &mut events); + pass_along_path(&nodes[0], route[0], recv_value, hash, Some(secret), ev, false, preimage); - let ev = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events); - pass_along_path(&nodes[0], expected_route[1], recv_value, payment_hash.clone(), - Some(payment_secret), ev.clone(), true, Some(payment_preimage)); - claim_payment_along_route( - ClaimAlongRouteArgs::new(&nodes[0], expected_route, payment_preimage) - ); + let ev = remove_first_msg_event_to_node(&node_c_id, &mut events); + pass_along_path(&nodes[0], route[1], recv_value, hash, Some(secret), ev, true, preimage); + claim_payment_along_route(ClaimAlongRouteArgs::new(&nodes[0], route, preimage.unwrap())); } #[test] @@ -462,37 +538,45 @@ fn test_reject_mpp_keysend_htlc_mismatching_secret() { let node_cfgs = create_node_cfgs(4, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]); let nodes = create_network(4, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + let node_d_id = nodes[3].node.get_our_node_id(); + let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id; let chan_2_id = create_announced_chan_between_nodes(&nodes, 0, 2).0.contents.short_channel_id; let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3).0.contents.short_channel_id; - let (update_a, _, chan_4_channel_id, _) = create_announced_chan_between_nodes(&nodes, 2, 3); + let (update_a, _, chan_4_chan_id, _) = create_announced_chan_between_nodes(&nodes, 2, 3); let chan_4_id = 
update_a.contents.short_channel_id; let amount = 40_000; - let (mut route, payment_hash, payment_preimage, _) = get_route_and_payment_hash!(nodes[0], nodes[3], amount); + let (mut route, payment_hash, payment_preimage, _) = + get_route_and_payment_hash!(nodes[0], nodes[3], amount); + let preimage = Some(payment_preimage); // Pay along nodes[1] - route.paths[0].hops[0].pubkey = nodes[1].node.get_our_node_id(); + route.paths[0].hops[0].pubkey = node_b_id; route.paths[0].hops[0].short_channel_id = chan_1_id; route.paths[0].hops[1].short_channel_id = chan_3_id; let payment_id_0 = PaymentId(nodes[0].keys_manager.backing.get_secure_random_bytes()); nodes[0].router.expect_find_route(route.route_params.clone().unwrap(), Ok(route.clone())); - nodes[0].node.send_spontaneous_payment( - Some(payment_preimage), RecipientOnionFields::spontaneous_empty(), payment_id_0, - route.route_params.clone().unwrap(), Retry::Attempts(0) - ).unwrap(); + let params = route.route_params.clone().unwrap(); + let onion = RecipientOnionFields::spontaneous_empty(); + let retry = Retry::Attempts(0); + nodes[0].node.send_spontaneous_payment(preimage, onion, payment_id_0, params, retry).unwrap(); check_added_monitors!(nodes[0], 1); - let update_0 = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); + let update_0 = get_htlc_update_msgs!(nodes[0], node_b_id); let update_add_0 = update_0.update_add_htlcs[0].clone(); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &update_add_0); + nodes[1].node.handle_update_add_htlc(node_a_id, &update_add_0); commitment_signed_dance!(nodes[1], nodes[0], &update_0.commitment_signed, false, true); expect_pending_htlcs_forwardable!(nodes[1]); check_added_monitors!(&nodes[1], 1); - let update_1 = get_htlc_update_msgs!(nodes[1], nodes[3].node.get_our_node_id()); + let update_1 = get_htlc_update_msgs!(nodes[1], node_d_id); let update_add_1 = update_1.update_add_htlcs[0].clone(); - 
nodes[3].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &update_add_1); + nodes[3].node.handle_update_add_htlc(node_b_id, &update_add_1); commitment_signed_dance!(nodes[3], nodes[1], update_1.commitment_signed, false, true); expect_pending_htlcs_forwardable_ignore!(nodes[3]); nodes[3].node.process_pending_update_add_htlcs(); @@ -501,16 +585,16 @@ fn test_reject_mpp_keysend_htlc_mismatching_secret() { for (_, pending_forwards) in nodes[3].node.forward_htlcs.lock().unwrap().iter_mut() { for f in pending_forwards.iter_mut() { match f { - &mut HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo { ref mut forward_info, .. }) => { - match forward_info.routing { - PendingHTLCRouting::ReceiveKeysend { ref mut payment_data, .. } => { - *payment_data = Some(msgs::FinalOnionHopData { - payment_secret: PaymentSecret([42; 32]), - total_msat: amount * 2, - }); - }, - _ => panic!("Expected PendingHTLCRouting::ReceiveKeysend"), - } + &mut HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo { + ref mut forward_info, .. + }) => match forward_info.routing { + PendingHTLCRouting::ReceiveKeysend { ref mut payment_data, .. 
} => { + *payment_data = Some(msgs::FinalOnionHopData { + payment_secret: PaymentSecret([42; 32]), + total_msat: amount * 2, + }); + }, + _ => panic!("Expected PendingHTLCRouting::ReceiveKeysend"), }, _ => {}, } @@ -519,28 +603,29 @@ fn test_reject_mpp_keysend_htlc_mismatching_secret() { nodes[3].node.process_pending_htlc_forwards(); // Pay along nodes[2] - route.paths[0].hops[0].pubkey = nodes[2].node.get_our_node_id(); + route.paths[0].hops[0].pubkey = node_c_id; route.paths[0].hops[0].short_channel_id = chan_2_id; route.paths[0].hops[1].short_channel_id = chan_4_id; let payment_id_1 = PaymentId(nodes[0].keys_manager.backing.get_secure_random_bytes()); nodes[0].router.expect_find_route(route.route_params.clone().unwrap(), Ok(route.clone())); - nodes[0].node.send_spontaneous_payment( - Some(payment_preimage), RecipientOnionFields::spontaneous_empty(), payment_id_1, - route.route_params.clone().unwrap(), Retry::Attempts(0) - ).unwrap(); + + let onion = RecipientOnionFields::spontaneous_empty(); + let params = route.route_params.clone().unwrap(); + let retry = Retry::Attempts(0); + nodes[0].node.send_spontaneous_payment(preimage, onion, payment_id_1, params, retry).unwrap(); check_added_monitors!(nodes[0], 1); - let update_2 = get_htlc_update_msgs!(nodes[0], nodes[2].node.get_our_node_id()); + let update_2 = get_htlc_update_msgs!(nodes[0], node_c_id); let update_add_2 = update_2.update_add_htlcs[0].clone(); - nodes[2].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &update_add_2); + nodes[2].node.handle_update_add_htlc(node_a_id, &update_add_2); commitment_signed_dance!(nodes[2], nodes[0], &update_2.commitment_signed, false, true); expect_pending_htlcs_forwardable!(nodes[2]); check_added_monitors!(&nodes[2], 1); - let update_3 = get_htlc_update_msgs!(nodes[2], nodes[3].node.get_our_node_id()); + let update_3 = get_htlc_update_msgs!(nodes[2], node_d_id); let update_add_3 = update_3.update_add_htlcs[0].clone(); - 
nodes[3].node.handle_update_add_htlc(nodes[2].node.get_our_node_id(), &update_add_3); + nodes[3].node.handle_update_add_htlc(node_c_id, &update_add_3); commitment_signed_dance!(nodes[3], nodes[2], update_3.commitment_signed, false, true); expect_pending_htlcs_forwardable_ignore!(nodes[3]); nodes[3].node.process_pending_update_add_htlcs(); @@ -549,7 +634,9 @@ fn test_reject_mpp_keysend_htlc_mismatching_secret() { for (_, pending_forwards) in nodes[3].node.forward_htlcs.lock().unwrap().iter_mut() { for f in pending_forwards.iter_mut() { match f { - &mut HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo { ref mut forward_info, .. }) => { + &mut HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo { + ref mut forward_info, .. + }) => { match forward_info.routing { PendingHTLCRouting::ReceiveKeysend { ref mut payment_data, .. } => { *payment_data = Some(msgs::FinalOnionHopData { @@ -565,24 +652,27 @@ fn test_reject_mpp_keysend_htlc_mismatching_secret() { } } nodes[3].node.process_pending_htlc_forwards(); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[3], vec![HTLCHandlingFailureType::Receive { payment_hash }]); + let fail_type = HTLCHandlingFailureType::Receive { payment_hash }; + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[3], [fail_type]); check_added_monitors!(nodes[3], 1); // Fail back along nodes[2] - let update_fail_0 = get_htlc_update_msgs!(&nodes[3], &nodes[2].node.get_our_node_id()); - nodes[2].node.handle_update_fail_htlc(nodes[3].node.get_our_node_id(), &update_fail_0.update_fail_htlcs[0]); + let update_fail_0 = get_htlc_update_msgs!(&nodes[3], &node_c_id); + nodes[2].node.handle_update_fail_htlc(node_d_id, &update_fail_0.update_fail_htlcs[0]); commitment_signed_dance!(nodes[2], nodes[3], update_fail_0.commitment_signed, false); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCHandlingFailureType::Forward { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_4_channel_id }]); + + let 
fail_type = + HTLCHandlingFailureType::Forward { node_id: Some(node_d_id), channel_id: chan_4_chan_id }; + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], [fail_type]); check_added_monitors!(nodes[2], 1); - let update_fail_1 = get_htlc_update_msgs!(nodes[2], nodes[0].node.get_our_node_id()); - nodes[0].node.handle_update_fail_htlc(nodes[2].node.get_our_node_id(), &update_fail_1.update_fail_htlcs[0]); + let update_fail_1 = get_htlc_update_msgs!(nodes[2], node_a_id); + nodes[0].node.handle_update_fail_htlc(node_c_id, &update_fail_1.update_fail_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[2], update_fail_1.commitment_signed, false); expect_payment_failed_conditions(&nodes[0], payment_hash, true, PaymentFailedConditions::new()); } - #[test] fn no_pending_leak_on_initial_send_failure() { // In an earlier version of our payment tracking, we'd have a retry entry even when the initial @@ -596,16 +686,21 @@ fn no_pending_leak_on_initial_send_failure() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + create_announced_chan_between_nodes(&nodes, 0, 1); - let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000); + let (route, payment_hash, _, payment_secret) = + get_route_and_payment_hash!(nodes[0], nodes[1], 100_000); - nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); - nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id()); + nodes[0].node.peer_disconnected(node_b_id); + nodes[1].node.peer_disconnected(node_a_id); - unwrap_send_err!(nodes[0], nodes[0].node.send_payment_with_route(route, payment_hash, - RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0) - ), true, APIError::ChannelUnavailable { ref err }, + let onion = 
RecipientOnionFields::secret_only(payment_secret); + let payment_id = PaymentId(payment_hash.0); + let res = nodes[0].node.send_payment_with_route(route, payment_hash, onion, payment_id); + unwrap_send_err!(nodes[0], res, true, APIError::ChannelUnavailable { ref err }, assert_eq!(err, "Peer for first hop currently disconnected")); assert!(!nodes[0].node.has_pending_payments()); @@ -628,46 +723,56 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) { let persister; let new_chain_monitor; let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); - let nodes_0_deserialized; + let node_a_reload; let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 1); let (_, _, chan_id_2, _) = create_announced_chan_between_nodes(&nodes, 1, 2); // Serialize the ChannelManager prior to sending payments - let nodes_0_serialized = nodes[0].node.encode(); + let node_a_ser = nodes[0].node.encode(); // Send two payments - one which will get to nodes[2] and will be claimed, one which we'll time // out and retry. 
let amt_msat = 1_000_000; - let (route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], amt_msat); - let (payment_preimage_1, payment_hash_1, _, payment_id_1) = send_along_route(&nodes[0], route.clone(), &[&nodes[1], &nodes[2]], 1_000_000); + let (route, payment_hash, payment_preimage, secret) = + get_route_and_payment_hash!(nodes[0], nodes[2], amt_msat); + let (payment_preimage_1, payment_hash_1, _, payment_id_1) = + send_along_route(&nodes[0], route.clone(), &[&nodes[1], &nodes[2]], 1_000_000); + let route_params = route.route_params.unwrap().clone(); - nodes[0].node.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret), - PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap(); + let onion = RecipientOnionFields::secret_only(secret); + let id = PaymentId(payment_hash.0); + nodes[0].node.send_payment(payment_hash, onion, id, route_params, Retry::Attempts(1)).unwrap(); check_added_monitors!(nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let payment_event = SendEvent::from_event(events.pop().unwrap()); - assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id()); + assert_eq!(payment_event.node_id, node_b_id); // We relay the payment to nodes[1] while its disconnected from nodes[2], causing the payment // to be returned immediately to nodes[0], without having nodes[2] fail the inbound payment // which would prevent retry. 
- nodes[1].node.peer_disconnected(nodes[2].node.get_our_node_id()); - nodes[2].node.peer_disconnected(nodes[1].node.get_our_node_id()); + nodes[1].node.peer_disconnected(node_c_id); + nodes[2].node.peer_disconnected(node_b_id); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); + nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false, true); + expect_pending_htlcs_forwardable!(nodes[1]); expect_htlc_handling_failed_destinations!( nodes[1].node.get_and_clear_pending_events(), - &[HTLCHandlingFailureType::Forward { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2}] + [HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_id_2 }] ); + check_added_monitors(&nodes[1], 1); // nodes[1] now immediately fails the HTLC as the next-hop channel is disconnected - let _ = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + let _ = get_htlc_update_msgs!(nodes[1], node_a_id); reconnect_nodes(ReconnectArgs::new(&nodes[1], &nodes[2])); @@ -680,16 +785,19 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) { // The ChannelMonitor should always be the latest version, as we're required to persist it // during the `commitment_signed_dance!()`. let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode(); - reload_node!(nodes[0], test_default_channel_config(), &nodes_0_serialized, &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_0_deserialized); + let config = test_default_channel_config(); + let mons: &[_] = &[&chan_0_monitor_serialized[..]]; + reload_node!(nodes[0], config, &node_a_ser, mons, persister, new_chain_monitor, node_a_reload); // On reload, the ChannelManager should realize it is stale compared to the ChannelMonitor and // force-close the channel. 
- check_closed_event!(nodes[0], 1, ClosureReason::OutdatedChannelManager, [nodes[1].node.get_our_node_id()], 100000); + check_closed_event!(nodes[0], 1, ClosureReason::OutdatedChannelManager, [node_b_id], 100000); assert!(nodes[0].node.list_channels().is_empty()); assert!(nodes[0].node.has_pending_payments()); nodes[0].node.timer_tick_occurred(); if !confirm_before_reload { - let as_broadcasted_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); + let as_broadcasted_txn = + nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); assert_eq!(as_broadcasted_txn.len(), 1); assert_eq!(as_broadcasted_txn[0].compute_txid(), as_commitment_tx.compute_txid()); } else { @@ -697,29 +805,35 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) { } check_added_monitors!(nodes[0], 1); - nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id()); - nodes[0].node.peer_connected(nodes[1].node.get_our_node_id(), &msgs::Init { - features: nodes[1].node.init_features(), networks: None, remote_network_address: None - }, true).unwrap(); + nodes[1].node.peer_disconnected(node_a_id); + + let init_msg = msgs::Init { + features: nodes[1].node.init_features(), + networks: None, + remote_network_address: None, + }; + nodes[0].node.peer_connected(node_b_id, &init_msg, true).unwrap(); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); // Now nodes[1] should send a channel reestablish, which nodes[0] will respond to with an // error, as the channel has hit the chain. 
- nodes[1].node.peer_connected(nodes[0].node.get_our_node_id(), &msgs::Init { - features: nodes[0].node.init_features(), networks: None, remote_network_address: None - }, false).unwrap(); + nodes[1].node.peer_connected(node_a_id, &init_msg, false).unwrap(); let bs_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap(); - nodes[0].node.handle_channel_reestablish(nodes[1].node.get_our_node_id(), &bs_reestablish); + nodes[0].node.handle_channel_reestablish(node_b_id, &bs_reestablish); let as_err = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(as_err.len(), 2); match as_err[1] { - MessageSendEvent::HandleError { node_id, action: msgs::ErrorAction::SendErrorMessage { ref msg } } => { - assert_eq!(node_id, nodes[1].node.get_our_node_id()); - nodes[1].node.handle_error(nodes[0].node.get_our_node_id(), msg); + MessageSendEvent::HandleError { + node_id, + action: msgs::ErrorAction::SendErrorMessage { ref msg }, + } => { + assert_eq!(node_id, node_b_id); + nodes[1].node.handle_error(node_a_id, msg); check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(format!("Got a message for a channel from the wrong node! 
No such channel for the passed counterparty_node_id {}", - &nodes[1].node.get_our_node_id())) }, [nodes[0].node.get_our_node_id()], 100000); + &node_b_id)) }, [node_a_id], 100000); check_added_monitors!(nodes[1], 1); - assert_eq!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0).len(), 1); + assert_eq!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1); + nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clear(); }, _ => panic!("Unexpected event"), } @@ -731,10 +845,10 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) { check_added_monitors!(nodes[2], 1); expect_payment_claimed!(nodes[2], payment_hash_1, 1_000_000); - let htlc_fulfill_updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); - nodes[1].node.handle_update_fulfill_htlc(nodes[2].node.get_our_node_id(), &htlc_fulfill_updates.update_fulfill_htlcs[0]); + let htlc_fulfill = get_htlc_update_msgs!(nodes[2], node_b_id); + nodes[1].node.handle_update_fulfill_htlc(node_c_id, &htlc_fulfill.update_fulfill_htlcs[0]); check_added_monitors!(nodes[1], 1); - commitment_signed_dance!(nodes[1], nodes[2], htlc_fulfill_updates.commitment_signed, false); + commitment_signed_dance!(nodes[1], nodes[2], htlc_fulfill.commitment_signed, false); expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], None, true, false); if confirm_before_reload { @@ -765,7 +879,7 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) { } mine_transaction(&nodes[0], &bs_htlc_claim_txn); expect_payment_sent(&nodes[0], payment_preimage_1, None, true, false); - connect_blocks(&nodes[0], TEST_FINAL_CLTV*4 + 20); + connect_blocks(&nodes[0], TEST_FINAL_CLTV * 4 + 20); let (first_htlc_timeout_tx, second_htlc_timeout_tx) = { let mut txn = nodes[0].tx_broadcaster.unique_txn_broadcast(); assert_eq!(txn.len(), 2); @@ -773,13 +887,15 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) { }; check_spends!(first_htlc_timeout_tx, as_commitment_tx); 
check_spends!(second_htlc_timeout_tx, as_commitment_tx); - if first_htlc_timeout_tx.input[0].previous_output == bs_htlc_claim_txn.input[0].previous_output { + if first_htlc_timeout_tx.input[0].previous_output == bs_htlc_claim_txn.input[0].previous_output + { confirm_transaction(&nodes[0], &second_htlc_timeout_tx); } else { confirm_transaction(&nodes[0], &first_htlc_timeout_tx); } nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clear(); - expect_payment_failed_conditions(&nodes[0], payment_hash, false, PaymentFailedConditions::new()); + let conditions = PaymentFailedConditions::new(); + expect_payment_failed_conditions(&nodes[0], payment_hash, false, conditions); // Finally, retry the payment (which was reloaded from the ChannelMonitor when nodes[0] was // reloaded) via a route over the new channel, which work without issue and eventually be @@ -791,8 +907,7 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) { // do_claim_payment_along_route expects us to never overpay. { let per_peer_state = nodes[1].node.per_peer_state.read().unwrap(); - let mut peer_state = per_peer_state.get(&nodes[2].node.get_our_node_id()) - .unwrap().lock().unwrap(); + let mut peer_state = per_peer_state.get(&node_c_id).unwrap().lock().unwrap(); let mut channel = peer_state.channel_by_id.get_mut(&chan_id_2).unwrap(); let mut new_config = channel.context().config(); new_config.forwarding_fee_base_msat += 100_000; @@ -805,17 +920,24 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) { nodes[1].node.timer_tick_occurred(); } - assert!(nodes[0].node.send_payment_with_route(new_route.clone(), payment_hash, // Shouldn't be allowed to retry a fulfilled payment - RecipientOnionFields::secret_only(payment_secret), payment_id_1).is_err()); - nodes[0].node.send_payment_with_route(new_route.clone(), payment_hash, - RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap(); + let onion = RecipientOnionFields::secret_only(secret); + // Check that we 
cannot retry a fulfilled payment + nodes[0] + .node + .send_payment_with_route(new_route.clone(), payment_hash, onion, payment_id_1) + .unwrap_err(); + // ...but if we send with a different PaymentId the payment should fly + let id = PaymentId(payment_hash.0); + let onion = RecipientOnionFields::secret_only(secret); + nodes[0].node.send_payment_with_route(new_route.clone(), payment_hash, onion, id).unwrap(); check_added_monitors!(nodes[0], 1); + let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); - pass_along_path(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000, payment_hash, Some(payment_secret), events.pop().unwrap(), true, None); - do_claim_payment_along_route( - ClaimAlongRouteArgs::new(&nodes[0], &[&[&nodes[1], &nodes[2]]], payment_preimage) - ); + let event = events.pop().unwrap(); + let path = &[&nodes[1], &nodes[2]]; + pass_along_path(&nodes[0], path, 1_000_000, payment_hash, Some(secret), event, true, None); + do_claim_payment_along_route(ClaimAlongRouteArgs::new(&nodes[0], &[path], payment_preimage)); expect_payment_sent!(nodes[0], payment_preimage, Some(new_route.paths[0].hops[0].fee_msat)); } @@ -837,20 +959,25 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) { let mut manually_accept_config = test_default_channel_config(); manually_accept_config.manually_accept_inbound_channels = true; - let first_persister; - let first_new_chain_monitor; - let second_persister; - let second_new_chain_monitor; - let third_persister; - let third_new_chain_monitor; + let persist_1; + let chain_1; + let persist_2; + let chain_2; + let persist_3; + let chain_3; - let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, Some(manually_accept_config), None]); - let first_nodes_0_deserialized; - let second_nodes_0_deserialized; - let third_nodes_0_deserialized; + let node_chanmgrs = + create_node_chanmgrs(3, &node_cfgs, &[None, Some(manually_accept_config), None]); + let node_a_reload_1; + let 
node_a_reload_2; + let node_a_reload_3; let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + // Because we set nodes[1] to manually accept channels, just open a 0-conf channel. let (funding_tx, chan_id) = open_zero_conf_channel(&nodes[0], &nodes[1], None); confirm_transaction(&nodes[0], &funding_tx); @@ -861,48 +988,59 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) { let chan_id_2 = create_announced_chan_between_nodes(&nodes, 1, 2).2; // Serialize the ChannelManager prior to sending payments - let mut nodes_0_serialized = nodes[0].node.encode(); + let mut node_a_ser = nodes[0].node.encode(); - let route = get_route_and_payment_hash!(nodes[0], nodes[2], if use_dust { 1_000 } else { 1_000_000 }).0; - let (payment_preimage, payment_hash, payment_secret, payment_id) = send_along_route(&nodes[0], route, &[&nodes[1], &nodes[2]], if use_dust { 1_000 } else { 1_000_000 }); + let amt = if use_dust { 1_000 } else { 1_000_000 }; + let route = get_route_and_payment_hash!(nodes[0], nodes[2], amt).0; + let (payment_preimage, hash, payment_secret, payment_id) = + send_along_route(&nodes[0], route, &[&nodes[1], &nodes[2]], amt); // The ChannelMonitor should always be the latest version, as we're required to persist it // during the `commitment_signed_dance!()`. 
- let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode(); + let mon_ser = get_monitor!(nodes[0], chan_id).encode(); - reload_node!(nodes[0], test_default_channel_config(), nodes_0_serialized, &[&chan_0_monitor_serialized], first_persister, first_new_chain_monitor, first_nodes_0_deserialized); - nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id()); + let config = test_default_channel_config(); + reload_node!(nodes[0], config, node_a_ser, &[&mon_ser], persist_1, chain_1, node_a_reload_1); + nodes[1].node.peer_disconnected(node_a_id); // On reload, the ChannelManager should realize it is stale compared to the ChannelMonitor and // force-close the channel. - check_closed_event!(nodes[0], 1, ClosureReason::OutdatedChannelManager, [nodes[1].node.get_our_node_id()], 100000); + check_closed_event!(nodes[0], 1, ClosureReason::OutdatedChannelManager, [node_b_id], 100000); nodes[0].node.timer_tick_occurred(); assert!(nodes[0].node.list_channels().is_empty()); assert!(nodes[0].node.has_pending_payments()); assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0).len(), 1); check_added_monitors!(nodes[0], 1); - nodes[0].node.peer_connected(nodes[1].node.get_our_node_id(), &msgs::Init { - features: nodes[1].node.init_features(), networks: None, remote_network_address: None - }, true).unwrap(); + let init_msg = msgs::Init { + features: nodes[1].node.init_features(), + networks: None, + remote_network_address: None, + }; + nodes[0].node.peer_connected(node_b_id, &init_msg, true).unwrap(); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); // Now nodes[1] should send a channel reestablish, which nodes[0] will respond to with an // error, as the channel has hit the chain. 
- nodes[1].node.peer_connected(nodes[0].node.get_our_node_id(), &msgs::Init { - features: nodes[0].node.init_features(), networks: None, remote_network_address: None - }, false).unwrap(); + nodes[1].node.peer_connected(node_a_id, &init_msg, false).unwrap(); let bs_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap(); - nodes[0].node.handle_channel_reestablish(nodes[1].node.get_our_node_id(), &bs_reestablish); + nodes[0].node.handle_channel_reestablish(node_b_id, &bs_reestablish); let as_err = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(as_err.len(), 2); let bs_commitment_tx; match as_err[1] { - MessageSendEvent::HandleError { node_id, action: msgs::ErrorAction::SendErrorMessage { ref msg } } => { - assert_eq!(node_id, nodes[1].node.get_our_node_id()); - nodes[1].node.handle_error(nodes[0].node.get_our_node_id(), msg); - check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", &nodes[1].node.get_our_node_id())) } - , [nodes[0].node.get_our_node_id()], 100000); + MessageSendEvent::HandleError { + node_id, + action: msgs::ErrorAction::SendErrorMessage { ref msg }, + } => { + assert_eq!(node_id, node_b_id); + nodes[1].node.handle_error(node_a_id, msg); + let msg = format!( + "Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", + &node_b_id + ); + let reason = ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(msg) }; + check_closed_event!(nodes[1], 1, reason, [node_a_id], 100000); check_added_monitors!(nodes[1], 1); bs_commitment_tx = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); }, @@ -913,15 +1051,18 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) { // Now fail back the payment from nodes[2] to nodes[1]. 
This doesn't really matter as the // previous hop channel is already on-chain, but it makes nodes[2] willing to see additional // incoming HTLCs with the same payment hash later. - nodes[2].node.fail_htlc_backwards(&payment_hash); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], [HTLCHandlingFailureType::Receive { payment_hash }]); + nodes[2].node.fail_htlc_backwards(&hash); + let fail_type = HTLCHandlingFailureType::Receive { payment_hash: hash }; + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], [fail_type]); check_added_monitors!(nodes[2], 1); - let htlc_fulfill_updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); - nodes[1].node.handle_update_fail_htlc(nodes[2].node.get_our_node_id(), &htlc_fulfill_updates.update_fail_htlcs[0]); + let htlc_fulfill_updates = get_htlc_update_msgs!(nodes[2], node_b_id); + nodes[1].node.handle_update_fail_htlc(node_c_id, &htlc_fulfill_updates.update_fail_htlcs[0]); commitment_signed_dance!(nodes[1], nodes[2], htlc_fulfill_updates.commitment_signed, false); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], - [HTLCHandlingFailureType::Forward { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!( + nodes[1], + [HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_id_2 }] + ); // Connect the HTLC-Timeout transaction, timing out the HTLC on both nodes (but not confirming // the HTLC-Timeout transaction beyond 1 conf). For dust HTLCs, the HTLC is considered resolved @@ -954,10 +1095,11 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) { // If we attempt to retry prior to the HTLC-Timeout (or commitment transaction, for dust HTLCs) // confirming, we will fail as it's considered still-pending... 
- let (new_route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[2], if use_dust { 1_000 } else { 1_000_000 }); - match nodes[0].node.send_payment_with_route(new_route.clone(), payment_hash, RecipientOnionFields::secret_only(payment_secret), payment_id) { + let (new_route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[2], amt); + let onion = RecipientOnionFields::secret_only(payment_secret); + match nodes[0].node.send_payment_with_route(new_route.clone(), hash, onion, payment_id) { Err(RetryableSendFailure::DuplicatePayment) => {}, - _ => panic!("Unexpected error") + _ => panic!("Unexpected error"), } assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); @@ -966,19 +1108,21 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) { // (which should also still work). connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); - expect_payment_failed_conditions(&nodes[0], payment_hash, false, PaymentFailedConditions::new()); + expect_payment_failed_conditions(&nodes[0], hash, false, PaymentFailedConditions::new()); let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode(); let chan_1_monitor_serialized = get_monitor!(nodes[0], chan_id_3).encode(); - nodes_0_serialized = nodes[0].node.encode(); + node_a_ser = nodes[0].node.encode(); // After the payment failed, we're free to send it again. 
- assert!(nodes[0].node.send_payment_with_route(new_route.clone(), payment_hash, - RecipientOnionFields::secret_only(payment_secret), payment_id).is_ok()); + let onion = RecipientOnionFields::secret_only(payment_secret); + nodes[0].node.send_payment_with_route(new_route.clone(), hash, onion, payment_id).unwrap(); assert!(!nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - reload_node!(nodes[0], test_default_channel_config(), nodes_0_serialized, &[&chan_0_monitor_serialized, &chan_1_monitor_serialized], second_persister, second_new_chain_monitor, second_nodes_0_deserialized); - nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id()); + let config = test_default_channel_config(); + let monitors = &[&chan_0_monitor_serialized[..], &chan_1_monitor_serialized[..]]; + reload_node!(nodes[0], config, node_a_ser, monitors, persist_2, chain_2, node_a_reload_2); + nodes[1].node.peer_disconnected(node_a_id); nodes[0].node.test_process_background_events(); @@ -988,34 +1132,38 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) { // Now resend the payment, delivering the HTLC and actually claiming it this time. This ensures // the payment is not (spuriously) listed as still pending. 
- assert!(nodes[0].node.send_payment_with_route(new_route.clone(), payment_hash, - RecipientOnionFields::secret_only(payment_secret), payment_id).is_ok()); + let onion = RecipientOnionFields::secret_only(payment_secret); + nodes[0].node.send_payment_with_route(new_route.clone(), hash, onion, payment_id).unwrap(); check_added_monitors!(nodes[0], 1); - pass_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], if use_dust { 1_000 } else { 1_000_000 }, payment_hash, payment_secret); + pass_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], amt, hash, payment_secret); claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage); - match nodes[0].node.send_payment_with_route(new_route.clone(), payment_hash, RecipientOnionFields::secret_only(payment_secret), payment_id) { + let onion = RecipientOnionFields::secret_only(payment_secret); + match nodes[0].node.send_payment_with_route(new_route.clone(), hash, onion, payment_id) { Err(RetryableSendFailure::DuplicatePayment) => {}, - _ => panic!("Unexpected error") + _ => panic!("Unexpected error"), } assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode(); let chan_1_monitor_serialized = get_monitor!(nodes[0], chan_id_3).encode(); - nodes_0_serialized = nodes[0].node.encode(); + node_a_ser = nodes[0].node.encode(); // Check that after reload we can send the payment again (though we shouldn't, since it was // claimed previously). 
- reload_node!(nodes[0], test_default_channel_config(), nodes_0_serialized, &[&chan_0_monitor_serialized, &chan_1_monitor_serialized], third_persister, third_new_chain_monitor, third_nodes_0_deserialized); - nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id()); + let config = test_default_channel_config(); + let monitors = &[&chan_0_monitor_serialized[..], &chan_1_monitor_serialized[..]]; + reload_node!(nodes[0], config, node_a_ser, monitors, persist_3, chain_3, node_a_reload_3); + nodes[1].node.peer_disconnected(node_a_id); nodes[0].node.test_process_background_events(); reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1])); - match nodes[0].node.send_payment_with_route(new_route, payment_hash, RecipientOnionFields::secret_only(payment_secret), payment_id) { + let onion = RecipientOnionFields::secret_only(payment_secret); + match nodes[0].node.send_payment_with_route(new_route, hash, onion, payment_id) { Err(RetryableSendFailure::DuplicatePayment) => {}, - _ => panic!("Unexpected error") + _ => panic!("Unexpected error"), } assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); } @@ -1026,7 +1174,9 @@ fn test_completed_payment_not_retryable_on_reload() { do_test_completed_payment_not_retryable_on_reload(false); } -fn do_test_dup_htlc_onchain_doesnt_fail_on_reload(persist_manager_post_event: bool, confirm_commitment_tx: bool, payment_timeout: bool) { +fn do_test_dup_htlc_onchain_doesnt_fail_on_reload( + persist_manager_post_event: bool, confirm_commitment_tx: bool, payment_timeout: bool, +) { // When a Channel is closed, any outbound HTLCs which were relayed through it are simply // dropped. 
From there, the ChannelManager relies on the ChannelMonitor having a copy of the // relevant fail-/claim-back data and processes the HTLC fail/claim when the ChannelMonitor tells @@ -1038,24 +1188,28 @@ fn do_test_dup_htlc_onchain_doesnt_fail_on_reload(persist_manager_post_event: bo let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let persister; - let new_chain_monitor; + let chain; let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); - let nodes_0_deserialized; + let node_a_reload; let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 1); - let error_message = "Channel force-closed"; + let error_message = "Channel force-closed".to_string(); // Route a payment, but force-close the channel before the HTLC fulfill message arrives at // nodes[0]. let (payment_preimage, payment_hash, ..) 
= route_payment(&nodes[0], &[&nodes[1]], 10_000_000); - nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap(); + nodes[0].node.force_close_broadcasting_latest_txn(&chan_id, &node_b_id, error_message).unwrap(); check_closed_broadcast!(nodes[0], true); check_added_monitors!(nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[1].node.get_our_node_id()], 100000); + let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }; + check_closed_event!(nodes[0], 1, reason, [node_b_id], 100000); - nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); - nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id()); + nodes[0].node.peer_disconnected(node_b_id); + nodes[1].node.peer_disconnected(node_a_id); // Connect blocks until the CLTV timeout is up so that we get an HTLC-Timeout transaction connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1); @@ -1074,7 +1228,7 @@ fn do_test_dup_htlc_onchain_doesnt_fail_on_reload(persist_manager_post_event: bo mine_transaction(&nodes[1], &commitment_tx); check_closed_broadcast!(nodes[1], true); check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [node_a_id], 100000); let htlc_success_tx = { let mut txn = nodes[1].tx_broadcaster.txn_broadcast(); assert_eq!(txn.len(), 1); @@ -1088,7 +1242,8 @@ fn do_test_dup_htlc_onchain_doesnt_fail_on_reload(persist_manager_post_event: bo connect_blocks(&nodes[0], BREAKDOWN_TIMEOUT as u32 - 1); } - let claim_block = create_dummy_block(nodes[0].best_block_hash(), 42, if payment_timeout { vec![htlc_timeout_tx] } else { vec![htlc_success_tx] }); + let txn = if payment_timeout { 
vec![htlc_timeout_tx] } else { vec![htlc_success_tx] }; + let claim_block = create_dummy_block(nodes[0].best_block_hash(), 42, txn); if payment_timeout { assert!(confirm_commitment_tx); // Otherwise we're spending below our CSV! @@ -1118,7 +1273,7 @@ fn do_test_dup_htlc_onchain_doesnt_fail_on_reload(persist_manager_post_event: bo chan_manager_serialized = nodes[0].node.encode(); } - let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode(); + let mon_ser = get_monitor!(nodes[0], chan_id).encode(); if payment_timeout { expect_payment_failed!(nodes[0], payment_hash, false); } else { @@ -1132,7 +1287,7 @@ fn do_test_dup_htlc_onchain_doesnt_fail_on_reload(persist_manager_post_event: bo } // Now reload nodes[0]... - reload_node!(nodes[0], &chan_manager_serialized, &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_0_deserialized); + reload_node!(nodes[0], &chan_manager_serialized, &[&mon_ser], persister, chain, node_a_reload); if persist_manager_post_event { assert!(nodes[0].node.get_and_clear_pending_events().is_empty()); @@ -1171,38 +1326,43 @@ fn test_fulfill_restart_failure() { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let persister; - let new_chain_monitor; + let chain; let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); - let nodes_1_deserialized; + let node_b_reload; let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let chan_id = create_announced_chan_between_nodes(&nodes, 0, 1).2; let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 100_000); // The simplest way to get a failure after a fulfill is to reload nodes[1] from a state // pre-fulfill, which we do by serializing it here. 
let chan_manager_serialized = nodes[1].node.encode(); - let chan_0_monitor_serialized = get_monitor!(nodes[1], chan_id).encode(); + let mon_ser = get_monitor!(nodes[1], chan_id).encode(); nodes[1].node.claim_funds(payment_preimage); check_added_monitors!(nodes[1], 1); expect_payment_claimed!(nodes[1], payment_hash, 100_000); - let htlc_fulfill_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); - nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &htlc_fulfill_updates.update_fulfill_htlcs[0]); + let htlc_fulfill = get_htlc_update_msgs!(nodes[1], node_a_id); + nodes[0].node.handle_update_fulfill_htlc(node_b_id, &htlc_fulfill.update_fulfill_htlcs[0]); expect_payment_sent(&nodes[0], payment_preimage, None, false, false); // Now reload nodes[1]... - reload_node!(nodes[1], &chan_manager_serialized, &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_1_deserialized); + reload_node!(nodes[1], &chan_manager_serialized, &[&mon_ser], persister, chain, node_b_reload); - nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); + nodes[0].node.peer_disconnected(node_b_id); reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1])); nodes[1].node.fail_htlc_backwards(&payment_hash); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingFailureType::Receive { payment_hash }]); + let fail_type = HTLCHandlingFailureType::Receive { payment_hash }; + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [fail_type]); check_added_monitors!(nodes[1], 1); - let htlc_fail_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); - nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &htlc_fail_updates.update_fail_htlcs[0]); + + let htlc_fail_updates = get_htlc_update_msgs!(nodes[1], node_a_id); + nodes[0].node.handle_update_fail_htlc(node_b_id, &htlc_fail_updates.update_fail_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[1], 
htlc_fail_updates.commitment_signed, false); // nodes[0] shouldn't generate any events here, while it just got a payment failure completion // it had already considered the payment fulfilled, and now they just got free money. @@ -1216,34 +1376,48 @@ fn get_ldk_payment_preimage() { let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + create_announced_chan_between_nodes(&nodes, 0, 1); let amt_msat = 60_000; let expiry_secs = 60 * 60; - let (payment_hash, payment_secret) = nodes[1].node.create_inbound_payment(Some(amt_msat), expiry_secs, None).unwrap(); + let (payment_hash, payment_secret) = + nodes[1].node.create_inbound_payment(Some(amt_msat), expiry_secs, None).unwrap(); - let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV) - .with_bolt11_features(nodes[1].node.bolt11_invoice_features()).unwrap(); + let payment_params = PaymentParameters::from_node_id(node_b_id, TEST_FINAL_CLTV) + .with_bolt11_features(nodes[1].node.bolt11_invoice_features()) + .unwrap(); let scorer = test_utils::TestScorer::new(); let keys_manager = test_utils::TestKeysInterface::new(&[0u8; 32], Network::Testnet); let random_seed_bytes = keys_manager.get_secure_random_bytes(); let route_params = RouteParameters::from_payment_params_and_value(payment_params, amt_msat); - let route = get_route( &nodes[0].node.get_our_node_id(), &route_params, + let route = get_route( + &node_a_id, + &route_params, &nodes[0].network_graph.read_only(), - Some(&nodes[0].node.list_usable_channels().iter().collect::>()), nodes[0].logger, - &scorer, &Default::default(), &random_seed_bytes).unwrap(); - nodes[0].node.send_payment_with_route(route, payment_hash, - RecipientOnionFields::secret_only(payment_secret), 
PaymentId(payment_hash.0)).unwrap(); + Some(&nodes[0].node.list_usable_channels().iter().collect::>()), + nodes[0].logger, + &scorer, + &Default::default(), + &random_seed_bytes, + ); + let onion = RecipientOnionFields::secret_only(payment_secret); + let id = PaymentId(payment_hash.0); + nodes[0].node.send_payment_with_route(route.unwrap(), payment_hash, onion, id).unwrap(); check_added_monitors!(nodes[0], 1); // Make sure to use `get_payment_preimage` - let payment_preimage = nodes[1].node.get_payment_preimage(payment_hash, payment_secret).unwrap(); + let preimage = nodes[1].node.get_payment_preimage(payment_hash, payment_secret).unwrap(); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); - pass_along_path(&nodes[0], &[&nodes[1]], amt_msat, payment_hash, Some(payment_secret), events.pop().unwrap(), true, Some(payment_preimage)); - claim_payment_along_route( - ClaimAlongRouteArgs::new(&nodes[0], &[&[&nodes[1]]], payment_preimage) - ); + let event = events.pop().unwrap(); + let secret = Some(payment_secret); + let path = &[&nodes[1]]; + pass_along_path(&nodes[0], path, amt_msat, payment_hash, secret, event, true, Some(preimage)); + claim_payment_along_route(ClaimAlongRouteArgs::new(&nodes[0], &[path], preimage)); } #[test] @@ -1253,6 +1427,8 @@ fn sent_probe_is_probe_of_sending_node() { let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None, None]); let nodes = create_network(3, &node_cfgs, &node_chanmgrs); + let node_b_id = nodes[1].node.get_our_node_id(); + create_announced_chan_between_nodes(&nodes, 0, 1); create_announced_chan_between_nodes(&nodes, 1, 2); @@ -1273,7 +1449,7 @@ fn sent_probe_is_probe_of_sending_node() { _ => panic!(), } - get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); + get_htlc_update_msgs!(nodes[0], node_b_id); check_added_monitors!(nodes[0], 1); } @@ -1308,30 +1484,33 @@ fn failed_probe_yields_event() { let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, 
&[None, None, None, None]); let nodes = create_network(3, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2; create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 100000, 90000000); - let payment_params = PaymentParameters::from_node_id(nodes[2].node.get_our_node_id(), 42); - - let (route, _, _, _) = get_route_and_payment_hash!(&nodes[0], nodes[2], payment_params, 9_998_000); + let params = PaymentParameters::from_node_id(node_c_id, 42); + let (route, _, _, _) = get_route_and_payment_hash!(&nodes[0], nodes[2], params, 9_998_000); let (payment_hash, payment_id) = nodes[0].node.send_probe(route.paths[0].clone()).unwrap(); // node[0] -- update_add_htlcs -> node[1] check_added_monitors!(nodes[0], 1); - let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); - let probe_event = SendEvent::from_commitment_update(nodes[1].node.get_our_node_id(), channel_id, updates); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &probe_event.msgs[0]); + let updates = get_htlc_update_msgs!(nodes[0], node_b_id); + let probe_event = SendEvent::from_commitment_update(node_b_id, channel_id, updates); + nodes[1].node.handle_update_add_htlc(node_a_id, &probe_event.msgs[0]); check_added_monitors!(nodes[1], 0); commitment_signed_dance!(nodes[1], nodes[0], probe_event.commitment_msg, false); expect_pending_htlcs_forwardable!(nodes[1]); // node[0] <- update_fail_htlcs -- node[1] check_added_monitors!(nodes[1], 1); - let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + let updates = get_htlc_update_msgs!(nodes[1], node_a_id); // Skip the PendingHTLCsForwardable event let _events = nodes[1].node.get_and_clear_pending_events(); - nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), 
&updates.update_fail_htlcs[0]); + nodes[0].node.handle_update_fail_htlc(node_b_id, &updates.update_fail_htlcs[0]); check_added_monitors!(nodes[0], 0); commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, false); @@ -1356,10 +1535,14 @@ fn onchain_failed_probe_yields_event() { let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); let nodes = create_network(3, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + let chan_id = create_announced_chan_between_nodes(&nodes, 0, 1).2; create_announced_chan_between_nodes(&nodes, 1, 2); - let payment_params = PaymentParameters::from_node_id(nodes[2].node.get_our_node_id(), 42); + let payment_params = PaymentParameters::from_node_id(node_c_id, 42); // Send a dust HTLC, which will be treated as if it timed out once the channel hits the chain. let (route, _, _, _) = get_route_and_payment_hash!(&nodes[0], nodes[2], payment_params, 1_000); @@ -1367,15 +1550,15 @@ fn onchain_failed_probe_yields_event() { // node[0] -- update_add_htlcs -> node[1] check_added_monitors!(nodes[0], 1); - let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); - let probe_event = SendEvent::from_commitment_update(nodes[1].node.get_our_node_id(), chan_id, updates); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &probe_event.msgs[0]); + let updates = get_htlc_update_msgs!(nodes[0], node_b_id); + let probe_event = SendEvent::from_commitment_update(node_b_id, chan_id, updates); + nodes[1].node.handle_update_add_htlc(node_a_id, &probe_event.msgs[0]); check_added_monitors!(nodes[1], 0); commitment_signed_dance!(nodes[1], nodes[0], probe_event.commitment_msg, false); expect_pending_htlcs_forwardable!(nodes[1]); check_added_monitors!(nodes[1], 1); - let _ = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id()); + let _ = 
get_htlc_update_msgs!(nodes[1], node_c_id); // Don't bother forwarding the HTLC onwards and just confirm the force-close transaction on // Node A, which after 6 confirmations should result in a probe failure event. @@ -1408,13 +1591,16 @@ fn preflight_probes_yield_event_skip_private_hop() { let node_cfgs = create_node_cfgs(5, &chanmon_cfgs); // We alleviate the HTLC max-in-flight limit, as otherwise we'd always be limited through that. - let mut no_htlc_limit_config = test_default_channel_config(); - no_htlc_limit_config.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 100; + let mut config = test_default_channel_config(); + config.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 100; + let config = Some(config); - let user_configs = std::iter::repeat(no_htlc_limit_config).take(5).map(|c| Some(c)).collect::>>(); - let node_chanmgrs = create_node_chanmgrs(5, &node_cfgs, &user_configs); + let configs = [config.clone(), config.clone(), config.clone(), config.clone(), config]; + let node_chanmgrs = create_node_chanmgrs(5, &node_cfgs, &configs[..]); let nodes = create_network(5, &node_cfgs, &node_chanmgrs); + let node_d_id = nodes[3].node.get_our_node_id(); + // Setup channel topology: // N0 -(1M:0)- N1 -(1M:0)- N2 -(70k:0)- N3 -(50k:0)- N4 @@ -1426,14 +1612,16 @@ fn preflight_probes_yield_event_skip_private_hop() { let mut invoice_features = Bolt11InvoiceFeatures::empty(); invoice_features.set_basic_mpp_optional(); - let payment_params = PaymentParameters::from_node_id(nodes[3].node.get_our_node_id(), TEST_FINAL_CLTV) - .with_bolt11_features(invoice_features).unwrap(); + let payment_params = PaymentParameters::from_node_id(node_d_id, TEST_FINAL_CLTV) + .with_bolt11_features(invoice_features) + .unwrap(); let recv_value = 50_000_000; let route_params = RouteParameters::from_payment_params_and_value(payment_params, recv_value); let res = nodes[0].node.send_preflight_probes(route_params, None).unwrap(); - let 
expected_route: &[(&[&Node], PaymentHash)] = &[(&[&nodes[1], &nodes[2], &nodes[3]], res[0].0)]; + let expected_route: &[(&[&Node], PaymentHash)] = + &[(&[&nodes[1], &nodes[2], &nodes[3]], res[0].0)]; assert_eq!(res.len(), expected_route.len()); @@ -1450,13 +1638,16 @@ fn preflight_probes_yield_event() { let node_cfgs = create_node_cfgs(4, &chanmon_cfgs); // We alleviate the HTLC max-in-flight limit, as otherwise we'd always be limited through that. - let mut no_htlc_limit_config = test_default_channel_config(); - no_htlc_limit_config.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 100; + let mut config = test_default_channel_config(); + config.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 100; + let config = Some(config); - let user_configs = std::iter::repeat(no_htlc_limit_config).take(4).map(|c| Some(c)).collect::>>(); - let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &user_configs); + let configs = [config.clone(), config.clone(), config.clone(), config]; + let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &configs[..]); let nodes = create_network(4, &node_cfgs, &node_chanmgrs); + let node_d_id = nodes[3].node.get_our_node_id(); + // Setup channel topology: // (1M:0)- N1 -(30k:0) // / \ @@ -1472,14 +1663,16 @@ fn preflight_probes_yield_event() { let mut invoice_features = Bolt11InvoiceFeatures::empty(); invoice_features.set_basic_mpp_optional(); - let payment_params = PaymentParameters::from_node_id(nodes[3].node.get_our_node_id(), TEST_FINAL_CLTV) - .with_bolt11_features(invoice_features).unwrap(); + let payment_params = PaymentParameters::from_node_id(node_d_id, TEST_FINAL_CLTV) + .with_bolt11_features(invoice_features) + .unwrap(); let recv_value = 50_000_000; let route_params = RouteParameters::from_payment_params_and_value(payment_params, recv_value); let res = nodes[0].node.send_preflight_probes(route_params, None).unwrap(); - let expected_route: &[(&[&Node], PaymentHash)] = 
&[(&[&nodes[1], &nodes[3]], res[0].0), (&[&nodes[2], &nodes[3]], res[1].0)]; + let expected_route: &[(&[&Node], PaymentHash)] = + &[(&[&nodes[1], &nodes[3]], res[0].0), (&[&nodes[2], &nodes[3]], res[1].0)]; assert_eq!(res.len(), expected_route.len()); @@ -1496,13 +1689,17 @@ fn preflight_probes_yield_event_and_skip() { let node_cfgs = create_node_cfgs(5, &chanmon_cfgs); // We alleviate the HTLC max-in-flight limit, as otherwise we'd always be limited through that. - let mut no_htlc_limit_config = test_default_channel_config(); - no_htlc_limit_config.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 100; + let mut config = test_default_channel_config(); + config.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 100; + let config = Some(config); - let user_configs = std::iter::repeat(no_htlc_limit_config).take(5).map(|c| Some(c)).collect::>>(); - let node_chanmgrs = create_node_chanmgrs(5, &node_cfgs, &user_configs); + let configs = + [config.clone(), config.clone(), config.clone(), config.clone(), config.clone(), config]; + let node_chanmgrs = create_node_chanmgrs(5, &node_cfgs, &configs[..]); let nodes = create_network(5, &node_cfgs, &node_chanmgrs); + let node_e_id = nodes[4].node.get_our_node_id(); + // Setup channel topology: // (30k:0)- N2 -(1M:0) // / \ @@ -1519,14 +1716,16 @@ fn preflight_probes_yield_event_and_skip() { let mut invoice_features = Bolt11InvoiceFeatures::empty(); invoice_features.set_basic_mpp_optional(); - let payment_params = PaymentParameters::from_node_id(nodes[4].node.get_our_node_id(), TEST_FINAL_CLTV) - .with_bolt11_features(invoice_features).unwrap(); + let payment_params = PaymentParameters::from_node_id(node_e_id, TEST_FINAL_CLTV) + .with_bolt11_features(invoice_features) + .unwrap(); let recv_value = 80_000_000; let route_params = RouteParameters::from_payment_params_and_value(payment_params, recv_value); let res = nodes[0].node.send_preflight_probes(route_params, 
None).unwrap(); - let expected_route: &[(&[&Node], PaymentHash)] = &[(&[&nodes[1], &nodes[2], &nodes[4]], res[0].0)]; + let expected_route: &[(&[&Node], PaymentHash)] = + &[(&[&nodes[1], &nodes[2], &nodes[4]], res[0].0)]; // We check that only one probe was sent, the other one was skipped due to limited liquidity. assert_eq!(res.len(), 1); @@ -1548,15 +1747,18 @@ fn claimed_send_payment_idempotent() { create_announced_chan_between_nodes(&nodes, 0, 1).2; - let (route, second_payment_hash, second_payment_preimage, second_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000); - let (first_payment_preimage, _, _, payment_id) = send_along_route(&nodes[0], route.clone(), &[&nodes[1]], 100_000); + let (route, hash_b, preimage_b, second_payment_secret) = + get_route_and_payment_hash!(nodes[0], nodes[1], 100_000); + let (preimage_a, _, _, payment_id) = + send_along_route(&nodes[0], route.clone(), &[&nodes[1]], 100_000); macro_rules! check_send_rejected { () => { // If we try to resend a new payment with a different payment_hash but with the same // payment_id, it should be rejected. - let send_result = nodes[0].node.send_payment_with_route(route.clone(), second_payment_hash, - RecipientOnionFields::secret_only(second_payment_secret), payment_id); + let onion = RecipientOnionFields::secret_only(second_payment_secret); + let send_result = + nodes[0].node.send_payment_with_route(route.clone(), hash_b, onion, payment_id); match send_result { Err(RetryableSendFailure::DuplicatePayment) => {}, _ => panic!("Unexpected send result: {:?}", send_result), @@ -1565,14 +1767,17 @@ fn claimed_send_payment_idempotent() { // Further, if we try to send a spontaneous payment with the same payment_id it should // also be rejected. 
let send_result = nodes[0].node.send_spontaneous_payment( - None, RecipientOnionFields::spontaneous_empty(), payment_id, - route.route_params.clone().unwrap(), Retry::Attempts(0) + None, + RecipientOnionFields::spontaneous_empty(), + payment_id, + route.route_params.clone().unwrap(), + Retry::Attempts(0), ); match send_result { Err(RetryableSendFailure::DuplicatePayment) => {}, _ => panic!("Unexpected send result: {:?}", send_result), } - } + }; } check_send_rejected!(); @@ -1580,9 +1785,7 @@ fn claimed_send_payment_idempotent() { // Claim the payment backwards, but note that the PaymentSent event is still pending and has // not been seen by the user. At this point, from the user perspective nothing has changed, so // we must remain just as idempotent as we were before. - do_claim_payment_along_route( - ClaimAlongRouteArgs::new(&nodes[0], &[&[&nodes[1]]], first_payment_preimage) - ); + do_claim_payment_along_route(ClaimAlongRouteArgs::new(&nodes[0], &[&[&nodes[1]]], preimage_a)); for _ in 0..=IDEMPOTENCY_TIMEOUT_TICKS { nodes[0].node.timer_tick_occurred(); @@ -1595,7 +1798,7 @@ fn claimed_send_payment_idempotent() { // the payment complete. However, they could have called `send_payment` while the event was // being processed, leading to a race in our idempotency guarantees. Thus, even immediately // after the event is handled a duplicate payment should sitll be rejected. - expect_payment_sent!(&nodes[0], first_payment_preimage, Some(0)); + expect_payment_sent!(&nodes[0], preimage_a, Some(0)); check_send_rejected!(); // If relatively little time has passed, a duplicate payment should still fail. 
@@ -1609,11 +1812,11 @@ fn claimed_send_payment_idempotent() { nodes[0].node.timer_tick_occurred(); } - nodes[0].node.send_payment_with_route(route, second_payment_hash, - RecipientOnionFields::secret_only(second_payment_secret), payment_id).unwrap(); + let onion = RecipientOnionFields::secret_only(second_payment_secret); + nodes[0].node.send_payment_with_route(route, hash_b, onion, payment_id).unwrap(); check_added_monitors!(nodes[0], 1); - pass_along_route(&nodes[0], &[&[&nodes[1]]], 100_000, second_payment_hash, second_payment_secret); - claim_payment(&nodes[0], &[&nodes[1]], second_payment_preimage); + pass_along_route(&nodes[0], &[&[&nodes[1]]], 100_000, hash_b, second_payment_secret); + claim_payment(&nodes[0], &[&nodes[1]], preimage_b); } #[test] @@ -1627,37 +1830,44 @@ fn abandoned_send_payment_idempotent() { create_announced_chan_between_nodes(&nodes, 0, 1).2; - let (route, second_payment_hash, second_payment_preimage, second_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000); - let (_, first_payment_hash, _, payment_id) = send_along_route(&nodes[0], route.clone(), &[&nodes[1]], 100_000); + let (route, hash_b, second_payment_preimage, second_payment_secret) = + get_route_and_payment_hash!(nodes[0], nodes[1], 100_000); + let (_, first_payment_hash, _, payment_id) = + send_along_route(&nodes[0], route.clone(), &[&nodes[1]], 100_000); macro_rules! check_send_rejected { () => { // If we try to resend a new payment with a different payment_hash but with the same // payment_id, it should be rejected. 
- let send_result = nodes[0].node.send_payment_with_route(route.clone(), second_payment_hash, - RecipientOnionFields::secret_only(second_payment_secret), payment_id); + let onion = RecipientOnionFields::secret_only(second_payment_secret); + let send_result = + nodes[0].node.send_payment_with_route(route.clone(), hash_b, onion, payment_id); match send_result { Err(RetryableSendFailure::DuplicatePayment) => {}, _ => panic!("Unexpected send result: {:?}", send_result), - } + }; // Further, if we try to send a spontaneous payment with the same payment_id it should // also be rejected. let send_result = nodes[0].node.send_spontaneous_payment( - None, RecipientOnionFields::spontaneous_empty(), payment_id, - route.route_params.clone().unwrap(), Retry::Attempts(0) + None, + RecipientOnionFields::spontaneous_empty(), + payment_id, + route.route_params.clone().unwrap(), + Retry::Attempts(0), ); match send_result { Err(RetryableSendFailure::DuplicatePayment) => {}, _ => panic!("Unexpected send result: {:?}", send_result), } - } + }; } check_send_rejected!(); nodes[1].node.fail_htlc_backwards(&first_payment_hash); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [HTLCHandlingFailureType::Receive { payment_hash: first_payment_hash }]); + let fail_type = HTLCHandlingFailureType::Receive { payment_hash: first_payment_hash }; + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [fail_type]); // Until we abandon the payment upon path failure, no matter how many timer ticks pass, we still cannot reuse the // PaymentId. 
@@ -1666,14 +1876,15 @@ fn abandoned_send_payment_idempotent() { } check_send_rejected!(); - pass_failed_payment_back(&nodes[0], &[&[&nodes[1]]], false, first_payment_hash, PaymentFailureReason::RecipientRejected); + let reason = PaymentFailureReason::RecipientRejected; + pass_failed_payment_back(&nodes[0], &[&[&nodes[1]]], false, first_payment_hash, reason); // However, we can reuse the PaymentId immediately after we `abandon_payment` upon passing the // failed payment back. - nodes[0].node.send_payment_with_route(route, second_payment_hash, - RecipientOnionFields::secret_only(second_payment_secret), payment_id).unwrap(); + let onion = RecipientOnionFields::secret_only(second_payment_secret); + nodes[0].node.send_payment_with_route(route, hash_b, onion, payment_id).unwrap(); check_added_monitors!(nodes[0], 1); - pass_along_route(&nodes[0], &[&[&nodes[1]]], 100_000, second_payment_hash, second_payment_secret); + pass_along_route(&nodes[0], &[&[&nodes[1]]], 100_000, hash_b, second_payment_secret); claim_payment(&nodes[0], &[&nodes[1]], second_payment_preimage); } @@ -1685,7 +1896,7 @@ enum InterceptTest { } #[test] -fn test_trivial_inflight_htlc_tracking(){ +fn test_trivial_inflight_htlc_tracking() { // In this test, we test three scenarios: // (1) Sending + claiming a payment successfully should return `None` when querying InFlightHtlcs // (2) Sending a payment without claiming it should return the payment's value (500000) when querying InFlightHtlcs @@ -1695,6 +1906,10 @@ fn test_trivial_inflight_htlc_tracking(){ let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); let nodes = create_network(3, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + let (_, _, chan_1_id, _) = create_announced_chan_between_nodes(&nodes, 0, 1); let (_, _, chan_2_id, _) = create_announced_chan_between_nodes(&nodes, 1, 2); @@ -1702,33 
+1917,36 @@ fn test_trivial_inflight_htlc_tracking(){ let (_, payment_hash, _, payment_id) = send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 500000); let inflight_htlcs = node_chanmgrs[0].compute_inflight_htlcs(); { - let mut node_0_per_peer_lock; - let mut node_0_peer_state_lock; - let channel_1 = get_channel_ref!(&nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1_id); + let mut per_peer_lock; + let mut peer_state_lock; + let channel_1 = + get_channel_ref!(&nodes[0], nodes[1], per_peer_lock, peer_state_lock, chan_1_id); let chan_1_used_liquidity = inflight_htlcs.used_liquidity_msat( - &NodeId::from_pubkey(&nodes[0].node.get_our_node_id()) , - &NodeId::from_pubkey(&nodes[1].node.get_our_node_id()), - channel_1.context().get_short_channel_id().unwrap() + &NodeId::from_pubkey(&node_a_id), + &NodeId::from_pubkey(&node_b_id), + channel_1.context().get_short_channel_id().unwrap(), ); assert_eq!(chan_1_used_liquidity, None); } { - let mut node_1_per_peer_lock; - let mut node_1_peer_state_lock; - let channel_2 = get_channel_ref!(&nodes[1], nodes[2], node_1_per_peer_lock, node_1_peer_state_lock, chan_2_id); + let mut per_peer_lock; + let mut peer_state_lock; + let channel_2 = + get_channel_ref!(&nodes[1], nodes[2], per_peer_lock, peer_state_lock, chan_2_id); let chan_2_used_liquidity = inflight_htlcs.used_liquidity_msat( - &NodeId::from_pubkey(&nodes[1].node.get_our_node_id()) , - &NodeId::from_pubkey(&nodes[2].node.get_our_node_id()), - channel_2.context().get_short_channel_id().unwrap() + &NodeId::from_pubkey(&node_b_id), + &NodeId::from_pubkey(&node_c_id), + channel_2.context().get_short_channel_id().unwrap(), ); assert_eq!(chan_2_used_liquidity, None); } let pending_payments = nodes[0].node.list_recent_payments(); assert_eq!(pending_payments.len(), 1); - assert_eq!(pending_payments[0], RecentPaymentDetails::Fulfilled { payment_hash: Some(payment_hash), payment_id }); + let details = RecentPaymentDetails::Fulfilled { payment_hash: 
Some(payment_hash), payment_id }; + assert_eq!(pending_payments[0], details); // Remove fulfilled payment for _ in 0..=IDEMPOTENCY_TIMEOUT_TICKS { @@ -1736,37 +1954,41 @@ fn test_trivial_inflight_htlc_tracking(){ } // Send the payment, but do not claim it. Our inflight HTLCs should contain the pending payment. - let (payment_preimage, payment_hash, _, payment_id) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 500000); + let (payment_preimage, payment_hash, _, payment_id) = + route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 500000); let inflight_htlcs = node_chanmgrs[0].compute_inflight_htlcs(); { - let mut node_0_per_peer_lock; - let mut node_0_peer_state_lock; - let channel_1 = get_channel_ref!(&nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1_id); + let mut per_peer_lock; + let mut peer_state_lock; + let channel_1 = + get_channel_ref!(&nodes[0], nodes[1], per_peer_lock, peer_state_lock, chan_1_id); let chan_1_used_liquidity = inflight_htlcs.used_liquidity_msat( - &NodeId::from_pubkey(&nodes[0].node.get_our_node_id()) , - &NodeId::from_pubkey(&nodes[1].node.get_our_node_id()), - channel_1.context().get_short_channel_id().unwrap() + &NodeId::from_pubkey(&node_a_id), + &NodeId::from_pubkey(&node_b_id), + channel_1.context().get_short_channel_id().unwrap(), ); // First hop accounts for expected 1000 msat fee assert_eq!(chan_1_used_liquidity, Some(501000)); } { - let mut node_1_per_peer_lock; - let mut node_1_peer_state_lock; - let channel_2 = get_channel_ref!(&nodes[1], nodes[2], node_1_per_peer_lock, node_1_peer_state_lock, chan_2_id); + let mut per_peer_lock; + let mut peer_state_lock; + let channel_2 = + get_channel_ref!(&nodes[1], nodes[2], per_peer_lock, peer_state_lock, chan_2_id); let chan_2_used_liquidity = inflight_htlcs.used_liquidity_msat( - &NodeId::from_pubkey(&nodes[1].node.get_our_node_id()) , - &NodeId::from_pubkey(&nodes[2].node.get_our_node_id()), - channel_2.context().get_short_channel_id().unwrap() + 
&NodeId::from_pubkey(&node_b_id), + &NodeId::from_pubkey(&node_c_id), + channel_2.context().get_short_channel_id().unwrap(), ); assert_eq!(chan_2_used_liquidity, Some(500000)); } let pending_payments = nodes[0].node.list_recent_payments(); assert_eq!(pending_payments.len(), 1); - assert_eq!(pending_payments[0], RecentPaymentDetails::Pending { payment_id, payment_hash, total_msat: 500000 }); + let details = RecentPaymentDetails::Pending { payment_id, payment_hash, total_msat: 500000 }; + assert_eq!(pending_payments[0], details); // Now, let's claim the payment. This should result in the used liquidity to return `None`. claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage); @@ -1778,26 +2000,28 @@ fn test_trivial_inflight_htlc_tracking(){ let inflight_htlcs = node_chanmgrs[0].compute_inflight_htlcs(); { - let mut node_0_per_peer_lock; - let mut node_0_peer_state_lock; - let channel_1 = get_channel_ref!(&nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1_id); + let mut per_peer_lock; + let mut peer_state_lock; + let channel_1 = + get_channel_ref!(&nodes[0], nodes[1], per_peer_lock, peer_state_lock, chan_1_id); let chan_1_used_liquidity = inflight_htlcs.used_liquidity_msat( - &NodeId::from_pubkey(&nodes[0].node.get_our_node_id()) , - &NodeId::from_pubkey(&nodes[1].node.get_our_node_id()), - channel_1.context().get_short_channel_id().unwrap() + &NodeId::from_pubkey(&node_a_id), + &NodeId::from_pubkey(&node_b_id), + channel_1.context().get_short_channel_id().unwrap(), ); assert_eq!(chan_1_used_liquidity, None); } { - let mut node_1_per_peer_lock; - let mut node_1_peer_state_lock; - let channel_2 = get_channel_ref!(&nodes[1], nodes[2], node_1_per_peer_lock, node_1_peer_state_lock, chan_2_id); + let mut per_peer_lock; + let mut peer_state_lock; + let channel_2 = + get_channel_ref!(&nodes[1], nodes[2], per_peer_lock, peer_state_lock, chan_2_id); let chan_2_used_liquidity = inflight_htlcs.used_liquidity_msat( - 
&NodeId::from_pubkey(&nodes[1].node.get_our_node_id()) , - &NodeId::from_pubkey(&nodes[2].node.get_our_node_id()), - channel_2.context().get_short_channel_id().unwrap() + &NodeId::from_pubkey(&node_b_id), + &NodeId::from_pubkey(&node_c_id), + channel_2.context().get_short_channel_id().unwrap(), ); assert_eq!(chan_2_used_liquidity, None); } @@ -1812,33 +2036,42 @@ fn test_holding_cell_inflight_htlcs() { let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2; - let (route, payment_hash_1, _, payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); + let (route, payment_hash_1, _, payment_secret_1) = + get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); let (_, payment_hash_2, payment_secret_2) = get_payment_preimage_hash!(nodes[1]); // Queue up two payments - one will be delivered right away, one immediately goes into the // holding cell as nodes[0] is AwaitingRAA. 
{ - nodes[0].node.send_payment_with_route(route.clone(), payment_hash_1, - RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0)).unwrap(); + let onion = RecipientOnionFields::secret_only(payment_secret_1); + let id = PaymentId(payment_hash_1.0); + nodes[0].node.send_payment_with_route(route.clone(), payment_hash_1, onion, id).unwrap(); check_added_monitors!(nodes[0], 1); - nodes[0].node.send_payment_with_route(route, payment_hash_2, - RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap(); + + let onion = RecipientOnionFields::secret_only(payment_secret_2); + let id = PaymentId(payment_hash_2.0); + nodes[0].node.send_payment_with_route(route, payment_hash_2, onion, id).unwrap(); check_added_monitors!(nodes[0], 0); } let inflight_htlcs = node_chanmgrs[0].compute_inflight_htlcs(); { - let mut node_0_per_peer_lock; - let mut node_0_peer_state_lock; - let channel = get_channel_ref!(&nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, channel_id); + let mut per_peer_lock; + let mut peer_state_lock; + let channel = + get_channel_ref!(&nodes[0], nodes[1], per_peer_lock, peer_state_lock, channel_id); let used_liquidity = inflight_htlcs.used_liquidity_msat( - &NodeId::from_pubkey(&nodes[0].node.get_our_node_id()) , - &NodeId::from_pubkey(&nodes[1].node.get_our_node_id()), - channel.context().get_short_channel_id().unwrap() + &NodeId::from_pubkey(&node_a_id), + &NodeId::from_pubkey(&node_b_id), + channel.context().get_short_channel_id().unwrap(), ); assert_eq!(used_liquidity, Some(2000000)); @@ -1867,9 +2100,16 @@ fn do_test_intercepted_payment(test: InterceptTest) { zero_conf_chan_config.manually_accept_inbound_channels = true; let mut intercept_forwards_config = test_default_channel_config(); intercept_forwards_config.accept_intercept_htlcs = true; - let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, Some(intercept_forwards_config), Some(zero_conf_chan_config)]); + + let configs = 
[None, Some(intercept_forwards_config), Some(zero_conf_chan_config)]; + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &configs); let nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + let scorer = test_utils::TestScorer::new(); let random_seed_bytes = chanmon_cfgs[0].keys_manager.get_secure_random_bytes(); @@ -1877,30 +2117,36 @@ fn do_test_intercepted_payment(test: InterceptTest) { let amt_msat = 100_000; let intercept_scid = nodes[1].node.get_intercept_scid(); - let payment_params = PaymentParameters::from_node_id(nodes[2].node.get_our_node_id(), TEST_FINAL_CLTV) - .with_route_hints(vec![ - RouteHint(vec![RouteHintHop { - src_node_id: nodes[1].node.get_our_node_id(), - short_channel_id: intercept_scid, - fees: RoutingFees { - base_msat: 1000, - proportional_millionths: 0, - }, - cltv_expiry_delta: MIN_CLTV_EXPIRY_DELTA, - htlc_minimum_msat: None, - htlc_maximum_msat: None, - }]) - ]).unwrap() - .with_bolt11_features(nodes[2].node.bolt11_invoice_features()).unwrap(); + let payment_params = PaymentParameters::from_node_id(node_c_id, TEST_FINAL_CLTV) + .with_route_hints(vec![RouteHint(vec![RouteHintHop { + src_node_id: node_b_id, + short_channel_id: intercept_scid, + fees: RoutingFees { base_msat: 1000, proportional_millionths: 0 }, + cltv_expiry_delta: MIN_CLTV_EXPIRY_DELTA, + htlc_minimum_msat: None, + htlc_maximum_msat: None, + }])]) + .unwrap() + .with_bolt11_features(nodes[2].node.bolt11_invoice_features()) + .unwrap(); let route_params = RouteParameters::from_payment_params_and_value(payment_params, amt_msat); let route = get_route( - &nodes[0].node.get_our_node_id(), &route_params, &nodes[0].network_graph.read_only(), None, - nodes[0].logger, &scorer, &Default::default(), &random_seed_bytes - ).unwrap(); - - let (payment_hash, payment_secret) = 
nodes[2].node.create_inbound_payment(Some(amt_msat), 60 * 60, None).unwrap(); - nodes[0].node.send_payment_with_route(route.clone(), payment_hash, - RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap(); + &node_a_id, + &route_params, + &nodes[0].network_graph.read_only(), + None, + nodes[0].logger, + &scorer, + &Default::default(), + &random_seed_bytes, + ) + .unwrap(); + + let (hash, payment_secret) = + nodes[2].node.create_inbound_payment(Some(amt_msat), 60 * 60, None).unwrap(); + let onion = RecipientOnionFields::secret_only(payment_secret); + let id = PaymentId(hash.0); + nodes[0].node.send_payment_with_route(route.clone(), hash, onion, id).unwrap(); let payment_event = { { let mut added_monitors = nodes[0].chain_monitor.added_monitors.lock().unwrap(); @@ -1911,41 +2157,52 @@ fn do_test_intercepted_payment(test: InterceptTest) { assert_eq!(events.len(), 1); SendEvent::from_event(events.remove(0)) }; - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); + nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); commitment_signed_dance!(nodes[1], nodes[0], &payment_event.commitment_msg, false, true); expect_pending_htlcs_forwardable!(nodes[1]); // Check that we generate the PaymentIntercepted event when an intercept forward is detected. 
let events = nodes[1].node.get_and_clear_pending_events(); assert_eq!(events.len(), 1); - let (intercept_id, expected_outbound_amount_msat) = match events[0] { + let (intercept_id, outbound_amt) = match events[0] { crate::events::Event::HTLCIntercepted { - intercept_id, expected_outbound_amount_msat, payment_hash: pmt_hash, inbound_amount_msat, requested_next_hop_scid: short_channel_id + intercept_id, + expected_outbound_amount_msat, + payment_hash, + inbound_amount_msat, + requested_next_hop_scid: short_channel_id, } => { - assert_eq!(pmt_hash, payment_hash); + assert_eq!(payment_hash, hash); assert_eq!(inbound_amount_msat, route.get_total_amount() + route.get_total_fees()); assert_eq!(short_channel_id, intercept_scid); (intercept_id, expected_outbound_amount_msat) }, - _ => panic!() + _ => panic!(), }; // Check for unknown channel id error. - let unknown_chan_id_err = nodes[1].node.forward_intercepted_htlc(intercept_id, &ChannelId::from_bytes([42; 32]), nodes[2].node.get_our_node_id(), expected_outbound_amount_msat).unwrap_err(); - assert_eq!(unknown_chan_id_err , APIError::ChannelUnavailable { - err: format!("Channel with id {} not found for the passed counterparty node_id {}", - log_bytes!([42; 32]), nodes[2].node.get_our_node_id()) }); + let chan_id = ChannelId::from_bytes([42; 32]); + let unknown_chan_id_err = + nodes[1].node.forward_intercepted_htlc(intercept_id, &chan_id, node_c_id, outbound_amt); + let err = format!( + "Channel with id {} not found for the passed counterparty node_id {}", + log_bytes!([42; 32]), + node_c_id, + ); + assert_eq!(unknown_chan_id_err, Err(APIError::ChannelUnavailable { err })); if test == InterceptTest::Fail { // Ensure we can fail the intercepted payment back. 
nodes[1].node.fail_intercepted_htlc(intercept_id).unwrap(); - expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCHandlingFailureType::InvalidForward { requested_forward_scid: intercept_scid }]); + let fail = + HTLCHandlingFailureType::InvalidForward { requested_forward_scid: intercept_scid }; + expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], [fail]); nodes[1].node.process_pending_htlc_forwards(); - let update_fail = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + let update_fail = get_htlc_update_msgs!(nodes[1], node_a_id); check_added_monitors!(&nodes[1], 1); assert!(update_fail.update_fail_htlcs.len() == 1); let fail_msg = update_fail.update_fail_htlcs[0].clone(); - nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &fail_msg); + nodes[0].node.handle_update_fail_htlc(node_b_id, &fail_msg); commitment_signed_dance!(nodes[0], nodes[1], update_fail.commitment_signed, false); // Ensure the payment fails with the expected error. @@ -1953,21 +2210,27 @@ fn do_test_intercepted_payment(test: InterceptTest) { .blamed_scid(intercept_scid) .blamed_chan_closed(true) .expected_htlc_error_data(LocalHTLCFailureReason::UnknownNextPeer, &[]); - expect_payment_failed_conditions(&nodes[0], payment_hash, false, fail_conditions); + expect_payment_failed_conditions(&nodes[0], hash, false, fail_conditions); } else if test == InterceptTest::Forward { // Check that we'll fail as expected when sending to a channel that isn't in `ChannelReady` yet. 
- let temp_chan_id = nodes[1].node.create_channel(nodes[2].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap(); - let unusable_chan_err = nodes[1].node.forward_intercepted_htlc(intercept_id, &temp_chan_id, nodes[2].node.get_our_node_id(), expected_outbound_amount_msat).unwrap_err(); - assert_eq!(unusable_chan_err , APIError::ChannelUnavailable { - err: format!("Channel with id {} for the passed counterparty node_id {} is still opening.", - temp_chan_id, nodes[2].node.get_our_node_id()) }); + let temp_id = nodes[1].node.create_channel(node_c_id, 100_000, 0, 42, None, None).unwrap(); + let unusable_chan_err = + nodes[1].node.forward_intercepted_htlc(intercept_id, &temp_id, node_c_id, outbound_amt); + let err = format!( + "Channel with id {} for the passed counterparty node_id {} is still opening.", + temp_id, node_c_id, + ); + assert_eq!(unusable_chan_err, Err(APIError::ChannelUnavailable { err })); assert_eq!(nodes[1].node.get_and_clear_pending_msg_events().len(), 1); // Open the just-in-time channel so the payment can then be forwarded. - let (_, channel_id) = open_zero_conf_channel(&nodes[1], &nodes[2], None); + let (_, chan_id) = open_zero_conf_channel(&nodes[1], &nodes[2], None); // Finally, forward the intercepted payment through and claim it. 
- nodes[1].node.forward_intercepted_htlc(intercept_id, &channel_id, nodes[2].node.get_our_node_id(), expected_outbound_amount_msat).unwrap(); + nodes[1] + .node + .forward_intercepted_htlc(intercept_id, &chan_id, node_c_id, outbound_amt) + .unwrap(); expect_pending_htlcs_forwardable!(nodes[1]); let payment_event = { @@ -1980,30 +2243,31 @@ fn do_test_intercepted_payment(test: InterceptTest) { assert_eq!(events.len(), 1); SendEvent::from_event(events.remove(0)) }; - nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &payment_event.msgs[0]); + nodes[2].node.handle_update_add_htlc(node_b_id, &payment_event.msgs[0]); commitment_signed_dance!(nodes[2], nodes[1], &payment_event.commitment_msg, false, true); expect_pending_htlcs_forwardable!(nodes[2]); - let payment_preimage = nodes[2].node.get_payment_preimage(payment_hash, payment_secret).unwrap(); - expect_payment_claimable!(&nodes[2], payment_hash, payment_secret, amt_msat, Some(payment_preimage), nodes[2].node.get_our_node_id()); - do_claim_payment_along_route( - ClaimAlongRouteArgs::new(&nodes[0], &[&[&nodes[1], &nodes[2]]], payment_preimage) - ); + let preimage = Some(nodes[2].node.get_payment_preimage(hash, payment_secret).unwrap()); + expect_payment_claimable!(&nodes[2], hash, payment_secret, amt_msat, preimage, node_c_id); + + let path: &[&[_]] = &[&[&nodes[1], &nodes[2]]]; + do_claim_payment_along_route(ClaimAlongRouteArgs::new(&nodes[0], path, preimage.unwrap())); + let events = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events.len(), 2); match events[0] { - Event::PaymentSent { payment_preimage: ref ev_preimage, payment_hash: ref ev_hash, ref fee_paid_msat, .. } => { - assert_eq!(payment_preimage, *ev_preimage); - assert_eq!(payment_hash, *ev_hash); + Event::PaymentSent { payment_preimage, payment_hash, ref fee_paid_msat, .. 
} => { + assert_eq!(preimage.unwrap(), payment_preimage); + assert_eq!(hash, payment_hash); assert_eq!(fee_paid_msat, &Some(1000)); }, - _ => panic!("Unexpected event") + _ => panic!("Unexpected event"), } match events[1] { - Event::PaymentPathSuccessful { payment_hash: hash, .. } => { - assert_eq!(hash, Some(payment_hash)); + Event::PaymentPathSuccessful { payment_hash, .. } => { + assert_eq!(payment_hash, Some(hash)); }, - _ => panic!("Unexpected event") + _ => panic!("Unexpected event"), } check_added_monitors(&nodes[0], 1); } else if test == InterceptTest::Timeout { @@ -2015,24 +2279,33 @@ fn do_test_intercepted_payment(test: InterceptTest) { connect_block(&nodes[0], &block); connect_block(&nodes[1], &block); } - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingFailureType::InvalidForward { requested_forward_scid: intercept_scid }]); + let fail_type = + HTLCHandlingFailureType::InvalidForward { requested_forward_scid: intercept_scid }; + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [fail_type]); check_added_monitors!(nodes[1], 1); - let htlc_timeout_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); - assert!(htlc_timeout_updates.update_add_htlcs.is_empty()); - assert_eq!(htlc_timeout_updates.update_fail_htlcs.len(), 1); - assert!(htlc_timeout_updates.update_fail_malformed_htlcs.is_empty()); - assert!(htlc_timeout_updates.update_fee.is_none()); - nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &htlc_timeout_updates.update_fail_htlcs[0]); - commitment_signed_dance!(nodes[0], nodes[1], htlc_timeout_updates.commitment_signed, false); - expect_payment_failed!(nodes[0], payment_hash, false, LocalHTLCFailureReason::TemporaryNodeFailure, []); + let htlc_fail = get_htlc_update_msgs!(nodes[1], node_a_id); + assert!(htlc_fail.update_add_htlcs.is_empty()); + assert_eq!(htlc_fail.update_fail_htlcs.len(), 1); + 
assert!(htlc_fail.update_fail_malformed_htlcs.is_empty()); + assert!(htlc_fail.update_fee.is_none()); + + nodes[0].node.handle_update_fail_htlc(node_b_id, &htlc_fail.update_fail_htlcs[0]); + commitment_signed_dance!(nodes[0], nodes[1], htlc_fail.commitment_signed, false); + let reason = LocalHTLCFailureReason::TemporaryNodeFailure; + expect_payment_failed!(nodes[0], hash, false, reason, []); // Check for unknown intercept id error. - let (_, channel_id) = open_zero_conf_channel(&nodes[1], &nodes[2], None); - let unknown_intercept_id_err = nodes[1].node.forward_intercepted_htlc(intercept_id, &channel_id, nodes[2].node.get_our_node_id(), expected_outbound_amount_msat).unwrap_err(); - assert_eq!(unknown_intercept_id_err , APIError::APIMisuseError { err: format!("Payment with intercept id {} not found", log_bytes!(intercept_id.0)) }); - let unknown_intercept_id_err = nodes[1].node.fail_intercepted_htlc(intercept_id).unwrap_err(); - assert_eq!(unknown_intercept_id_err , APIError::APIMisuseError { err: format!("Payment with intercept id {} not found", log_bytes!(intercept_id.0)) }); + let (_, chan_id) = open_zero_conf_channel(&nodes[1], &nodes[2], None); + let unknown_intercept_id_err = + nodes[1].node.forward_intercepted_htlc(intercept_id, &chan_id, node_c_id, outbound_amt); + let err = format!("Payment with intercept id {} not found", log_bytes!(intercept_id.0)); + assert_eq!(unknown_intercept_id_err, Err(APIError::APIMisuseError { err })); + + let unknown_intercept_id_err = + nodes[1].node.fail_intercepted_htlc(intercept_id).unwrap_err(); + let err = format!("Payment with intercept id {} not found", log_bytes!(intercept_id.0)); + assert_eq!(unknown_intercept_id_err, APIError::APIMisuseError { err }); } } @@ -2046,25 +2319,36 @@ fn accept_underpaying_htlcs_config() { fn do_accept_underpaying_htlcs_config(num_mpp_parts: usize) { let chanmon_cfgs = create_chanmon_cfgs(3); let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let max_in_flight_percent = 10; let mut 
intercept_forwards_config = test_default_channel_config(); intercept_forwards_config.accept_intercept_htlcs = true; - intercept_forwards_config.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = max_in_flight_percent; + intercept_forwards_config + .channel_handshake_config + .max_inbound_htlc_value_in_flight_percent_of_channel = max_in_flight_percent; let mut underpay_config = test_default_channel_config(); underpay_config.channel_config.accept_underpaying_htlcs = true; - underpay_config.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = max_in_flight_percent; - let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, Some(intercept_forwards_config), Some(underpay_config)]); + underpay_config.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = + max_in_flight_percent; + + let configs = [None, Some(intercept_forwards_config), Some(underpay_config)]; + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &configs); let nodes = create_network(3, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + let amt_msat = 900_000; let mut chan_ids = Vec::new(); for _ in 0..num_mpp_parts { // We choose the channel size so that there can be at most one part pending on each channel. 
- let channel_size = amt_msat / 1000 / num_mpp_parts as u64 * 100 / max_in_flight_percent as u64 + 100; + let channel_size = + amt_msat / 1000 / num_mpp_parts as u64 * 100 / max_in_flight_percent as u64 + 100; let _ = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, channel_size, 0); - let channel_id = create_unannounced_chan_between_nodes_with_value(&nodes, 1, 2, channel_size, 0).0.channel_id; - chan_ids.push(channel_id); + let chan = create_unannounced_chan_between_nodes_with_value(&nodes, 1, 2, channel_size, 0); + chan_ids.push(chan.0.channel_id); } // Send the initial payment. @@ -2072,31 +2356,35 @@ fn do_accept_underpaying_htlcs_config(num_mpp_parts: usize) { let mut route_hints = Vec::new(); for _ in 0..num_mpp_parts { route_hints.push(RouteHint(vec![RouteHintHop { - src_node_id: nodes[1].node.get_our_node_id(), + src_node_id: node_b_id, short_channel_id: nodes[1].node.get_intercept_scid(), - fees: RoutingFees { - base_msat: 1000, - proportional_millionths: 0, - }, + fees: RoutingFees { base_msat: 1000, proportional_millionths: 0 }, cltv_expiry_delta: MIN_CLTV_EXPIRY_DELTA, htlc_minimum_msat: None, htlc_maximum_msat: Some(amt_msat / num_mpp_parts as u64 + 5), }])); } - let payment_params = PaymentParameters::from_node_id(nodes[2].node.get_our_node_id(), TEST_FINAL_CLTV) - .with_route_hints(route_hints).unwrap() - .with_bolt11_features(nodes[2].node.bolt11_invoice_features()).unwrap(); + let payment_params = PaymentParameters::from_node_id(node_c_id, TEST_FINAL_CLTV) + .with_route_hints(route_hints) + .unwrap() + .with_bolt11_features(nodes[2].node.bolt11_invoice_features()) + .unwrap(); let route_params = RouteParameters::from_payment_params_and_value(payment_params, amt_msat); - let (payment_hash, payment_secret) = nodes[2].node.create_inbound_payment(Some(amt_msat), 60 * 60, None).unwrap(); - nodes[0].node.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret), - PaymentId(payment_hash.0), route_params, 
Retry::Attempts(0)).unwrap(); + let (payment_hash, payment_secret) = + nodes[2].node.create_inbound_payment(Some(amt_msat), 60 * 60, None).unwrap(); + + let onion = RecipientOnionFields::secret_only(payment_secret); + let id = PaymentId(payment_hash.0); + nodes[0].node.send_payment(payment_hash, onion, id, route_params, Retry::Attempts(0)).unwrap(); + check_added_monitors!(nodes[0], num_mpp_parts); // one monitor per path - let mut events: Vec = nodes[0].node.get_and_clear_pending_msg_events().into_iter().map(|e| SendEvent::from_event(e)).collect(); + let events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), num_mpp_parts); // Forward the intercepted payments. for (idx, ev) in events.into_iter().enumerate() { - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &ev.msgs[0]); + let ev = SendEvent::from_event(ev); + nodes[1].node.handle_update_add_htlc(node_a_id, &ev.msgs[0]); do_commitment_signed_dance(&nodes[1], &nodes[0], &ev.commitment_msg, false, true); expect_pending_htlcs_forwardable!(nodes[1]); @@ -2104,17 +2392,23 @@ fn do_accept_underpaying_htlcs_config(num_mpp_parts: usize) { assert_eq!(events.len(), 1); let (intercept_id, expected_outbound_amt_msat) = match events[0] { crate::events::Event::HTLCIntercepted { - intercept_id, expected_outbound_amount_msat, payment_hash: pmt_hash, .. + intercept_id, + expected_outbound_amount_msat, + payment_hash: pmt_hash, + .. 
} => { assert_eq!(pmt_hash, payment_hash); (intercept_id, expected_outbound_amount_msat) }, - _ => panic!() + _ => panic!(), }; - nodes[1].node.forward_intercepted_htlc(intercept_id, &chan_ids[idx], - nodes[2].node.get_our_node_id(), expected_outbound_amt_msat - skimmed_fee_msat).unwrap(); + let amt = expected_outbound_amt_msat - skimmed_fee_msat; + nodes[1] + .node + .forward_intercepted_htlc(intercept_id, &chan_ids[idx], node_c_id, amt) + .unwrap(); expect_pending_htlcs_forwardable!(nodes[1]); - let payment_event = { + let pay_event = { { let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap(); assert_eq!(added_monitors.len(), 1); @@ -2124,25 +2418,31 @@ fn do_accept_underpaying_htlcs_config(num_mpp_parts: usize) { assert_eq!(events.len(), 1); SendEvent::from_event(events.remove(0)) }; - nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &payment_event.msgs[0]); - do_commitment_signed_dance(&nodes[2], &nodes[1], &payment_event.commitment_msg, false, true); + nodes[2].node.handle_update_add_htlc(node_b_id, &pay_event.msgs[0]); + do_commitment_signed_dance(&nodes[2], &nodes[1], &pay_event.commitment_msg, false, true); if idx == num_mpp_parts - 1 { expect_pending_htlcs_forwardable!(nodes[2]); } } // Claim the payment and check that the skimmed fee is as expected. - let payment_preimage = nodes[2].node.get_payment_preimage(payment_hash, payment_secret).unwrap(); + let payment_preimage = + nodes[2].node.get_payment_preimage(payment_hash, payment_secret).unwrap(); let events = nodes[2].node.get_and_clear_pending_events(); assert_eq!(events.len(), 1); match events[0] { crate::events::Event::PaymentClaimable { - ref payment_hash, ref purpose, amount_msat, counterparty_skimmed_fee_msat, receiver_node_id, .. + ref payment_hash, + ref purpose, + amount_msat, + counterparty_skimmed_fee_msat, + receiver_node_id, + .. 
} => { assert_eq!(payment_hash, payment_hash); assert_eq!(amt_msat - skimmed_fee_msat * num_mpp_parts as u64, amount_msat); assert_eq!(skimmed_fee_msat * num_mpp_parts as u64, counterparty_skimmed_fee_msat); - assert_eq!(nodes[2].node.get_our_node_id(), receiver_node_id.unwrap()); + assert_eq!(node_c_id, receiver_node_id.unwrap()); match purpose { crate::events::PaymentPurpose::Bolt11InvoicePayment { payment_preimage: ev_payment_preimage, @@ -2159,15 +2459,19 @@ fn do_accept_underpaying_htlcs_config(num_mpp_parts: usize) { } let mut expected_paths_vecs = Vec::new(); let mut expected_paths = Vec::new(); - for _ in 0..num_mpp_parts { expected_paths_vecs.push(vec!(&nodes[1], &nodes[2])); } - for i in 0..num_mpp_parts { expected_paths.push(&expected_paths_vecs[i][..]); } + for _ in 0..num_mpp_parts { + expected_paths_vecs.push(vec![&nodes[1], &nodes[2]]); + } + for i in 0..num_mpp_parts { + expected_paths.push(&expected_paths_vecs[i][..]); + } expected_paths[0].last().unwrap().node.claim_funds(payment_preimage); let args = ClaimAlongRouteArgs::new(&nodes[0], &expected_paths[..], payment_preimage) .with_expected_extra_fees(vec![skimmed_fee_msat as u32; num_mpp_parts]); let total_fee_msat = pass_claimed_payment_along_route(args); // The sender doesn't know that the penultimate hop took an extra fee. 
- expect_payment_sent(&nodes[0], payment_preimage, - Some(Some(total_fee_msat - skimmed_fee_msat * num_mpp_parts as u64)), true, true); + let amt = total_fee_msat - skimmed_fee_msat * num_mpp_parts as u64; + expect_payment_sent(&nodes[0], payment_preimage, Some(Some(amt)), true, true); } #[derive(PartialEq)] @@ -2195,12 +2499,17 @@ fn do_automatic_retries(test: AutoRetry) { let chanmon_cfgs = create_chanmon_cfgs(3); let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); let persister; - let new_chain_monitor; + let chain_monitor; let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); - let node_0_deserialized; + let node_a_reload; let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + let channel_id_1 = create_announced_chan_between_nodes(&nodes, 0, 1).2; let channel_id_2 = create_announced_chan_between_nodes(&nodes, 2, 1).2; @@ -2214,41 +2523,43 @@ fn do_automatic_retries(test: AutoRetry) { invoice_features.set_variable_length_onion_required(); invoice_features.set_payment_secret_required(); invoice_features.set_basic_mpp_optional(); - let payment_params = PaymentParameters::from_node_id(nodes[2].node.get_our_node_id(), TEST_FINAL_CLTV) + let payment_params = PaymentParameters::from_node_id(node_c_id, TEST_FINAL_CLTV) .with_expiry_time(payment_expiry_secs as u64) - .with_bolt11_features(invoice_features).unwrap(); + .with_bolt11_features(invoice_features) + .unwrap(); let route_params = RouteParameters::from_payment_params_and_value(payment_params, amt_msat); - let (_, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], amt_msat); + let (_, hash, preimage, payment_secret) = + get_route_and_payment_hash!(nodes[0], nodes[2], amt_msat); macro_rules! 
pass_failed_attempt_with_retry_along_path { ($failing_channel_id: expr, $expect_pending_htlcs_forwardable: expr) => { // Send a payment attempt that fails due to lack of liquidity on the second hop check_added_monitors!(nodes[0], 1); - let update_0 = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); + let update_0 = get_htlc_update_msgs!(nodes[0], node_b_id); let mut update_add = update_0.update_add_htlcs[0].clone(); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &update_add); + nodes[1].node.handle_update_add_htlc(node_a_id, &update_add); commitment_signed_dance!(nodes[1], nodes[0], &update_0.commitment_signed, false, true); expect_pending_htlcs_forwardable_ignore!(nodes[1]); nodes[1].node.process_pending_htlc_forwards(); expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], - vec![HTLCHandlingFailureType::Forward { - node_id: Some(nodes[2].node.get_our_node_id()), + [HTLCHandlingFailureType::Forward { + node_id: Some(node_c_id), channel_id: $failing_channel_id, }]); nodes[1].node.process_pending_htlc_forwards(); - let update_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + let update_1 = get_htlc_update_msgs!(nodes[1], node_a_id); check_added_monitors!(&nodes[1], 1); assert!(update_1.update_fail_htlcs.len() == 1); let fail_msg = update_1.update_fail_htlcs[0].clone(); - nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &fail_msg); + nodes[0].node.handle_update_fail_htlc(node_b_id, &fail_msg); commitment_signed_dance!(nodes[0], nodes[1], update_1.commitment_signed, false); // Ensure the attempt fails and a new PendingHTLCsForwardable event is generated for the retry let mut events = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events.len(), 2); match events[0] { - Event::PaymentPathFailed { payment_hash: ev_payment_hash, payment_failed_permanently, .. 
} => { - assert_eq!(payment_hash, ev_payment_hash); + Event::PaymentPathFailed { payment_hash, payment_failed_permanently, .. } => { + assert_eq!(hash, payment_hash); assert_eq!(payment_failed_permanently, false); }, _ => panic!("Unexpected event"), @@ -2260,8 +2571,8 @@ fn do_automatic_retries(test: AutoRetry) { } } else { match events[1] { - Event::PaymentFailed { payment_hash: ev_payment_hash, .. } => { - assert_eq!(Some(payment_hash), ev_payment_hash); + Event::PaymentFailed { payment_hash, .. } => { + assert_eq!(Some(hash), payment_hash); }, _ => panic!("Unexpected event"), } @@ -2271,8 +2582,10 @@ fn do_automatic_retries(test: AutoRetry) { if test == AutoRetry::Success { // Test that we can succeed on the first retry. - nodes[0].node.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret), - PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap(); + let onion = RecipientOnionFields::secret_only(payment_secret); + let id = PaymentId(hash.0); + let retry = Retry::Attempts(1); + nodes[0].node.send_payment(hash, onion, id, route_params, retry).unwrap(); pass_failed_attempt_with_retry_along_path!(channel_id_2, true); // Open a new channel with liquidity on the second hop so we can find a route for the retry @@ -2282,16 +2595,25 @@ fn do_automatic_retries(test: AutoRetry) { // We retry payments in `process_pending_htlc_forwards` nodes[0].node.process_pending_htlc_forwards(); check_added_monitors!(nodes[0], 1); + let mut msg_events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(msg_events.len(), 1); - pass_along_path(&nodes[0], &[&nodes[1], &nodes[2]], amt_msat, payment_hash, Some(payment_secret), msg_events.pop().unwrap(), true, None); - claim_payment_along_route( - ClaimAlongRouteArgs::new(&nodes[0], &[&[&nodes[1], &nodes[2]]], payment_preimage) - ); + let event = msg_events.pop().unwrap(); + + let path = &[&nodes[1], &nodes[2]]; + pass_along_path(&nodes[0], path, amt_msat, hash, Some(payment_secret), event, true, 
None); + claim_payment_along_route(ClaimAlongRouteArgs::new( + &nodes[0], + &[&[&nodes[1], &nodes[2]]], + preimage, + )); } else if test == AutoRetry::Spontaneous { - nodes[0].node.send_spontaneous_payment(Some(payment_preimage), - RecipientOnionFields::spontaneous_empty(), PaymentId(payment_hash.0), route_params.clone(), - Retry::Attempts(1)).unwrap(); + let onion = RecipientOnionFields::spontaneous_empty(); + let id = PaymentId(hash.0); + nodes[0] + .node + .send_spontaneous_payment(Some(preimage), onion, id, route_params, Retry::Attempts(1)) + .unwrap(); pass_failed_attempt_with_retry_along_path!(channel_id_2, true); // Open a new channel with liquidity on the second hop so we can find a route for the retry @@ -2301,16 +2623,19 @@ fn do_automatic_retries(test: AutoRetry) { // We retry payments in `process_pending_htlc_forwards` nodes[0].node.process_pending_htlc_forwards(); check_added_monitors!(nodes[0], 1); + let mut msg_events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(msg_events.len(), 1); - pass_along_path(&nodes[0], &[&nodes[1], &nodes[2]], amt_msat, payment_hash, None, msg_events.pop().unwrap(), true, Some(payment_preimage)); - claim_payment_along_route( - ClaimAlongRouteArgs::new(&nodes[0], &[&[&nodes[1], &nodes[2]]], payment_preimage) - ); + let event = msg_events.pop().unwrap(); + + let path = &[&nodes[1], &nodes[2]]; + pass_along_path(&nodes[0], path, amt_msat, hash, None, event, true, Some(preimage)); + claim_payment_along_route(ClaimAlongRouteArgs::new(&nodes[0], &[path], preimage)); } else if test == AutoRetry::FailAttempts { // Ensure ChannelManager will not retry a payment if it has run out of payment attempts. 
- nodes[0].node.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret), - PaymentId(payment_hash.0), route_params.clone(), Retry::Attempts(1)).unwrap(); + let onion = RecipientOnionFields::secret_only(payment_secret); + let id = PaymentId(hash.0); + nodes[0].node.send_payment(hash, onion, id, route_params, Retry::Attempts(1)).unwrap(); pass_failed_attempt_with_retry_along_path!(channel_id_2, true); // Open a new channel with no liquidity on the second hop so we can find a (bad) route for @@ -2326,10 +2651,13 @@ fn do_automatic_retries(test: AutoRetry) { let mut msg_events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(msg_events.len(), 0); } else if test == AutoRetry::FailTimeout { - #[cfg(feature = "std")] { + #[cfg(feature = "std")] + { // Ensure ChannelManager will not retry a payment if it times out due to Retry::Timeout. - nodes[0].node.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret), - PaymentId(payment_hash.0), route_params, Retry::Timeout(Duration::from_secs(60))).unwrap(); + let onion = RecipientOnionFields::secret_only(payment_secret); + let id = PaymentId(hash.0); + let retry = Retry::Timeout(Duration::from_secs(60)); + nodes[0].node.send_payment(hash, onion, id, route_params, retry).unwrap(); pass_failed_attempt_with_retry_along_path!(channel_id_2, true); // Advance the time so the second attempt fails due to timeout. 
@@ -2343,10 +2671,10 @@ fn do_automatic_retries(test: AutoRetry) { let mut events = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events.len(), 1); match events[0] { - Event::PaymentFailed { payment_hash: ref ev_payment_hash, payment_id: ref ev_payment_id, reason: ref ev_reason } => { - assert_eq!(Some(payment_hash), *ev_payment_hash); - assert_eq!(PaymentId(payment_hash.0), *ev_payment_id); - assert_eq!(PaymentFailureReason::RetriesExhausted, ev_reason.unwrap()); + Event::PaymentFailed { payment_hash, payment_id, reason } => { + assert_eq!(Some(hash), payment_hash); + assert_eq!(PaymentId(hash.0), payment_id); + assert_eq!(PaymentFailureReason::RetriesExhausted, reason.unwrap()); }, _ => panic!("Unexpected event"), } @@ -2354,8 +2682,9 @@ fn do_automatic_retries(test: AutoRetry) { } else if test == AutoRetry::FailOnRestart { // Ensure ChannelManager will not retry a payment after restart, even if there were retry // attempts remaining prior to restart. - nodes[0].node.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret), - PaymentId(payment_hash.0), route_params, Retry::Attempts(2)).unwrap(); + let onion = RecipientOnionFields::secret_only(payment_secret); + let id = PaymentId(hash.0); + nodes[0].node.send_payment(hash, onion, id, route_params, Retry::Attempts(2)).unwrap(); pass_failed_attempt_with_retry_along_path!(channel_id_2, true); // Open a new channel with no liquidity on the second hop so we can find a (bad) route for @@ -2368,8 +2697,8 @@ fn do_automatic_retries(test: AutoRetry) { // Restart the node and ensure that ChannelManager does not use its remaining retry attempt let node_encoded = nodes[0].node.encode(); - let chan_1_monitor_serialized = get_monitor!(nodes[0], channel_id_1).encode(); - reload_node!(nodes[0], node_encoded, &[&chan_1_monitor_serialized], persister, new_chain_monitor, node_0_deserialized); + let mon_ser = get_monitor!(nodes[0], channel_id_1).encode(); + reload_node!(nodes[0], node_encoded, 
&[&mon_ser], persister, chain_monitor, node_a_reload); let mut events = nodes[0].node.get_and_clear_pending_events(); expect_pending_htlcs_forwardable_from_events!(nodes[0], events, true); @@ -2380,16 +2709,17 @@ fn do_automatic_retries(test: AutoRetry) { let mut events = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events.len(), 1); match events[0] { - Event::PaymentFailed { payment_hash: ref ev_payment_hash, payment_id: ref ev_payment_id, reason: ref ev_reason } => { - assert_eq!(Some(payment_hash), *ev_payment_hash); - assert_eq!(PaymentId(payment_hash.0), *ev_payment_id); - assert_eq!(PaymentFailureReason::RetriesExhausted, ev_reason.unwrap()); + Event::PaymentFailed { payment_hash, payment_id, reason } => { + assert_eq!(Some(hash), payment_hash); + assert_eq!(PaymentId(hash.0), payment_id); + assert_eq!(PaymentFailureReason::RetriesExhausted, reason.unwrap()); }, _ => panic!("Unexpected event"), } } else if test == AutoRetry::FailOnRetry { - nodes[0].node.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret), - PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap(); + let onion = RecipientOnionFields::secret_only(payment_secret); + let id = PaymentId(hash.0); + nodes[0].node.send_payment(hash, onion, id, route_params, Retry::Attempts(1)).unwrap(); pass_failed_attempt_with_retry_along_path!(channel_id_2, true); // We retry payments in `process_pending_htlc_forwards`. 
Since our channel closed, we should @@ -2401,10 +2731,10 @@ fn do_automatic_retries(test: AutoRetry) { let mut events = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events.len(), 1); match events[0] { - Event::PaymentFailed { payment_hash: ref ev_payment_hash, payment_id: ref ev_payment_id, reason: ref ev_reason } => { - assert_eq!(Some(payment_hash), *ev_payment_hash); - assert_eq!(PaymentId(payment_hash.0), *ev_payment_id); - assert_eq!(PaymentFailureReason::RouteNotFound, ev_reason.unwrap()); + Event::PaymentFailed { payment_hash, payment_id, reason } => { + assert_eq!(Some(hash), payment_hash); + assert_eq!(PaymentId(hash.0), payment_id); + assert_eq!(PaymentFailureReason::RouteNotFound, reason.unwrap()); }, _ => panic!("Unexpected event"), } @@ -2419,15 +2749,23 @@ fn auto_retry_partial_failure() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + // Open three channels, the first has plenty of liquidity, the second and third have ~no // available liquidity, causing any outbound payments routed over it to fail immediately. 
let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id; - let chan_2_id = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 989_000_000).0.contents.short_channel_id; - let chan_3_id = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 989_000_000).0.contents.short_channel_id; + let chan_2 = + create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 989_000_000); + let chan_2_id = chan_2.0.contents.short_channel_id; + let chan_3 = + create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 989_000_000); + let chan_3_id = chan_3.0.contents.short_channel_id; // Marshall data to send the payment let amt_msat = 10_000_000; - let (_, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[1], amt_msat); + let (_, payment_hash, payment_preimage, payment_secret) = + get_route_and_payment_hash!(&nodes[0], nodes[1], amt_msat); #[cfg(feature = "std")] let payment_expiry_secs = SystemTime::UNIX_EPOCH.elapsed().unwrap().as_secs() + 60 * 60; #[cfg(not(feature = "std"))] @@ -2436,9 +2774,10 @@ fn auto_retry_partial_failure() { invoice_features.set_variable_length_onion_required(); invoice_features.set_payment_secret_required(); invoice_features.set_basic_mpp_optional(); - let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV) + let payment_params = PaymentParameters::from_node_id(node_b_id, TEST_FINAL_CLTV) .with_expiry_time(payment_expiry_secs as u64) - .with_bolt11_features(invoice_features).unwrap(); + .with_bolt11_features(invoice_features) + .unwrap(); // Configure the initial send path let mut route_params = RouteParameters::from_payment_params_and_value(payment_params, amt_msat); @@ -2446,24 +2785,30 @@ fn auto_retry_partial_failure() { let send_route = Route { paths: vec![ - Path { hops: vec![RouteHop { - pubkey: nodes[1].node.get_our_node_id(), - node_features: 
nodes[1].node.node_features(), - short_channel_id: chan_1_id, - channel_features: nodes[1].node.channel_features(), - fee_msat: amt_msat / 2, - cltv_expiry_delta: 100, - maybe_announced_channel: true, - }], blinded_tail: None }, - Path { hops: vec![RouteHop { - pubkey: nodes[1].node.get_our_node_id(), - node_features: nodes[1].node.node_features(), - short_channel_id: chan_2_id, - channel_features: nodes[1].node.channel_features(), - fee_msat: amt_msat / 2, - cltv_expiry_delta: 100, - maybe_announced_channel: true, - }], blinded_tail: None }, + Path { + hops: vec![RouteHop { + pubkey: node_b_id, + node_features: nodes[1].node.node_features(), + short_channel_id: chan_1_id, + channel_features: nodes[1].node.channel_features(), + fee_msat: amt_msat / 2, + cltv_expiry_delta: 100, + maybe_announced_channel: true, + }], + blinded_tail: None, + }, + Path { + hops: vec![RouteHop { + pubkey: node_b_id, + node_features: nodes[1].node.node_features(), + short_channel_id: chan_2_id, + channel_features: nodes[1].node.channel_features(), + fee_msat: amt_msat / 2, + cltv_expiry_delta: 100, + maybe_announced_channel: true, + }], + blinded_tail: None, + }, ], route_params: Some(route_params.clone()), }; @@ -2472,29 +2817,36 @@ fn auto_retry_partial_failure() { // Configure the retry1 paths let mut payment_params = route_params.payment_params.clone(); payment_params.previously_failed_channels.push(chan_2_id); - let mut retry_1_params = RouteParameters::from_payment_params_and_value(payment_params, amt_msat / 2); + let mut retry_1_params = + RouteParameters::from_payment_params_and_value(payment_params, amt_msat / 2); retry_1_params.max_total_routing_fee_msat = None; let retry_1_route = Route { paths: vec![ - Path { hops: vec![RouteHop { - pubkey: nodes[1].node.get_our_node_id(), - node_features: nodes[1].node.node_features(), - short_channel_id: chan_1_id, - channel_features: nodes[1].node.channel_features(), - fee_msat: amt_msat / 4, - cltv_expiry_delta: 100, - 
maybe_announced_channel: true, - }], blinded_tail: None }, - Path { hops: vec![RouteHop { - pubkey: nodes[1].node.get_our_node_id(), - node_features: nodes[1].node.node_features(), - short_channel_id: chan_3_id, - channel_features: nodes[1].node.channel_features(), - fee_msat: amt_msat / 4, - cltv_expiry_delta: 100, - maybe_announced_channel: true, - }], blinded_tail: None }, + Path { + hops: vec![RouteHop { + pubkey: node_b_id, + node_features: nodes[1].node.node_features(), + short_channel_id: chan_1_id, + channel_features: nodes[1].node.channel_features(), + fee_msat: amt_msat / 4, + cltv_expiry_delta: 100, + maybe_announced_channel: true, + }], + blinded_tail: None, + }, + Path { + hops: vec![RouteHop { + pubkey: node_b_id, + node_features: nodes[1].node.node_features(), + short_channel_id: chan_3_id, + channel_features: nodes[1].node.channel_features(), + fee_msat: amt_msat / 4, + cltv_expiry_delta: 100, + maybe_announced_channel: true, + }], + blinded_tail: None, + }, ], route_params: Some(retry_1_params.clone()), }; @@ -2503,28 +2855,32 @@ fn auto_retry_partial_failure() { // Configure the retry2 path let mut payment_params = retry_1_params.payment_params.clone(); payment_params.previously_failed_channels.push(chan_3_id); - let mut retry_2_params = RouteParameters::from_payment_params_and_value(payment_params, amt_msat / 4); + let mut retry_2_params = + RouteParameters::from_payment_params_and_value(payment_params, amt_msat / 4); retry_2_params.max_total_routing_fee_msat = None; let retry_2_route = Route { - paths: vec![ - Path { hops: vec![RouteHop { - pubkey: nodes[1].node.get_our_node_id(), + paths: vec![Path { + hops: vec![RouteHop { + pubkey: node_b_id, node_features: nodes[1].node.node_features(), short_channel_id: chan_1_id, channel_features: nodes[1].node.channel_features(), fee_msat: amt_msat / 4, cltv_expiry_delta: 100, maybe_announced_channel: true, - }], blinded_tail: None }, - ], + }], + blinded_tail: None, + }], route_params: 
Some(retry_2_params.clone()), }; nodes[0].router.expect_find_route(retry_2_params, Ok(retry_2_route)); // Send a payment that will partially fail on send, then partially fail on retry, then succeed. - nodes[0].node.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret), - PaymentId(payment_hash.0), route_params, Retry::Attempts(3)).unwrap(); + let onion = RecipientOnionFields::secret_only(payment_secret); + let id = PaymentId(payment_hash.0); + nodes[0].node.send_payment(payment_hash, onion, id, route_params, Retry::Attempts(3)).unwrap(); + let payment_failed_events = nodes[0].node.get_and_clear_pending_events(); assert_eq!(payment_failed_events.len(), 2); match payment_failed_events[0] { @@ -2544,36 +2900,36 @@ fn auto_retry_partial_failure() { assert_eq!(msg_events.len(), 1); let mut payment_event = SendEvent::from_event(msg_events.remove(0)); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); - nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &payment_event.commitment_msg); + nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); + nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &payment_event.commitment_msg); check_added_monitors!(nodes[1], 1); - let (bs_first_raa, bs_first_cs) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + let (bs_first_raa, bs_first_cs) = get_revoke_commit_msgs!(nodes[1], node_a_id); - nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_first_raa); + nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_first_raa); check_added_monitors!(nodes[0], 1); - let as_second_htlc_updates = SendEvent::from_node(&nodes[0]); + let as_2nd_htlcs = SendEvent::from_node(&nodes[0]); - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_first_cs); + nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_first_cs); 
check_added_monitors!(nodes[0], 1); - let as_first_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); + let as_first_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_first_raa); + nodes[1].node.handle_revoke_and_ack(node_a_id, &as_first_raa); check_added_monitors!(nodes[1], 1); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &as_second_htlc_updates.msgs[0]); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &as_second_htlc_updates.msgs[1]); - nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &as_second_htlc_updates.commitment_msg); + nodes[1].node.handle_update_add_htlc(node_a_id, &as_2nd_htlcs.msgs[0]); + nodes[1].node.handle_update_add_htlc(node_a_id, &as_2nd_htlcs.msgs[1]); + nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &as_2nd_htlcs.commitment_msg); check_added_monitors!(nodes[1], 1); - let (bs_second_raa, bs_second_cs) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + let (bs_second_raa, bs_second_cs) = get_revoke_commit_msgs!(nodes[1], node_a_id); - nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_second_raa); + nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_second_raa); check_added_monitors!(nodes[0], 1); - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_second_cs); + nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_second_cs); check_added_monitors!(nodes[0], 1); - let as_second_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); + let as_second_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_second_raa); + nodes[1].node.handle_revoke_and_ack(node_a_id, 
&as_second_raa); check_added_monitors!(nodes[1], 1); expect_pending_htlcs_forwardable_ignore!(nodes[1]); @@ -2581,46 +2937,52 @@ fn auto_retry_partial_failure() { expect_payment_claimable!(nodes[1], payment_hash, payment_secret, amt_msat); nodes[1].node.claim_funds(payment_preimage); expect_payment_claimed!(nodes[1], payment_hash, amt_msat); - let bs_claim_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); - assert_eq!(bs_claim_update.update_fulfill_htlcs.len(), 1); + let bs_claim = get_htlc_update_msgs!(nodes[1], node_a_id); + assert_eq!(bs_claim.update_fulfill_htlcs.len(), 1); - nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &bs_claim_update.update_fulfill_htlcs[0]); + nodes[0].node.handle_update_fulfill_htlc(node_b_id, &bs_claim.update_fulfill_htlcs[0]); expect_payment_sent(&nodes[0], payment_preimage, None, false, false); - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_claim_update.commitment_signed); + nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_claim.commitment_signed); check_added_monitors!(nodes[0], 1); - let (as_third_raa, as_third_cs) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id()); + let (as_third_raa, as_third_cs) = get_revoke_commit_msgs!(nodes[0], node_b_id); - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_third_raa); + nodes[1].node.handle_revoke_and_ack(node_a_id, &as_third_raa); check_added_monitors!(nodes[1], 4); - let bs_second_claim_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + let bs_2nd_claim = get_htlc_update_msgs!(nodes[1], node_a_id); - nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &as_third_cs); + nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &as_third_cs); check_added_monitors!(nodes[1], 1); - let bs_third_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, 
nodes[0].node.get_our_node_id()); + let bs_third_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); - nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_third_raa); + nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_third_raa); check_added_monitors!(nodes[0], 1); expect_payment_path_successful!(nodes[0]); - nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &bs_second_claim_update.update_fulfill_htlcs[0]); - nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &bs_second_claim_update.update_fulfill_htlcs[1]); - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_second_claim_update.commitment_signed); + nodes[0].node.handle_update_fulfill_htlc(node_b_id, &bs_2nd_claim.update_fulfill_htlcs[0]); + nodes[0].node.handle_update_fulfill_htlc(node_b_id, &bs_2nd_claim.update_fulfill_htlcs[1]); + nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_2nd_claim.commitment_signed); check_added_monitors!(nodes[0], 1); - let (as_fourth_raa, as_fourth_cs) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id()); + let (as_fourth_raa, as_fourth_cs) = get_revoke_commit_msgs!(nodes[0], node_b_id); - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_fourth_raa); + nodes[1].node.handle_revoke_and_ack(node_a_id, &as_fourth_raa); check_added_monitors!(nodes[1], 1); - nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &as_fourth_cs); + nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &as_fourth_cs); check_added_monitors!(nodes[1], 1); - let bs_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); + let bs_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); - nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_second_raa); + 
nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_second_raa); check_added_monitors!(nodes[0], 1); let events = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events.len(), 2); - if let Event::PaymentPathSuccessful { .. } = events[0] {} else { panic!(); } - if let Event::PaymentPathSuccessful { .. } = events[1] {} else { panic!(); } + if let Event::PaymentPathSuccessful { .. } = events[0] { + } else { + panic!(); + } + if let Event::PaymentPathSuccessful { .. } = events[1] { + } else { + panic!(); + } } #[test] @@ -2630,13 +2992,17 @@ fn auto_retry_zero_attempts_send_error() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let node_b_id = nodes[1].node.get_our_node_id(); + // Open a single channel that does not have sufficient liquidity for the payment we want to // send. - let chan_id = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 989_000_000).0.contents.short_channel_id; + let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 989_000_000); + let chan_id = chan.0.contents.short_channel_id; // Marshall data to send the payment let amt_msat = 10_000_000; - let (_, payment_hash, payment_secret) = get_payment_preimage_hash(&nodes[1], Some(amt_msat), None); + let (_, payment_hash, payment_secret) = + get_payment_preimage_hash(&nodes[1], Some(amt_msat), None); #[cfg(feature = "std")] let payment_expiry_secs = SystemTime::UNIX_EPOCH.elapsed().unwrap().as_secs() + 60 * 60; #[cfg(not(feature = "std"))] @@ -2645,35 +3011,45 @@ fn auto_retry_zero_attempts_send_error() { invoice_features.set_variable_length_onion_required(); invoice_features.set_payment_secret_required(); invoice_features.set_basic_mpp_optional(); - let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV) + let payment_params = PaymentParameters::from_node_id(node_b_id, TEST_FINAL_CLTV) 
.with_expiry_time(payment_expiry_secs as u64) - .with_bolt11_features(invoice_features).unwrap(); + .with_bolt11_features(invoice_features) + .unwrap(); let route_params = RouteParameters::from_payment_params_and_value(payment_params, amt_msat); // Override the route search to return a route, rather than failing at the route-finding step. let send_route = Route { - paths: vec![ - Path { hops: vec![RouteHop { - pubkey: nodes[1].node.get_our_node_id(), + paths: vec![Path { + hops: vec![RouteHop { + pubkey: node_b_id, node_features: nodes[1].node.node_features(), short_channel_id: chan_id, channel_features: nodes[1].node.channel_features(), fee_msat: amt_msat, cltv_expiry_delta: 100, maybe_announced_channel: true, - }], blinded_tail: None }, - ], + }], + blinded_tail: None, + }], route_params: Some(route_params.clone()), }; nodes[0].router.expect_find_route(route_params.clone(), Ok(send_route)); - nodes[0].node.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret), - PaymentId(payment_hash.0), route_params, Retry::Attempts(0)).unwrap(); + let onion = RecipientOnionFields::secret_only(payment_secret); + let id = PaymentId(payment_hash.0); + nodes[0].node.send_payment(payment_hash, onion, id, route_params, Retry::Attempts(0)).unwrap(); + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); let events = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events.len(), 2); - if let Event::PaymentPathFailed { .. } = events[0] { } else { panic!(); } - if let Event::PaymentFailed { .. } = events[1] { } else { panic!(); } + if let Event::PaymentPathFailed { .. } = events[0] { + } else { + panic!(); + } + if let Event::PaymentFailed { .. 
} = events[1] { + } else { + panic!(); + } check_added_monitors!(nodes[0], 0); } @@ -2684,11 +3060,15 @@ fn fails_paying_after_rejected_by_payee() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id; // Marshall data to send the payment let amt_msat = 20_000; - let (_, payment_hash, _, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[1], amt_msat); + let (_, payment_hash, _, payment_secret) = + get_route_and_payment_hash!(&nodes[0], nodes[1], amt_msat); #[cfg(feature = "std")] let payment_expiry_secs = SystemTime::UNIX_EPOCH.elapsed().unwrap().as_secs() + 60 * 60; #[cfg(not(feature = "std"))] @@ -2697,26 +3077,30 @@ fn fails_paying_after_rejected_by_payee() { invoice_features.set_variable_length_onion_required(); invoice_features.set_payment_secret_required(); invoice_features.set_basic_mpp_optional(); - let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV) + let payment_params = PaymentParameters::from_node_id(node_b_id, TEST_FINAL_CLTV) .with_expiry_time(payment_expiry_secs as u64) - .with_bolt11_features(invoice_features).unwrap(); + .with_bolt11_features(invoice_features) + .unwrap(); let route_params = RouteParameters::from_payment_params_and_value(payment_params, amt_msat); - nodes[0].node.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret), - PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap(); + let onion = RecipientOnionFields::secret_only(payment_secret); + let id = PaymentId(payment_hash.0); + nodes[0].node.send_payment(payment_hash, onion, id, route_params, Retry::Attempts(1)).unwrap(); check_added_monitors!(nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); 
assert_eq!(events.len(), 1); let mut payment_event = SendEvent::from_event(events.pop().unwrap()); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); + nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); check_added_monitors!(nodes[1], 0); commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); expect_pending_htlcs_forwardable!(nodes[1]); expect_payment_claimable!(&nodes[1], payment_hash, payment_secret, amt_msat); nodes[1].node.fail_htlc_backwards(&payment_hash); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [HTLCHandlingFailureType::Receive { payment_hash }]); - pass_failed_payment_back(&nodes[0], &[&[&nodes[1]]], false, payment_hash, PaymentFailureReason::RecipientRejected); + let fail_type = HTLCHandlingFailureType::Receive { payment_hash }; + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [fail_type]); + let reason = PaymentFailureReason::RecipientRejected; + pass_failed_payment_back(&nodes[0], &[&[&nodes[1]]], false, payment_hash, reason); } #[test] @@ -2727,12 +3111,15 @@ fn retry_multi_path_single_failed_payment() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let node_b_id = nodes[1].node.get_our_node_id(); + create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 0); create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 0); let amt_msat = 100_010_000; - let (_, payment_hash, _, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[1], amt_msat); + let (_, payment_hash, _, payment_secret) = + get_route_and_payment_hash!(&nodes[0], nodes[1], amt_msat); #[cfg(feature = "std")] let payment_expiry_secs = SystemTime::UNIX_EPOCH.elapsed().unwrap().as_secs() + 60 * 60; #[cfg(not(feature = "std"))] @@ -2741,34 +3128,41 @@ fn retry_multi_path_single_failed_payment() { 
invoice_features.set_variable_length_onion_required(); invoice_features.set_payment_secret_required(); invoice_features.set_basic_mpp_optional(); - let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV) + let payment_params = PaymentParameters::from_node_id(node_b_id, TEST_FINAL_CLTV) .with_expiry_time(payment_expiry_secs as u64) - .with_bolt11_features(invoice_features).unwrap(); - let mut route_params = RouteParameters::from_payment_params_and_value( - payment_params.clone(), amt_msat); + .with_bolt11_features(invoice_features) + .unwrap(); + let mut route_params = + RouteParameters::from_payment_params_and_value(payment_params.clone(), amt_msat); route_params.max_total_routing_fee_msat = None; let chans = nodes[0].node.list_usable_channels(); let mut route = Route { paths: vec![ - Path { hops: vec![RouteHop { - pubkey: nodes[1].node.get_our_node_id(), - node_features: nodes[1].node.node_features(), - short_channel_id: chans[0].short_channel_id.unwrap(), - channel_features: nodes[1].node.channel_features(), - fee_msat: 10_000, - cltv_expiry_delta: 100, - maybe_announced_channel: true, - }], blinded_tail: None }, - Path { hops: vec![RouteHop { - pubkey: nodes[1].node.get_our_node_id(), - node_features: nodes[1].node.node_features(), - short_channel_id: chans[1].short_channel_id.unwrap(), - channel_features: nodes[1].node.channel_features(), - fee_msat: 100_000_001, // Our default max-HTLC-value is 10% of the channel value, which this is one more than - cltv_expiry_delta: 100, - maybe_announced_channel: true, - }], blinded_tail: None }, + Path { + hops: vec![RouteHop { + pubkey: node_b_id, + node_features: nodes[1].node.node_features(), + short_channel_id: chans[0].short_channel_id.unwrap(), + channel_features: nodes[1].node.channel_features(), + fee_msat: 10_000, + cltv_expiry_delta: 100, + maybe_announced_channel: true, + }], + blinded_tail: None, + }, + Path { + hops: vec![RouteHop { + pubkey: node_b_id, + 
node_features: nodes[1].node.node_features(), + short_channel_id: chans[1].short_channel_id.unwrap(), + channel_features: nodes[1].node.channel_features(), + fee_msat: 100_000_001, // Our default max-HTLC-value is 10% of the channel value, which this is one more than + cltv_expiry_delta: 100, + maybe_announced_channel: true, + }], + blinded_tail: None, + }, ], route_params: Some(route_params.clone()), }; @@ -2787,22 +3181,37 @@ fn retry_multi_path_single_failed_payment() { { let scorer = chanmon_cfgs[0].scorer.read().unwrap(); // The initial send attempt, 2 paths - scorer.expect_usage(chans[0].short_channel_id.unwrap(), ChannelUsage { amount_msat: 10_000, inflight_htlc_msat: 0, effective_capacity: EffectiveCapacity::Unknown }); - scorer.expect_usage(chans[1].short_channel_id.unwrap(), ChannelUsage { amount_msat: 100_000_001, inflight_htlc_msat: 0, effective_capacity: EffectiveCapacity::Unknown }); + let effective_capacity = EffectiveCapacity::Unknown; + let usage = ChannelUsage { amount_msat: 10_000, inflight_htlc_msat: 0, effective_capacity }; + scorer.expect_usage(chans[0].short_channel_id.unwrap(), usage); + let usage = + ChannelUsage { amount_msat: 100_000_001, inflight_htlc_msat: 0, effective_capacity }; + scorer.expect_usage(chans[1].short_channel_id.unwrap(), usage); // The retry, 2 paths. Ensure that the in-flight HTLC amount is factored in. 
- scorer.expect_usage(chans[0].short_channel_id.unwrap(), ChannelUsage { amount_msat: 50_000_001, inflight_htlc_msat: 10_000, effective_capacity: EffectiveCapacity::Unknown }); - scorer.expect_usage(chans[1].short_channel_id.unwrap(), ChannelUsage { amount_msat: 50_000_000, inflight_htlc_msat: 0, effective_capacity: EffectiveCapacity::Unknown }); + let usage = ChannelUsage { + amount_msat: 50_000_001, + inflight_htlc_msat: 10_000, + effective_capacity, + }; + scorer.expect_usage(chans[0].short_channel_id.unwrap(), usage); + let usage = + ChannelUsage { amount_msat: 50_000_000, inflight_htlc_msat: 0, effective_capacity }; + scorer.expect_usage(chans[1].short_channel_id.unwrap(), usage); } - nodes[0].node.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret), - PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap(); + let onion = RecipientOnionFields::secret_only(payment_secret); + let id = PaymentId(payment_hash.0); + nodes[0].node.send_payment(payment_hash, onion, id, route_params, Retry::Attempts(1)).unwrap(); let events = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events.len(), 1); match events[0] { - Event::PaymentPathFailed { payment_hash: ev_payment_hash, payment_failed_permanently: false, - failure: PathFailure::InitialSend { err: APIError::ChannelUnavailable { .. }}, - short_channel_id: Some(expected_scid), .. } => - { + Event::PaymentPathFailed { + payment_hash: ev_payment_hash, + payment_failed_permanently: false, + failure: PathFailure::InitialSend { err: APIError::ChannelUnavailable { .. } }, + short_channel_id: Some(expected_scid), + .. 
+ } => { assert_eq!(payment_hash, ev_payment_hash); assert_eq!(expected_scid, route.paths[1].hops[0].short_channel_id); }, @@ -2821,11 +3230,14 @@ fn immediate_retry_on_failure() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let node_b_id = nodes[1].node.get_our_node_id(); + create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 0); create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 0); let amt_msat = 100_000_001; - let (_, payment_hash, _, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[1], amt_msat); + let (_, payment_hash, _, payment_secret) = + get_route_and_payment_hash!(&nodes[0], nodes[1], amt_msat); #[cfg(feature = "std")] let payment_expiry_secs = SystemTime::UNIX_EPOCH.elapsed().unwrap().as_secs() + 60 * 60; #[cfg(not(feature = "std"))] @@ -2834,24 +3246,26 @@ fn immediate_retry_on_failure() { invoice_features.set_variable_length_onion_required(); invoice_features.set_payment_secret_required(); invoice_features.set_basic_mpp_optional(); - let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV) + let payment_params = PaymentParameters::from_node_id(node_b_id, TEST_FINAL_CLTV) .with_expiry_time(payment_expiry_secs as u64) - .with_bolt11_features(invoice_features).unwrap(); + .with_bolt11_features(invoice_features) + .unwrap(); let route_params = RouteParameters::from_payment_params_and_value(payment_params, amt_msat); let chans = nodes[0].node.list_usable_channels(); let mut route = Route { - paths: vec![ - Path { hops: vec![RouteHop { - pubkey: nodes[1].node.get_our_node_id(), + paths: vec![Path { + hops: vec![RouteHop { + pubkey: node_b_id, node_features: nodes[1].node.node_features(), short_channel_id: chans[0].short_channel_id.unwrap(), channel_features: nodes[1].node.channel_features(), fee_msat: 100_000_001, // Our default max-HTLC-value is 10% of the channel 
value, which this is one more than cltv_expiry_delta: 100, maybe_announced_channel: true, - }], blinded_tail: None }, - ], + }], + blinded_tail: None, + }], route_params: Some(route_params.clone()), }; nodes[0].router.expect_find_route(route_params.clone(), Ok(route.clone())); @@ -2866,15 +3280,19 @@ fn immediate_retry_on_failure() { route.route_params = Some(retry_params.clone()); nodes[0].router.expect_find_route(retry_params, Ok(route.clone())); - nodes[0].node.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret), - PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap(); + let onion = RecipientOnionFields::secret_only(payment_secret); + let id = PaymentId(payment_hash.0); + nodes[0].node.send_payment(payment_hash, onion, id, route_params, Retry::Attempts(1)).unwrap(); let events = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events.len(), 1); match events[0] { - Event::PaymentPathFailed { payment_hash: ev_payment_hash, payment_failed_permanently: false, - failure: PathFailure::InitialSend { err: APIError::ChannelUnavailable { .. }}, - short_channel_id: Some(expected_scid), .. } => - { + Event::PaymentPathFailed { + payment_hash: ev_payment_hash, + payment_failed_permanently: false, + failure: PathFailure::InitialSend { err: APIError::ChannelUnavailable { .. } }, + short_channel_id: Some(expected_scid), + .. 
+ } => { assert_eq!(payment_hash, ev_payment_hash); assert_eq!(expected_scid, route.paths[1].hops[0].short_channel_id); }, @@ -2907,13 +3325,18 @@ fn no_extra_retries_on_back_to_back_fail() { let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); let nodes = create_network(3, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + let chan_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 10_000_000, 0); let chan_1_scid = chan_1.0.contents.short_channel_id; let chan_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 10_000_000, 0); let chan_2_scid = chan_2.0.contents.short_channel_id; let amt_msat = 200_000_000; - let (_, payment_hash, _, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[1], amt_msat); + let (_, payment_hash, _, payment_secret) = + get_route_and_payment_hash!(&nodes[0], nodes[1], amt_msat); #[cfg(feature = "std")] let payment_expiry_secs = SystemTime::UNIX_EPOCH.elapsed().unwrap().as_secs() + 60 * 60; #[cfg(not(feature = "std"))] @@ -2922,48 +3345,61 @@ fn no_extra_retries_on_back_to_back_fail() { invoice_features.set_variable_length_onion_required(); invoice_features.set_payment_secret_required(); invoice_features.set_basic_mpp_optional(); - let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV) + let payment_params = PaymentParameters::from_node_id(node_b_id, TEST_FINAL_CLTV) .with_expiry_time(payment_expiry_secs as u64) - .with_bolt11_features(invoice_features).unwrap(); + .with_bolt11_features(invoice_features) + .unwrap(); let mut route_params = RouteParameters::from_payment_params_and_value(payment_params, amt_msat); route_params.max_total_routing_fee_msat = None; let mut route = Route { paths: vec![ - Path { hops: vec![RouteHop { - pubkey: nodes[1].node.get_our_node_id(), - node_features: 
nodes[1].node.node_features(), - short_channel_id: chan_1_scid, - channel_features: nodes[1].node.channel_features(), - fee_msat: 0, // nodes[1] will fail the payment as we don't pay its fee - cltv_expiry_delta: 100, - maybe_announced_channel: true, - }, RouteHop { - pubkey: nodes[2].node.get_our_node_id(), - node_features: nodes[2].node.node_features(), - short_channel_id: chan_2_scid, - channel_features: nodes[2].node.channel_features(), - fee_msat: 100_000_000, - cltv_expiry_delta: 100, - maybe_announced_channel: true, - }], blinded_tail: None }, - Path { hops: vec![RouteHop { - pubkey: nodes[1].node.get_our_node_id(), - node_features: nodes[1].node.node_features(), - short_channel_id: chan_1_scid, - channel_features: nodes[1].node.channel_features(), - fee_msat: 0, // nodes[1] will fail the payment as we don't pay its fee - cltv_expiry_delta: 100, - maybe_announced_channel: true, - }, RouteHop { - pubkey: nodes[2].node.get_our_node_id(), - node_features: nodes[2].node.node_features(), - short_channel_id: chan_2_scid, - channel_features: nodes[2].node.channel_features(), - fee_msat: 100_000_000, - cltv_expiry_delta: 100, - maybe_announced_channel: true, - }], blinded_tail: None } + Path { + hops: vec![ + RouteHop { + pubkey: node_b_id, + node_features: nodes[1].node.node_features(), + short_channel_id: chan_1_scid, + channel_features: nodes[1].node.channel_features(), + fee_msat: 0, // nodes[1] will fail the payment as we don't pay its fee + cltv_expiry_delta: 100, + maybe_announced_channel: true, + }, + RouteHop { + pubkey: node_c_id, + node_features: nodes[2].node.node_features(), + short_channel_id: chan_2_scid, + channel_features: nodes[2].node.channel_features(), + fee_msat: 100_000_000, + cltv_expiry_delta: 100, + maybe_announced_channel: true, + }, + ], + blinded_tail: None, + }, + Path { + hops: vec![ + RouteHop { + pubkey: node_b_id, + node_features: nodes[1].node.node_features(), + short_channel_id: chan_1_scid, + channel_features: 
nodes[1].node.channel_features(), + fee_msat: 0, // nodes[1] will fail the payment as we don't pay its fee + cltv_expiry_delta: 100, + maybe_announced_channel: true, + }, + RouteHop { + pubkey: node_c_id, + node_features: nodes[2].node.node_features(), + short_channel_id: chan_2_scid, + channel_features: nodes[2].node.channel_features(), + fee_msat: 100_000_000, + cltv_expiry_delta: 100, + maybe_announced_channel: true, + }, + ], + blinded_tail: None, + }, ], route_params: Some(route_params.clone()), }; @@ -2974,7 +3410,8 @@ fn no_extra_retries_on_back_to_back_fail() { // On retry, we'll only return one path route.paths.remove(1); route.paths[0].hops[1].fee_msat = amt_msat; - let mut retry_params = RouteParameters::from_payment_params_and_value(second_payment_params, amt_msat); + let mut retry_params = + RouteParameters::from_payment_params_and_value(second_payment_params, amt_msat); retry_params.max_total_routing_fee_msat = None; route.route_params = Some(retry_params.clone()); nodes[0].router.expect_find_route(retry_params, Ok(route.clone())); @@ -2982,54 +3419,59 @@ fn no_extra_retries_on_back_to_back_fail() { // We can't use the commitment_signed_dance macro helper because in this test we'll be sending // two HTLCs back-to-back on the same channel, and the macro only expects to handle one at a // time. 
- nodes[0].node.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret), - PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap(); + let onion = RecipientOnionFields::secret_only(payment_secret); + let id = PaymentId(payment_hash.0); + nodes[0].node.send_payment(payment_hash, onion, id, route_params, Retry::Attempts(1)).unwrap(); - let first_htlc_updates = SendEvent::from_node(&nodes[0]); + let first_htlc = SendEvent::from_node(&nodes[0]); check_added_monitors!(nodes[0], 1); - assert_eq!(first_htlc_updates.msgs.len(), 1); + assert_eq!(first_htlc.msgs.len(), 1); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &first_htlc_updates.msgs[0]); - nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &first_htlc_updates.commitment_msg); + nodes[1].node.handle_update_add_htlc(node_a_id, &first_htlc.msgs[0]); + nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &first_htlc.commitment_msg); check_added_monitors!(nodes[1], 1); - let (bs_first_raa, bs_first_cs) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); - nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_first_raa); + let (bs_first_raa, bs_first_cs) = get_revoke_commit_msgs!(nodes[1], node_a_id); + nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_first_raa); check_added_monitors!(nodes[0], 1); - let second_htlc_updates = SendEvent::from_node(&nodes[0]); - assert_eq!(second_htlc_updates.msgs.len(), 1); + let second_htlc = SendEvent::from_node(&nodes[0]); + assert_eq!(second_htlc.msgs.len(), 1); - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_first_cs); + nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_first_cs); check_added_monitors!(nodes[0], 1); - let as_first_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); - 
nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_first_raa); + let as_first_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); + nodes[1].node.handle_revoke_and_ack(node_a_id, &as_first_raa); check_added_monitors!(nodes[1], 1); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &second_htlc_updates.msgs[0]); - nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &second_htlc_updates.commitment_msg); + nodes[1].node.handle_update_add_htlc(node_a_id, &second_htlc.msgs[0]); + nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &second_htlc.commitment_msg); check_added_monitors!(nodes[1], 1); - let (bs_second_raa, bs_second_cs) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); - nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_second_raa); + let (bs_second_raa, bs_second_cs) = get_revoke_commit_msgs!(nodes[1], node_a_id); + nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_second_raa); check_added_monitors!(nodes[0], 1); - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_second_cs); + nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_second_cs); check_added_monitors!(nodes[0], 1); - let as_second_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_second_raa); + let as_second_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); + nodes[1].node.handle_revoke_and_ack(node_a_id, &as_second_raa); check_added_monitors!(nodes[1], 1); expect_pending_htlcs_forwardable!(nodes[1]); - let next_hop_failure = HTLCHandlingFailureType::Forward { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }; - expect_htlc_handling_failed_destinations!(nodes[1].node.get_and_clear_pending_events(), 
&[next_hop_failure.clone(), next_hop_failure.clone()]); + let next_hop_failure = + HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_2.2 }; + expect_htlc_handling_failed_destinations!( + nodes[1].node.get_and_clear_pending_events(), + &[next_hop_failure.clone(), next_hop_failure.clone()] + ); check_added_monitors(&nodes[1], 1); - let bs_fail_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + let bs_fail_update = get_htlc_update_msgs!(nodes[1], node_a_id); assert_eq!(bs_fail_update.update_fail_htlcs.len(), 2); - nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &bs_fail_update.update_fail_htlcs[0]); - nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &bs_fail_update.update_fail_htlcs[1]); + nodes[0].node.handle_update_fail_htlc(node_b_id, &bs_fail_update.update_fail_htlcs[0]); + nodes[0].node.handle_update_fail_htlc(node_b_id, &bs_fail_update.update_fail_htlcs[1]); commitment_signed_dance!(nodes[0], nodes[1], bs_fail_update.commitment_signed, false); // At this point A has sent two HTLCs which both failed due to lack of fee. It now has two @@ -3047,7 +3489,11 @@ fn no_extra_retries_on_back_to_back_fail() { let mut events = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events.len(), 3); match events[0] { - Event::PaymentPathFailed { payment_hash: ev_payment_hash, payment_failed_permanently, .. } => { + Event::PaymentPathFailed { + payment_hash: ev_payment_hash, + payment_failed_permanently, + .. + } => { assert_eq!(payment_hash, ev_payment_hash); assert_eq!(payment_failed_permanently, false); }, @@ -3058,7 +3504,11 @@ fn no_extra_retries_on_back_to_back_fail() { _ => panic!("Unexpected event"), } match events[2] { - Event::PaymentPathFailed { payment_hash: ev_payment_hash, payment_failed_permanently, .. } => { + Event::PaymentPathFailed { + payment_hash: ev_payment_hash, + payment_failed_permanently, + .. 
+ } => { assert_eq!(payment_hash, ev_payment_hash); assert_eq!(payment_failed_permanently, false); }, @@ -3069,27 +3519,38 @@ fn no_extra_retries_on_back_to_back_fail() { let retry_htlc_updates = SendEvent::from_node(&nodes[0]); check_added_monitors!(nodes[0], 1); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &retry_htlc_updates.msgs[0]); + nodes[1].node.handle_update_add_htlc(node_a_id, &retry_htlc_updates.msgs[0]); commitment_signed_dance!(nodes[1], nodes[0], &retry_htlc_updates.commitment_msg, false, true); expect_pending_htlcs_forwardable!(nodes[1]); - expect_htlc_handling_failed_destinations!(nodes[1].node.get_and_clear_pending_events(), &[next_hop_failure.clone()]); + expect_htlc_handling_failed_destinations!( + nodes[1].node.get_and_clear_pending_events(), + &[next_hop_failure.clone()] + ); check_added_monitors(&nodes[1], 1); - let bs_fail_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); - nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &bs_fail_update.update_fail_htlcs[0]); + let bs_fail_update = get_htlc_update_msgs!(nodes[1], node_a_id); + nodes[0].node.handle_update_fail_htlc(node_b_id, &bs_fail_update.update_fail_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[1], &bs_fail_update.commitment_signed, false, true); let mut events = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events.len(), 2); match events[0] { - Event::PaymentPathFailed { payment_hash: ev_payment_hash, payment_failed_permanently, .. } => { + Event::PaymentPathFailed { + payment_hash: ev_payment_hash, + payment_failed_permanently, + .. 
+ } => { assert_eq!(payment_hash, ev_payment_hash); assert_eq!(payment_failed_permanently, false); }, _ => panic!("Unexpected event"), } match events[1] { - Event::PaymentFailed { payment_hash: ref ev_payment_hash, payment_id: ref ev_payment_id, reason: ref ev_reason } => { + Event::PaymentFailed { + payment_hash: ref ev_payment_hash, + payment_id: ref ev_payment_id, + reason: ref ev_reason, + } => { assert_eq!(Some(payment_hash), *ev_payment_hash); assert_eq!(PaymentId(payment_hash.0), *ev_payment_id); assert_eq!(PaymentFailureReason::RetriesExhausted, ev_reason.unwrap()); @@ -3110,13 +3571,18 @@ fn test_simple_partial_retry() { let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); let nodes = create_network(3, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + let chan_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 10_000_000, 0); let chan_1_scid = chan_1.0.contents.short_channel_id; let chan_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 10_000_000, 0); let chan_2_scid = chan_2.0.contents.short_channel_id; let amt_msat = 200_000_000; - let (_, payment_hash, _, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[2], amt_msat); + let (_, payment_hash, _, payment_secret) = + get_route_and_payment_hash!(&nodes[0], nodes[2], amt_msat); #[cfg(feature = "std")] let payment_expiry_secs = SystemTime::UNIX_EPOCH.elapsed().unwrap().as_secs() + 60 * 60; #[cfg(not(feature = "std"))] @@ -3125,48 +3591,61 @@ fn test_simple_partial_retry() { invoice_features.set_variable_length_onion_required(); invoice_features.set_payment_secret_required(); invoice_features.set_basic_mpp_optional(); - let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV) + let payment_params = PaymentParameters::from_node_id(node_b_id, TEST_FINAL_CLTV) 
.with_expiry_time(payment_expiry_secs as u64) - .with_bolt11_features(invoice_features).unwrap(); + .with_bolt11_features(invoice_features) + .unwrap(); let mut route_params = RouteParameters::from_payment_params_and_value(payment_params, amt_msat); route_params.max_total_routing_fee_msat = None; let mut route = Route { paths: vec![ - Path { hops: vec![RouteHop { - pubkey: nodes[1].node.get_our_node_id(), - node_features: nodes[1].node.node_features(), - short_channel_id: chan_1_scid, - channel_features: nodes[1].node.channel_features(), - fee_msat: 0, // nodes[1] will fail the payment as we don't pay its fee - cltv_expiry_delta: 100, - maybe_announced_channel: true, - }, RouteHop { - pubkey: nodes[2].node.get_our_node_id(), - node_features: nodes[2].node.node_features(), - short_channel_id: chan_2_scid, - channel_features: nodes[2].node.channel_features(), - fee_msat: 100_000_000, - cltv_expiry_delta: 100, - maybe_announced_channel: true, - }], blinded_tail: None }, - Path { hops: vec![RouteHop { - pubkey: nodes[1].node.get_our_node_id(), - node_features: nodes[1].node.node_features(), - short_channel_id: chan_1_scid, - channel_features: nodes[1].node.channel_features(), - fee_msat: 100_000, - cltv_expiry_delta: 100, - maybe_announced_channel: true, - }, RouteHop { - pubkey: nodes[2].node.get_our_node_id(), - node_features: nodes[2].node.node_features(), - short_channel_id: chan_2_scid, - channel_features: nodes[2].node.channel_features(), - fee_msat: 100_000_000, - cltv_expiry_delta: 100, - maybe_announced_channel: true, - }], blinded_tail: None } + Path { + hops: vec![ + RouteHop { + pubkey: node_b_id, + node_features: nodes[1].node.node_features(), + short_channel_id: chan_1_scid, + channel_features: nodes[1].node.channel_features(), + fee_msat: 0, // nodes[1] will fail the payment as we don't pay its fee + cltv_expiry_delta: 100, + maybe_announced_channel: true, + }, + RouteHop { + pubkey: node_c_id, + node_features: nodes[2].node.node_features(), + 
short_channel_id: chan_2_scid, + channel_features: nodes[2].node.channel_features(), + fee_msat: 100_000_000, + cltv_expiry_delta: 100, + maybe_announced_channel: true, + }, + ], + blinded_tail: None, + }, + Path { + hops: vec![ + RouteHop { + pubkey: node_b_id, + node_features: nodes[1].node.node_features(), + short_channel_id: chan_1_scid, + channel_features: nodes[1].node.channel_features(), + fee_msat: 100_000, + cltv_expiry_delta: 100, + maybe_announced_channel: true, + }, + RouteHop { + pubkey: node_c_id, + node_features: nodes[2].node.node_features(), + short_channel_id: chan_2_scid, + channel_features: nodes[2].node.channel_features(), + fee_msat: 100_000_000, + cltv_expiry_delta: 100, + maybe_announced_channel: true, + }, + ], + blinded_tail: None, + }, ], route_params: Some(route_params.clone()), }; @@ -3177,7 +3656,8 @@ fn test_simple_partial_retry() { second_payment_params.previously_failed_channels = vec![chan_2_scid]; // On retry, we'll only be asked for one path (or 100k sats) route.paths.remove(0); - let mut retry_params = RouteParameters::from_payment_params_and_value(second_payment_params, amt_msat / 2); + let mut retry_params = + RouteParameters::from_payment_params_and_value(second_payment_params, amt_msat / 2); retry_params.max_total_routing_fee_msat = None; route.route_params = Some(retry_params.clone()); nodes[0].router.expect_find_route(retry_params, Ok(route.clone())); @@ -3185,36 +3665,41 @@ fn test_simple_partial_retry() { // We can't use the commitment_signed_dance macro helper because in this test we'll be sending // two HTLCs back-to-back on the same channel, and the macro only expects to handle one at a // time. 
- nodes[0].node.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret), - PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap(); - let first_htlc_updates = SendEvent::from_node(&nodes[0]); + let onion = RecipientOnionFields::secret_only(payment_secret); + let id = PaymentId(payment_hash.0); + nodes[0].node.send_payment(payment_hash, onion, id, route_params, Retry::Attempts(1)).unwrap(); + let first_htlc = SendEvent::from_node(&nodes[0]); check_added_monitors!(nodes[0], 1); - assert_eq!(first_htlc_updates.msgs.len(), 1); + assert_eq!(first_htlc.msgs.len(), 1); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &first_htlc_updates.msgs[0]); - nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &first_htlc_updates.commitment_msg); + nodes[1].node.handle_update_add_htlc(node_a_id, &first_htlc.msgs[0]); + nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &first_htlc.commitment_msg); check_added_monitors!(nodes[1], 1); - let (bs_first_raa, bs_first_cs) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); - nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_first_raa); + let (bs_first_raa, bs_first_cs) = get_revoke_commit_msgs!(nodes[1], node_a_id); + nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_first_raa); check_added_monitors!(nodes[0], 1); let second_htlc_updates = SendEvent::from_node(&nodes[0]); assert_eq!(second_htlc_updates.msgs.len(), 1); - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_first_cs); + nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_first_cs); check_added_monitors!(nodes[0], 1); - let as_first_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_first_raa); + let as_first_raa = get_event_msg!(nodes[0], 
MessageSendEvent::SendRevokeAndACK, node_b_id); + nodes[1].node.handle_revoke_and_ack(node_a_id, &as_first_raa); check_added_monitors!(nodes[1], 1); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &second_htlc_updates.msgs[0]); + nodes[1].node.handle_update_add_htlc(node_a_id, &second_htlc_updates.msgs[0]); commitment_signed_dance!(nodes[1], nodes[0], second_htlc_updates.commitment_msg, false); expect_pending_htlcs_forwardable!(nodes[1]); - let next_hop_failure = HTLCHandlingFailureType::Forward { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }; - expect_htlc_handling_failed_destinations!(nodes[1].node.get_and_clear_pending_events(), &[next_hop_failure.clone()]); + let next_hop_failure = + HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_2.2 }; + expect_htlc_handling_failed_destinations!( + nodes[1].node.get_and_clear_pending_events(), + &[next_hop_failure.clone()] + ); check_added_monitors(&nodes[1], 2); { @@ -3222,13 +3707,13 @@ fn test_simple_partial_retry() { assert_eq!(msg_events.len(), 2); let mut handle_update_htlcs = |event: MessageSendEvent| { if let MessageSendEvent::UpdateHTLCs { node_id, channel_id: _, updates } = event { - if node_id == nodes[0].node.get_our_node_id() { + if node_id == node_a_id { assert_eq!(updates.update_fail_htlcs.len(), 1); - nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]); + nodes[0].node.handle_update_fail_htlc(node_b_id, &updates.update_fail_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[1], &updates.commitment_signed, false); - } else if node_id == nodes[2].node.get_our_node_id() { + } else if node_id == node_c_id { assert_eq!(updates.update_add_htlcs.len(), 1); - nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &updates.update_add_htlcs[0]); + nodes[2].node.handle_update_add_htlc(node_b_id, &updates.update_add_htlcs[0]); commitment_signed_dance!(nodes[2], nodes[1], 
&updates.commitment_signed, false); } else { panic!("Unexpected node_id for UpdateHTLCs send"); @@ -3244,7 +3729,11 @@ fn test_simple_partial_retry() { let mut events = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events.len(), 2); match events[0] { - Event::PaymentPathFailed { payment_hash: ev_payment_hash, payment_failed_permanently, .. } => { + Event::PaymentPathFailed { + payment_hash: ev_payment_hash, + payment_failed_permanently, + .. + } => { assert_eq!(payment_hash, ev_payment_hash); assert_eq!(payment_failed_permanently, false); }, @@ -3259,15 +3748,15 @@ fn test_simple_partial_retry() { let retry_htlc_updates = SendEvent::from_node(&nodes[0]); check_added_monitors!(nodes[0], 1); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &retry_htlc_updates.msgs[0]); + nodes[1].node.handle_update_add_htlc(node_a_id, &retry_htlc_updates.msgs[0]); commitment_signed_dance!(nodes[1], nodes[0], &retry_htlc_updates.commitment_msg, false, true); expect_pending_htlcs_forwardable!(nodes[1]); check_added_monitors!(nodes[1], 1); - let bs_second_forward_update = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id()); - nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &bs_second_forward_update.update_add_htlcs[0]); - commitment_signed_dance!(nodes[2], nodes[1], &bs_second_forward_update.commitment_signed, false); + let bs_second_forward = get_htlc_update_msgs!(nodes[1], node_c_id); + nodes[2].node.handle_update_add_htlc(node_b_id, &bs_second_forward.update_add_htlcs[0]); + commitment_signed_dance!(nodes[2], nodes[1], &bs_second_forward.commitment_signed, false); expect_pending_htlcs_forwardable!(nodes[2]); expect_payment_claimable!(nodes[2], payment_hash, payment_secret, amt_msat); @@ -3286,18 +3775,27 @@ fn test_threaded_payment_retries() { let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]); let nodes = create_network(4, &node_cfgs, &node_chanmgrs); + let node_a_id = 
nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + let node_d_id = nodes[3].node.get_our_node_id(); + // There is one mitigating guardrail when retrying payments - we can never over-pay by more // than 10% of the original value. Thus, we want all our retries to be below that. In order to // keep things simple, we route one HTLC for 0.1% of the payment over channel 1 and the rest // out over channel 3+4. This will let us ignore 99% of the payment value and deal with only // our channel. - let chan_1_scid = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 10_000_000, 0).0.contents.short_channel_id; + let chan_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 10_000_000, 0); + let chan_1_scid = chan_1.0.contents.short_channel_id; create_announced_chan_between_nodes_with_value(&nodes, 1, 3, 10_000_000, 0); - let chan_3_scid = create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 10_000_000, 0).0.contents.short_channel_id; - let chan_4_scid = create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 10_000_000, 0).0.contents.short_channel_id; + let chan_3 = create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 10_000_000, 0); + let chan_3_scid = chan_3.0.contents.short_channel_id; + let chan_4 = create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 10_000_000, 0); + let chan_4_scid = chan_4.0.contents.short_channel_id; let amt_msat = 100_000_000; - let (_, payment_hash, _, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[2], amt_msat); + let (_, payment_hash, _, payment_secret) = + get_route_and_payment_hash!(&nodes[0], nodes[2], amt_msat); #[cfg(feature = "std")] let payment_expiry_secs = SystemTime::UNIX_EPOCH.elapsed().unwrap().as_secs() + 60 * 60; #[cfg(not(feature = "std"))] @@ -3306,66 +3804,85 @@ fn test_threaded_payment_retries() { invoice_features.set_variable_length_onion_required(); 
invoice_features.set_payment_secret_required(); invoice_features.set_basic_mpp_optional(); - let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV) + let payment_params = PaymentParameters::from_node_id(node_b_id, TEST_FINAL_CLTV) .with_expiry_time(payment_expiry_secs as u64) - .with_bolt11_features(invoice_features).unwrap(); + .with_bolt11_features(invoice_features) + .unwrap(); let mut route_params = RouteParameters { - payment_params, final_value_msat: amt_msat, max_total_routing_fee_msat: Some(500_000), + payment_params, + final_value_msat: amt_msat, + max_total_routing_fee_msat: Some(500_000), }; let mut route = Route { paths: vec![ - Path { hops: vec![RouteHop { - pubkey: nodes[1].node.get_our_node_id(), - node_features: nodes[1].node.node_features(), - short_channel_id: chan_1_scid, - channel_features: nodes[1].node.channel_features(), - fee_msat: 0, - cltv_expiry_delta: 100, - maybe_announced_channel: true, - }, RouteHop { - pubkey: nodes[3].node.get_our_node_id(), - node_features: nodes[2].node.node_features(), - short_channel_id: 42, // Set a random SCID which nodes[1] will fail as unknown - channel_features: nodes[2].node.channel_features(), - fee_msat: amt_msat / 1000, - cltv_expiry_delta: 100, - maybe_announced_channel: true, - }], blinded_tail: None }, - Path { hops: vec![RouteHop { - pubkey: nodes[2].node.get_our_node_id(), - node_features: nodes[2].node.node_features(), - short_channel_id: chan_3_scid, - channel_features: nodes[2].node.channel_features(), - fee_msat: 100_000, - cltv_expiry_delta: 100, - maybe_announced_channel: true, - }, RouteHop { - pubkey: nodes[3].node.get_our_node_id(), - node_features: nodes[3].node.node_features(), - short_channel_id: chan_4_scid, - channel_features: nodes[3].node.channel_features(), - fee_msat: amt_msat - amt_msat / 1000, - cltv_expiry_delta: 100, - maybe_announced_channel: true, - }], blinded_tail: None } + Path { + hops: vec![ + RouteHop { + pubkey: 
node_b_id, + node_features: nodes[1].node.node_features(), + short_channel_id: chan_1_scid, + channel_features: nodes[1].node.channel_features(), + fee_msat: 0, + cltv_expiry_delta: 100, + maybe_announced_channel: true, + }, + RouteHop { + pubkey: node_d_id, + node_features: nodes[2].node.node_features(), + short_channel_id: 42, // Set a random SCID which nodes[1] will fail as unknown + channel_features: nodes[2].node.channel_features(), + fee_msat: amt_msat / 1000, + cltv_expiry_delta: 100, + maybe_announced_channel: true, + }, + ], + blinded_tail: None, + }, + Path { + hops: vec![ + RouteHop { + pubkey: node_c_id, + node_features: nodes[2].node.node_features(), + short_channel_id: chan_3_scid, + channel_features: nodes[2].node.channel_features(), + fee_msat: 100_000, + cltv_expiry_delta: 100, + maybe_announced_channel: true, + }, + RouteHop { + pubkey: node_d_id, + node_features: nodes[3].node.node_features(), + short_channel_id: chan_4_scid, + channel_features: nodes[3].node.channel_features(), + fee_msat: amt_msat - amt_msat / 1000, + cltv_expiry_delta: 100, + maybe_announced_channel: true, + }, + ], + blinded_tail: None, + }, ], route_params: Some(route_params.clone()), }; nodes[0].router.expect_find_route(route_params.clone(), Ok(route.clone())); - nodes[0].node.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret), - PaymentId(payment_hash.0), route_params.clone(), Retry::Attempts(0xdeadbeef)).unwrap(); + let onion = RecipientOnionFields::secret_only(payment_secret); + let id = PaymentId(payment_hash.0); + let retry = Retry::Attempts(0xdeadbeef); + nodes[0].node.send_payment(payment_hash, onion, id, route_params.clone(), retry).unwrap(); check_added_monitors!(nodes[0], 2); let mut send_msg_events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(send_msg_events.len(), 2); - send_msg_events.retain(|msg| + send_msg_events.retain(|msg| { if let MessageSendEvent::UpdateHTLCs { node_id, channel_id: _, .. 
} = msg { // Drop the commitment update for nodes[2], we can just let that one sit pending // forever. - *node_id == nodes[1].node.get_our_node_id() - } else { panic!(); } - ); + *node_id == node_b_id + } else { + panic!(); + } + }); // from here on out, the retry `RouteParameters` amount will be amt/1000 route_params.final_value_msat /= 1000; @@ -3388,7 +3905,9 @@ fn test_threaded_payment_retries() { } } } } let mut threads = Vec::new(); - for _ in 0..16 { threads.push(std::thread::spawn(thread_body!())); } + for _ in 0..16 { + threads.push(std::thread::spawn(thread_body!())); + } // Back in the main thread, poll pending messages and make sure that we never have more than // one HTLC pending at a time. Note that the commitment_signed_dance will fail horribly if @@ -3400,13 +3919,15 @@ fn test_threaded_payment_retries() { let send_event = SendEvent::from_event(send_msg_events.pop().unwrap()); assert_eq!(send_event.msgs.len(), 1); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &send_event.msgs[0]); + nodes[1].node.handle_update_add_htlc(node_a_id, &send_event.msgs[0]); commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false, true); expect_pending_htlcs_forwardable!(nodes[1]); nodes[1].node.process_pending_htlc_forwards(); expect_htlc_handling_failed_destinations!( nodes[1].node.get_and_clear_pending_events(), - &[HTLCHandlingFailureType::InvalidForward { requested_forward_scid: route.paths[0].hops[1].short_channel_id }] + &[HTLCHandlingFailureType::InvalidForward { + requested_forward_scid: route.paths[0].hops[1].short_channel_id + }] ); check_added_monitors(&nodes[1], 1); @@ -3416,25 +3937,29 @@ fn test_threaded_payment_retries() { // many HTLCs at once. 
let mut new_route_params = route_params.clone(); previously_failed_channels.push(route.paths[0].hops[1].short_channel_id); - new_route_params.payment_params.previously_failed_channels = previously_failed_channels.clone(); + new_route_params.payment_params.previously_failed_channels = + previously_failed_channels.clone(); new_route_params.max_total_routing_fee_msat.as_mut().map(|m| *m -= 100_000); route.paths[0].hops[1].short_channel_id += 1; route.route_params = Some(new_route_params.clone()); nodes[0].router.expect_find_route(new_route_params, Ok(route.clone())); - let bs_fail_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); - nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &bs_fail_updates.update_fail_htlcs[0]); + let bs_fail_updates = get_htlc_update_msgs!(nodes[1], node_a_id); + nodes[0].node.handle_update_fail_htlc(node_b_id, &bs_fail_updates.update_fail_htlcs[0]); // The "normal" commitment_signed_dance delivers the final RAA and then calls // `check_added_monitors` to ensure only the one RAA-generated monitor update was created. // This races with our other threads which may generate an add-HTLCs commitment update via // `process_pending_htlc_forwards`. Instead, we defer the monitor update check until after // *we've* called `process_pending_htlc_forwards` when its guaranteed to have two updates. - let last_raa = commitment_signed_dance!(nodes[0], nodes[1], bs_fail_updates.commitment_signed, false, true, false, true); - nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &last_raa); + let cs = bs_fail_updates.commitment_signed; + let last_raa = commitment_signed_dance!(nodes[0], nodes[1], cs, false, true, false, true); + nodes[0].node.handle_revoke_and_ack(node_b_id, &last_raa); let cur_time = Instant::now(); if cur_time > end_time { - for thread in threads.drain(..) { thread.join().unwrap(); } + for thread in threads.drain(..) 
{ + thread.join().unwrap(); + } } // Make sure we have some events to handle when we go around... @@ -3455,23 +3980,27 @@ fn do_no_missing_sent_on_reload(persist_manager_with_payment: bool, at_midpoint: // it was last persisted. let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let (persister_a, persister_b, persister_c); + let (persist_a, persist_b, persist_c); let (chain_monitor_a, chain_monitor_b, chain_monitor_c); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); - let (nodes_0_deserialized, nodes_0_deserialized_b, nodes_0_deserialized_c); + let (node_a_1, node_a_2, node_a_3); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let chan_id = create_announced_chan_between_nodes(&nodes, 0, 1).2; - let mut nodes_0_serialized = Vec::new(); + let mut node_a_ser = Vec::new(); if !persist_manager_with_payment { - nodes_0_serialized = nodes[0].node.encode(); + node_a_ser = nodes[0].node.encode(); } - let (our_payment_preimage, our_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000); + let (our_payment_preimage, our_payment_hash, ..) 
= + route_payment(&nodes[0], &[&nodes[1]], 1_000_000); if persist_manager_with_payment { - nodes_0_serialized = nodes[0].node.encode(); + node_a_ser = nodes[0].node.encode(); } nodes[1].node.claim_funds(our_payment_preimage); @@ -3479,14 +4008,14 @@ fn do_no_missing_sent_on_reload(persist_manager_with_payment: bool, at_midpoint: expect_payment_claimed!(nodes[1], our_payment_hash, 1_000_000); if at_midpoint { - let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); - nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]); - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &updates.commitment_signed); + let updates = get_htlc_update_msgs!(nodes[1], node_a_id); + nodes[0].node.handle_update_fulfill_htlc(node_b_id, &updates.update_fulfill_htlcs[0]); + nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &updates.commitment_signed); check_added_monitors!(nodes[0], 1); } else { - let htlc_fulfill_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); - nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &htlc_fulfill_updates.update_fulfill_htlcs[0]); - commitment_signed_dance!(nodes[0], nodes[1], htlc_fulfill_updates.commitment_signed, false); + let htlc_fulfill = get_htlc_update_msgs!(nodes[1], node_a_id); + nodes[0].node.handle_update_fulfill_htlc(node_b_id, &htlc_fulfill.update_fulfill_htlcs[0]); + commitment_signed_dance!(nodes[0], nodes[1], htlc_fulfill.commitment_signed, false); // Ignore the PaymentSent event which is now pending on nodes[0] - if we were to handle it we'd // be expected to ignore the eventual conflicting PaymentFailed, but by not looking at it we // expect to get the PaymentSent again later. 
@@ -3495,13 +4024,21 @@ fn do_no_missing_sent_on_reload(persist_manager_with_payment: bool, at_midpoint: // The ChannelMonitor should always be the latest version, as we're required to persist it // during the commitment signed handling. - let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode(); - reload_node!(nodes[0], test_default_channel_config(), &nodes_0_serialized, &[&chan_0_monitor_serialized], persister_a, chain_monitor_a, nodes_0_deserialized); + let mon_ser = get_monitor!(nodes[0], chan_id).encode(); + let config = test_default_channel_config(); + reload_node!(nodes[0], config, &node_a_ser, &[&mon_ser], persist_a, chain_monitor_a, node_a_1); let events = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events.len(), 2); - if let Event::ChannelClosed { reason: ClosureReason::OutdatedChannelManager, .. } = events[0] {} else { panic!(); } - if let Event::PaymentSent { payment_preimage, .. } = events[1] { assert_eq!(payment_preimage, our_payment_preimage); } else { panic!(); } + if let Event::ChannelClosed { reason: ClosureReason::OutdatedChannelManager, .. } = events[0] { + } else { + panic!(); + } + if let Event::PaymentSent { payment_preimage, .. } = events[1] { + assert_eq!(payment_preimage, our_payment_preimage); + } else { + panic!(); + } // Note that we don't get a PaymentPathSuccessful here as we leave the HTLC pending to avoid // the double-claim that would otherwise appear at the end of this test. nodes[0].node.timer_tick_occurred(); @@ -3513,23 +4050,31 @@ fn do_no_missing_sent_on_reload(persist_manager_with_payment: bool, at_midpoint: // payments have since been timed out thanks to `IDEMPOTENCY_TIMEOUT_TICKS`. // A naive implementation of the fix here would wipe the pending payments set, causing a // failure event when we restart. 
- for _ in 0..(IDEMPOTENCY_TIMEOUT_TICKS * 2) { nodes[0].node.timer_tick_occurred(); } + for _ in 0..(IDEMPOTENCY_TIMEOUT_TICKS * 2) { + nodes[0].node.timer_tick_occurred(); + } - let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode(); - reload_node!(nodes[0], test_default_channel_config(), &nodes[0].node.encode(), &[&chan_0_monitor_serialized], persister_b, chain_monitor_b, nodes_0_deserialized_b); + let mon_ser = get_monitor!(nodes[0], chan_id).encode(); + let node_ser = nodes[0].node.encode(); + let config = test_default_channel_config(); + reload_node!(nodes[0], config, &node_ser, &[&mon_ser], persist_b, chain_monitor_b, node_a_2); let events = nodes[0].node.get_and_clear_pending_events(); assert!(events.is_empty()); // Ensure that we don't generate any further events even after the channel-closing commitment // transaction is confirmed on-chain. confirm_transaction(&nodes[0], &as_broadcasted_txn[0]); - for _ in 0..(IDEMPOTENCY_TIMEOUT_TICKS * 2) { nodes[0].node.timer_tick_occurred(); } + for _ in 0..(IDEMPOTENCY_TIMEOUT_TICKS * 2) { + nodes[0].node.timer_tick_occurred(); + } let events = nodes[0].node.get_and_clear_pending_events(); assert!(events.is_empty()); - let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode(); - reload_node!(nodes[0], test_default_channel_config(), &nodes[0].node.encode(), &[&chan_0_monitor_serialized], persister_c, chain_monitor_c, nodes_0_deserialized_c); + let mon_ser = get_monitor!(nodes[0], chan_id).encode(); + let config = test_default_channel_config(); + let node_ser = nodes[0].node.encode(); + reload_node!(nodes[0], config, &node_ser, &[&mon_ser], persist_c, chain_monitor_c, node_a_3); let events = nodes[0].node.get_and_clear_pending_events(); assert!(events.is_empty()); } @@ -3567,20 +4112,34 @@ fn do_claim_from_closed_chan(fail_payment: bool) { let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]); let mut nodes = create_network(4, &node_cfgs, &node_chanmgrs); + 
let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + let node_d_id = nodes[3].node.get_our_node_id(); + create_announced_chan_between_nodes(&nodes, 0, 1); create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 1_000_000, 0); let chan_bd = create_announced_chan_between_nodes_with_value(&nodes, 1, 3, 1_000_000, 0).2; create_announced_chan_between_nodes(&nodes, 2, 3); - let (payment_preimage, payment_hash, payment_secret) = get_payment_preimage_hash!(nodes[3]); - let payment_params = PaymentParameters::from_node_id(nodes[3].node.get_our_node_id(), TEST_FINAL_CLTV) - .with_bolt11_features(nodes[1].node.bolt11_invoice_features()).unwrap(); - let mut route_params = RouteParameters::from_payment_params_and_value(payment_params, 10_000_000); - let mut route = nodes[0].router.find_route(&nodes[0].node.get_our_node_id(), &route_params, - None, nodes[0].node.compute_inflight_htlcs()).unwrap(); + let (payment_preimage, hash, secret) = get_payment_preimage_hash!(nodes[3]); + let payment_params = PaymentParameters::from_node_id(node_d_id, TEST_FINAL_CLTV) + .with_bolt11_features(nodes[1].node.bolt11_invoice_features()) + .unwrap(); + + let mut route_params = + RouteParameters::from_payment_params_and_value(payment_params, 10_000_000); + let inflight = nodes[0].node.compute_inflight_htlcs(); + let mut route = nodes[0].router.find_route(&node_a_id, &route_params, None, inflight).unwrap(); + // Make sure the route is ordered as the B->D path before C->D - route.paths.sort_by(|a, _| if a.hops[0].pubkey == nodes[1].node.get_our_node_id() { - std::cmp::Ordering::Less } else { std::cmp::Ordering::Greater }); + route.paths.sort_by(|a, _| { + if a.hops[0].pubkey == node_b_id { + Ordering::Less + } else { + Ordering::Greater + } + }); // Note that we add an extra 1 in the send pipeline to compensate for any blocks found while // the HTLC is being relayed. 
@@ -3589,22 +4148,30 @@ fn do_claim_from_closed_chan(fail_payment: bool) { let final_cltv = nodes[0].best_block_info().1 + TEST_FINAL_CLTV + 8 + 1; nodes[0].router.expect_find_route(route_params.clone(), Ok(route.clone())); - nodes[0].node.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret), - PaymentId(payment_hash.0), route_params.clone(), Retry::Attempts(1)).unwrap(); + let onion = RecipientOnionFields::secret_only(secret); + let id = PaymentId(hash.0); + nodes[0].node.send_payment(hash, onion, id, route_params, Retry::Attempts(1)).unwrap(); + check_added_monitors(&nodes[0], 2); let mut send_msgs = nodes[0].node.get_and_clear_pending_msg_events(); send_msgs.sort_by(|a, _| { let a_node_id = if let MessageSendEvent::UpdateHTLCs { node_id, .. } = a { node_id } else { panic!() }; - let node_b_id = nodes[1].node.get_our_node_id(); - if *a_node_id == node_b_id { std::cmp::Ordering::Less } else { std::cmp::Ordering::Greater } + let node_b_id = node_b_id; + if *a_node_id == node_b_id { + Ordering::Less + } else { + Ordering::Greater + } }); assert_eq!(send_msgs.len(), 2); - pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 10_000_000, - payment_hash, Some(payment_secret), send_msgs.remove(0), false, None); - let receive_event = pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 10_000_000, - payment_hash, Some(payment_secret), send_msgs.remove(0), true, None); + let (msg_a, msg_b) = (send_msgs.remove(0), send_msgs.remove(0)); + let (path_a, path_b) = (&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]); + + pass_along_path(&nodes[0], path_a, 10_000_000, hash, Some(secret), msg_a, false, None); + let receive_event = + pass_along_path(&nodes[0], path_b, 10_000_000, hash, Some(secret), msg_b, true, None); match receive_event.unwrap() { Event::PaymentClaimable { claim_deadline, .. 
} => { @@ -3615,21 +4182,26 @@ fn do_claim_from_closed_chan(fail_payment: bool) { // Ensure that the claim_deadline is correct, with the payment failing at exactly the given // height. - connect_blocks(&nodes[3], final_cltv - HTLC_FAIL_BACK_BUFFER - nodes[3].best_block_info().1 - - if fail_payment { 0 } else { 2 }); - let error_message = "Channel force-closed"; + let blocks = final_cltv + - HTLC_FAIL_BACK_BUFFER + - nodes[3].best_block_info().1 + - if fail_payment { 0 } else { 2 }; + connect_blocks(&nodes[3], blocks); if fail_payment { // We fail the HTLC on the A->B->D path first as it expires 4 blocks earlier. We go ahead // and expire both immediately, though, by connecting another 4 blocks. - let reason = HTLCHandlingFailureType::Receive { payment_hash }; + let reason = HTLCHandlingFailureType::Receive { payment_hash: hash }; expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[3], [reason.clone()]); connect_blocks(&nodes[3], 4); expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[3], [reason]); - pass_failed_payment_back(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_hash, PaymentFailureReason::RecipientRejected); + + let reason = PaymentFailureReason::RecipientRejected; + pass_failed_payment_back(&nodes[0], &[path_a, path_b], false, hash, reason); } else { - nodes[1].node.force_close_broadcasting_latest_txn(&chan_bd, &nodes[3].node.get_our_node_id(), error_message.to_string()).unwrap(); - check_closed_event!(&nodes[1], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, false, - [nodes[3].node.get_our_node_id()], 1000000); + let err = "Channel force-closed".to_string(); + nodes[1].node.force_close_broadcasting_latest_txn(&chan_bd, &node_d_id, err).unwrap(); + let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }; + check_closed_event!(&nodes[1], 1, reason, false, [node_d_id], 1000000); check_closed_broadcast(&nodes[1], 1, true); let bs_tx = 
nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); assert_eq!(bs_tx.len(), 1); @@ -3637,12 +4209,12 @@ fn do_claim_from_closed_chan(fail_payment: bool) { mine_transaction(&nodes[3], &bs_tx[0]); check_added_monitors(&nodes[3], 1); check_closed_broadcast(&nodes[3], 1, true); - check_closed_event!(&nodes[3], 1, ClosureReason::CommitmentTxConfirmed, false, - [nodes[1].node.get_our_node_id()], 1000000); + let reason = ClosureReason::CommitmentTxConfirmed; + check_closed_event!(&nodes[3], 1, reason, false, [node_b_id], 1000000); nodes[3].node.claim_funds(payment_preimage); check_added_monitors(&nodes[3], 2); - expect_payment_claimed!(nodes[3], payment_hash, 10_000_000); + expect_payment_claimed!(nodes[3], hash, 10_000_000); let ds_tx = nodes[3].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); assert_eq!(ds_tx.len(), 1); @@ -3656,28 +4228,35 @@ fn do_claim_from_closed_chan(fail_payment: bool) { check_added_monitors(&nodes[1], 1); assert_eq!(bs_claims.len(), 1); if let MessageSendEvent::UpdateHTLCs { updates, .. } = &bs_claims[0] { - nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]); + nodes[0].node.handle_update_fulfill_htlc(node_b_id, &updates.update_fulfill_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, false, true); - } else { panic!(); } + } else { + panic!(); + } expect_payment_sent!(nodes[0], payment_preimage); let ds_claim_msgs = nodes[3].node.get_and_clear_pending_msg_events(); assert_eq!(ds_claim_msgs.len(), 1); - let cs_claim_msgs = if let MessageSendEvent::UpdateHTLCs { updates, .. } = &ds_claim_msgs[0] { - nodes[2].node.handle_update_fulfill_htlc(nodes[3].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]); + let cs_claim_msgs = if let MessageSendEvent::UpdateHTLCs { updates, .. 
} = &ds_claim_msgs[0] + { + nodes[2].node.handle_update_fulfill_htlc(node_d_id, &updates.update_fulfill_htlcs[0]); let cs_claim_msgs = nodes[2].node.get_and_clear_pending_msg_events(); check_added_monitors(&nodes[2], 1); commitment_signed_dance!(nodes[2], nodes[3], updates.commitment_signed, false, true); expect_payment_forwarded!(nodes[2], nodes[0], nodes[3], Some(1000), false, false); cs_claim_msgs - } else { panic!(); }; + } else { + panic!(); + }; assert_eq!(cs_claim_msgs.len(), 1); if let MessageSendEvent::UpdateHTLCs { updates, .. } = &cs_claim_msgs[0] { - nodes[0].node.handle_update_fulfill_htlc(nodes[2].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]); + nodes[0].node.handle_update_fulfill_htlc(node_c_id, &updates.update_fulfill_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[2], updates.commitment_signed, false, true); - } else { panic!(); } + } else { + panic!(); + } expect_payment_path_successful!(nodes[0]); } @@ -3709,35 +4288,38 @@ fn do_test_custom_tlvs(spontaneous: bool, even_tlvs: bool, known_tlvs: bool) { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + create_announced_chan_between_nodes(&nodes, 0, 1); let amt_msat = 100_000; - let (mut route, our_payment_hash, our_payment_preimage, our_payment_secret) = get_route_and_payment_hash!(&nodes[0], &nodes[1], amt_msat); - let payment_id = PaymentId(our_payment_hash.0); + let (mut route, hash, preimage, secret) = + get_route_and_payment_hash!(&nodes[0], &nodes[1], amt_msat); + let id = PaymentId(hash.0); let custom_tlvs = vec![ (if even_tlvs { 5482373482 } else { 5482373483 }, vec![1, 2, 3, 4]), (5482373487, vec![0x42u8; 16]), ]; - let onion_fields = RecipientOnionFields { - payment_secret: if spontaneous { None } else { Some(our_payment_secret) }, + let onion = RecipientOnionFields { + payment_secret: if 
spontaneous { None } else { Some(secret) }, payment_metadata: None, - custom_tlvs: custom_tlvs.clone() + custom_tlvs: custom_tlvs.clone(), }; if spontaneous { - nodes[0].node.send_spontaneous_payment( - Some(our_payment_preimage), onion_fields, payment_id, route.route_params.unwrap(), - Retry::Attempts(0) - ).unwrap(); + let params = route.route_params.unwrap(); + let retry = Retry::Attempts(0); + nodes[0].node.send_spontaneous_payment(Some(preimage), onion, id, params, retry).unwrap(); } else { - nodes[0].node.send_payment_with_route(route, our_payment_hash, onion_fields, payment_id).unwrap(); + nodes[0].node.send_payment_with_route(route, hash, onion, id).unwrap(); } check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); - let ev = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &mut events); + let ev = remove_first_msg_event_to_node(&node_b_id, &mut events); let mut payment_event = SendEvent::from_event(ev); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); + nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); check_added_monitors!(&nodes[1], 0); commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); expect_pending_htlcs_forwardable!(nodes[1]); @@ -3753,25 +4335,26 @@ fn do_test_custom_tlvs(spontaneous: bool, even_tlvs: bool, known_tlvs: bool) { match (known_tlvs, even_tlvs) { (true, _) => { - nodes[1].node.claim_funds_with_known_custom_tlvs(our_payment_preimage); + nodes[1].node.claim_funds_with_known_custom_tlvs(preimage); let expected_total_fee_msat = pass_claimed_payment_along_route( - ClaimAlongRouteArgs::new(&nodes[0], &[&[&nodes[1]]], our_payment_preimage) - .with_custom_tlvs(custom_tlvs) + ClaimAlongRouteArgs::new(&nodes[0], &[&[&nodes[1]]], preimage) + .with_custom_tlvs(custom_tlvs), ); - expect_payment_sent!(&nodes[0], our_payment_preimage, Some(expected_total_fee_msat)); + 
expect_payment_sent!(&nodes[0], preimage, Some(expected_total_fee_msat)); }, (false, false) => { claim_payment_along_route( - ClaimAlongRouteArgs::new(&nodes[0], &[&[&nodes[1]]], our_payment_preimage) - .with_custom_tlvs(custom_tlvs) + ClaimAlongRouteArgs::new(&nodes[0], &[&[&nodes[1]]], preimage) + .with_custom_tlvs(custom_tlvs), ); }, (false, true) => { - nodes[1].node.claim_funds(our_payment_preimage); - let expected_destinations = vec![HTLCHandlingFailureType::Receive { payment_hash: our_payment_hash }]; - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], expected_destinations); - pass_failed_payment_back(&nodes[0], &[&[&nodes[1]]], false, our_payment_hash, PaymentFailureReason::RecipientRejected); - } + nodes[1].node.claim_funds(preimage); + let fail_type = HTLCHandlingFailureType::Receive { payment_hash: hash }; + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [fail_type]); + let reason = PaymentFailureReason::RecipientRejected; + pass_failed_payment_back(&nodes[0], &[&[&nodes[1]]], false, hash, reason); + }, } } @@ -3783,65 +4366,66 @@ fn test_retry_custom_tlvs() { let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); let nodes = create_network(3, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + create_announced_chan_between_nodes(&nodes, 0, 1); let (chan_2_update, _, chan_2_id, _) = create_announced_chan_between_nodes(&nodes, 2, 1); // Rebalance - send_payment(&nodes[2], &vec!(&nodes[1])[..], 1_500_000); + send_payment(&nodes[2], &[&nodes[1]], 1_500_000); let amt_msat = 1_000_000; - let (mut route, payment_hash, payment_preimage, payment_secret) = + let (mut route, hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], amt_msat); // Initiate the payment - let payment_id = PaymentId(payment_hash.0); + let id = 
PaymentId(hash.0); let mut route_params = route.route_params.clone().unwrap(); let custom_tlvs = vec![((1 << 16) + 1, vec![0x42u8; 16])]; - let onion_fields = RecipientOnionFields::secret_only(payment_secret); - let onion_fields = onion_fields.with_custom_tlvs(custom_tlvs.clone()).unwrap(); + let onion = RecipientOnionFields::secret_only(payment_secret); + let onion = onion.with_custom_tlvs(custom_tlvs.clone()).unwrap(); nodes[0].router.expect_find_route(route_params.clone(), Ok(route.clone())); - nodes[0].node.send_payment(payment_hash, onion_fields, - payment_id, route_params.clone(), Retry::Attempts(1)).unwrap(); + nodes[0].node.send_payment(hash, onion, id, route_params.clone(), Retry::Attempts(1)).unwrap(); check_added_monitors!(nodes[0], 1); // one monitor per path // Add the HTLC along the first hop. - let htlc_updates = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); + let htlc_updates = get_htlc_update_msgs(&nodes[0], &node_b_id); let msgs::CommitmentUpdate { update_add_htlcs, commitment_signed, .. } = htlc_updates; assert_eq!(update_add_htlcs.len(), 1); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &update_add_htlcs[0]); + nodes[1].node.handle_update_add_htlc(node_a_id, &update_add_htlcs[0]); commitment_signed_dance!(nodes[1], nodes[0], commitment_signed, false); // Attempt to forward the payment and complete the path's failure. 
expect_pending_htlcs_forwardable!(&nodes[1]); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[1], - vec![HTLCHandlingFailureType::Forward { - node_id: Some(nodes[2].node.get_our_node_id()), - channel_id: chan_2_id - }]); + let fail = HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_2_id }; + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[1], [fail]); check_added_monitors!(nodes[1], 1); - let htlc_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + let htlc_updates = get_htlc_update_msgs!(nodes[1], node_a_id); let msgs::CommitmentUpdate { update_fail_htlcs, commitment_signed, .. } = htlc_updates; assert_eq!(update_fail_htlcs.len(), 1); - nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &update_fail_htlcs[0]); + nodes[0].node.handle_update_fail_htlc(node_b_id, &update_fail_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false); let mut events = nodes[0].node.get_and_clear_pending_events(); match events[1] { Event::PendingHTLCsForwardable { .. } => {}, - _ => panic!("Unexpected event") + _ => panic!("Unexpected event"), } events.remove(1); - expect_payment_failed_conditions_event(events, payment_hash, false, - PaymentFailedConditions::new().mpp_parts_remain()); + let conditions = PaymentFailedConditions::new().mpp_parts_remain(); + expect_payment_failed_conditions_event(events, hash, false, conditions); // Rebalance the channel so the retry of the payment can succeed. 
- send_payment(&nodes[2], &vec!(&nodes[1])[..], 1_500_000); + send_payment(&nodes[2], &[&nodes[1]], 1_500_000); // Retry the payment and make sure it succeeds - route_params.payment_params.previously_failed_channels.push(chan_2_update.contents.short_channel_id); + let chan_2_scid = chan_2_update.contents.short_channel_id; + route_params.payment_params.previously_failed_channels.push(chan_2_scid); route.route_params = Some(route_params.clone()); nodes[0].router.expect_find_route(route_params, Ok(route)); nodes[0].node.process_pending_htlc_forwards(); @@ -3849,22 +4433,22 @@ fn test_retry_custom_tlvs() { let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let path = &[&nodes[1], &nodes[2]]; - let args = PassAlongPathArgs::new(&nodes[0], path, 1_000_000, payment_hash, events.pop().unwrap()) + let args = PassAlongPathArgs::new(&nodes[0], path, 1_000_000, hash, events.pop().unwrap()) .with_payment_secret(payment_secret) .with_custom_tlvs(custom_tlvs.clone()); do_pass_along_path(args); claim_payment_along_route( ClaimAlongRouteArgs::new(&nodes[0], &[&[&nodes[1], &nodes[2]]], payment_preimage) - .with_custom_tlvs(custom_tlvs) + .with_custom_tlvs(custom_tlvs), ); } #[test] fn test_custom_tlvs_consistency() { let even_type_1 = 1 << 16; - let odd_type_1 = (1 << 16)+ 1; + let odd_type_1 = (1 << 16) + 1; let even_type_2 = (1 << 16) + 2; - let odd_type_2 = (1 << 16) + 3; + let odd_type_2 = (1 << 16) + 3; let value_1 = || vec![1, 2, 3, 4]; let differing_value_1 = || vec![1, 2, 3, 5]; let value_2 = || vec![42u8; 16]; @@ -3895,63 +4479,80 @@ fn test_custom_tlvs_consistency() { ); } -fn do_test_custom_tlvs_consistency(first_tlvs: Vec<(u64, Vec)>, second_tlvs: Vec<(u64, Vec)>, - expected_receive_tlvs: Option)>>) { - +fn do_test_custom_tlvs_consistency( + first_tlvs: Vec<(u64, Vec)>, second_tlvs: Vec<(u64, Vec)>, + expected_receive_tlvs: Option)>>, +) { let chanmon_cfgs = create_chanmon_cfgs(4); let node_cfgs = create_node_cfgs(4, 
&chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]); let nodes = create_network(4, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + let node_d_id = nodes[3].node.get_our_node_id(); + create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 0); create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 100_000, 0); create_announced_chan_between_nodes_with_value(&nodes, 1, 3, 100_000, 0); let chan_2_3 = create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 100_000, 0); - let payment_params = PaymentParameters::from_node_id(nodes[3].node.get_our_node_id(), TEST_FINAL_CLTV) - .with_bolt11_features(nodes[3].node.bolt11_invoice_features()).unwrap(); + let payment_params = PaymentParameters::from_node_id(node_d_id, TEST_FINAL_CLTV) + .with_bolt11_features(nodes[3].node.bolt11_invoice_features()) + .unwrap(); let mut route = get_route!(nodes[0], payment_params, 15_000_000).unwrap(); assert_eq!(route.paths.len(), 2); route.paths.sort_by(|path_a, _| { // Sort the path so that the path through nodes[1] comes first - if path_a.hops[0].pubkey == nodes[1].node.get_our_node_id() { - core::cmp::Ordering::Less } else { core::cmp::Ordering::Greater } + if path_a.hops[0].pubkey == node_b_id { + Ordering::Less + } else { + Ordering::Greater + } }); - let (our_payment_preimage, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(&nodes[3]); - let payment_id = PaymentId([42; 32]); + let (preimage, hash, secret) = get_payment_preimage_hash!(&nodes[3]); + let id = PaymentId([42; 32]); let amt_msat = 15_000_000; // Send first part - let onion_fields = RecipientOnionFields { - payment_secret: Some(our_payment_secret), + let onion = RecipientOnionFields { + payment_secret: Some(secret), payment_metadata: None, - custom_tlvs: first_tlvs + custom_tlvs: first_tlvs, }; - let 
session_privs = nodes[0].node.test_add_new_pending_payment(our_payment_hash, - onion_fields.clone(), payment_id, &route).unwrap(); + let session_privs = + nodes[0].node.test_add_new_pending_payment(hash, onion.clone(), id, &route).unwrap(); let cur_height = nodes[0].best_block_info().1; - nodes[0].node.test_send_payment_along_path(&route.paths[0], &our_payment_hash, - onion_fields.clone(), amt_msat, cur_height, payment_id, - &None, session_privs[0]).unwrap(); + let path_a = &route.paths[0]; + let priv_a = session_privs[0]; + nodes[0] + .node + .test_send_payment_along_path(path_a, &hash, onion, amt_msat, cur_height, id, &None, priv_a) + .unwrap(); check_added_monitors!(nodes[0], 1); - { - let mut events = nodes[0].node.get_and_clear_pending_msg_events(); - assert_eq!(events.len(), 1); - pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], amt_msat, our_payment_hash, - Some(our_payment_secret), events.pop().unwrap(), false, None); - } + let mut events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + let event = events.pop().unwrap(); + let path_a = &[&nodes[1], &nodes[3]]; + pass_along_path(&nodes[0], path_a, amt_msat, hash, Some(secret), event, false, None); + assert!(nodes[3].node.get_and_clear_pending_events().is_empty()); // Send second part - let onion_fields = RecipientOnionFields { - payment_secret: Some(our_payment_secret), + let onion = RecipientOnionFields { + payment_secret: Some(secret), payment_metadata: None, - custom_tlvs: second_tlvs + custom_tlvs: second_tlvs, }; - nodes[0].node.test_send_payment_along_path(&route.paths[1], &our_payment_hash, - onion_fields.clone(), amt_msat, cur_height, payment_id, &None, session_privs[1]).unwrap(); + let path_b = &route.paths[1]; + let priv_b = session_privs[1]; + nodes[0] + .node + .test_send_payment_along_path(path_b, &hash, onion, amt_msat, cur_height, id, &None, priv_b) + .unwrap(); check_added_monitors!(nodes[0], 1); { @@ -3959,7 +4560,7 @@ fn 
do_test_custom_tlvs_consistency(first_tlvs: Vec<(u64, Vec)>, second_tlvs: assert_eq!(events.len(), 1); let payment_event = SendEvent::from_event(events.pop().unwrap()); - nodes[2].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); + nodes[2].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); commitment_signed_dance!(nodes[2], nodes[0], payment_event.commitment_msg, false); expect_pending_htlcs_forwardable!(nodes[2]); @@ -3969,7 +4570,7 @@ fn do_test_custom_tlvs_consistency(first_tlvs: Vec<(u64, Vec)>, second_tlvs: assert_eq!(events.len(), 1); let payment_event = SendEvent::from_event(events.pop().unwrap()); - nodes[3].node.handle_update_add_htlc(nodes[2].node.get_our_node_id(), &payment_event.msgs[0]); + nodes[3].node.handle_update_add_htlc(node_c_id, &payment_event.msgs[0]); check_added_monitors!(nodes[3], 0); commitment_signed_dance!(nodes[3], nodes[2], payment_event.commitment_msg, true, true); } @@ -3988,33 +4589,31 @@ fn do_test_custom_tlvs_consistency(first_tlvs: Vec<(u64, Vec)>, second_tlvs: } do_claim_payment_along_route( - ClaimAlongRouteArgs::new(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], our_payment_preimage) - .with_custom_tlvs(expected_tlvs) + ClaimAlongRouteArgs::new(&nodes[0], &[path_a, &[&nodes[2], &nodes[3]]], preimage) + .with_custom_tlvs(expected_tlvs), ); - expect_payment_sent(&nodes[0], our_payment_preimage, Some(Some(2000)), true, true); + expect_payment_sent(&nodes[0], preimage, Some(Some(2000)), true, true); } else { // Expect fail back - let expected_destinations = vec![HTLCHandlingFailureType::Receive { payment_hash: our_payment_hash }]; + let expected_destinations = [HTLCHandlingFailureType::Receive { payment_hash: hash }]; expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[3], expected_destinations); check_added_monitors!(nodes[3], 1); - let fail_updates_1 = get_htlc_update_msgs!(nodes[3], nodes[2].node.get_our_node_id()); - 
nodes[2].node.handle_update_fail_htlc(nodes[3].node.get_our_node_id(), &fail_updates_1.update_fail_htlcs[0]); + let fail_updates_1 = get_htlc_update_msgs!(nodes[3], node_c_id); + nodes[2].node.handle_update_fail_htlc(node_d_id, &fail_updates_1.update_fail_htlcs[0]); commitment_signed_dance!(nodes[2], nodes[3], fail_updates_1.commitment_signed, false); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![ - HTLCHandlingFailureType::Forward { - node_id: Some(nodes[3].node.get_our_node_id()), - channel_id: chan_2_3.2 - }]); + let fail = + HTLCHandlingFailureType::Forward { node_id: Some(node_d_id), channel_id: chan_2_3.2 }; + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], [fail]); check_added_monitors!(nodes[2], 1); - let fail_updates_2 = get_htlc_update_msgs!(nodes[2], nodes[0].node.get_our_node_id()); - nodes[0].node.handle_update_fail_htlc(nodes[2].node.get_our_node_id(), &fail_updates_2.update_fail_htlcs[0]); + let fail_updates_2 = get_htlc_update_msgs!(nodes[2], node_a_id); + nodes[0].node.handle_update_fail_htlc(node_c_id, &fail_updates_2.update_fail_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[2], fail_updates_2.commitment_signed, false); - expect_payment_failed_conditions(&nodes[0], our_payment_hash, true, - PaymentFailedConditions::new().mpp_parts_remain()); + let conditions = PaymentFailedConditions::new().mpp_parts_remain(); + expect_payment_failed_conditions(&nodes[0], hash, true, conditions); } } @@ -4028,15 +4627,21 @@ fn do_test_payment_metadata_consistency(do_reload: bool, do_modify: bool) { let chanmon_cfgs = create_chanmon_cfgs(4); let node_cfgs = create_node_cfgs(4, &chanmon_cfgs); let persister; - let new_chain_monitor; + let chain_mon; let mut config = test_default_channel_config(); config.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 50; - let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, Some(config.clone()), Some(config.clone()), 
Some(config.clone())]); - let nodes_0_deserialized; + let configs = [None, Some(config.clone()), Some(config.clone()), Some(config.clone())]; + let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &configs); + let node_d_reload; let mut nodes = create_network(4, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + let node_d_id = nodes[3].node.get_our_node_id(); + create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 0); let chan_id_bd = create_announced_chan_between_nodes_with_value(&nodes, 1, 3, 1_000_000, 0).2; create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 1_000_000, 0); @@ -4044,18 +4649,24 @@ fn do_test_payment_metadata_consistency(do_reload: bool, do_modify: bool) { // Pay more than half of each channel's max, requiring MPP let amt_msat = 750_000_000; - let (payment_preimage, payment_hash, payment_secret) = get_payment_preimage_hash!(nodes[3], Some(amt_msat)); + let (payment_preimage, payment_hash, payment_secret) = + get_payment_preimage_hash!(nodes[3], Some(amt_msat)); let payment_id = PaymentId(payment_hash.0); let payment_metadata = vec![44, 49, 52, 142]; - let payment_params = PaymentParameters::from_node_id(nodes[3].node.get_our_node_id(), TEST_FINAL_CLTV) - .with_bolt11_features(nodes[1].node.bolt11_invoice_features()).unwrap(); + let payment_params = PaymentParameters::from_node_id(node_d_id, TEST_FINAL_CLTV) + .with_bolt11_features(nodes[1].node.bolt11_invoice_features()) + .unwrap(); let mut route_params = RouteParameters::from_payment_params_and_value(payment_params, amt_msat); // Send the MPP payment, delivering the updated commitment state to nodes[1]. 
- nodes[0].node.send_payment(payment_hash, RecipientOnionFields { - payment_secret: Some(payment_secret), payment_metadata: Some(payment_metadata), custom_tlvs: vec![], - }, payment_id, route_params.clone(), Retry::Attempts(1)).unwrap(); + let onion = RecipientOnionFields { + payment_secret: Some(payment_secret), + payment_metadata: Some(payment_metadata), + custom_tlvs: vec![], + }; + let retry = Retry::Attempts(1); + nodes[0].node.send_payment(payment_hash, onion, payment_id, route_params, retry).unwrap(); check_added_monitors!(nodes[0], 2); let mut send_events = nodes[0].node.get_and_clear_pending_msg_events(); @@ -4063,44 +4674,50 @@ fn do_test_payment_metadata_consistency(do_reload: bool, do_modify: bool) { let first_send = SendEvent::from_event(send_events.pop().unwrap()); let second_send = SendEvent::from_event(send_events.pop().unwrap()); - let (b_recv_ev, c_recv_ev) = if first_send.node_id == nodes[1].node.get_our_node_id() { + let (b_recv_ev, c_recv_ev) = if first_send.node_id == node_b_id { (&first_send, &second_send) } else { (&second_send, &first_send) }; - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &b_recv_ev.msgs[0]); + nodes[1].node.handle_update_add_htlc(node_a_id, &b_recv_ev.msgs[0]); commitment_signed_dance!(nodes[1], nodes[0], b_recv_ev.commitment_msg, false, true); expect_pending_htlcs_forwardable!(nodes[1]); check_added_monitors(&nodes[1], 1); let b_forward_ev = SendEvent::from_node(&nodes[1]); - nodes[3].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &b_forward_ev.msgs[0]); + nodes[3].node.handle_update_add_htlc(node_b_id, &b_forward_ev.msgs[0]); commitment_signed_dance!(nodes[3], nodes[1], b_forward_ev.commitment_msg, false, true); expect_pending_htlcs_forwardable!(nodes[3]); // Before delivering the second MPP HTLC to nodes[2], disconnect nodes[2] and nodes[3], which // will result in nodes[2] failing the HTLC back. 
- nodes[2].node.peer_disconnected(nodes[3].node.get_our_node_id()); - nodes[3].node.peer_disconnected(nodes[2].node.get_our_node_id()); + nodes[2].node.peer_disconnected(node_d_id); + nodes[3].node.peer_disconnected(node_c_id); - nodes[2].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &c_recv_ev.msgs[0]); + nodes[2].node.handle_update_add_htlc(node_a_id, &c_recv_ev.msgs[0]); commitment_signed_dance!(nodes[2], nodes[0], c_recv_ev.commitment_msg, false, true); expect_pending_htlcs_forwardable!(nodes[2]); expect_htlc_handling_failed_destinations!( nodes[2].node.get_and_clear_pending_events(), - &[HTLCHandlingFailureType::Forward { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_id_cd }] + &[HTLCHandlingFailureType::Forward { node_id: Some(node_d_id), channel_id: chan_id_cd }] ); check_added_monitors(&nodes[2], 1); - let cs_fail = get_htlc_update_msgs(&nodes[2], &nodes[0].node.get_our_node_id()); - nodes[0].node.handle_update_fail_htlc(nodes[2].node.get_our_node_id(), &cs_fail.update_fail_htlcs[0]); + let cs_fail = get_htlc_update_msgs(&nodes[2], &node_a_id); + nodes[0].node.handle_update_fail_htlc(node_c_id, &cs_fail.update_fail_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[2], cs_fail.commitment_signed, false, true); let payment_fail_retryable_evs = nodes[0].node.get_and_clear_pending_events(); assert_eq!(payment_fail_retryable_evs.len(), 2); - if let Event::PaymentPathFailed { .. } = payment_fail_retryable_evs[0] {} else { panic!(); } - if let Event::PendingHTLCsForwardable { .. } = payment_fail_retryable_evs[1] {} else { panic!(); } + if let Event::PaymentPathFailed { .. } = payment_fail_retryable_evs[0] { + } else { + panic!(); + } + if let Event::PendingHTLCsForwardable { .. } = payment_fail_retryable_evs[1] { + } else { + panic!(); + } // Before we allow the HTLC to be retried, optionally change the payment_metadata we have // stored for our payment. 
@@ -4113,9 +4730,10 @@ fn do_test_payment_metadata_consistency(do_reload: bool, do_modify: bool) { if do_reload { let mon_bd = get_monitor!(nodes[3], chan_id_bd).encode(); let mon_cd = get_monitor!(nodes[3], chan_id_cd).encode(); - reload_node!(nodes[3], config, &nodes[3].node.encode(), &[&mon_bd, &mon_cd], - persister, new_chain_monitor, nodes_0_deserialized); - nodes[1].node.peer_disconnected(nodes[3].node.get_our_node_id()); + let mons = [&mon_bd[..], &mon_cd[..]]; + let node_d_ser = nodes[3].node.encode(); + reload_node!(nodes[3], config, &node_d_ser, &mons[..], persister, chain_mon, node_d_reload); + nodes[1].node.peer_disconnected(node_d_id); reconnect_nodes(ReconnectArgs::new(&nodes[1], &nodes[3])); } let mut reconnect_args = ReconnectArgs::new(&nodes[2], &nodes[3]); @@ -4130,14 +4748,14 @@ fn do_test_payment_metadata_consistency(do_reload: bool, do_modify: bool) { nodes[0].node.process_pending_htlc_forwards(); check_added_monitors(&nodes[0], 1); let as_resend = SendEvent::from_node(&nodes[0]); - nodes[2].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &as_resend.msgs[0]); + nodes[2].node.handle_update_add_htlc(node_a_id, &as_resend.msgs[0]); commitment_signed_dance!(nodes[2], nodes[0], as_resend.commitment_msg, false, true); expect_pending_htlcs_forwardable!(nodes[2]); check_added_monitors(&nodes[2], 1); let cs_forward = SendEvent::from_node(&nodes[2]); - let cd_channel_used = cs_forward.msgs[0].channel_id; - nodes[3].node.handle_update_add_htlc(nodes[2].node.get_our_node_id(), &cs_forward.msgs[0]); + let cd_chan_id = cs_forward.msgs[0].channel_id; + nodes[3].node.handle_update_add_htlc(node_c_id, &cs_forward.msgs[0]); commitment_signed_dance!(nodes[3], nodes[2], cs_forward.commitment_msg, false, true); // Finally, check that nodes[3] does the correct thing - either accepting the payment or, if @@ -4146,23 +4764,26 @@ fn do_test_payment_metadata_consistency(do_reload: bool, do_modify: bool) { if do_modify { 
expect_pending_htlcs_forwardable_ignore!(nodes[3]); nodes[3].node.process_pending_htlc_forwards(); - expect_pending_htlcs_forwardable_conditions(nodes[3].node.get_and_clear_pending_events(), - &[HTLCHandlingFailureType::Receive {payment_hash}]); + expect_pending_htlcs_forwardable_conditions( + nodes[3].node.get_and_clear_pending_events(), + &[HTLCHandlingFailureType::Receive { payment_hash }], + ); nodes[3].node.process_pending_htlc_forwards(); check_added_monitors(&nodes[3], 1); - let ds_fail = get_htlc_update_msgs(&nodes[3], &nodes[2].node.get_our_node_id()); + let ds_fail = get_htlc_update_msgs(&nodes[3], &node_c_id); - nodes[2].node.handle_update_fail_htlc(nodes[3].node.get_our_node_id(), &ds_fail.update_fail_htlcs[0]); + nodes[2].node.handle_update_fail_htlc(node_d_id, &ds_fail.update_fail_htlcs[0]); commitment_signed_dance!(nodes[2], nodes[3], ds_fail.commitment_signed, false, true); - expect_pending_htlcs_forwardable_conditions(nodes[2].node.get_and_clear_pending_events(), - &[HTLCHandlingFailureType::Forward { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: cd_channel_used }]); + let events = nodes[2].node.get_and_clear_pending_events(); + let fail_type = + HTLCHandlingFailureType::Forward { node_id: Some(node_d_id), channel_id: cd_chan_id }; + expect_pending_htlcs_forwardable_conditions(events, &[fail_type]); } else { expect_pending_htlcs_forwardable!(nodes[3]); expect_payment_claimable!(nodes[3], payment_hash, payment_secret, amt_msat); - claim_payment_along_route( - ClaimAlongRouteArgs::new(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], payment_preimage) - ); + let route: &[&[_]] = &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]]; + claim_payment_along_route(ClaimAlongRouteArgs::new(&nodes[0], route, payment_preimage)); } } @@ -4175,7 +4796,7 @@ fn test_payment_metadata_consistency() { } #[test] -fn test_htlc_forward_considers_anchor_outputs_value() { +fn test_htlc_forward_considers_anchor_outputs_value() { // Tests that: 
// // 1) Forwarding nodes don't forward HTLCs that would cause their balance to dip below the @@ -4193,21 +4814,40 @@ fn test_htlc_forward_considers_anchor_outputs_value() { // discovery of this bug. let chanmon_cfgs = create_chanmon_cfgs(3); let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[Some(config.clone()), Some(config.clone()), Some(config.clone())]); + + let configs = [Some(config.clone()), Some(config.clone()), Some(config.clone())]; + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &configs); let nodes = create_network(3, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + const CHAN_AMT: u64 = 1_000_000; const PUSH_MSAT: u64 = 900_000_000; create_announced_chan_between_nodes_with_value(&nodes, 0, 1, CHAN_AMT, 500_000_000); - let (_, _, chan_id_2, _) = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, CHAN_AMT, PUSH_MSAT); + let (_, _, chan_id_2, _) = + create_announced_chan_between_nodes_with_value(&nodes, 1, 2, CHAN_AMT, PUSH_MSAT); - let channel_reserve_msat = get_holder_selected_channel_reserve_satoshis(CHAN_AMT, &config) * 1000; + let channel_reserve_msat = + get_holder_selected_channel_reserve_satoshis(CHAN_AMT, &config) * 1000; let commitment_fee_msat = chan_utils::commit_tx_fee_sat( - *nodes[1].fee_estimator.sat_per_kw.lock().unwrap(), 2, &ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies() + *nodes[1].fee_estimator.sat_per_kw.lock().unwrap(), + 2, + &ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies(), ) * 1000; let anchor_outpus_value_msat = ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000; - let sendable_balance_msat = CHAN_AMT * 1000 - PUSH_MSAT - channel_reserve_msat - commitment_fee_msat - anchor_outpus_value_msat; - let channel_details = nodes[1].node.list_channels().into_iter().find(|channel| channel.channel_id == 
chan_id_2).unwrap(); + let sendable_balance_msat = CHAN_AMT * 1000 + - PUSH_MSAT + - channel_reserve_msat + - commitment_fee_msat + - anchor_outpus_value_msat; + let channel_details = nodes[1] + .node + .list_channels() + .into_iter() + .find(|channel| channel.channel_id == chan_id_2) + .unwrap(); assert!(sendable_balance_msat >= channel_details.next_outbound_htlc_minimum_msat); assert!(sendable_balance_msat <= channel_details.next_outbound_htlc_limit_msat); @@ -4217,36 +4857,37 @@ fn test_htlc_forward_considers_anchor_outputs_value() { // Send out an HTLC that would cause the forwarding node to dip below its reserve when // considering the value of anchor outputs. let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!( - nodes[0], nodes[2], sendable_balance_msat + anchor_outpus_value_msat + nodes[0], + nodes[2], + sendable_balance_msat + anchor_outpus_value_msat ); - nodes[0].node.send_payment_with_route( - route, payment_hash, RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0) - ).unwrap(); + let onion = RecipientOnionFields::secret_only(payment_secret); + let id = PaymentId(payment_hash.0); + nodes[0].node.send_payment_with_route(route, payment_hash, onion, id).unwrap(); check_added_monitors!(nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); - let mut update_add_htlc = if let MessageSendEvent::UpdateHTLCs { updates, .. } = events.pop().unwrap() { - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); - check_added_monitors(&nodes[1], 0); - commitment_signed_dance!(nodes[1], nodes[0], &updates.commitment_signed, false); - updates.update_add_htlcs[0].clone() - } else { - panic!("Unexpected event"); - }; + let mut update_add_htlc = + if let MessageSendEvent::UpdateHTLCs { updates, .. 
} = events.pop().unwrap() { + nodes[1].node.handle_update_add_htlc(node_a_id, &updates.update_add_htlcs[0]); + check_added_monitors(&nodes[1], 0); + commitment_signed_dance!(nodes[1], nodes[0], &updates.commitment_signed, false); + updates.update_add_htlcs[0].clone() + } else { + panic!("Unexpected event"); + }; // The forwarding node should reject forwarding it as expected. expect_pending_htlcs_forwardable!(nodes[1]); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[1], vec![HTLCHandlingFailureType::Forward { - node_id: Some(nodes[2].node.get_our_node_id()), - channel_id: chan_id_2 - }]); + let fail = HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_id_2 }; + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[1], [fail]); check_added_monitors(&nodes[1], 1); let mut events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); if let MessageSendEvent::UpdateHTLCs { updates, .. } = events.pop().unwrap() { - nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]); + nodes[0].node.handle_update_fail_htlc(node_b_id, &updates.update_fail_htlcs[0]); check_added_monitors(&nodes[0], 0); commitment_signed_dance!(nodes[0], nodes[1], &updates.commitment_signed, false); } else { @@ -4258,10 +4899,11 @@ fn test_htlc_forward_considers_anchor_outputs_value() { // Assume that the forwarding node did forward it, and make sure the recipient rejects it as an // invalid update and closes the channel. 
update_add_htlc.channel_id = chan_id_2; - nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &update_add_htlc); - check_closed_event(&nodes[2], 1, ClosureReason::ProcessingError { - err: "Remote HTLC add would put them under remote reserve value".to_owned() - }, false, &[nodes[1].node.get_our_node_id()], 1_000_000); + nodes[2].node.handle_update_add_htlc(node_b_id, &update_add_htlc); + + let err = "Remote HTLC add would put them under remote reserve value".to_owned(); + let reason = ClosureReason::ProcessingError { err }; + check_closed_event(&nodes[2], 1, reason, false, &[node_b_id], 1_000_000); check_closed_broadcast(&nodes[2], 1, true); check_added_monitors(&nodes[2], 1); } @@ -4272,25 +4914,37 @@ fn peel_payment_onion_custom_tlvs() { let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_b_id = nodes[1].node.get_our_node_id(); + create_announced_chan_between_nodes(&nodes, 0, 1); let secp_ctx = Secp256k1::new(); let amt_msat = 1000; - let payment_params = PaymentParameters::for_keysend(nodes[1].node.get_our_node_id(), - TEST_FINAL_CLTV, false); + let payment_params = PaymentParameters::for_keysend(node_b_id, TEST_FINAL_CLTV, false); let route_params = RouteParameters::from_payment_params_and_value(payment_params, amt_msat); let route = functional_test_utils::get_route(&nodes[0], &route_params).unwrap(); let mut recipient_onion = RecipientOnionFields::spontaneous_empty() - .with_custom_tlvs(vec![(414141, vec![42; 1200])]).unwrap(); + .with_custom_tlvs(vec![(414141, vec![42; 1200])]) + .unwrap(); let prng_seed = chanmon_cfgs[0].keys_manager.get_secure_random_bytes(); let session_priv = SecretKey::from_slice(&prng_seed[..]).expect("RNG is busted"); let keysend_preimage = PaymentPreimage([42; 32]); let payment_hash = PaymentHash(Sha256::hash(&keysend_preimage.0).to_byte_array()); let (onion_routing_packet, 
first_hop_msat, cltv_expiry) = onion_utils::create_payment_onion( - &secp_ctx, &route.paths[0], &session_priv, amt_msat, &recipient_onion, - nodes[0].best_block_info().1, &payment_hash, &Some(keysend_preimage), None, prng_seed - ).unwrap(); + &secp_ctx, + &route.paths[0], + &session_priv, + amt_msat, + &recipient_onion, + nodes[0].best_block_info().1, + &payment_hash, + &Some(keysend_preimage), + None, + prng_seed, + ) + .unwrap(); let update_add = msgs::UpdateAddHTLC { channel_id: ChannelId([0; 32]), @@ -4303,9 +4957,14 @@ fn peel_payment_onion_custom_tlvs() { blinding_point: None, }; let peeled_onion = crate::ln::onion_payment::peel_payment_onion( - &update_add, &chanmon_cfgs[1].keys_manager, &chanmon_cfgs[1].logger, &secp_ctx, - nodes[1].best_block_info().1, false - ).unwrap(); + &update_add, + &chanmon_cfgs[1].keys_manager, + &chanmon_cfgs[1].logger, + &secp_ctx, + nodes[1].best_block_info().1, + false, + ) + .unwrap(); assert_eq!(peeled_onion.incoming_amt_msat, Some(amt_msat)); match peeled_onion.routing { PendingHTLCRouting::ReceiveKeysend { @@ -4318,7 +4977,7 @@ fn peel_payment_onion_custom_tlvs() { assert!(payment_metadata.is_none()); assert!(payment_data.is_none()); }, - _ => panic!() + _ => panic!(), } } @@ -4326,34 +4985,47 @@ fn peel_payment_onion_custom_tlvs() { fn test_non_strict_forwarding() { let chanmon_cfgs = create_chanmon_cfgs(3); let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let mut config = test_default_channel_config(); config.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 100; - let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[Some(config.clone()), Some(config.clone()), Some(config)]); + let configs = [Some(config.clone()), Some(config.clone()), Some(config)]; + + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &configs); let nodes = create_network(3, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let 
node_c_id = nodes[2].node.get_our_node_id(); + // Create a routing node with two outbound channels, each of which can forward 2 payments of // the given value. let payment_value = 1_500_000; create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 0); - let (chan_update_1, _, channel_id_1, _) = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 4_950, 0); - let (chan_update_2, _, channel_id_2, _) = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 5_000, 0); + let (chan_update_1, _, channel_id_1, _) = + create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 4_950, 0); + let (chan_update_2, _, channel_id_2, _) = + create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 5_000, 0); // Create a route once. - let payment_params = PaymentParameters::from_node_id(nodes[2].node.get_our_node_id(), TEST_FINAL_CLTV) - .with_bolt11_features(nodes[2].node.bolt11_invoice_features()).unwrap(); - let route_params = RouteParameters::from_payment_params_and_value(payment_params, payment_value); + let payment_params = PaymentParameters::from_node_id(node_c_id, TEST_FINAL_CLTV) + .with_bolt11_features(nodes[2].node.bolt11_invoice_features()) + .unwrap(); + let route_params = + RouteParameters::from_payment_params_and_value(payment_params, payment_value); let route = functional_test_utils::get_route(&nodes[0], &route_params).unwrap(); // Send 4 payments over the same route. 
for i in 0..4 { - let (payment_preimage, payment_hash, payment_secret) = get_payment_preimage_hash(&nodes[2], Some(payment_value), None); - nodes[0].node.send_payment_with_route(route.clone(), payment_hash, - RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap(); + let (payment_preimage, payment_hash, payment_secret) = + get_payment_preimage_hash(&nodes[2], Some(payment_value), None); + let onion = RecipientOnionFields::secret_only(payment_secret); + let id = PaymentId(payment_hash.0); + nodes[0].node.send_payment_with_route(route.clone(), payment_hash, onion, id).unwrap(); check_added_monitors!(nodes[0], 1); let mut msg_events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(msg_events.len(), 1); let mut send_event = SendEvent::from_event(msg_events.remove(0)); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &send_event.msgs[0]); + nodes[1].node.handle_update_add_htlc(node_a_id, &send_event.msgs[0]); commitment_signed_dance!(nodes[1], nodes[0], &send_event.commitment_msg, false); expect_pending_htlcs_forwardable!(nodes[1]); @@ -4365,12 +5037,9 @@ fn test_non_strict_forwarding() { // applying non-strict forwarding. // The channel with the least amount of outbound liquidity will be used to maximize the // probability of being able to successfully forward a subsequent HTLC. 
- assert_eq!(send_event.msgs[0].channel_id, if i < 2 { - channel_id_1 - } else { - channel_id_2 - }); - nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &send_event.msgs[0]); + let exp_id = if i < 2 { channel_id_1 } else { channel_id_2 }; + assert_eq!(send_event.msgs[0].channel_id, exp_id); + nodes[2].node.handle_update_add_htlc(node_b_id, &send_event.msgs[0]); commitment_signed_dance!(nodes[2], nodes[1], &send_event.commitment_msg, false); expect_pending_htlcs_forwardable!(nodes[2]); @@ -4378,39 +5047,47 @@ fn test_non_strict_forwarding() { assert_eq!(events.len(), 1); assert!(matches!(events[0], Event::PaymentClaimable { .. })); - claim_payment_along_route( - ClaimAlongRouteArgs::new(&nodes[0], &[&[&nodes[1], &nodes[2]]], payment_preimage) - ); + claim_payment_along_route(ClaimAlongRouteArgs::new( + &nodes[0], + &[&[&nodes[1], &nodes[2]]], + payment_preimage, + )); } // Send a 5th payment which will fail. - let (_, payment_hash, payment_secret) = get_payment_preimage_hash(&nodes[2], Some(payment_value), None); - nodes[0].node.send_payment_with_route(route.clone(), payment_hash, - RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap(); + let (_, payment_hash, payment_secret) = + get_payment_preimage_hash(&nodes[2], Some(payment_value), None); + let onion = RecipientOnionFields::secret_only(payment_secret); + let id = PaymentId(payment_hash.0); + nodes[0].node.send_payment_with_route(route.clone(), payment_hash, onion, id).unwrap(); + check_added_monitors!(nodes[0], 1); let mut msg_events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(msg_events.len(), 1); let mut send_event = SendEvent::from_event(msg_events.remove(0)); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &send_event.msgs[0]); + nodes[1].node.handle_update_add_htlc(node_a_id, &send_event.msgs[0]); commitment_signed_dance!(nodes[1], nodes[0], &send_event.commitment_msg, false); 
expect_pending_htlcs_forwardable!(nodes[1]); check_added_monitors!(nodes[1], 1); let routed_scid = route.paths[0].hops[1].short_channel_id; - let routed_channel_id = match routed_scid { + let routed_chan_id = match routed_scid { scid if scid == chan_update_1.contents.short_channel_id => channel_id_1, scid if scid == chan_update_2.contents.short_channel_id => channel_id_2, _ => panic!("Unexpected short channel id in route"), }; // The failure to forward will refer to the channel given in the onion. - expect_pending_htlcs_forwardable_conditions(nodes[1].node.get_and_clear_pending_events(), - &[HTLCHandlingFailureType::Forward { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: routed_channel_id }]); + let events = nodes[1].node.get_and_clear_pending_events(); + let fail = + HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: routed_chan_id }; + expect_pending_htlcs_forwardable_conditions(events, &[fail]); - let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); - nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]); + let updates = get_htlc_update_msgs!(nodes[1], node_a_id); + nodes[0].node.handle_update_fail_htlc(node_b_id, &updates.update_fail_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, false); let events = nodes[0].node.get_and_clear_pending_events(); - expect_payment_failed_conditions_event(events, payment_hash, false, PaymentFailedConditions::new().blamed_scid(routed_scid)); + let conditions = PaymentFailedConditions::new().blamed_scid(routed_scid); + expect_payment_failed_conditions_event(events, payment_hash, false, conditions); } #[test] @@ -4421,13 +5098,18 @@ fn remove_pending_outbounds_on_buggy_router() { let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_b_id = 
nodes[1].node.get_our_node_id(); + create_announced_chan_between_nodes(&nodes, 0, 1); let amt_msat = 10_000; let payment_id = PaymentId([42; 32]); - let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), 0) - .with_bolt11_features(nodes[1].node.bolt11_invoice_features()).unwrap(); - let (mut route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], payment_params, amt_msat); + let payment_params = PaymentParameters::from_node_id(node_b_id, 0) + .with_bolt11_features(nodes[1].node.bolt11_invoice_features()) + .unwrap(); + let (mut route, payment_hash, _, payment_secret) = + get_route_and_payment_hash!(nodes[0], nodes[1], payment_params, amt_msat); // Extend the path by itself, essentially simulating route going through same channel twice let cloned_hops = route.paths[0].hops.clone(); @@ -4435,26 +5117,25 @@ fn remove_pending_outbounds_on_buggy_router() { let route_params = route.route_params.clone().unwrap(); nodes[0].router.expect_find_route(route_params.clone(), Ok(route.clone())); - nodes[0].node.send_payment( - payment_hash, RecipientOnionFields::secret_only(payment_secret), payment_id, route_params, - Retry::Attempts(1) // Even though another attempt is allowed, the payment should fail - ).unwrap(); + // Send the payment with one retry allowed, but the payment should still fail + let onion = RecipientOnionFields::secret_only(payment_secret); + let retry = Retry::Attempts(1); + nodes[0].node.send_payment(payment_hash, onion, payment_id, route_params, retry).unwrap(); let events = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events.len(), 2); match &events[0] { Event::PaymentPathFailed { failure, payment_failed_permanently, .. 
} => { - assert_eq!(failure, &PathFailure::InitialSend { - err: APIError::InvalidRoute { err: "Path went through the same channel twice".to_string() } - }); + let err = "Path went through the same channel twice".to_string(); + assert_eq!(failure, &PathFailure::InitialSend { err: APIError::InvalidRoute { err } }); assert!(!payment_failed_permanently); }, - _ => panic!() + _ => panic!(), } match events[1] { Event::PaymentFailed { reason, .. } => { assert_eq!(reason.unwrap(), PaymentFailureReason::UnexpectedError); }, - _ => panic!() + _ => panic!(), } assert!(nodes[0].node.list_recent_payments().is_empty()); } @@ -4467,12 +5148,17 @@ fn remove_pending_outbound_probe_on_buggy_path() { let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_b_id = nodes[1].node.get_our_node_id(); + create_announced_chan_between_nodes(&nodes, 0, 1); let amt_msat = 10_000; - let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), 0) - .with_bolt11_features(nodes[1].node.bolt11_invoice_features()).unwrap(); - let (mut route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[1], payment_params, amt_msat); + let payment_params = PaymentParameters::from_node_id(node_b_id, 0) + .with_bolt11_features(nodes[1].node.bolt11_invoice_features()) + .unwrap(); + let (mut route, _, _, _) = + get_route_and_payment_hash!(nodes[0], nodes[1], payment_params, amt_msat); // Extend the path by itself, essentially simulating route going through same channel twice let cloned_hops = route.paths[0].hops.clone(); @@ -4480,9 +5166,9 @@ fn remove_pending_outbound_probe_on_buggy_path() { assert_eq!( nodes[0].node.send_probe(route.paths.pop().unwrap()).unwrap_err(), - ProbeSendFailure::ParameterError( - APIError::InvalidRoute { err: "Path went through the same channel twice".to_string() } - ) + 
ProbeSendFailure::ParameterError(APIError::InvalidRoute { + err: "Path went through the same channel twice".to_string() + }) ); assert!(nodes[0].node.list_recent_payments().is_empty()); } @@ -4495,25 +5181,29 @@ fn pay_route_without_params() { let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_b_id = nodes[1].node.get_our_node_id(); + create_announced_chan_between_nodes(&nodes, 0, 1); let amt_msat = 10_000; - let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV) - .with_bolt11_features(nodes[1].node.bolt11_invoice_features()).unwrap(); - let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], payment_params, amt_msat); + let payment_params = PaymentParameters::from_node_id(node_b_id, TEST_FINAL_CLTV) + .with_bolt11_features(nodes[1].node.bolt11_invoice_features()) + .unwrap(); + let (mut route, hash, preimage, secret) = + get_route_and_payment_hash!(nodes[0], nodes[1], payment_params, amt_msat); route.route_params.take(); - nodes[0].node.send_payment_with_route( - route, payment_hash, RecipientOnionFields::secret_only(payment_secret), - PaymentId(payment_hash.0) - ).unwrap(); + + let onion = RecipientOnionFields::secret_only(secret); + let id = PaymentId(hash.0); + nodes[0].node.send_payment_with_route(route, hash, onion, id).unwrap(); + check_added_monitors!(nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); - let node_1_msgs = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &mut events); - pass_along_path(&nodes[0], &[&nodes[1]], amt_msat, payment_hash, Some(payment_secret), node_1_msgs, true, None); - claim_payment_along_route( - ClaimAlongRouteArgs::new(&nodes[0], &[&[&nodes[1]]], payment_preimage) - ); + let node_1_msgs = 
remove_first_msg_event_to_node(&node_b_id, &mut events); + pass_along_path(&nodes[0], &[&nodes[1]], amt_msat, hash, Some(secret), node_1_msgs, true, None); + claim_payment_along_route(ClaimAlongRouteArgs::new(&nodes[0], &[&[&nodes[1]]], preimage)); } #[test] @@ -4543,9 +5233,8 @@ fn max_out_mpp_path() { let chanmon_cfgs = create_chanmon_cfgs(3); let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs( - 3, &node_cfgs, &[Some(user_cfg.clone()), Some(lsp_cfg.clone()), Some(user_cfg.clone())] - ); + let configs = [Some(user_cfg.clone()), Some(lsp_cfg), Some(user_cfg)]; + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &configs); let nodes = create_network(3, &node_cfgs, &node_chanmgrs); create_unannounced_chan_between_nodes_with_value(&nodes, 0, 1, 200_000, 0); @@ -4560,7 +5249,9 @@ fn max_out_mpp_path() { let invoice = nodes[2].node.create_bolt11_invoice(invoice_params).unwrap(); let route_params_cfg = crate::routing::router::RouteParametersConfig::default(); - nodes[0].node.pay_for_bolt11_invoice(&invoice, PaymentId([42; 32]), None, route_params_cfg, Retry::Attempts(0)).unwrap(); + let id = PaymentId([42; 32]); + let retry = Retry::Attempts(0); + nodes[0].node.pay_for_bolt11_invoice(&invoice, id, None, route_params_cfg, retry).unwrap(); assert!(nodes[0].node.list_recent_payments().len() == 1); check_added_monitors(&nodes[0], 2); // one monitor update per MPP part diff --git a/lightning/src/ln/update_fee_tests.rs b/lightning/src/ln/update_fee_tests.rs new file mode 100644 index 00000000000..284f56cab3f --- /dev/null +++ b/lightning/src/ln/update_fee_tests.rs @@ -0,0 +1,988 @@ +//! Functional tests testing channel feerate handling. 
+ +use crate::events::{ClosureReason, Event}; +use crate::ln::chan_utils::{ + self, commitment_tx_base_weight, CommitmentTransaction, HTLCOutputInCommitment, + COMMITMENT_TX_WEIGHT_PER_HTLC, +}; +use crate::ln::channel::{ + get_holder_selected_channel_reserve_satoshis, CONCURRENT_INBOUND_HTLC_FEE_BUFFER, + MIN_AFFORDABLE_HTLC_COUNT, +}; +use crate::ln::channelmanager::PaymentId; +use crate::ln::functional_test_utils::*; +use crate::ln::msgs::{ + self, BaseMessageHandler, ChannelMessageHandler, ErrorAction, MessageSendEvent, +}; +use crate::ln::outbound_payment::RecipientOnionFields; +use crate::sign::ecdsa::EcdsaChannelSigner; +use crate::types::features::ChannelTypeFeatures; +use crate::util::config::UserConfig; +use crate::util::errors::APIError; + +use lightning_macros::xtest; + +use bitcoin::secp256k1::Secp256k1; + +#[xtest(feature = "_externalize_tests")] +pub fn test_async_inbound_update_fee() { + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + + create_announced_chan_between_nodes(&nodes, 0, 1); + + // balancing + send_payment(&nodes[0], &[&nodes[1]], 8000000); + + // A B + // update_fee -> + // send (1) commitment_signed -. + // <- update_add_htlc/commitment_signed + // send (2) RAA (awaiting remote revoke) -. + // (1) commitment_signed is delivered -> + // .- send (3) RAA (awaiting remote revoke) + // (2) RAA is delivered -> + // .- send (4) commitment_signed + // <- (3) RAA is delivered + // send (5) commitment_signed -. + // <- (4) commitment_signed is delivered + // send (6) RAA -. 
+ // (5) commitment_signed is delivered -> + // <- RAA + // (6) RAA is delivered -> + + // First nodes[0] generates an update_fee + { + let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); + *feerate_lock += 20; + } + nodes[0].node.timer_tick_occurred(); + check_added_monitors(&nodes[0], 1); + + let events_0 = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events_0.len(), 1); + let (update_msg, commitment_signed) = match events_0[0] { + // (1) + MessageSendEvent::UpdateHTLCs { + updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, + .. + } => (update_fee.as_ref(), commitment_signed), + _ => panic!("Unexpected event"), + }; + + nodes[1].node.handle_update_fee(node_a_id, update_msg.unwrap()); + + // ...but before it's delivered, nodes[1] starts to send a payment back to nodes[0]... + let (route, our_payment_hash, _, our_payment_secret) = + get_route_and_payment_hash!(nodes[1], nodes[0], 40000); + let onion = RecipientOnionFields::secret_only(our_payment_secret); + let id = PaymentId(our_payment_hash.0); + nodes[1].node.send_payment_with_route(route, our_payment_hash, onion, id).unwrap(); + check_added_monitors(&nodes[1], 1); + + let payment_event = { + let mut events_1 = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(events_1.len(), 1); + SendEvent::from_event(events_1.remove(0)) + }; + assert_eq!(payment_event.node_id, node_a_id); + assert_eq!(payment_event.msgs.len(), 1); + + // ...now when the messages get delivered everyone should be happy + nodes[0].node.handle_update_add_htlc(node_b_id, &payment_event.msgs[0]); + nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &payment_event.commitment_msg); // (2) + let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); + // nodes[0] is awaiting nodes[1] revoke_and_ack so get_event_msg's assert(len == 1) passes + check_added_monitors(&nodes[0], 1); + + // deliver(1), generate (3): + 
nodes[1].node.handle_commitment_signed_batch_test(node_a_id, commitment_signed); + let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); + // nodes[1] is awaiting nodes[0] revoke_and_ack so get_event_msg's assert(len == 1) passes + check_added_monitors(&nodes[1], 1); + + nodes[1].node.handle_revoke_and_ack(node_a_id, &as_revoke_and_ack); // deliver (2) + let bs_update = get_htlc_update_msgs!(nodes[1], node_a_id); + assert!(bs_update.update_add_htlcs.is_empty()); // (4) + assert!(bs_update.update_fulfill_htlcs.is_empty()); // (4) + assert!(bs_update.update_fail_htlcs.is_empty()); // (4) + assert!(bs_update.update_fail_malformed_htlcs.is_empty()); // (4) + assert!(bs_update.update_fee.is_none()); // (4) + check_added_monitors(&nodes[1], 1); + + nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_revoke_and_ack); // deliver (3) + let as_update = get_htlc_update_msgs!(nodes[0], node_b_id); + assert!(as_update.update_add_htlcs.is_empty()); // (5) + assert!(as_update.update_fulfill_htlcs.is_empty()); // (5) + assert!(as_update.update_fail_htlcs.is_empty()); // (5) + assert!(as_update.update_fail_malformed_htlcs.is_empty()); // (5) + assert!(as_update.update_fee.is_none()); // (5) + check_added_monitors(&nodes[0], 1); + + nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_update.commitment_signed); // deliver (4) + let as_second_revoke = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); + // only (6) so get_event_msg's assert(len == 1) passes + check_added_monitors(&nodes[0], 1); + + nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &as_update.commitment_signed); // deliver (5) + let bs_second_revoke = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); + check_added_monitors(&nodes[1], 1); + + nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_second_revoke); + check_added_monitors(&nodes[0], 1); + + let events_2 = nodes[0].node.get_and_clear_pending_events(); + 
assert_eq!(events_2.len(), 1); + match events_2[0] { + Event::PendingHTLCsForwardable { .. } => {}, // If we actually processed we'd receive the payment + _ => panic!("Unexpected event"), + } + + nodes[1].node.handle_revoke_and_ack(node_a_id, &as_second_revoke); // deliver (6) + check_added_monitors(&nodes[1], 1); +} + +#[xtest(feature = "_externalize_tests")] +pub fn test_update_fee_unordered_raa() { + // Just the intro to the previous test followed by an out-of-order RAA (which caused a + // crash in an earlier version of the update_fee patch) + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + + create_announced_chan_between_nodes(&nodes, 0, 1); + + // balancing + send_payment(&nodes[0], &[&nodes[1]], 8000000); + + // First nodes[0] generates an update_fee + { + let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); + *feerate_lock += 20; + } + nodes[0].node.timer_tick_occurred(); + check_added_monitors(&nodes[0], 1); + + let events_0 = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events_0.len(), 1); + let update_msg = match events_0[0] { + // (1) + MessageSendEvent::UpdateHTLCs { + updates: msgs::CommitmentUpdate { ref update_fee, .. }, + .. + } => update_fee.as_ref(), + _ => panic!("Unexpected event"), + }; + + nodes[1].node.handle_update_fee(node_a_id, update_msg.unwrap()); + + // ...but before it's delivered, nodes[1] starts to send a payment back to nodes[0]... 
+ let (route, our_payment_hash, _, our_payment_secret) = + get_route_and_payment_hash!(nodes[1], nodes[0], 40000); + let onion = RecipientOnionFields::secret_only(our_payment_secret); + let id = PaymentId(our_payment_hash.0); + nodes[1].node.send_payment_with_route(route, our_payment_hash, onion, id).unwrap(); + check_added_monitors(&nodes[1], 1); + + let payment_event = { + let mut events_1 = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(events_1.len(), 1); + SendEvent::from_event(events_1.remove(0)) + }; + assert_eq!(payment_event.node_id, node_a_id); + assert_eq!(payment_event.msgs.len(), 1); + + // ...now when the messages get delivered everyone should be happy + nodes[0].node.handle_update_add_htlc(node_b_id, &payment_event.msgs[0]); + nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &payment_event.commitment_msg); // (2) + let as_revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); + // nodes[0] is awaiting nodes[1] revoke_and_ack so get_event_msg's assert(len == 1) passes + check_added_monitors(&nodes[0], 1); + + nodes[1].node.handle_revoke_and_ack(node_a_id, &as_revoke_msg); // deliver (2) + check_added_monitors(&nodes[1], 1); + + // We can't continue, sadly, because our (1) now has a bogus signature +} + +#[xtest(feature = "_externalize_tests")] +pub fn test_multi_flight_update_fee() { + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + + create_announced_chan_between_nodes(&nodes, 0, 1); + + // A B + // update_fee/commitment_signed -> + // .- send (1) RAA and (2) commitment_signed + // update_fee (never committed) -> + // (3) update_fee -> + // We have to manually generate the above update_fee, it is allowed by 
the protocol but we + // don't track which updates correspond to which revoke_and_ack responses so we're in + // AwaitingRAA mode and will not generate the update_fee yet. + // <- (1) RAA delivered + // (3) is generated and send (4) CS -. + // Note that A cannot generate (4) prior to (1) being delivered as it otherwise doesn't + // know the per_commitment_point to use for it. + // <- (2) commitment_signed delivered + // revoke_and_ack -> + // B should send no response here + // (4) commitment_signed delivered -> + // <- RAA/commitment_signed delivered + // revoke_and_ack -> + + // First nodes[0] generates an update_fee + let initial_feerate; + { + let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); + initial_feerate = *feerate_lock; + *feerate_lock = initial_feerate + 20; + } + nodes[0].node.timer_tick_occurred(); + check_added_monitors(&nodes[0], 1); + + let events_0 = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events_0.len(), 1); + let (update_msg_1, commitment_signed_1) = match events_0[0] { + // (1) + MessageSendEvent::UpdateHTLCs { + updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, + .. 
+ } => (update_fee.as_ref().unwrap(), commitment_signed), + _ => panic!("Unexpected event"), + }; + + // Deliver first update_fee/commitment_signed pair, generating (1) and (2): + nodes[1].node.handle_update_fee(node_a_id, update_msg_1); + nodes[1].node.handle_commitment_signed_batch_test(node_a_id, commitment_signed_1); + let (bs_revoke_msg, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[1], node_a_id); + check_added_monitors(&nodes[1], 1); + + // nodes[0] is awaiting a revoke from nodes[1] before it will create a new commitment + // transaction: + { + let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); + *feerate_lock = initial_feerate + 40; + } + nodes[0].node.timer_tick_occurred(); + assert!(nodes[0].node.get_and_clear_pending_events().is_empty()); + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); + + // Create the (3) update_fee message that nodes[0] will generate before it does... + let mut update_msg_2 = msgs::UpdateFee { + channel_id: update_msg_1.channel_id.clone(), + feerate_per_kw: (initial_feerate + 30) as u32, + }; + + nodes[1].node.handle_update_fee(node_a_id, &update_msg_2); + + update_msg_2.feerate_per_kw = (initial_feerate + 40) as u32; + // Deliver (3) + nodes[1].node.handle_update_fee(node_a_id, &update_msg_2); + + // Deliver (1), generating (3) and (4) + nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_revoke_msg); + let as_second_update = get_htlc_update_msgs!(nodes[0], node_b_id); + check_added_monitors(&nodes[0], 1); + assert!(as_second_update.update_add_htlcs.is_empty()); + assert!(as_second_update.update_fulfill_htlcs.is_empty()); + assert!(as_second_update.update_fail_htlcs.is_empty()); + assert!(as_second_update.update_fail_malformed_htlcs.is_empty()); + // Check that the update_fee newly generated matches what we delivered: + assert_eq!(as_second_update.update_fee.as_ref().unwrap().channel_id, update_msg_2.channel_id); + assert_eq!( + 
as_second_update.update_fee.as_ref().unwrap().feerate_per_kw, + update_msg_2.feerate_per_kw + ); + + // Deliver (2) commitment_signed + nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_commitment_signed); + let as_revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); + check_added_monitors(&nodes[0], 1); + // No commitment_signed so get_event_msg's assert(len == 1) passes + + nodes[1].node.handle_revoke_and_ack(node_a_id, &as_revoke_msg); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + check_added_monitors(&nodes[1], 1); + + // Delever (4) + nodes[1] + .node + .handle_commitment_signed_batch_test(node_a_id, &as_second_update.commitment_signed); + let (bs_second_revoke, bs_second_commitment) = get_revoke_commit_msgs!(nodes[1], node_a_id); + check_added_monitors(&nodes[1], 1); + + nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_second_revoke); + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); + check_added_monitors(&nodes[0], 1); + + nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_second_commitment); + let as_second_revoke = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); + // No commitment_signed so get_event_msg's assert(len == 1) passes + check_added_monitors(&nodes[0], 1); + + nodes[1].node.handle_revoke_and_ack(node_a_id, &as_second_revoke); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + check_added_monitors(&nodes[1], 1); +} + +#[xtest(feature = "_externalize_tests")] +pub fn test_update_fee_vanilla() { + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + + create_announced_chan_between_nodes(&nodes, 0, 1); + + { + 
let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); + *feerate_lock += 25; + } + nodes[0].node.timer_tick_occurred(); + check_added_monitors(&nodes[0], 1); + + let events_0 = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events_0.len(), 1); + let (update_msg, commitment_signed) = match events_0[0] { + MessageSendEvent::UpdateHTLCs { + updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, + .. + } => (update_fee.as_ref(), commitment_signed), + _ => panic!("Unexpected event"), + }; + nodes[1].node.handle_update_fee(node_a_id, update_msg.unwrap()); + + nodes[1].node.handle_commitment_signed_batch_test(node_a_id, commitment_signed); + let (revoke_msg, commitment_signed) = get_revoke_commit_msgs!(nodes[1], node_a_id); + check_added_monitors(&nodes[1], 1); + + nodes[0].node.handle_revoke_and_ack(node_b_id, &revoke_msg); + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); + check_added_monitors(&nodes[0], 1); + + nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &commitment_signed); + let revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); + // No commitment_signed so get_event_msg's assert(len == 1) passes + check_added_monitors(&nodes[0], 1); + + nodes[1].node.handle_revoke_and_ack(node_a_id, &revoke_msg); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + check_added_monitors(&nodes[1], 1); +} + +#[xtest(feature = "_externalize_tests")] +pub fn test_update_fee_that_funder_cannot_afford() { + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + + let channel_value = 5000; + let push_sats = 700; + let chan = 
create_announced_chan_between_nodes_with_value( + &nodes, + 0, + 1, + channel_value, + push_sats * 1000, + ); + let channel_id = chan.2; + let secp_ctx = Secp256k1::new(); + let default_config = UserConfig::default(); + let bs_channel_reserve_sats = + get_holder_selected_channel_reserve_satoshis(channel_value, &default_config); + + let channel_type_features = ChannelTypeFeatures::only_static_remote_key(); + + // Calculate the maximum feerate that A can afford. Note that we don't send an update_fee + // CONCURRENT_INBOUND_HTLC_FEE_BUFFER HTLCs before actually running out of local balance, so we + // calculate two different feerates here - the expected local limit as well as the expected + // remote limit. + let feerate = ((channel_value - bs_channel_reserve_sats - push_sats) * 1000 + / (commitment_tx_base_weight(&channel_type_features) + + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC)) + as u32; + let non_buffer_feerate = ((channel_value - bs_channel_reserve_sats - push_sats) * 1000 + / commitment_tx_base_weight(&channel_type_features)) as u32; + { + let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); + *feerate_lock = feerate; + } + nodes[0].node.timer_tick_occurred(); + check_added_monitors(&nodes[0], 1); + let update_msg = get_htlc_update_msgs!(nodes[0], node_b_id); + + nodes[1].node.handle_update_fee(node_a_id, &update_msg.update_fee.unwrap()); + + commitment_signed_dance!(nodes[1], nodes[0], update_msg.commitment_signed, false); + + // Confirm that the new fee based on the last local commitment txn is what we expected based on the feerate set above. 
+ { + let commitment_tx = get_local_commitment_txn!(nodes[1], channel_id)[0].clone(); + + //We made sure neither party's funds are below the dust limit and there are no HTLCs here + assert_eq!(commitment_tx.output.len(), 2); + let total_fee: u64 = commit_tx_fee_msat(feerate, 0, &channel_type_features) / 1000; + let mut actual_fee = + commitment_tx.output.iter().fold(0, |acc, output| acc + output.value.to_sat()); + actual_fee = channel_value - actual_fee; + assert_eq!(total_fee, actual_fee); + } + + { + // Increment the feerate by a small constant, accounting for rounding errors + let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); + *feerate_lock += 4; + } + nodes[0].node.timer_tick_occurred(); + let err = format!("Cannot afford to send new feerate at {}", feerate + 4); + nodes[0].logger.assert_log("lightning::ln::channel", err, 1); + check_added_monitors(&nodes[0], 0); + + const INITIAL_COMMITMENT_NUMBER: u64 = 281474976710654; + + let remote_point = { + let mut per_peer_lock; + let mut peer_state_lock; + + let channel = get_channel_ref!(nodes[1], nodes[0], per_peer_lock, peer_state_lock, chan.2); + let chan_signer = channel.as_funded().unwrap().get_signer(); + let point_number = INITIAL_COMMITMENT_NUMBER - 1; + chan_signer.as_ref().get_per_commitment_point(point_number, &secp_ctx).unwrap() + }; + + let res = { + let mut per_peer_lock; + let mut peer_state_lock; + + let local_chan = + get_channel_ref!(nodes[0], nodes[1], per_peer_lock, peer_state_lock, chan.2); + let local_chan_signer = local_chan.as_funded().unwrap().get_signer(); + + let nondust_htlcs: Vec = vec![]; + let commitment_tx = CommitmentTransaction::new( + INITIAL_COMMITMENT_NUMBER - 1, + &remote_point, + push_sats, + channel_value + - push_sats - commit_tx_fee_msat(non_buffer_feerate + 4, 0, &channel_type_features) + / 1000, + non_buffer_feerate + 4, + nondust_htlcs, + &local_chan.funding().channel_transaction_parameters.as_counterparty_broadcastable(), + &secp_ctx, + ); 
+ let params = &local_chan.funding().channel_transaction_parameters; + local_chan_signer + .as_ecdsa() + .unwrap() + .sign_counterparty_commitment(params, &commitment_tx, Vec::new(), Vec::new(), &secp_ctx) + .unwrap() + }; + + let commit_signed_msg = msgs::CommitmentSigned { + channel_id: chan.2, + signature: res.0, + htlc_signatures: res.1, + batch: None, + #[cfg(taproot)] + partial_signature_with_nonce: None, + }; + + let update_fee = msgs::UpdateFee { channel_id: chan.2, feerate_per_kw: non_buffer_feerate + 4 }; + + nodes[1].node.handle_update_fee(node_a_id, &update_fee); + + //While producing the commitment_signed response after handling a received update_fee request the + //check to see if the funder, who sent the update_fee request, can afford the new fee (funder_balance >= fee+channel_reserve) + //Should produce an error. + nodes[1].node.handle_commitment_signed(node_a_id, &commit_signed_msg); + let err = "Funding remote cannot afford proposed new fee"; + nodes[1].logger.assert_log_contains("lightning::ln::channelmanager", err, 3); + check_added_monitors(&nodes[1], 1); + check_closed_broadcast!(nodes[1], true); + let reason = ClosureReason::ProcessingError { err: err.to_string() }; + check_closed_event!(nodes[1], 1, reason, [node_a_id], channel_value); +} + +#[xtest(feature = "_externalize_tests")] +pub fn test_update_fee_that_saturates_subs() { + // Check that when a remote party sends us an `update_fee` message that results in a total fee + // on the commitment transaction that is greater than her balance, we saturate the subtractions, + // and force close the channel. 
+ + let mut default_config = test_default_channel_config(); + let secp_ctx = Secp256k1::new(); + + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = + create_node_chanmgrs(2, &node_cfgs, &[Some(default_config.clone()), Some(default_config)]); + let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + + let chan_id = create_chan_between_nodes_with_value(&nodes[0], &nodes[1], 10_000, 8_500_000).3; + + const FEERATE: u32 = 250 * 10; // 10sat/vb + + // Assert that the new feerate will completely exhaust the balance of node 0, and saturate the + // subtraction of the total fee from node 0's balance. + let total_fee_sat = chan_utils::commit_tx_fee_sat(FEERATE, 0, &ChannelTypeFeatures::empty()); + assert!(total_fee_sat > 1500); + + const INITIAL_COMMITMENT_NUMBER: u64 = 281474976710654; + + // We build a commitment transaction here only to pass node 1's check of node 0's signature + // in `commitment_signed`. + + let remote_point = { + let mut per_peer_lock; + let mut peer_state_lock; + + let channel = get_channel_ref!(nodes[1], nodes[0], per_peer_lock, peer_state_lock, chan_id); + let chan_signer = channel.as_funded().unwrap().get_signer(); + chan_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER, &secp_ctx).unwrap() + }; + + let res = { + let mut per_peer_lock; + let mut peer_state_lock; + + let local_chan = + get_channel_ref!(nodes[0], nodes[1], per_peer_lock, peer_state_lock, chan_id); + let local_chan_signer = local_chan.as_funded().unwrap().get_signer(); + let nondust_htlcs: Vec = vec![]; + let commitment_tx = CommitmentTransaction::new( + INITIAL_COMMITMENT_NUMBER, + &remote_point, + 8500, + // Set a zero balance here: this is the transaction that node 1 will expect a signature for, as + // he will do a saturating subtraction of the total fees from node 0's balance. 
+ 0, + FEERATE, + nondust_htlcs, + &local_chan.funding().channel_transaction_parameters.as_counterparty_broadcastable(), + &secp_ctx, + ); + let params = &local_chan.funding().channel_transaction_parameters; + local_chan_signer + .as_ecdsa() + .unwrap() + .sign_counterparty_commitment(params, &commitment_tx, Vec::new(), Vec::new(), &secp_ctx) + .unwrap() + }; + + let commit_signed_msg = msgs::CommitmentSigned { + channel_id: chan_id, + signature: res.0, + htlc_signatures: res.1, + batch: None, + #[cfg(taproot)] + partial_signature_with_nonce: None, + }; + + let update_fee = msgs::UpdateFee { channel_id: chan_id, feerate_per_kw: FEERATE }; + + nodes[1].node.handle_update_fee(node_a_id, &update_fee); + + nodes[1].node.handle_commitment_signed(node_a_id, &commit_signed_msg); + let err = "Funding remote cannot afford proposed new fee"; + nodes[1].logger.assert_log_contains("lightning::ln::channelmanager", err, 3); + check_added_monitors(&nodes[1], 1); + check_closed_broadcast!(nodes[1], true); + let reason = ClosureReason::ProcessingError { err: err.to_string() }; + check_closed_event!(nodes[1], 1, reason, [node_a_id], 10_000); +} + +#[xtest(feature = "_externalize_tests")] +pub fn test_update_fee_with_fundee_update_add_htlc() { + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + + let chan = create_announced_chan_between_nodes(&nodes, 0, 1); + + // balancing + send_payment(&nodes[0], &[&nodes[1]], 8000000); + + { + let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); + *feerate_lock += 20; + } + nodes[0].node.timer_tick_occurred(); + check_added_monitors(&nodes[0], 1); + + let events_0 = nodes[0].node.get_and_clear_pending_msg_events(); + 
assert_eq!(events_0.len(), 1); + let (update_msg, commitment_signed) = match events_0[0] { + MessageSendEvent::UpdateHTLCs { + updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, + .. + } => (update_fee.as_ref(), commitment_signed), + _ => panic!("Unexpected event"), + }; + nodes[1].node.handle_update_fee(node_a_id, update_msg.unwrap()); + nodes[1].node.handle_commitment_signed_batch_test(node_a_id, commitment_signed); + let (revoke_msg, commitment_signed) = get_revoke_commit_msgs!(nodes[1], node_a_id); + check_added_monitors(&nodes[1], 1); + + let (route, our_payment_hash, our_payment_preimage, our_payment_secret) = + get_route_and_payment_hash!(nodes[1], nodes[0], 800000); + + // nothing happens since node[1] is in AwaitingRemoteRevoke + let onion = RecipientOnionFields::secret_only(our_payment_secret); + let id = PaymentId(our_payment_hash.0); + nodes[1].node.send_payment_with_route(route, our_payment_hash, onion, id).unwrap(); + check_added_monitors(&nodes[1], 0); + assert!(nodes[0].node.get_and_clear_pending_events().is_empty()); + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); + // node[1] has nothing to do + + nodes[0].node.handle_revoke_and_ack(node_b_id, &revoke_msg); + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); + check_added_monitors(&nodes[0], 1); + + nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &commitment_signed); + let revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); + // No commitment_signed so get_event_msg's assert(len == 1) passes + check_added_monitors(&nodes[0], 1); + nodes[1].node.handle_revoke_and_ack(node_a_id, &revoke_msg); + check_added_monitors(&nodes[1], 1); + // AwaitingRemoteRevoke ends here + + let commitment_update = get_htlc_update_msgs!(nodes[1], node_a_id); + assert_eq!(commitment_update.update_add_htlcs.len(), 1); + assert_eq!(commitment_update.update_fulfill_htlcs.len(), 0); + 
assert_eq!(commitment_update.update_fail_htlcs.len(), 0); + assert_eq!(commitment_update.update_fail_malformed_htlcs.len(), 0); + assert_eq!(commitment_update.update_fee.is_none(), true); + + nodes[0].node.handle_update_add_htlc(node_b_id, &commitment_update.update_add_htlcs[0]); + nodes[0] + .node + .handle_commitment_signed_batch_test(node_b_id, &commitment_update.commitment_signed); + check_added_monitors(&nodes[0], 1); + let (revoke, commitment_signed) = get_revoke_commit_msgs!(nodes[0], node_b_id); + + nodes[1].node.handle_revoke_and_ack(node_a_id, &revoke); + check_added_monitors(&nodes[1], 1); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + + nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &commitment_signed); + check_added_monitors(&nodes[1], 1); + let revoke = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); + // No commitment_signed so get_event_msg's assert(len == 1) passes + + nodes[0].node.handle_revoke_and_ack(node_b_id, &revoke); + check_added_monitors(&nodes[0], 1); + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); + + expect_pending_htlcs_forwardable!(nodes[0]); + + let events = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + match events[0] { + Event::PaymentClaimable { .. 
} => {}, + _ => panic!("Unexpected event"), + }; + + claim_payment(&nodes[1], &[&nodes[0]], our_payment_preimage); + + send_payment(&nodes[1], &[&nodes[0]], 800000); + send_payment(&nodes[0], &[&nodes[1]], 800000); + close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true); + let node_a_reason = ClosureReason::CounterpartyInitiatedCooperativeClosure; + check_closed_event!(nodes[0], 1, node_a_reason, [node_b_id], 100000); + let node_b_reason = ClosureReason::LocallyInitiatedCooperativeClosure; + check_closed_event!(nodes[1], 1, node_b_reason, [node_a_id], 100000); +} + +#[xtest(feature = "_externalize_tests")] +pub fn test_update_fee() { + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + + let chan = create_announced_chan_between_nodes(&nodes, 0, 1); + let channel_id = chan.2; + + // A B + // (1) update_fee/commitment_signed -> + // <- (2) revoke_and_ack + // .- send (3) commitment_signed + // (4) update_fee/commitment_signed -> + // .- send (5) revoke_and_ack (no CS as we're awaiting a revoke) + // <- (3) commitment_signed delivered + // send (6) revoke_and_ack -. + // <- (5) deliver revoke_and_ack + // (6) deliver revoke_and_ack -> + // .- send (7) commitment_signed in response to (4) + // <- (7) deliver commitment_signed + // revoke_and_ack -> + + // Create and deliver (1)... 
+ let feerate; + { + let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); + feerate = *feerate_lock; + *feerate_lock = feerate + 20; + } + nodes[0].node.timer_tick_occurred(); + check_added_monitors(&nodes[0], 1); + + let events_0 = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events_0.len(), 1); + let (update_msg, commitment_signed) = match events_0[0] { + MessageSendEvent::UpdateHTLCs { + updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, + .. + } => (update_fee.as_ref(), commitment_signed), + _ => panic!("Unexpected event"), + }; + nodes[1].node.handle_update_fee(node_a_id, update_msg.unwrap()); + + // Generate (2) and (3): + nodes[1].node.handle_commitment_signed_batch_test(node_a_id, commitment_signed); + let (revoke_msg, commitment_signed_0) = get_revoke_commit_msgs!(nodes[1], node_a_id); + check_added_monitors(&nodes[1], 1); + + // Deliver (2): + nodes[0].node.handle_revoke_and_ack(node_b_id, &revoke_msg); + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); + check_added_monitors(&nodes[0], 1); + + // Create and deliver (4)... + { + let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); + *feerate_lock = feerate + 30; + } + nodes[0].node.timer_tick_occurred(); + check_added_monitors(&nodes[0], 1); + let events_0 = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events_0.len(), 1); + let (update_msg, commitment_signed) = match events_0[0] { + MessageSendEvent::UpdateHTLCs { + updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, + .. + } => (update_fee.as_ref(), commitment_signed), + _ => panic!("Unexpected event"), + }; + + nodes[1].node.handle_update_fee(node_a_id, update_msg.unwrap()); + nodes[1].node.handle_commitment_signed_batch_test(node_a_id, commitment_signed); + check_added_monitors(&nodes[1], 1); + // ... 
creating (5) + let revoke_msg = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); + // No commitment_signed so get_event_msg's assert(len == 1) passes + + // Handle (3), creating (6): + nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &commitment_signed_0); + check_added_monitors(&nodes[0], 1); + let revoke_msg_0 = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); + // No commitment_signed so get_event_msg's assert(len == 1) passes + + // Deliver (5): + nodes[0].node.handle_revoke_and_ack(node_b_id, &revoke_msg); + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); + check_added_monitors(&nodes[0], 1); + + // Deliver (6), creating (7): + nodes[1].node.handle_revoke_and_ack(node_a_id, &revoke_msg_0); + let commitment_update = get_htlc_update_msgs!(nodes[1], node_a_id); + assert!(commitment_update.update_add_htlcs.is_empty()); + assert!(commitment_update.update_fulfill_htlcs.is_empty()); + assert!(commitment_update.update_fail_htlcs.is_empty()); + assert!(commitment_update.update_fail_malformed_htlcs.is_empty()); + assert!(commitment_update.update_fee.is_none()); + check_added_monitors(&nodes[1], 1); + + // Deliver (7) + nodes[0] + .node + .handle_commitment_signed_batch_test(node_b_id, &commitment_update.commitment_signed); + check_added_monitors(&nodes[0], 1); + let revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); + // No commitment_signed so get_event_msg's assert(len == 1) passes + + nodes[1].node.handle_revoke_and_ack(node_a_id, &revoke_msg); + check_added_monitors(&nodes[1], 1); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + + assert_eq!(get_feerate!(nodes[0], nodes[1], channel_id), feerate + 30); + assert_eq!(get_feerate!(nodes[1], nodes[0], channel_id), feerate + 30); + close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true); + let node_a_reason = ClosureReason::CounterpartyInitiatedCooperativeClosure; + 
check_closed_event!(nodes[0], 1, node_a_reason, [node_b_id], 100000); + let node_b_reason = ClosureReason::LocallyInitiatedCooperativeClosure; + check_closed_event!(nodes[1], 1, node_b_reason, [node_a_id], 100000); +} + +#[xtest(feature = "_externalize_tests")] +pub fn test_chan_init_feerate_unaffordability() { + // Test that we will reject channel opens which do not leave enough to pay for any HTLCs due to + // channel reserve and feerate requirements. + let mut chanmon_cfgs = create_chanmon_cfgs(2); + let feerate_per_kw = *chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + + let default_config = UserConfig::default(); + let channel_type_features = ChannelTypeFeatures::only_static_remote_key(); + + // Set the push_msat amount such that nodes[0] will not be able to afford to add even a single + // HTLC. + let mut push_amt = 100_000_000; + push_amt -= commit_tx_fee_msat( + feerate_per_kw, + MIN_AFFORDABLE_HTLC_COUNT as u64, + &channel_type_features, + ); + assert_eq!(nodes[0].node.create_channel(node_b_id, 100_000, push_amt + 1, 42, None, None).unwrap_err(), + APIError::APIMisuseError { err: "Funding amount (356) can't even pay fee for initial commitment transaction fee of 357.".to_string() }); + + // During open, we don't have a "counterparty channel reserve" to check against, so that + // requirement only comes into play on the open_channel handling side. 
+ push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000; + nodes[0].node.create_channel(node_b_id, 100_000, push_amt, 42, None, None).unwrap(); + let mut open_channel_msg = + get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); + open_channel_msg.push_msat += 1; + nodes[1].node.handle_open_channel(node_a_id, &open_channel_msg); + + let msg_events = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(msg_events.len(), 1); + match msg_events[0] { + MessageSendEvent::HandleError { + action: ErrorAction::SendErrorMessage { ref msg }, .. + } => { + assert_eq!(msg.data, "Insufficient funding amount for initial reserve"); + }, + _ => panic!("Unexpected event"), + } +} + +#[xtest(feature = "_externalize_tests")] +pub fn accept_busted_but_better_fee() { + // If a peer sends us a fee update that is too low, but higher than our previous channel + // feerate, we should accept it. In the future we may want to consider closing the channel + // later, but for now we only accept the update. + let mut chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + + create_chan_between_nodes(&nodes[0], &nodes[1]); + + // Set nodes[1] to expect 5,000 sat/kW. + { + let mut feerate_lock = chanmon_cfgs[1].fee_estimator.sat_per_kw.lock().unwrap(); + *feerate_lock = 5000; + } + + // If nodes[0] increases their feerate, even if its not enough, nodes[1] should accept it. 
+ { + let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); + *feerate_lock = 1000; + } + nodes[0].node.timer_tick_occurred(); + check_added_monitors(&nodes[0], 1); + + let events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + match events[0] { + MessageSendEvent::UpdateHTLCs { + updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, + .. + } => { + nodes[1].node.handle_update_fee(node_a_id, update_fee.as_ref().unwrap()); + commitment_signed_dance!(nodes[1], nodes[0], commitment_signed, false); + }, + _ => panic!("Unexpected event"), + }; + + // If nodes[0] increases their feerate further, even if its not enough, nodes[1] should accept + // it. + { + let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); + *feerate_lock = 2000; + } + nodes[0].node.timer_tick_occurred(); + check_added_monitors(&nodes[0], 1); + + let events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + match events[0] { + MessageSendEvent::UpdateHTLCs { + updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, + .. + } => { + nodes[1].node.handle_update_fee(node_a_id, update_fee.as_ref().unwrap()); + commitment_signed_dance!(nodes[1], nodes[0], commitment_signed, false); + }, + _ => panic!("Unexpected event"), + }; + + // However, if nodes[0] decreases their feerate, nodes[1] should reject it and close the + // channel. + { + let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); + *feerate_lock = 1000; + } + nodes[0].node.timer_tick_occurred(); + check_added_monitors(&nodes[0], 1); + + let events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + match events[0] { + MessageSendEvent::UpdateHTLCs { + updates: msgs::CommitmentUpdate { ref update_fee, .. }, + .. 
+ } => { + nodes[1].node.handle_update_fee(node_a_id, update_fee.as_ref().unwrap()); + let reason = ClosureReason::PeerFeerateTooLow { + peer_feerate_sat_per_kw: 1000, + required_feerate_sat_per_kw: 5000, + }; + check_closed_event!(nodes[1], 1, reason, [node_a_id], 100000); + check_closed_broadcast!(nodes[1], true); + check_added_monitors(&nodes[1], 1); + }, + _ => panic!("Unexpected event"), + }; +} diff --git a/rustfmt_excluded_files b/rustfmt_excluded_files index de717db86ae..63cf3227bd2 100644 --- a/rustfmt_excluded_files +++ b/rustfmt_excluded_files @@ -15,7 +15,6 @@ lightning/src/ln/chanmon_update_fail_tests.rs lightning/src/ln/channel.rs lightning/src/ln/channelmanager.rs lightning/src/ln/functional_test_utils.rs -lightning/src/ln/functional_tests.rs lightning/src/ln/inbound_payment.rs lightning/src/ln/invoice_utils.rs lightning/src/ln/max_payment_path_len_tests.rs @@ -26,7 +25,6 @@ lightning/src/ln/offers_tests.rs lightning/src/ln/onion_payment.rs lightning/src/ln/onion_route_tests.rs lightning/src/ln/outbound_payment.rs -lightning/src/ln/payment_tests.rs lightning/src/ln/peer_channel_encryptor.rs lightning/src/ln/peer_handler.rs lightning/src/ln/priv_short_conf_tests.rs