diff --git a/lightning-persister/src/test_utils.rs b/lightning-persister/src/test_utils.rs index 48b383ad1ea..b8f3eb0bd99 100644 --- a/lightning-persister/src/test_utils.rs +++ b/lightning-persister/src/test_utils.rs @@ -1,4 +1,3 @@ -use lightning::check_closed_broadcast; use lightning::events::ClosureReason; use lightning::ln::functional_test_utils::*; use lightning::util::persist::{ @@ -188,7 +187,7 @@ pub(crate) fn do_test_store<K: KVStoreSync>(store_0: &K, store_1: &K) { .unwrap(); let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; check_closed_event(&nodes[0], 1, reason, &[node_b_id], 100000); - check_closed_broadcast!(nodes[0], true); + check_closed_broadcast(&nodes[0], 1, true); check_added_monitors(&nodes[0], 1); let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap(); @@ -202,7 +201,7 @@ pub(crate) fn do_test_store<K: KVStoreSync>(store_0: &K, store_1: &K) { vec![node_txn[0].clone(), node_txn[0].clone()], ), ); - check_closed_broadcast!(nodes[1], true); + check_closed_broadcast(&nodes[1], 1, true); let reason = ClosureReason::CommitmentTxConfirmed; check_closed_event(&nodes[1], 1, reason, &[nodes[0].node.get_our_node_id()], 100000); check_added_monitors(&nodes[1], 1); diff --git a/lightning/src/ln/accountable_tests.rs b/lightning/src/ln/accountable_tests.rs index 16ca1425817..35c936f4dd6 100644 --- a/lightning/src/ln/accountable_tests.rs +++ b/lightning/src/ln/accountable_tests.rs @@ -26,7 +26,8 @@ fn test_accountable_forwarding_with_override( let _chan_ab = create_announced_chan_between_nodes(&nodes, 0, 1); let _chan_bc = create_announced_chan_between_nodes(&nodes, 1, 2); - let (payment_preimage, payment_hash, payment_secret) = get_payment_preimage_hash!(nodes[2]); + let (payment_preimage, payment_hash, payment_secret) = + get_payment_preimage_hash(&nodes[2], None, None); let route_params = RouteParameters::from_payment_params_and_value( PaymentParameters::from_node_id(nodes[2].node.get_our_node_id(), TEST_FINAL_CLTV), 100_000, diff --git a/lightning/src/ln/async_signer_tests.rs b/lightning/src/ln/async_signer_tests.rs index f34a2b3275c..d8dc22caca8 100644 --- a/lightning/src/ln/async_signer_tests.rs +++ b/lightning/src/ln/async_signer_tests.rs @@ -1308,9 +1308,9 @@ fn do_test_closing_signed(extra_closing_signed: bool, reconnect: bool) { } nodes[0].node.signer_unblocked(None); - let (_, node_0_2nd_closing_signed) = get_closing_signed_broadcast!(nodes[0].node, node_b_id); + let (_, node_0_2nd_closing_signed) = get_closing_signed_broadcast(&nodes[0], node_b_id); nodes[1].node.handle_closing_signed(node_a_id, &node_0_2nd_closing_signed.unwrap()); - let (_, node_1_closing_signed) = get_closing_signed_broadcast!(nodes[1].node, node_a_id); + let (_, node_1_closing_signed) = get_closing_signed_broadcast(&nodes[1], node_a_id); assert!(node_1_closing_signed.is_none()); assert!(nodes[0].node.list_channels().is_empty()); diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs index b421114e911..5e544c7502d 100644 --- a/lightning/src/ln/chanmon_update_fail_tests.rs +++ b/lightning/src/ln/chanmon_update_fail_tests.rs @@ -277,7 +277,7 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) { }; nodes[0].node.force_close_broadcasting_latest_txn(&channel_id, &node_b_id, message).unwrap(); check_added_monitors(&nodes[0], 1); - check_closed_broadcast!(nodes[0], true); + check_closed_broadcast(&nodes[0], 1, true); // TODO: Once we hit the chain with the failure transaction we should check that we get a //
PaymentPathFailed event @@ -1382,9 +1382,9 @@ fn raa_no_response_awaiting_raa_state() { let (route, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); let (payment_preimage_2, payment_hash_2, payment_secret_2) = - get_payment_preimage_hash!(nodes[1]); + get_payment_preimage_hash(&nodes[1], None, None); let (payment_preimage_3, payment_hash_3, payment_secret_3) = - get_payment_preimage_hash!(nodes[1]); + get_payment_preimage_hash(&nodes[1], None, None); // Queue up two payments - one will be delivered right away, one immediately goes into the // holding cell as nodes[0] is AwaitingRAA. Ultimately this allows us to deliver an RAA @@ -1872,7 +1872,7 @@ fn test_monitor_update_fail_claim() { do_commitment_signed_dance(&nodes[1], &nodes[2], &payment_event.commitment_msg, false, true); expect_htlc_failure_conditions(nodes[1].node.get_and_clear_pending_events(), &[]); - let (_, payment_hash_3, payment_secret_3) = get_payment_preimage_hash!(nodes[0]); + let (_, payment_hash_3, payment_secret_3) = get_payment_preimage_hash(&nodes[0], None, None); let id_3 = PaymentId(payment_hash_3.0); let onion_3 = RecipientOnionFields::secret_only(payment_secret_3); nodes[2].node.send_payment_with_route(route, payment_hash_3, onion_3, id_3).unwrap(); @@ -2509,7 +2509,7 @@ fn test_fail_htlc_on_broadcast_after_claim() { mine_transaction(&nodes[1], &bs_txn[0]); let reason = ClosureReason::CommitmentTxConfirmed; check_closed_event(&nodes[1], 1, reason, &[node_c_id], 100000); - check_closed_broadcast!(nodes[1], true); + check_closed_broadcast(&nodes[1], 1, true); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); check_added_monitors(&nodes[1], 1); expect_and_process_pending_htlcs_and_htlc_handling_failed( @@ -2663,7 +2663,7 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) { let (route, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(&nodes[0], nodes[1], 100000); let (payment_preimage_2, payment_hash_2, payment_secret_2) = - get_payment_preimage_hash!(&nodes[1]); + get_payment_preimage_hash(&nodes[1], None, None); // Do a really complicated dance to get an HTLC into the holding cell, with // MonitorUpdateInProgress set but AwaitingRemoteRevoke unset. 
When this test was written, any @@ -3048,11 +3048,11 @@ fn test_temporary_error_during_shutdown() { node_b_id, &get_event_msg!(nodes[1], MessageSendEvent::SendClosingSigned, node_a_id), ); - let (_, closing_signed_a) = get_closing_signed_broadcast!(nodes[0].node, node_b_id); + let (_, closing_signed_a) = get_closing_signed_broadcast(&nodes[0], node_b_id); let txn_a = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); nodes[1].node.handle_closing_signed(node_a_id, &closing_signed_a.unwrap()); - let (_, none_b) = get_closing_signed_broadcast!(nodes[1].node, node_a_id); + let (_, none_b) = get_closing_signed_broadcast(&nodes[1], node_a_id); assert!(none_b.is_none()); let txn_b = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); @@ -4043,7 +4043,7 @@ fn do_test_reload_mon_update_completion_actions(close_during_reload: bool) { }; nodes[0].node.force_close_broadcasting_latest_txn(&chan_id_ab, &node_b_id, msg).unwrap(); check_added_monitors(&nodes[0], 1); - check_closed_broadcast!(nodes[0], true); + check_closed_broadcast(&nodes[0], 1, true); check_closed_event(&nodes[0], 1, reason, &[node_b_id], 100_000); let as_closing_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); mine_transaction_without_consistency_checks(&nodes[1], &as_closing_tx[0]); @@ -4494,13 +4494,13 @@ fn test_claim_to_closed_channel_blocks_forwarded_preimage_removal() { check_added_monitors(&nodes[0], 1); let a_reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; check_closed_event(&nodes[0], 1, a_reason, &[node_b_id], 1000000); - check_closed_broadcast!(nodes[0], true); + check_closed_broadcast(&nodes[0], 1, true); let as_commit_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); assert_eq!(as_commit_tx.len(), 1); mine_transaction(&nodes[1], &as_commit_tx[0]); - check_closed_broadcast!(nodes[1], true); + check_closed_broadcast(&nodes[1], 1, true); check_added_monitors(&nodes[1], 1); let b_reason = ClosureReason::CommitmentTxConfirmed; check_closed_event(&nodes[1], 1, b_reason, &[node_a_id], 1000000); @@ -4572,13 +4572,13 @@ fn test_claim_to_closed_channel_blocks_claimed_event() { check_added_monitors(&nodes[0], 1); let a_reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; check_closed_event(&nodes[0], 1, a_reason, &[node_b_id], 1000000); - check_closed_broadcast!(nodes[0], true); + check_closed_broadcast(&nodes[0], 1, true); let as_commit_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); assert_eq!(as_commit_tx.len(), 1); mine_transaction(&nodes[1], &as_commit_tx[0]); - check_closed_broadcast!(nodes[1], true); + check_closed_broadcast(&nodes[1], 1, true); check_added_monitors(&nodes[1], 1); let b_reason = ClosureReason::CommitmentTxConfirmed; check_closed_event(&nodes[1], 1, b_reason, &[node_a_id], 1000000); @@ -5099,7 +5099,8 @@ fn test_mpp_claim_to_holding_cell() { send_along_route_with_secret(&nodes[0], route, paths, 500_000, paymnt_hash_1, payment_secret); // Put the C <-> D channel into AwaitingRaa - let (preimage_2, paymnt_hash_2, payment_secret_2) = get_payment_preimage_hash!(nodes[3]); + let (preimage_2, paymnt_hash_2, payment_secret_2) = + get_payment_preimage_hash(&nodes[3], None, None); let onion = RecipientOnionFields::secret_only(payment_secret_2); let id = PaymentId([42; 32]); let pay_params = PaymentParameters::from_node_id(node_d_id, TEST_FINAL_CLTV); diff --git a/lightning/src/ln/channelmanager.rs 
b/lightning/src/ln/channelmanager.rs index 869a431e757..270f00782fd 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -20522,7 +20522,7 @@ mod tests { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - let (_, payment_hash, payment_secret) = get_payment_preimage_hash!(&nodes[0]); + let (_, payment_hash, payment_secret) = get_payment_preimage_hash(&nodes[0], None, None); let payment_data = msgs::FinalOnionHopData { payment_secret, total_msat: 100_000, diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index 66a0147e131..45cab14be31 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -2210,31 +2210,31 @@ macro_rules! check_spends { } } -macro_rules! get_closing_signed_broadcast { - ($node: expr, $dest_pubkey: expr) => {{ - let events = $node.get_and_clear_pending_msg_events(); - assert!(events.len() == 1 || events.len() == 2); - ( - match events[events.len() - 1] { - MessageSendEvent::BroadcastChannelUpdate { ref msg, .. } => { - assert_eq!(msg.contents.channel_flags & 2, 2); - msg.clone() +pub fn get_closing_signed_broadcast( + node: &Node, dest_pubkey: PublicKey, +) -> (msgs::ChannelUpdate, Option<msgs::ClosingSigned>) { + let events = node.node.get_and_clear_pending_msg_events(); + assert!(events.len() == 1 || events.len() == 2); + ( + match events[events.len() - 1] { + MessageSendEvent::BroadcastChannelUpdate { ref msg, .. } => { + assert_eq!(msg.contents.channel_flags & 2, 2); + msg.clone() + }, + _ => panic!("Unexpected event"), + }, + if events.len() == 2 { + match events[0] { + MessageSendEvent::SendClosingSigned { ref node_id, ref msg } => { + assert_eq!(*node_id, dest_pubkey); + Some(msg.clone()) }, _ => panic!("Unexpected event"), - }, - if events.len() == 2 { - match events[0] { - MessageSendEvent::SendClosingSigned { ref node_id, ref msg } => { - assert_eq!(*node_id, $dest_pubkey); - Some(msg.clone()) - }, - _ => panic!("Unexpected event"), - } - } else { - None - }, - ) - }}; + } + } else { + None + }, + ) } #[cfg(test)] @@ -2313,17 +2313,6 @@ pub fn check_closed_broadcast( .collect() } -/// Check that a channel's closing channel update has been broadcasted, and optionally -/// check whether an error message event has occurred. -/// -/// Don't use this, use the identically-named function instead. -#[macro_export] -macro_rules!
check_closed_broadcast { - ($node: expr, $with_error_msg: expr) => { - $crate::ln::functional_test_utils::check_closed_broadcast(&$node, 1, $with_error_msg).pop() - }; -} - #[derive(Default)] pub struct ExpectedCloseEvent { pub channel_capacity_sats: Option<u64>, @@ -2530,10 +2519,10 @@ pub fn close_channel<'a, 'b, 'c>( assert_eq!(broadcaster_b.txn_broadcasted.lock().unwrap().len(), 1); tx_b = broadcaster_b.txn_broadcasted.lock().unwrap().remove(0); let (bs_update, closing_signed_b) = - get_closing_signed_broadcast!(node_b, node_a.get_our_node_id()); + get_closing_signed_broadcast(struct_b, node_a.get_our_node_id()); node_a.handle_closing_signed(node_b.get_our_node_id(), &closing_signed_b.unwrap()); - let (as_update, none_a) = get_closing_signed_broadcast!(node_a, node_b.get_our_node_id()); + let (as_update, none_a) = get_closing_signed_broadcast(struct_a, node_b.get_our_node_id()); assert!(none_a.is_none()); assert_eq!(broadcaster_a.txn_broadcasted.lock().unwrap().len(), 1); tx_a = broadcaster_a.txn_broadcasted.lock().unwrap().remove(0); @@ -2550,10 +2539,10 @@ pub fn close_channel<'a, 'b, 'c>( assert_eq!(broadcaster_a.txn_broadcasted.lock().unwrap().len(), 1); tx_a = broadcaster_a.txn_broadcasted.lock().unwrap().remove(0); let (as_update, closing_signed_a) = - get_closing_signed_broadcast!(node_a, node_b.get_our_node_id()); + get_closing_signed_broadcast(struct_a, node_b.get_our_node_id()); node_b.handle_closing_signed(node_a.get_our_node_id(), &closing_signed_a.unwrap()); - let (bs_update, none_b) = get_closing_signed_broadcast!(node_b, node_a.get_our_node_id()); + let (bs_update, none_b) = get_closing_signed_broadcast(struct_b, node_a.get_our_node_id()); assert!(none_b.is_none()); assert_eq!(broadcaster_b.txn_broadcasted.lock().unwrap().len(), 1); tx_b = broadcaster_b.txn_broadcasted.lock().unwrap().remove(0); @@ -2829,26 +2818,6 @@ pub fn get_payment_preimage_hash( (payment_preimage, payment_hash, payment_secret) } -/// Get a payment preimage and hash. -/// -/// Don't use this, use the identically-named function instead. -#[macro_export] -macro_rules! get_payment_preimage_hash { - ($dest_node: expr) => { - get_payment_preimage_hash!($dest_node, None) - }; - ($dest_node: expr, $min_value_msat: expr) => { - $crate::get_payment_preimage_hash!($dest_node, $min_value_msat, None) - }; - ($dest_node: expr, $min_value_msat: expr, $min_final_cltv_expiry_delta: expr) => { - $crate::ln::functional_test_utils::get_payment_preimage_hash( - &$dest_node, - $min_value_msat, - $min_final_cltv_expiry_delta, - ) - }; -} - /// Gets a route from the given sender to the node described in `payment_params`.
pub fn get_route(send_node: &Node, route_params: &RouteParameters) -> Result<Route, LightningError> { let scorer = TestScorer::new(); @@ -3820,7 +3789,7 @@ pub fn send_along_route<'a, 'b, 'c>( recv_value: u64, ) -> (PaymentPreimage, PaymentHash, PaymentSecret, PaymentId) { let (our_payment_preimage, our_payment_hash, our_payment_secret) = - get_payment_preimage_hash!(expected_route.last().unwrap()); + get_payment_preimage_hash(expected_route.last().unwrap(), None, None); let payment_id = send_along_route_with_secret( origin_node, route, diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index 6fe0c83dfe8..be90130fb63 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -526,7 +526,7 @@ fn do_test_fail_back_before_backwards_timeout(post_fail_back_action: PostFailBac connect_blocks(&nodes[2], TEST_FINAL_CLTV - CLTV_CLAIM_BUFFER + 2); let node_2_txn = test_txn_broadcast(&nodes[2], &chan_2, None, HTLCType::SUCCESS); - check_closed_broadcast!(nodes[2], true); + check_closed_broadcast(&nodes[2], 1, true); let reason = ClosureReason::HTLCsTimedOut { payment_hash: Some(payment_hash) }; check_closed_event(&nodes[2], 1, reason, &[node_b_id], 100_000); check_added_monitors(&nodes[2], 1); @@ -618,7 +618,7 @@ pub fn channel_monitor_network_test() { .force_close_broadcasting_latest_txn(&chan_1.2, &node_a_id, message.clone()) .unwrap(); check_added_monitors(&nodes[1], 1); - check_closed_broadcast!(nodes[1], true); + check_closed_broadcast(&nodes[1], 1, true); let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; check_closed_event(&nodes[1], 1, reason, &[node_a_id], 100000); { @@ -650,7 +650,7 @@ pub fn channel_monitor_network_test() { .node .force_close_broadcasting_latest_txn(&chan_2.2, &node_c_id, message.clone()) .unwrap(); - check_closed_broadcast!(nodes[1], true); + check_closed_broadcast(&nodes[1], 1, true); check_added_monitors(&nodes[1], 1); { let mut node_txn = test_txn_broadcast(&nodes[1], &chan_2, None, HTLCType::NONE); @@ -704,7 +704,7 @@ pub fn channel_monitor_network_test() { .force_close_broadcasting_latest_txn(&chan_3.2, &node_d_id, message.clone()) .unwrap(); check_added_monitors(&nodes[2], 1); - check_closed_broadcast!(nodes[2], true); + check_closed_broadcast(&nodes[2], 1, true); let node2_commitment_txid; { let node_txn = test_txn_broadcast(&nodes[2], &chan_3, None, HTLCType::NONE); @@ -1247,7 +1247,7 @@ pub fn do_test_multiple_package_conflicts(p2a_anchor: bool) { mine_transaction(&nodes[1], node2_commit_tx); let reason = ClosureReason::CommitmentTxConfirmed; check_closed_event(&nodes[1], 1, reason, &[node_c_id], CHAN_CAPACITY); - check_closed_broadcast!(nodes[1], true); + check_closed_broadcast(&nodes[1], 1, true); check_added_monitors(&nodes[1], 1); // Node 1 should immediately claim package 1 but has to wait a block to claim package 2.
@@ -1288,7 +1288,7 @@ pub fn do_test_multiple_package_conflicts(p2a_anchor: bool) { mine_transaction(&nodes[2], node2_commit_tx); let reason = ClosureReason::CommitmentTxConfirmed; check_closed_event(&nodes[2], 1, reason, &[node_b_id], CHAN_CAPACITY); - check_closed_broadcast!(nodes[2], true); + check_closed_broadcast(&nodes[2], 1, true); check_added_monitors(&nodes[2], 1); let process_bump_event = |node: &Node| { @@ -1463,7 +1463,7 @@ pub fn test_htlc_on_chain_success() { assert_eq!(updates.update_fulfill_htlcs.len(), 1); mine_transaction(&nodes[2], &commitment_tx[0]); - check_closed_broadcast!(nodes[2], true); + check_closed_broadcast(&nodes[2], 1, true); check_added_monitors(&nodes[2], 1); let reason = ClosureReason::CommitmentTxConfirmed; check_closed_event(&nodes[2], 1, reason, &[node_b_id], 100000); @@ -1585,7 +1585,7 @@ pub fn test_htlc_on_chain_success() { let node_a_commitment_tx = get_local_commitment_txn!(nodes[0], chan_1.2); check_spends!(node_a_commitment_tx[0], chan_1.3); mine_transaction(&nodes[1], &node_a_commitment_tx[0]); - check_closed_broadcast!(nodes[1], true); + check_closed_broadcast(&nodes[1], 1, true); check_added_monitors(&nodes[1], 1); let reason = ClosureReason::CommitmentTxConfirmed; check_closed_event(&nodes[1], 1, reason, &[node_a_id], 100000); @@ -1620,7 +1620,7 @@ pub fn test_htlc_on_chain_success() { let txn = vec![node_a_commitment_tx[0].clone(), commitment_spend.clone()]; connect_block(&nodes[0], &create_dummy_block(nodes[0].best_block_hash(), 42, txn)); connect_blocks(&nodes[0], TEST_FINAL_CLTV + MIN_CLTV_EXPIRY_DELTA as u32); // Confirm blocks until the HTLC expires - check_closed_broadcast!(nodes[0], true); + check_closed_broadcast(&nodes[0], 1, true); check_added_monitors(&nodes[0], 1); let events = nodes[0].node.get_and_clear_pending_events(); check_added_monitors(&nodes[0], 2); @@ -1728,7 +1728,7 @@ fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) { _ => panic!("Unexpected event"), }; mine_transaction(&nodes[2], &commitment_tx[0]); - check_closed_broadcast!(nodes[2], true); + check_closed_broadcast(&nodes[2], 1, true); check_added_monitors(&nodes[2], 1); let reason = ClosureReason::CommitmentTxConfirmed; check_closed_event(&nodes[2], 1, reason, &[node_b_id], 100000); @@ -1772,7 +1772,7 @@ fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) { mine_transaction(&nodes[1], &timeout_tx); check_added_monitors(&nodes[1], 1); - check_closed_broadcast!(nodes[1], true); + check_closed_broadcast(&nodes[1], 1, true); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); @@ -1812,7 +1812,7 @@ fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) { mine_transaction(&nodes[0], &commitment_tx[0]); connect_blocks(&nodes[0], TEST_FINAL_CLTV + MIN_CLTV_EXPIRY_DELTA as u32); // Confirm blocks until the HTLC expires - check_closed_broadcast!(nodes[0], true); + check_closed_broadcast(&nodes[0], 1, true); check_added_monitors(&nodes[0], 1); let reason = ClosureReason::CommitmentTxConfirmed; check_closed_event(&nodes[0], 1, reason, &[node_b_id], 100000); @@ -1864,7 +1864,7 @@ pub fn test_simple_commitment_revoked_fail_backward() { check_closed_event(&nodes[1], 1, reason, &[node_c_id], 100000); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); check_added_monitors(&nodes[1], 1); - check_closed_broadcast!(nodes[1], true); + check_closed_broadcast(&nodes[1], 1, true); expect_and_process_pending_htlcs_and_htlc_handling_failed( &nodes[1], @@ -2336,7 +2336,7 @@ pub fn fail_backward_pending_htlc_upon_channel_failure() { }, _ => panic!("Unexpected event 
{:?}", events[1]), } - check_closed_broadcast!(nodes[0], true); + check_closed_broadcast(&nodes[0], 1, true); check_added_monitors(&nodes[0], 1); } @@ -2373,7 +2373,7 @@ pub fn test_htlc_ignore_latest_remote_commitment() { .force_close_broadcasting_latest_txn(&chan_id, &node_b_id, message.clone()) .unwrap(); connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1); - check_closed_broadcast!(nodes[0], true); + check_closed_broadcast(&nodes[0], 1, true); check_added_monitors(&nodes[0], 1); let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; check_closed_event(&nodes[0], 1, reason, &[node_b_id], 100000); @@ -2385,7 +2385,7 @@ pub fn test_htlc_ignore_latest_remote_commitment() { let block = create_dummy_block(nodes[1].best_block_hash(), 42, vec![node_txn[0].clone()]); connect_block(&nodes[1], &block); - check_closed_broadcast!(nodes[1], true); + check_closed_broadcast(&nodes[1], 1, true); check_added_monitors(&nodes[1], 1); let reason = ClosureReason::CommitmentTxConfirmed; check_closed_event(&nodes[1], 1, reason, &[node_a_id], 100000); @@ -2454,7 +2454,7 @@ pub fn test_force_close_fail_back() { .node .force_close_broadcasting_latest_txn(&channel_id, &node_b_id, message.clone()) .unwrap(); - check_closed_broadcast!(nodes[2], true); + check_closed_broadcast(&nodes[2], 1, true); check_added_monitors(&nodes[2], 1); let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; check_closed_event(&nodes[2], 1, reason, &[node_b_id], 100000); @@ -2471,7 +2471,7 @@ pub fn test_force_close_fail_back() { mine_transaction(&nodes[1], &commitment_tx); // Note no UpdateHTLCs event here from nodes[1] to nodes[0]! - check_closed_broadcast!(nodes[1], true); + check_closed_broadcast(&nodes[1], 1, true); check_added_monitors(&nodes[1], 1); let reason = ClosureReason::CommitmentTxConfirmed; check_closed_event(&nodes[1], 1, reason, &[node_c_id], 100000); @@ -3530,7 +3530,7 @@ pub fn test_claim_sizeable_push_msat() { .node .force_close_broadcasting_latest_txn(&chan.2, &node_a_id, message.clone()) .unwrap(); - check_closed_broadcast!(nodes[1], true); + check_closed_broadcast(&nodes[1], 1, true); check_added_monitors(&nodes[1], 1); let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; check_closed_event(&nodes[1], 1, reason, &[node_a_id], 100000); @@ -3571,7 +3571,7 @@ pub fn test_claim_on_remote_sizeable_push_msat() { .node .force_close_broadcasting_latest_txn(&chan.2, &node_b_id, message.clone()) .unwrap(); - check_closed_broadcast!(nodes[0], true); + check_closed_broadcast(&nodes[0], 1, true); check_added_monitors(&nodes[0], 1); let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; check_closed_event(&nodes[0], 1, reason, &[node_b_id], 100000); @@ -3582,7 +3582,7 @@ pub fn test_claim_on_remote_sizeable_push_msat() { assert_eq!(node_txn[0].output.len(), 2); // We can't force trimming of to_remote output as channel_reserve_satoshis block us to do so at channel opening mine_transaction(&nodes[1], &node_txn[0]); - check_closed_broadcast!(nodes[1], true); + check_closed_broadcast(&nodes[1], 1, true); check_added_monitors(&nodes[1], 1); let reason = ClosureReason::CommitmentTxConfirmed; check_closed_event(&nodes[1], 1, reason, &[node_a_id], 100000); @@ -3615,7 +3615,7 @@ pub fn test_claim_on_remote_revoked_sizeable_push_msat() { claim_payment(&nodes[0], &[&nodes[1]], payment_preimage); mine_transaction(&nodes[1], &revoked_local_txn[0]); - 
check_closed_broadcast!(nodes[1], true); + check_closed_broadcast(&nodes[1], 1, true); check_added_monitors(&nodes[1], 1); let reason = ClosureReason::CommitmentTxConfirmed; check_closed_event(&nodes[1], 1, reason, &[node_a_id], 100000); @@ -3762,7 +3762,7 @@ fn do_test_static_spendable_outputs_justice_tx_revoked_commitment_tx(split_tx: b } mine_transaction(&nodes[1], &revoked_local_txn[0]); - check_closed_broadcast!(nodes[1], true); + check_closed_broadcast(&nodes[1], 1, true); check_added_monitors(&nodes[1], 1); let reason = ClosureReason::CommitmentTxConfirmed; check_closed_event(&nodes[1], 1, reason, &[node_a_id], 100000); @@ -3820,7 +3820,7 @@ pub fn test_static_spendable_outputs_justice_tx_revoked_htlc_timeout_tx() { // A will generate HTLC-Timeout from revoked commitment tx mine_transaction(&nodes[0], &revoked_local_txn[0]); - check_closed_broadcast!(nodes[0], true); + check_closed_broadcast(&nodes[0], 1, true); check_added_monitors(&nodes[0], 1); let reason = ClosureReason::CommitmentTxConfirmed; check_closed_event(&nodes[0], 1, reason, &[node_b_id], 100000); @@ -3843,7 +3843,7 @@ pub fn test_static_spendable_outputs_justice_tx_revoked_htlc_timeout_tx() { // B will generate justice tx from A's revoked commitment/HTLC tx let txn = vec![revoked_local_txn[0].clone(), revoked_htlc_txn[0].clone()]; connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, txn)); - check_closed_broadcast!(nodes[1], true); + check_closed_broadcast(&nodes[1], 1, true); check_added_monitors(&nodes[1], 1); let reason = ClosureReason::CommitmentTxConfirmed; check_closed_event(&nodes[1], 1, reason, &[node_a_id], 100000); @@ -3904,7 +3904,7 @@ pub fn test_static_spendable_outputs_justice_tx_revoked_htlc_success_tx() { // B will generate HTLC-Success from revoked commitment tx mine_transaction(&nodes[1], &revoked_local_txn[0]); - check_closed_broadcast!(nodes[1], true); + check_closed_broadcast(&nodes[1], 1, true); check_added_monitors(&nodes[1], 1); let reason = ClosureReason::CommitmentTxConfirmed; check_closed_event(&nodes[1], 1, reason, &[node_a_id], 100000); @@ -3925,7 +3925,7 @@ pub fn test_static_spendable_outputs_justice_tx_revoked_htlc_success_tx() { // A will generate justice tx from B's revoked commitment/HTLC tx let txn = vec![revoked_local_txn[0].clone(), revoked_htlc_txn[0].clone()]; connect_block(&nodes[0], &create_dummy_block(nodes[0].best_block_hash(), 42, txn)); - check_closed_broadcast!(nodes[0], true); + check_closed_broadcast(&nodes[0], 1, true); check_added_monitors(&nodes[0], 1); let reason = ClosureReason::CommitmentTxConfirmed; check_closed_event(&nodes[0], 1, reason, &[node_b_id], 100000); @@ -4011,7 +4011,7 @@ pub fn test_onchain_to_onchain_claim() { assert!(updates.update_fail_malformed_htlcs.is_empty()); mine_transaction(&nodes[2], &commitment_tx[0]); - check_closed_broadcast!(nodes[2], true); + check_closed_broadcast(&nodes[2], 1, true); check_added_monitors(&nodes[2], 1); let reason = ClosureReason::CommitmentTxConfirmed; check_closed_event(&nodes[2], 1, reason, &[node_b_id], 100000); @@ -4107,7 +4107,7 @@ pub fn test_onchain_to_onchain_claim() { assert!(b_txn[0].output[0].script_pubkey.is_p2wpkh()); // direct payment assert_eq!(b_txn[0].lock_time.to_consensus_u32(), nodes[1].best_block_info().1); // Success tx - check_closed_broadcast!(nodes[1], true); + check_closed_broadcast(&nodes[1], 1, true); check_added_monitors(&nodes[1], 1); } @@ -4173,7 +4173,7 @@ pub fn test_duplicate_payment_hash_one_failure_one_success() { check_spends!(commitment_txn[0], chan_2.3); 
mine_transaction(&nodes[1], &commitment_txn[0]); - check_closed_broadcast!(nodes[1], true); + check_closed_broadcast(&nodes[1], 1, true); check_added_monitors(&nodes[1], 1); let reason = ClosureReason::CommitmentTxConfirmed; check_closed_event(&nodes[1], 1, reason, &[node_c_id], 100000); @@ -4576,7 +4576,7 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno } connect_blocks(&nodes[2], ANTI_REORG_DELAY - 1); - check_closed_broadcast!(nodes[2], true); + check_closed_broadcast(&nodes[2], 1, true); if deliver_last_raa { nodes[2].node.process_pending_htlc_forwards(); @@ -4808,7 +4808,7 @@ pub fn test_dynamic_spendable_outputs_local_htlc_timeout_tx() { // Timeout HTLC on A's chain and so it can generate a HTLC-Timeout tx mine_transaction(&nodes[0], &local_txn[0]); - check_closed_broadcast!(nodes[0], true); + check_closed_broadcast(&nodes[0], 1, true); check_added_monitors(&nodes[0], 1); let reason = ClosureReason::CommitmentTxConfirmed; check_closed_event(&nodes[0], 1, reason, &[node_b_id], 100000); @@ -4936,7 +4936,7 @@ pub fn test_key_derivation_params() { // Timeout HTLC on A's chain and so it can generate a HTLC-Timeout tx mine_transaction(&nodes[0], &local_txn_1[0]); connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires - check_closed_broadcast!(nodes[0], true); + check_closed_broadcast(&nodes[0], 1, true); check_added_monitors(&nodes[0], 1); let reason = ClosureReason::CommitmentTxConfirmed; check_closed_event(&nodes[0], 1, reason, &[node_b_id], 100000); @@ -5045,7 +5045,7 @@ fn do_htlc_claim_local_commitment_only(use_dust: bool) { } let htlc_type = if use_dust { HTLCType::NONE } else { HTLCType::SUCCESS }; test_txn_broadcast(&nodes[1], &chan, None, htlc_type); - check_closed_broadcast!(nodes[1], true); + check_closed_broadcast(&nodes[1], 1, true); check_added_monitors(&nodes[1], 1); let reason = ClosureReason::HTLCsTimedOut { payment_hash: Some(payment_hash) }; check_closed_event(&nodes[1], 1, reason, &[node_a_id], 100000); @@ -5086,7 +5086,7 @@ fn do_htlc_claim_current_remote_commitment_only(use_dust: bool) { block.header.prev_blockhash = block.block_hash(); } test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE); - check_closed_broadcast!(nodes[0], true); + check_closed_broadcast(&nodes[0], 1, true); check_added_monitors(&nodes[0], 1); let reason = ClosureReason::HTLCsTimedOut { payment_hash: Some(payment_hash) }; check_closed_event(&nodes[0], 1, reason, &[node_b_id], 100000); @@ -5147,7 +5147,7 @@ fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no } if !check_revoke_no_close { test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE); - check_closed_broadcast!(nodes[0], true); + check_closed_broadcast(&nodes[0], 1, true); check_added_monitors(&nodes[0], 1); let reason = ClosureReason::HTLCsTimedOut { payment_hash: Some(our_payment_hash) }; check_closed_event(&nodes[0], 1, reason, &[node_b_id], 100000); @@ -5870,7 +5870,7 @@ fn do_test_failure_delay_dust_htlc_local_commitment(announce_latest: bool) { mine_transaction(&nodes[0], &as_prev_commitment_tx[0]); } - check_closed_broadcast!(nodes[0], true); + check_closed_broadcast(&nodes[0], 1, true); check_added_monitors(&nodes[0], 1); let reason = ClosureReason::CommitmentTxConfirmed; check_closed_event(&nodes[0], 1, reason, &[node_b_id], 100000); @@ -5954,7 +5954,7 @@ fn do_test_sweep_outbound_htlc_failure_update(revoked: bool, local: bool) { mine_transaction(&nodes[0], &as_commitment_tx[0]); let reason = ClosureReason::CommitmentTxConfirmed; 
check_closed_event(&nodes[0], 1, reason, &[node_b_id], 100000); - check_closed_broadcast!(nodes[0], true); + check_closed_broadcast(&nodes[0], 1, true); check_added_monitors(&nodes[0], 1); connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1); let conditions = PaymentFailedConditions::new().from_mon_update(); @@ -5977,7 +5977,7 @@ fn do_test_sweep_outbound_htlc_failure_update(revoked: bool, local: bool) { } else { // We fail dust-HTLC 1 by broadcast of remote commitment tx. If revoked, fail also non-dust HTLC mine_transaction(&nodes[0], &bs_commitment_tx[0]); - check_closed_broadcast!(nodes[0], true); + check_closed_broadcast(&nodes[0], 1, true); check_added_monitors(&nodes[0], 1); let reason = ClosureReason::CommitmentTxConfirmed; check_closed_event(&nodes[0], 1, reason, &[node_b_id], 100000); @@ -6057,7 +6057,7 @@ pub fn test_check_htlc_underpaying() { ) .unwrap(); - let (_, our_payment_hash, _) = get_payment_preimage_hash!(nodes[0]); + let (_, our_payment_hash, _) = get_payment_preimage_hash(&nodes[0], None, None); let our_payment_secret = nodes[1] .node .create_inbound_payment_for_hash(our_payment_hash, Some(100_000), 7200, None) @@ -6706,7 +6706,7 @@ pub fn test_counterparty_raa_skip_no_crash() { }; nodes[1].node.handle_revoke_and_ack(node_a_id, &raa); assert_eq!( - check_closed_broadcast!(nodes[1], true).unwrap().data, + check_closed_broadcast(&nodes[1], 1, true).pop().unwrap().data, "Received an unexpected revoke_and_ack" ); check_added_monitors(&nodes[1], 1); @@ -6748,7 +6748,7 @@ pub fn test_bump_txn_sanitize_tracking_maps() { assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 0); mine_transaction(&nodes[0], &revoked_local_txn[0]); - check_closed_broadcast!(nodes[0], true); + check_closed_broadcast(&nodes[0], 1, true); check_added_monitors(&nodes[0], 1); let reason = ClosureReason::CommitmentTxConfirmed; check_closed_event(&nodes[0], 1, reason, &[node_b_id], 1000000); @@ -7733,7 +7733,7 @@ pub fn test_htlc_no_detection() { &block, nodes[0].best_block_info().1 + 1, ); - check_closed_broadcast!(nodes[0], true); + check_closed_broadcast(&nodes[0], 1, true); check_added_monitors(&nodes[0], 1); let reason = ClosureReason::CommitmentTxConfirmed; check_closed_event(&nodes[0], 1, reason, &[node_b_id], 100000); @@ -7821,7 +7821,7 @@ fn do_test_onchain_htlc_settlement_after_close( .node .force_close_broadcasting_latest_txn(&chan_ab.2, &counterparty_node_id, message.clone()) .unwrap(); - check_closed_broadcast!(nodes[force_closing_node], true); + check_closed_broadcast(&nodes[force_closing_node], 1, true); check_added_monitors(&nodes[force_closing_node], 1); let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; check_closed_event(&nodes[force_closing_node], 1, reason, &[counterparty_node_id], 100000); @@ -7836,7 +7836,7 @@ fn do_test_onchain_htlc_settlement_after_close( &create_dummy_block(nodes[1].best_block_hash(), 42, vec![txn_to_broadcast[0].clone()]), ); if broadcast_alice { - check_closed_broadcast!(nodes[1], true); + check_closed_broadcast(&nodes[1], 1, true); check_added_monitors(&nodes[1], 1); let reason = ClosureReason::CommitmentTxConfirmed; check_closed_event(&nodes[1], 1, reason, &[node_a_id], 100000); @@ -7925,7 +7925,7 @@ fn do_test_onchain_htlc_settlement_after_close( ); // If Bob was the one to force-close, he will have already passed these checks earlier. 
if broadcast_alice { - check_closed_broadcast!(nodes[1], true); + check_closed_broadcast(&nodes[1], 1, true); check_added_monitors(&nodes[1], 1); let reason = ClosureReason::CommitmentTxConfirmed; check_closed_event(&nodes[1], 1, reason, &[node_a_id], 100000); @@ -8120,7 +8120,7 @@ pub fn test_error_chans_closed() { &msgs::ErrorMessage { channel_id: chan_2.2, data: "ERR".to_owned() }, ); check_added_monitors(&nodes[0], 1); - check_closed_broadcast!(nodes[0], false); + check_closed_broadcast(&nodes[0], 1, false); let reason = ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString("ERR".to_string()) }; @@ -8318,7 +8318,7 @@ fn do_test_dup_htlc_second_rejected(test_for_second_fail_panic: bool) { let route = get_route!(nodes[0], payment_params, 10_000).unwrap(); let (our_payment_preimage, our_payment_hash, our_payment_secret) = - get_payment_preimage_hash!(&nodes[1]); + get_payment_preimage_hash(&nodes[1], None, None); { let onion = RecipientOnionFields::secret_only(our_payment_secret); @@ -8467,7 +8467,7 @@ pub fn test_inconsistent_mpp_params() { } }); - let (preimage, hash, payment_secret) = get_payment_preimage_hash!(&nodes[3]); + let (preimage, hash, payment_secret) = get_payment_preimage_hash(&nodes[3], None, None); let cur_height = nodes[0].best_block_info().1; let id = PaymentId([42; 32]); @@ -9476,7 +9476,7 @@ fn do_payment_with_custom_min_final_cltv_expiry(valid_delta: bool, use_user_hash PaymentParameters::from_node_id(node_b_id, final_cltv_expiry_delta as u32); let (hash, payment_preimage, payment_secret) = if use_user_hash { let (payment_preimage, hash, payment_secret) = - get_payment_preimage_hash!(nodes[1], Some(recv_value), Some(min_cltv_expiry_delta)); + get_payment_preimage_hash(&nodes[1], Some(recv_value), Some(min_cltv_expiry_delta)); (hash, payment_preimage, payment_secret) } else { let (hash, payment_secret) = nodes[1] diff --git a/lightning/src/ln/htlc_reserve_unit_tests.rs b/lightning/src/ln/htlc_reserve_unit_tests.rs index 5b2ffca5fd4..63faa984968 100644 --- a/lightning/src/ln/htlc_reserve_unit_tests.rs +++ b/lightning/src/ln/htlc_reserve_unit_tests.rs @@ -268,7 +268,8 @@ pub fn test_channel_reserve_holding_cell_htlcs() { { let mut route = route_1.clone(); route.paths[0].hops.last_mut().unwrap().fee_msat = recv_value_2 + 1; - let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[2]); + let (_, our_payment_hash, our_payment_secret) = + get_payment_preimage_hash(&nodes[2], None, None); let onion = RecipientOnionFields::secret_only(our_payment_secret); let id = PaymentId(our_payment_hash.0); let res = nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id); @@ -1098,7 +1099,7 @@ pub fn test_chan_reserve_violation_inbound_htlc_outbound_channel() { // Check that the payment failed and the channel is closed in response to the malicious UpdateAdd. 
nodes[0].logger.assert_log_contains("lightning::ln::channelmanager", "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value", 3); assert_eq!(nodes[0].node.list_channels().len(), 0); - let err_msg = check_closed_broadcast!(nodes[0], true).unwrap(); + let err_msg = check_closed_broadcast(&nodes[0], 1, true).pop().unwrap(); assert_eq!(err_msg.data, "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value"); let reason = ClosureReason::ProcessingError { err: "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_string() }; check_added_monitors(&nodes[0], 1); @@ -1291,7 +1292,7 @@ pub fn test_chan_reserve_violation_inbound_htlc_inbound_chan() { 3, ); assert_eq!(nodes[1].node.list_channels().len(), 1); - let err_msg = check_closed_broadcast!(nodes[1], true).unwrap(); + let err_msg = check_closed_broadcast(&nodes[1], 1, true).pop().unwrap(); assert_eq!(err_msg.data, "Remote HTLC add would put them under remote reserve value"); check_added_monitors(&nodes[1], 1); let reason = ClosureReason::ProcessingError { err: err_msg.data.clone() }; @@ -1409,7 +1410,7 @@ pub fn test_update_add_htlc_bolt2_receiver_zero_value_msat() { "Remote side tried to send a 0-msat HTLC", 3, ); - check_closed_broadcast!(nodes[1], true).unwrap(); + check_closed_broadcast(&nodes[1], 1, true); check_added_monitors(&nodes[1], 1); let reason = ClosureReason::ProcessingError { err: "Remote side tried to send a 0-msat HTLC".to_string(), @@ -1566,7 +1567,7 @@ pub fn test_update_add_htlc_bolt2_receiver_check_amount_received_more_than_min() updates.update_add_htlcs[0].amount_msat = htlc_minimum_msat - 1; nodes[1].node.handle_update_add_htlc(node_a_id, &updates.update_add_htlcs[0]); assert!(nodes[1].node.list_channels().is_empty()); - let err_msg = check_closed_broadcast!(nodes[1], true).unwrap(); + let err_msg = check_closed_broadcast(&nodes[1], 1, true).pop().unwrap(); assert!(regex::Regex::new(r"Remote side tried to send less than our minimum HTLC value\. Lower limit: \(\d+\)\. 
Actual: \(\d+\)").unwrap().is_match(err_msg.data.as_str())); check_added_monitors(&nodes[1], 1); let reason = ClosureReason::ProcessingError { err: err_msg.data }; @@ -1611,7 +1612,7 @@ pub fn test_update_add_htlc_bolt2_receiver_sender_can_afford_amount_sent() { nodes[1].node.handle_update_add_htlc(node_a_id, &updates.update_add_htlcs[0]); assert!(nodes[1].node.list_channels().is_empty()); - let err_msg = check_closed_broadcast!(nodes[1], true).unwrap(); + let err_msg = check_closed_broadcast(&nodes[1], 1, true).pop().unwrap(); assert_eq!(err_msg.data, "Remote HTLC add would put them under remote reserve value"); check_added_monitors(&nodes[1], 1); let reason = ClosureReason::ProcessingError { err: err_msg.data }; @@ -1678,7 +1679,7 @@ pub fn test_update_add_htlc_bolt2_receiver_check_max_htlc_limit() { nodes[1].node.handle_update_add_htlc(node_a_id, &msg); assert!(nodes[1].node.list_channels().is_empty()); - let err_msg = check_closed_broadcast!(nodes[1], true).unwrap(); + let err_msg = check_closed_broadcast(&nodes[1], 1, true).pop().unwrap(); assert!(regex::Regex::new(r"Remote tried to push more than our max accepted HTLCs \(\d+\)") .unwrap() .is_match(err_msg.data.as_str())); @@ -1713,7 +1714,7 @@ pub fn test_update_add_htlc_bolt2_receiver_check_max_in_flight_msat() { nodes[1].node.handle_update_add_htlc(node_a_id, &updates.update_add_htlcs[0]); assert!(nodes[1].node.list_channels().is_empty()); - let err_msg = check_closed_broadcast!(nodes[1], true).unwrap(); + let err_msg = check_closed_broadcast(&nodes[1], 1, true).pop().unwrap(); assert!(regex::Regex::new("Remote HTLC add would put them over our max HTLC value") .unwrap() .is_match(err_msg.data.as_str())); @@ -1745,7 +1746,7 @@ pub fn test_update_add_htlc_bolt2_receiver_check_cltv_expiry() { nodes[1].node.handle_update_add_htlc(node_a_id, &updates.update_add_htlcs[0]); assert!(nodes[1].node.list_channels().is_empty()); - let err_msg = check_closed_broadcast!(nodes[1], true).unwrap(); + let err_msg = check_closed_broadcast(&nodes[1], 1, true).pop().unwrap(); assert_eq!(err_msg.data, "Remote provided CLTV expiry in seconds instead of block height"); check_added_monitors(&nodes[1], 1); let reason = ClosureReason::ProcessingError { err: err_msg.data }; @@ -1809,7 +1810,7 @@ pub fn test_update_add_htlc_bolt2_receiver_check_repeated_id_ignore() { nodes[1].node.handle_update_add_htlc(node_a_id, &updates.update_add_htlcs[0]); assert!(nodes[1].node.list_channels().is_empty()); - let err_msg = check_closed_broadcast!(nodes[1], true).unwrap(); + let err_msg = check_closed_broadcast(&nodes[1], 1, true).pop().unwrap(); assert!(regex::Regex::new(r"Remote skipped HTLC ID \(skipped ID: \d+\)") .unwrap() .is_match(err_msg.data.as_str())); @@ -1851,7 +1852,7 @@ pub fn test_update_fulfill_htlc_bolt2_update_fulfill_htlc_before_commitment() { nodes[0].node.handle_update_fulfill_htlc(node_b_id, update_msg); assert!(nodes[0].node.list_channels().is_empty()); - let err_msg = check_closed_broadcast!(nodes[0], true).unwrap(); + let err_msg = check_closed_broadcast(&nodes[0], 1, true).pop().unwrap(); assert!(regex::Regex::new( r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed" ) @@ -1895,7 +1896,7 @@ pub fn test_update_fulfill_htlc_bolt2_update_fail_htlc_before_commitment() { nodes[0].node.handle_update_fail_htlc(node_b_id, &update_msg); assert!(nodes[0].node.list_channels().is_empty()); - let err_msg = check_closed_broadcast!(nodes[0], true).unwrap(); + let err_msg = check_closed_broadcast(&nodes[0], 1, true).pop().unwrap(); 
assert!(regex::Regex::new( r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed" ) @@ -1938,7 +1939,7 @@ pub fn test_update_fulfill_htlc_bolt2_update_fail_malformed_htlc_before_commitme nodes[0].node.handle_update_fail_malformed_htlc(node_b_id, &update_msg); assert!(nodes[0].node.list_channels().is_empty()); - let err_msg = check_closed_broadcast!(nodes[0], true).unwrap(); + let err_msg = check_closed_broadcast(&nodes[0], 1, true).pop().unwrap(); assert!(regex::Regex::new( r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed" ) @@ -2001,7 +2002,7 @@ pub fn test_update_fulfill_htlc_bolt2_incorrect_htlc_id() { nodes[0].node.handle_update_fulfill_htlc(node_b_id, update_fulfill_msg); assert!(nodes[0].node.list_channels().is_empty()); - let err_msg = check_closed_broadcast!(nodes[0], true).unwrap(); + let err_msg = check_closed_broadcast(&nodes[0], 1, true).pop().unwrap(); assert_eq!(err_msg.data, "Remote tried to fulfill/fail an HTLC we couldn't find"); check_added_monitors(&nodes[0], 1); let reason = ClosureReason::ProcessingError { err: err_msg.data }; @@ -2060,7 +2061,7 @@ pub fn test_update_fulfill_htlc_bolt2_wrong_preimage() { nodes[0].node.handle_update_fulfill_htlc(node_b_id, update_fulfill_msg); assert!(nodes[0].node.list_channels().is_empty()); - let err_msg = check_closed_broadcast!(nodes[0], true).unwrap(); + let err_msg = check_closed_broadcast(&nodes[0], 1, true).pop().unwrap(); assert!(regex::Regex::new(r"Remote tried to fulfill HTLC \(\d+\) with an incorrect preimage") .unwrap() .is_match(err_msg.data.as_str())); @@ -2133,7 +2134,7 @@ pub fn test_update_fulfill_htlc_bolt2_missing_badonion_bit_for_malformed_htlc_me nodes[0].node.handle_update_fail_malformed_htlc(node_b_id, &update_msg); assert!(nodes[0].node.list_channels().is_empty()); - let err_msg = check_closed_broadcast!(nodes[0], true).unwrap(); + let err_msg = check_closed_broadcast(&nodes[0], 1, true).pop().unwrap(); assert_eq!(err_msg.data, "Got update_fail_malformed_htlc with BADONION not set"); check_added_monitors(&nodes[0], 1); let reason = ClosureReason::ProcessingError { err: err_msg.data }; diff --git a/lightning/src/ln/monitor_tests.rs b/lightning/src/ln/monitor_tests.rs index fd33ec217ca..157445874b7 100644 --- a/lightning/src/ln/monitor_tests.rs +++ b/lightning/src/ln/monitor_tests.rs @@ -84,7 +84,7 @@ fn chanmon_fail_from_stale_commitment() { // Don't bother delivering the new HTLC add/commits, instead confirming the pre-HTLC commitment // transaction for nodes[1]. mine_transaction(&nodes[1], &bs_txn[0]); - check_closed_broadcast!(nodes[1], true); + check_closed_broadcast(&nodes[1], 1, true); check_added_monitors(&nodes[1], 1); check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, &[nodes[2].node.get_our_node_id()], 100000); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); @@ -140,7 +140,7 @@ fn revoked_output_htlc_resolution_timing() { // Confirm the revoked commitment transaction, closing the channel. 
mine_transaction(&nodes[1], &revoked_local_txn[0]); - check_closed_broadcast!(nodes[1], true); + check_closed_broadcast(&nodes[1], 1, true); check_added_monitors(&nodes[1], 1); check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, &[nodes[0].node.get_our_node_id()], 1000000); @@ -187,7 +187,7 @@ fn archive_fully_resolved_monitors() { let message = "Channel force-closed".to_owned(); nodes[0].node.force_close_broadcasting_latest_txn(&chan_id, &nodes[1].node.get_our_node_id(), message.clone()).unwrap(); check_added_monitors(&nodes[0], 1); - check_closed_broadcast!(nodes[0], true); + check_closed_broadcast(&nodes[0], 1, true); let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; check_closed_event(&nodes[0], 1, reason, &[nodes[1].node.get_our_node_id()], 1_000_000); @@ -369,9 +369,9 @@ fn do_chanmon_claim_value_coop_close(keyed_anchors: bool, p2a_anchor: bool) { nodes[1].node.handle_closing_signed(nodes[0].node.get_our_node_id(), &node_0_closing_signed); let node_1_closing_signed = get_event_msg!(nodes[1], MessageSendEvent::SendClosingSigned, nodes[0].node.get_our_node_id()); nodes[0].node.handle_closing_signed(nodes[1].node.get_our_node_id(), &node_1_closing_signed); - let (_, node_0_2nd_closing_signed) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id()); + let (_, node_0_2nd_closing_signed) = get_closing_signed_broadcast(&nodes[0], nodes[1].node.get_our_node_id()); nodes[1].node.handle_closing_signed(nodes[0].node.get_our_node_id(), &node_0_2nd_closing_signed.unwrap()); - let (_, node_1_none) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id()); + let (_, node_1_none) = get_closing_signed_broadcast(&nodes[1], nodes[0].node.get_our_node_id()); assert!(node_1_none.is_none()); let shutdown_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); @@ -678,11 +678,11 @@ fn do_test_claim_value_force_close(keyed_anchors: bool, p2a_anchor: bool, prev_c assert_eq!(remote_txn[0].output[b_broadcast_txn[0].input[0].previous_output.vout as usize].value.to_sat(), 3_000); assert_eq!(remote_txn[0].output[b_broadcast_txn[1].input[0].previous_output.vout as usize].value.to_sat(), 4_000); - check_closed_broadcast!(nodes[0], true); + check_closed_broadcast(&nodes[0], 1, true); check_added_monitors(&nodes[0], 1); check_closed_event(&nodes[0], 1, ClosureReason::CommitmentTxConfirmed, &[nodes[1].node.get_our_node_id()], 1000000); assert!(nodes[0].node.list_channels().is_empty()); - check_closed_broadcast!(nodes[1], true); + check_closed_broadcast(&nodes[1], 1, true); check_added_monitors(&nodes[1], 1); check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, &[nodes[0].node.get_our_node_id()], 1000000); assert!(nodes[1].node.list_channels().is_empty()); @@ -916,7 +916,7 @@ fn do_test_balances_on_local_commitment_htlcs(keyed_anchors: bool, p2a_anchor: b let node_a_commitment_claimable = nodes[0].best_block_info().1 + BREAKDOWN_TIMEOUT as u32; nodes[0].node.force_close_broadcasting_latest_txn(&chan_id, &nodes[1].node.get_our_node_id(), message.clone()).unwrap(); check_added_monitors(&nodes[0], 1); - check_closed_broadcast!(nodes[0], true); + check_closed_broadcast(&nodes[0], 1, true); let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; check_closed_event(&nodes[0], 1, reason, &[nodes[1].node.get_our_node_id()], 1000000); if keyed_anchors || p2a_anchor { @@ -976,7 +976,7 @@ fn 
do_test_balances_on_local_commitment_htlcs(keyed_anchors: bool, p2a_anchor: b // Get nodes[1]'s HTLC claim tx for the second HTLC mine_transaction(&nodes[1], &commitment_tx); - check_closed_broadcast!(nodes[1], true); + check_closed_broadcast(&nodes[1], 1, true); check_added_monitors(&nodes[1], 1); check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, &[nodes[0].node.get_our_node_id()], 1000000); let bs_htlc_claim_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); @@ -1207,7 +1207,7 @@ fn test_no_preimage_inbound_htlc_balances() { mine_transaction(&nodes[0], &as_txn[0]); nodes[0].tx_broadcaster.clear(); - check_closed_broadcast!(nodes[0], true); + check_closed_broadcast(&nodes[0], 1, true); check_added_monitors(&nodes[0], 1); check_closed_event(&nodes[0], 1, ClosureReason::CommitmentTxConfirmed, &[nodes[1].node.get_our_node_id()], 1000000); @@ -1215,7 +1215,7 @@ fn test_no_preimage_inbound_htlc_balances() { sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances())); mine_transaction(&nodes[1], &as_txn[0]); - check_closed_broadcast!(nodes[1], true); + check_closed_broadcast(&nodes[1], 1, true); check_added_monitors(&nodes[1], 1); check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, &[nodes[0].node.get_our_node_id()], 1000000); @@ -1428,7 +1428,7 @@ fn do_test_revoked_counterparty_commitment_balances(keyed_anchors: bool, p2a_anc let _b_htlc_msgs = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); connect_blocks(&nodes[0], htlc_cltv_timeout + 1 - 10); - check_closed_broadcast!(nodes[0], true); + check_closed_broadcast(&nodes[0], 1, true); check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_events(); @@ -1457,7 +1457,7 @@ fn do_test_revoked_counterparty_commitment_balances(keyed_anchors: bool, p2a_anc } connect_blocks(&nodes[1], htlc_cltv_timeout + 1 - 10); - check_closed_broadcast!(nodes[1], true); + check_closed_broadcast(&nodes[1], 1, true); check_added_monitors(&nodes[1], 1); check_closed_events(&nodes[1], &[ExpectedCloseEvent { channel_capacity_sats: Some(1_000_000), @@ -1718,7 +1718,7 @@ fn do_test_revoked_counterparty_htlc_tx_balances(keyed_anchors: bool, p2a_anchor // B will generate an HTLC-Success from its revoked commitment tx mine_transaction(&nodes[1], &revoked_local_txn[0]); - check_closed_broadcast!(nodes[1], true); + check_closed_broadcast(&nodes[1], 1, true); check_added_monitors(&nodes[1], 1); check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, &[nodes[0].node.get_our_node_id()], 1000000); if keyed_anchors || p2a_anchor { @@ -1762,7 +1762,7 @@ fn do_test_revoked_counterparty_htlc_tx_balances(keyed_anchors: bool, p2a_anchor &[HTLCHandlingFailureType::Receive { payment_hash: failed_payment_hash }]); // A will generate justice tx from B's revoked commitment/HTLC tx mine_transaction(&nodes[0], &revoked_local_txn[0]); - check_closed_broadcast!(nodes[0], true); + check_closed_broadcast(&nodes[0], 1, true); check_added_monitors(&nodes[0], 1); check_closed_event(&nodes[0], 1, ClosureReason::CommitmentTxConfirmed, &[nodes[1].node.get_our_node_id()], 1000000); let to_remote_conf_height = nodes[0].best_block_info().1 + ANTI_REORG_DELAY - 1; @@ -2042,7 +2042,7 @@ fn do_test_revoked_counterparty_aggregated_claims(keyed_anchors: bool, p2a_ancho sorted_vec(nodes[1].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances())); mine_transaction(&nodes[1], &as_revoked_txn[0]); - 
check_closed_broadcast!(nodes[1], true); + check_closed_broadcast(&nodes[1], 1, true); check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, &[nodes[0].node.get_our_node_id()], 1000000); check_added_monitors(&nodes[1], 1); @@ -2414,7 +2414,7 @@ fn do_test_monitor_rebroadcast_pending_claims(keyed_anchors: bool, p2a_anchor: b assert_eq!(commitment_txn.len(), if keyed_anchors || p2a_anchor { 1 /* commitment tx only */} else { 2 /* commitment and htlc timeout tx */ }); check_spends!(&commitment_txn[0], &funding_tx); mine_transaction(&nodes[0], &commitment_txn[0]); - check_closed_broadcast!(&nodes[0], true); + check_closed_broadcast(&nodes[0], 1, true); check_closed_event(&nodes[0], 1, ClosureReason::CommitmentTxConfirmed, &[nodes[1].node.get_our_node_id()], 1000000); check_added_monitors(&nodes[0], 1); @@ -3156,13 +3156,13 @@ fn do_test_monitor_claims_with_random_signatures(keyed_anchors: bool, p2a_anchor if p2a_anchor { mine_transaction(closing_node, anchor_tx.as_ref().unwrap()); } - check_closed_broadcast!(closing_node, true); + check_closed_broadcast(closing_node, 1, true); check_added_monitors(&closing_node, 1); let message = "ChannelMonitor-initiated commitment transaction broadcast".to_string(); check_closed_event(&closing_node, 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }, &[other_node.node.get_our_node_id()], 1_000_000); mine_transaction(other_node, &commitment_tx); - check_closed_broadcast!(other_node, true); + check_closed_broadcast(other_node, 1, true); check_added_monitors(&other_node, 1); check_closed_event(&other_node, 1, ClosureReason::CommitmentTxConfirmed, &[closing_node.node.get_our_node_id()], 1_000_000); diff --git a/lightning/src/ln/onion_route_tests.rs b/lightning/src/ln/onion_route_tests.rs index 27e0cfafade..fe7d8332101 100644 --- a/lightning/src/ln/onion_route_tests.rs +++ b/lightning/src/ln/onion_route_tests.rs @@ -418,7 +418,7 @@ fn test_fee_failures() { // If the hop gives fee_insufficient but enough fees were provided, then the previous hop // malleated the payment before forwarding, taking funds when they shouldn't have. However, // because we ignore channel update contents, we will still blame the 2nd channel. 
- let (_, payment_hash, payment_secret) = get_payment_preimage_hash!(nodes[2]); + let (_, payment_hash, payment_secret) = get_payment_preimage_hash(&nodes[2], None, None); let short_channel_id = channels[1].0.contents.short_channel_id; run_onion_failure_test( "fee_insufficient", @@ -449,7 +449,7 @@ fn test_fee_failures() { } let (payment_preimage_success, payment_hash_success, payment_secret_success) = - get_payment_preimage_hash!(nodes[2]); + get_payment_preimage_hash(&nodes[2], None, None); let recipient_onion = RecipientOnionFields::secret_only(payment_secret_success); let payment_id = PaymentId(payment_hash_success.0); nodes[0] @@ -667,7 +667,7 @@ fn test_onion_failure() { Some(route.paths[0].hops[1].short_channel_id), None, ); - let (_, payment_hash, payment_secret) = get_payment_preimage_hash!(nodes[2]); + let (_, payment_hash, payment_secret) = get_payment_preimage_hash(&nodes[2], None, None); // intermediate node failure run_onion_failure_test_with_fail_intercept( @@ -738,7 +738,7 @@ fn test_onion_failure() { Some(route.paths[0].hops[1].short_channel_id), None, ); - let (_, payment_hash, payment_secret) = get_payment_preimage_hash!(nodes[2]); + let (_, payment_hash, payment_secret) = get_payment_preimage_hash(&nodes[2], None, None); // intermediate node failure run_onion_failure_test_with_fail_intercept( @@ -811,7 +811,7 @@ fn test_onion_failure() { Some(route.paths[0].hops[1].short_channel_id), None, ); - let (_, payment_hash, payment_secret) = get_payment_preimage_hash!(nodes[2]); + let (_, payment_hash, payment_secret) = get_payment_preimage_hash(&nodes[2], None, None); // Our immediate peer sent UpdateFailMalformedHTLC because it couldn't understand the onion in // the UpdateAddHTLC that we sent. @@ -1142,7 +1142,7 @@ fn test_onion_failure() { None, None, ); - let (_, payment_hash, payment_secret) = get_payment_preimage_hash!(nodes[2]); + let (_, payment_hash, payment_secret) = get_payment_preimage_hash(&nodes[2], None, None); run_onion_failure_test( "final_expiry_too_soon", @@ -2426,7 +2426,7 @@ fn test_phantom_onion_hmac_failure() { // Get the route. let recv_value_msat = 10_000; let (_, payment_hash, payment_secret) = - get_payment_preimage_hash!(nodes[1], Some(recv_value_msat)); + get_payment_preimage_hash(&nodes[1], Some(recv_value_msat), None); let (route, phantom_scid) = get_phantom_route!(nodes, recv_value_msat, channel); // Route the HTLC through to the destination. @@ -2496,7 +2496,7 @@ fn test_phantom_invalid_onion_payload() { // Get the route. let recv_value_msat = 10_000; let (_, payment_hash, payment_secret) = - get_payment_preimage_hash!(nodes[1], Some(recv_value_msat)); + get_payment_preimage_hash(&nodes[1], Some(recv_value_msat), None); let (route, phantom_scid) = get_phantom_route!(nodes, recv_value_msat, channel); // We'll use the session priv later when constructing an invalid onion packet. @@ -2598,7 +2598,7 @@ fn test_phantom_final_incorrect_cltv_expiry() { // Get the route. let recv_value_msat = 10_000; let (_, payment_hash, payment_secret) = - get_payment_preimage_hash!(nodes[1], Some(recv_value_msat)); + get_payment_preimage_hash(&nodes[1], Some(recv_value_msat), None); let (route, phantom_scid) = get_phantom_route!(nodes, recv_value_msat, channel); // Route the HTLC through to the destination. @@ -2664,7 +2664,7 @@ fn test_phantom_failure_too_low_cltv() { // Get the route. 
let recv_value_msat = 10_000;
let (_, payment_hash, payment_secret) =
- get_payment_preimage_hash!(nodes[1], Some(recv_value_msat));
+ get_payment_preimage_hash(&nodes[1], Some(recv_value_msat), None);
let (mut route, phantom_scid) = get_phantom_route!(nodes, recv_value_msat, channel);
// Modify the route to have a too-low cltv.
@@ -2720,7 +2720,7 @@ fn test_phantom_failure_modified_cltv() {
// Get the route.
let recv_value_msat = 10_000;
let (_, payment_hash, payment_secret) =
- get_payment_preimage_hash!(nodes[1], Some(recv_value_msat));
+ get_payment_preimage_hash(&nodes[1], Some(recv_value_msat), None);
let (mut route, phantom_scid) = get_phantom_route!(nodes, recv_value_msat, channel);
// Route the HTLC through to the destination.
@@ -2775,7 +2775,7 @@ fn test_phantom_failure_expires_too_soon() {
// Get the route.
let recv_value_msat = 10_000;
let (_, payment_hash, payment_secret) =
- get_payment_preimage_hash!(nodes[1], Some(recv_value_msat));
+ get_payment_preimage_hash(&nodes[1], Some(recv_value_msat), None);
let (mut route, phantom_scid) = get_phantom_route!(nodes, recv_value_msat, channel);
// Route the HTLC through to the destination.
@@ -2825,7 +2825,7 @@ fn test_phantom_failure_too_low_recv_amt() {
let recv_amt_msat = 10_000;
let bad_recv_amt_msat = recv_amt_msat - 10;
let (_, payment_hash, payment_secret) =
- get_payment_preimage_hash!(nodes[1], Some(recv_amt_msat));
+ get_payment_preimage_hash(&nodes[1], Some(recv_amt_msat), None);
let (mut route, phantom_scid) = get_phantom_route!(nodes, bad_recv_amt_msat, channel);
// Route the HTLC through to the destination.
@@ -2894,7 +2894,7 @@ fn do_test_phantom_dust_exposure_failure(multiplier_dust_limit: bool) {
// Get the route with an amount exceeding the dust exposure threshold of nodes[1].
let (_, payment_hash, payment_secret) =
- get_payment_preimage_hash!(nodes[1], Some(max_dust_exposure + 1));
+ get_payment_preimage_hash(&nodes[1], Some(max_dust_exposure + 1), None);
let (mut route, phantom_scid) = get_phantom_route!(nodes, max_dust_exposure + 1, channel);
// Route the HTLC through to the destination.
@@ -2944,7 +2944,7 @@ fn test_phantom_failure_reject_payment() {
// Get the route with a too-low amount.
let recv_amt_msat = 10_000;
let (_, payment_hash, payment_secret) =
- get_payment_preimage_hash!(nodes[1], Some(recv_amt_msat));
+ get_payment_preimage_hash(&nodes[1], Some(recv_amt_msat), None);
let (mut route, phantom_scid) = get_phantom_route!(nodes, recv_amt_msat, channel);
// Route the HTLC through to the destination.
diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs
index 0eace2eab08..f0b22135177 100644
--- a/lightning/src/ln/payment_tests.rs
+++ b/lightning/src/ln/payment_tests.rs
@@ -908,7 +908,7 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) {
	},
	_ => panic!("Unexpected event"),
}
- check_closed_broadcast!(nodes[1], false);
+ check_closed_broadcast(&nodes[1], 1, false);
// Now claim the first payment, which should allow nodes[1] to claim the payment on-chain when
// we close in a moment.
@@ -1118,7 +1118,7 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) {
	},
	_ => panic!("Unexpected event"),
}
- check_closed_broadcast!(nodes[1], false);
+ check_closed_broadcast(&nodes[1], 1, false);
// Now fail back the payment from nodes[2] to nodes[1]. This doesn't really matter as the
// previous hop channel is already on-chain, but it makes nodes[2] willing to see additional
@@ -1283,7 +1283,7 @@ fn do_test_dup_htlc_onchain_doesnt_fail_on_reload(
	.node
	.force_close_broadcasting_latest_txn(&chan_id, &node_b_id, message.clone())
	.unwrap();
- check_closed_broadcast!(nodes[0], true);
+ check_closed_broadcast(&nodes[0], 1, true);
check_added_monitors(&nodes[0], 1);
let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message };
check_closed_event(&nodes[0], 1, reason, &[node_b_id], 100000);
@@ -1686,7 +1686,7 @@ fn onchain_failed_probe_yields_event() {
// Node A, which after 6 confirmations should result in a probe failure event.
let bs_txn = get_local_commitment_txn!(nodes[1], chan_id);
confirm_transaction(&nodes[0], &bs_txn[0]);
- check_closed_broadcast!(&nodes[0], true);
+ check_closed_broadcast(&nodes[0], 1, true);
check_added_monitors(&nodes[0], 1);
check_added_monitors(&nodes[0], 0);
@@ -2168,7 +2168,7 @@ fn test_holding_cell_inflight_htlcs() {
let (route, payment_hash_1, _, payment_secret_1) =
	get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
- let (_, payment_hash_2, payment_secret_2) = get_payment_preimage_hash!(nodes[1]);
+ let (_, payment_hash_2, payment_secret_2) = get_payment_preimage_hash(&nodes[1], None, None);
// Queue up two payments - one will be delivered right away, one immediately goes into the
// holding cell as nodes[0] is AwaitingRAA.
@@ -4290,7 +4290,7 @@ fn do_claim_from_closed_chan(fail_payment: bool) {
let chan_bd = create_announced_chan_between_nodes_with_value(&nodes, 1, 3, 1_000_000, 0).2;
create_announced_chan_between_nodes(&nodes, 2, 3);
- let (payment_preimage, hash, payment_secret) = get_payment_preimage_hash!(nodes[3]);
+ let (payment_preimage, hash, payment_secret) = get_payment_preimage_hash(&nodes[3], None, None);
let payment_params = PaymentParameters::from_node_id(node_d_id, TEST_FINAL_CLTV)
	.with_bolt11_features(nodes[1].node.bolt11_invoice_features())
	.unwrap();
@@ -4688,7 +4688,7 @@ fn do_test_custom_tlvs_consistency(
	}
});
- let (preimage, hash, payment_secret) = get_payment_preimage_hash!(&nodes[3]);
+ let (preimage, hash, payment_secret) = get_payment_preimage_hash(&nodes[3], None, None);
let id = PaymentId([42; 32]);
let amt_msat = 15_000_000;
@@ -4832,7 +4832,7 @@ fn do_test_payment_metadata_consistency(do_reload: bool, do_modify: bool) {
// Pay more than half of each channel's max, requiring MPP
let amt_msat = 750_000_000;
let (payment_preimage, payment_hash, payment_secret) =
- get_payment_preimage_hash!(nodes[3], Some(amt_msat));
+ get_payment_preimage_hash(&nodes[3], Some(amt_msat), None);
let payment_id = PaymentId(payment_hash.0);
let payment_metadata = vec![44, 49, 52, 142];
diff --git a/lightning/src/ln/reload_tests.rs b/lightning/src/ln/reload_tests.rs
index 2e9b47725db..cc5eac60206 100644
--- a/lightning/src/ln/reload_tests.rs
+++ b/lightning/src/ln/reload_tests.rs
@@ -713,7 +713,7 @@ fn do_test_data_loss_protect(reconnect_panicing: bool, substantially_old: bool,
	);
	let reason = ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(peer_msg) };
	check_closed_event(&nodes[1], 1, reason, &[nodes[0].node.get_our_node_id()], 1000000);
- 	check_closed_broadcast!(nodes[1], false);
+ 	check_closed_broadcast(&nodes[1], 1, false);
	}
}
@@ -1022,7 +1022,7 @@ fn do_forwarded_payment_no_manager_persistence(use_cs_commitment: bool, claim_ht
check_added_monitors(&nodes[2], 1);
let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message };
check_closed_event(&nodes[2], 1, reason, &[nodes[1].node.get_our_node_id()], 100000);
- check_closed_broadcast!(nodes[2], true);
+ check_closed_broadcast(&nodes[2], 1, true);
let chan_0_monitor_serialized = get_monitor!(nodes[1], chan_id_1).encode();
let chan_1_monitor_serialized = get_monitor!(nodes[1], chan_id_2).encode();
diff --git a/lightning/src/ln/reorg_tests.rs b/lightning/src/ln/reorg_tests.rs
index b39e8d31a75..89d2f2c5ae6 100644
--- a/lightning/src/ln/reorg_tests.rs
+++ b/lightning/src/ln/reorg_tests.rs
@@ -79,7 +79,7 @@ fn do_test_onchain_htlc_reorg(local_commitment: bool, claim: bool) {
// Give node 2 node 1's transactions and get its response (claiming the HTLC instead).
connect_block(&nodes[2], &create_dummy_block(nodes[2].best_block_hash(), 42, node_1_commitment_txn.clone()));
- check_closed_broadcast!(nodes[2], true); // We should get a BroadcastChannelUpdate (and *only* a BroadcstChannelUpdate)
+ check_closed_broadcast(&nodes[2], 1, true); // We should get a BroadcastChannelUpdate (and *only* a BroadcastChannelUpdate)
check_added_monitors(&nodes[2], 1);
check_closed_event(&nodes[2], 1, ClosureReason::CommitmentTxConfirmed, &[nodes[1].node.get_our_node_id()], 100000);
let node_2_commitment_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
@@ -113,7 +113,7 @@ fn do_test_onchain_htlc_reorg(local_commitment: bool, claim: bool) {
	// ...but return node 2's commitment tx (and claim) in case claim is set and we're preparing to reorg
	vec![node_2_commitment_txn.pop().unwrap()]
};
- check_closed_broadcast!(nodes[1], true); // We should get a BroadcastChannelUpdate (and *only* a BroadcstChannelUpdate)
+ check_closed_broadcast(&nodes[1], 1, true); // We should get a BroadcastChannelUpdate (and *only* a BroadcastChannelUpdate)
check_added_monitors(&nodes[1], 1);
check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, &[nodes[2].node.get_our_node_id()], 100000);
// Connect ANTI_REORG_DELAY - 2 blocks, giving us a confirmation count of ANTI_REORG_DELAY - 1.
@@ -212,7 +212,7 @@ fn test_counterparty_revoked_reorg() {
// Now mine A's old commitment transaction, which should close the channel, but take no action
// on any of the HTLCs, at least until we get six confirmations (which we won't get).
mine_transaction(&nodes[1], &revoked_local_txn[0]);
- check_closed_broadcast!(nodes[1], true);
+ check_closed_broadcast(&nodes[1], 1, true);
check_added_monitors(&nodes[1], 1);
check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, &[nodes[0].node.get_our_node_id()], 1000000);
@@ -497,7 +497,7 @@ fn test_set_outpoints_partial_claiming() {
// Connect blocks on node A commitment transaction
mine_transaction(&nodes[0], &remote_txn[0]);
- check_closed_broadcast!(nodes[0], true);
+ check_closed_broadcast(&nodes[0], 1, true);
check_closed_event(&nodes[0], 1, ClosureReason::CommitmentTxConfirmed, &[nodes[1].node.get_our_node_id()], 1000000);
check_added_monitors(&nodes[0], 1);
// Verify node A broadcast tx claiming both HTLCs
@@ -512,7 +512,7 @@ fn test_set_outpoints_partial_claiming() {
// Connect blocks on node B
connect_blocks(&nodes[1], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
- check_closed_broadcast!(nodes[1], true);
+ check_closed_broadcast(&nodes[1], 1, true);
check_closed_events(&nodes[1], &[ExpectedCloseEvent {
	channel_capacity_sats: Some(1_000_000),
	channel_id: Some(chan.2),
@@ -596,11 +596,11 @@ fn do_test_to_remote_after_local_detection(style: ConnectStyle) {
mine_transaction(&nodes[0], &remote_txn_a[0]);
mine_transaction(&nodes[1], &remote_txn_a[0]);
- check_closed_broadcast!(nodes[0], true);
+ check_closed_broadcast(&nodes[0], 1, true);
assert!(nodes[0].node.list_channels().is_empty());
check_added_monitors(&nodes[0], 1);
check_closed_event(&nodes[0], 1, ClosureReason::CommitmentTxConfirmed, &[nodes[1].node.get_our_node_id()], 1000000);
- check_closed_broadcast!(nodes[1], true);
+ check_closed_broadcast(&nodes[1], 1, true);
assert!(nodes[1].node.list_channels().is_empty());
check_added_monitors(&nodes[1], 1);
check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, &[nodes[0].node.get_our_node_id()], 1000000);
diff --git a/lightning/src/ln/shutdown_tests.rs b/lightning/src/ln/shutdown_tests.rs
index 870f00ee9df..474b422b655 100644
--- a/lightning/src/ln/shutdown_tests.rs
+++ b/lightning/src/ln/shutdown_tests.rs
@@ -71,9 +71,9 @@ fn pre_funding_lock_shutdown_test() {
let node_1_closing_signed = get_event_msg!(nodes[1], MessageSendEvent::SendClosingSigned, node_a_id);
nodes[0].node.handle_closing_signed(node_b_id, &node_1_closing_signed);
- let (_, node_0_2nd_closing_signed) = get_closing_signed_broadcast!(nodes[0].node, node_b_id);
+ let (_, node_0_2nd_closing_signed) = get_closing_signed_broadcast(&nodes[0], node_b_id);
nodes[1].node.handle_closing_signed(node_a_id, &node_0_2nd_closing_signed.unwrap());
- let (_, node_1_none) = get_closing_signed_broadcast!(nodes[1].node, node_a_id);
+ let (_, node_1_none) = get_closing_signed_broadcast(&nodes[1], node_a_id);
assert!(node_1_none.is_none());
assert!(nodes[0].node.list_channels().is_empty());
@@ -122,9 +122,9 @@ fn expect_channel_shutdown_state() {
let node_1_closing_signed = get_event_msg!(nodes[1], MessageSendEvent::SendClosingSigned, node_a_id);
nodes[0].node.handle_closing_signed(node_b_id, &node_1_closing_signed);
- let (_, node_0_2nd_closing_signed) = get_closing_signed_broadcast!(nodes[0].node, node_b_id);
+ let (_, node_0_2nd_closing_signed) = get_closing_signed_broadcast(&nodes[0], node_b_id);
nodes[1].node.handle_closing_signed(node_a_id, &node_0_2nd_closing_signed.unwrap());
- let (_, node_1_none) = get_closing_signed_broadcast!(nodes[1].node, node_a_id);
+ let (_, node_1_none) = get_closing_signed_broadcast(&nodes[1], node_a_id);
assert!(node_1_none.is_none());
assert!(nodes[0].node.list_channels().is_empty());
@@ -216,9 +216,9 @@ fn expect_channel_shutdown_state_with_htlc() {
let node_1_closing_signed = get_event_msg!(nodes[1], MessageSendEvent::SendClosingSigned, node_a_id);
nodes[0].node.handle_closing_signed(node_b_id, &node_1_closing_signed);
- let (_, node_0_2nd_closing_signed) = get_closing_signed_broadcast!(nodes[0].node, node_b_id);
+ let (_, node_0_2nd_closing_signed) = get_closing_signed_broadcast(&nodes[0], node_b_id);
nodes[1].node.handle_closing_signed(node_a_id, &node_0_2nd_closing_signed.unwrap());
- let (_, node_1_none) = get_closing_signed_broadcast!(nodes[1].node, node_a_id);
+ let (_, node_1_none) = get_closing_signed_broadcast(&nodes[1], node_a_id);
assert!(node_1_none.is_none());
let reason_a = ClosureReason::LocallyInitiatedCooperativeClosure;
check_closed_event(&nodes[0], 1, reason_a, &[node_b_id], 100000);
@@ -284,9 +284,9 @@ fn test_lnd_bug_6039() {
let node_1_closing_signed = get_event_msg!(nodes[1], MessageSendEvent::SendClosingSigned, node_a_id);
nodes[0].node.handle_closing_signed(node_b_id, &node_1_closing_signed);
- let (_, node_0_2nd_closing_signed) = get_closing_signed_broadcast!(nodes[0].node, node_b_id);
+ let (_, node_0_2nd_closing_signed) = get_closing_signed_broadcast(&nodes[0], node_b_id);
nodes[1].node.handle_closing_signed(node_a_id, &node_0_2nd_closing_signed.unwrap());
- let (_, node_1_none) = get_closing_signed_broadcast!(nodes[1].node, node_a_id);
+ let (_, node_1_none) = get_closing_signed_broadcast(&nodes[1], node_a_id);
assert!(node_1_none.is_none());
let reason_a = ClosureReason::LocallyInitiatedCooperativeClosure;
@@ -361,7 +361,7 @@ fn expect_channel_shutdown_state_with_force_closure() {
	.node
	.force_close_broadcasting_latest_txn(&chan_1.2, &node_a_id, message.clone())
	.unwrap();
- check_closed_broadcast!(nodes[1], true);
+ check_closed_broadcast(&nodes[1], 1, true);
check_added_monitors(&nodes[1], 1);
expect_channel_shutdown_state!(nodes[0], chan_1.2, ChannelShutdownState::NotShuttingDown);
@@ -371,7 +371,7 @@ fn expect_channel_shutdown_state_with_force_closure() {
assert_eq!(node_txn.len(), 1);
check_spends!(node_txn[0], chan_1.3);
mine_transaction(&nodes[0], &node_txn[0]);
- check_closed_broadcast!(nodes[0], true);
+ check_closed_broadcast(&nodes[0], 1, true);
check_added_monitors(&nodes[0], 1);
assert!(nodes[0].node.list_channels().is_empty());
@@ -410,7 +410,7 @@ fn updates_shutdown_wait() {
assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
- let (_, payment_hash, payment_secret) = get_payment_preimage_hash!(nodes[0]);
+ let (_, payment_hash, payment_secret) = get_payment_preimage_hash(&nodes[0], None, None);
let payment_params_1 = PaymentParameters::from_node_id(node_b_id, TEST_FINAL_CLTV)
	.with_bolt11_features(nodes[1].node.bolt11_invoice_features())
	.unwrap();
@@ -483,9 +483,9 @@ fn updates_shutdown_wait() {
let node_1_closing_signed = get_event_msg!(nodes[1], MessageSendEvent::SendClosingSigned, node_a_id);
nodes[0].node.handle_closing_signed(node_b_id, &node_1_closing_signed);
- let (_, node_0_2nd_closing_signed) = get_closing_signed_broadcast!(nodes[0].node, node_b_id);
+ let (_, node_0_2nd_closing_signed) = get_closing_signed_broadcast(&nodes[0], node_b_id);
nodes[1].node.handle_closing_signed(node_a_id, &node_0_2nd_closing_signed.unwrap());
- let (_, node_1_none) = get_closing_signed_broadcast!(nodes[1].node, node_a_id);
+ let (_, node_1_none) = get_closing_signed_broadcast(&nodes[1], node_a_id);
assert!(node_1_none.is_none());
let reason_a = ClosureReason::LocallyInitiatedCooperativeClosure;
@@ -618,9 +618,9 @@ fn do_htlc_fail_async_shutdown(blinded_recipient: bool) {
let node_1_closing_signed = get_event_msg!(nodes[1], MessageSendEvent::SendClosingSigned, node_a_id);
nodes[0].node.handle_closing_signed(node_b_id, &node_1_closing_signed);
- let (_, node_0_2nd_closing_signed) = get_closing_signed_broadcast!(nodes[0].node, node_b_id);
+ let (_, node_0_2nd_closing_signed) = get_closing_signed_broadcast(&nodes[0], node_b_id);
nodes[1].node.handle_closing_signed(node_a_id, &node_0_2nd_closing_signed.unwrap());
- let (_, node_1_none) = get_closing_signed_broadcast!(nodes[1].node, node_a_id);
+ let (_, node_1_none) = get_closing_signed_broadcast(&nodes[1], node_a_id);
assert!(node_1_none.is_none());
assert!(nodes[0].node.list_channels().is_empty());
@@ -750,8 +750,7 @@ fn do_test_shutdown_rebroadcast(recv_count: u8) {
	let node_1_closing_signed = get_event_msg!(nodes[1], MessageSendEvent::SendClosingSigned, node_a_id);
	nodes[0].node.handle_closing_signed(node_b_id, &node_1_closing_signed);
- 	let (_, node_0_2nd_closing_signed) =
- 		get_closing_signed_broadcast!(nodes[0].node, node_b_id);
+ 	let (_, node_0_2nd_closing_signed) = get_closing_signed_broadcast(&nodes[0], node_b_id);
	assert!(node_0_2nd_closing_signed.is_some());
}
@@ -799,10 +798,9 @@ fn do_test_shutdown_rebroadcast(recv_count: u8) {
	let node_1_closing_signed = get_event_msg!(nodes[1], MessageSendEvent::SendClosingSigned, node_a_id);
	nodes[0].node.handle_closing_signed(node_b_id, &node_1_closing_signed);
- 	let (_, node_0_2nd_closing_signed) =
- 		get_closing_signed_broadcast!(nodes[0].node, node_b_id);
+ 	let (_, node_0_2nd_closing_signed) = get_closing_signed_broadcast(&nodes[0], node_b_id);
	nodes[1].node.handle_closing_signed(node_a_id, &node_0_2nd_closing_signed.unwrap());
- 	let (_, node_1_none) = get_closing_signed_broadcast!(nodes[1].node, node_a_id);
+ 	let (_, node_1_none) = get_closing_signed_broadcast(&nodes[1], node_a_id);
	assert!(node_1_none.is_none());
	let reason = ClosureReason::LocallyInitiatedCooperativeClosure;
	check_closed_event(&nodes[1], 1, reason, &[node_a_id], 100000);
@@ -834,7 +832,7 @@ fn do_test_shutdown_rebroadcast(recv_count: u8) {
// get_closing_signed_broadcast usually eats the BroadcastChannelUpdate for us and
// checks it, but in this case nodes[1] didn't ever get a chance to receive a
// closing_signed so we do it ourselves
- check_closed_broadcast!(nodes[1], false);
+ check_closed_broadcast(&nodes[1], 1, false);
check_added_monitors(&nodes[1], 1);
let peer_msg = format!(
	"Got a message for a channel from the wrong node! No such channel_id {} for the passed counterparty_node_id {}",
@@ -1388,7 +1386,7 @@ fn do_test_closing_signed_reinit_timeout(timeout_step: TimeoutStep) {
let node_1_closing_signed = get_event_msg!(nodes[1], MessageSendEvent::SendClosingSigned, node_a_id);
nodes[0].node.handle_closing_signed(node_b_id, &node_1_closing_signed);
- let node_0_2nd_closing_signed = get_closing_signed_broadcast!(nodes[0].node, node_b_id);
+ let node_0_2nd_closing_signed = get_closing_signed_broadcast(&nodes[0], node_b_id);
if timeout_step == TimeoutStep::NoTimeout {
	nodes[1].node.handle_closing_signed(node_a_id, &node_0_2nd_closing_signed.1.unwrap());
	let reason_b = ClosureReason::CounterpartyInitiatedCooperativeClosure;
@@ -1418,7 +1416,7 @@ fn do_test_closing_signed_reinit_timeout(timeout_step: TimeoutStep) {
	|| (txn[0].output[1].script_pubkey.is_p2wpkh()
		&& txn[0].output[0].script_pubkey.is_p2wsh())
);
- check_closed_broadcast!(nodes[1], true);
+ check_closed_broadcast(&nodes[1], 1, true);
check_added_monitors(&nodes[1], 1);
let reason = ClosureReason::ProcessingError {
	err: "closing_signed negotiation failed to finish within two timer ticks".to_string(),
@@ -1480,11 +1478,11 @@ fn do_simple_legacy_shutdown_test(high_initiator_fee: bool) {
}
nodes[1].node.handle_closing_signed(node_a_id, &node_0_closing_signed);
- let (_, mut node_1_closing_signed) = get_closing_signed_broadcast!(nodes[1].node, node_a_id);
+ let (_, mut node_1_closing_signed) = get_closing_signed_broadcast(&nodes[1], node_a_id);
node_1_closing_signed.as_mut().unwrap().fee_range = None;
nodes[0].node.handle_closing_signed(node_b_id, &node_1_closing_signed.unwrap());
- let (_, node_0_none) = get_closing_signed_broadcast!(nodes[0].node, node_b_id);
+ let (_, node_0_none) = get_closing_signed_broadcast(&nodes[0], node_b_id);
assert!(node_0_none.is_none());
let reason_a = ClosureReason::LocallyInitiatedCooperativeClosure;
check_closed_event(&nodes[0], 1, reason_a, &[node_b_id], 100000);
@@ -1528,7 +1526,7 @@ fn simple_target_feerate_shutdown() {
let node_0_closing_signed = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, node_b_id);
nodes[1].node.handle_closing_signed(node_a_id, &node_0_closing_signed);
- let (_, node_1_closing_signed_opt) = get_closing_signed_broadcast!(nodes[1].node, node_a_id);
+ let (_, node_1_closing_signed_opt) = get_closing_signed_broadcast(&nodes[1], node_a_id);
let node_1_closing_signed = node_1_closing_signed_opt.unwrap();
// nodes[1] was passed a target which was larger than the current channel feerate, which it
@@ -1558,7 +1556,7 @@ fn simple_target_feerate_shutdown() {
assert_eq!(node_0_closing_signed.fee_satoshis, node_1_closing_signed.fee_satoshis);
nodes[0].node.handle_closing_signed(node_b_id, &node_1_closing_signed);
- let (_, node_0_none) = get_closing_signed_broadcast!(nodes[0].node, node_b_id);
+ let (_, node_0_none) = get_closing_signed_broadcast(&nodes[0], node_b_id);
assert!(node_0_none.is_none());
let reason_a = ClosureReason::LocallyInitiatedCooperativeClosure;
check_closed_event(&nodes[0], 1, reason_a, &[node_b_id], 100000);
@@ -1660,9 +1658,9 @@ fn do_outbound_update_no_early_closing_signed(use_htlc: bool) {
let bs_closing_signed = get_event_msg!(nodes[1], MessageSendEvent::SendClosingSigned, node_a_id);
nodes[0].node.handle_closing_signed(node_b_id, &bs_closing_signed);
- let (_, as_2nd_closing_signed) = get_closing_signed_broadcast!(nodes[0].node, node_b_id);
+ let (_, as_2nd_closing_signed) = get_closing_signed_broadcast(&nodes[0], node_b_id);
nodes[1].node.handle_closing_signed(node_a_id, &as_2nd_closing_signed.unwrap());
- let (_, node_1_none) = get_closing_signed_broadcast!(nodes[1].node, node_a_id);
+ let (_, node_1_none) = get_closing_signed_broadcast(&nodes[1], node_a_id);
assert!(node_1_none.is_none());
let reason_a = ClosureReason::LocallyInitiatedCooperativeClosure;
diff --git a/lightning/src/ln/update_fee_tests.rs b/lightning/src/ln/update_fee_tests.rs
index 24ae8525450..a31bf18ef38 100644
--- a/lightning/src/ln/update_fee_tests.rs
+++ b/lightning/src/ln/update_fee_tests.rs
@@ -523,7 +523,7 @@ pub fn do_test_update_fee_that_funder_cannot_afford(channel_type_features: Chann
let err = "Funding remote cannot afford proposed new fee";
nodes[1].logger.assert_log_contains("lightning::ln::channelmanager", err, 3);
check_added_monitors(&nodes[1], 1);
- check_closed_broadcast!(nodes[1], true);
+ check_closed_broadcast(&nodes[1], 1, true);
let reason = ClosureReason::ProcessingError { err: err.to_string() };
check_closed_event(&nodes[1], 1, reason, &[node_a_id], channel_value);
}
@@ -620,7 +620,7 @@ pub fn test_update_fee_that_saturates_subs() {
let err = "Funding remote cannot afford proposed new fee";
nodes[1].logger.assert_log_contains("lightning::ln::channelmanager", err, 3);
check_added_monitors(&nodes[1], 1);
- check_closed_broadcast!(nodes[1], true);
+ check_closed_broadcast(&nodes[1], 1, true);
let reason = ClosureReason::ProcessingError { err: err.to_string() };
check_closed_event(&nodes[1], 1, reason, &[node_a_id], 10_000);
}
@@ -1002,7 +1002,7 @@ pub fn accept_busted_but_better_fee() {
		required_feerate_sat_per_kw: 5000,
	};
	check_closed_event(&nodes[1], 1, reason, &[node_a_id], 100000);
- 	check_closed_broadcast!(nodes[1], true);
+ 	check_closed_broadcast(&nodes[1], 1, true);
	check_added_monitors(&nodes[1], 1);
},
_ => panic!("Unexpected event"),
diff --git a/lightning/src/ln/zero_fee_commitment_tests.rs b/lightning/src/ln/zero_fee_commitment_tests.rs
index b7221552603..aae9c8419ba 100644
--- a/lightning/src/ln/zero_fee_commitment_tests.rs
+++ b/lightning/src/ln/zero_fee_commitment_tests.rs
@@ -185,12 +185,12 @@ fn test_htlc_claim_chunking() {
assert_eq!(htlc_claims[1].input.len(), 34);
assert_eq!(htlc_claims[1].output.len(), 24);
- check_closed_broadcast!(nodes[0], true);
+ check_closed_broadcast(&nodes[0], 1, true);
check_added_monitors(&nodes[0], 1);
let reason = ClosureReason::CommitmentTxConfirmed;
check_closed_event(&nodes[0], 1, reason, &[nodes[1].node.get_our_node_id()], CHAN_CAPACITY);
assert!(nodes[0].node.list_channels().is_empty());
- check_closed_broadcast!(nodes[1], true);
+ check_closed_broadcast(&nodes[1], 1, true);
check_added_monitors(&nodes[1], 1);
let reason = ClosureReason::CommitmentTxConfirmed;
check_closed_event(&nodes[1], 1, reason, &[nodes[0].node.get_our_node_id()], CHAN_CAPACITY);
@@ -346,7 +346,7 @@ fn test_anchor_tx_too_big() {
	.force_close_broadcasting_latest_txn(&chan_id, &node_a_id, message.clone())
	.unwrap();
check_added_monitors(&nodes[1], 1);
- check_closed_broadcast!(nodes[1], true);
+ check_closed_broadcast(&nodes[1], 1, true);
let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message };
check_closed_event(&nodes[1], 1, reason, &[node_a_id], CHAN_CAPACITY);
diff --git a/lightning/src/util/persist.rs b/lightning/src/util/persist.rs
index cb4bdeb6a51..46d52915be9 100644
--- a/lightning/src/util/persist.rs
+++ b/lightning/src/util/persist.rs
@@ -1533,7 +1533,6 @@ impl From for UpdateName {
mod tests {
	use super::*;
	use crate::chain::ChannelMonitorUpdateStatus;
- 	use crate::check_closed_broadcast;
	use crate::events::ClosureReason;
	use crate::ln::functional_test_utils::*;
	use crate::ln::msgs::BaseMessageHandler;
@@ -1756,7 +1755,7 @@ mod tests {
let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message };
check_closed_event(&nodes[0], 1, reason, &[node_id_1], 100000);
- check_closed_broadcast!(nodes[0], true);
+ check_closed_broadcast(&nodes[0], 1, true);
check_added_monitors(&nodes[0], 1);
let node_txn = nodes[0].tx_broadcaster.txn_broadcast();
@@ -1765,7 +1764,7 @@ mod tests {
let dummy_block = create_dummy_block(nodes[0].best_block_hash(), 42, txn);
connect_block(&nodes[1], &dummy_block);
- check_closed_broadcast!(nodes[1], true);
+ check_closed_broadcast(&nodes[1], 1, true);
let reason = ClosureReason::CommitmentTxConfirmed;
let node_id_0 = nodes[0].node.get_our_node_id();
check_closed_event(&nodes[1], 1, reason, &[node_id_0], 100000);
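
Note on the pattern: every `check_closed_broadcast!` rewrite above is mechanical. The macro took the node by value and always assumed exactly one `ChannelUpdate` broadcast; the function takes the node by reference and makes that count an explicit argument, with the final `bool` still indicating, as the call sites suggest, whether an accompanying error message is expected. A minimal before/after sketch, assuming the `lightning::ln::functional_test_utils` harness is in scope:

// Before: the count of expected ChannelUpdate broadcasts was implied (always 1).
//     check_closed_broadcast!(nodes[1], true);
// After: node by reference, count explicit, error-message expectation unchanged.
check_closed_broadcast(&nodes[1], 1, true);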
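The `get_payment_preimage_hash!` rewrites follow the same shape: the macro's optional trailing argument becomes two explicit `Option` parameters. Judging by the call sites above, the first `Option` is a minimum receive value in msat and the second a minimum final CLTV expiry delta, both previously defaulted by the macro. A sketch under that assumption:

// No constraints on the generated payment:
let (payment_preimage, payment_hash, payment_secret) =
	get_payment_preimage_hash(&nodes[2], None, None);
// With a minimum receive value, as in the phantom-node tests:
let (_, payment_hash, payment_secret) =
	get_payment_preimage_hash(&nodes[1], Some(recv_value_msat), None);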
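Finally, `get_closing_signed_broadcast!` took the `ChannelManager` (`nodes[x].node`), while the function takes the whole test `Node` by reference; the return shape is unchanged, a pair whose second element is an `Option` holding the next `closing_signed` to relay, `None` once fee negotiation has converged. The typical relay loop, as it recurs throughout `shutdown_tests.rs` above:

nodes[0].node.handle_closing_signed(node_b_id, &node_1_closing_signed);
let (_, node_0_2nd_closing_signed) = get_closing_signed_broadcast(&nodes[0], node_b_id);
nodes[1].node.handle_closing_signed(node_a_id, &node_0_2nd_closing_signed.unwrap());
// Negotiation has converged: nodes[1] produces no counter-offer.
let (_, node_1_none) = get_closing_signed_broadcast(&nodes[1], node_a_id);
assert!(node_1_none.is_none());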