From 8683df9671f502ec70f1e3ff01bc9c88d1cdb2d0 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Fri, 27 Jun 2025 01:34:30 +0000 Subject: [PATCH 1/9] Remove more `.node.get_our_node_id()` cruft in `async_signer_tests` Having tons of `.node.get_our_nod_id()` spewn across all our tests doesn't help readability, so here we replace some further cases of it with local variables in `async_signer_tests.rs`. --- lightning/src/ln/async_signer_tests.rs | 120 ++++++++++--------------- 1 file changed, 46 insertions(+), 74 deletions(-) diff --git a/lightning/src/ln/async_signer_tests.rs b/lightning/src/ln/async_signer_tests.rs index 2e411b42f09..14377c10fae 100644 --- a/lightning/src/ln/async_signer_tests.rs +++ b/lightning/src/ln/async_signer_tests.rs @@ -294,6 +294,9 @@ fn do_test_async_commitment_signature_for_commitment_signed_revoke_and_ack( // Send a payment. let src = &nodes[0]; let dst = &nodes[1]; + let src_node_id = src.node.get_our_node_id(); + let dst_node_id = dst.node.get_our_node_id(); + let (route, our_payment_hash, _our_payment_preimage, our_payment_secret) = get_route_and_payment_hash!(src, dst, 8000000); let recipient_fields = RecipientOnionFields::secret_only(our_payment_secret); @@ -309,52 +312,37 @@ fn do_test_async_commitment_signature_for_commitment_signed_revoke_and_ack( assert_eq!(events.len(), 1); SendEvent::from_event(events.remove(0)) }; - assert_eq!(payment_event.node_id, dst.node.get_our_node_id()); + assert_eq!(payment_event.node_id, dst_node_id); assert_eq!(payment_event.msgs.len(), 1); - dst.node.handle_update_add_htlc(src.node.get_our_node_id(), &payment_event.msgs[0]); + dst.node.handle_update_add_htlc(src_node_id, &payment_event.msgs[0]); // Mark dst's signer as unavailable and handle src's commitment_signed: while dst won't yet have a // `commitment_signed` of its own to offer, it should publish a `revoke_and_ack`. 
- dst.disable_channel_signer_op( - &src.node.get_our_node_id(), - &chan_id, - SignerOp::GetPerCommitmentPoint, - ); - dst.disable_channel_signer_op( - &src.node.get_our_node_id(), - &chan_id, - SignerOp::ReleaseCommitmentSecret, - ); - dst.disable_channel_signer_op( - &src.node.get_our_node_id(), - &chan_id, - SignerOp::SignCounterpartyCommitment, - ); - dst.node.handle_commitment_signed_batch_test( - src.node.get_our_node_id(), - &payment_event.commitment_msg, - ); + dst.disable_channel_signer_op(&src_node_id, &chan_id, SignerOp::GetPerCommitmentPoint); + dst.disable_channel_signer_op(&src_node_id, &chan_id, SignerOp::ReleaseCommitmentSecret); + dst.disable_channel_signer_op(&src_node_id, &chan_id, SignerOp::SignCounterpartyCommitment); + dst.node.handle_commitment_signed_batch_test(src_node_id, &payment_event.commitment_msg); check_added_monitors(dst, 1); let mut enabled_signer_ops = new_hash_set(); log_trace!(dst.logger, "enable_signer_op_order={:?}", enable_signer_op_order); for op in enable_signer_op_order { enabled_signer_ops.insert(op); - dst.enable_channel_signer_op(&src.node.get_our_node_id(), &chan_id, op); - dst.node.signer_unblocked(Some((src.node.get_our_node_id(), chan_id))); + dst.enable_channel_signer_op(&src_node_id, &chan_id, op); + dst.node.signer_unblocked(Some((src_node_id, chan_id))); if enabled_signer_ops.contains(&SignerOp::GetPerCommitmentPoint) && enabled_signer_ops.contains(&SignerOp::ReleaseCommitmentSecret) { // We are just able to send revoke_and_ack if op == SignerOp::GetPerCommitmentPoint || op == SignerOp::ReleaseCommitmentSecret { - get_event_msg!(dst, MessageSendEvent::SendRevokeAndACK, src.node.get_our_node_id()); + get_event_msg!(dst, MessageSendEvent::SendRevokeAndACK, src_node_id); } // We either just sent or previously sent revoke_and_ack // and now we are able to send commitment_signed if op == SignerOp::SignCounterpartyCommitment { - get_htlc_update_msgs(dst, &src.node.get_our_node_id()); + get_htlc_update_msgs(dst, &src_node_id); } } else { // We can't send either message until RAA is unblocked @@ -533,6 +521,9 @@ fn do_test_async_raa_peer_disconnect( // Send a payment. let src = &nodes[0]; let dst = &nodes[1]; + let src_node_id = src.node.get_our_node_id(); + let dst_node_id = dst.node.get_our_node_id(); + let (route, our_payment_hash, _our_payment_preimage, our_payment_secret) = get_route_and_payment_hash!(src, dst, 8000000); let recipient_fields = RecipientOnionFields::secret_only(our_payment_secret); @@ -548,10 +539,10 @@ fn do_test_async_raa_peer_disconnect( assert_eq!(events.len(), 1); SendEvent::from_event(events.remove(0)) }; - assert_eq!(payment_event.node_id, dst.node.get_our_node_id()); + assert_eq!(payment_event.node_id, dst_node_id); assert_eq!(payment_event.msgs.len(), 1); - dst.node.handle_update_add_htlc(src.node.get_our_node_id(), &payment_event.msgs[0]); + dst.node.handle_update_add_htlc(src_node_id, &payment_event.msgs[0]); if test_case == UnblockSignerAcrossDisconnectCase::BeforeMonitorRestored { // Fail to persist the monitor update when handling the commitment_signed. @@ -560,19 +551,16 @@ fn do_test_async_raa_peer_disconnect( // Mark dst's signer as unavailable and handle src's commitment_signed: while dst won't yet have a // `commitment_signed` of its own to offer, it should publish a `revoke_and_ack`. 
- dst.disable_channel_signer_op(&src.node.get_our_node_id(), &chan_id, block_raa_signer_op); - dst.node.handle_commitment_signed_batch_test( - src.node.get_our_node_id(), - &payment_event.commitment_msg, - ); + dst.disable_channel_signer_op(&src_node_id, &chan_id, block_raa_signer_op); + dst.node.handle_commitment_signed_batch_test(src_node_id, &payment_event.commitment_msg); check_added_monitors(dst, 1); let events = dst.node.get_and_clear_pending_msg_events(); assert!(events.is_empty(), "expected no message, got {}", events.len()); // Now disconnect and reconnect the peers. - src.node.peer_disconnected(dst.node.get_our_node_id()); - dst.node.peer_disconnected(src.node.get_our_node_id()); + src.node.peer_disconnected(dst_node_id); + dst.node.peer_disconnected(src_node_id); // do reestablish stuff let init_msg = &msgs::Init { @@ -580,7 +568,7 @@ fn do_test_async_raa_peer_disconnect( networks: None, remote_network_address: None, }; - src.node.peer_connected(dst.node.get_our_node_id(), init_msg, true).unwrap(); + src.node.peer_connected(dst_node_id, init_msg, true).unwrap(); let reestablish_1 = get_chan_reestablish_msgs!(src, dst); assert_eq!(reestablish_1.len(), 1); let init_msg = &msgs::Init { @@ -588,19 +576,19 @@ fn do_test_async_raa_peer_disconnect( networks: None, remote_network_address: None, }; - dst.node.peer_connected(src.node.get_our_node_id(), init_msg, false).unwrap(); + dst.node.peer_connected(src_node_id, init_msg, false).unwrap(); let reestablish_2 = get_chan_reestablish_msgs!(dst, src); assert_eq!(reestablish_2.len(), 1); if test_case == UnblockSignerAcrossDisconnectCase::BeforeReestablish { // Reenable the signer before the reestablish. - dst.enable_channel_signer_op(&src.node.get_our_node_id(), &chan_id, block_raa_signer_op); + dst.enable_channel_signer_op(&src_node_id, &chan_id, block_raa_signer_op); } - dst.node.handle_channel_reestablish(src.node.get_our_node_id(), &reestablish_1[0]); + dst.node.handle_channel_reestablish(src_node_id, &reestablish_1[0]); if test_case == UnblockSignerAcrossDisconnectCase::BeforeMonitorRestored { - dst.enable_channel_signer_op(&src.node.get_our_node_id(), &chan_id, block_raa_signer_op); + dst.enable_channel_signer_op(&src_node_id, &chan_id, block_raa_signer_op); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); let latest_update; { @@ -624,8 +612,8 @@ fn do_test_async_raa_peer_disconnect( } // Mark dst's signer as available and retry: we now expect to see dst's RAA + CS. - dst.enable_channel_signer_op(&src.node.get_our_node_id(), &chan_id, block_raa_signer_op); - dst.node.signer_unblocked(Some((src.node.get_our_node_id(), chan_id))); + dst.enable_channel_signer_op(&src_node_id, &chan_id, block_raa_signer_op); + dst.node.signer_unblocked(Some((src_node_id, chan_id))); if test_case == UnblockSignerAcrossDisconnectCase::AtEnd { let (_, revoke_and_ack, commitment_signed, resend_order) = @@ -681,6 +669,9 @@ fn do_test_async_commitment_signature_peer_disconnect( // Send a payment. 
let src = &nodes[0]; let dst = &nodes[1]; + let src_node_id = src.node.get_our_node_id(); + let dst_node_id = dst.node.get_our_node_id(); + let (route, our_payment_hash, _our_payment_preimage, our_payment_secret) = get_route_and_payment_hash!(src, dst, 8000000); let recipient_fields = RecipientOnionFields::secret_only(our_payment_secret); @@ -696,10 +687,10 @@ fn do_test_async_commitment_signature_peer_disconnect( assert_eq!(events.len(), 1); SendEvent::from_event(events.remove(0)) }; - assert_eq!(payment_event.node_id, dst.node.get_our_node_id()); + assert_eq!(payment_event.node_id, dst_node_id); assert_eq!(payment_event.msgs.len(), 1); - dst.node.handle_update_add_htlc(src.node.get_our_node_id(), &payment_event.msgs[0]); + dst.node.handle_update_add_htlc(src_node_id, &payment_event.msgs[0]); if test_case == UnblockSignerAcrossDisconnectCase::BeforeMonitorRestored { // Fail to persist the monitor update when handling the commitment_signed. @@ -708,24 +699,17 @@ fn do_test_async_commitment_signature_peer_disconnect( // Mark dst's signer as unavailable and handle src's commitment_signed: while dst won't yet have a // `commitment_signed` of its own to offer, it should publish a `revoke_and_ack`. - dst.disable_channel_signer_op( - &src.node.get_our_node_id(), - &chan_id, - SignerOp::SignCounterpartyCommitment, - ); - dst.node.handle_commitment_signed_batch_test( - src.node.get_our_node_id(), - &payment_event.commitment_msg, - ); + dst.disable_channel_signer_op(&src_node_id, &chan_id, SignerOp::SignCounterpartyCommitment); + dst.node.handle_commitment_signed_batch_test(src_node_id, &payment_event.commitment_msg); check_added_monitors(dst, 1); if test_case != UnblockSignerAcrossDisconnectCase::BeforeMonitorRestored { - get_event_msg!(dst, MessageSendEvent::SendRevokeAndACK, src.node.get_our_node_id()); + get_event_msg!(dst, MessageSendEvent::SendRevokeAndACK, src_node_id); } // Now disconnect and reconnect the peers. - src.node.peer_disconnected(dst.node.get_our_node_id()); - dst.node.peer_disconnected(src.node.get_our_node_id()); + src.node.peer_disconnected(dst_node_id); + dst.node.peer_disconnected(src_node_id); // do reestablish stuff let init_msg = &msgs::Init { @@ -733,7 +717,7 @@ fn do_test_async_commitment_signature_peer_disconnect( networks: None, remote_network_address: None, }; - src.node.peer_connected(dst.node.get_our_node_id(), init_msg, true).unwrap(); + src.node.peer_connected(dst_node_id, init_msg, true).unwrap(); let reestablish_1 = get_chan_reestablish_msgs!(src, dst); assert_eq!(reestablish_1.len(), 1); let init_msg = &msgs::Init { @@ -741,27 +725,19 @@ fn do_test_async_commitment_signature_peer_disconnect( networks: None, remote_network_address: None, }; - dst.node.peer_connected(src.node.get_our_node_id(), init_msg, false).unwrap(); + dst.node.peer_connected(src_node_id, init_msg, false).unwrap(); let reestablish_2 = get_chan_reestablish_msgs!(dst, src); assert_eq!(reestablish_2.len(), 1); if test_case == UnblockSignerAcrossDisconnectCase::BeforeReestablish { // Reenable the signer before the reestablish. 
- dst.enable_channel_signer_op( - &src.node.get_our_node_id(), - &chan_id, - SignerOp::SignCounterpartyCommitment, - ); + dst.enable_channel_signer_op(&src_node_id, &chan_id, SignerOp::SignCounterpartyCommitment); } - dst.node.handle_channel_reestablish(src.node.get_our_node_id(), &reestablish_1[0]); + dst.node.handle_channel_reestablish(src_node_id, &reestablish_1[0]); if test_case == UnblockSignerAcrossDisconnectCase::BeforeMonitorRestored { - dst.enable_channel_signer_op( - &src.node.get_our_node_id(), - &chan_id, - SignerOp::SignCounterpartyCommitment, - ); + dst.enable_channel_signer_op(&src_node_id, &chan_id, SignerOp::SignCounterpartyCommitment); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); let latest_update; { @@ -782,12 +758,8 @@ fn do_test_async_commitment_signature_peer_disconnect( } // Mark dst's signer as available and retry: we now expect to see dst's `commitment_signed`. - dst.enable_channel_signer_op( - &src.node.get_our_node_id(), - &chan_id, - SignerOp::SignCounterpartyCommitment, - ); - dst.node.signer_unblocked(Some((src.node.get_our_node_id(), chan_id))); + dst.enable_channel_signer_op(&src_node_id, &chan_id, SignerOp::SignCounterpartyCommitment); + dst.node.signer_unblocked(Some((src_node_id, chan_id))); if test_case == UnblockSignerAcrossDisconnectCase::AtEnd { let (_, _, commitment_signed, _) = handle_chan_reestablish_msgs!(dst, src); From 113eb36998bdf80d509596477dcbe76ccc56030c Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Fri, 27 Jun 2025 02:02:45 +0000 Subject: [PATCH 2/9] Clean up `.node.get_our_node_id()` cruft in `functional_test_utils` Having tons of `.node.get_our_nod_id()` spewn across all our tests doesn't help readability, so here we replace some further cases of it with local variables in `functional_test_utils.rs`. 
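The change here (and in the previous patch) is purely mechanical: fetch each peer's node id once at the top of the test and reuse the short local everywhere below. As a rough, self-contained sketch of the pattern, assuming hypothetical `Node`/`NodeId` stand-ins rather than LDK's actual test harness types:

```rust
// Minimal sketch of the refactor: hoist a repeated getter call into a local.
// `Node` and `NodeId` are hypothetical stand-ins, not LDK types.
#[derive(Clone, Copy, PartialEq, Debug)]
struct NodeId(u8);

struct Node {
    id: NodeId,
}

impl Node {
    fn get_our_node_id(&self) -> NodeId {
        self.id
    }
    fn handle_ping(&self, from: NodeId) {
        println!("{:?} got ping from {:?}", self.id, from);
    }
}

fn main() {
    let src = Node { id: NodeId(0) };
    let dst = Node { id: NodeId(1) };

    // Before: the getter is spelled out at every call site.
    dst.handle_ping(src.get_our_node_id());
    assert_eq!(dst.get_our_node_id(), NodeId(1));

    // After: bind the ids once, then reuse the short locals below.
    let src_node_id = src.get_our_node_id();
    let dst_node_id = dst.get_our_node_id();
    dst.handle_ping(src_node_id);
    assert_eq!(dst_node_id, NodeId(1));
}
```

The diff below applies the same substitution throughout `functional_test_utils.rs`.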
--- lightning/src/ln/functional_test_utils.rs | 250 ++++++++++++---------- 1 file changed, 137 insertions(+), 113 deletions(-) diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index 05967fa6d23..a8321094b1e 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -1463,22 +1463,21 @@ pub fn sign_funding_transaction<'a, 'b, 'c>( node_a: &Node<'a, 'b, 'c>, node_b: &Node<'a, 'b, 'c>, channel_value: u64, expected_temporary_channel_id: ChannelId, ) -> Transaction { + let node_b_id = node_b.node.get_our_node_id(); + let node_a_id = node_a.node.get_our_node_id(); + let (temporary_channel_id, tx, _) = - create_funding_transaction(node_a, &node_b.node.get_our_node_id(), channel_value, 42); + create_funding_transaction(node_a, &node_b_id, channel_value, 42); assert_eq!(temporary_channel_id, expected_temporary_channel_id); assert!(node_a .node - .funding_transaction_generated( - temporary_channel_id, - node_b.node.get_our_node_id(), - tx.clone() - ) + .funding_transaction_generated(temporary_channel_id, node_b_id, tx.clone()) .is_ok()); check_added_monitors!(node_a, 0); let funding_created_msg = - get_event_msg!(node_a, MessageSendEvent::SendFundingCreated, node_b.node.get_our_node_id()); + get_event_msg!(node_a, MessageSendEvent::SendFundingCreated, node_b_id); assert_eq!(funding_created_msg.temporary_channel_id, expected_temporary_channel_id); let channel_id = ChannelId::v1_from_funding_txid( @@ -1486,18 +1485,18 @@ pub fn sign_funding_transaction<'a, 'b, 'c>( funding_created_msg.funding_output_index, ); - node_b.node.handle_funding_created(node_a.node.get_our_node_id(), &funding_created_msg); + node_b.node.handle_funding_created(node_a_id, &funding_created_msg); { let mut added_monitors = node_b.chain_monitor.added_monitors.lock().unwrap(); assert_eq!(added_monitors.len(), 1); assert_eq!(added_monitors[0].0, channel_id); added_monitors.clear(); } - expect_channel_pending_event(&node_b, &node_a.node.get_our_node_id()); + expect_channel_pending_event(&node_b, &node_a_id); node_a.node.handle_funding_signed( - node_b.node.get_our_node_id(), - &get_event_msg!(node_b, MessageSendEvent::SendFundingSigned, node_a.node.get_our_node_id()), + node_b_id, + &get_event_msg!(node_b, MessageSendEvent::SendFundingSigned, node_a_id), ); { let mut added_monitors = node_a.chain_monitor.added_monitors.lock().unwrap(); @@ -1505,7 +1504,7 @@ pub fn sign_funding_transaction<'a, 'b, 'c>( assert_eq!(added_monitors[0].0, channel_id); added_monitors.clear(); } - expect_channel_pending_event(&node_a, &node_b.node.get_our_node_id()); + expect_channel_pending_event(&node_a, &node_b_id); let events_4 = node_a.node.get_and_clear_pending_events(); assert_eq!(events_4.len(), 0); @@ -1519,7 +1518,7 @@ pub fn sign_funding_transaction<'a, 'b, 'c>( .node .funding_transaction_generated( temporary_channel_id, - node_b.node.get_our_node_id(), + node_b_id, tx.clone() ) .is_err()); @@ -1537,10 +1536,13 @@ pub fn open_zero_conf_channel<'a, 'b, 'c, 'd>( let initiator_channels = initiator.node.list_usable_channels().len(); let receiver_channels = receiver.node.list_usable_channels().len(); + let receiver_node_id = receiver.node.get_our_node_id(); + let initiator_node_id = initiator.node.get_our_node_id(); + initiator .node .create_channel( - receiver.node.get_our_node_id(), + receiver_node_id, 100_000, 10_001, 42, @@ -1551,10 +1553,10 @@ pub fn open_zero_conf_channel<'a, 'b, 'c, 'd>( let open_channel = get_event_msg!( initiator, 
MessageSendEvent::SendOpenChannel, - receiver.node.get_our_node_id() + receiver_node_id ); - receiver.node.handle_open_channel(initiator.node.get_our_node_id(), &open_channel); + receiver.node.handle_open_channel(initiator_node_id, &open_channel); let events = receiver.node.get_and_clear_pending_events(); assert_eq!(events.len(), 1); match events[0] { @@ -1563,7 +1565,7 @@ pub fn open_zero_conf_channel<'a, 'b, 'c, 'd>( .node .accept_inbound_channel_from_trusted_peer_0conf( &temporary_channel_id, - &initiator.node.get_our_node_id(), + &initiator_node_id, 0, None, ) @@ -1575,38 +1577,38 @@ pub fn open_zero_conf_channel<'a, 'b, 'c, 'd>( let accept_channel = get_event_msg!( receiver, MessageSendEvent::SendAcceptChannel, - initiator.node.get_our_node_id() + initiator_node_id ); assert_eq!(accept_channel.common_fields.minimum_depth, 0); - initiator.node.handle_accept_channel(receiver.node.get_our_node_id(), &accept_channel); + initiator.node.handle_accept_channel(receiver_node_id, &accept_channel); let (temporary_channel_id, tx, _) = - create_funding_transaction(&initiator, &receiver.node.get_our_node_id(), 100_000, 42); + create_funding_transaction(&initiator, &receiver_node_id, 100_000, 42); initiator .node .funding_transaction_generated( temporary_channel_id, - receiver.node.get_our_node_id(), + receiver_node_id, tx.clone(), ) .unwrap(); let funding_created = get_event_msg!( initiator, MessageSendEvent::SendFundingCreated, - receiver.node.get_our_node_id() + receiver_node_id ); - receiver.node.handle_funding_created(initiator.node.get_our_node_id(), &funding_created); + receiver.node.handle_funding_created(initiator_node_id, &funding_created); check_added_monitors!(receiver, 1); let bs_signed_locked = receiver.node.get_and_clear_pending_msg_events(); assert_eq!(bs_signed_locked.len(), 2); let as_channel_ready; match &bs_signed_locked[0] { MessageSendEvent::SendFundingSigned { node_id, msg } => { - assert_eq!(*node_id, initiator.node.get_our_node_id()); - initiator.node.handle_funding_signed(receiver.node.get_our_node_id(), &msg); - expect_channel_pending_event(&initiator, &receiver.node.get_our_node_id()); - expect_channel_pending_event(&receiver, &initiator.node.get_our_node_id()); + assert_eq!(*node_id, initiator_node_id); + initiator.node.handle_funding_signed(receiver_node_id, &msg); + expect_channel_pending_event(&initiator, &receiver_node_id); + expect_channel_pending_event(&receiver, &initiator_node_id); check_added_monitors!(initiator, 1); assert_eq!(initiator.tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1); @@ -1618,36 +1620,36 @@ pub fn open_zero_conf_channel<'a, 'b, 'c, 'd>( as_channel_ready = get_event_msg!( initiator, MessageSendEvent::SendChannelReady, - receiver.node.get_our_node_id() + receiver_node_id ); }, _ => panic!("Unexpected event"), } match &bs_signed_locked[1] { MessageSendEvent::SendChannelReady { node_id, msg } => { - assert_eq!(*node_id, initiator.node.get_our_node_id()); - initiator.node.handle_channel_ready(receiver.node.get_our_node_id(), &msg); - expect_channel_ready_event(&initiator, &receiver.node.get_our_node_id()); + assert_eq!(*node_id, initiator_node_id); + initiator.node.handle_channel_ready(receiver_node_id, &msg); + expect_channel_ready_event(&initiator, &receiver_node_id); }, _ => panic!("Unexpected event"), } - receiver.node.handle_channel_ready(initiator.node.get_our_node_id(), &as_channel_ready); - expect_channel_ready_event(&receiver, &initiator.node.get_our_node_id()); + receiver.node.handle_channel_ready(initiator_node_id, 
&as_channel_ready); + expect_channel_ready_event(&receiver, &initiator_node_id); let as_channel_update = get_event_msg!( initiator, MessageSendEvent::SendChannelUpdate, - receiver.node.get_our_node_id() + receiver_node_id ); let bs_channel_update = get_event_msg!( receiver, MessageSendEvent::SendChannelUpdate, - initiator.node.get_our_node_id() + initiator_node_id ); - initiator.node.handle_channel_update(receiver.node.get_our_node_id(), &bs_channel_update); - receiver.node.handle_channel_update(initiator.node.get_our_node_id(), &as_channel_update); + initiator.node.handle_channel_update(receiver_node_id, &bs_channel_update); + receiver.node.handle_channel_update(initiator_node_id, &as_channel_update); assert_eq!(initiator.node.list_usable_channels().len(), initiator_channels + 1); assert_eq!(receiver.node.list_usable_channels().len(), receiver_channels + 1); @@ -1658,12 +1660,15 @@ pub fn open_zero_conf_channel<'a, 'b, 'c, 'd>( pub fn exchange_open_accept_chan<'a, 'b, 'c>( node_a: &Node<'a, 'b, 'c>, node_b: &Node<'a, 'b, 'c>, channel_value: u64, push_msat: u64, ) -> ChannelId { + let node_a_id = node_a.node.get_our_node_id(); + let node_b_id = node_b.node.get_our_node_id(); + let create_chan_id = node_a .node - .create_channel(node_b.node.get_our_node_id(), channel_value, push_msat, 42, None, None) + .create_channel(node_b_id, channel_value, push_msat, 42, None, None) .unwrap(); let open_channel_msg = - get_event_msg!(node_a, MessageSendEvent::SendOpenChannel, node_b.node.get_our_node_id()); + get_event_msg!(node_a, MessageSendEvent::SendOpenChannel, node_b_id); assert_eq!(open_channel_msg.common_fields.temporary_channel_id, create_chan_id); assert_eq!( node_a @@ -1675,7 +1680,7 @@ pub fn exchange_open_accept_chan<'a, 'b, 'c>( .user_channel_id, 42 ); - node_b.node.handle_open_channel(node_a.node.get_our_node_id(), &open_channel_msg); + node_b.node.handle_open_channel(node_a_id, &open_channel_msg); if node_b.node.get_current_default_configuration().manually_accept_inbound_channels { let events = node_b.node.get_and_clear_pending_events(); assert_eq!(events.len(), 1); @@ -1688,9 +1693,9 @@ pub fn exchange_open_accept_chan<'a, 'b, 'c>( }; } let accept_channel_msg = - get_event_msg!(node_b, MessageSendEvent::SendAcceptChannel, node_a.node.get_our_node_id()); + get_event_msg!(node_b, MessageSendEvent::SendAcceptChannel, node_a_id); assert_eq!(accept_channel_msg.common_fields.temporary_channel_id, create_chan_id); - node_a.node.handle_accept_channel(node_b.node.get_our_node_id(), &accept_channel_msg); + node_a.node.handle_accept_channel(node_b_id, &accept_channel_msg); assert_ne!( node_b .node @@ -1790,13 +1795,16 @@ pub fn create_chan_between_nodes_with_value_b<'a, 'b, 'c>( node_a: &Node<'a, 'b, 'c>, node_b: &Node<'a, 'b, 'c>, as_funding_msgs: &(msgs::ChannelReady, msgs::AnnouncementSignatures), ) -> (msgs::ChannelAnnouncement, msgs::ChannelUpdate, msgs::ChannelUpdate) { - node_b.node.handle_channel_ready(node_a.node.get_our_node_id(), &as_funding_msgs.0); + let node_a_id = node_a.node.get_our_node_id(); + let node_b_id = node_b.node.get_our_node_id(); + + node_b.node.handle_channel_ready(node_a_id, &as_funding_msgs.0); let bs_announcement_sigs = get_event_msg!( node_b, MessageSendEvent::SendAnnouncementSignatures, - node_a.node.get_our_node_id() + node_a_id ); - node_b.node.handle_announcement_signatures(node_a.node.get_our_node_id(), &as_funding_msgs.1); + node_b.node.handle_announcement_signatures(node_a_id, &as_funding_msgs.1); let events_7 = node_b.node.get_and_clear_pending_msg_events(); 
assert_eq!(events_7.len(), 1); @@ -1809,7 +1817,7 @@ pub fn create_chan_between_nodes_with_value_b<'a, 'b, 'c>( node_a .node - .handle_announcement_signatures(node_b.node.get_our_node_id(), &bs_announcement_sigs); + .handle_announcement_signatures(node_b_id, &bs_announcement_sigs); let events_8 = node_a.node.get_and_clear_pending_msg_events(); assert_eq!(events_8.len(), 1); let as_update = match events_8[0] { @@ -1828,7 +1836,7 @@ pub fn create_chan_between_nodes_with_value_b<'a, 'b, 'c>( *node_a.network_chan_count.borrow_mut() += 1; - expect_channel_ready_event(&node_b, &node_a.node.get_our_node_id()); + expect_channel_ready_event(&node_b, &node_a_id); ((*announcement).clone(), as_update, bs_update) } @@ -1857,12 +1865,15 @@ pub fn create_announced_chan_between_nodes_with_value<'a, 'b, 'c: 'd, 'd>( pub fn create_unannounced_chan_between_nodes_with_value<'a, 'b, 'c, 'd>( nodes: &'a Vec>, a: usize, b: usize, channel_value: u64, push_msat: u64, ) -> (msgs::ChannelReady, Transaction) { + let node_a_id = nodes[a].node.get_our_node_id(); + let node_b_id = nodes[b].node.get_our_node_id(); + let mut no_announce_cfg = nodes[a].node.get_current_default_configuration().clone(); no_announce_cfg.channel_handshake_config.announce_for_forwarding = false; nodes[a] .node .create_channel( - nodes[b].node.get_our_node_id(), + node_b_id, channel_value, push_msat, 42, @@ -1873,32 +1884,32 @@ pub fn create_unannounced_chan_between_nodes_with_value<'a, 'b, 'c, 'd>( let open_channel = get_event_msg!( nodes[a], MessageSendEvent::SendOpenChannel, - nodes[b].node.get_our_node_id() + node_b_id ); - nodes[b].node.handle_open_channel(nodes[a].node.get_our_node_id(), &open_channel); + nodes[b].node.handle_open_channel(node_a_id, &open_channel); let accept_channel = get_event_msg!( nodes[b], MessageSendEvent::SendAcceptChannel, - nodes[a].node.get_our_node_id() + node_a_id ); - nodes[a].node.handle_accept_channel(nodes[b].node.get_our_node_id(), &accept_channel); + nodes[a].node.handle_accept_channel(node_b_id, &accept_channel); let (temporary_channel_id, tx, _) = - create_funding_transaction(&nodes[a], &nodes[b].node.get_our_node_id(), channel_value, 42); + create_funding_transaction(&nodes[a], &node_b_id, channel_value, 42); nodes[a] .node .funding_transaction_generated( temporary_channel_id, - nodes[b].node.get_our_node_id(), + node_b_id, tx.clone(), ) .unwrap(); nodes[b].node.handle_funding_created( - nodes[a].node.get_our_node_id(), + node_a_id, &get_event_msg!( nodes[a], MessageSendEvent::SendFundingCreated, - nodes[b].node.get_our_node_id() + node_b_id ), ); check_added_monitors!(nodes[b], 1); @@ -1906,12 +1917,12 @@ pub fn create_unannounced_chan_between_nodes_with_value<'a, 'b, 'c, 'd>( let cs_funding_signed = get_event_msg!( nodes[b], MessageSendEvent::SendFundingSigned, - nodes[a].node.get_our_node_id() + node_a_id ); - expect_channel_pending_event(&nodes[b], &nodes[a].node.get_our_node_id()); + expect_channel_pending_event(&nodes[b], &node_a_id); - nodes[a].node.handle_funding_signed(nodes[b].node.get_our_node_id(), &cs_funding_signed); - expect_channel_pending_event(&nodes[a], &nodes[b].node.get_our_node_id()); + nodes[a].node.handle_funding_signed(node_b_id, &cs_funding_signed); + expect_channel_pending_event(&nodes[a], &node_b_id); check_added_monitors!(nodes[a], 1); assert_eq!(nodes[a].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1); @@ -1927,32 +1938,32 @@ pub fn create_unannounced_chan_between_nodes_with_value<'a, 'b, 'c, 'd>( let as_channel_ready = get_event_msg!( nodes[a], 
MessageSendEvent::SendChannelReady, - nodes[b].node.get_our_node_id() + node_b_id ); nodes[a].node.handle_channel_ready( - nodes[b].node.get_our_node_id(), + node_b_id, &get_event_msg!( nodes[b], MessageSendEvent::SendChannelReady, - nodes[a].node.get_our_node_id() + node_a_id ), ); - expect_channel_ready_event(&nodes[a], &nodes[b].node.get_our_node_id()); + expect_channel_ready_event(&nodes[a], &node_b_id); let as_update = get_event_msg!( nodes[a], MessageSendEvent::SendChannelUpdate, - nodes[b].node.get_our_node_id() + node_b_id ); - nodes[b].node.handle_channel_ready(nodes[a].node.get_our_node_id(), &as_channel_ready); - expect_channel_ready_event(&nodes[b], &nodes[a].node.get_our_node_id()); + nodes[b].node.handle_channel_ready(node_a_id, &as_channel_ready); + expect_channel_ready_event(&nodes[b], &node_a_id); let bs_update = get_event_msg!( nodes[b], MessageSendEvent::SendChannelUpdate, - nodes[a].node.get_our_node_id() + node_a_id ); - nodes[a].node.handle_channel_update(nodes[b].node.get_our_node_id(), &bs_update); - nodes[b].node.handle_channel_update(nodes[a].node.get_our_node_id(), &as_update); + nodes[a].node.handle_channel_update(node_b_id, &bs_update); + nodes[b].node.handle_channel_update(node_a_id, &as_update); let mut found_a = false; for chan in nodes[a].node.list_usable_channels() { @@ -2633,25 +2644,28 @@ pub fn commitment_signed_dance_through_cp_raa( pub fn do_main_commitment_signed_dance( node_a: &Node<'_, '_, '_>, node_b: &Node<'_, '_, '_>, fail_backwards: bool, ) -> (Option, msgs::RevokeAndACK) { + let node_a_id = node_a.node.get_our_node_id(); + let node_b_id = node_b.node.get_our_node_id(); + let (as_revoke_and_ack, as_commitment_signed) = - get_revoke_commit_msgs!(node_a, node_b.node.get_our_node_id()); + get_revoke_commit_msgs!(node_a, node_b_id); check_added_monitors!(node_b, 0); assert!(node_b.node.get_and_clear_pending_msg_events().is_empty()); - node_b.node.handle_revoke_and_ack(node_a.node.get_our_node_id(), &as_revoke_and_ack); + node_b.node.handle_revoke_and_ack(node_a_id, &as_revoke_and_ack); assert!(node_b.node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(node_b, 1); node_b .node - .handle_commitment_signed_batch_test(node_a.node.get_our_node_id(), &as_commitment_signed); + .handle_commitment_signed_batch_test(node_a_id, &as_commitment_signed); let (bs_revoke_and_ack, extra_msg_option) = { let mut events = node_b.node.get_and_clear_pending_msg_events(); assert!(events.len() <= 2); let node_a_event = - remove_first_msg_event_to_node(&node_a.node.get_our_node_id(), &mut events); + remove_first_msg_event_to_node(&node_a_id, &mut events); ( match node_a_event { MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => { - assert_eq!(*node_id, node_a.node.get_our_node_id()); + assert_eq!(*node_id, node_a_id); (*msg).clone() }, _ => panic!("Unexpected event"), @@ -2676,17 +2690,19 @@ pub fn do_commitment_signed_dance( node_a: &Node<'_, '_, '_>, node_b: &Node<'_, '_, '_>, commitment_signed: &Vec, fail_backwards: bool, skip_last_step: bool, ) { + let node_b_id = node_b.node.get_our_node_id(); + check_added_monitors!(node_a, 0); assert!(node_a.node.get_and_clear_pending_msg_events().is_empty()); node_a .node - .handle_commitment_signed_batch_test(node_b.node.get_our_node_id(), commitment_signed); + .handle_commitment_signed_batch_test(node_b_id, commitment_signed); check_added_monitors!(node_a, 1); // If this commitment signed dance was due to a claim, don't check for an RAA monitor update. 
let channel_id = commitment_signed[0].channel_id; let got_claim = - node_a.node.test_raa_monitor_updates_held(node_b.node.get_our_node_id(), channel_id); + node_a.node.test_raa_monitor_updates_held(node_b_id, channel_id); if fail_backwards { assert!(!got_claim); } @@ -2700,7 +2716,7 @@ pub fn do_commitment_signed_dance( expect_pending_htlcs_forwardable_and_htlc_handling_failed!( node_a, [crate::events::HTLCHandlingFailureType::Forward { - node_id: Some(node_b.node.get_our_node_id()), + node_id: Some(node_b_id), channel_id }] ); @@ -2714,7 +2730,7 @@ pub fn do_commitment_signed_dance( number_of_msg_events += cp_pending_msg_events.len(); if cp_pending_msg_events.len() == 1 { if let MessageSendEvent::UpdateHTLCs { .. } = cp_pending_msg_events[0] { - assert_ne!(*cp_id, node_b.node.get_our_node_id()); + assert_ne!(*cp_id, node_b_id); } else { panic!("Unexpected event"); } @@ -4689,6 +4705,9 @@ pub fn handle_announce_close_broadcast_events<'a, 'b, 'c>( dummy_connected = true } + let node_a_id = nodes[a].node.get_our_node_id(); + let node_b_id = nodes[b].node.get_our_node_id(); + let events_1 = nodes[a].node.get_and_clear_pending_msg_events(); assert_eq!(events_1.len(), 2); let as_update = match events_1[1] { @@ -4700,20 +4719,20 @@ pub fn handle_announce_close_broadcast_events<'a, 'b, 'c>( node_id, action: msgs::ErrorAction::SendErrorMessage { ref msg }, } => { - assert_eq!(node_id, nodes[b].node.get_our_node_id()); + assert_eq!(node_id, node_b_id); assert_eq!(msg.data, expected_error); if needs_err_handle { - nodes[b].node.handle_error(nodes[a].node.get_our_node_id(), msg); + nodes[b].node.handle_error(node_a_id, msg); } }, MessageSendEvent::HandleError { node_id, action: msgs::ErrorAction::DisconnectPeer { ref msg }, } => { - assert_eq!(node_id, nodes[b].node.get_our_node_id()); + assert_eq!(node_id, node_b_id); assert_eq!(msg.as_ref().unwrap().data, expected_error); if needs_err_handle { - nodes[b].node.handle_error(nodes[a].node.get_our_node_id(), msg.as_ref().unwrap()); + nodes[b].node.handle_error(node_a_id, msg.as_ref().unwrap()); } }, _ => panic!("Unexpected event"), @@ -4738,14 +4757,14 @@ pub fn handle_announce_close_broadcast_events<'a, 'b, 'c>( node_id, action: msgs::ErrorAction::SendErrorMessage { ref msg }, } => { - assert_eq!(node_id, nodes[a].node.get_our_node_id()); + assert_eq!(node_id, node_a_id); assert_eq!(msg.data, expected_error); }, MessageSendEvent::HandleError { node_id, action: msgs::ErrorAction::DisconnectPeer { ref msg }, } => { - assert_eq!(node_id, nodes[a].node.get_our_node_id()); + assert_eq!(node_id, node_a_id); assert_eq!(msg.as_ref().unwrap().data, expected_error); }, _ => panic!("Unexpected event"), @@ -4754,10 +4773,9 @@ pub fn handle_announce_close_broadcast_events<'a, 'b, 'c>( if dummy_connected { disconnect_dummy_node(&nodes[b]); } - let node_id_a = nodes[a].node.get_our_node_id(); for node in nodes { - node.gossip_sync.handle_channel_update(Some(node_id_a), &as_update).unwrap(); - node.gossip_sync.handle_channel_update(Some(node_id_a), &bs_update).unwrap(); + node.gossip_sync.handle_channel_update(Some(node_a_id), &as_update).unwrap(); + node.gossip_sync.handle_channel_update(Some(node_a_id), &bs_update).unwrap(); } } @@ -4955,6 +4973,9 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) { let reestablish_1 = get_chan_reestablish_msgs!(node_a, node_b); let reestablish_2 = get_chan_reestablish_msgs!(node_b, node_a); + let node_a_id = node_a.node.get_our_node_id(); + let node_b_id = node_b.node.get_our_node_id(); + if 
send_channel_ready.0 { // If a expects a channel_ready, it better not think it has received a revoke_and_ack // from b @@ -4986,7 +5007,7 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) { let mut resp_1 = Vec::new(); for msg in reestablish_1 { - node_b.node.handle_channel_reestablish(node_a.node.get_our_node_id(), &msg); + node_b.node.handle_channel_reestablish(node_a_id, &msg); resp_1.push(handle_chan_reestablish_msgs!(node_b, node_a)); } if pending_cell_htlc_claims.0 != 0 || pending_cell_htlc_fails.0 != 0 { @@ -4997,7 +5018,7 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) { let mut resp_2 = Vec::new(); for msg in reestablish_2 { - node_a.node.handle_channel_reestablish(node_b.node.get_our_node_id(), &msg); + node_a.node.handle_channel_reestablish(node_b_id, &msg); resp_2.push(handle_chan_reestablish_msgs!(node_a, node_b)); } if pending_cell_htlc_claims.1 != 0 || pending_cell_htlc_fails.1 != 0 { @@ -5022,7 +5043,7 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) { for chan_msgs in resp_1.drain(..) { if send_channel_ready.0 { - node_a.node.handle_channel_ready(node_b.node.get_our_node_id(), &chan_msgs.0.unwrap()); + node_a.node.handle_channel_ready(node_b_id, &chan_msgs.0.unwrap()); let announcement_event = node_a.node.get_and_clear_pending_msg_events(); if !announcement_event.is_empty() { assert_eq!(announcement_event.len(), 1); @@ -5037,7 +5058,7 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) { } if pending_raa.0 { assert!(chan_msgs.3 == RAACommitmentOrder::RevokeAndACKFirst); - node_a.node.handle_revoke_and_ack(node_b.node.get_our_node_id(), &chan_msgs.1.unwrap()); + node_a.node.handle_revoke_and_ack(node_b_id, &chan_msgs.1.unwrap()); assert!(node_a.node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(node_a, 1); } else { @@ -5062,15 +5083,15 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) { ); assert!(commitment_update.update_fail_malformed_htlcs.is_empty()); for update_add in commitment_update.update_add_htlcs { - node_a.node.handle_update_add_htlc(node_b.node.get_our_node_id(), &update_add); + node_a.node.handle_update_add_htlc(node_b_id, &update_add); } for update_fulfill in commitment_update.update_fulfill_htlcs { node_a .node - .handle_update_fulfill_htlc(node_b.node.get_our_node_id(), &update_fulfill); + .handle_update_fulfill_htlc(node_b_id, &update_fulfill); } for update_fail in commitment_update.update_fail_htlcs { - node_a.node.handle_update_fail_htlc(node_b.node.get_our_node_id(), &update_fail); + node_a.node.handle_update_fail_htlc(node_b_id, &update_fail); } if !pending_responding_commitment_signed.0 { @@ -5082,19 +5103,19 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) { ); } else { node_a.node.handle_commitment_signed_batch_test( - node_b.node.get_our_node_id(), + node_b_id, &commitment_update.commitment_signed, ); check_added_monitors!(node_a, 1); let as_revoke_and_ack = get_event_msg!( node_a, MessageSendEvent::SendRevokeAndACK, - node_b.node.get_our_node_id() + node_b_id ); // No commitment_signed so get_event_msg's assert(len == 1) passes node_b .node - .handle_revoke_and_ack(node_a.node.get_our_node_id(), &as_revoke_and_ack); + .handle_revoke_and_ack(node_a_id, &as_revoke_and_ack); assert!(node_b.node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!( node_b, @@ -5108,7 +5129,7 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: 
ReconnectArgs<'a, 'b, 'c, 'd>) { for chan_msgs in resp_2.drain(..) { if send_channel_ready.1 { - node_b.node.handle_channel_ready(node_a.node.get_our_node_id(), &chan_msgs.0.unwrap()); + node_b.node.handle_channel_ready(node_a_id, &chan_msgs.0.unwrap()); let announcement_event = node_b.node.get_and_clear_pending_msg_events(); if !announcement_event.is_empty() { assert_eq!(announcement_event.len(), 1); @@ -5123,7 +5144,7 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) { } if pending_raa.1 { assert!(chan_msgs.3 == RAACommitmentOrder::RevokeAndACKFirst); - node_b.node.handle_revoke_and_ack(node_a.node.get_our_node_id(), &chan_msgs.1.unwrap()); + node_b.node.handle_revoke_and_ack(node_a_id, &chan_msgs.1.unwrap()); assert!(node_b.node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(node_b, 1); } else { @@ -5148,15 +5169,15 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) { ); assert!(commitment_update.update_fail_malformed_htlcs.is_empty()); for update_add in commitment_update.update_add_htlcs { - node_b.node.handle_update_add_htlc(node_a.node.get_our_node_id(), &update_add); + node_b.node.handle_update_add_htlc(node_a_id, &update_add); } for update_fulfill in commitment_update.update_fulfill_htlcs { node_b .node - .handle_update_fulfill_htlc(node_a.node.get_our_node_id(), &update_fulfill); + .handle_update_fulfill_htlc(node_a_id, &update_fulfill); } for update_fail in commitment_update.update_fail_htlcs { - node_b.node.handle_update_fail_htlc(node_a.node.get_our_node_id(), &update_fail); + node_b.node.handle_update_fail_htlc(node_a_id, &update_fail); } if !pending_responding_commitment_signed.1 { @@ -5168,19 +5189,19 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) { ); } else { node_b.node.handle_commitment_signed_batch_test( - node_a.node.get_our_node_id(), + node_a_id, &commitment_update.commitment_signed, ); check_added_monitors!(node_b, 1); let bs_revoke_and_ack = get_event_msg!( node_b, MessageSendEvent::SendRevokeAndACK, - node_a.node.get_our_node_id() + node_a_id ); // No commitment_signed so get_event_msg's assert(len == 1) passes node_a .node - .handle_revoke_and_ack(node_b.node.get_our_node_id(), &bs_revoke_and_ack); + .handle_revoke_and_ack(node_b_id, &bs_revoke_and_ack); assert!(node_a.node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!( node_a, @@ -5206,11 +5227,14 @@ pub fn create_batch_channel_funding<'a, 'b, 'c>( for (other_node, channel_value_satoshis, push_msat, user_channel_id, override_config) in params { + let funding_node_id = funding_node.node.get_our_node_id(); + let other_node_id = other_node.node.get_our_node_id(); + // Initialize channel opening. 
let temp_chan_id = funding_node .node .create_channel( - other_node.node.get_our_node_id(), + other_node_id, *channel_value_satoshis, *push_msat, *user_channel_id, @@ -5221,17 +5245,17 @@ pub fn create_batch_channel_funding<'a, 'b, 'c>( let open_channel_msg = get_event_msg!( funding_node, MessageSendEvent::SendOpenChannel, - other_node.node.get_our_node_id() + other_node_id ); - other_node.node.handle_open_channel(funding_node.node.get_our_node_id(), &open_channel_msg); + other_node.node.handle_open_channel(funding_node_id, &open_channel_msg); let accept_channel_msg = get_event_msg!( other_node, MessageSendEvent::SendAcceptChannel, - funding_node.node.get_our_node_id() + funding_node_id ); funding_node .node - .handle_accept_channel(other_node.node.get_our_node_id(), &accept_channel_msg); + .handle_accept_channel(other_node_id, &accept_channel_msg); // Create the corresponding funding output. let events = funding_node.node.get_and_clear_pending_events(); @@ -5245,7 +5269,7 @@ pub fn create_batch_channel_funding<'a, 'b, 'c>( user_channel_id: ref event_user_channel_id, } => { assert_eq!(temporary_channel_id, &temp_chan_id); - assert_eq!(counterparty_node_id, &other_node.node.get_our_node_id()); + assert_eq!(counterparty_node_id, &other_node_id); assert_eq!(channel_value_satoshis, event_channel_value_satoshis); assert_eq!(user_channel_id, event_user_channel_id); tx_outs.push(TxOut { @@ -5255,7 +5279,7 @@ pub fn create_batch_channel_funding<'a, 'b, 'c>( }, _ => panic!("Unexpected event"), }; - temp_chan_ids.push((temp_chan_id, other_node.node.get_our_node_id())); + temp_chan_ids.push((temp_chan_id, other_node_id)); } // Compose the batch funding transaction and give it to the ChannelManager. From 6bafb2ab1bae4b3ef3a46685592c422ea7e8bc6a Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Fri, 27 Jun 2025 02:07:29 +0000 Subject: [PATCH 3/9] Clean up `handle_*(..., get_event_msg!(...))` cruft Calling a macro within a method expression is somewhat annoying to read, and due to `rustfmt` expansion, calling a `handle_*` method with a `get_event_msg` macro as the second argument tends to be substantially less readable than just breaking the mess out into its own variable. 
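To make the motivation concrete, a minimal, self-contained sketch of the style change follows, assuming a hypothetical `get_msg!` macro and `Peer` type as stand-ins for `get_event_msg!` and the node handles; it only illustrates the pattern and is not LDK code.

```rust
// Hypothetical stand-ins used purely to illustrate the style change.
macro_rules! get_msg {
    ($queue:expr) => {
        $queue.pop().expect("expected exactly one pending message")
    };
}

struct Peer {
    inbox: Vec<String>,
}

impl Peer {
    fn handle_msg(&mut self, msg: &str) {
        self.inbox.push(msg.to_owned());
    }
}

fn main() {
    let mut outbound = vec![String::from("funding_signed")];
    let mut peer = Peer { inbox: Vec::new() };

    // Nested form (what the patch moves away from):
    //     peer.handle_msg(&get_msg!(outbound));
    // Once the macro arguments get long, rustfmt spreads this over many lines.

    // Broken-out form (what the patch moves towards): bind the macro result
    // to a descriptive local, then pass the local.
    let msg = get_msg!(outbound);
    peer.handle_msg(&msg);

    assert_eq!(peer.inbox, ["funding_signed"]);
}
```

The hunks below break the nested `get_event_msg!` calls out in this way.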
--- lightning/src/ln/functional_test_utils.rs | 72 ++++++++--------------- 1 file changed, 26 insertions(+), 46 deletions(-) diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index a8321094b1e..ecb06c625e8 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -1494,10 +1494,8 @@ pub fn sign_funding_transaction<'a, 'b, 'c>( } expect_channel_pending_event(&node_b, &node_a_id); - node_a.node.handle_funding_signed( - node_b_id, - &get_event_msg!(node_b, MessageSendEvent::SendFundingSigned, node_a_id), - ); + let bs_funding_signed = get_event_msg!(node_b, MessageSendEvent::SendFundingSigned, node_a_id); + node_a.node.handle_funding_signed(node_b_id, &bs_funding_signed); { let mut added_monitors = node_a.chain_monitor.added_monitors.lock().unwrap(); assert_eq!(added_monitors.len(), 1); @@ -1723,14 +1721,12 @@ pub fn create_chan_between_nodes_with_value_confirm_first<'a, 'b, 'c, 'd>( ) { confirm_transaction_at(node_conf, tx, conf_height); connect_blocks(node_conf, CHAN_CONFIRM_DEPTH - 1); - node_recv.node.handle_channel_ready( - node_conf.node.get_our_node_id(), - &get_event_msg!( - node_conf, - MessageSendEvent::SendChannelReady, - node_recv.node.get_our_node_id() - ), + let channel_ready = get_event_msg!( + node_conf, + MessageSendEvent::SendChannelReady, + node_recv.node.get_our_node_id() ); + node_recv.node.handle_channel_ready(node_conf.node.get_our_node_id(), &channel_ready); } pub fn create_chan_between_nodes_with_value_confirm_second<'a, 'b, 'c>( @@ -1904,14 +1900,9 @@ pub fn create_unannounced_chan_between_nodes_with_value<'a, 'b, 'c, 'd>( tx.clone(), ) .unwrap(); - nodes[b].node.handle_funding_created( - node_a_id, - &get_event_msg!( - nodes[a], - MessageSendEvent::SendFundingCreated, - node_b_id - ), - ); + let as_funding_created = + get_event_msg!(nodes[a], MessageSendEvent::SendFundingCreated, node_b_id); + nodes[b].node.handle_funding_created(node_a_id, &as_funding_created); check_added_monitors!(nodes[b], 1); let cs_funding_signed = get_event_msg!( @@ -1940,14 +1931,8 @@ pub fn create_unannounced_chan_between_nodes_with_value<'a, 'b, 'c, 'd>( MessageSendEvent::SendChannelReady, node_b_id ); - nodes[a].node.handle_channel_ready( - node_b_id, - &get_event_msg!( - nodes[b], - MessageSendEvent::SendChannelReady, - node_a_id - ), - ); + let bs_channel_ready = get_event_msg!(nodes[b], MessageSendEvent::SendChannelReady, node_a_id); + nodes[a].node.handle_channel_ready(node_b_id, &bs_channel_ready); expect_channel_ready_event(&nodes[a], &node_b_id); let as_update = get_event_msg!( nodes[a], @@ -2317,10 +2302,9 @@ pub fn close_channel<'a, 'b, 'c>( let (tx_a, tx_b); node_a.close_channel(channel_id, &node_b.get_our_node_id()).unwrap(); - node_b.handle_shutdown( - node_a.get_our_node_id(), - &get_event_msg!(struct_a, MessageSendEvent::SendShutdown, node_b.get_our_node_id()), - ); + let as_shutdown = + get_event_msg!(struct_a, MessageSendEvent::SendShutdown, node_b.get_our_node_id()); + node_b.handle_shutdown(node_a.get_our_node_id(), &as_shutdown); let events_1 = node_b.get_and_clear_pending_msg_events(); assert!(events_1.len() >= 1); @@ -2350,14 +2334,12 @@ pub fn close_channel<'a, 'b, 'c>( assert!(node_a.get_and_clear_pending_msg_events().is_empty()); node_a.handle_closing_signed(node_b.get_our_node_id(), &closing_signed_b.unwrap()); - node_b.handle_closing_signed( - node_a.get_our_node_id(), - &get_event_msg!( - struct_a, - MessageSendEvent::SendClosingSigned, - node_b.get_our_node_id() - 
), + let as_closing_signed = get_event_msg!( + struct_a, + MessageSendEvent::SendClosingSigned, + node_b.get_our_node_id() ); + node_b.handle_closing_signed(node_a.get_our_node_id(), &as_closing_signed); assert_eq!(broadcaster_b.txn_broadcasted.lock().unwrap().len(), 1); tx_b = broadcaster_b.txn_broadcasted.lock().unwrap().remove(0); let (bs_update, closing_signed_b) = @@ -2372,16 +2354,14 @@ pub fn close_channel<'a, 'b, 'c>( } else { let closing_signed_a = get_event_msg!(struct_a, MessageSendEvent::SendClosingSigned, node_b.get_our_node_id()); - node_b.handle_closing_signed(node_a.get_our_node_id(), &closing_signed_a); - node_a.handle_closing_signed( - node_b.get_our_node_id(), - &get_event_msg!( - struct_b, - MessageSendEvent::SendClosingSigned, - node_a.get_our_node_id() - ), + + let closing_signed_b =get_event_msg!( + struct_b, + MessageSendEvent::SendClosingSigned, + node_a.get_our_node_id() ); + node_a.handle_closing_signed(node_b.get_our_node_id(), &closing_signed_b); assert_eq!(broadcaster_a.txn_broadcasted.lock().unwrap().len(), 1); tx_a = broadcaster_a.txn_broadcasted.lock().unwrap().remove(0); From 222edc27cd60234a855634327025d63745eec484 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Fri, 27 Jun 2025 02:16:35 +0000 Subject: [PATCH 4/9] Clean up unreadable assertions in `expect_payment_forwarded` In some cases `rustfmt` makes long boolean expressions fairly difficult to parse. Here we clean up a specific case in `functional_test_utils.rs` which was recently exposed. --- lightning/src/ln/functional_test_utils.rs | 31 ++++++++++++----------- 1 file changed, 16 insertions(+), 15 deletions(-) diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index ecb06c625e8..575b7d50867 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -3056,12 +3056,14 @@ pub fn expect_payment_forwarded>( // overpaid amount. assert!(skimmed_fee_msat == expected_extra_fees_msat); if !upstream_force_closed { + assert_eq!(prev_node.node().get_our_node_id(), prev_node_id.unwrap()); // Is the event prev_channel_id in one of the channels between the two nodes? - assert!(node.node().list_channels().iter().any(|x| x.counterparty.node_id - == prev_node.node().get_our_node_id() - && prev_node.node().get_our_node_id() == prev_node_id.unwrap() - && x.channel_id == prev_channel_id.unwrap() - && x.user_channel_id == prev_user_channel_id.unwrap())); + let node_chans = node.node().list_channels(); + assert!(node_chans + .iter() + .any(|x| x.counterparty.node_id == prev_node_id.unwrap() + && x.channel_id == prev_channel_id.unwrap() + && x.user_channel_id == prev_user_channel_id.unwrap())); } // We check for force closures since a force closed channel is removed from the // node's channel list @@ -3070,20 +3072,19 @@ pub fn expect_payment_forwarded>( // onchain transaction, just as the `total_fee_earned_msat` field. Rather than // introducing yet another variable, we use the latter's state as a flag to detect // this and only check if it's `Some`. 
+ assert_eq!(next_node.node().get_our_node_id(), next_node_id.unwrap()); + let node_chans = node.node().list_channels(); if total_fee_earned_msat.is_none() { - assert!(node - .node() - .list_channels() + assert!(node_chans .iter() - .any(|x| x.counterparty.node_id == next_node.node().get_our_node_id() - && next_node.node().get_our_node_id() == next_node_id.unwrap() + .any(|x| x.counterparty.node_id == next_node_id.unwrap() && x.channel_id == next_channel_id.unwrap())); } else { - assert!(node.node().list_channels().iter().any(|x| x.counterparty.node_id - == next_node.node().get_our_node_id() - && next_node.node().get_our_node_id() == next_node_id.unwrap() - && x.channel_id == next_channel_id.unwrap() - && x.user_channel_id == next_user_channel_id.unwrap())); + assert!(node_chans + .iter() + .any(|x| x.counterparty.node_id == next_node_id.unwrap() + && x.channel_id == next_channel_id.unwrap() + && x.user_channel_id == next_user_channel_id.unwrap())); } } assert_eq!(claim_from_onchain_tx, downstream_force_closed); From ca8cbb1f7f78f5a38f581e3d135f8a799b2733b3 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Fri, 27 Jun 2025 02:38:29 +0000 Subject: [PATCH 5/9] Clean up rustfmt `check_closed_event` expansion in `shutdown_tests` `rustfmt` loves to make expressions vertical by putting each method parameter on its own line. In cases where each method parameter is actually doing something, this can be fine, but in some cases we have a few method parameters that are straightforward and shouldn't be the readers focus, mixed with one or two parameters which are key. Here, we clean up a regular instance of this in calls to `check_closed_event` in `shutdown_tests.rs`. It breaks out the `ClosureReason` (the thing that usually is being tested for) from the remaining parameters, which are fairly straightforward and not the important points, leaving the rest of `check_closed_event` on one line. This also fixes some instances where `rustfmt` refused to format code entirely. 
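A small, self-contained sketch of the before/after shape may help; the `CloseReason` enum and `check_closed_event` helper below are hypothetical stand-ins rather than LDK's `ClosureReason` and `check_closed_event!` macro.

```rust
// Hypothetical stand-ins, for illustration of the call-site style only.
#[derive(Debug, PartialEq, Clone)]
enum CloseReason {
    LocallyInitiated,
    CounterpartyInitiated,
}

// Checks that the single recorded closure used the expected reason/capacity.
fn check_closed_event(events: &[(CloseReason, u64)], expected: CloseReason, capacity: u64) {
    assert_eq!(events.len(), 1);
    assert_eq!(events[0], (expected, capacity));
}

fn main() {
    // Inline form (what the patch moves away from): with the long enum path
    // passed inline, rustfmt puts every argument on its own line and the
    // reason, the value actually under test, is easy to miss.
    let events_a = vec![(CloseReason::LocallyInitiated, 100_000)];
    check_closed_event(&events_a, CloseReason::LocallyInitiated, 100_000);

    // Broken-out form (what the patch moves towards): name the reason first,
    // so it stands out and the check itself stays on a single line.
    let reason = CloseReason::CounterpartyInitiated;
    let events_b = vec![(reason.clone(), 100_000)];
    check_closed_event(&events_b, reason, 100_000);
}
```

The diff below applies this shape to the `check_closed_event!` call sites in `shutdown_tests.rs`.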
--- lightning/src/ln/shutdown_tests.rs | 289 +++++++---------------------- 1 file changed, 72 insertions(+), 217 deletions(-) diff --git a/lightning/src/ln/shutdown_tests.rs b/lightning/src/ln/shutdown_tests.rs index 4ada551b3d3..edc1548d450 100644 --- a/lightning/src/ln/shutdown_tests.rs +++ b/lightning/src/ln/shutdown_tests.rs @@ -77,20 +77,10 @@ fn pre_funding_lock_shutdown_test() { assert!(nodes[0].node.list_channels().is_empty()); assert!(nodes[1].node.list_channels().is_empty()); - check_closed_event!( - nodes[0], - 1, - ClosureReason::LocallyInitiatedCooperativeClosure, - [node_b_id], - 8000000 - ); - check_closed_event!( - nodes[1], - 1, - ClosureReason::CounterpartyInitiatedCooperativeClosure, - [node_a_id], - 8000000 - ); + let reason_a = ClosureReason::LocallyInitiatedCooperativeClosure; + check_closed_event!(nodes[0], 1, reason_a, [node_b_id], 8000000); + let reason_b = ClosureReason::CounterpartyInitiatedCooperativeClosure; + check_closed_event!(nodes[1], 1, reason_b, [node_a_id], 8000000); } #[test] @@ -138,20 +128,10 @@ fn expect_channel_shutdown_state() { assert!(nodes[0].node.list_channels().is_empty()); assert!(nodes[1].node.list_channels().is_empty()); - check_closed_event!( - nodes[0], - 1, - ClosureReason::LocallyInitiatedCooperativeClosure, - [node_b_id], - 100000 - ); - check_closed_event!( - nodes[1], - 1, - ClosureReason::CounterpartyInitiatedCooperativeClosure, - [node_a_id], - 100000 - ); + let reason_a = ClosureReason::LocallyInitiatedCooperativeClosure; + check_closed_event!(nodes[0], 1, reason_a, [node_b_id], 100000); + let reason_b = ClosureReason::CounterpartyInitiatedCooperativeClosure; + check_closed_event!(nodes[1], 1, reason_b, [node_a_id], 100000); } #[test] @@ -239,20 +219,10 @@ fn expect_channel_shutdown_state_with_htlc() { nodes[1].node.handle_closing_signed(node_a_id, &node_0_2nd_closing_signed.unwrap()); let (_, node_1_none) = get_closing_signed_broadcast!(nodes[1].node, node_a_id); assert!(node_1_none.is_none()); - check_closed_event!( - nodes[0], - 1, - ClosureReason::LocallyInitiatedCooperativeClosure, - [node_b_id], - 100000 - ); - check_closed_event!( - nodes[1], - 1, - ClosureReason::CounterpartyInitiatedCooperativeClosure, - [node_a_id], - 100000 - ); + let reason_a = ClosureReason::LocallyInitiatedCooperativeClosure; + check_closed_event!(nodes[0], 1, reason_a, [node_b_id], 100000); + let reason_b = ClosureReason::CounterpartyInitiatedCooperativeClosure; + check_closed_event!(nodes[1], 1, reason_b, [node_a_id], 100000); // Shutdown basically removes the channelDetails, testing of shutdowncomplete state unnecessary assert!(nodes[0].node.list_channels().is_empty()); @@ -317,20 +287,11 @@ fn test_lnd_bug_6039() { nodes[1].node.handle_closing_signed(node_a_id, &node_0_2nd_closing_signed.unwrap()); let (_, node_1_none) = get_closing_signed_broadcast!(nodes[1].node, node_a_id); assert!(node_1_none.is_none()); - check_closed_event!( - nodes[0], - 1, - ClosureReason::LocallyInitiatedCooperativeClosure, - [node_b_id], - 100000 - ); - check_closed_event!( - nodes[1], - 1, - ClosureReason::CounterpartyInitiatedCooperativeClosure, - [node_a_id], - 100000 - ); + + let reason_a = ClosureReason::LocallyInitiatedCooperativeClosure; + check_closed_event!(nodes[0], 1, reason_a, [node_b_id], 100000); + let reason_b = ClosureReason::CounterpartyInitiatedCooperativeClosure; + check_closed_event!(nodes[1], 1, reason_b, [node_a_id], 100000); // Shutdown basically removes the channelDetails, testing of shutdowncomplete state unnecessary 
assert!(nodes[0].node.list_channels().is_empty()); @@ -358,13 +319,8 @@ fn shutdown_on_unfunded_channel() { scriptpubkey: script, }, ); - check_closed_event!( - nodes[0], - 1, - ClosureReason::CounterpartyCoopClosedUnfundedChannel, - [node_b_id], - 1_000_000 - ); + let reason = ClosureReason::CounterpartyCoopClosedUnfundedChannel; + check_closed_event!(nodes[0], 1, reason, [node_b_id], 1_000_000); } #[test] @@ -381,13 +337,8 @@ fn close_on_unfunded_channel() { let _open_chan = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); nodes[0].node.close_channel(&chan_id, &node_b_id).unwrap(); - check_closed_event!( - nodes[0], - 1, - ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) }, - [node_b_id], - 1_000_000 - ); + let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) }; + check_closed_event!(nodes[0], 1, reason, [node_b_id], 1_000_000); } #[test] @@ -425,13 +376,8 @@ fn expect_channel_shutdown_state_with_force_closure() { assert!(nodes[1].node.list_channels().is_empty()); check_closed_broadcast!(nodes[0], true); check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [node_b_id], 100000); - check_closed_event!( - nodes[1], - 1, - ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, - [node_a_id], - 100000 - ); + let reason_b = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }; + check_closed_event!(nodes[1], 1, reason_b, [node_a_id], 100000); } #[test] @@ -555,42 +501,24 @@ fn updates_shutdown_wait() { nodes[1].node.handle_closing_signed(node_a_id, &node_0_2nd_closing_signed.unwrap()); let (_, node_1_none) = get_closing_signed_broadcast!(nodes[1].node, node_a_id); assert!(node_1_none.is_none()); - check_closed_event!( - nodes[0], - 1, - ClosureReason::LocallyInitiatedCooperativeClosure, - [node_b_id], - 100000 - ); - check_closed_event!( - nodes[1], - 1, - ClosureReason::CounterpartyInitiatedCooperativeClosure, - [node_a_id], - 100000 - ); + + let reason_a = ClosureReason::LocallyInitiatedCooperativeClosure; + check_closed_event!(nodes[0], 1, reason_a, [node_b_id], 100000); + let reason_b = ClosureReason::CounterpartyInitiatedCooperativeClosure; + check_closed_event!(nodes[1], 1, reason_b, [node_a_id], 100000); assert!(nodes[0].node.list_channels().is_empty()); assert_eq!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1); nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clear(); close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, true); + assert!(nodes[1].node.list_channels().is_empty()); assert!(nodes[2].node.list_channels().is_empty()); - check_closed_event!( - nodes[1], - 1, - ClosureReason::CounterpartyInitiatedCooperativeClosure, - [node_c_id], - 100000 - ); - check_closed_event!( - nodes[2], - 1, - ClosureReason::LocallyInitiatedCooperativeClosure, - [node_b_id], - 100000 - ); + let reason_b = ClosureReason::CounterpartyInitiatedCooperativeClosure; + check_closed_event!(nodes[1], 1, reason_b, [node_c_id], 100000); + let reason_c = ClosureReason::LocallyInitiatedCooperativeClosure; + check_closed_event!(nodes[2], 1, reason_c, [node_b_id], 100000); } #[test] @@ -720,13 +648,8 @@ fn do_htlc_fail_async_shutdown(blinded_recipient: bool) { close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, true); assert!(nodes[1].node.list_channels().is_empty()); assert!(nodes[2].node.list_channels().is_empty()); - check_closed_event!( - nodes[0], - 1, - ClosureReason::CounterpartyInitiatedCooperativeClosure, - [node_b_id], - 100000 - ); + let 
reason_a = ClosureReason::CounterpartyInitiatedCooperativeClosure; + check_closed_event!(nodes[0], 1, reason_a, [node_b_id], 100000); let event1 = ExpectedCloseEvent { channel_capacity_sats: Some(100000), channel_id: None, @@ -746,13 +669,8 @@ fn do_htlc_fail_async_shutdown(blinded_recipient: bool) { user_channel_id: None, }; check_closed_events(&nodes[1], &[event1, event2]); - check_closed_event!( - nodes[2], - 1, - ClosureReason::LocallyInitiatedCooperativeClosure, - [node_b_id], - 100000 - ); + let reason_c = ClosureReason::LocallyInitiatedCooperativeClosure; + check_closed_event!(nodes[2], 1, reason_c, [node_b_id], 100000); } fn do_test_shutdown_rebroadcast(recv_count: u8) { @@ -941,13 +859,8 @@ fn do_test_shutdown_rebroadcast(recv_count: u8) { nodes[1].node.handle_closing_signed(node_a_id, &node_0_2nd_closing_signed.unwrap()); let (_, node_1_none) = get_closing_signed_broadcast!(nodes[1].node, node_a_id); assert!(node_1_none.is_none()); - check_closed_event!( - nodes[1], - 1, - ClosureReason::LocallyInitiatedCooperativeClosure, - [node_a_id], - 100000 - ); + let reason = ClosureReason::LocallyInitiatedCooperativeClosure; + check_closed_event!(nodes[1], 1, reason, [node_a_id], 100000); } else { // If one node, however, received + responded with an identical closing_signed we end // up erroring and node[0] will try to broadcast its own latest commitment transaction. @@ -978,8 +891,8 @@ fn do_test_shutdown_rebroadcast(recv_count: u8) { // closing_signed so we do it ourselves check_closed_broadcast!(nodes[1], false); check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", &node_b_id)) } - , [node_a_id], 100000); + let reason = ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(format!("Got a message for a channel from the wrong node! 
No such channel for the passed counterparty_node_id {}", &node_b_id)) }; + check_closed_event!(nodes[1], 1, reason, [node_a_id], 100000); } assert!(nodes[0].node.list_channels().is_empty()); @@ -987,29 +900,16 @@ fn do_test_shutdown_rebroadcast(recv_count: u8) { assert_eq!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1); nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clear(); close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, true); + assert!(nodes[1].node.list_channels().is_empty()); assert!(nodes[2].node.list_channels().is_empty()); - check_closed_event!( - nodes[0], - 1, - ClosureReason::CounterpartyInitiatedCooperativeClosure, - [node_b_id], - 100000 - ); - check_closed_event!( - nodes[1], - 1, - ClosureReason::CounterpartyInitiatedCooperativeClosure, - [node_c_id], - 100000 - ); - check_closed_event!( - nodes[2], - 1, - ClosureReason::LocallyInitiatedCooperativeClosure, - [node_b_id], - 100000 - ); + + let reason_a = ClosureReason::CounterpartyInitiatedCooperativeClosure; + check_closed_event!(nodes[0], 1, reason_a, [node_b_id], 100000); + let reason_b = ClosureReason::CounterpartyInitiatedCooperativeClosure; + check_closed_event!(nodes[1], 1, reason_b, [node_c_id], 100000); + let reason_c = ClosureReason::LocallyInitiatedCooperativeClosure; + check_closed_event!(nodes[2], 1, reason_c, [node_b_id], 100000); } #[test] @@ -1193,8 +1093,8 @@ fn test_unsupported_anysegwit_upfront_shutdown_script() { }, _ => panic!("Unexpected event"), } - check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: OP_PUSHNUM_16 OP_PUSHBYTES_2 0028".to_string() } - , [node_b_id], 100000); + let reason = ClosureReason::ProcessingError { err: "Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: OP_PUSHNUM_16 OP_PUSHBYTES_2 0028".to_string() }; + check_closed_event!(nodes[0], 1, reason, [node_b_id], 100000); } #[test] @@ -1526,21 +1426,11 @@ fn do_test_closing_signed_reinit_timeout(timeout_step: TimeoutStep) { let node_0_2nd_closing_signed = get_closing_signed_broadcast!(nodes[0].node, node_b_id); if timeout_step == TimeoutStep::NoTimeout { nodes[1].node.handle_closing_signed(node_a_id, &node_0_2nd_closing_signed.1.unwrap()); - check_closed_event!( - nodes[1], - 1, - ClosureReason::CounterpartyInitiatedCooperativeClosure, - [node_a_id], - 100000 - ); + let reason_b = ClosureReason::CounterpartyInitiatedCooperativeClosure; + check_closed_event!(nodes[1], 1, reason_b, [node_a_id], 100000); } - check_closed_event!( - nodes[0], - 1, - ClosureReason::LocallyInitiatedCooperativeClosure, - [node_b_id], - 100000 - ); + let reason_a = ClosureReason::LocallyInitiatedCooperativeClosure; + check_closed_event!(nodes[0], 1, reason_a, [node_b_id], 100000); } if timeout_step != TimeoutStep::NoTimeout { @@ -1565,16 +1455,11 @@ fn do_test_closing_signed_reinit_timeout(timeout_step: TimeoutStep) { ); check_closed_broadcast!(nodes[1], true); check_added_monitors!(nodes[1], 1); - check_closed_event!( - nodes[1], - 1, - ClosureReason::ProcessingError { - err: "closing_signed negotiation failed to finish within two timer ticks" - .to_string() - }, - [node_a_id], - 100000 - ); + let reason = ClosureReason::ProcessingError { + err: "closing_signed negotiation failed to finish within two timer ticks" + .to_string() + }; + check_closed_event!(nodes[1], 1, reason, [node_a_id], 100000); } else { assert!(txn[0].output[0].script_pubkey.is_p2wpkh()); 
assert!(txn[0].output[1].script_pubkey.is_p2wpkh()); @@ -1637,20 +1522,10 @@ fn do_simple_legacy_shutdown_test(high_initiator_fee: bool) { nodes[0].node.handle_closing_signed(node_b_id, &node_1_closing_signed.unwrap()); let (_, node_0_none) = get_closing_signed_broadcast!(nodes[0].node, node_b_id); assert!(node_0_none.is_none()); - check_closed_event!( - nodes[0], - 1, - ClosureReason::LocallyInitiatedCooperativeClosure, - [node_b_id], - 100000 - ); - check_closed_event!( - nodes[1], - 1, - ClosureReason::CounterpartyInitiatedCooperativeClosure, - [node_a_id], - 100000 - ); + let reason_a = ClosureReason::LocallyInitiatedCooperativeClosure; + check_closed_event!(nodes[0], 1, reason_a, [node_b_id], 100000); + let reason_b = ClosureReason::CounterpartyInitiatedCooperativeClosure; + check_closed_event!(nodes[1], 1, reason_b, [node_a_id], 100000); } #[test] @@ -1721,20 +1596,10 @@ fn simple_target_feerate_shutdown() { nodes[0].node.handle_closing_signed(node_b_id, &node_1_closing_signed); let (_, node_0_none) = get_closing_signed_broadcast!(nodes[0].node, node_b_id); assert!(node_0_none.is_none()); - check_closed_event!( - nodes[0], - 1, - ClosureReason::LocallyInitiatedCooperativeClosure, - [node_b_id], - 100000 - ); - check_closed_event!( - nodes[1], - 1, - ClosureReason::LocallyInitiatedCooperativeClosure, - [node_a_id], - 100000 - ); + let reason_a = ClosureReason::LocallyInitiatedCooperativeClosure; + check_closed_event!(nodes[0], 1, reason_a, [node_b_id], 100000); + let reason_b = ClosureReason::LocallyInitiatedCooperativeClosure; + check_closed_event!(nodes[1], 1, reason_b, [node_a_id], 100000); } fn do_outbound_update_no_early_closing_signed(use_htlc: bool) { @@ -1839,20 +1704,10 @@ fn do_outbound_update_no_early_closing_signed(use_htlc: bool) { let (_, node_1_none) = get_closing_signed_broadcast!(nodes[1].node, node_a_id); assert!(node_1_none.is_none()); - check_closed_event!( - nodes[0], - 1, - ClosureReason::LocallyInitiatedCooperativeClosure, - [node_b_id], - 100000 - ); - check_closed_event!( - nodes[1], - 1, - ClosureReason::LocallyInitiatedCooperativeClosure, - [node_a_id], - 100000 - ); + let reason_a = ClosureReason::LocallyInitiatedCooperativeClosure; + check_closed_event!(nodes[0], 1, reason_a, [node_b_id], 100000); + let reason_b = ClosureReason::LocallyInitiatedCooperativeClosure; + check_closed_event!(nodes[1], 1, reason_b, [node_a_id], 100000); } #[test] From 7ee4adf2fb2243cab72fd2373a1e9f3aebeae239 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Fri, 27 Jun 2025 02:45:30 +0000 Subject: [PATCH 6/9] Clean up some `send_payment_with_route` call rustfmt verticality `rustfmt` loves to make expressions vertical by putting each method parameter on its own line. In cases where each method parameter is actually doing something, this can be fine, but in some cases we have a few method parameters that are straightforward and shouldn't be the readers focus, mixed with one or two parameters which are key. Here, we clean up a regular instance of this in calls to `send_payment_with_route` in `priv_short_conf_tests.rs` and `shutdown_tests.rs`. In these tests, the fact that we're sending a payment along a route picked on a previous line is important, but the specific parameters of the payment are not. Thus, condensing the calls onto a single line by breaking out some parameters into variables enables the reader to more easily skim past useless details. 
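
As a rough illustration of the shape this commit aims for (not part of the patch itself; the stub types below are hypothetical, simplified stand-ins for the real LDK test types), the before/after of such a call looks roughly like:

    // Illustrative sketch only -- hypothetical stand-in types, not the real LDK API.
    #[derive(Clone, Copy)]
    struct PaymentHash([u8; 32]);
    #[derive(Clone, Copy)]
    struct PaymentSecret([u8; 32]);
    struct PaymentId([u8; 32]);
    struct RecipientOnionFields;
    impl RecipientOnionFields {
        fn secret_only(_secret: PaymentSecret) -> Self {
            RecipientOnionFields
        }
    }
    #[derive(Clone)]
    struct Route;
    struct Node;
    impl Node {
        fn send_payment_with_route(
            &self, _route: Route, _hash: PaymentHash, _onion: RecipientOnionFields,
            _id: PaymentId,
        ) -> Result<(), ()> {
            Ok(())
        }
    }

    fn main() {
        let (node, route) = (Node, Route);
        let (payment_hash, payment_secret) = (PaymentHash([0; 32]), PaymentSecret([1; 32]));

        // Before: rustfmt spreads the call over one line per argument, burying the
        // only interesting fact (that we pay along `route`) in boilerplate.
        node.send_payment_with_route(
            route.clone(),
            payment_hash,
            RecipientOnionFields::secret_only(payment_secret),
            PaymentId(payment_hash.0),
        )
        .unwrap();

        // After: hoisting the rote arguments into variables keeps the call itself
        // on a single, skimmable line.
        let onion = RecipientOnionFields::secret_only(payment_secret);
        let id = PaymentId(payment_hash.0);
        node.send_payment_with_route(route, payment_hash, onion, id).unwrap();
    }

The arguments hoisted into `onion` and `id` are pure boilerplate, so naming them costs nothing while the call that matters stays on one line.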
--- lightning/src/ln/priv_short_conf_tests.rs | 98 +++++++---------------- lightning/src/ln/shutdown_tests.rs | 43 +++------- 2 files changed, 41 insertions(+), 100 deletions(-) diff --git a/lightning/src/ln/priv_short_conf_tests.rs b/lightning/src/ln/priv_short_conf_tests.rs index 96a3ee77dcf..cd56e644a48 100644 --- a/lightning/src/ln/priv_short_conf_tests.rs +++ b/lightning/src/ln/priv_short_conf_tests.rs @@ -79,14 +79,11 @@ fn test_priv_forwarding_rejection() { let (route, our_payment_hash, our_payment_preimage, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], payment_params, 10_000); + let onion = RecipientOnionFields::secret_only(our_payment_secret); + let id = PaymentId(our_payment_hash.0); nodes[0] .node - .send_payment_with_route( - route.clone(), - our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), - PaymentId(our_payment_hash.0), - ) + .send_payment_with_route(route.clone(), our_payment_hash, onion, id) .unwrap(); check_added_monitors!(nodes[0], 1); let payment_event = @@ -199,15 +196,9 @@ fn test_priv_forwarding_rejection() { get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, node_c_id); get_event_msg!(nodes[2], MessageSendEvent::SendChannelUpdate, node_b_id); - nodes[0] - .node - .send_payment_with_route( - route, - our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), - PaymentId(our_payment_hash.0), - ) - .unwrap(); + let onion = RecipientOnionFields::secret_only(our_payment_secret); + let id = PaymentId(our_payment_hash.0); + nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id).unwrap(); check_added_monitors!(nodes[0], 1); pass_along_route( &nodes[0], @@ -388,15 +379,10 @@ fn test_routed_scid_alias() { let (route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], payment_params, 100_000); assert_eq!(route.paths[0].hops[1].short_channel_id, last_hop[0].inbound_scid_alias.unwrap()); - nodes[0] - .node - .send_payment_with_route( - route, - payment_hash, - RecipientOnionFields::secret_only(payment_secret), - PaymentId(payment_hash.0), - ) - .unwrap(); + + let onion = RecipientOnionFields::secret_only(payment_secret); + let id = PaymentId(payment_hash.0); + nodes[0].node.send_payment_with_route(route, payment_hash, onion, id).unwrap(); check_added_monitors!(nodes[0], 1); pass_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], 100_000, payment_hash, payment_secret); @@ -623,15 +609,10 @@ fn test_inbound_scid_privacy() { let (route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], payment_params, 100_000); assert_eq!(route.paths[0].hops[1].short_channel_id, last_hop[0].inbound_scid_alias.unwrap()); - nodes[0] - .node - .send_payment_with_route( - route, - payment_hash, - RecipientOnionFields::secret_only(payment_secret), - PaymentId(payment_hash.0), - ) - .unwrap(); + + let onion = RecipientOnionFields::secret_only(payment_secret); + let id = PaymentId(payment_hash.0); + nodes[0].node.send_payment_with_route(route, payment_hash, onion, id).unwrap(); check_added_monitors!(nodes[0], 1); pass_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], 100_000, payment_hash, payment_secret); @@ -649,15 +630,10 @@ fn test_inbound_scid_privacy() { let (route_2, payment_hash_2, _, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[2], payment_params_2, 100_000); assert_eq!(route_2.paths[0].hops[1].short_channel_id, last_hop[0].short_channel_id.unwrap()); - nodes[0] - .node - 
.send_payment_with_route( - route_2, - payment_hash_2, - RecipientOnionFields::secret_only(payment_secret_2), - PaymentId(payment_hash_2.0), - ) - .unwrap(); + + let onion = RecipientOnionFields::secret_only(payment_secret_2); + let id = PaymentId(payment_hash_2.0); + nodes[0].node.send_payment_with_route(route_2, payment_hash_2, onion, id).unwrap(); check_added_monitors!(nodes[0], 1); let payment_event = SendEvent::from_node(&nodes[0]); @@ -751,15 +727,10 @@ fn test_scid_alias_returned() { route.paths[0].hops[1].fee_msat = 10_000_000; // Overshoot the last channel's value // Route the HTLC through to the destination. - nodes[0] - .node - .send_payment_with_route( - route.clone(), - payment_hash, - RecipientOnionFields::secret_only(payment_secret), - PaymentId(payment_hash.0), - ) - .unwrap(); + let onion = RecipientOnionFields::secret_only(payment_secret); + let id = PaymentId(payment_hash.0); + nodes[0].node.send_payment_with_route(route.clone(), payment_hash, onion, id).unwrap(); + check_added_monitors!(nodes[0], 1); let as_updates = get_htlc_update_msgs!(nodes[0], node_b_id); nodes[1].node.handle_update_add_htlc(node_a_id, &as_updates.update_add_htlcs[0]); @@ -794,15 +765,10 @@ fn test_scid_alias_returned() { route.paths[0].hops[0].fee_msat = 0; // But set fee paid to the middle hop to 0 // Route the HTLC through to the destination. - nodes[0] - .node - .send_payment_with_route( - route, - payment_hash, - RecipientOnionFields::secret_only(payment_secret), - PaymentId(payment_hash.0), - ) - .unwrap(); + let onion = RecipientOnionFields::secret_only(payment_secret); + let id = PaymentId(payment_hash.0); + nodes[0].node.send_payment_with_route(route, payment_hash, onion, id).unwrap(); + check_added_monitors!(nodes[0], 1); let as_updates = get_htlc_update_msgs!(nodes[0], node_b_id); nodes[1].node.handle_update_add_htlc(node_a_id, &as_updates.update_add_htlcs[0]); @@ -1003,15 +969,9 @@ fn test_0conf_channel_with_async_monitor() { let (route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], 1_000_000); - nodes[0] - .node - .send_payment_with_route( - route, - payment_hash, - RecipientOnionFields::secret_only(payment_secret), - PaymentId(payment_hash.0), - ) - .unwrap(); + let onion = RecipientOnionFields::secret_only(payment_secret); + let id = PaymentId(payment_hash.0); + nodes[0].node.send_payment_with_route(route, payment_hash, onion, id).unwrap(); check_added_monitors!(nodes[0], 1); let as_send = SendEvent::from_node(&nodes[0]); diff --git a/lightning/src/ln/shutdown_tests.rs b/lightning/src/ln/shutdown_tests.rs index edc1548d450..ae36ee9e0e4 100644 --- a/lightning/src/ln/shutdown_tests.rs +++ b/lightning/src/ln/shutdown_tests.rs @@ -441,30 +441,15 @@ fn updates_shutdown_wait() { &random_seed_bytes, ) .unwrap(); - unwrap_send_err!( - nodes[0], - nodes[0].node.send_payment_with_route( - route_1, - payment_hash, - RecipientOnionFields::secret_only(payment_secret), - PaymentId(payment_hash.0) - ), - true, - APIError::ChannelUnavailable { .. }, - {} - ); - unwrap_send_err!( - nodes[1], - nodes[1].node.send_payment_with_route( - route_2, - payment_hash, - RecipientOnionFields::secret_only(payment_secret), - PaymentId(payment_hash.0) - ), - true, - APIError::ChannelUnavailable { .. 
}, - {} - ); + + let onion = RecipientOnionFields::secret_only(payment_secret); + let id = PaymentId(payment_hash.0); + let res = nodes[0].node.send_payment_with_route(route_1, payment_hash, onion, id); + unwrap_send_err!(nodes[0], res, true, APIError::ChannelUnavailable { .. }, {}); + + let onion = RecipientOnionFields::secret_only(payment_secret); + let res = nodes[1].node.send_payment_with_route(route_2, payment_hash, onion, id); + unwrap_send_err!(nodes[1], res, true, APIError::ChannelUnavailable { .. }, {}); nodes[2].node.claim_funds(payment_preimage_0); check_added_monitors!(nodes[2], 1); @@ -558,15 +543,11 @@ fn do_htlc_fail_async_shutdown(blinded_recipient: bool) { amt_msat, ) }; + let onion = RecipientOnionFields::secret_only(our_payment_secret); + let id = PaymentId(our_payment_hash.0); nodes[0] .node - .send_payment( - our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), - PaymentId(our_payment_hash.0), - route_params, - Retry::Attempts(0), - ) + .send_payment( our_payment_hash, onion, id, route_params, Retry::Attempts(0)) .unwrap(); check_added_monitors!(nodes[0], 1); let updates = get_htlc_update_msgs!(nodes[0], node_b_id); From 7882d04c311c0349e4df72f18ff7be9bbc2589d6 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Fri, 27 Jun 2025 02:46:20 +0000 Subject: [PATCH 7/9] Clean up some `peer_connected` call rustfmt verticality `rustfmt` loves to make expressions vertical by putting each method parameter on its own line. In cases where each method parameter is actually doing something, this can be fine, but in some cases we have a few method parameters that are straightforward and shouldn't be the readers focus, mixed with one or two parameters which are key. Here, we clean up a regular instance of this in calls to `peer_connected` in `priv_short_conf_tests.rs` and `shutdown_tests.rs`. In these tests, the fact that we're reconnecting is important, but the specific features in the `Init` message are not. Thus, condensing the calls onto two lines by breaking the init message out into a variable enables the reader to more easily skim past useless details. 
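
For illustration only (not part of the patch; `Init`, the node handles, and the ids below are simplified, hypothetical stand-ins for the real `msgs::Init` and test node types), the reconnect pattern this commit condenses looks roughly like:

    // Illustrative sketch only -- hypothetical stand-in types, not the real LDK API.
    struct InitFeatures;
    struct Init {
        features: InitFeatures,
        networks: Option<Vec<u8>>,
        remote_network_address: Option<String>,
    }
    struct Node;
    impl Node {
        fn init_features(&self) -> InitFeatures {
            InitFeatures
        }
        fn peer_connected(&self, _their_id: u8, _msg: &Init, _inbound: bool) -> Result<(), ()> {
            Ok(())
        }
    }

    fn main() {
        let (node_a, node_b) = (Node, Node);
        let (node_a_id, node_b_id) = (0u8, 1u8);

        // The `Init` message is rote detail: build it once, give it a name, and each
        // `peer_connected` call stays on a single line the reader can skim past.
        let init_msg = Init {
            features: node_b.init_features(),
            networks: None,
            remote_network_address: None,
        };
        node_a.peer_connected(node_b_id, &init_msg, true).unwrap();
        node_b.peer_connected(node_a_id, &init_msg, false).unwrap();
    }

The struct literal still takes a few lines, but it sits off to the side as a named variable instead of being repeated inline at every reconnect.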
--- lightning/src/ln/priv_short_conf_tests.rs | 68 +++++++---------------- lightning/src/ln/shutdown_tests.rs | 57 +++---------------- 2 files changed, 29 insertions(+), 96 deletions(-) diff --git a/lightning/src/ln/priv_short_conf_tests.rs b/lightning/src/ln/priv_short_conf_tests.rs index cd56e644a48..1556b7bcad1 100644 --- a/lightning/src/ln/priv_short_conf_tests.rs +++ b/lightning/src/ln/priv_short_conf_tests.rs @@ -134,30 +134,19 @@ fn test_priv_forwarding_rejection() { nodes_1_deserialized ); - nodes[0] - .node - .peer_connected( - node_b_id, - &msgs::Init { - features: nodes[1].node.init_features(), - networks: None, - remote_network_address: None, - }, - true, - ) - .unwrap(); - nodes[1] - .node - .peer_connected( - node_a_id, - &msgs::Init { - features: nodes[0].node.init_features(), - networks: None, - remote_network_address: None, - }, - false, - ) - .unwrap(); + let bs_init_msg = msgs::Init { + features: nodes[1].node.init_features(), + networks: None, + remote_network_address: None, + }; + nodes[0].node.peer_connected(node_b_id, &bs_init_msg, true).unwrap(); + + let as_init_msg = msgs::Init { + features: nodes[0].node.init_features(), + networks: None, + remote_network_address: None, + }; + nodes[1].node.peer_connected(node_a_id, &as_init_msg, false).unwrap(); let as_reestablish = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap(); let bs_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap(); nodes[1].node.handle_channel_reestablish(node_a_id, &as_reestablish); @@ -165,30 +154,13 @@ fn test_priv_forwarding_rejection() { get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, node_b_id); get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, node_a_id); - nodes[1] - .node - .peer_connected( - node_c_id, - &msgs::Init { - features: nodes[2].node.init_features(), - networks: None, - remote_network_address: None, - }, - true, - ) - .unwrap(); - nodes[2] - .node - .peer_connected( - node_b_id, - &msgs::Init { - features: nodes[1].node.init_features(), - networks: None, - remote_network_address: None, - }, - false, - ) - .unwrap(); + let cs_init_msg = msgs::Init { + features: nodes[2].node.init_features(), + networks: None, + remote_network_address: None, + }; + nodes[1].node.peer_connected(node_c_id, &cs_init_msg, true).unwrap(); + nodes[2].node.peer_connected( node_b_id, &bs_init_msg, false).unwrap(); let bs_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[2]).pop().unwrap(); let cs_reestablish = get_chan_reestablish_msgs!(nodes[2], nodes[1]).pop().unwrap(); nodes[2].node.handle_channel_reestablish(node_b_id, &bs_reestablish); diff --git a/lightning/src/ln/shutdown_tests.rs b/lightning/src/ln/shutdown_tests.rs index ae36ee9e0e4..e6fcaaeda30 100644 --- a/lightning/src/ln/shutdown_tests.rs +++ b/lightning/src/ln/shutdown_tests.rs @@ -683,31 +683,14 @@ fn do_test_shutdown_rebroadcast(recv_count: u8) { nodes[0].node.peer_disconnected(node_b_id); nodes[1].node.peer_disconnected(node_a_id); - nodes[0] - .node - .peer_connected( - node_b_id, - &msgs::Init { - features: nodes[1].node.init_features(), - networks: None, - remote_network_address: None, - }, - true, - ) - .unwrap(); + let init_msg = msgs::Init { + features: nodes[1].node.init_features(), + networks: None, + remote_network_address: None, + }; + nodes[0].node.peer_connected(node_b_id, &init_msg, true).unwrap(); let node_0_reestablish = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap(); - nodes[1] - .node - .peer_connected( - node_a_id, - &msgs::Init { - features: 
nodes[0].node.init_features(), - networks: None, - remote_network_address: None, - }, - false, - ) - .unwrap(); + nodes[1].node.peer_connected(node_a_id, &init_msg, false).unwrap(); let node_1_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap(); nodes[1].node.handle_channel_reestablish(node_a_id, &node_0_reestablish); @@ -772,31 +755,9 @@ fn do_test_shutdown_rebroadcast(recv_count: u8) { nodes[0].node.peer_disconnected(node_b_id); nodes[1].node.peer_disconnected(node_a_id); - nodes[1] - .node - .peer_connected( - node_a_id, - &msgs::Init { - features: nodes[0].node.init_features(), - networks: None, - remote_network_address: None, - }, - true, - ) - .unwrap(); + nodes[1].node.peer_connected(node_a_id, &init_msg, true).unwrap(); let node_1_2nd_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap(); - nodes[0] - .node - .peer_connected( - node_b_id, - &msgs::Init { - features: nodes[1].node.init_features(), - networks: None, - remote_network_address: None, - }, - false, - ) - .unwrap(); + nodes[0].node.peer_connected(node_b_id, &init_msg, false).unwrap(); if recv_count == 0 { // If all closing_signeds weren't delivered we can just resume where we left off... let node_0_2nd_reestablish = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap(); From 4ce23bc19525091e5785b641345ced9d5a2c351c Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Fri, 27 Jun 2025 02:50:01 +0000 Subject: [PATCH 8/9] Clean up misc `rustfmt` verticality in tests `rustfmt` loves to make expressions vertical by putting each method parameter or `.` subexpression on its own line. In cases where each method parameter or `.` subexpression is actually doing something, this can be fine, but in some cases we have a few method parameters that are straightforward and shouldn't be the readers focus, mixed with one or two parameters which are key or a few subexpressions which simply do rote variable indexing or locking. Here we clean up various `rustfmt` verticality that exposes unimportant details of tests and test utils, making it easier for the reader to skim past them by introducing additional variables. 
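
One recurring instance is the lock-then-index chain on the test monitor map; as a minimal sketch (not part of the patch, and using a plain `Mutex<HashMap<..>>` in place of the real test-harness field), the restructuring looks roughly like:

    // Illustrative sketch only -- a hypothetical mutex-backed map stands in for the
    // test harness's monitor-update-id map.
    use std::collections::HashMap;
    use std::sync::Mutex;

    fn main() {
        let latest_monitor_update_id: Mutex<HashMap<u32, (u64, u64)>> =
            Mutex::new(HashMap::from([(42, (7, 8))]));
        let chan_id = 42u32;

        // Before: one long vertical chain of `.lock().unwrap().get().unwrap().clone()`,
        // which rustfmt renders one subexpression per line.
        let (_, latest_update) = latest_monitor_update_id
            .lock()
            .unwrap()
            .get(&chan_id)
            .unwrap()
            .clone();

        // After: a scoped block names the lock guard, the interesting lookup fits on
        // one line, and the guard is dropped at the end of the block.
        let (_, latest_update_2) = {
            let guard = latest_monitor_update_id.lock().unwrap();
            guard.get(&chan_id).unwrap().clone()
        };
        assert_eq!(latest_update, latest_update_2);
    }
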
--- lightning/src/ln/async_signer_tests.rs | 18 ++----- lightning/src/ln/functional_test_utils.rs | 8 +-- lightning/src/ln/priv_short_conf_tests.rs | 62 ++++++++--------------- lightning/src/ln/shutdown_tests.rs | 33 ++++-------- 4 files changed, 37 insertions(+), 84 deletions(-) diff --git a/lightning/src/ln/async_signer_tests.rs b/lightning/src/ln/async_signer_tests.rs index 14377c10fae..b667007295a 100644 --- a/lightning/src/ln/async_signer_tests.rs +++ b/lightning/src/ln/async_signer_tests.rs @@ -1299,20 +1299,10 @@ fn do_test_closing_signed(extra_closing_signed: bool, reconnect: bool) { assert!(nodes[0].node.list_channels().is_empty()); assert!(nodes[1].node.list_channels().is_empty()); - check_closed_event!( - nodes[0], - 1, - ClosureReason::LocallyInitiatedCooperativeClosure, - [node_b_id], - 100000 - ); - check_closed_event!( - nodes[1], - 1, - ClosureReason::CounterpartyInitiatedCooperativeClosure, - [node_a_id], - 100000 - ); + let reason_a = ClosureReason::LocallyInitiatedCooperativeClosure; + check_closed_event!(nodes[0], 1, reason_a, [node_b_id], 100000); + let reason_b = ClosureReason::CounterpartyInitiatedCooperativeClosure; + check_closed_event!(nodes[1], 1, reason_b, [node_a_id], 100000); } #[test] diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index 575b7d50867..e4691d7252b 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -799,12 +799,8 @@ impl<'a, 'b, 'c> Drop for Node<'a, 'b, 'c> { { for channel_id in self.chain_monitor.chain_monitor.list_monitors() { let mut w = test_utils::TestVecWriter(Vec::new()); - self.chain_monitor - .chain_monitor - .get_monitor(channel_id) - .unwrap() - .write(&mut w) - .unwrap(); + let mon = self.chain_monitor.chain_monitor.get_monitor(channel_id).unwrap(); + mon.write(&mut w).unwrap(); let (_, deserialized_monitor) = <(BlockHash, ChannelMonitor)>::read( &mut io::Cursor::new(&w.0), diff --git a/lightning/src/ln/priv_short_conf_tests.rs b/lightning/src/ln/priv_short_conf_tests.rs index 1556b7bcad1..4e40add463f 100644 --- a/lightning/src/ln/priv_short_conf_tests.rs +++ b/lightning/src/ln/priv_short_conf_tests.rs @@ -105,13 +105,9 @@ fn test_priv_forwarding_rejection() { nodes[0].node.handle_update_fail_htlc(node_b_id, &htlc_fail_updates.update_fail_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[1], htlc_fail_updates.commitment_signed, true, true); - expect_payment_failed_with_update!( - nodes[0], - our_payment_hash, - false, - nodes[2].node.list_channels()[0].short_channel_id.unwrap(), - true - ); + + let chan_2_scid = nodes[2].node.list_channels()[0].short_channel_id.unwrap(); + expect_payment_failed_with_update!(nodes[0], our_payment_hash, false, chan_2_scid, true); // Now disconnect nodes[1] from its peers and restart with accept_forwards_to_priv_channels set // to true. Sadly there is currently no way to change it at runtime. 
@@ -967,14 +963,11 @@ fn test_0conf_channel_with_async_monitor() { assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (_, latest_update) = nodes[1] - .chain_monitor - .latest_monitor_update_id - .lock() - .unwrap() - .get(&bs_raa.channel_id) - .unwrap() - .clone(); + let (_, latest_update) = { + let latest_monitor_update_id = + nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap(); + latest_monitor_update_id.get(&bs_raa.channel_id).unwrap().clone() + }; nodes[1] .chain_monitor .chain_monitor @@ -1020,13 +1013,8 @@ fn test_0conf_close_no_early_chan_update() { nodes[0].node.force_close_all_channels_broadcasting_latest_txn(error_message.to_string()); check_added_monitors!(nodes[0], 1); - check_closed_event!( - &nodes[0], - 1, - ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, - [node_b_id], - 100000 - ); + let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }; + check_closed_event!(&nodes[0], 1, reason, [node_b_id], 100000); let _ = get_err_msg(&nodes[0], &node_b_id); } @@ -1138,28 +1126,18 @@ fn test_0conf_channel_reorg() { // At this point the channel no longer has an SCID again. In the future we should likely // support simply un-setting the SCID and waiting until the channel gets re-confirmed, but for // now we force-close the channel here. - check_closed_event!( - &nodes[0], - 1, - ClosureReason::ProcessingError { - err: "Funding transaction was un-confirmed. Locked at 0 confs, now have 0 confs." - .to_owned() - }, - [node_b_id], - 100000 - ); + let reason = ClosureReason::ProcessingError { + err: "Funding transaction was un-confirmed. Locked at 0 confs, now have 0 confs." + .to_owned() + }; + check_closed_event!(&nodes[0], 1, reason, [node_b_id], 100000); check_closed_broadcast!(nodes[0], true); check_added_monitors(&nodes[0], 1); - check_closed_event!( - &nodes[1], - 1, - ClosureReason::ProcessingError { - err: "Funding transaction was un-confirmed. Locked at 0 confs, now have 0 confs." - .to_owned() - }, - [node_a_id], - 100000 - ); + let reason = ClosureReason::ProcessingError { + err: "Funding transaction was un-confirmed. Locked at 0 confs, now have 0 confs." + .to_owned() + }; + check_closed_event!(&nodes[1], 1, reason, [node_a_id], 100000); check_closed_broadcast!(nodes[1], true); check_added_monitors(&nodes[1], 1); } diff --git a/lightning/src/ln/shutdown_tests.rs b/lightning/src/ln/shutdown_tests.rs index e6fcaaeda30..7be5446e708 100644 --- a/lightning/src/ln/shutdown_tests.rs +++ b/lightning/src/ln/shutdown_tests.rs @@ -1346,20 +1346,12 @@ fn do_test_closing_signed_reinit_timeout(timeout_step: TimeoutStep) { // nodes[1] should happily accept and respond to. 
node_0_closing_signed.fee_range.as_mut().unwrap().max_fee_satoshis *= 10; { - let mut node_0_per_peer_lock; - let mut node_0_peer_state_lock; - get_channel_ref!( - nodes[0], - nodes[1], - node_0_per_peer_lock, - node_0_peer_state_lock, - chan_id - ) - .context_mut() - .closing_fee_limits - .as_mut() - .unwrap() - .1 *= 10; + let mut per_peer_lock; + let mut peer_state_lock; + let chan = + get_channel_ref!(nodes[0], nodes[1], per_peer_lock, peer_state_lock, chan_id); + + chan.context_mut().closing_fee_limits.as_mut().unwrap().1 *= 10; } nodes[1].node.handle_closing_signed(node_a_id, &node_0_closing_signed); let node_1_closing_signed = @@ -1609,14 +1601,11 @@ fn do_outbound_update_no_early_closing_signed(use_htlc: bool) { expect_channel_shutdown_state!(nodes[0], chan_id, ChannelShutdownState::ResolvingHTLCs); assert_eq!(nodes[0].node.get_and_clear_pending_msg_events(), Vec::new()); - let (latest_update, _) = nodes[0] - .chain_monitor - .latest_monitor_update_id - .lock() - .unwrap() - .get(&chan_id) - .unwrap() - .clone(); + let (latest_update, _) = { + let latest_monitor_update_id = + nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap(); + latest_monitor_update_id.get(&chan_id).unwrap().clone() + }; nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(chan_id, latest_update); let as_raa_closing_signed = nodes[0].node.get_and_clear_pending_msg_events(); From 4aa6d391d6cdaec8e48ac5859d9e10a1552d49ec Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Fri, 27 Jun 2025 03:02:27 +0000 Subject: [PATCH 9/9] Re-`rustfmt` tests The last few commits made various cleanups to some tests files, which we re-`rustfmt` here. --- lightning/src/ln/functional_test_utils.rs | 225 ++++++---------------- lightning/src/ln/priv_short_conf_tests.rs | 11 +- lightning/src/ln/shutdown_tests.rs | 7 +- 3 files changed, 63 insertions(+), 180 deletions(-) diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index e4691d7252b..245479e1df8 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -1510,11 +1510,7 @@ pub fn sign_funding_transaction<'a, 'b, 'c>( // Ensure that funding_transaction_generated is idempotent. 
assert!(node_a .node - .funding_transaction_generated( - temporary_channel_id, - node_b_id, - tx.clone() - ) + .funding_transaction_generated(temporary_channel_id, node_b_id, tx.clone()) .is_err()); assert!(node_a.node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(node_a, 0); @@ -1535,20 +1531,10 @@ pub fn open_zero_conf_channel<'a, 'b, 'c, 'd>( initiator .node - .create_channel( - receiver_node_id, - 100_000, - 10_001, - 42, - None, - initiator_config, - ) + .create_channel(receiver_node_id, 100_000, 10_001, 42, None, initiator_config) .unwrap(); - let open_channel = get_event_msg!( - initiator, - MessageSendEvent::SendOpenChannel, - receiver_node_id - ); + let open_channel = + get_event_msg!(initiator, MessageSendEvent::SendOpenChannel, receiver_node_id); receiver.node.handle_open_channel(initiator_node_id, &open_channel); let events = receiver.node.get_and_clear_pending_events(); @@ -1568,11 +1554,8 @@ pub fn open_zero_conf_channel<'a, 'b, 'c, 'd>( _ => panic!("Unexpected event"), }; - let accept_channel = get_event_msg!( - receiver, - MessageSendEvent::SendAcceptChannel, - initiator_node_id - ); + let accept_channel = + get_event_msg!(receiver, MessageSendEvent::SendAcceptChannel, initiator_node_id); assert_eq!(accept_channel.common_fields.minimum_depth, 0); initiator.node.handle_accept_channel(receiver_node_id, &accept_channel); @@ -1580,17 +1563,10 @@ pub fn open_zero_conf_channel<'a, 'b, 'c, 'd>( create_funding_transaction(&initiator, &receiver_node_id, 100_000, 42); initiator .node - .funding_transaction_generated( - temporary_channel_id, - receiver_node_id, - tx.clone(), - ) + .funding_transaction_generated(temporary_channel_id, receiver_node_id, tx.clone()) .unwrap(); - let funding_created = get_event_msg!( - initiator, - MessageSendEvent::SendFundingCreated, - receiver_node_id - ); + let funding_created = + get_event_msg!(initiator, MessageSendEvent::SendFundingCreated, receiver_node_id); receiver.node.handle_funding_created(initiator_node_id, &funding_created); check_added_monitors!(receiver, 1); @@ -1611,11 +1587,8 @@ pub fn open_zero_conf_channel<'a, 'b, 'c, 'd>( tx ); - as_channel_ready = get_event_msg!( - initiator, - MessageSendEvent::SendChannelReady, - receiver_node_id - ); + as_channel_ready = + get_event_msg!(initiator, MessageSendEvent::SendChannelReady, receiver_node_id); }, _ => panic!("Unexpected event"), } @@ -1631,16 +1604,10 @@ pub fn open_zero_conf_channel<'a, 'b, 'c, 'd>( receiver.node.handle_channel_ready(initiator_node_id, &as_channel_ready); expect_channel_ready_event(&receiver, &initiator_node_id); - let as_channel_update = get_event_msg!( - initiator, - MessageSendEvent::SendChannelUpdate, - receiver_node_id - ); - let bs_channel_update = get_event_msg!( - receiver, - MessageSendEvent::SendChannelUpdate, - initiator_node_id - ); + let as_channel_update = + get_event_msg!(initiator, MessageSendEvent::SendChannelUpdate, receiver_node_id); + let bs_channel_update = + get_event_msg!(receiver, MessageSendEvent::SendChannelUpdate, initiator_node_id); initiator.node.handle_channel_update(receiver_node_id, &bs_channel_update); receiver.node.handle_channel_update(initiator_node_id, &as_channel_update); @@ -1657,12 +1624,9 @@ pub fn exchange_open_accept_chan<'a, 'b, 'c>( let node_a_id = node_a.node.get_our_node_id(); let node_b_id = node_b.node.get_our_node_id(); - let create_chan_id = node_a - .node - .create_channel(node_b_id, channel_value, push_msat, 42, None, None) - .unwrap(); - let open_channel_msg = - get_event_msg!(node_a, 
MessageSendEvent::SendOpenChannel, node_b_id); + let create_chan_id = + node_a.node.create_channel(node_b_id, channel_value, push_msat, 42, None, None).unwrap(); + let open_channel_msg = get_event_msg!(node_a, MessageSendEvent::SendOpenChannel, node_b_id); assert_eq!(open_channel_msg.common_fields.temporary_channel_id, create_chan_id); assert_eq!( node_a @@ -1686,8 +1650,7 @@ pub fn exchange_open_accept_chan<'a, 'b, 'c>( _ => panic!("Unexpected event"), }; } - let accept_channel_msg = - get_event_msg!(node_b, MessageSendEvent::SendAcceptChannel, node_a_id); + let accept_channel_msg = get_event_msg!(node_b, MessageSendEvent::SendAcceptChannel, node_a_id); assert_eq!(accept_channel_msg.common_fields.temporary_channel_id, create_chan_id); node_a.node.handle_accept_channel(node_b_id, &accept_channel_msg); assert_ne!( @@ -1791,11 +1754,8 @@ pub fn create_chan_between_nodes_with_value_b<'a, 'b, 'c>( let node_b_id = node_b.node.get_our_node_id(); node_b.node.handle_channel_ready(node_a_id, &as_funding_msgs.0); - let bs_announcement_sigs = get_event_msg!( - node_b, - MessageSendEvent::SendAnnouncementSignatures, - node_a_id - ); + let bs_announcement_sigs = + get_event_msg!(node_b, MessageSendEvent::SendAnnouncementSignatures, node_a_id); node_b.node.handle_announcement_signatures(node_a_id, &as_funding_msgs.1); let events_7 = node_b.node.get_and_clear_pending_msg_events(); @@ -1807,9 +1767,7 @@ pub fn create_chan_between_nodes_with_value_b<'a, 'b, 'c>( _ => panic!("Unexpected event"), }; - node_a - .node - .handle_announcement_signatures(node_b_id, &bs_announcement_sigs); + node_a.node.handle_announcement_signatures(node_b_id, &bs_announcement_sigs); let events_8 = node_a.node.get_and_clear_pending_msg_events(); assert_eq!(events_8.len(), 1); let as_update = match events_8[0] { @@ -1864,48 +1822,26 @@ pub fn create_unannounced_chan_between_nodes_with_value<'a, 'b, 'c, 'd>( no_announce_cfg.channel_handshake_config.announce_for_forwarding = false; nodes[a] .node - .create_channel( - node_b_id, - channel_value, - push_msat, - 42, - None, - Some(no_announce_cfg), - ) + .create_channel(node_b_id, channel_value, push_msat, 42, None, Some(no_announce_cfg)) .unwrap(); - let open_channel = get_event_msg!( - nodes[a], - MessageSendEvent::SendOpenChannel, - node_b_id - ); + let open_channel = get_event_msg!(nodes[a], MessageSendEvent::SendOpenChannel, node_b_id); nodes[b].node.handle_open_channel(node_a_id, &open_channel); - let accept_channel = get_event_msg!( - nodes[b], - MessageSendEvent::SendAcceptChannel, - node_a_id - ); + let accept_channel = get_event_msg!(nodes[b], MessageSendEvent::SendAcceptChannel, node_a_id); nodes[a].node.handle_accept_channel(node_b_id, &accept_channel); let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[a], &node_b_id, channel_value, 42); nodes[a] .node - .funding_transaction_generated( - temporary_channel_id, - node_b_id, - tx.clone(), - ) + .funding_transaction_generated(temporary_channel_id, node_b_id, tx.clone()) .unwrap(); let as_funding_created = get_event_msg!(nodes[a], MessageSendEvent::SendFundingCreated, node_b_id); nodes[b].node.handle_funding_created(node_a_id, &as_funding_created); check_added_monitors!(nodes[b], 1); - let cs_funding_signed = get_event_msg!( - nodes[b], - MessageSendEvent::SendFundingSigned, - node_a_id - ); + let cs_funding_signed = + get_event_msg!(nodes[b], MessageSendEvent::SendFundingSigned, node_a_id); expect_channel_pending_event(&nodes[b], &node_a_id); nodes[a].node.handle_funding_signed(node_b_id, 
&cs_funding_signed); @@ -1922,26 +1858,14 @@ pub fn create_unannounced_chan_between_nodes_with_value<'a, 'b, 'c, 'd>( connect_blocks(&nodes[a], CHAN_CONFIRM_DEPTH - 1); confirm_transaction_at(&nodes[b], &tx, conf_height); connect_blocks(&nodes[b], CHAN_CONFIRM_DEPTH - 1); - let as_channel_ready = get_event_msg!( - nodes[a], - MessageSendEvent::SendChannelReady, - node_b_id - ); + let as_channel_ready = get_event_msg!(nodes[a], MessageSendEvent::SendChannelReady, node_b_id); let bs_channel_ready = get_event_msg!(nodes[b], MessageSendEvent::SendChannelReady, node_a_id); nodes[a].node.handle_channel_ready(node_b_id, &bs_channel_ready); expect_channel_ready_event(&nodes[a], &node_b_id); - let as_update = get_event_msg!( - nodes[a], - MessageSendEvent::SendChannelUpdate, - node_b_id - ); + let as_update = get_event_msg!(nodes[a], MessageSendEvent::SendChannelUpdate, node_b_id); nodes[b].node.handle_channel_ready(node_a_id, &as_channel_ready); expect_channel_ready_event(&nodes[b], &node_a_id); - let bs_update = get_event_msg!( - nodes[b], - MessageSendEvent::SendChannelUpdate, - node_a_id - ); + let bs_update = get_event_msg!(nodes[b], MessageSendEvent::SendChannelUpdate, node_a_id); nodes[a].node.handle_channel_update(node_b_id, &bs_update); nodes[b].node.handle_channel_update(node_a_id, &as_update); @@ -2330,11 +2254,8 @@ pub fn close_channel<'a, 'b, 'c>( assert!(node_a.get_and_clear_pending_msg_events().is_empty()); node_a.handle_closing_signed(node_b.get_our_node_id(), &closing_signed_b.unwrap()); - let as_closing_signed = get_event_msg!( - struct_a, - MessageSendEvent::SendClosingSigned, - node_b.get_our_node_id() - ); + let as_closing_signed = + get_event_msg!(struct_a, MessageSendEvent::SendClosingSigned, node_b.get_our_node_id()); node_b.handle_closing_signed(node_a.get_our_node_id(), &as_closing_signed); assert_eq!(broadcaster_b.txn_broadcasted.lock().unwrap().len(), 1); tx_b = broadcaster_b.txn_broadcasted.lock().unwrap().remove(0); @@ -2352,11 +2273,8 @@ pub fn close_channel<'a, 'b, 'c>( get_event_msg!(struct_a, MessageSendEvent::SendClosingSigned, node_b.get_our_node_id()); node_b.handle_closing_signed(node_a.get_our_node_id(), &closing_signed_a); - let closing_signed_b =get_event_msg!( - struct_b, - MessageSendEvent::SendClosingSigned, - node_a.get_our_node_id() - ); + let closing_signed_b = + get_event_msg!(struct_b, MessageSendEvent::SendClosingSigned, node_a.get_our_node_id()); node_a.handle_closing_signed(node_b.get_our_node_id(), &closing_signed_b); assert_eq!(broadcaster_a.txn_broadcasted.lock().unwrap().len(), 1); @@ -2623,21 +2541,17 @@ pub fn do_main_commitment_signed_dance( let node_a_id = node_a.node.get_our_node_id(); let node_b_id = node_b.node.get_our_node_id(); - let (as_revoke_and_ack, as_commitment_signed) = - get_revoke_commit_msgs!(node_a, node_b_id); + let (as_revoke_and_ack, as_commitment_signed) = get_revoke_commit_msgs!(node_a, node_b_id); check_added_monitors!(node_b, 0); assert!(node_b.node.get_and_clear_pending_msg_events().is_empty()); node_b.node.handle_revoke_and_ack(node_a_id, &as_revoke_and_ack); assert!(node_b.node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(node_b, 1); - node_b - .node - .handle_commitment_signed_batch_test(node_a_id, &as_commitment_signed); + node_b.node.handle_commitment_signed_batch_test(node_a_id, &as_commitment_signed); let (bs_revoke_and_ack, extra_msg_option) = { let mut events = node_b.node.get_and_clear_pending_msg_events(); assert!(events.len() <= 2); - let node_a_event = - 
remove_first_msg_event_to_node(&node_a_id, &mut events); + let node_a_event = remove_first_msg_event_to_node(&node_a_id, &mut events); ( match node_a_event { MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => { @@ -2670,15 +2584,12 @@ pub fn do_commitment_signed_dance( check_added_monitors!(node_a, 0); assert!(node_a.node.get_and_clear_pending_msg_events().is_empty()); - node_a - .node - .handle_commitment_signed_batch_test(node_b_id, commitment_signed); + node_a.node.handle_commitment_signed_batch_test(node_b_id, commitment_signed); check_added_monitors!(node_a, 1); // If this commitment signed dance was due to a claim, don't check for an RAA monitor update. let channel_id = commitment_signed[0].channel_id; - let got_claim = - node_a.node.test_raa_monitor_updates_held(node_b_id, channel_id); + let got_claim = node_a.node.test_raa_monitor_updates_held(node_b_id, channel_id); if fail_backwards { assert!(!got_claim); } @@ -3055,11 +2966,9 @@ pub fn expect_payment_forwarded>( assert_eq!(prev_node.node().get_our_node_id(), prev_node_id.unwrap()); // Is the event prev_channel_id in one of the channels between the two nodes? let node_chans = node.node().list_channels(); - assert!(node_chans - .iter() - .any(|x| x.counterparty.node_id == prev_node_id.unwrap() - && x.channel_id == prev_channel_id.unwrap() - && x.user_channel_id == prev_user_channel_id.unwrap())); + assert!(node_chans.iter().any(|x| x.counterparty.node_id == prev_node_id.unwrap() + && x.channel_id == prev_channel_id.unwrap() + && x.user_channel_id == prev_user_channel_id.unwrap())); } // We check for force closures since a force closed channel is removed from the // node's channel list @@ -5063,9 +4972,7 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) { node_a.node.handle_update_add_htlc(node_b_id, &update_add); } for update_fulfill in commitment_update.update_fulfill_htlcs { - node_a - .node - .handle_update_fulfill_htlc(node_b_id, &update_fulfill); + node_a.node.handle_update_fulfill_htlc(node_b_id, &update_fulfill); } for update_fail in commitment_update.update_fail_htlcs { node_a.node.handle_update_fail_htlc(node_b_id, &update_fail); @@ -5084,15 +4991,10 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) { &commitment_update.commitment_signed, ); check_added_monitors!(node_a, 1); - let as_revoke_and_ack = get_event_msg!( - node_a, - MessageSendEvent::SendRevokeAndACK, - node_b_id - ); + let as_revoke_and_ack = + get_event_msg!(node_a, MessageSendEvent::SendRevokeAndACK, node_b_id); // No commitment_signed so get_event_msg's assert(len == 1) passes - node_b - .node - .handle_revoke_and_ack(node_a_id, &as_revoke_and_ack); + node_b.node.handle_revoke_and_ack(node_a_id, &as_revoke_and_ack); assert!(node_b.node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!( node_b, @@ -5149,9 +5051,7 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) { node_b.node.handle_update_add_htlc(node_a_id, &update_add); } for update_fulfill in commitment_update.update_fulfill_htlcs { - node_b - .node - .handle_update_fulfill_htlc(node_a_id, &update_fulfill); + node_b.node.handle_update_fulfill_htlc(node_a_id, &update_fulfill); } for update_fail in commitment_update.update_fail_htlcs { node_b.node.handle_update_fail_htlc(node_a_id, &update_fail); @@ -5170,15 +5070,10 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) { &commitment_update.commitment_signed, ); check_added_monitors!(node_b, 1); - let 
bs_revoke_and_ack = get_event_msg!( - node_b, - MessageSendEvent::SendRevokeAndACK, - node_a_id - ); + let bs_revoke_and_ack = + get_event_msg!(node_b, MessageSendEvent::SendRevokeAndACK, node_a_id); // No commitment_signed so get_event_msg's assert(len == 1) passes - node_a - .node - .handle_revoke_and_ack(node_b_id, &bs_revoke_and_ack); + node_a.node.handle_revoke_and_ack(node_b_id, &bs_revoke_and_ack); assert!(node_a.node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!( node_a, @@ -5219,20 +5114,12 @@ pub fn create_batch_channel_funding<'a, 'b, 'c>( override_config.clone(), ) .unwrap(); - let open_channel_msg = get_event_msg!( - funding_node, - MessageSendEvent::SendOpenChannel, - other_node_id - ); + let open_channel_msg = + get_event_msg!(funding_node, MessageSendEvent::SendOpenChannel, other_node_id); other_node.node.handle_open_channel(funding_node_id, &open_channel_msg); - let accept_channel_msg = get_event_msg!( - other_node, - MessageSendEvent::SendAcceptChannel, - funding_node_id - ); - funding_node - .node - .handle_accept_channel(other_node_id, &accept_channel_msg); + let accept_channel_msg = + get_event_msg!(other_node, MessageSendEvent::SendAcceptChannel, funding_node_id); + funding_node.node.handle_accept_channel(other_node_id, &accept_channel_msg); // Create the corresponding funding output. let events = funding_node.node.get_and_clear_pending_events(); diff --git a/lightning/src/ln/priv_short_conf_tests.rs b/lightning/src/ln/priv_short_conf_tests.rs index 4e40add463f..623774acb5e 100644 --- a/lightning/src/ln/priv_short_conf_tests.rs +++ b/lightning/src/ln/priv_short_conf_tests.rs @@ -81,10 +81,7 @@ fn test_priv_forwarding_rejection() { let onion = RecipientOnionFields::secret_only(our_payment_secret); let id = PaymentId(our_payment_hash.0); - nodes[0] - .node - .send_payment_with_route(route.clone(), our_payment_hash, onion, id) - .unwrap(); + nodes[0].node.send_payment_with_route(route.clone(), our_payment_hash, onion, id).unwrap(); check_added_monitors!(nodes[0], 1); let payment_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0)); @@ -156,7 +153,7 @@ fn test_priv_forwarding_rejection() { remote_network_address: None, }; nodes[1].node.peer_connected(node_c_id, &cs_init_msg, true).unwrap(); - nodes[2].node.peer_connected( node_b_id, &bs_init_msg, false).unwrap(); + nodes[2].node.peer_connected(node_b_id, &bs_init_msg, false).unwrap(); let bs_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[2]).pop().unwrap(); let cs_reestablish = get_chan_reestablish_msgs!(nodes[2], nodes[1]).pop().unwrap(); nodes[2].node.handle_channel_reestablish(node_b_id, &bs_reestablish); @@ -1128,14 +1125,14 @@ fn test_0conf_channel_reorg() { // now we force-close the channel here. let reason = ClosureReason::ProcessingError { err: "Funding transaction was un-confirmed. Locked at 0 confs, now have 0 confs." - .to_owned() + .to_owned(), }; check_closed_event!(&nodes[0], 1, reason, [node_b_id], 100000); check_closed_broadcast!(nodes[0], true); check_added_monitors(&nodes[0], 1); let reason = ClosureReason::ProcessingError { err: "Funding transaction was un-confirmed. Locked at 0 confs, now have 0 confs." 
- .to_owned() + .to_owned(), }; check_closed_event!(&nodes[1], 1, reason, [node_a_id], 100000); check_closed_broadcast!(nodes[1], true); diff --git a/lightning/src/ln/shutdown_tests.rs b/lightning/src/ln/shutdown_tests.rs index 7be5446e708..9826b8a39cd 100644 --- a/lightning/src/ln/shutdown_tests.rs +++ b/lightning/src/ln/shutdown_tests.rs @@ -547,7 +547,7 @@ fn do_htlc_fail_async_shutdown(blinded_recipient: bool) { let id = PaymentId(our_payment_hash.0); nodes[0] .node - .send_payment( our_payment_hash, onion, id, route_params, Retry::Attempts(0)) + .send_payment(our_payment_hash, onion, id, route_params, Retry::Attempts(0)) .unwrap(); check_added_monitors!(nodes[0], 1); let updates = get_htlc_update_msgs!(nodes[0], node_b_id); @@ -629,7 +629,7 @@ fn do_htlc_fail_async_shutdown(blinded_recipient: bool) { close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, true); assert!(nodes[1].node.list_channels().is_empty()); assert!(nodes[2].node.list_channels().is_empty()); - let reason_a = ClosureReason::CounterpartyInitiatedCooperativeClosure; + let reason_a = ClosureReason::CounterpartyInitiatedCooperativeClosure; check_closed_event!(nodes[0], 1, reason_a, [node_b_id], 100000); let event1 = ExpectedCloseEvent { channel_capacity_sats: Some(100000), @@ -1390,8 +1390,7 @@ fn do_test_closing_signed_reinit_timeout(timeout_step: TimeoutStep) { check_closed_broadcast!(nodes[1], true); check_added_monitors!(nodes[1], 1); let reason = ClosureReason::ProcessingError { - err: "closing_signed negotiation failed to finish within two timer ticks" - .to_string() + err: "closing_signed negotiation failed to finish within two timer ticks".to_string(), }; check_closed_event!(nodes[1], 1, reason, [node_a_id], 100000); } else {