diff --git a/lightning/src/ln/async_signer_tests.rs b/lightning/src/ln/async_signer_tests.rs
index 2e411b42f09..b667007295a 100644
--- a/lightning/src/ln/async_signer_tests.rs
+++ b/lightning/src/ln/async_signer_tests.rs
@@ -294,6 +294,9 @@ fn do_test_async_commitment_signature_for_commitment_signed_revoke_and_ack(
 	// Send a payment.
 	let src = &nodes[0];
 	let dst = &nodes[1];
+	let src_node_id = src.node.get_our_node_id();
+	let dst_node_id = dst.node.get_our_node_id();
+
 	let (route, our_payment_hash, _our_payment_preimage, our_payment_secret) =
 		get_route_and_payment_hash!(src, dst, 8000000);
 	let recipient_fields = RecipientOnionFields::secret_only(our_payment_secret);
@@ -309,52 +312,37 @@ fn do_test_async_commitment_signature_for_commitment_signed_revoke_and_ack(
 		assert_eq!(events.len(), 1);
 		SendEvent::from_event(events.remove(0))
 	};
-	assert_eq!(payment_event.node_id, dst.node.get_our_node_id());
+	assert_eq!(payment_event.node_id, dst_node_id);
 	assert_eq!(payment_event.msgs.len(), 1);
 
-	dst.node.handle_update_add_htlc(src.node.get_our_node_id(), &payment_event.msgs[0]);
+	dst.node.handle_update_add_htlc(src_node_id, &payment_event.msgs[0]);
 
 	// Mark dst's signer as unavailable and handle src's commitment_signed: while dst won't yet have a
 	// `commitment_signed` of its own to offer, it should publish a `revoke_and_ack`.
-	dst.disable_channel_signer_op(
-		&src.node.get_our_node_id(),
-		&chan_id,
-		SignerOp::GetPerCommitmentPoint,
-	);
-	dst.disable_channel_signer_op(
-		&src.node.get_our_node_id(),
-		&chan_id,
-		SignerOp::ReleaseCommitmentSecret,
-	);
-	dst.disable_channel_signer_op(
-		&src.node.get_our_node_id(),
-		&chan_id,
-		SignerOp::SignCounterpartyCommitment,
-	);
-	dst.node.handle_commitment_signed_batch_test(
-		src.node.get_our_node_id(),
-		&payment_event.commitment_msg,
-	);
+	dst.disable_channel_signer_op(&src_node_id, &chan_id, SignerOp::GetPerCommitmentPoint);
+	dst.disable_channel_signer_op(&src_node_id, &chan_id, SignerOp::ReleaseCommitmentSecret);
+	dst.disable_channel_signer_op(&src_node_id, &chan_id, SignerOp::SignCounterpartyCommitment);
+	dst.node.handle_commitment_signed_batch_test(src_node_id, &payment_event.commitment_msg);
 	check_added_monitors(dst, 1);
 
 	let mut enabled_signer_ops = new_hash_set();
 	log_trace!(dst.logger, "enable_signer_op_order={:?}", enable_signer_op_order);
 	for op in enable_signer_op_order {
 		enabled_signer_ops.insert(op);
-		dst.enable_channel_signer_op(&src.node.get_our_node_id(), &chan_id, op);
-		dst.node.signer_unblocked(Some((src.node.get_our_node_id(), chan_id)));
+		dst.enable_channel_signer_op(&src_node_id, &chan_id, op);
+		dst.node.signer_unblocked(Some((src_node_id, chan_id)));
 
 		if enabled_signer_ops.contains(&SignerOp::GetPerCommitmentPoint)
 			&& enabled_signer_ops.contains(&SignerOp::ReleaseCommitmentSecret)
 		{
 			// We are just able to send revoke_and_ack
 			if op == SignerOp::GetPerCommitmentPoint || op == SignerOp::ReleaseCommitmentSecret {
-				get_event_msg!(dst, MessageSendEvent::SendRevokeAndACK, src.node.get_our_node_id());
+				get_event_msg!(dst, MessageSendEvent::SendRevokeAndACK, src_node_id);
 			}
 			// We either just sent or previously sent revoke_and_ack
 			// and now we are able to send commitment_signed
 			if op == SignerOp::SignCounterpartyCommitment {
-				get_htlc_update_msgs(dst, &src.node.get_our_node_id());
+				get_htlc_update_msgs(dst, &src_node_id);
 			}
 		} else {
 			// We can't send either message until RAA is unblocked
@@ -533,6 +521,9 @@ fn do_test_async_raa_peer_disconnect(
 	// Send a payment.
 	let src = &nodes[0];
 	let dst = &nodes[1];
+	let src_node_id = src.node.get_our_node_id();
+	let dst_node_id = dst.node.get_our_node_id();
+
 	let (route, our_payment_hash, _our_payment_preimage, our_payment_secret) =
 		get_route_and_payment_hash!(src, dst, 8000000);
 	let recipient_fields = RecipientOnionFields::secret_only(our_payment_secret);
@@ -548,10 +539,10 @@ fn do_test_async_raa_peer_disconnect(
 		assert_eq!(events.len(), 1);
 		SendEvent::from_event(events.remove(0))
 	};
-	assert_eq!(payment_event.node_id, dst.node.get_our_node_id());
+	assert_eq!(payment_event.node_id, dst_node_id);
 	assert_eq!(payment_event.msgs.len(), 1);
 
-	dst.node.handle_update_add_htlc(src.node.get_our_node_id(), &payment_event.msgs[0]);
+	dst.node.handle_update_add_htlc(src_node_id, &payment_event.msgs[0]);
 
 	if test_case == UnblockSignerAcrossDisconnectCase::BeforeMonitorRestored {
 		// Fail to persist the monitor update when handling the commitment_signed.
@@ -560,19 +551,16 @@ fn do_test_async_raa_peer_disconnect(
 
 	// Mark dst's signer as unavailable and handle src's commitment_signed: while dst won't yet have a
 	// `commitment_signed` of its own to offer, it should publish a `revoke_and_ack`.
-	dst.disable_channel_signer_op(&src.node.get_our_node_id(), &chan_id, block_raa_signer_op);
-	dst.node.handle_commitment_signed_batch_test(
-		src.node.get_our_node_id(),
-		&payment_event.commitment_msg,
-	);
+	dst.disable_channel_signer_op(&src_node_id, &chan_id, block_raa_signer_op);
+	dst.node.handle_commitment_signed_batch_test(src_node_id, &payment_event.commitment_msg);
 	check_added_monitors(dst, 1);
 
 	let events = dst.node.get_and_clear_pending_msg_events();
 	assert!(events.is_empty(), "expected no message, got {}", events.len());
 
 	// Now disconnect and reconnect the peers.
-	src.node.peer_disconnected(dst.node.get_our_node_id());
-	dst.node.peer_disconnected(src.node.get_our_node_id());
+	src.node.peer_disconnected(dst_node_id);
+	dst.node.peer_disconnected(src_node_id);
 
 	// do reestablish stuff
 	let init_msg = &msgs::Init {
@@ -580,7 +568,7 @@ fn do_test_async_raa_peer_disconnect(
 		networks: None,
 		remote_network_address: None,
 	};
-	src.node.peer_connected(dst.node.get_our_node_id(), init_msg, true).unwrap();
+	src.node.peer_connected(dst_node_id, init_msg, true).unwrap();
 	let reestablish_1 = get_chan_reestablish_msgs!(src, dst);
 	assert_eq!(reestablish_1.len(), 1);
 	let init_msg = &msgs::Init {
@@ -588,19 +576,19 @@ fn do_test_async_raa_peer_disconnect(
 		networks: None,
 		remote_network_address: None,
 	};
-	dst.node.peer_connected(src.node.get_our_node_id(), init_msg, false).unwrap();
+	dst.node.peer_connected(src_node_id, init_msg, false).unwrap();
 	let reestablish_2 = get_chan_reestablish_msgs!(dst, src);
 	assert_eq!(reestablish_2.len(), 1);
 
 	if test_case == UnblockSignerAcrossDisconnectCase::BeforeReestablish {
 		// Reenable the signer before the reestablish.
-		dst.enable_channel_signer_op(&src.node.get_our_node_id(), &chan_id, block_raa_signer_op);
+		dst.enable_channel_signer_op(&src_node_id, &chan_id, block_raa_signer_op);
 	}
 
-	dst.node.handle_channel_reestablish(src.node.get_our_node_id(), &reestablish_1[0]);
+	dst.node.handle_channel_reestablish(src_node_id, &reestablish_1[0]);
 
 	if test_case == UnblockSignerAcrossDisconnectCase::BeforeMonitorRestored {
-		dst.enable_channel_signer_op(&src.node.get_our_node_id(), &chan_id, block_raa_signer_op);
+		dst.enable_channel_signer_op(&src_node_id, &chan_id, block_raa_signer_op);
 		chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
 		let latest_update;
 		{
@@ -624,8 +612,8 @@ fn do_test_async_raa_peer_disconnect(
 	}
 
 	// Mark dst's signer as available and retry: we now expect to see dst's RAA + CS.
-	dst.enable_channel_signer_op(&src.node.get_our_node_id(), &chan_id, block_raa_signer_op);
-	dst.node.signer_unblocked(Some((src.node.get_our_node_id(), chan_id)));
+	dst.enable_channel_signer_op(&src_node_id, &chan_id, block_raa_signer_op);
+	dst.node.signer_unblocked(Some((src_node_id, chan_id)));
 
 	if test_case == UnblockSignerAcrossDisconnectCase::AtEnd {
 		let (_, revoke_and_ack, commitment_signed, resend_order) =
@@ -681,6 +669,9 @@ fn do_test_async_commitment_signature_peer_disconnect(
 	// Send a payment.
 	let src = &nodes[0];
 	let dst = &nodes[1];
+	let src_node_id = src.node.get_our_node_id();
+	let dst_node_id = dst.node.get_our_node_id();
+
 	let (route, our_payment_hash, _our_payment_preimage, our_payment_secret) =
 		get_route_and_payment_hash!(src, dst, 8000000);
 	let recipient_fields = RecipientOnionFields::secret_only(our_payment_secret);
@@ -696,10 +687,10 @@ fn do_test_async_commitment_signature_peer_disconnect(
 		assert_eq!(events.len(), 1);
 		SendEvent::from_event(events.remove(0))
 	};
-	assert_eq!(payment_event.node_id, dst.node.get_our_node_id());
+	assert_eq!(payment_event.node_id, dst_node_id);
 	assert_eq!(payment_event.msgs.len(), 1);
 
-	dst.node.handle_update_add_htlc(src.node.get_our_node_id(), &payment_event.msgs[0]);
+	dst.node.handle_update_add_htlc(src_node_id, &payment_event.msgs[0]);
 
 	if test_case == UnblockSignerAcrossDisconnectCase::BeforeMonitorRestored {
 		// Fail to persist the monitor update when handling the commitment_signed.
@@ -708,24 +699,17 @@ fn do_test_async_commitment_signature_peer_disconnect(
 
 	// Mark dst's signer as unavailable and handle src's commitment_signed: while dst won't yet have a
 	// `commitment_signed` of its own to offer, it should publish a `revoke_and_ack`.
-	dst.disable_channel_signer_op(
-		&src.node.get_our_node_id(),
-		&chan_id,
-		SignerOp::SignCounterpartyCommitment,
-	);
-	dst.node.handle_commitment_signed_batch_test(
-		src.node.get_our_node_id(),
-		&payment_event.commitment_msg,
-	);
+	dst.disable_channel_signer_op(&src_node_id, &chan_id, SignerOp::SignCounterpartyCommitment);
+	dst.node.handle_commitment_signed_batch_test(src_node_id, &payment_event.commitment_msg);
 	check_added_monitors(dst, 1);
 
 	if test_case != UnblockSignerAcrossDisconnectCase::BeforeMonitorRestored {
-		get_event_msg!(dst, MessageSendEvent::SendRevokeAndACK, src.node.get_our_node_id());
+		get_event_msg!(dst, MessageSendEvent::SendRevokeAndACK, src_node_id);
 	}
 
 	// Now disconnect and reconnect the peers.
-	src.node.peer_disconnected(dst.node.get_our_node_id());
-	dst.node.peer_disconnected(src.node.get_our_node_id());
+	src.node.peer_disconnected(dst_node_id);
+	dst.node.peer_disconnected(src_node_id);
 
 	// do reestablish stuff
 	let init_msg = &msgs::Init {
@@ -733,7 +717,7 @@ fn do_test_async_commitment_signature_peer_disconnect(
 		networks: None,
 		remote_network_address: None,
 	};
-	src.node.peer_connected(dst.node.get_our_node_id(), init_msg, true).unwrap();
+	src.node.peer_connected(dst_node_id, init_msg, true).unwrap();
 	let reestablish_1 = get_chan_reestablish_msgs!(src, dst);
 	assert_eq!(reestablish_1.len(), 1);
 	let init_msg = &msgs::Init {
@@ -741,27 +725,19 @@ fn do_test_async_commitment_signature_peer_disconnect(
 		networks: None,
 		remote_network_address: None,
 	};
-	dst.node.peer_connected(src.node.get_our_node_id(), init_msg, false).unwrap();
+	dst.node.peer_connected(src_node_id, init_msg, false).unwrap();
 	let reestablish_2 = get_chan_reestablish_msgs!(dst, src);
 	assert_eq!(reestablish_2.len(), 1);
 
 	if test_case == UnblockSignerAcrossDisconnectCase::BeforeReestablish {
 		// Reenable the signer before the reestablish.
-		dst.enable_channel_signer_op(
-			&src.node.get_our_node_id(),
-			&chan_id,
-			SignerOp::SignCounterpartyCommitment,
-		);
+		dst.enable_channel_signer_op(&src_node_id, &chan_id, SignerOp::SignCounterpartyCommitment);
 	}
 
-	dst.node.handle_channel_reestablish(src.node.get_our_node_id(), &reestablish_1[0]);
+	dst.node.handle_channel_reestablish(src_node_id, &reestablish_1[0]);
 
 	if test_case == UnblockSignerAcrossDisconnectCase::BeforeMonitorRestored {
-		dst.enable_channel_signer_op(
-			&src.node.get_our_node_id(),
-			&chan_id,
-			SignerOp::SignCounterpartyCommitment,
-		);
+		dst.enable_channel_signer_op(&src_node_id, &chan_id, SignerOp::SignCounterpartyCommitment);
 		chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
 		let latest_update;
 		{
@@ -782,12 +758,8 @@ fn do_test_async_commitment_signature_peer_disconnect(
 	}
 
 	// Mark dst's signer as available and retry: we now expect to see dst's `commitment_signed`.
-	dst.enable_channel_signer_op(
-		&src.node.get_our_node_id(),
-		&chan_id,
-		SignerOp::SignCounterpartyCommitment,
-	);
-	dst.node.signer_unblocked(Some((src.node.get_our_node_id(), chan_id)));
+	dst.enable_channel_signer_op(&src_node_id, &chan_id, SignerOp::SignCounterpartyCommitment);
+	dst.node.signer_unblocked(Some((src_node_id, chan_id)));
 
 	if test_case == UnblockSignerAcrossDisconnectCase::AtEnd {
 		let (_, _, commitment_signed, _) = handle_chan_reestablish_msgs!(dst, src);
@@ -1327,20 +1299,10 @@ fn do_test_closing_signed(extra_closing_signed: bool, reconnect: bool) {
 	assert!(nodes[0].node.list_channels().is_empty());
 	assert!(nodes[1].node.list_channels().is_empty());
-	check_closed_event!(
-		nodes[0],
-		1,
-		ClosureReason::LocallyInitiatedCooperativeClosure,
-		[node_b_id],
-		100000
-	);
-	check_closed_event!(
-		nodes[1],
-		1,
-		ClosureReason::CounterpartyInitiatedCooperativeClosure,
-		[node_a_id],
-		100000
-	);
+	let reason_a = ClosureReason::LocallyInitiatedCooperativeClosure;
+	check_closed_event!(nodes[0], 1, reason_a, [node_b_id], 100000);
+	let reason_b = ClosureReason::CounterpartyInitiatedCooperativeClosure;
+	check_closed_event!(nodes[1], 1, reason_b, [node_a_id], 100000);
 }
 
 #[test]
diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs
index 05967fa6d23..245479e1df8 100644
--- a/lightning/src/ln/functional_test_utils.rs
+++ b/lightning/src/ln/functional_test_utils.rs
@@ -799,12 +799,8 @@ impl<'a, 'b, 'c> Drop for Node<'a, 'b, 'c> {
 		{
 			for channel_id in self.chain_monitor.chain_monitor.list_monitors() {
 				let mut w = test_utils::TestVecWriter(Vec::new());
-				self.chain_monitor
-					.chain_monitor
-					.get_monitor(channel_id)
-					.unwrap()
-					.write(&mut w)
-					.unwrap();
+				let mon = self.chain_monitor.chain_monitor.get_monitor(channel_id).unwrap();
+				mon.write(&mut w).unwrap();
 				let (_, deserialized_monitor) = <(BlockHash, ChannelMonitor<TestChannelSigner>)>::read(
 					&mut io::Cursor::new(&w.0),
@@ -1463,22 +1459,21 @@ pub fn sign_funding_transaction<'a, 'b, 'c>(
 	node_a: &Node<'a, 'b, 'c>, node_b: &Node<'a, 'b, 'c>, channel_value: u64,
 	expected_temporary_channel_id: ChannelId,
 ) -> Transaction {
+	let node_b_id = node_b.node.get_our_node_id();
+	let node_a_id = node_a.node.get_our_node_id();
+
 	let (temporary_channel_id, tx, _) =
-		create_funding_transaction(node_a, &node_b.node.get_our_node_id(), channel_value, 42);
+		create_funding_transaction(node_a, &node_b_id, channel_value, 42);
 	assert_eq!(temporary_channel_id, expected_temporary_channel_id);
 
 	assert!(node_a
 		.node
-		.funding_transaction_generated(
-			temporary_channel_id,
-			node_b.node.get_our_node_id(),
-			tx.clone()
-		)
+		.funding_transaction_generated(temporary_channel_id, node_b_id, tx.clone())
 		.is_ok());
 	check_added_monitors!(node_a, 0);
 
 	let funding_created_msg =
-		get_event_msg!(node_a, MessageSendEvent::SendFundingCreated, node_b.node.get_our_node_id());
+		get_event_msg!(node_a, MessageSendEvent::SendFundingCreated, node_b_id);
 	assert_eq!(funding_created_msg.temporary_channel_id, expected_temporary_channel_id);
 
 	let channel_id = ChannelId::v1_from_funding_txid(
@@ -1486,26 +1481,24 @@ pub fn sign_funding_transaction<'a, 'b, 'c>(
 		funding_created_msg.funding_output_index,
 	);
 
-	node_b.node.handle_funding_created(node_a.node.get_our_node_id(), &funding_created_msg);
+	node_b.node.handle_funding_created(node_a_id, &funding_created_msg);
 	{
 		let mut added_monitors = node_b.chain_monitor.added_monitors.lock().unwrap();
 		assert_eq!(added_monitors.len(), 1);
 		assert_eq!(added_monitors[0].0, channel_id);
 		added_monitors.clear();
 	}
-	expect_channel_pending_event(&node_b, &node_a.node.get_our_node_id());
+	expect_channel_pending_event(&node_b, &node_a_id);
 
-	node_a.node.handle_funding_signed(
-		node_b.node.get_our_node_id(),
-		&get_event_msg!(node_b, MessageSendEvent::SendFundingSigned, node_a.node.get_our_node_id()),
-	);
+	let bs_funding_signed = get_event_msg!(node_b, MessageSendEvent::SendFundingSigned, node_a_id);
+	node_a.node.handle_funding_signed(node_b_id, &bs_funding_signed);
 	{
 		let mut added_monitors = node_a.chain_monitor.added_monitors.lock().unwrap();
 		assert_eq!(added_monitors.len(), 1);
 		assert_eq!(added_monitors[0].0, channel_id);
 		added_monitors.clear();
 	}
-	expect_channel_pending_event(&node_a, &node_b.node.get_our_node_id());
+	expect_channel_pending_event(&node_a, &node_b_id);
 
 	let events_4 = node_a.node.get_and_clear_pending_events();
 	assert_eq!(events_4.len(), 0);
@@ -1517,11 +1510,7 @@ pub fn sign_funding_transaction<'a, 'b, 'c>(
 	// Ensure that funding_transaction_generated is idempotent.
 	assert!(node_a
 		.node
-		.funding_transaction_generated(
-			temporary_channel_id,
-			node_b.node.get_our_node_id(),
-			tx.clone()
-		)
+		.funding_transaction_generated(temporary_channel_id, node_b_id, tx.clone())
 		.is_err());
 	assert!(node_a.node.get_and_clear_pending_msg_events().is_empty());
 	check_added_monitors!(node_a, 0);
@@ -1537,24 +1526,17 @@ pub fn open_zero_conf_channel<'a, 'b, 'c, 'd>(
 	let initiator_channels = initiator.node.list_usable_channels().len();
 	let receiver_channels = receiver.node.list_usable_channels().len();
 
+	let receiver_node_id = receiver.node.get_our_node_id();
+	let initiator_node_id = initiator.node.get_our_node_id();
+
 	initiator
 		.node
-		.create_channel(
-			receiver.node.get_our_node_id(),
-			100_000,
-			10_001,
-			42,
-			None,
-			initiator_config,
-		)
+		.create_channel(receiver_node_id, 100_000, 10_001, 42, None, initiator_config)
 		.unwrap();
-	let open_channel = get_event_msg!(
-		initiator,
-		MessageSendEvent::SendOpenChannel,
-		receiver.node.get_our_node_id()
-	);
+	let open_channel =
+		get_event_msg!(initiator, MessageSendEvent::SendOpenChannel, receiver_node_id);
 
-	receiver.node.handle_open_channel(initiator.node.get_our_node_id(), &open_channel);
+	receiver.node.handle_open_channel(initiator_node_id, &open_channel);
 	let events = receiver.node.get_and_clear_pending_events();
 	assert_eq!(events.len(), 1);
 	match events[0] {
@@ -1563,7 +1545,7 @@ pub fn open_zero_conf_channel<'a, 'b, 'c, 'd>(
 			.node
 			.accept_inbound_channel_from_trusted_peer_0conf(
 				&temporary_channel_id,
-				&initiator.node.get_our_node_id(),
+				&initiator_node_id,
 				0,
 				None,
 			)
@@ -1572,41 +1554,31 @@ pub fn open_zero_conf_channel<'a, 'b, 'c, 'd>(
 		_ => panic!("Unexpected event"),
 	};
 
-	let accept_channel = get_event_msg!(
-		receiver,
-		MessageSendEvent::SendAcceptChannel,
-		initiator.node.get_our_node_id()
-	);
+	let accept_channel =
+		get_event_msg!(receiver, MessageSendEvent::SendAcceptChannel, initiator_node_id);
 	assert_eq!(accept_channel.common_fields.minimum_depth, 0);
-	initiator.node.handle_accept_channel(receiver.node.get_our_node_id(), &accept_channel);
+	initiator.node.handle_accept_channel(receiver_node_id, &accept_channel);
 
 	let (temporary_channel_id, tx, _) =
-		create_funding_transaction(&initiator, &receiver.node.get_our_node_id(), 100_000, 42);
+		create_funding_transaction(&initiator, &receiver_node_id, 100_000, 42);
 	initiator
 		.node
-		.funding_transaction_generated(
-			temporary_channel_id,
-			receiver.node.get_our_node_id(),
-			tx.clone(),
-		)
+		.funding_transaction_generated(temporary_channel_id, receiver_node_id, tx.clone())
 		.unwrap();
-	let funding_created = get_event_msg!(
-		initiator,
-		MessageSendEvent::SendFundingCreated,
-		receiver.node.get_our_node_id()
-	);
+	let funding_created =
+		get_event_msg!(initiator, MessageSendEvent::SendFundingCreated, receiver_node_id);
 
-	receiver.node.handle_funding_created(initiator.node.get_our_node_id(), &funding_created);
+	receiver.node.handle_funding_created(initiator_node_id, &funding_created);
 	check_added_monitors!(receiver, 1);
 	let bs_signed_locked = receiver.node.get_and_clear_pending_msg_events();
 	assert_eq!(bs_signed_locked.len(), 2);
 	let as_channel_ready;
 	match &bs_signed_locked[0] {
 		MessageSendEvent::SendFundingSigned { node_id, msg } => {
-			assert_eq!(*node_id, initiator.node.get_our_node_id());
-			initiator.node.handle_funding_signed(receiver.node.get_our_node_id(), &msg);
-			expect_channel_pending_event(&initiator, &receiver.node.get_our_node_id());
-			expect_channel_pending_event(&receiver, &initiator.node.get_our_node_id());
+			assert_eq!(*node_id, initiator_node_id);
+			initiator.node.handle_funding_signed(receiver_node_id, &msg);
+			expect_channel_pending_event(&initiator, &receiver_node_id);
+			expect_channel_pending_event(&receiver, &initiator_node_id);
 			check_added_monitors!(initiator, 1);
 
 			assert_eq!(initiator.tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
@@ -1615,39 +1587,30 @@ pub fn open_zero_conf_channel<'a, 'b, 'c, 'd>(
 				tx
 			);
 
-			as_channel_ready = get_event_msg!(
-				initiator,
-				MessageSendEvent::SendChannelReady,
-				receiver.node.get_our_node_id()
-			);
+			as_channel_ready =
+				get_event_msg!(initiator, MessageSendEvent::SendChannelReady, receiver_node_id);
 		},
 		_ => panic!("Unexpected event"),
 	}
 	match &bs_signed_locked[1] {
 		MessageSendEvent::SendChannelReady { node_id, msg } => {
-			assert_eq!(*node_id, initiator.node.get_our_node_id());
-			initiator.node.handle_channel_ready(receiver.node.get_our_node_id(), &msg);
-			expect_channel_ready_event(&initiator, &receiver.node.get_our_node_id());
+			assert_eq!(*node_id, initiator_node_id);
+			initiator.node.handle_channel_ready(receiver_node_id, &msg);
+			expect_channel_ready_event(&initiator, &receiver_node_id);
 		},
 		_ => panic!("Unexpected event"),
 	}
 
-	receiver.node.handle_channel_ready(initiator.node.get_our_node_id(), &as_channel_ready);
-	expect_channel_ready_event(&receiver, &initiator.node.get_our_node_id());
+	receiver.node.handle_channel_ready(initiator_node_id, &as_channel_ready);
+	expect_channel_ready_event(&receiver, &initiator_node_id);
 
-	let as_channel_update = get_event_msg!(
-		initiator,
-		MessageSendEvent::SendChannelUpdate,
-		receiver.node.get_our_node_id()
-	);
-	let bs_channel_update = get_event_msg!(
-		receiver,
-		MessageSendEvent::SendChannelUpdate,
-		initiator.node.get_our_node_id()
-	);
+	let as_channel_update =
+		get_event_msg!(initiator, MessageSendEvent::SendChannelUpdate, receiver_node_id);
+	let bs_channel_update =
+		get_event_msg!(receiver, MessageSendEvent::SendChannelUpdate, initiator_node_id);
 
-	initiator.node.handle_channel_update(receiver.node.get_our_node_id(), &bs_channel_update);
-	receiver.node.handle_channel_update(initiator.node.get_our_node_id(), &as_channel_update);
+	initiator.node.handle_channel_update(receiver_node_id, &bs_channel_update);
+	receiver.node.handle_channel_update(initiator_node_id, &as_channel_update);
 
 	assert_eq!(initiator.node.list_usable_channels().len(), initiator_channels + 1);
 	assert_eq!(receiver.node.list_usable_channels().len(), receiver_channels + 1);
@@ -1658,12 +1621,12 @@ pub fn open_zero_conf_channel<'a, 'b, 'c, 'd>(
 pub fn exchange_open_accept_chan<'a, 'b, 'c>(
 	node_a: &Node<'a, 'b, 'c>, node_b: &Node<'a, 'b, 'c>, channel_value: u64, push_msat: u64,
 ) -> ChannelId {
-	let create_chan_id = node_a
-		.node
-		.create_channel(node_b.node.get_our_node_id(), channel_value, push_msat, 42, None, None)
-		.unwrap();
-	let open_channel_msg =
-		get_event_msg!(node_a, MessageSendEvent::SendOpenChannel, node_b.node.get_our_node_id());
+	let node_a_id = node_a.node.get_our_node_id();
+	let node_b_id = node_b.node.get_our_node_id();
+
+	let create_chan_id =
+		node_a.node.create_channel(node_b_id, channel_value, push_msat, 42, None, None).unwrap();
+	let open_channel_msg = get_event_msg!(node_a, MessageSendEvent::SendOpenChannel, node_b_id);
 	assert_eq!(open_channel_msg.common_fields.temporary_channel_id, create_chan_id);
 	assert_eq!(
 		node_a
@@ -1675,7 +1638,7 @@ pub fn exchange_open_accept_chan<'a, 'b, 'c>(
 			.user_channel_id,
 		42
 	);
-	node_b.node.handle_open_channel(node_a.node.get_our_node_id(), &open_channel_msg);
+	node_b.node.handle_open_channel(node_a_id, &open_channel_msg);
 	if node_b.node.get_current_default_configuration().manually_accept_inbound_channels {
 		let events = node_b.node.get_and_clear_pending_events();
 		assert_eq!(events.len(), 1);
@@ -1687,10 +1650,9 @@ pub fn exchange_open_accept_chan<'a, 'b, 'c>(
 			_ => panic!("Unexpected event"),
 		};
 	}
-	let accept_channel_msg =
-		get_event_msg!(node_b, MessageSendEvent::SendAcceptChannel, node_a.node.get_our_node_id());
+	let accept_channel_msg = get_event_msg!(node_b, MessageSendEvent::SendAcceptChannel, node_a_id);
 	assert_eq!(accept_channel_msg.common_fields.temporary_channel_id, create_chan_id);
-	node_a.node.handle_accept_channel(node_b.node.get_our_node_id(), &accept_channel_msg);
+	node_a.node.handle_accept_channel(node_b_id, &accept_channel_msg);
 	assert_ne!(
 		node_b
 			.node
@@ -1718,14 +1680,12 @@ pub fn create_chan_between_nodes_with_value_confirm_first<'a, 'b, 'c, 'd>(
 ) {
 	confirm_transaction_at(node_conf, tx, conf_height);
 	connect_blocks(node_conf, CHAN_CONFIRM_DEPTH - 1);
-	node_recv.node.handle_channel_ready(
-		node_conf.node.get_our_node_id(),
-		&get_event_msg!(
-			node_conf,
-			MessageSendEvent::SendChannelReady,
-			node_recv.node.get_our_node_id()
-		),
+	let channel_ready = get_event_msg!(
+		node_conf,
+		MessageSendEvent::SendChannelReady,
+		node_recv.node.get_our_node_id()
	);
+	node_recv.node.handle_channel_ready(node_conf.node.get_our_node_id(), &channel_ready);
 }
 
 pub fn create_chan_between_nodes_with_value_confirm_second<'a, 'b, 'c>(
@@ -1790,13 +1750,13 @@ pub fn create_chan_between_nodes_with_value_b<'a, 'b, 'c>(
 	node_a: &Node<'a, 'b, 'c>, node_b: &Node<'a, 'b, 'c>,
 	as_funding_msgs: &(msgs::ChannelReady, msgs::AnnouncementSignatures),
 ) -> (msgs::ChannelAnnouncement, msgs::ChannelUpdate, msgs::ChannelUpdate) {
-	node_b.node.handle_channel_ready(node_a.node.get_our_node_id(), &as_funding_msgs.0);
-	let bs_announcement_sigs = get_event_msg!(
-		node_b,
-		MessageSendEvent::SendAnnouncementSignatures,
-		node_a.node.get_our_node_id()
-	);
-	node_b.node.handle_announcement_signatures(node_a.node.get_our_node_id(), &as_funding_msgs.1);
+	let node_a_id = node_a.node.get_our_node_id();
+	let node_b_id = node_b.node.get_our_node_id();
+
+	node_b.node.handle_channel_ready(node_a_id, &as_funding_msgs.0);
+	let bs_announcement_sigs =
+		get_event_msg!(node_b, MessageSendEvent::SendAnnouncementSignatures, node_a_id);
+	node_b.node.handle_announcement_signatures(node_a_id, &as_funding_msgs.1);
 
 	let events_7 = node_b.node.get_and_clear_pending_msg_events();
 	assert_eq!(events_7.len(), 1);
@@ -1807,9 +1767,7 @@ pub fn create_chan_between_nodes_with_value_b<'a, 'b, 'c>(
 		_ => panic!("Unexpected event"),
 	};
 
-	node_a
-		.node
-		.handle_announcement_signatures(node_b.node.get_our_node_id(), &bs_announcement_sigs);
+	node_a.node.handle_announcement_signatures(node_b_id, &bs_announcement_sigs);
 	let events_8 = node_a.node.get_and_clear_pending_msg_events();
 	assert_eq!(events_8.len(), 1);
 	let as_update = match events_8[0] {
@@ -1828,7 +1786,7 @@ pub fn create_chan_between_nodes_with_value_b<'a, 'b, 'c>(
 
 	*node_a.network_chan_count.borrow_mut() += 1;
 
-	expect_channel_ready_event(&node_b, &node_a.node.get_our_node_id());
+	expect_channel_ready_event(&node_b, &node_a_id);
 	((*announcement).clone(), as_update, bs_update)
 }
 
@@ -1857,61 +1815,37 @@ pub fn create_announced_chan_between_nodes_with_value<'a, 'b, 'c: 'd, 'd>(
 pub fn create_unannounced_chan_between_nodes_with_value<'a, 'b, 'c, 'd>(
 	nodes: &'a Vec<Node<'a, 'b, 'c>>, a: usize, b: usize, channel_value: u64, push_msat: u64,
 ) -> (msgs::ChannelReady, Transaction) {
+	let node_a_id = nodes[a].node.get_our_node_id();
+	let node_b_id = nodes[b].node.get_our_node_id();
+
 	let mut no_announce_cfg = nodes[a].node.get_current_default_configuration().clone();
 	no_announce_cfg.channel_handshake_config.announce_for_forwarding = false;
 	nodes[a]
 		.node
-		.create_channel(
-			nodes[b].node.get_our_node_id(),
-			channel_value,
-			push_msat,
-			42,
-			None,
-			Some(no_announce_cfg),
-		)
+		.create_channel(node_b_id, channel_value, push_msat, 42, None, Some(no_announce_cfg))
 		.unwrap();
-	let open_channel = get_event_msg!(
-		nodes[a],
-		MessageSendEvent::SendOpenChannel,
-		nodes[b].node.get_our_node_id()
-	);
-	nodes[b].node.handle_open_channel(nodes[a].node.get_our_node_id(), &open_channel);
-	let accept_channel = get_event_msg!(
-		nodes[b],
-		MessageSendEvent::SendAcceptChannel,
-		nodes[a].node.get_our_node_id()
-	);
-	nodes[a].node.handle_accept_channel(nodes[b].node.get_our_node_id(), &accept_channel);
+	let open_channel = get_event_msg!(nodes[a], MessageSendEvent::SendOpenChannel, node_b_id);
+	nodes[b].node.handle_open_channel(node_a_id, &open_channel);
+	let accept_channel = get_event_msg!(nodes[b], MessageSendEvent::SendAcceptChannel, node_a_id);
+	nodes[a].node.handle_accept_channel(node_b_id, &accept_channel);
 
 	let (temporary_channel_id, tx, _) =
-		create_funding_transaction(&nodes[a], &nodes[b].node.get_our_node_id(), channel_value, 42);
+		create_funding_transaction(&nodes[a], &node_b_id, channel_value, 42);
 	nodes[a]
 		.node
-		.funding_transaction_generated(
-			temporary_channel_id,
-			nodes[b].node.get_our_node_id(),
-			tx.clone(),
-		)
+		.funding_transaction_generated(temporary_channel_id, node_b_id, tx.clone())
 		.unwrap();
-	nodes[b].node.handle_funding_created(
-		nodes[a].node.get_our_node_id(),
-		&get_event_msg!(
-			nodes[a],
-			MessageSendEvent::SendFundingCreated,
-			nodes[b].node.get_our_node_id()
-		),
-	);
+	let as_funding_created =
+		get_event_msg!(nodes[a], MessageSendEvent::SendFundingCreated, node_b_id);
+	nodes[b].node.handle_funding_created(node_a_id, &as_funding_created);
 	check_added_monitors!(nodes[b], 1);
 
-	let cs_funding_signed = get_event_msg!(
-		nodes[b],
-		MessageSendEvent::SendFundingSigned,
-		nodes[a].node.get_our_node_id()
-	);
-	expect_channel_pending_event(&nodes[b], &nodes[a].node.get_our_node_id());
+	let cs_funding_signed =
+		get_event_msg!(nodes[b], MessageSendEvent::SendFundingSigned, node_a_id);
+	expect_channel_pending_event(&nodes[b], &node_a_id);
 
-	nodes[a].node.handle_funding_signed(nodes[b].node.get_our_node_id(), &cs_funding_signed);
-	expect_channel_pending_event(&nodes[a], &nodes[b].node.get_our_node_id());
+	nodes[a].node.handle_funding_signed(node_b_id, &cs_funding_signed);
+	expect_channel_pending_event(&nodes[a], &node_b_id);
 	check_added_monitors!(nodes[a], 1);
 
 	assert_eq!(nodes[a].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
@@ -1924,35 +1858,17 @@ pub fn create_unannounced_chan_between_nodes_with_value<'a, 'b, 'c, 'd>(
 	connect_blocks(&nodes[a], CHAN_CONFIRM_DEPTH - 1);
 	confirm_transaction_at(&nodes[b], &tx, conf_height);
 	connect_blocks(&nodes[b], CHAN_CONFIRM_DEPTH - 1);
-	let as_channel_ready = get_event_msg!(
-		nodes[a],
-		MessageSendEvent::SendChannelReady,
-		nodes[b].node.get_our_node_id()
-	);
-	nodes[a].node.handle_channel_ready(
-		nodes[b].node.get_our_node_id(),
-		&get_event_msg!(
-			nodes[b],
-			MessageSendEvent::SendChannelReady,
-			nodes[a].node.get_our_node_id()
-		),
-	);
-	expect_channel_ready_event(&nodes[a], &nodes[b].node.get_our_node_id());
-	let as_update = get_event_msg!(
-		nodes[a],
-		MessageSendEvent::SendChannelUpdate,
-		nodes[b].node.get_our_node_id()
-	);
-	nodes[b].node.handle_channel_ready(nodes[a].node.get_our_node_id(), &as_channel_ready);
-	expect_channel_ready_event(&nodes[b], &nodes[a].node.get_our_node_id());
-	let bs_update = get_event_msg!(
-		nodes[b],
-		MessageSendEvent::SendChannelUpdate,
-		nodes[a].node.get_our_node_id()
-	);
-
-	nodes[a].node.handle_channel_update(nodes[b].node.get_our_node_id(), &bs_update);
-	nodes[b].node.handle_channel_update(nodes[a].node.get_our_node_id(), &as_update);
+	let as_channel_ready = get_event_msg!(nodes[a], MessageSendEvent::SendChannelReady, node_b_id);
+	let bs_channel_ready = get_event_msg!(nodes[b], MessageSendEvent::SendChannelReady, node_a_id);
+	nodes[a].node.handle_channel_ready(node_b_id, &bs_channel_ready);
+	expect_channel_ready_event(&nodes[a], &node_b_id);
+	let as_update = get_event_msg!(nodes[a], MessageSendEvent::SendChannelUpdate, node_b_id);
+	nodes[b].node.handle_channel_ready(node_a_id, &as_channel_ready);
+	expect_channel_ready_event(&nodes[b], &node_a_id);
+	let bs_update = get_event_msg!(nodes[b], MessageSendEvent::SendChannelUpdate, node_a_id);
+
+	nodes[a].node.handle_channel_update(node_b_id, &bs_update);
+	nodes[b].node.handle_channel_update(node_a_id, &as_update);
 
 	let mut found_a = false;
 	for chan in nodes[a].node.list_usable_channels() {
@@ -2306,10 +2222,9 @@ pub fn close_channel<'a, 'b, 'c>(
 	let (tx_a, tx_b);
 
 	node_a.close_channel(channel_id, &node_b.get_our_node_id()).unwrap();
-	node_b.handle_shutdown(
-		node_a.get_our_node_id(),
-		&get_event_msg!(struct_a, MessageSendEvent::SendShutdown, node_b.get_our_node_id()),
-	);
+	let as_shutdown =
+		get_event_msg!(struct_a, MessageSendEvent::SendShutdown, node_b.get_our_node_id());
+	node_b.handle_shutdown(node_a.get_our_node_id(), &as_shutdown);
 
 	let events_1 = node_b.get_and_clear_pending_msg_events();
 	assert!(events_1.len() >= 1);
@@ -2339,14 +2254,9 @@ pub fn close_channel<'a, 'b, 'c>(
 		assert!(node_a.get_and_clear_pending_msg_events().is_empty());
 		node_a.handle_closing_signed(node_b.get_our_node_id(), &closing_signed_b.unwrap());
-		node_b.handle_closing_signed(
-			node_a.get_our_node_id(),
-			&get_event_msg!(
-				struct_a,
-				MessageSendEvent::SendClosingSigned,
-				node_b.get_our_node_id()
-			),
-		);
+		let as_closing_signed =
+			get_event_msg!(struct_a, MessageSendEvent::SendClosingSigned, node_b.get_our_node_id());
+		node_b.handle_closing_signed(node_a.get_our_node_id(), &as_closing_signed);
 		assert_eq!(broadcaster_b.txn_broadcasted.lock().unwrap().len(), 1);
 		tx_b = broadcaster_b.txn_broadcasted.lock().unwrap().remove(0);
 		let (bs_update, closing_signed_b) =
@@ -2361,16 +2271,11 @@ pub fn close_channel<'a, 'b, 'c>(
 	} else {
 		let closing_signed_a =
 			get_event_msg!(struct_a, MessageSendEvent::SendClosingSigned, node_b.get_our_node_id());
-		node_b.handle_closing_signed(node_a.get_our_node_id(), &closing_signed_a);
-		node_a.handle_closing_signed(
-			node_b.get_our_node_id(),
-			&get_event_msg!(
-				struct_b,
-				MessageSendEvent::SendClosingSigned,
-				node_a.get_our_node_id()
-			),
-		);
+
+		let closing_signed_b =
+			get_event_msg!(struct_b, MessageSendEvent::SendClosingSigned, node_a.get_our_node_id());
+		node_a.handle_closing_signed(node_b.get_our_node_id(), &closing_signed_b);
 
 		assert_eq!(broadcaster_a.txn_broadcasted.lock().unwrap().len(), 1);
 		tx_a = broadcaster_a.txn_broadcasted.lock().unwrap().remove(0);
@@ -2633,25 +2538,24 @@ pub fn commitment_signed_dance_through_cp_raa(
 pub fn do_main_commitment_signed_dance(
 	node_a: &Node<'_, '_, '_>, node_b: &Node<'_, '_, '_>, fail_backwards: bool,
 ) -> (Option<MessageSendEvent>, msgs::RevokeAndACK) {
-	let (as_revoke_and_ack, as_commitment_signed) =
-		get_revoke_commit_msgs!(node_a, node_b.node.get_our_node_id());
+	let node_a_id = node_a.node.get_our_node_id();
+	let node_b_id = node_b.node.get_our_node_id();
+
+	let (as_revoke_and_ack, as_commitment_signed) = get_revoke_commit_msgs!(node_a, node_b_id);
 	check_added_monitors!(node_b, 0);
 	assert!(node_b.node.get_and_clear_pending_msg_events().is_empty());
-	node_b.node.handle_revoke_and_ack(node_a.node.get_our_node_id(), &as_revoke_and_ack);
+	node_b.node.handle_revoke_and_ack(node_a_id, &as_revoke_and_ack);
 	assert!(node_b.node.get_and_clear_pending_msg_events().is_empty());
 	check_added_monitors!(node_b, 1);
-	node_b
-		.node
-		.handle_commitment_signed_batch_test(node_a.node.get_our_node_id(), &as_commitment_signed);
+	node_b.node.handle_commitment_signed_batch_test(node_a_id, &as_commitment_signed);
 	let (bs_revoke_and_ack, extra_msg_option) = {
 		let mut events = node_b.node.get_and_clear_pending_msg_events();
 		assert!(events.len() <= 2);
-		let node_a_event =
-			remove_first_msg_event_to_node(&node_a.node.get_our_node_id(), &mut events);
+		let node_a_event = remove_first_msg_event_to_node(&node_a_id, &mut events);
 		(
 			match node_a_event {
 				MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
-					assert_eq!(*node_id, node_a.node.get_our_node_id());
+					assert_eq!(*node_id, node_a_id);
 					(*msg).clone()
 				},
 				_ => panic!("Unexpected event"),
@@ -2676,17 +2580,16 @@ pub fn do_commitment_signed_dance(
 	node_a: &Node<'_, '_, '_>, node_b: &Node<'_, '_, '_>,
 	commitment_signed: &Vec<msgs::CommitmentSigned>, fail_backwards: bool, skip_last_step: bool,
 ) {
+	let node_b_id = node_b.node.get_our_node_id();
+
 	check_added_monitors!(node_a, 0);
 	assert!(node_a.node.get_and_clear_pending_msg_events().is_empty());
-	node_a
-		.node
-		.handle_commitment_signed_batch_test(node_b.node.get_our_node_id(), commitment_signed);
+	node_a.node.handle_commitment_signed_batch_test(node_b_id, commitment_signed);
 	check_added_monitors!(node_a, 1);
 
 	// If this commitment signed dance was due to a claim, don't check for an RAA monitor update.
 	let channel_id = commitment_signed[0].channel_id;
-	let got_claim =
-		node_a.node.test_raa_monitor_updates_held(node_b.node.get_our_node_id(), channel_id);
+	let got_claim = node_a.node.test_raa_monitor_updates_held(node_b_id, channel_id);
 	if fail_backwards {
 		assert!(!got_claim);
 	}
@@ -2700,7 +2603,7 @@ pub fn do_commitment_signed_dance(
 		expect_pending_htlcs_forwardable_and_htlc_handling_failed!(
 			node_a,
 			[crate::events::HTLCHandlingFailureType::Forward {
-				node_id: Some(node_b.node.get_our_node_id()),
+				node_id: Some(node_b_id),
 				channel_id
 			}]
 		);
@@ -2714,7 +2617,7 @@ pub fn do_commitment_signed_dance(
 			number_of_msg_events += cp_pending_msg_events.len();
 			if cp_pending_msg_events.len() == 1 {
 				if let MessageSendEvent::UpdateHTLCs { .. } = cp_pending_msg_events[0] {
-					assert_ne!(*cp_id, node_b.node.get_our_node_id());
+					assert_ne!(*cp_id, node_b_id);
 				} else {
 					panic!("Unexpected event");
 				}
@@ -3060,10 +2963,10 @@ pub fn expect_payment_forwarded<CM: AChannelManager, H: NodeHolder<CM = CM>>(
 			// overpaid amount.
 			assert!(skimmed_fee_msat == expected_extra_fees_msat);
 			if !upstream_force_closed {
+				assert_eq!(prev_node.node().get_our_node_id(), prev_node_id.unwrap());
 				// Is the event prev_channel_id in one of the channels between the two nodes?
-				assert!(node.node().list_channels().iter().any(|x| x.counterparty.node_id
-					== prev_node.node().get_our_node_id()
-					&& prev_node.node().get_our_node_id() == prev_node_id.unwrap()
+				let node_chans = node.node().list_channels();
+				assert!(node_chans.iter().any(|x| x.counterparty.node_id == prev_node_id.unwrap()
 					&& x.channel_id == prev_channel_id.unwrap()
 					&& x.user_channel_id == prev_user_channel_id.unwrap()));
 			}
@@ -3074,20 +2977,19 @@ pub fn expect_payment_forwarded<CM: AChannelManager, H: NodeHolder<CM = CM>>(
 			// onchain transaction, just as the `total_fee_earned_msat` field. Rather than
 			// introducing yet another variable, we use the latter's state as a flag to detect
 			// this and only check if it's `Some`.
+				assert_eq!(next_node.node().get_our_node_id(), next_node_id.unwrap());
+				let node_chans = node.node().list_channels();
 				if total_fee_earned_msat.is_none() {
-					assert!(node
-						.node()
-						.list_channels()
+					assert!(node_chans
 						.iter()
-						.any(|x| x.counterparty.node_id == next_node.node().get_our_node_id()
-							&& next_node.node().get_our_node_id() == next_node_id.unwrap()
+						.any(|x| x.counterparty.node_id == next_node_id.unwrap()
 							&& x.channel_id == next_channel_id.unwrap()));
 				} else {
-					assert!(node.node().list_channels().iter().any(|x| x.counterparty.node_id
-						== next_node.node().get_our_node_id()
-						&& next_node.node().get_our_node_id() == next_node_id.unwrap()
-						&& x.channel_id == next_channel_id.unwrap()
-						&& x.user_channel_id == next_user_channel_id.unwrap()));
+					assert!(node_chans
+						.iter()
+						.any(|x| x.counterparty.node_id == next_node_id.unwrap()
+							&& x.channel_id == next_channel_id.unwrap()
+							&& x.user_channel_id == next_user_channel_id.unwrap()));
 				}
 			}
 			assert_eq!(claim_from_onchain_tx, downstream_force_closed);
@@ -4689,6 +4591,9 @@ pub fn handle_announce_close_broadcast_events<'a, 'b, 'c>(
 		dummy_connected = true
 	}
 
+	let node_a_id = nodes[a].node.get_our_node_id();
+	let node_b_id = nodes[b].node.get_our_node_id();
+
 	let events_1 = nodes[a].node.get_and_clear_pending_msg_events();
 	assert_eq!(events_1.len(), 2);
 	let as_update = match events_1[1] {
@@ -4700,20 +4605,20 @@ pub fn handle_announce_close_broadcast_events<'a, 'b, 'c>(
 			node_id,
 			action: msgs::ErrorAction::SendErrorMessage { ref msg },
 		} => {
-			assert_eq!(node_id, nodes[b].node.get_our_node_id());
+			assert_eq!(node_id, node_b_id);
 			assert_eq!(msg.data, expected_error);
 			if needs_err_handle {
-				nodes[b].node.handle_error(nodes[a].node.get_our_node_id(), msg);
+				nodes[b].node.handle_error(node_a_id, msg);
 			}
 		},
 		MessageSendEvent::HandleError {
 			node_id,
 			action: msgs::ErrorAction::DisconnectPeer { ref msg },
 		} => {
-			assert_eq!(node_id, nodes[b].node.get_our_node_id());
+			assert_eq!(node_id, node_b_id);
 			assert_eq!(msg.as_ref().unwrap().data, expected_error);
 			if needs_err_handle {
-				nodes[b].node.handle_error(nodes[a].node.get_our_node_id(), msg.as_ref().unwrap());
+				nodes[b].node.handle_error(node_a_id, msg.as_ref().unwrap());
			}
		},
		_ => panic!("Unexpected event"),
@@ -4738,14 +4643,14 @@ pub fn handle_announce_close_broadcast_events<'a, 'b, 'c>(
 			node_id,
 			action: msgs::ErrorAction::SendErrorMessage { ref msg },
 		} => {
-			assert_eq!(node_id, nodes[a].node.get_our_node_id());
+			assert_eq!(node_id, node_a_id);
 			assert_eq!(msg.data, expected_error);
 		},
 		MessageSendEvent::HandleError {
 			node_id,
 			action: msgs::ErrorAction::DisconnectPeer { ref msg },
 		} => {
-			assert_eq!(node_id, nodes[a].node.get_our_node_id());
+			assert_eq!(node_id, node_a_id);
 			assert_eq!(msg.as_ref().unwrap().data, expected_error);
 		},
 		_ => panic!("Unexpected event"),
@@ -4754,10 +4659,9 @@ pub fn handle_announce_close_broadcast_events<'a, 'b, 'c>(
 	if dummy_connected {
 		disconnect_dummy_node(&nodes[b]);
 	}
-	let node_id_a = nodes[a].node.get_our_node_id();
 	for node in nodes {
-		node.gossip_sync.handle_channel_update(Some(node_id_a), &as_update).unwrap();
-		node.gossip_sync.handle_channel_update(Some(node_id_a), &bs_update).unwrap();
+		node.gossip_sync.handle_channel_update(Some(node_a_id), &as_update).unwrap();
+		node.gossip_sync.handle_channel_update(Some(node_a_id), &bs_update).unwrap();
 	}
 }
 
@@ -4955,6 +4859,9 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) {
 	let reestablish_1 = get_chan_reestablish_msgs!(node_a, node_b);
 	let reestablish_2 = get_chan_reestablish_msgs!(node_b, node_a);
 
+	let node_a_id = node_a.node.get_our_node_id();
+	let node_b_id = node_b.node.get_our_node_id();
+
 	if send_channel_ready.0 {
 		// If a expects a channel_ready, it better not think it has received a revoke_and_ack
 		// from b
@@ -4986,7 +4893,7 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) {
 	let mut resp_1 = Vec::new();
 	for msg in reestablish_1 {
-		node_b.node.handle_channel_reestablish(node_a.node.get_our_node_id(), &msg);
+		node_b.node.handle_channel_reestablish(node_a_id, &msg);
 		resp_1.push(handle_chan_reestablish_msgs!(node_b, node_a));
 	}
 	if pending_cell_htlc_claims.0 != 0 || pending_cell_htlc_fails.0 != 0 {
@@ -4997,7 +4904,7 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) {
 	let mut resp_2 = Vec::new();
 	for msg in reestablish_2 {
-		node_a.node.handle_channel_reestablish(node_b.node.get_our_node_id(), &msg);
+		node_a.node.handle_channel_reestablish(node_b_id, &msg);
 		resp_2.push(handle_chan_reestablish_msgs!(node_a, node_b));
 	}
 	if pending_cell_htlc_claims.1 != 0 || pending_cell_htlc_fails.1 != 0 {
@@ -5022,7 +4929,7 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) {
 	for chan_msgs in resp_1.drain(..) {
 		if send_channel_ready.0 {
-			node_a.node.handle_channel_ready(node_b.node.get_our_node_id(), &chan_msgs.0.unwrap());
+			node_a.node.handle_channel_ready(node_b_id, &chan_msgs.0.unwrap());
 			let announcement_event = node_a.node.get_and_clear_pending_msg_events();
 			if !announcement_event.is_empty() {
 				assert_eq!(announcement_event.len(), 1);
@@ -5037,7 +4944,7 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) {
 		}
 		if pending_raa.0 {
 			assert!(chan_msgs.3 == RAACommitmentOrder::RevokeAndACKFirst);
-			node_a.node.handle_revoke_and_ack(node_b.node.get_our_node_id(), &chan_msgs.1.unwrap());
+			node_a.node.handle_revoke_and_ack(node_b_id, &chan_msgs.1.unwrap());
 			assert!(node_a.node.get_and_clear_pending_msg_events().is_empty());
 			check_added_monitors!(node_a, 1);
 		} else {
@@ -5062,15 +4969,13 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) {
 			);
 			assert!(commitment_update.update_fail_malformed_htlcs.is_empty());
 			for update_add in commitment_update.update_add_htlcs {
-				node_a.node.handle_update_add_htlc(node_b.node.get_our_node_id(), &update_add);
+				node_a.node.handle_update_add_htlc(node_b_id, &update_add);
 			}
 			for update_fulfill in commitment_update.update_fulfill_htlcs {
-				node_a
-					.node
-					.handle_update_fulfill_htlc(node_b.node.get_our_node_id(), &update_fulfill);
+				node_a.node.handle_update_fulfill_htlc(node_b_id, &update_fulfill);
 			}
 			for update_fail in commitment_update.update_fail_htlcs {
-				node_a.node.handle_update_fail_htlc(node_b.node.get_our_node_id(), &update_fail);
+				node_a.node.handle_update_fail_htlc(node_b_id, &update_fail);
 			}
 
 			if !pending_responding_commitment_signed.0 {
@@ -5082,19 +4987,14 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) {
 				);
 			} else {
 				node_a.node.handle_commitment_signed_batch_test(
-					node_b.node.get_our_node_id(),
+					node_b_id,
 					&commitment_update.commitment_signed,
 				);
 				check_added_monitors!(node_a, 1);
-				let as_revoke_and_ack = get_event_msg!(
-					node_a,
-					MessageSendEvent::SendRevokeAndACK,
-					node_b.node.get_our_node_id()
-				);
+				let as_revoke_and_ack =
+					get_event_msg!(node_a, MessageSendEvent::SendRevokeAndACK, node_b_id);
 				// No commitment_signed so get_event_msg's assert(len == 1) passes
-				node_b
-					.node
-					.handle_revoke_and_ack(node_a.node.get_our_node_id(), &as_revoke_and_ack);
+				node_b.node.handle_revoke_and_ack(node_a_id, &as_revoke_and_ack);
 				assert!(node_b.node.get_and_clear_pending_msg_events().is_empty());
 				check_added_monitors!(
 					node_b,
@@ -5108,7 +5008,7 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) {
 	for chan_msgs in resp_2.drain(..) {
 		if send_channel_ready.1 {
-			node_b.node.handle_channel_ready(node_a.node.get_our_node_id(), &chan_msgs.0.unwrap());
+			node_b.node.handle_channel_ready(node_a_id, &chan_msgs.0.unwrap());
 			let announcement_event = node_b.node.get_and_clear_pending_msg_events();
 			if !announcement_event.is_empty() {
 				assert_eq!(announcement_event.len(), 1);
@@ -5123,7 +5023,7 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) {
 		}
 		if pending_raa.1 {
 			assert!(chan_msgs.3 == RAACommitmentOrder::RevokeAndACKFirst);
-			node_b.node.handle_revoke_and_ack(node_a.node.get_our_node_id(), &chan_msgs.1.unwrap());
+			node_b.node.handle_revoke_and_ack(node_a_id, &chan_msgs.1.unwrap());
 			assert!(node_b.node.get_and_clear_pending_msg_events().is_empty());
 			check_added_monitors!(node_b, 1);
 		} else {
@@ -5148,15 +5048,13 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) {
 			);
 			assert!(commitment_update.update_fail_malformed_htlcs.is_empty());
 			for update_add in commitment_update.update_add_htlcs {
-				node_b.node.handle_update_add_htlc(node_a.node.get_our_node_id(), &update_add);
+				node_b.node.handle_update_add_htlc(node_a_id, &update_add);
 			}
 			for update_fulfill in commitment_update.update_fulfill_htlcs {
-				node_b
-					.node
-					.handle_update_fulfill_htlc(node_a.node.get_our_node_id(), &update_fulfill);
+				node_b.node.handle_update_fulfill_htlc(node_a_id, &update_fulfill);
 			}
 			for update_fail in commitment_update.update_fail_htlcs {
-				node_b.node.handle_update_fail_htlc(node_a.node.get_our_node_id(), &update_fail);
+				node_b.node.handle_update_fail_htlc(node_a_id, &update_fail);
 			}
 
 			if !pending_responding_commitment_signed.1 {
@@ -5168,19 +5066,14 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) {
 				);
 			} else {
 				node_b.node.handle_commitment_signed_batch_test(
-					node_a.node.get_our_node_id(),
+					node_a_id,
 					&commitment_update.commitment_signed,
 				);
 				check_added_monitors!(node_b, 1);
-				let bs_revoke_and_ack = get_event_msg!(
-					node_b,
-					MessageSendEvent::SendRevokeAndACK,
-					node_a.node.get_our_node_id()
-				);
+				let bs_revoke_and_ack =
+					get_event_msg!(node_b, MessageSendEvent::SendRevokeAndACK, node_a_id);
 				// No commitment_signed so get_event_msg's assert(len == 1) passes
-				node_a
-					.node
-					.handle_revoke_and_ack(node_b.node.get_our_node_id(), &bs_revoke_and_ack);
+				node_a.node.handle_revoke_and_ack(node_b_id, &bs_revoke_and_ack);
 				assert!(node_a.node.get_and_clear_pending_msg_events().is_empty());
 				check_added_monitors!(
 					node_a,
@@ -5206,11 +5099,14 @@ pub fn create_batch_channel_funding<'a, 'b, 'c>(
 	for (other_node, channel_value_satoshis, push_msat, user_channel_id, override_config) in params
 	{
+		let funding_node_id = funding_node.node.get_our_node_id();
+		let other_node_id = other_node.node.get_our_node_id();
+
 		// Initialize channel opening.
 		let temp_chan_id = funding_node
 			.node
 			.create_channel(
-				other_node.node.get_our_node_id(),
+				other_node_id,
 				*channel_value_satoshis,
 				*push_msat,
 				*user_channel_id,
@@ -5218,20 +5114,12 @@ pub fn create_batch_channel_funding<'a, 'b, 'c>(
 				override_config.clone(),
 			)
 			.unwrap();
-		let open_channel_msg = get_event_msg!(
-			funding_node,
-			MessageSendEvent::SendOpenChannel,
-			other_node.node.get_our_node_id()
-		);
-		other_node.node.handle_open_channel(funding_node.node.get_our_node_id(), &open_channel_msg);
-		let accept_channel_msg = get_event_msg!(
-			other_node,
-			MessageSendEvent::SendAcceptChannel,
-			funding_node.node.get_our_node_id()
-		);
-		funding_node
-			.node
-			.handle_accept_channel(other_node.node.get_our_node_id(), &accept_channel_msg);
+		let open_channel_msg =
+			get_event_msg!(funding_node, MessageSendEvent::SendOpenChannel, other_node_id);
+		other_node.node.handle_open_channel(funding_node_id, &open_channel_msg);
+		let accept_channel_msg =
+			get_event_msg!(other_node, MessageSendEvent::SendAcceptChannel, funding_node_id);
+		funding_node.node.handle_accept_channel(other_node_id, &accept_channel_msg);
 
 		// Create the corresponding funding output.
 		let events = funding_node.node.get_and_clear_pending_events();
@@ -5245,7 +5133,7 @@ pub fn create_batch_channel_funding<'a, 'b, 'c>(
 				user_channel_id: ref event_user_channel_id,
 			} => {
 				assert_eq!(temporary_channel_id, &temp_chan_id);
-				assert_eq!(counterparty_node_id, &other_node.node.get_our_node_id());
+				assert_eq!(counterparty_node_id, &other_node_id);
 				assert_eq!(channel_value_satoshis, event_channel_value_satoshis);
 				assert_eq!(user_channel_id, event_user_channel_id);
 				tx_outs.push(TxOut {
@@ -5255,7 +5143,7 @@ pub fn create_batch_channel_funding<'a, 'b, 'c>(
 			},
 			_ => panic!("Unexpected event"),
 		};
-		temp_chan_ids.push((temp_chan_id, other_node.node.get_our_node_id()));
+		temp_chan_ids.push((temp_chan_id, other_node_id));
 	}
 
 	// Compose the batch funding transaction and give it to the ChannelManager.
diff --git a/lightning/src/ln/priv_short_conf_tests.rs b/lightning/src/ln/priv_short_conf_tests.rs
index 96a3ee77dcf..623774acb5e 100644
--- a/lightning/src/ln/priv_short_conf_tests.rs
+++ b/lightning/src/ln/priv_short_conf_tests.rs
@@ -79,15 +79,9 @@ fn test_priv_forwarding_rejection() {
 	let (route, our_payment_hash, our_payment_preimage, our_payment_secret) =
 		get_route_and_payment_hash!(nodes[0], nodes[2], payment_params, 10_000);
 
-	nodes[0]
-		.node
-		.send_payment_with_route(
-			route.clone(),
-			our_payment_hash,
-			RecipientOnionFields::secret_only(our_payment_secret),
-			PaymentId(our_payment_hash.0),
-		)
-		.unwrap();
+	let onion = RecipientOnionFields::secret_only(our_payment_secret);
+	let id = PaymentId(our_payment_hash.0);
+	nodes[0].node.send_payment_with_route(route.clone(), our_payment_hash, onion, id).unwrap();
 	check_added_monitors!(nodes[0], 1);
 	let payment_event =
 		SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
@@ -108,13 +102,9 @@ fn test_priv_forwarding_rejection() {
 	nodes[0].node.handle_update_fail_htlc(node_b_id, &htlc_fail_updates.update_fail_htlcs[0]);
 	commitment_signed_dance!(nodes[0], nodes[1], htlc_fail_updates.commitment_signed, true, true);
-	expect_payment_failed_with_update!(
-		nodes[0],
-		our_payment_hash,
-		false,
-		nodes[2].node.list_channels()[0].short_channel_id.unwrap(),
-		true
-	);
+
+	let chan_2_scid = nodes[2].node.list_channels()[0].short_channel_id.unwrap();
+	expect_payment_failed_with_update!(nodes[0], our_payment_hash, false, chan_2_scid, true);
 
 	// Now disconnect nodes[1] from its peers and restart with accept_forwards_to_priv_channels set
 	// to true. Sadly there is currently no way to change it at runtime.
@@ -137,30 +127,19 @@ fn test_priv_forwarding_rejection() {
 		nodes_1_deserialized
 	);
 
-	nodes[0]
-		.node
-		.peer_connected(
-			node_b_id,
-			&msgs::Init {
-				features: nodes[1].node.init_features(),
-				networks: None,
-				remote_network_address: None,
-			},
-			true,
-		)
-		.unwrap();
-	nodes[1]
-		.node
-		.peer_connected(
-			node_a_id,
-			&msgs::Init {
-				features: nodes[0].node.init_features(),
-				networks: None,
-				remote_network_address: None,
-			},
-			false,
-		)
-		.unwrap();
+	let bs_init_msg = msgs::Init {
+		features: nodes[1].node.init_features(),
+		networks: None,
+		remote_network_address: None,
+	};
+	nodes[0].node.peer_connected(node_b_id, &bs_init_msg, true).unwrap();
+
+	let as_init_msg = msgs::Init {
+		features: nodes[0].node.init_features(),
+		networks: None,
+		remote_network_address: None,
+	};
+	nodes[1].node.peer_connected(node_a_id, &as_init_msg, false).unwrap();
 	let as_reestablish = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap();
 	let bs_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();
 	nodes[1].node.handle_channel_reestablish(node_a_id, &as_reestablish);
@@ -168,30 +147,13 @@ fn test_priv_forwarding_rejection() {
 	get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, node_b_id);
 	get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, node_a_id);
 
-	nodes[1]
-		.node
-		.peer_connected(
-			node_c_id,
-			&msgs::Init {
-				features: nodes[2].node.init_features(),
-				networks: None,
-				remote_network_address: None,
-			},
-			true,
-		)
-		.unwrap();
-	nodes[2]
-		.node
-		.peer_connected(
-			node_b_id,
-			&msgs::Init {
-				features: nodes[1].node.init_features(),
-				networks: None,
-				remote_network_address: None,
-			},
-			false,
-		)
-		.unwrap();
+	let cs_init_msg = msgs::Init {
+		features: nodes[2].node.init_features(),
+		networks: None,
+		remote_network_address: None,
+	};
+	nodes[1].node.peer_connected(node_c_id, &cs_init_msg, true).unwrap();
+	nodes[2].node.peer_connected(node_b_id, &bs_init_msg, false).unwrap();
 	let bs_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[2]).pop().unwrap();
 	let cs_reestablish = get_chan_reestablish_msgs!(nodes[2], nodes[1]).pop().unwrap();
 	nodes[2].node.handle_channel_reestablish(node_b_id, &bs_reestablish);
@@ -199,15 +161,9 @@ fn test_priv_forwarding_rejection() {
 	get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, node_c_id);
 	get_event_msg!(nodes[2], MessageSendEvent::SendChannelUpdate, node_b_id);
 
-	nodes[0]
-		.node
-		.send_payment_with_route(
-			route,
-			our_payment_hash,
-			RecipientOnionFields::secret_only(our_payment_secret),
-			PaymentId(our_payment_hash.0),
-		)
-		.unwrap();
+	let onion = RecipientOnionFields::secret_only(our_payment_secret);
+	let id = PaymentId(our_payment_hash.0);
+	nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id).unwrap();
 	check_added_monitors!(nodes[0], 1);
 	pass_along_route(
 		&nodes[0],
@@ -388,15 +344,10 @@ fn test_routed_scid_alias() {
 	let (route, payment_hash, payment_preimage, payment_secret) =
 		get_route_and_payment_hash!(nodes[0], nodes[2], payment_params, 100_000);
 	assert_eq!(route.paths[0].hops[1].short_channel_id, last_hop[0].inbound_scid_alias.unwrap());
-	nodes[0]
-		.node
-		.send_payment_with_route(
-			route,
-			payment_hash,
-			RecipientOnionFields::secret_only(payment_secret),
-			PaymentId(payment_hash.0),
-		)
-		.unwrap();
+
+	let onion = RecipientOnionFields::secret_only(payment_secret);
+	let id = PaymentId(payment_hash.0);
+	nodes[0].node.send_payment_with_route(route, payment_hash, onion, id).unwrap();
 	check_added_monitors!(nodes[0], 1);
 	pass_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], 100_000, payment_hash, payment_secret);
@@ -623,15 +574,10 @@ fn test_inbound_scid_privacy() {
 	let (route, payment_hash, payment_preimage, payment_secret) =
 		get_route_and_payment_hash!(nodes[0], nodes[2], payment_params, 100_000);
 	assert_eq!(route.paths[0].hops[1].short_channel_id, last_hop[0].inbound_scid_alias.unwrap());
-	nodes[0]
-		.node
-		.send_payment_with_route(
-			route,
-			payment_hash,
-			RecipientOnionFields::secret_only(payment_secret),
-			PaymentId(payment_hash.0),
-		)
-		.unwrap();
+
+	let onion = RecipientOnionFields::secret_only(payment_secret);
+	let id = PaymentId(payment_hash.0);
+	nodes[0].node.send_payment_with_route(route, payment_hash, onion, id).unwrap();
 	check_added_monitors!(nodes[0], 1);
 	pass_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], 100_000, payment_hash, payment_secret);
@@ -649,15 +595,10 @@ fn test_inbound_scid_privacy() {
 	let (route_2, payment_hash_2, _, payment_secret_2) =
 		get_route_and_payment_hash!(nodes[0], nodes[2], payment_params_2, 100_000);
 	assert_eq!(route_2.paths[0].hops[1].short_channel_id, last_hop[0].short_channel_id.unwrap());
-	nodes[0]
-		.node
-		.send_payment_with_route(
-			route_2,
-			payment_hash_2,
-			RecipientOnionFields::secret_only(payment_secret_2),
-			PaymentId(payment_hash_2.0),
-		)
-		.unwrap();
+
+	let onion = RecipientOnionFields::secret_only(payment_secret_2);
+	let id = PaymentId(payment_hash_2.0);
+	nodes[0].node.send_payment_with_route(route_2, payment_hash_2, onion, id).unwrap();
 	check_added_monitors!(nodes[0], 1);
 
 	let payment_event = SendEvent::from_node(&nodes[0]);
@@ -751,15 +692,10 @@ fn test_scid_alias_returned() {
 	route.paths[0].hops[1].fee_msat = 10_000_000; // Overshoot the last channel's value
 
 	// Route the HTLC through to the destination.
- nodes[0] - .node - .send_payment_with_route( - route.clone(), - payment_hash, - RecipientOnionFields::secret_only(payment_secret), - PaymentId(payment_hash.0), - ) - .unwrap(); + let onion = RecipientOnionFields::secret_only(payment_secret); + let id = PaymentId(payment_hash.0); + nodes[0].node.send_payment_with_route(route.clone(), payment_hash, onion, id).unwrap(); + check_added_monitors!(nodes[0], 1); let as_updates = get_htlc_update_msgs!(nodes[0], node_b_id); nodes[1].node.handle_update_add_htlc(node_a_id, &as_updates.update_add_htlcs[0]); @@ -794,15 +730,10 @@ fn test_scid_alias_returned() { route.paths[0].hops[0].fee_msat = 0; // But set fee paid to the middle hop to 0 // Route the HTLC through to the destination. - nodes[0] - .node - .send_payment_with_route( - route, - payment_hash, - RecipientOnionFields::secret_only(payment_secret), - PaymentId(payment_hash.0), - ) - .unwrap(); + let onion = RecipientOnionFields::secret_only(payment_secret); + let id = PaymentId(payment_hash.0); + nodes[0].node.send_payment_with_route(route, payment_hash, onion, id).unwrap(); + check_added_monitors!(nodes[0], 1); let as_updates = get_htlc_update_msgs!(nodes[0], node_b_id); nodes[1].node.handle_update_add_htlc(node_a_id, &as_updates.update_add_htlcs[0]); @@ -1003,15 +934,9 @@ fn test_0conf_channel_with_async_monitor() { let (route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], 1_000_000); - nodes[0] - .node - .send_payment_with_route( - route, - payment_hash, - RecipientOnionFields::secret_only(payment_secret), - PaymentId(payment_hash.0), - ) - .unwrap(); + let onion = RecipientOnionFields::secret_only(payment_secret); + let id = PaymentId(payment_hash.0); + nodes[0].node.send_payment_with_route(route, payment_hash, onion, id).unwrap(); check_added_monitors!(nodes[0], 1); let as_send = SendEvent::from_node(&nodes[0]); @@ -1035,14 +960,11 @@ fn test_0conf_channel_with_async_monitor() { assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (_, latest_update) = nodes[1] - .chain_monitor - .latest_monitor_update_id - .lock() - .unwrap() - .get(&bs_raa.channel_id) - .unwrap() - .clone(); + let (_, latest_update) = { + let latest_monitor_update_id = + nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap(); + latest_monitor_update_id.get(&bs_raa.channel_id).unwrap().clone() + }; nodes[1] .chain_monitor .chain_monitor @@ -1088,13 +1010,8 @@ fn test_0conf_close_no_early_chan_update() { nodes[0].node.force_close_all_channels_broadcasting_latest_txn(error_message.to_string()); check_added_monitors!(nodes[0], 1); - check_closed_event!( - &nodes[0], - 1, - ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, - [node_b_id], - 100000 - ); + let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }; + check_closed_event!(&nodes[0], 1, reason, [node_b_id], 100000); let _ = get_err_msg(&nodes[0], &node_b_id); } @@ -1206,28 +1123,18 @@ fn test_0conf_channel_reorg() { // At this point the channel no longer has an SCID again. In the future we should likely // support simply un-setting the SCID and waiting until the channel gets re-confirmed, but for // now we force-close the channel here. - check_closed_event!( - &nodes[0], - 1, - ClosureReason::ProcessingError { - err: "Funding transaction was un-confirmed. Locked at 0 confs, now have 0 confs." 
- .to_owned() - }, - [node_b_id], - 100000 - ); + let reason = ClosureReason::ProcessingError { + err: "Funding transaction was un-confirmed. Locked at 0 confs, now have 0 confs." + .to_owned(), + }; + check_closed_event!(&nodes[0], 1, reason, [node_b_id], 100000); check_closed_broadcast!(nodes[0], true); check_added_monitors(&nodes[0], 1); - check_closed_event!( - &nodes[1], - 1, - ClosureReason::ProcessingError { - err: "Funding transaction was un-confirmed. Locked at 0 confs, now have 0 confs." - .to_owned() - }, - [node_a_id], - 100000 - ); + let reason = ClosureReason::ProcessingError { + err: "Funding transaction was un-confirmed. Locked at 0 confs, now have 0 confs." + .to_owned(), + }; + check_closed_event!(&nodes[1], 1, reason, [node_a_id], 100000); check_closed_broadcast!(nodes[1], true); check_added_monitors(&nodes[1], 1); } diff --git a/lightning/src/ln/shutdown_tests.rs b/lightning/src/ln/shutdown_tests.rs index 4ada551b3d3..9826b8a39cd 100644 --- a/lightning/src/ln/shutdown_tests.rs +++ b/lightning/src/ln/shutdown_tests.rs @@ -77,20 +77,10 @@ fn pre_funding_lock_shutdown_test() { assert!(nodes[0].node.list_channels().is_empty()); assert!(nodes[1].node.list_channels().is_empty()); - check_closed_event!( - nodes[0], - 1, - ClosureReason::LocallyInitiatedCooperativeClosure, - [node_b_id], - 8000000 - ); - check_closed_event!( - nodes[1], - 1, - ClosureReason::CounterpartyInitiatedCooperativeClosure, - [node_a_id], - 8000000 - ); + let reason_a = ClosureReason::LocallyInitiatedCooperativeClosure; + check_closed_event!(nodes[0], 1, reason_a, [node_b_id], 8000000); + let reason_b = ClosureReason::CounterpartyInitiatedCooperativeClosure; + check_closed_event!(nodes[1], 1, reason_b, [node_a_id], 8000000); } #[test] @@ -138,20 +128,10 @@ fn expect_channel_shutdown_state() { assert!(nodes[0].node.list_channels().is_empty()); assert!(nodes[1].node.list_channels().is_empty()); - check_closed_event!( - nodes[0], - 1, - ClosureReason::LocallyInitiatedCooperativeClosure, - [node_b_id], - 100000 - ); - check_closed_event!( - nodes[1], - 1, - ClosureReason::CounterpartyInitiatedCooperativeClosure, - [node_a_id], - 100000 - ); + let reason_a = ClosureReason::LocallyInitiatedCooperativeClosure; + check_closed_event!(nodes[0], 1, reason_a, [node_b_id], 100000); + let reason_b = ClosureReason::CounterpartyInitiatedCooperativeClosure; + check_closed_event!(nodes[1], 1, reason_b, [node_a_id], 100000); } #[test] @@ -239,20 +219,10 @@ fn expect_channel_shutdown_state_with_htlc() { nodes[1].node.handle_closing_signed(node_a_id, &node_0_2nd_closing_signed.unwrap()); let (_, node_1_none) = get_closing_signed_broadcast!(nodes[1].node, node_a_id); assert!(node_1_none.is_none()); - check_closed_event!( - nodes[0], - 1, - ClosureReason::LocallyInitiatedCooperativeClosure, - [node_b_id], - 100000 - ); - check_closed_event!( - nodes[1], - 1, - ClosureReason::CounterpartyInitiatedCooperativeClosure, - [node_a_id], - 100000 - ); + let reason_a = ClosureReason::LocallyInitiatedCooperativeClosure; + check_closed_event!(nodes[0], 1, reason_a, [node_b_id], 100000); + let reason_b = ClosureReason::CounterpartyInitiatedCooperativeClosure; + check_closed_event!(nodes[1], 1, reason_b, [node_a_id], 100000); // Shutdown basically removes the channelDetails, testing of shutdowncomplete state unnecessary assert!(nodes[0].node.list_channels().is_empty()); @@ -317,20 +287,11 @@ fn test_lnd_bug_6039() { nodes[1].node.handle_closing_signed(node_a_id, &node_0_2nd_closing_signed.unwrap()); let (_, node_1_none) = 
get_closing_signed_broadcast!(nodes[1].node, node_a_id); assert!(node_1_none.is_none()); - check_closed_event!( - nodes[0], - 1, - ClosureReason::LocallyInitiatedCooperativeClosure, - [node_b_id], - 100000 - ); - check_closed_event!( - nodes[1], - 1, - ClosureReason::CounterpartyInitiatedCooperativeClosure, - [node_a_id], - 100000 - ); + + let reason_a = ClosureReason::LocallyInitiatedCooperativeClosure; + check_closed_event!(nodes[0], 1, reason_a, [node_b_id], 100000); + let reason_b = ClosureReason::CounterpartyInitiatedCooperativeClosure; + check_closed_event!(nodes[1], 1, reason_b, [node_a_id], 100000); // Shutdown basically removes the channelDetails, testing of shutdowncomplete state unnecessary assert!(nodes[0].node.list_channels().is_empty()); @@ -358,13 +319,8 @@ fn shutdown_on_unfunded_channel() { scriptpubkey: script, }, ); - check_closed_event!( - nodes[0], - 1, - ClosureReason::CounterpartyCoopClosedUnfundedChannel, - [node_b_id], - 1_000_000 - ); + let reason = ClosureReason::CounterpartyCoopClosedUnfundedChannel; + check_closed_event!(nodes[0], 1, reason, [node_b_id], 1_000_000); } #[test] @@ -381,13 +337,8 @@ fn close_on_unfunded_channel() { let _open_chan = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); nodes[0].node.close_channel(&chan_id, &node_b_id).unwrap(); - check_closed_event!( - nodes[0], - 1, - ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) }, - [node_b_id], - 1_000_000 - ); + let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) }; + check_closed_event!(nodes[0], 1, reason, [node_b_id], 1_000_000); } #[test] @@ -425,13 +376,8 @@ fn expect_channel_shutdown_state_with_force_closure() { assert!(nodes[1].node.list_channels().is_empty()); check_closed_broadcast!(nodes[0], true); check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [node_b_id], 100000); - check_closed_event!( - nodes[1], - 1, - ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, - [node_a_id], - 100000 - ); + let reason_b = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }; + check_closed_event!(nodes[1], 1, reason_b, [node_a_id], 100000); } #[test] @@ -495,30 +441,15 @@ fn updates_shutdown_wait() { &random_seed_bytes, ) .unwrap(); - unwrap_send_err!( - nodes[0], - nodes[0].node.send_payment_with_route( - route_1, - payment_hash, - RecipientOnionFields::secret_only(payment_secret), - PaymentId(payment_hash.0) - ), - true, - APIError::ChannelUnavailable { .. }, - {} - ); - unwrap_send_err!( - nodes[1], - nodes[1].node.send_payment_with_route( - route_2, - payment_hash, - RecipientOnionFields::secret_only(payment_secret), - PaymentId(payment_hash.0) - ), - true, - APIError::ChannelUnavailable { .. }, - {} - ); + + let onion = RecipientOnionFields::secret_only(payment_secret); + let id = PaymentId(payment_hash.0); + let res = nodes[0].node.send_payment_with_route(route_1, payment_hash, onion, id); + unwrap_send_err!(nodes[0], res, true, APIError::ChannelUnavailable { .. }, {}); + + let onion = RecipientOnionFields::secret_only(payment_secret); + let res = nodes[1].node.send_payment_with_route(route_2, payment_hash, onion, id); + unwrap_send_err!(nodes[1], res, true, APIError::ChannelUnavailable { .. 
}, {}); nodes[2].node.claim_funds(payment_preimage_0); check_added_monitors!(nodes[2], 1); @@ -555,42 +486,24 @@ fn updates_shutdown_wait() { nodes[1].node.handle_closing_signed(node_a_id, &node_0_2nd_closing_signed.unwrap()); let (_, node_1_none) = get_closing_signed_broadcast!(nodes[1].node, node_a_id); assert!(node_1_none.is_none()); - check_closed_event!( - nodes[0], - 1, - ClosureReason::LocallyInitiatedCooperativeClosure, - [node_b_id], - 100000 - ); - check_closed_event!( - nodes[1], - 1, - ClosureReason::CounterpartyInitiatedCooperativeClosure, - [node_a_id], - 100000 - ); + + let reason_a = ClosureReason::LocallyInitiatedCooperativeClosure; + check_closed_event!(nodes[0], 1, reason_a, [node_b_id], 100000); + let reason_b = ClosureReason::CounterpartyInitiatedCooperativeClosure; + check_closed_event!(nodes[1], 1, reason_b, [node_a_id], 100000); assert!(nodes[0].node.list_channels().is_empty()); assert_eq!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1); nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clear(); close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, true); + assert!(nodes[1].node.list_channels().is_empty()); assert!(nodes[2].node.list_channels().is_empty()); - check_closed_event!( - nodes[1], - 1, - ClosureReason::CounterpartyInitiatedCooperativeClosure, - [node_c_id], - 100000 - ); - check_closed_event!( - nodes[2], - 1, - ClosureReason::LocallyInitiatedCooperativeClosure, - [node_b_id], - 100000 - ); + let reason_b = ClosureReason::CounterpartyInitiatedCooperativeClosure; + check_closed_event!(nodes[1], 1, reason_b, [node_c_id], 100000); + let reason_c = ClosureReason::LocallyInitiatedCooperativeClosure; + check_closed_event!(nodes[2], 1, reason_c, [node_b_id], 100000); } #[test] @@ -630,15 +543,11 @@ fn do_htlc_fail_async_shutdown(blinded_recipient: bool) { amt_msat, ) }; + let onion = RecipientOnionFields::secret_only(our_payment_secret); + let id = PaymentId(our_payment_hash.0); nodes[0] .node - .send_payment( - our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), - PaymentId(our_payment_hash.0), - route_params, - Retry::Attempts(0), - ) + .send_payment(our_payment_hash, onion, id, route_params, Retry::Attempts(0)) .unwrap(); check_added_monitors!(nodes[0], 1); let updates = get_htlc_update_msgs!(nodes[0], node_b_id); @@ -720,13 +629,8 @@ fn do_htlc_fail_async_shutdown(blinded_recipient: bool) { close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, true); assert!(nodes[1].node.list_channels().is_empty()); assert!(nodes[2].node.list_channels().is_empty()); - check_closed_event!( - nodes[0], - 1, - ClosureReason::CounterpartyInitiatedCooperativeClosure, - [node_b_id], - 100000 - ); + let reason_a = ClosureReason::CounterpartyInitiatedCooperativeClosure; + check_closed_event!(nodes[0], 1, reason_a, [node_b_id], 100000); let event1 = ExpectedCloseEvent { channel_capacity_sats: Some(100000), channel_id: None, @@ -746,13 +650,8 @@ fn do_htlc_fail_async_shutdown(blinded_recipient: bool) { user_channel_id: None, }; check_closed_events(&nodes[1], &[event1, event2]); - check_closed_event!( - nodes[2], - 1, - ClosureReason::LocallyInitiatedCooperativeClosure, - [node_b_id], - 100000 - ); + let reason_c = ClosureReason::LocallyInitiatedCooperativeClosure; + check_closed_event!(nodes[2], 1, reason_c, [node_b_id], 100000); } fn do_test_shutdown_rebroadcast(recv_count: u8) { @@ -784,31 +683,14 @@ fn do_test_shutdown_rebroadcast(recv_count: u8) { nodes[0].node.peer_disconnected(node_b_id); 
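(The reconnect hunk that follows hoists a single `msgs::Init` and reuses it for both directions of `peer_connected`. One subtlety worth noting: the deleted code built the second message from nodes[0]'s features, while the rewritten code hands nodes[1] its own feature set back. That is only equivalent because these functional-test nodes are configured identically and therefore advertise the same init features. A minimal sketch of the resulting shape, reusing the harness names from the diff — `nodes`, `node_a_id`, `node_b_id`:

    let init_msg = msgs::Init {
        features: nodes[1].node.init_features(),
        networks: None,
        remote_network_address: None,
    };
    // Reusing one Init in both directions is safe here only because every
    // test node advertises identical init features.
    nodes[0].node.peer_connected(node_b_id, &init_msg, true).unwrap();
    nodes[1].node.peer_connected(node_a_id, &init_msg, false).unwrap();
)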
nodes[1].node.peer_disconnected(node_a_id); - nodes[0] - .node - .peer_connected( - node_b_id, - &msgs::Init { - features: nodes[1].node.init_features(), - networks: None, - remote_network_address: None, - }, - true, - ) - .unwrap(); + let init_msg = msgs::Init { + features: nodes[1].node.init_features(), + networks: None, + remote_network_address: None, + }; + nodes[0].node.peer_connected(node_b_id, &init_msg, true).unwrap(); let node_0_reestablish = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap(); - nodes[1] - .node - .peer_connected( - node_a_id, - &msgs::Init { - features: nodes[0].node.init_features(), - networks: None, - remote_network_address: None, - }, - false, - ) - .unwrap(); + nodes[1].node.peer_connected(node_a_id, &init_msg, false).unwrap(); let node_1_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap(); nodes[1].node.handle_channel_reestablish(node_a_id, &node_0_reestablish); @@ -873,31 +755,9 @@ fn do_test_shutdown_rebroadcast(recv_count: u8) { nodes[0].node.peer_disconnected(node_b_id); nodes[1].node.peer_disconnected(node_a_id); - nodes[1] - .node - .peer_connected( - node_a_id, - &msgs::Init { - features: nodes[0].node.init_features(), - networks: None, - remote_network_address: None, - }, - true, - ) - .unwrap(); + nodes[1].node.peer_connected(node_a_id, &init_msg, true).unwrap(); let node_1_2nd_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap(); - nodes[0] - .node - .peer_connected( - node_b_id, - &msgs::Init { - features: nodes[1].node.init_features(), - networks: None, - remote_network_address: None, - }, - false, - ) - .unwrap(); + nodes[0].node.peer_connected(node_b_id, &init_msg, false).unwrap(); if recv_count == 0 { // If all closing_signeds weren't delivered we can just resume where we left off... let node_0_2nd_reestablish = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap(); @@ -941,13 +801,8 @@ fn do_test_shutdown_rebroadcast(recv_count: u8) { nodes[1].node.handle_closing_signed(node_a_id, &node_0_2nd_closing_signed.unwrap()); let (_, node_1_none) = get_closing_signed_broadcast!(nodes[1].node, node_a_id); assert!(node_1_none.is_none()); - check_closed_event!( - nodes[1], - 1, - ClosureReason::LocallyInitiatedCooperativeClosure, - [node_a_id], - 100000 - ); + let reason = ClosureReason::LocallyInitiatedCooperativeClosure; + check_closed_event!(nodes[1], 1, reason, [node_a_id], 100000); } else { // If one node, however, received + responded with an identical closing_signed we end // up erroring and node[0] will try to broadcast its own latest commitment transaction. @@ -978,8 +833,8 @@ fn do_test_shutdown_rebroadcast(recv_count: u8) { // closing_signed so we do it ourselves check_closed_broadcast!(nodes[1], false); check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", &node_b_id)) } - , [node_a_id], 100000); + let reason = ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(format!("Got a message for a channel from the wrong node! 
No such channel for the passed counterparty_node_id {}", &node_b_id)) }; + check_closed_event!(nodes[1], 1, reason, [node_a_id], 100000); } assert!(nodes[0].node.list_channels().is_empty()); @@ -987,29 +842,16 @@ fn do_test_shutdown_rebroadcast(recv_count: u8) { assert_eq!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1); nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clear(); close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, true); + assert!(nodes[1].node.list_channels().is_empty()); assert!(nodes[2].node.list_channels().is_empty()); - check_closed_event!( - nodes[0], - 1, - ClosureReason::CounterpartyInitiatedCooperativeClosure, - [node_b_id], - 100000 - ); - check_closed_event!( - nodes[1], - 1, - ClosureReason::CounterpartyInitiatedCooperativeClosure, - [node_c_id], - 100000 - ); - check_closed_event!( - nodes[2], - 1, - ClosureReason::LocallyInitiatedCooperativeClosure, - [node_b_id], - 100000 - ); + + let reason_a = ClosureReason::CounterpartyInitiatedCooperativeClosure; + check_closed_event!(nodes[0], 1, reason_a, [node_b_id], 100000); + let reason_b = ClosureReason::CounterpartyInitiatedCooperativeClosure; + check_closed_event!(nodes[1], 1, reason_b, [node_c_id], 100000); + let reason_c = ClosureReason::LocallyInitiatedCooperativeClosure; + check_closed_event!(nodes[2], 1, reason_c, [node_b_id], 100000); } #[test] @@ -1193,8 +1035,8 @@ fn test_unsupported_anysegwit_upfront_shutdown_script() { }, _ => panic!("Unexpected event"), } - check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: OP_PUSHNUM_16 OP_PUSHBYTES_2 0028".to_string() } - , [node_b_id], 100000); + let reason = ClosureReason::ProcessingError { err: "Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: OP_PUSHNUM_16 OP_PUSHBYTES_2 0028".to_string() }; + check_closed_event!(nodes[0], 1, reason, [node_b_id], 100000); } #[test] @@ -1504,20 +1346,12 @@ fn do_test_closing_signed_reinit_timeout(timeout_step: TimeoutStep) { // nodes[1] should happily accept and respond to. 
node_0_closing_signed.fee_range.as_mut().unwrap().max_fee_satoshis *= 10; { - let mut node_0_per_peer_lock; - let mut node_0_peer_state_lock; - get_channel_ref!( - nodes[0], - nodes[1], - node_0_per_peer_lock, - node_0_peer_state_lock, - chan_id - ) - .context_mut() - .closing_fee_limits - .as_mut() - .unwrap() - .1 *= 10; + let mut per_peer_lock; + let mut peer_state_lock; + let chan = + get_channel_ref!(nodes[0], nodes[1], per_peer_lock, peer_state_lock, chan_id); + + chan.context_mut().closing_fee_limits.as_mut().unwrap().1 *= 10; } nodes[1].node.handle_closing_signed(node_a_id, &node_0_closing_signed); let node_1_closing_signed = @@ -1526,21 +1360,11 @@ fn do_test_closing_signed_reinit_timeout(timeout_step: TimeoutStep) { let node_0_2nd_closing_signed = get_closing_signed_broadcast!(nodes[0].node, node_b_id); if timeout_step == TimeoutStep::NoTimeout { nodes[1].node.handle_closing_signed(node_a_id, &node_0_2nd_closing_signed.1.unwrap()); - check_closed_event!( - nodes[1], - 1, - ClosureReason::CounterpartyInitiatedCooperativeClosure, - [node_a_id], - 100000 - ); + let reason_b = ClosureReason::CounterpartyInitiatedCooperativeClosure; + check_closed_event!(nodes[1], 1, reason_b, [node_a_id], 100000); } - check_closed_event!( - nodes[0], - 1, - ClosureReason::LocallyInitiatedCooperativeClosure, - [node_b_id], - 100000 - ); + let reason_a = ClosureReason::LocallyInitiatedCooperativeClosure; + check_closed_event!(nodes[0], 1, reason_a, [node_b_id], 100000); } if timeout_step != TimeoutStep::NoTimeout { @@ -1565,16 +1389,10 @@ fn do_test_closing_signed_reinit_timeout(timeout_step: TimeoutStep) { ); check_closed_broadcast!(nodes[1], true); check_added_monitors!(nodes[1], 1); - check_closed_event!( - nodes[1], - 1, - ClosureReason::ProcessingError { - err: "closing_signed negotiation failed to finish within two timer ticks" - .to_string() - }, - [node_a_id], - 100000 - ); + let reason = ClosureReason::ProcessingError { + err: "closing_signed negotiation failed to finish within two timer ticks".to_string(), + }; + check_closed_event!(nodes[1], 1, reason, [node_a_id], 100000); } else { assert!(txn[0].output[0].script_pubkey.is_p2wpkh()); assert!(txn[0].output[1].script_pubkey.is_p2wpkh()); @@ -1637,20 +1455,10 @@ fn do_simple_legacy_shutdown_test(high_initiator_fee: bool) { nodes[0].node.handle_closing_signed(node_b_id, &node_1_closing_signed.unwrap()); let (_, node_0_none) = get_closing_signed_broadcast!(nodes[0].node, node_b_id); assert!(node_0_none.is_none()); - check_closed_event!( - nodes[0], - 1, - ClosureReason::LocallyInitiatedCooperativeClosure, - [node_b_id], - 100000 - ); - check_closed_event!( - nodes[1], - 1, - ClosureReason::CounterpartyInitiatedCooperativeClosure, - [node_a_id], - 100000 - ); + let reason_a = ClosureReason::LocallyInitiatedCooperativeClosure; + check_closed_event!(nodes[0], 1, reason_a, [node_b_id], 100000); + let reason_b = ClosureReason::CounterpartyInitiatedCooperativeClosure; + check_closed_event!(nodes[1], 1, reason_b, [node_a_id], 100000); } #[test] @@ -1721,20 +1529,10 @@ fn simple_target_feerate_shutdown() { nodes[0].node.handle_closing_signed(node_b_id, &node_1_closing_signed); let (_, node_0_none) = get_closing_signed_broadcast!(nodes[0].node, node_b_id); assert!(node_0_none.is_none()); - check_closed_event!( - nodes[0], - 1, - ClosureReason::LocallyInitiatedCooperativeClosure, - [node_b_id], - 100000 - ); - check_closed_event!( - nodes[1], - 1, - ClosureReason::LocallyInitiatedCooperativeClosure, - [node_a_id], - 100000 - ); + let reason_a = 
ClosureReason::LocallyInitiatedCooperativeClosure; + check_closed_event!(nodes[0], 1, reason_a, [node_b_id], 100000); + let reason_b = ClosureReason::LocallyInitiatedCooperativeClosure; + check_closed_event!(nodes[1], 1, reason_b, [node_a_id], 100000); } fn do_outbound_update_no_early_closing_signed(use_htlc: bool) { @@ -1802,14 +1600,11 @@ fn do_outbound_update_no_early_closing_signed(use_htlc: bool) { expect_channel_shutdown_state!(nodes[0], chan_id, ChannelShutdownState::ResolvingHTLCs); assert_eq!(nodes[0].node.get_and_clear_pending_msg_events(), Vec::new()); - let (latest_update, _) = nodes[0] - .chain_monitor - .latest_monitor_update_id - .lock() - .unwrap() - .get(&chan_id) - .unwrap() - .clone(); + let (latest_update, _) = { + let latest_monitor_update_id = + nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap(); + latest_monitor_update_id.get(&chan_id).unwrap().clone() + }; nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(chan_id, latest_update); let as_raa_closing_signed = nodes[0].node.get_and_clear_pending_msg_events(); @@ -1839,20 +1634,10 @@ fn do_outbound_update_no_early_closing_signed(use_htlc: bool) { let (_, node_1_none) = get_closing_signed_broadcast!(nodes[1].node, node_a_id); assert!(node_1_none.is_none()); - check_closed_event!( - nodes[0], - 1, - ClosureReason::LocallyInitiatedCooperativeClosure, - [node_b_id], - 100000 - ); - check_closed_event!( - nodes[1], - 1, - ClosureReason::LocallyInitiatedCooperativeClosure, - [node_a_id], - 100000 - ); + let reason_a = ClosureReason::LocallyInitiatedCooperativeClosure; + check_closed_event!(nodes[0], 1, reason_a, [node_b_id], 100000); + let reason_b = ClosureReason::LocallyInitiatedCooperativeClosure; + check_closed_event!(nodes[1], 1, reason_b, [node_a_id], 100000); } #[test]
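(Both monitor-update hunks above replace a long `.lock().unwrap().get(..).unwrap().clone()` chain with an explicit block. Semantically nothing changes — the temporary `MutexGuard` already dropped at the end of the old statement — but the block form gives rustfmt a natural break point and makes the guard's lifetime obvious before `force_channel_monitor_updated` runs. A self-contained sketch of the pattern, using a hypothetical map rather than LDK's actual `latest_monitor_update_id` field:

    use std::collections::HashMap;
    use std::sync::Mutex;

    fn main() {
        // Hypothetical stand-in: channel id -> (latest_update, other id).
        let latest_monitor_update_id: Mutex<HashMap<u32, (u64, u64)>> =
            Mutex::new(HashMap::new());
        latest_monitor_update_id.lock().unwrap().insert(7, (1, 2));

        let (latest_update, _) = {
            // The guard lives only for this block, so the mutex is released
            // again before any follow-up call that might need it.
            let guard = latest_monitor_update_id.lock().unwrap();
            guard.get(&7).unwrap().clone()
        };
        assert_eq!(latest_update, 1);
    }
)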