@@ -3496,6 +3496,34 @@ fn test_force_close_fail_back() {
 	check_spends!(node_txn[0], tx);
 }
 
+#[test]
+fn test_dup_events_on_peer_disconnect() {
+	// Test that if we receive a duplicative update_fulfill_htlc message after a reconnect we do
+	// not generate a corresponding duplicative PaymentSent event. This was not previously the
+	// case, as we used to generate the event immediately upon receipt of the payment preimage
+	// in the update_fulfill_htlc message.
+
+	let chanmon_cfgs = create_chanmon_cfgs(2);
+	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+	create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
+
+	let payment_preimage = route_payment(&nodes[0], &[&nodes[1]], 1000000).0;
+
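+	// Have nodes[1] claim the payment and deliver the resulting update_fulfill_htlc to nodes[0],
+	// generating the one and only PaymentSent event for this payment.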
+	assert!(nodes[1].node.claim_funds(payment_preimage));
+	check_added_monitors!(nodes[1], 1);
+	let claim_msgs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+	nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &claim_msgs.update_fulfill_htlcs[0]);
+	expect_payment_sent!(nodes[0], payment_preimage);
+
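+	// Disconnect and reconnect. The (1, 0) pending_htlc_claims argument to reconnect_nodes below
+	// re-delivers the update_fulfill_htlc to nodes[0], which must not generate a second
+	// PaymentSent event.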
+	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+
+	reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (1, 0), (0, 0), (0, 0), (false, false));
+	assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
+}
+
 #[test]
 fn test_simple_peer_disconnect() {
 	// Test that we can reconnect when there are no lost messages
@@ -3718,8 +3746,7 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8) {
 	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
 	if messages_delivered < 2 {
 		reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (1, 0), (0, 0), (0, 0), (false, false));
-		//TODO: Deduplicate PaymentSent events, then enable this if:
-		//if messages_delivered < 1 {
+		if messages_delivered < 1 {
 			let events_4 = nodes[0].node.get_and_clear_pending_events();
 			assert_eq!(events_4.len(), 1);
 			match events_4[0] {
@@ -3728,7 +3755,9 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8) {
 				},
 				_ => panic!("Unexpected event"),
 			}
-		//}
+		} else {
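+			// The update_fulfill_htlc was already delivered before the disconnect, so its replay
+			// on reconnect should not leave anything further pending.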
+			assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+		}
 	} else if messages_delivered == 2 {
 		// nodes[0] still wants its RAA + commitment_signed
 		reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, -1), (0, 0), (0, 0), (0, 0), (false, true));
@@ -4302,6 +4331,108 @@ fn test_no_txn_manager_serialize_deserialize() {
 	send_payment(&nodes[0], &[&nodes[1]], 1000000);
 }
 
+#[test]
+fn test_dup_htlc_onchain_fails_on_reload() {
+	// When a Channel is closed, any outbound HTLCs which were relayed through it are simply
+	// dropped when the Channel is. From there, the ChannelManager relies on the ChannelMonitor
+	// having a copy of the relevant fail-/claim-back data and processes the HTLC fail/claim when
+	// the ChannelMonitor tells it to.
+	//
+	// If, due to an on-chain event, an HTLC is failed/claimed, and then we serialize the
+	// ChannelManager, we generally expect there not to be a duplicate HTLC fail/claim (eg via a
+	// PaymentFailed event appearing). However, because we may not serialize the relevant
+	// ChannelMonitor at the same time, this isn't strictly guaranteed. In order to provide this
+	// consistency, the ChannelManager explicitly tracks pending-onchain-resolution outbound HTLCs
+	// and de-duplicates ChannelMonitor events.
+	//
+	// This tests that explicit tracking behavior.
+	let chanmon_cfgs = create_chanmon_cfgs(2);
+	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+	let persister: test_utils::TestPersister;
+	let new_chain_monitor: test_utils::TestChainMonitor;
+	let nodes_0_deserialized: ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
+	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+	create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
+
+	// Route a payment, but force-close the channel before the HTLC fulfill message arrives at
+	// nodes[0].
+	let (payment_preimage, _, _) = route_payment(&nodes[0], &[&nodes[1]], 10000000);
+	nodes[0].node.force_close_channel(&nodes[0].node.list_channels()[0].channel_id).unwrap();
+	check_closed_broadcast!(nodes[0], true);
+	check_added_monitors!(nodes[0], 1);
+
+	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+
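+	// Grab the transactions which nodes[0] broadcast as a result of the force-close.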
+	let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+	assert_eq!(node_txn.len(), 2);
+
+	assert!(nodes[1].node.claim_funds(payment_preimage));
+	check_added_monitors!(nodes[1], 1);
+
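+	// Confirm nodes[0]'s commitment transaction on nodes[1], prompting its ChannelMonitor to
+	// broadcast an on-chain claim of the HTLC using the preimage.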
+	let mut header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+	connect_block(&nodes[1], &Block { header, txdata: vec![node_txn[0].clone(), node_txn[1].clone()] });
+	check_closed_broadcast!(nodes[1], true);
+	check_added_monitors!(nodes[1], 1);
+	let claim_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+
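+	// Confirm the same block on nodes[0] so it, too, sees the channel close on-chain.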
+	connect_block(&nodes[0], &Block { header, txdata: node_txn });
+
+	// Serialize out the ChannelMonitor before connecting the on-chain claim transactions. This is
+	// fairly normal behavior as ChannelMonitor(s) are often not re-serialized when on-chain events
+	// happen, unlike ChannelManager which tends to be re-serialized after any relevant event(s).
+	let mut chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new());
+	nodes[0].chain_monitor.chain_monitor.monitors.read().unwrap().iter().next().unwrap().1.write(&mut chan_0_monitor_serialized).unwrap();
+
+	header.prev_blockhash = header.block_hash();
+	let claim_block = Block { header, txdata: claim_txn };
+	connect_block(&nodes[0], &claim_block);
+	expect_payment_sent!(nodes[0], payment_preimage);
+
+	// ChannelManagers generally get re-serialized after any relevant event(s). Since we just
+	// connected a highly-relevant block, it likely gets serialized out now.
+	let mut chan_manager_serialized = test_utils::TestVecWriter(Vec::new());
+	nodes[0].node.write(&mut chan_manager_serialized).unwrap();
+
+	// Now reload nodes[0]...
+	persister = test_utils::TestPersister::new();
+	let keys_manager = &chanmon_cfgs[0].keys_manager;
+	new_chain_monitor = test_utils::TestChainMonitor::new(Some(nodes[0].chain_source), nodes[0].tx_broadcaster.clone(), nodes[0].logger, node_cfgs[0].fee_estimator, &persister, keys_manager);
+	nodes[0].chain_monitor = &new_chain_monitor;
+	let mut chan_0_monitor_read = &chan_0_monitor_serialized.0[..];
+	let (_, mut chan_0_monitor) = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(
+		&mut chan_0_monitor_read, keys_manager).unwrap();
+	assert!(chan_0_monitor_read.is_empty());
+
+	let (_, nodes_0_deserialized_tmp) = {
+		let mut channel_monitors = HashMap::new();
+		channel_monitors.insert(chan_0_monitor.get_funding_txo().0, &mut chan_0_monitor);
+		<(BlockHash, ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>
+			::read(&mut std::io::Cursor::new(&chan_manager_serialized.0[..]), ChannelManagerReadArgs {
+				default_config: Default::default(),
+				keys_manager,
+				fee_estimator: node_cfgs[0].fee_estimator,
+				chain_monitor: nodes[0].chain_monitor,
+				tx_broadcaster: nodes[0].tx_broadcaster.clone(),
+				logger: nodes[0].logger,
+				channel_monitors,
+			}).unwrap()
+	};
+	nodes_0_deserialized = nodes_0_deserialized_tmp;
+
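+	// Re-register the deserialized ChannelMonitor with the fresh chain monitor and swap in the
+	// reloaded ChannelManager.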
+	assert!(nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor).is_ok());
+	check_added_monitors!(nodes[0], 1);
+	nodes[0].node = &nodes_0_deserialized;
+
+	// Note that if we re-connect the block which exposed nodes[0] to the payment preimage (but
+	// which the current ChannelMonitor has not seen), the ChannelManager's de-duplication of
+	// payment events should kick in, leaving us with no pending events here.
+	nodes[0].chain_monitor.chain_monitor.block_connected(&claim_block, nodes[0].blocks.borrow().len() as u32 - 1);
+	assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
+}
+
 #[test]
 fn test_manager_serialize_deserialize_events() {
 	// This test makes sure the events field in ChannelManager survives de/serialization