@@ -16,7 +16,7 @@ use chain::channelmonitor::{ANTI_REORG_DELAY, ChannelMonitor, LATENCY_GRACE_PERI
 use chain::transaction::OutPoint;
 use chain::keysinterface::KeysInterface;
 use ln::channel::EXPIRE_PREV_CONFIG_TICKS;
-use ln::channelmanager::{BREAKDOWN_TIMEOUT, ChannelManager, ChannelManagerReadArgs, MPP_TIMEOUT_TICKS, PaymentId, PaymentSendFailure};
+use ln::channelmanager::{BREAKDOWN_TIMEOUT, ChannelManager, ChannelManagerReadArgs, MPP_TIMEOUT_TICKS, MIN_CLTV_EXPIRY_DELTA, PaymentId, PaymentSendFailure};
 use ln::features::{InitFeatures, InvoiceFeatures};
 use ln::msgs;
 use ln::msgs::ChannelMessageHandler;
@@ -563,6 +563,231 @@ fn retry_with_no_persist() {
 	do_retry_with_no_persist(false);
 }
 
+fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) {
+	// Test that an off-chain completed payment is not retryable on restart. This was previously
+	// broken for dust payments, but we test for both dust and non-dust payments.
+	//
+	// `use_dust` switches to using a dust HTLC, which results in the HTLC not having an on-chain
+	// output at all.
+	let chanmon_cfgs = create_chanmon_cfgs(3);
+	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+
+	let mut manually_accept_config = test_default_channel_config();
+	manually_accept_config.manually_accept_inbound_channels = true;
+
+	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, Some(manually_accept_config), None]);
+
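+	// Declared before `nodes` so that the reloaded ChannelManagers, chain monitors and
+	// persisters outlive the references the test harness holds to them.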
+	let first_persister: test_utils::TestPersister;
+	let first_new_chain_monitor: test_utils::TestChainMonitor;
+	let first_nodes_0_deserialized: ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
+	let second_persister: test_utils::TestPersister;
+	let second_new_chain_monitor: test_utils::TestChainMonitor;
+	let second_nodes_0_deserialized: ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
+	let third_persister: test_utils::TestPersister;
+	let third_new_chain_monitor: test_utils::TestChainMonitor;
+	let third_nodes_0_deserialized: ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
+
+	let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+	// Because we set nodes[1] to manually accept channels, just open a 0-conf channel.
+	let (funding_tx, chan_id) = open_zero_conf_channel(&nodes[0], &nodes[1], None);
+	confirm_transaction(&nodes[0], &funding_tx);
+	confirm_transaction(&nodes[1], &funding_tx);
+	// Ignore the announcement_signatures messages
+	nodes[0].node.get_and_clear_pending_msg_events();
+	nodes[1].node.get_and_clear_pending_msg_events();
+	let chan_id_2 = create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known()).2;
+
+	// Serialize the ChannelManager prior to sending payments
+	let mut nodes_0_serialized = nodes[0].node.encode();
+
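+	// Route and send the payment across both hops. In the dust case the amount is small
+	// enough that the HTLC never gets its own on-chain output.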
+	let route = get_route_and_payment_hash!(nodes[0], nodes[2], if use_dust { 1_000 } else { 1_000_000 }).0;
+	let (payment_preimage, payment_hash, payment_secret, payment_id) = send_along_route(&nodes[0], route, &[&nodes[1], &nodes[2]], if use_dust { 1_000 } else { 1_000_000 });
+
+	// The ChannelMonitor should always be the latest version, as we're required to persist it
+	// during the `commitment_signed_dance!()`.
+	let mut chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new());
+	get_monitor!(nodes[0], chan_id).write(&mut chan_0_monitor_serialized).unwrap();
+
+	let mut chan_1_monitor_serialized = test_utils::TestVecWriter(Vec::new());
+
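+	// Reloads nodes[0] from the serialized state captured above, simulating a restart: build
+	// a fresh persister and chain monitor, deserialize the ChannelMonitor(s) and the
+	// ChannelManager, re-watch the channel(s), and note the implied disconnect on nodes[1].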
+	macro_rules! reload_node {
+		($chain_monitor: ident, $chan_manager: ident, $persister: ident) => { {
+			$persister = test_utils::TestPersister::new();
+			let keys_manager = &chanmon_cfgs[0].keys_manager;
+			$chain_monitor = test_utils::TestChainMonitor::new(Some(nodes[0].chain_source), nodes[0].tx_broadcaster.clone(), nodes[0].logger, node_cfgs[0].fee_estimator, &$persister, keys_manager);
+			nodes[0].chain_monitor = &$chain_monitor;
+			let mut chan_0_monitor_read = &chan_0_monitor_serialized.0[..];
+			let (_, mut chan_0_monitor) = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(
+				&mut chan_0_monitor_read, keys_manager).unwrap();
+			assert!(chan_0_monitor_read.is_empty());
+
+			let mut chan_1_monitor = None;
+			let mut channel_monitors = HashMap::new();
+			channel_monitors.insert(chan_0_monitor.get_funding_txo().0, &mut chan_0_monitor);
+
+			if !chan_1_monitor_serialized.0.is_empty() {
+				let mut chan_1_monitor_read = &chan_1_monitor_serialized.0[..];
+				chan_1_monitor = Some(<(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(
+					&mut chan_1_monitor_read, keys_manager).unwrap().1);
+				assert!(chan_1_monitor_read.is_empty());
+				channel_monitors.insert(chan_1_monitor.as_ref().unwrap().get_funding_txo().0, chan_1_monitor.as_mut().unwrap());
+			}
+
+			let mut nodes_0_read = &nodes_0_serialized[..];
+			let (_, nodes_0_deserialized_tmp) = {
+				<(BlockHash, ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
+					default_config: test_default_channel_config(),
+					keys_manager,
+					fee_estimator: node_cfgs[0].fee_estimator,
+					chain_monitor: nodes[0].chain_monitor,
+					tx_broadcaster: nodes[0].tx_broadcaster.clone(),
+					logger: nodes[0].logger,
+					channel_monitors,
+				}).unwrap()
+			};
+			$chan_manager = nodes_0_deserialized_tmp;
+			assert!(nodes_0_read.is_empty());
+
+			assert!(nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor).is_ok());
+			if !chan_1_monitor_serialized.0.is_empty() {
+				let funding_txo = chan_1_monitor.as_ref().unwrap().get_funding_txo().0;
+				assert!(nodes[0].chain_monitor.watch_channel(funding_txo, chan_1_monitor.unwrap()).is_ok());
+			}
+			nodes[0].node = &$chan_manager;
+			check_added_monitors!(nodes[0], if !chan_1_monitor_serialized.0.is_empty() { 2 } else { 1 });
+
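+			// From nodes[1]'s perspective, restarting nodes[0] is simply a disconnect.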
+			nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+		} }
+	}
+
+	reload_node!(first_new_chain_monitor, first_nodes_0_deserialized, first_persister);
+
+	// On reload, the ChannelManager should realize it is stale compared to the ChannelMonitor and
+	// force-close the channel.
+	check_closed_event!(nodes[0], 1, ClosureReason::OutdatedChannelManager);
+	assert!(nodes[0].node.list_channels().is_empty());
+	assert!(nodes[0].node.has_pending_payments());
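+	// The single broadcast transaction is nodes[0]'s commitment transaction for the
+	// force-closed channel.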
+	assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0).len(), 1);
+
+	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::known(), remote_network_address: None });
+	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+
+	// Now nodes[1] should send a channel reestablish, which nodes[0] will respond to with an
+	// error, as the channel has hit the chain.
+	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::known(), remote_network_address: None });
+	let bs_reestablish = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
+	nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish);
+	let as_err = nodes[0].node.get_and_clear_pending_msg_events();
+	assert_eq!(as_err.len(), 1);
+	let bs_commitment_tx;
+	match as_err[0] {
+		MessageSendEvent::HandleError { node_id, action: msgs::ErrorAction::SendErrorMessage { ref msg } } => {
+			assert_eq!(node_id, nodes[1].node.get_our_node_id());
+			nodes[1].node.handle_error(&nodes[0].node.get_our_node_id(), msg);
+			check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: "Failed to find corresponding channel".to_string() });
+			check_added_monitors!(nodes[1], 1);
+			bs_commitment_tx = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+		},
+		_ => panic!("Unexpected event"),
+	}
+	check_closed_broadcast!(nodes[1], false);
+
+	// Now fail back the payment from nodes[2] to nodes[1]. This doesn't really matter as the
+	// previous hop channel is already on-chain, but it makes nodes[2] willing to see additional
+	// incoming HTLCs with the same payment hash later.
+	nodes[2].node.fail_htlc_backwards(&payment_hash);
+	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], [HTLCDestination::FailedPayment { payment_hash }]);
+	check_added_monitors!(nodes[2], 1);
+
+	let htlc_fulfill_updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
+	nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &htlc_fulfill_updates.update_fail_htlcs[0]);
+	commitment_signed_dance!(nodes[1], nodes[2], htlc_fulfill_updates.commitment_signed, false);
+	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1],
+		[HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]);
+
+	// Connect the HTLC-Timeout transaction, timing out the HTLC on both nodes (but not confirming
+	// the HTLC-Timeout transaction beyond 1 conf). For dust HTLCs, the HTLC is considered resolved
+	// after the commitment transaction, so always connect the commitment transaction.
+	mine_transaction(&nodes[0], &bs_commitment_tx[0]);
+	mine_transaction(&nodes[1], &bs_commitment_tx[0]);
+	if !use_dust {
+		connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1 + (MIN_CLTV_EXPIRY_DELTA as u32));
+		connect_blocks(&nodes[1], TEST_FINAL_CLTV - 1 + (MIN_CLTV_EXPIRY_DELTA as u32));
+		let as_htlc_timeout = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+		check_spends!(as_htlc_timeout[0], bs_commitment_tx[0]);
+		assert_eq!(as_htlc_timeout.len(), 1);
+
+		mine_transaction(&nodes[0], &as_htlc_timeout[0]);
+		// nodes[0] may rebroadcast (or RBF-bump) its HTLC-Timeout, so wipe the announced set.
+		nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
+		mine_transaction(&nodes[1], &as_htlc_timeout[0]);
+	}
+
+	// Create a new channel on which to retry the payment before we fail the payment via the
+	// HTLC-Timeout transaction. This avoids ChannelManager timing out the payment due to us
+	// connecting several blocks while creating the channel (implying time has passed).
+	// We do this with a zero-conf channel to avoid connecting blocks as a side-effect.
+	let (_, chan_id_3) = open_zero_conf_channel(&nodes[0], &nodes[1], None);
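+	// Only the new zero-conf channel is usable; the original channel was force-closed above.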
+	assert_eq!(nodes[0].node.list_usable_channels().len(), 1);
+
+	// If we attempt to retry prior to the HTLC-Timeout (or commitment transaction, for dust HTLCs)
+	// confirming, we will fail as it's considered still-pending...
+	let (new_route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[2], if use_dust { 1_000 } else { 1_000_000 });
+	assert!(nodes[0].node.retry_payment(&new_route, payment_id).is_err());
+	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+
+	// After ANTI_REORG_DELAY confirmations, the HTLC should be failed and we can try the payment
+	// again. We serialize the node first as we'll then test retrying the HTLC after a restart
+	// (which should also still work).
+	connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
+	connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
+	// We set mpp_parts_remain to avoid having abandon_payment called
+	expect_payment_failed_conditions(&nodes[0], payment_hash, false, PaymentFailedConditions::new().mpp_parts_remain());
+
+	chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new());
+	get_monitor!(nodes[0], chan_id).write(&mut chan_0_monitor_serialized).unwrap();
+	chan_1_monitor_serialized = test_utils::TestVecWriter(Vec::new());
+	get_monitor!(nodes[0], chan_id_3).write(&mut chan_1_monitor_serialized).unwrap();
+	nodes_0_serialized = nodes[0].node.encode();
+
+	assert!(nodes[0].node.retry_payment(&new_route, payment_id).is_ok());
+	assert!(!nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+
+	reload_node!(second_new_chain_monitor, second_nodes_0_deserialized, second_persister);
+	reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+
+	// Now resend the payment, delivering the HTLC and actually claiming it this time. This ensures
+	// the payment is not (spuriously) listed as still pending.
+	assert!(nodes[0].node.retry_payment(&new_route, payment_id).is_ok());
+	check_added_monitors!(nodes[0], 1);
+	pass_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], if use_dust { 1_000 } else { 1_000_000 }, payment_hash, payment_secret);
+	claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage);
+
+	assert!(nodes[0].node.retry_payment(&new_route, payment_id).is_err());
+	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+
+	chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new());
+	get_monitor!(nodes[0], chan_id).write(&mut chan_0_monitor_serialized).unwrap();
+	chan_1_monitor_serialized = test_utils::TestVecWriter(Vec::new());
+	get_monitor!(nodes[0], chan_id_3).write(&mut chan_1_monitor_serialized).unwrap();
+	nodes_0_serialized = nodes[0].node.encode();
+
+	// Ensure that after reload we cannot retry the payment.
+	reload_node!(third_new_chain_monitor, third_nodes_0_deserialized, third_persister);
+	reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+
+	assert!(nodes[0].node.retry_payment(&new_route, payment_id).is_err());
+	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+}
+
+#[test]
+fn test_completed_payment_not_retryable_on_reload() {
+	do_test_completed_payment_not_retryable_on_reload(true);
+	do_test_completed_payment_not_retryable_on_reload(false);
+}
+
+
 fn do_test_dup_htlc_onchain_fails_on_reload(persist_manager_post_event: bool, confirm_commitment_tx: bool, payment_timeout: bool) {
 	// When a Channel is closed, any outbound HTLCs which were relayed through it are simply
 	// dropped when the Channel is. From there, the ChannelManager relies on the ChannelMonitor