@@ -18,11 +18,12 @@ use bitcoin::network::constants::Network;
 use chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdateErr};
 use chain::transaction::OutPoint;
 use chain::Watch;
-use ln::channelmanager::{RAACommitmentOrder, PaymentPreimage, PaymentHash, PaymentSecret, PaymentSendFailure};
+use ln::channelmanager::{ChannelManager, ChannelManagerReadArgs, RAACommitmentOrder, PaymentPreimage, PaymentHash, PaymentSecret, PaymentSendFailure};
 use ln::features::InitFeatures;
 use ln::msgs;
 use ln::msgs::{ChannelMessageHandler, ErrorAction, RoutingMessageHandler};
 use routing::router::get_route;
+use util::config::UserConfig;
 use util::enforcing_trait_impls::EnforcingSigner;
 use util::events::{Event, EventsProvider, MessageSendEvent, MessageSendEventsProvider};
 use util::errors::APIError;
@@ -35,6 +36,8 @@ use ln::functional_test_utils::*;
 
 use util::test_utils;
 
+use std::collections::HashMap;
+
 // If persister_fail is true, we have the persister return a PermanentFailure
 // instead of the higher-level ChainMonitor.
 fn do_test_simple_monitor_permanent_update_fail(persister_fail: bool) {
@@ -1967,3 +1970,202 @@ fn test_path_paused_mpp() {
 
 	claim_payment_along_route_with_secret(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_preimage, Some(payment_secret), 200_000);
 }
+
+fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) {
+	// Tests that, when we serialize a channel with AddHTLC entries in the holding cell, we
+	// properly free them on reconnect. We previously failed such HTLCs upon serialization, but
+	// that behavior was both somewhat unexpected and also broken (there was a debug assertion
+	// which failed in such a case).
+	let chanmon_cfgs = create_chanmon_cfgs(2);
+	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+	let persister: test_utils::TestPersister;
+	let new_chain_monitor: test_utils::TestChainMonitor;
+	let nodes_0_deserialized: ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
+	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+	let chan_id = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 15_000_000, 7_000_000_000, InitFeatures::known(), InitFeatures::known()).2;
+	let (payment_preimage_1, payment_hash_1) = get_payment_preimage_hash!(&nodes[0]);
+	let (payment_preimage_2, payment_hash_2) = get_payment_preimage_hash!(&nodes[0]);
+
+	// Do a really complicated dance to get an HTLC into the holding cell, with MonitorUpdateFailed
+	// set but AwaitingRemoteRevoke unset. When this test was written, any attempts to send an HTLC
+	// while MonitorUpdateFailed is set are immediately failed-backwards. Thus, the only way to get
+	// an AddHTLC into the holding cell is to add it while AwaitingRemoteRevoke is set but
+	// MonitorUpdateFailed is unset, and then swap the flags.
+	//
+	// We do this by:
+	//  a) routing a payment from node B to node A,
+	//  b) sending a payment from node A to node B without delivering any of the generated messages,
+	//     putting node A in AwaitingRemoteRevoke,
+	//  c) sending a second payment from node A to node B, which is immediately placed in the
+	//     holding cell,
+	//  d) claiming the first payment from B, allowing us to fail the monitor update which occurs
+	//     when we try to persist the payment preimage,
+	//  e) delivering A's commitment_signed from (b) and the resulting B revoke_and_ack message,
+	//     clearing AwaitingRemoteRevoke on node A.
+	//
+	// Note that because, at the end, MonitorUpdateFailed is still set, the HTLC generated in (c)
+	// will not be freed from the holding cell.
+	let (payment_preimage_0, _) = route_payment(&nodes[1], &[&nodes[0]], 100000);
+
+	let route = {
+		let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
+		get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), None, None, &Vec::new(), 100000, TEST_FINAL_CLTV, nodes[0].logger).unwrap()
+	};
+
+	nodes[0].node.send_payment(&route, payment_hash_1, &None).unwrap();
+	check_added_monitors!(nodes[0], 1);
+	let send = SendEvent::from_node(&nodes[0]);
+	assert_eq!(send.msgs.len(), 1);
+
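+	// Send the second payment while node A is still AwaitingRemoteRevoke: it cannot go out yet,
+	// so it lands in the holding cell (step (c)) and no monitor update is generated.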
+	nodes[0].node.send_payment(&route, payment_hash_2, &None).unwrap();
+	check_added_monitors!(nodes[0], 0);
+
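+	// Fail the next monitor update, then claim the inbound payment from (a). Persisting the
+	// preimage triggers the failing update, leaving node A with MonitorUpdateFailed set (step (d)).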
+	*nodes[0].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+	assert!(nodes[0].node.claim_funds(payment_preimage_0, &None, 100000));
+	check_added_monitors!(nodes[0], 1);
+
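+	// Deliver A's update_add_htlc and commitment_signed from (b) to node B, which responds with
+	// its revoke_and_ack and commitment_signed.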
+	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send.msgs[0]);
+	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send.commitment_msg);
+	check_added_monitors!(nodes[1], 1);
+
+	let (raa, cs) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+
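+	// Deliver B's revoke_and_ack to node A, clearing AwaitingRemoteRevoke (step (e)). B's
+	// commitment_signed (`cs`) is held back and delivered later.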
+	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &raa);
+	check_added_monitors!(nodes[0], 1);
+
+	if disconnect {
+		// Optionally reload nodes[0] entirely through a serialization roundtrip, otherwise just
+		// disconnect the peers. Note that the fuzzer originally found this issue because
+		// deserializing a ChannelManager in this state causes an assertion failure.
+		if reload_a {
+			let nodes_0_serialized = nodes[0].node.encode();
+			let mut chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new());
+			nodes[0].chain_monitor.chain_monitor.monitors.read().unwrap().iter().next().unwrap().1.write(&mut chan_0_monitor_serialized).unwrap();
+
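+			// Stand up a fresh persister and ChainMonitor for node A and read the ChannelMonitor
+			// back out of the bytes we just wrote.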
+			persister = test_utils::TestPersister::new();
+			let keys_manager = &chanmon_cfgs[0].keys_manager;
+			new_chain_monitor = test_utils::TestChainMonitor::new(Some(nodes[0].chain_source), nodes[0].tx_broadcaster.clone(), nodes[0].logger, node_cfgs[0].fee_estimator, &persister, keys_manager);
+			nodes[0].chain_monitor = &new_chain_monitor;
+			let mut chan_0_monitor_read = &chan_0_monitor_serialized.0[..];
+			let (_, mut chan_0_monitor) = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(
+				&mut chan_0_monitor_read, keys_manager).unwrap();
+			assert!(chan_0_monitor_read.is_empty());
+
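+			// Deserialize the ChannelManager, handing it the restored ChannelMonitor via
+			// ChannelManagerReadArgs, and swap it in as node A's manager.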
+			let mut nodes_0_read = &nodes_0_serialized[..];
+			let config = UserConfig::default();
+			nodes_0_deserialized = {
+				let mut channel_monitors = HashMap::new();
+				channel_monitors.insert(chan_0_monitor.get_funding_txo().0, &mut chan_0_monitor);
+				<(BlockHash, ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
+					default_config: config,
+					keys_manager,
+					fee_estimator: node_cfgs[0].fee_estimator,
+					chain_monitor: nodes[0].chain_monitor,
+					tx_broadcaster: nodes[0].tx_broadcaster.clone(),
+					logger: nodes[0].logger,
+					channel_monitors,
+				}).unwrap().1
+			};
+			nodes[0].node = &nodes_0_deserialized;
+			assert!(nodes_0_read.is_empty());
+
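+			// Register the restored monitor with the new ChainMonitor; this counts as one added
+			// monitor for node A.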
+			nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0.clone(), chan_0_monitor).unwrap();
+			check_added_monitors!(nodes[0], 1);
+		} else {
+			nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+		}
+		nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+
+		// Now reconnect the two
+		nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
+		let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
+		assert_eq!(reestablish_1.len(), 1);
+		nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
+		let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
+		assert_eq!(reestablish_2.len(), 1);
+
+		nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
+		let resp_1 = handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
+		check_added_monitors!(nodes[1], 0);
+
+		nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
+		let resp_0 = handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
+
+		assert!(resp_0.0.is_none());
+		assert!(resp_0.1.is_none());
+		assert!(resp_0.2.is_none());
+		assert!(resp_1.0.is_none());
+		assert!(resp_1.1.is_none());
+
+		// Check that the freshly-generated cs is equal to the original (which we will deliver in a
+		// moment).
+		if let Some(pending_cs) = resp_1.2 {
+			assert!(pending_cs.update_add_htlcs.is_empty());
+			assert!(pending_cs.update_fail_htlcs.is_empty());
+			assert!(pending_cs.update_fulfill_htlcs.is_empty());
+			assert_eq!(pending_cs.commitment_signed, cs);
+		} else { panic!(); }
+
+		// There should be no monitor updates as we are still pending awaiting a failed one.
+		check_added_monitors!(nodes[0], 0);
+		check_added_monitors!(nodes[1], 0);
+	}
+
+	// If we finish updating the monitor, we should free the holding cell right away (this did
+	// not occur prior to #756).
+	*nodes[0].chain_monitor.update_ret.lock().unwrap() = None;
+	let (funding_txo, mon_id) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id).unwrap().clone();
+	nodes[0].node.channel_monitor_updated(&funding_txo, mon_id);
+
+	// New outbound messages should be generated immediately upon a call to
+	// get_and_clear_pending_msg_events (but not before).
+	check_added_monitors!(nodes[0], 0);
+	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+	check_added_monitors!(nodes[0], 1);
+	assert_eq!(events.len(), 1);
+
+	// Deliver the pending in-flight CS
+	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &cs);
+	check_added_monitors!(nodes[0], 1);
+
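+	// The freed holding-cell update carries both the fulfill for payment 0 and the held
+	// update_add_htlc for payment 2; deliver both to node B.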
+	let commitment_msg = match events.pop().unwrap() {
+		MessageSendEvent::UpdateHTLCs { node_id, updates } => {
+			assert_eq!(node_id, nodes[1].node.get_our_node_id());
+			assert!(updates.update_fail_htlcs.is_empty());
+			assert!(updates.update_fail_malformed_htlcs.is_empty());
+			assert!(updates.update_fee.is_none());
+			assert_eq!(updates.update_fulfill_htlcs.len(), 1);
+			nodes[1].node.handle_update_fulfill_htlc(&nodes[0].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
+			expect_payment_sent!(nodes[1], payment_preimage_0);
+			assert_eq!(updates.update_add_htlcs.len(), 1);
+			nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
+			updates.commitment_signed
+		},
+		_ => panic!("Unexpected event type!"),
+	};
+
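+	// Finish the exchange: node B signs and node A revokes, after which node B can receive the
+	// first payment; a final commitment_signed dance then delivers the second (holding-cell) one.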
+	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &commitment_msg);
+	check_added_monitors!(nodes[1], 1);
+
+	let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack);
+	expect_pending_htlcs_forwardable!(nodes[1]);
+	expect_payment_received!(nodes[1], payment_hash_1, 100000);
+	check_added_monitors!(nodes[1], 1);
+
+	commitment_signed_dance!(nodes[1], nodes[0], (), false, true, false);
+
+	expect_pending_htlcs_forwardable!(nodes[1]);
+	expect_payment_received!(nodes[1], payment_hash_2, 100000);
+
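+	// Finally, claim both payments back to node A, confirming the channel is fully usable after
+	// the holding cell was freed.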
+	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1, 100000);
+	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2, 100000);
+}
+
+#[test]
+fn channel_holding_cell_serialize() {
+	do_channel_holding_cell_serialize(true, true);
+	do_channel_holding_cell_serialize(true, false);
+	do_channel_holding_cell_serialize(false, true); // last arg doesn't matter
+}