@@ -25,7 +25,7 @@ use chain;
2525use chain:: Access ;
2626use ln:: features:: { ChannelFeatures , NodeFeatures } ;
2727use ln:: msgs:: { DecodeError , ErrorAction , Init , LightningError , RoutingMessageHandler , NetAddress , MAX_VALUE_MSAT } ;
28- use ln:: msgs:: { ChannelAnnouncement , ChannelUpdate , NodeAnnouncement , OptionalField } ;
28+ use ln:: msgs:: { ChannelAnnouncement , ChannelUpdate , NodeAnnouncement , OptionalField , GossipTimestampFilter } ;
2929use ln:: msgs:: { QueryChannelRange , ReplyChannelRange , QueryShortChannelIds , ReplyShortChannelIdsEnd } ;
3030use ln:: msgs;
3131use util:: ser:: { Writeable , Readable , Writer } ;
@@ -395,13 +395,28 @@ where C::Target: chain::Access, L::Target: Logger
395395 /// to request gossip messages for each channel. The sync is considered complete
396396 /// when the final reply_scids_end message is received, though we are not
397397 /// tracking this directly.
398- fn sync_routing_table ( & self , their_node_id : & PublicKey , init_msg : & Init ) {
399-
398+ fn peer_connected ( & self , their_node_id : & PublicKey , init_msg : & Init ) {
400399 // We will only perform a sync with peers that support gossip_queries.
401400 if !init_msg. features . supports_gossip_queries ( ) {
402401 return ( ) ;
403402 }
404403
404+ // Send a gossip_timestamp_filter to enable gossip message receipt. Note that we have to
405+ // use an "all timestamps" filter as sending the current timestamp would result in missing
406+ // gossip messages that are simply sent late. We could calculate the intended filter time
407+ // by looking at the current time and subtracting two weeks (before which we'll reject
408+ // messages), but there's not a lot of reason to bother - our peers should be discarding
409+ // the same messages.
410+ let mut pending_events = self . pending_events . lock ( ) . unwrap ( ) ;
411+ pending_events. push ( MessageSendEvent :: SendGossipTimestampFilter {
412+ node_id : their_node_id. clone ( ) ,
413+ msg : GossipTimestampFilter {
414+ chain_hash : self . network_graph . genesis_hash ,
415+ first_timestamp : 0 ,
416+ timestamp_range : u32:: max_value ( ) ,
417+ } ,
418+ } ) ;
419+
405420 // Check if we need to perform a full synchronization with this peer
406421 if !self . should_request_full_sync ( & their_node_id) {
407422 return ( ) ;
@@ -410,7 +425,6 @@ where C::Target: chain::Access, L::Target: Logger
410425 let first_blocknum = 0 ;
411426 let number_of_blocks = 0xffffffff ;
412427 log_debug ! ( self . logger, "Sending query_channel_range peer={}, first_blocknum={}, number_of_blocks={}" , log_pubkey!( their_node_id) , first_blocknum, number_of_blocks) ;
413- let mut pending_events = self . pending_events . lock ( ) . unwrap ( ) ;
414428 pending_events. push ( MessageSendEvent :: SendChannelRangeQuery {
415429 node_id : their_node_id. clone ( ) ,
416430 msg : QueryChannelRange {
@@ -2271,18 +2285,27 @@ mod tests {
22712285 // It should ignore if gossip_queries feature is not enabled
22722286 {
22732287 let init_msg = Init { features : InitFeatures :: known ( ) . clear_gossip_queries ( ) } ;
2274- net_graph_msg_handler. sync_routing_table ( & node_id_1, & init_msg) ;
2288+ net_graph_msg_handler. peer_connected ( & node_id_1, & init_msg) ;
22752289 let events = net_graph_msg_handler. get_and_clear_pending_msg_events ( ) ;
22762290 assert_eq ! ( events. len( ) , 0 ) ;
22772291 }
22782292
22792293 // It should send a query_channel_message with the correct information
22802294 {
22812295 let init_msg = Init { features : InitFeatures :: known ( ) } ;
2282- net_graph_msg_handler. sync_routing_table ( & node_id_1, & init_msg) ;
2296+ net_graph_msg_handler. peer_connected ( & node_id_1, & init_msg) ;
22832297 let events = net_graph_msg_handler. get_and_clear_pending_msg_events ( ) ;
2284- assert_eq ! ( events. len( ) , 1 ) ;
2298+ assert_eq ! ( events. len( ) , 2 ) ;
22852299 match & events[ 0 ] {
2300+ MessageSendEvent :: SendGossipTimestampFilter { node_id, msg } => {
2301+ assert_eq ! ( node_id, & node_id_1) ;
2302+ assert_eq ! ( msg. chain_hash, chain_hash) ;
2303+ assert_eq ! ( msg. first_timestamp, 0 ) ;
2304+ assert_eq ! ( msg. timestamp_range, u32 :: max_value( ) ) ;
2305+ } ,
2306+ _ => panic ! ( "Expected MessageSendEvent::SendGossipTimestampFilter" )
2307+ } ;
2308+ match & events[ 1 ] {
22862309 MessageSendEvent :: SendChannelRangeQuery { node_id, msg } => {
22872310 assert_eq ! ( node_id, & node_id_1) ;
22882311 assert_eq ! ( msg. chain_hash, chain_hash) ;
@@ -2303,12 +2326,14 @@ mod tests {
23032326 for n in 1 ..7 {
23042327 let node_privkey = & SecretKey :: from_slice ( & [ n; 32 ] ) . unwrap ( ) ;
23052328 let node_id = PublicKey :: from_secret_key ( & secp_ctx, node_privkey) ;
2306- net_graph_msg_handler. sync_routing_table ( & node_id, & init_msg) ;
2329+ net_graph_msg_handler. peer_connected ( & node_id, & init_msg) ;
23072330 let events = net_graph_msg_handler. get_and_clear_pending_msg_events ( ) ;
23082331 if n <= 5 {
2309- assert_eq ! ( events. len( ) , 1 ) ;
2332+ assert_eq ! ( events. len( ) , 2 ) ;
23102333 } else {
2311- assert_eq ! ( events. len( ) , 0 ) ;
2334+ // Even after we stop sending the explicit query, we should still send a
2335+ // gossip_timestamp_filter on each new connection.
2336+ assert_eq ! ( events. len( ) , 1 ) ;
23122337 }
23132338
23142339 }
0 commit comments