@@ -1340,9 +1340,11 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_una)
 	struct tcp_sock *tp = tcp_sk(sk);
 	unsigned char *ptr = (skb_transport_header(ack_skb) +
			      TCP_SKB_CB(ack_skb)->sacked);
-	struct tcp_sack_block_wire *sp = (struct tcp_sack_block_wire *)(ptr+2);
+	struct tcp_sack_block_wire *sp_wire = (struct tcp_sack_block_wire *)(ptr+2);
+	struct tcp_sack_block sp[4];
 	struct sk_buff *cached_skb;
 	int num_sacks = (ptr[1] - TCPOLEN_SACK_BASE)>>3;
+	int used_sacks;
 	int reord = tp->packets_out;
 	int flag = 0;
 	int found_dup_sack = 0;
@@ -1357,7 +1359,7 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_una)
 		tp->highest_sack = tcp_write_queue_head(sk);
 	}
 
-	found_dup_sack = tcp_check_dsack(tp, ack_skb, sp,
+	found_dup_sack = tcp_check_dsack(tp, ack_skb, sp_wire,
					 num_sacks, prior_snd_una);
 	if (found_dup_sack)
 		flag |= FLAG_DSACKING_ACK;
@@ -1372,14 +1374,49 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_una)
 	if (!tp->packets_out)
 		goto out;
 
+	used_sacks = 0;
+	first_sack_index = 0;
+	for (i = 0; i < num_sacks; i++) {
+		int dup_sack = !i && found_dup_sack;
+
+		sp[used_sacks].start_seq = ntohl(get_unaligned(&sp_wire[i].start_seq));
+		sp[used_sacks].end_seq = ntohl(get_unaligned(&sp_wire[i].end_seq));
+
+		if (!tcp_is_sackblock_valid(tp, dup_sack,
+					    sp[used_sacks].start_seq,
+					    sp[used_sacks].end_seq)) {
+			if (dup_sack) {
+				if (!tp->undo_marker)
+					NET_INC_STATS_BH(LINUX_MIB_TCPDSACKIGNOREDNOUNDO);
+				else
+					NET_INC_STATS_BH(LINUX_MIB_TCPDSACKIGNOREDOLD);
+			} else {
+				/* Don't count olds caused by ACK reordering */
+				if ((TCP_SKB_CB(ack_skb)->ack_seq != tp->snd_una) &&
+				    !after(sp[used_sacks].end_seq, tp->snd_una))
+					continue;
+				NET_INC_STATS_BH(LINUX_MIB_TCPSACKDISCARD);
+			}
+			if (i == 0)
+				first_sack_index = -1;
+			continue;
+		}
+
+		/* Ignore very old stuff early */
+		if (!after(sp[used_sacks].end_seq, prior_snd_una))
+			continue;
+
+		used_sacks++;
+	}
+
 	/* SACK fastpath:
 	 * if the only SACK change is the increase of the end_seq of
 	 * the first block then only apply that SACK block
 	 * and use retrans queue hinting otherwise slowpath */
 	force_one_sack = 1;
-	for (i = 0; i < num_sacks; i++) {
-		__be32 start_seq = sp[i].start_seq;
-		__be32 end_seq = sp[i].end_seq;
+	for (i = 0; i < used_sacks; i++) {
+		u32 start_seq = sp[i].start_seq;
+		u32 end_seq = sp[i].end_seq;
 
 		if (i == 0) {
 			if (tp->recv_sack_cache[i].start_seq != start_seq)
@@ -1398,19 +1435,17 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_una)
 		tp->recv_sack_cache[i].end_seq = 0;
 	}
 
-	first_sack_index = 0;
 	if (force_one_sack)
-		num_sacks = 1;
+		used_sacks = 1;
 	else {
 		int j;
 		tp->fastpath_skb_hint = NULL;
 
 		/* order SACK blocks to allow in order walk of the retrans queue */
-		for (i = num_sacks-1; i > 0; i--) {
+		for (i = used_sacks-1; i > 0; i--) {
 			for (j = 0; j < i; j++){
-				if (after(ntohl(sp[j].start_seq),
-					  ntohl(sp[j+1].start_seq))){
-					struct tcp_sack_block_wire tmp;
+				if (after(sp[j].start_seq, sp[j+1].start_seq)) {
+					struct tcp_sack_block tmp;
 
 					tmp = sp[j];
 					sp[j] = sp[j+1];
@@ -1433,32 +1468,14 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_una)
 		cached_fack_count = 0;
 	}
 
-	for (i = 0; i < num_sacks; i++) {
+	for (i = 0; i < used_sacks; i++) {
 		struct sk_buff *skb;
-		__u32 start_seq = ntohl(sp->start_seq);
-		__u32 end_seq = ntohl(sp->end_seq);
+		u32 start_seq = sp[i].start_seq;
+		u32 end_seq = sp[i].end_seq;
 		int fack_count;
 		int dup_sack = (found_dup_sack && (i == first_sack_index));
 		int next_dup = (found_dup_sack && (i+1 == first_sack_index));
 
-		sp++;
-
-		if (!tcp_is_sackblock_valid(tp, dup_sack, start_seq, end_seq)) {
-			if (dup_sack) {
-				if (!tp->undo_marker)
-					NET_INC_STATS_BH(LINUX_MIB_TCPDSACKIGNOREDNOUNDO);
-				else
-					NET_INC_STATS_BH(LINUX_MIB_TCPDSACKIGNOREDOLD);
-			} else {
-				/* Don't count olds caused by ACK reordering */
-				if ((TCP_SKB_CB(ack_skb)->ack_seq != tp->snd_una) &&
-				    !after(end_seq, tp->snd_una))
-					continue;
-				NET_INC_STATS_BH(LINUX_MIB_TCPSACKDISCARD);
-			}
-			continue;
-		}
-
 		skb = cached_skb;
 		fack_count = cached_fack_count;
 
@@ -1489,8 +1506,8 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_una)
 
 		/* Due to sorting DSACK may reside within this SACK block! */
 		if (next_dup) {
-			u32 dup_start = ntohl(sp->start_seq);
-			u32 dup_end = ntohl(sp->end_seq);
+			u32 dup_start = sp[i+1].start_seq;
+			u32 dup_end = sp[i+1].end_seq;
 
 			if (before(TCP_SKB_CB(skb)->seq, dup_end)) {
 				in_sack = tcp_match_skb_to_sack(sk, skb, dup_start, dup_end);
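
The patch boils down to one idea: convert every SACK block from network to host byte order exactly once, discard invalid and stale blocks before any queue walking starts, and sort the survivors so the retransmit queue is traversed front to back in a single pass. Below is a minimal user-space C sketch of that parse-validate-sort step. Everything in it (sack_block_wire, sack_block, seq_after, is_block_valid, parse_sack_blocks) is a simplified stand-in invented for illustration, not kernel API; in particular, is_block_valid only requires a non-empty block, whereas the kernel's tcp_is_sackblock_valid() also checks the block against the send window and D-SACK undo state.

/*
 * Standalone sketch of the parse-validate-sort pass shown in the diff.
 * All names are simplified stand-ins for illustration only.
 */
#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>	/* ntohl(), htonl() */

struct sack_block_wire { uint32_t start_seq, end_seq; };	/* network order */
struct sack_block { uint32_t start_seq, end_seq; };		/* host order */

/* Wrap-safe sequence comparison, modeled on the kernel's after() macro. */
static int seq_after(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) > 0;
}

/*
 * Placeholder validity check: the block must cover at least one byte.
 * The real tcp_is_sackblock_valid() also checks the block against the
 * send window and the D-SACK undo state.
 */
static int is_block_valid(uint32_t start_seq, uint32_t end_seq)
{
	return seq_after(end_seq, start_seq);
}

/*
 * Convert wire-format blocks to host order once, drop invalid blocks and
 * blocks entirely at or below snd_una, then sort the survivors by
 * start_seq. Returns how many usable blocks were written to sp[].
 */
static int parse_sack_blocks(const struct sack_block_wire *wire, int num,
			     uint32_t snd_una, struct sack_block *sp)
{
	int i, j, used = 0;

	for (i = 0; i < num; i++) {
		uint32_t start = ntohl(wire[i].start_seq);
		uint32_t end = ntohl(wire[i].end_seq);

		if (!is_block_valid(start, end))
			continue;
		if (!seq_after(end, snd_una))	/* ignore very old stuff */
			continue;
		sp[used].start_seq = start;
		sp[used].end_seq = end;
		used++;
	}

	/* Same in-place swap sort as the patch; fine for at most 4 blocks. */
	for (i = used - 1; i > 0; i--) {
		for (j = 0; j < i; j++) {
			if (seq_after(sp[j].start_seq, sp[j + 1].start_seq)) {
				struct sack_block tmp = sp[j];

				sp[j] = sp[j + 1];
				sp[j + 1] = tmp;
			}
		}
	}
	return used;
}

int main(void)
{
	/* Blocks arrive out of order; the middle one is stale. */
	struct sack_block_wire wire[3] = {
		{ htonl(3000), htonl(4000) },
		{ htonl(100), htonl(200) },	/* at or below snd_una */
		{ htonl(1000), htonl(2000) },
	};
	struct sack_block sp[4];
	int i, used = parse_sack_blocks(wire, 3, 500, sp);

	for (i = 0; i < used; i++)
		printf("block %d: %u-%u\n", i, sp[i].start_seq, sp[i].end_seq);
	return 0;
}

Built with cc -std=c99, the example drops the stale block and prints the remaining two in ascending start_seq order. The plain swap sort mirrors the patch and is adequate here because TCP option space never carries more than four SACK blocks, which is also why the patch declares struct tcp_sack_block sp[4].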