Skip to content

Commit cf329aa

Browse files
Paolo Abeni authored and davem330 committed
udp: cope with UDP GRO packet misdirection
In some scenarios, the GRO engine can assemble a UDP GRO packet that ultimately lands on a non GRO-enabled socket. This patch tries to address the issue by explicitly checking for the UDP socket features before enqueuing the packet, and eventually segmenting the unexpected GRO packet, as needed. We must also cope with re-insertion requests: after segmentation the UDP code calls the helper introduced by the previous patches, as needed. Segmentation is performed by a common helper, which takes care of updating socket and protocol stats in case of failure. rfc v3 -> v1 - fix compile issues with rxrpc - when gso_segment returns NULL, treat it as an error - added 'ipv4' argument to udp_rcv_segment() rfc v2 -> rfc v3 - moved udp_rcv_segment() into net/udp.h, account errors to socket and ns, always return NULL or segs list Signed-off-by: Paolo Abeni <[email protected]> Signed-off-by: David S. Miller <[email protected]>
1 parent 80bde36 commit cf329aa

File tree

4 files changed

+88
-10
lines changed

4 files changed

+88
-10
lines changed

include/linux/udp.h

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -132,6 +132,12 @@ static inline void udp_cmsg_recv(struct msghdr *msg, struct sock *sk,
132132
}
133133
}
134134

135+
/* Return true when a GSO_UDP_L4 aggregated packet reached a socket that has
 * not enabled UDP GRO: such a packet is "unexpected" and must be segmented
 * back into individual datagrams before enqueuing.
 */
static inline bool udp_unexpected_gso(struct sock *sk, struct sk_buff *skb)
{
	return !udp_sk(sk)->gro_enabled && skb_is_gso(skb) &&
	       skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4;
}
135141
#define udp_portaddr_for_each_entry(__sk, list) \
136142
hlist_for_each_entry(__sk, list, __sk_common.skc_portaddr_node)
137143

include/net/udp.h

Lines changed: 37 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -417,17 +417,24 @@ static inline int copy_linear_skb(struct sk_buff *skb, int len, int off,
417417
} while(0)
418418

419419
#if IS_ENABLED(CONFIG_IPV6)
/* Resolve the per-netns UDP/UDP-lite MIB matching the given socket;
 * 'ipv4' selects between the v4 and v6 statistics blocks.
 */
#define __UDPX_MIB(sk, ipv4)						\
({									\
	ipv4 ? (IS_UDPLITE(sk) ? sock_net(sk)->mib.udplite_statistics :	\
				 sock_net(sk)->mib.udp_statistics) :	\
		(IS_UDPLITE(sk) ? sock_net(sk)->mib.udplite_stats_in6 :	\
				 sock_net(sk)->mib.udp_stats_in6);	\
})
#else
/* IPv6 disabled: only the v4 MIBs exist, 'ipv4' is ignored. */
#define __UDPX_MIB(sk, ipv4)						\
({									\
	IS_UDPLITE(sk) ? sock_net(sk)->mib.udplite_statistics :		\
			 sock_net(sk)->mib.udp_statistics;		\
})
#endif

/* Bump 'field' in the MIB matching the socket's address family. */
#define __UDPX_INC_STATS(sk, field) \
	__SNMP_INC_STATS(__UDPX_MIB(sk, (sk)->sk_family == AF_INET), field)
431438
#ifdef CONFIG_PROC_FS
432439
struct udp_seq_afinfo {
433440
sa_family_t family;
@@ -461,4 +468,26 @@ DECLARE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
461468
void udpv6_encap_enable(void);
462469
#endif
463470

471+
/* Segment an unexpected GRO packet before delivery to a non GRO-enabled
 * socket.
 *
 * On failure (error or NULL from the gso_segment callback) all aggregated
 * segments are accounted as drops on the socket and as UDP_MIB_INERRORS on
 * the per-netns MIB selected by 'ipv4', the original skb is freed and NULL
 * is returned.  On success the original skb is consumed and the list of
 * individual segments is returned for the caller to enqueue one by one.
 */
static inline struct sk_buff *udp_rcv_segment(struct sock *sk,
					      struct sk_buff *skb, bool ipv4)
{
	struct sk_buff *segs;

	/* the GSO CB lays after the UDP one, no need to save and restore any
	 * CB fragment
	 */
	segs = __skb_gso_segment(skb, NETIF_F_SG, false);
	if (unlikely(IS_ERR_OR_NULL(segs))) {
		/* charge the whole aggregate, not a single drop */
		int segs_nr = skb_shinfo(skb)->gso_segs;

		atomic_add(segs_nr, &sk->sk_drops);
		SNMP_ADD_STATS(__UDPX_MIB(sk, ipv4), UDP_MIB_INERRORS, segs_nr);
		kfree_skb(skb);
		return NULL;
	}

	consume_skb(skb);
	return segs;
}
492+
464493
#endif /* _UDP_H */

net/ipv4/udp.c

Lines changed: 22 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1906,7 +1906,7 @@ EXPORT_SYMBOL(udp_encap_enable);
19061906
* Note that in the success and error cases, the skb is assumed to
19071907
* have either been requeued or freed.
19081908
*/
1909-
static int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
1909+
static int udp_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
19101910
{
19111911
struct udp_sock *up = udp_sk(sk);
19121912
int is_udplite = IS_UDPLITE(sk);
@@ -2009,6 +2009,27 @@ static int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
20092009
return -1;
20102010
}
20112011

2012+
/* Enqueue an incoming skb, segmenting it first if the GRO engine aggregated
 * it but this socket did not opt into UDP GRO.
 */
static int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	struct sk_buff *next, *segs;
	int ret;

	/* fast path: no GRO misdirection, deliver as-is */
	if (likely(!udp_unexpected_gso(sk, skb)))
		return udp_queue_rcv_one_skb(sk, skb);

	/* segmentation reuses the CB area past the UDP CB; make sure the
	 * UDP CB fits below the GSO CB offset
	 */
	BUILD_BUG_ON(sizeof(struct udp_skb_cb) > SKB_SGO_CB_OFFSET);
	/* gso_segment expects data at the mac header */
	__skb_push(skb, -skb_mac_offset(skb));
	segs = udp_rcv_segment(sk, skb, true);
	for (skb = segs; skb; skb = next) {
		next = skb->next;
		__skb_pull(skb, skb_transport_offset(skb));
		ret = udp_queue_rcv_one_skb(sk, skb);
		if (ret > 0)
			/* a positive return value asks for re-insertion:
			 * -ret is the protocol number to deliver to
			 */
			ip_protocol_deliver_rcu(dev_net(skb->dev), skb, -ret);
	}
	return 0;
}
2032+
20122033
/* For TCP sockets, sk_rx_dst is protected by socket lock
20132034
* For UDP, we use xchg() to guard against concurrent changes.
20142035
*/

net/ipv6/udp.c

Lines changed: 23 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -554,7 +554,7 @@ void udpv6_encap_enable(void)
554554
}
555555
EXPORT_SYMBOL(udpv6_encap_enable);
556556

557-
static int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
557+
static int udpv6_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
558558
{
559559
struct udp_sock *up = udp_sk(sk);
560560
int is_udplite = IS_UDPLITE(sk);
@@ -637,6 +637,28 @@ static int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
637637
return -1;
638638
}
639639

640+
/* IPv6 twin of udp_queue_rcv_skb(): segment unexpected GRO packets before
 * enqueuing them on a non GRO-enabled socket.
 */
static int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	struct sk_buff *next, *segs;
	int ret;

	/* fast path: no GRO misdirection, deliver as-is */
	if (likely(!udp_unexpected_gso(sk, skb)))
		return udpv6_queue_rcv_one_skb(sk, skb);

	/* gso_segment expects data at the mac header */
	__skb_push(skb, -skb_mac_offset(skb));
	segs = udp_rcv_segment(sk, skb, false);
	for (skb = segs; skb; skb = next) {
		next = skb->next;
		__skb_pull(skb, skb_transport_offset(skb));

		ret = udpv6_queue_rcv_one_skb(sk, skb);
		if (ret > 0)
			/* a positive return value asks for re-insertion at
			 * the given protocol number
			 */
			ip6_protocol_deliver_rcu(dev_net(skb->dev), skb, ret,
						 true);
	}
	return 0;
}
661+
640662
static bool __udp_v6_is_mcast_sock(struct net *net, struct sock *sk,
641663
__be16 loc_port, const struct in6_addr *loc_addr,
642664
__be16 rmt_port, const struct in6_addr *rmt_addr,

0 commit comments

Comments
 (0)