1 file changed, 6 insertions(+), 5 deletions(-)
@@ -1492,13 +1492,14 @@ int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb)
 	struct sk_buff_head *list = &sk->sk_receive_queue;
 	int rmem, err = -ENOMEM;
 	spinlock_t *busy = NULL;
-	int size;
+	int size, rcvbuf;
 
-	/* try to avoid the costly atomic add/sub pair when the receive
-	 * queue is full; always allow at least a packet
+	/* Immediately drop when the receive queue is full.
+	 * Always allow at least one packet.
 	 */
 	rmem = atomic_read(&sk->sk_rmem_alloc);
-	if (rmem > sk->sk_rcvbuf)
+	rcvbuf = READ_ONCE(sk->sk_rcvbuf);
+	if (rmem > rcvbuf)
 		goto drop;
 
 	/* Under mem pressure, it might be helpful to help udp_recvmsg()
@@ -1507,7 +1508,7 @@ int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb)
 	 * - Less cache line misses at copyout() time
 	 * - Less work at consume_skb() (less alien page frag freeing)
 	 */
-	if (rmem > (sk->sk_rcvbuf >> 1)) {
+	if (rmem > (rcvbuf >> 1)) {
 		skb_condense(skb);
 
 		busy = busylock_acquire(sk);
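For context, here is a minimal userspace sketch of the pattern the patch applies: sk_rcvbuf can be rewritten concurrently (setsockopt(SO_RCVBUF) does this in the kernel), so snapshotting it once keeps the full-queue check and the half-full check consistent with each other. C11 relaxed atomics stand in for the kernel's READ_ONCE(); the function name and values below are illustrative, not kernel code.

/*
 * Minimal userspace sketch, not the kernel source: C11 relaxed atomics
 * stand in for the kernel's READ_ONCE(), and enqueue_would_drop() is a
 * hypothetical name. Another thread may rewrite sk_rcvbuf at any time,
 * as setsockopt(SO_RCVBUF) does in the kernel.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static _Atomic int sk_rcvbuf = 212992;	/* receive limit, concurrently writable */

static bool enqueue_would_drop(int rmem)
{
	/* Snapshot the limit once, as the patch does; a second read could
	 * return a different value and make the two checks disagree.
	 */
	int rcvbuf = atomic_load_explicit(&sk_rcvbuf, memory_order_relaxed);

	if (rmem > rcvbuf)
		return true;		/* queue full: drop immediately */

	if (rmem > (rcvbuf >> 1)) {
		/* More than half full: the real code condenses the skb
		 * and takes the busylock here; elided in this sketch.
		 */
	}
	return false;
}

int main(void)
{
	printf("%d\n", enqueue_would_drop(300000));	/* 1: over the limit */
	printf("%d\n", enqueue_would_drop(100000));	/* 0: under half the limit */
	return 0;
}

Beyond correctness, the single read documents intent: both thresholds are deliberately derived from the same snapshot, whereas with two plain loads the compiler is free to reload sk_rcvbuf between the checks.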