Skip to content

Commit b01fd6e

Browse files
Cong Wang authored and Alexei Starovoitov committed
skmsg: Introduce a spinlock to protect ingress_msg
Currently we rely on lock_sock to protect ingress_msg, it is too big for this, we can actually just use a spinlock to protect this list like protecting other skb queues. __tcp_bpf_recvmsg() is still special because of peeking, it still has to use lock_sock. Signed-off-by: Cong Wang <[email protected]> Signed-off-by: Alexei Starovoitov <[email protected]> Acked-by: Jakub Sitnicki <[email protected]> Acked-by: John Fastabend <[email protected]> Link: https://lore.kernel.org/bpf/[email protected]
1 parent 37f0e51 commit b01fd6e

File tree

3 files changed

+55
-12
lines changed

3 files changed

+55
-12
lines changed

include/linux/skmsg.h

Lines changed: 46 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -89,6 +89,7 @@ struct sk_psock {
8989
#endif
9090
struct sk_buff_head ingress_skb;
9191
struct list_head ingress_msg;
92+
spinlock_t ingress_lock;
9293
unsigned long state;
9394
struct list_head link;
9495
spinlock_t link_lock;
@@ -284,14 +285,59 @@ static inline struct sk_psock *sk_psock(const struct sock *sk)
284285
static inline void sk_psock_queue_msg(struct sk_psock *psock,
285286
struct sk_msg *msg)
286287
{
288+
spin_lock_bh(&psock->ingress_lock);
287289
list_add_tail(&msg->list, &psock->ingress_msg);
290+
spin_unlock_bh(&psock->ingress_lock);
291+
}
292+
293+
static inline struct sk_msg *sk_psock_dequeue_msg(struct sk_psock *psock)
294+
{
295+
struct sk_msg *msg;
296+
297+
spin_lock_bh(&psock->ingress_lock);
298+
msg = list_first_entry_or_null(&psock->ingress_msg, struct sk_msg, list);
299+
if (msg)
300+
list_del(&msg->list);
301+
spin_unlock_bh(&psock->ingress_lock);
302+
return msg;
303+
}
304+
305+
static inline struct sk_msg *sk_psock_peek_msg(struct sk_psock *psock)
306+
{
307+
struct sk_msg *msg;
308+
309+
spin_lock_bh(&psock->ingress_lock);
310+
msg = list_first_entry_or_null(&psock->ingress_msg, struct sk_msg, list);
311+
spin_unlock_bh(&psock->ingress_lock);
312+
return msg;
313+
}
314+
315+
static inline struct sk_msg *sk_psock_next_msg(struct sk_psock *psock,
316+
struct sk_msg *msg)
317+
{
318+
struct sk_msg *ret;
319+
320+
spin_lock_bh(&psock->ingress_lock);
321+
if (list_is_last(&msg->list, &psock->ingress_msg))
322+
ret = NULL;
323+
else
324+
ret = list_next_entry(msg, list);
325+
spin_unlock_bh(&psock->ingress_lock);
326+
return ret;
288327
}
289328

290329
static inline bool sk_psock_queue_empty(const struct sk_psock *psock)
291330
{
292331
return psock ? list_empty(&psock->ingress_msg) : true;
293332
}
294333

334+
static inline void kfree_sk_msg(struct sk_msg *msg)
335+
{
336+
if (msg->skb)
337+
consume_skb(msg->skb);
338+
kfree(msg);
339+
}
340+
295341
static inline void sk_psock_report_error(struct sk_psock *psock, int err)
296342
{
297343
struct sock *sk = psock->sk;

net/core/skmsg.c

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -592,6 +592,7 @@ struct sk_psock *sk_psock_init(struct sock *sk, int node)
592592

593593
INIT_WORK(&psock->work, sk_psock_backlog);
594594
INIT_LIST_HEAD(&psock->ingress_msg);
595+
spin_lock_init(&psock->ingress_lock);
595596
skb_queue_head_init(&psock->ingress_skb);
596597

597598
sk_psock_set_state(psock, SK_PSOCK_TX_ENABLED);
@@ -638,7 +639,9 @@ static void sk_psock_zap_ingress(struct sk_psock *psock)
638639
skb_bpf_redirect_clear(skb);
639640
kfree_skb(skb);
640641
}
642+
spin_lock_bh(&psock->ingress_lock);
641643
__sk_psock_purge_ingress_msg(psock);
644+
spin_unlock_bh(&psock->ingress_lock);
642645
}
643646

644647
static void sk_psock_link_destroy(struct sk_psock *psock)

net/ipv4/tcp_bpf.c

Lines changed: 6 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -18,9 +18,7 @@ int __tcp_bpf_recvmsg(struct sock *sk, struct sk_psock *psock,
1818
struct sk_msg *msg_rx;
1919
int i, copied = 0;
2020

21-
msg_rx = list_first_entry_or_null(&psock->ingress_msg,
22-
struct sk_msg, list);
23-
21+
msg_rx = sk_psock_peek_msg(psock);
2422
while (copied != len) {
2523
struct scatterlist *sge;
2624

@@ -68,22 +66,18 @@ int __tcp_bpf_recvmsg(struct sock *sk, struct sk_psock *psock,
6866
} while (i != msg_rx->sg.end);
6967

7068
if (unlikely(peek)) {
71-
if (msg_rx == list_last_entry(&psock->ingress_msg,
72-
struct sk_msg, list))
69+
msg_rx = sk_psock_next_msg(psock, msg_rx);
70+
if (!msg_rx)
7371
break;
74-
msg_rx = list_next_entry(msg_rx, list);
7572
continue;
7673
}
7774

7875
msg_rx->sg.start = i;
7976
if (!sge->length && msg_rx->sg.start == msg_rx->sg.end) {
80-
list_del(&msg_rx->list);
81-
if (msg_rx->skb)
82-
consume_skb(msg_rx->skb);
83-
kfree(msg_rx);
77+
msg_rx = sk_psock_dequeue_msg(psock);
78+
kfree_sk_msg(msg_rx);
8479
}
85-
msg_rx = list_first_entry_or_null(&psock->ingress_msg,
86-
struct sk_msg, list);
80+
msg_rx = sk_psock_peek_msg(psock);
8781
}
8882

8983
return copied;

0 commit comments

Comments
 (0)