@@ -36,19 +36,28 @@ static struct xdp_sock *xdp_sk(struct sock *sk)
 
 bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs)
 {
-	return !!xs->rx;
+	return READ_ONCE(xs->rx) && READ_ONCE(xs->umem) &&
+		READ_ONCE(xs->umem->fq);
 }
 
-static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
+u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr)
+{
+	return xskq_peek_addr(umem->fq, addr);
+}
+EXPORT_SYMBOL(xsk_umem_peek_addr);
+
+void xsk_umem_discard_addr(struct xdp_umem *umem)
+{
+	xskq_discard_addr(umem->fq);
+}
+EXPORT_SYMBOL(xsk_umem_discard_addr);
+
+static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
 {
-	u32 len = xdp->data_end - xdp->data;
 	void *buffer;
 	u64 addr;
 	int err;
 
-	if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
-		return -EINVAL;
-
 	if (!xskq_peek_addr(xs->umem->fq, &addr) ||
 	    len > xs->umem->chunk_size_nohr) {
 		xs->rx_dropped++;
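The two exported helpers give zero-copy capable drivers direct access to the UMEM fill queue: peek at the next chunk address, post it to hardware, then consume it. A minimal sketch of the driver-side refill loop this enables; everything except xsk_umem_peek_addr(), xsk_umem_discard_addr() and struct xdp_umem is a hypothetical drv_* placeholder:

/* Hypothetical driver Rx refill loop built on the new fill-queue helpers. */
static void drv_refill_rx_ring(struct drv_rx_ring *ring, struct xdp_umem *umem)
{
	u64 addr;

	while (ring->free_count) {
		/* Peek at the next fill-queue entry without consuming it. */
		if (!xsk_umem_peek_addr(umem, &addr))
			break;	/* fill queue empty: userspace must refill */

		/* Post the UMEM chunk at 'addr' to the hardware Rx ring
		 * (DMA mapping is driver business, elided here).
		 */
		drv_post_rx_buffer(ring, addr);
		ring->free_count--;

		/* Only now consume the fill-queue entry. */
		xsk_umem_discard_addr(umem);
	}
}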
@@ -60,25 +69,41 @@ static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
 	buffer = xdp_umem_get_data(xs->umem, addr);
 	memcpy(buffer, xdp->data, len);
 	err = xskq_produce_batch_desc(xs->rx, addr, len);
-	if (!err)
+	if (!err) {
 		xskq_discard_addr(xs->umem->fq);
-	else
-		xs->rx_dropped++;
+		xdp_return_buff(xdp);
+		return 0;
+	}
 
+	xs->rx_dropped++;
 	return err;
 }
 
-int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
+static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
 {
-	int err;
+	int err = xskq_produce_batch_desc(xs->rx, (u64)xdp->handle, len);
 
-	err = __xsk_rcv(xs, xdp);
-	if (likely(!err))
+	if (err) {
 		xdp_return_buff(xdp);
+		xs->rx_dropped++;
+	}
 
 	return err;
 }
 
+int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
+{
+	u32 len;
+
+	if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
+		return -EINVAL;
+
+	len = xdp->data_end - xdp->data;
+
+	return (xdp->rxq->mem.type == MEM_TYPE_ZERO_COPY) ?
+		__xsk_rcv_zc(xs, xdp, len) : __xsk_rcv(xs, xdp, len);
+}
+
 void xsk_flush(struct xdp_sock *xs)
 {
 	xskq_produce_flush_desc(xs->rx);
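xsk_rcv() now dispatches on the memory model of the receiving queue: MEM_TYPE_ZERO_COPY buffers already live in the UMEM, so only their handle and length go on the Rx ring, while everything else takes the memcpy path. Neither path is reached unless an XDP program redirects the packet into an XSKMAP; a minimal sketch of such a program, modeled on the samples/bpf style of the same era:

/* XDP program steering packets into the AF_XDP socket registered for
 * the receiving queue. bpf_redirect_map() is what eventually leads to
 * xsk_rcv() above.
 */
#include <linux/bpf.h>
#include "bpf_helpers.h"

struct bpf_map_def SEC("maps") xsks_map = {
	.type		= BPF_MAP_TYPE_XSKMAP,
	.key_size	= sizeof(int),
	.value_size	= sizeof(int),
	.max_entries	= 4,
};

SEC("xdp_sock")
int xdp_sock_prog(struct xdp_md *ctx)
{
	/* Returns XDP_REDIRECT on success, XDP_ABORTED semantics if the
	 * map slot for this queue is empty.
	 */
	return bpf_redirect_map(&xsks_map, ctx->rx_queue_index, 0);
}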
@@ -87,12 +112,29 @@ void xsk_flush(struct xdp_sock *xs)
 
 int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
 {
+	u32 len = xdp->data_end - xdp->data;
+	void *buffer;
+	u64 addr;
 	int err;
 
-	err = __xsk_rcv(xs, xdp);
-	if (!err)
+	if (!xskq_peek_addr(xs->umem->fq, &addr) ||
+	    len > xs->umem->chunk_size_nohr) {
+		xs->rx_dropped++;
+		return -ENOSPC;
+	}
+
+	addr += xs->umem->headroom;
+
+	buffer = xdp_umem_get_data(xs->umem, addr);
+	memcpy(buffer, xdp->data, len);
+	err = xskq_produce_batch_desc(xs->rx, addr, len);
+	if (!err) {
+		xskq_discard_addr(xs->umem->fq);
 		xsk_flush(xs);
+		return 0;
+	}
 
+	xs->rx_dropped++;
 	return err;
 }
 
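Whichever kernel path produces the descriptor, userspace sees the same contract: drain the Rx ring and recycle addresses into the fill queue, or -ENOSPC drops begin. A consumer-loop sketch written against the xsk_ring_* helpers from libxdp's <xdp/xsk.h> (these helpers postdate this patch; the era's samples open-coded the ring accesses), with xsk_socket_info and process_packet() as hypothetical app-side pieces:

#include <xdp/xsk.h>	/* libxdp; <bpf/xsk.h> in older libbpf trees */

struct xsk_socket_info {		/* hypothetical app-side wrapper */
	struct xsk_ring_cons rx;
	struct xsk_ring_prod fq;	/* fill queue */
	void *umem_area;		/* mmap'd UMEM */
};

void process_packet(void *pkt, __u32 len);	/* app-defined */

static void rx_and_recycle(struct xsk_socket_info *xsk)
{
	__u32 idx_rx = 0, idx_fq = 0;
	unsigned int i, rcvd;

	rcvd = xsk_ring_cons__peek(&xsk->rx, 64, &idx_rx);
	if (!rcvd)
		return;

	/* Reserve fill-queue slots so the kernel can keep receiving;
	 * real code would poll() instead of busy-waiting.
	 */
	while (xsk_ring_prod__reserve(&xsk->fq, rcvd, &idx_fq) != rcvd)
		;

	for (i = 0; i < rcvd; i++) {
		const struct xdp_desc *desc =
			xsk_ring_cons__rx_desc(&xsk->rx, idx_rx++);

		process_packet(xsk_umem__get_data(xsk->umem_area, desc->addr),
			       desc->len);

		/* Recycle the chunk back into the fill queue. */
		*xsk_ring_prod__fill_addr(&xsk->fq, idx_fq++) = desc->addr;
	}

	xsk_ring_prod__submit(&xsk->fq, rcvd);
	xsk_ring_cons__release(&xsk->rx, rcvd);
}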
@@ -291,6 +333,7 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
 	struct sock *sk = sock->sk;
 	struct xdp_sock *xs = xdp_sk(sk);
 	struct net_device *dev;
+	u32 flags, qid;
 	int err = 0;
 
 	if (addr_len < sizeof(struct sockaddr_xdp))
@@ -315,16 +358,26 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
 		goto out_unlock;
 	}
 
-	if ((xs->rx && sxdp->sxdp_queue_id >= dev->real_num_rx_queues) ||
-	    (xs->tx && sxdp->sxdp_queue_id >= dev->real_num_tx_queues)) {
+	qid = sxdp->sxdp_queue_id;
+
+	if ((xs->rx && qid >= dev->real_num_rx_queues) ||
+	    (xs->tx && qid >= dev->real_num_tx_queues)) {
 		err = -EINVAL;
 		goto out_unlock;
 	}
 
-	if (sxdp->sxdp_flags & XDP_SHARED_UMEM) {
+	flags = sxdp->sxdp_flags;
+
+	if (flags & XDP_SHARED_UMEM) {
 		struct xdp_sock *umem_xs;
 		struct socket *sock;
 
+		if ((flags & XDP_COPY) || (flags & XDP_ZEROCOPY)) {
+			/* Cannot specify flags for shared sockets. */
+			err = -EINVAL;
+			goto out_unlock;
+		}
+
 		if (xs->umem) {
 			/* We have already our own. */
 			err = -EINVAL;
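The new check rejects XDP_COPY and XDP_ZEROCOPY whenever XDP_SHARED_UMEM is set, since a socket sharing a UMEM inherits its mode from the owning socket. From userspace, these flags travel in struct sockaddr_xdp (<linux/if_xdp.h>); a minimal bind sketch, with the interface name and queue id as example values:

#include <linux/if_xdp.h>
#include <net/if.h>
#include <string.h>
#include <sys/socket.h>

/* xsk_fd is a socket(AF_XDP, SOCK_RAW, 0) descriptor. */
static int bind_xsk_zc(int xsk_fd)
{
	struct sockaddr_xdp sxdp;

	memset(&sxdp, 0, sizeof(sxdp));
	sxdp.sxdp_family = AF_XDP;
	sxdp.sxdp_ifindex = if_nametoindex("eth0");	/* example device */
	sxdp.sxdp_queue_id = 0;
	/* Request zero-copy mode; combining this with XDP_SHARED_UMEM
	 * now makes xsk_bind() return -EINVAL.
	 */
	sxdp.sxdp_flags = XDP_ZEROCOPY;

	return bind(xsk_fd, (struct sockaddr *)&sxdp, sizeof(sxdp));
}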
@@ -343,8 +396,7 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
 			err = -EBADF;
 			sockfd_put(sock);
 			goto out_unlock;
-		} else if (umem_xs->dev != dev ||
-			   umem_xs->queue_id != sxdp->sxdp_queue_id) {
+		} else if (umem_xs->dev != dev || umem_xs->queue_id != qid) {
 			err = -EINVAL;
 			sockfd_put(sock);
 			goto out_unlock;
@@ -360,6 +412,10 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
 		/* This xsk has its own umem. */
 		xskq_set_umem(xs->umem->fq, &xs->umem->props);
 		xskq_set_umem(xs->umem->cq, &xs->umem->props);
+
+		err = xdp_umem_assign_dev(xs->umem, dev, qid, flags);
+		if (err)
+			goto out_unlock;
 	}
 
 	xs->dev = dev;
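xdp_umem_assign_dev(), added elsewhere in this series, is what hands the UMEM to the driver: it validates the XDP_COPY/XDP_ZEROCOPY flags against the device's capabilities and, for zero-copy, issues an ndo_bpf() call with the XDP_SETUP_XSK_UMEM command. A rough sketch of the driver side under those assumptions (the drv_* names are hypothetical):

/* Driver-side handling of the UMEM setup command issued by
 * xdp_umem_assign_dev(); bpf->xsk.umem is NULL on teardown.
 */
static int drv_ndo_bpf(struct net_device *dev, struct netdev_bpf *bpf)
{
	switch (bpf->command) {
	case XDP_SETUP_XSK_UMEM:
		return drv_xsk_umem_setup(dev, bpf->xsk.umem,
					  bpf->xsk.queue_id);
	default:
		return -EINVAL;
	}
}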