
Commit a71506a

magnus-karlsson authored and Alexei Starovoitov committed
xsk: Move driver interface to xdp_sock_drv.h
Move the AF_XDP zero-copy driver interface to its own include file called
xdp_sock_drv.h. This will hopefully make it clearer to NIC driver
implementors which functions to use for zero-copy support.

v4->v5: Fix -Wmissing-prototypes by including the header file. (Jakub)

Signed-off-by: Magnus Karlsson <[email protected]>
Signed-off-by: Alexei Starovoitov <[email protected]>
Link: https://lore.kernel.org/bpf/[email protected]
1 parent d20a167 commit a71506a
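The prototypes that move into <net/xdp_sock_drv.h> below are exactly the calls a zero-copy NIC driver makes on its datapath. For reference, here is a minimal sketch of the Tx side of that usage in kernel C. It is not code from this commit: struct my_drv_ring and the my_drv_* helpers are hypothetical, and only the xsk_umem_* and xdp_umem_* calls, whose prototypes appear in the include/net/xdp_sock.h hunks further down, are real.

/* Hypothetical zero-copy Tx path; everything prefixed my_drv_ is invented
 * for illustration, while the xsk_umem_* and xdp_umem_* helpers are the
 * ones whose prototypes move to <net/xdp_sock_drv.h> in this commit.
 */
#include <net/xdp_sock_drv.h>

static void my_drv_xsk_xmit(struct my_drv_ring *ring, struct xdp_umem *umem,
                            unsigned int budget)
{
        struct xdp_desc desc;
        unsigned int sent = 0;

        while (sent < budget && xsk_umem_consume_tx(umem, &desc)) {
                /* translate the umem address into a DMA address for the HW */
                dma_addr_t dma = xdp_umem_get_dma(umem, desc.addr);

                my_drv_post_tx_desc(ring, dma, desc.len);  /* hypothetical */
                sent++;
        }

        if (sent) {
                my_drv_ring_doorbell(ring);                /* hypothetical */
                /* tell the socket layer its Tx descriptors were consumed */
                xsk_umem_consume_tx_done(umem);
        }
}

/* from the Tx clean path, once the HW has completed 'completed' frames */
static void my_drv_xsk_tx_clean(struct xdp_umem *umem, u32 completed)
{
        if (completed)
                xsk_umem_complete_tx(umem, completed);
}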

File tree

15 files changed: +238, -218 lines

drivers/net/ethernet/intel/i40e/i40e_main.c

Lines changed: 1 addition & 1 deletion
@@ -11,7 +11,7 @@
 #include "i40e_diag.h"
 #include "i40e_xsk.h"
 #include <net/udp_tunnel.h>
-#include <net/xdp_sock.h>
+#include <net/xdp_sock_drv.h>
 /* All i40e tracepoints are defined by the include below, which
  * must be included exactly once across the whole kernel with
  * CREATE_TRACE_POINTS defined

drivers/net/ethernet/intel/i40e/i40e_xsk.c

Lines changed: 1 addition & 1 deletion
@@ -2,7 +2,7 @@
 /* Copyright(c) 2018 Intel Corporation. */
 
 #include <linux/bpf_trace.h>
-#include <net/xdp_sock.h>
+#include <net/xdp_sock_drv.h>
 #include <net/xdp.h>
 
 #include "i40e.h"

drivers/net/ethernet/intel/ice/ice_xsk.c

Lines changed: 1 addition & 1 deletion
@@ -2,7 +2,7 @@
 /* Copyright (c) 2019, Intel Corporation. */
 
 #include <linux/bpf_trace.h>
-#include <net/xdp_sock.h>
+#include <net/xdp_sock_drv.h>
 #include <net/xdp.h>
 #include "ice.h"
 #include "ice_base.h"

drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c

Lines changed: 1 addition & 1 deletion
@@ -2,7 +2,7 @@
 /* Copyright(c) 2018 Intel Corporation. */
 
 #include <linux/bpf_trace.h>
-#include <net/xdp_sock.h>
+#include <net/xdp_sock_drv.h>
 #include <net/xdp.h>
 
 #include "ixgbe.h"

drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c

Lines changed: 1 addition & 1 deletion
@@ -31,7 +31,7 @@
  */
 
 #include <linux/bpf_trace.h>
-#include <net/xdp_sock.h>
+#include <net/xdp_sock_drv.h>
 #include "en/xdp.h"
 #include "en/params.h"

drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h

Lines changed: 1 addition & 1 deletion
@@ -5,7 +5,7 @@
 #define __MLX5_EN_XSK_RX_H__
 
 #include "en.h"
-#include <net/xdp_sock.h>
+#include <net/xdp_sock_drv.h>
 
 /* RX data path */

drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.h

Lines changed: 1 addition & 1 deletion
@@ -5,7 +5,7 @@
 #define __MLX5_EN_XSK_TX_H__
 
 #include "en.h"
-#include <net/xdp_sock.h>
+#include <net/xdp_sock_drv.h>
 
 /* TX data path */

drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.c

Lines changed: 1 addition & 1 deletion
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
 /* Copyright (c) 2019 Mellanox Technologies. */
 
-#include <net/xdp_sock.h>
+#include <net/xdp_sock_drv.h>
 #include "umem.h"
 #include "setup.h"
 #include "en/params.h"

include/net/xdp_sock.h

Lines changed: 8 additions & 206 deletions
@@ -15,6 +15,7 @@
 
 struct net_device;
 struct xsk_queue;
+struct xdp_buff;
 
 /* Masks for xdp_umem_page flags.
  * The low 12-bits of the addr will be 0 since this is the page address, so we
@@ -101,27 +102,9 @@ struct xdp_sock {
        spinlock_t map_list_lock;
 };
 
-struct xdp_buff;
 #ifdef CONFIG_XDP_SOCKETS
-int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
-/* Used from netdev driver */
-bool xsk_umem_has_addrs(struct xdp_umem *umem, u32 cnt);
-bool xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr);
-void xsk_umem_release_addr(struct xdp_umem *umem);
-void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries);
-bool xsk_umem_consume_tx(struct xdp_umem *umem, struct xdp_desc *desc);
-void xsk_umem_consume_tx_done(struct xdp_umem *umem);
-struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries);
-struct xdp_umem_fq_reuse *xsk_reuseq_swap(struct xdp_umem *umem,
-                                          struct xdp_umem_fq_reuse *newq);
-void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq);
-struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev, u16 queue_id);
-void xsk_set_rx_need_wakeup(struct xdp_umem *umem);
-void xsk_set_tx_need_wakeup(struct xdp_umem *umem);
-void xsk_clear_rx_need_wakeup(struct xdp_umem *umem);
-void xsk_clear_tx_need_wakeup(struct xdp_umem *umem);
-bool xsk_umem_uses_need_wakeup(struct xdp_umem *umem);
 
+int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
 int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp);
 void __xsk_map_flush(void);
 
@@ -153,131 +136,24 @@ static inline u64 xsk_umem_add_offset_to_addr(u64 addr)
        return xsk_umem_extract_addr(addr) + xsk_umem_extract_offset(addr);
 }
 
-static inline char *xdp_umem_get_data(struct xdp_umem *umem, u64 addr)
-{
-       unsigned long page_addr;
-
-       addr = xsk_umem_add_offset_to_addr(addr);
-       page_addr = (unsigned long)umem->pages[addr >> PAGE_SHIFT].addr;
-
-       return (char *)(page_addr & PAGE_MASK) + (addr & ~PAGE_MASK);
-}
-
-static inline dma_addr_t xdp_umem_get_dma(struct xdp_umem *umem, u64 addr)
-{
-       addr = xsk_umem_add_offset_to_addr(addr);
-
-       return umem->pages[addr >> PAGE_SHIFT].dma + (addr & ~PAGE_MASK);
-}
-
-/* Reuse-queue aware version of FILL queue helpers */
-static inline bool xsk_umem_has_addrs_rq(struct xdp_umem *umem, u32 cnt)
-{
-       struct xdp_umem_fq_reuse *rq = umem->fq_reuse;
-
-       if (rq->length >= cnt)
-               return true;
-
-       return xsk_umem_has_addrs(umem, cnt - rq->length);
-}
-
-static inline bool xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr)
-{
-       struct xdp_umem_fq_reuse *rq = umem->fq_reuse;
-
-       if (!rq->length)
-               return xsk_umem_peek_addr(umem, addr);
-
-       *addr = rq->handles[rq->length - 1];
-       return addr;
-}
-
-static inline void xsk_umem_release_addr_rq(struct xdp_umem *umem)
-{
-       struct xdp_umem_fq_reuse *rq = umem->fq_reuse;
-
-       if (!rq->length)
-               xsk_umem_release_addr(umem);
-       else
-               rq->length--;
-}
-
-static inline void xsk_umem_fq_reuse(struct xdp_umem *umem, u64 addr)
-{
-       struct xdp_umem_fq_reuse *rq = umem->fq_reuse;
-
-       rq->handles[rq->length++] = addr;
-}
-
-/* Handle the offset appropriately depending on aligned or unaligned mode.
- * For unaligned mode, we store the offset in the upper 16-bits of the address.
- * For aligned mode, we simply add the offset to the address.
- */
-static inline u64 xsk_umem_adjust_offset(struct xdp_umem *umem, u64 address,
-                                        u64 offset)
-{
-       if (umem->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG)
-               return address + (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT);
-       else
-               return address + offset;
-}
-
-static inline u32 xsk_umem_xdp_frame_sz(struct xdp_umem *umem)
-{
-       return umem->chunk_size_nohr;
-}
-
 #else
+
 static inline int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
 {
        return -ENOTSUPP;
 }
 
-static inline bool xsk_umem_has_addrs(struct xdp_umem *umem, u32 cnt)
-{
-       return false;
-}
-
-static inline u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr)
-{
-       return NULL;
-}
-
-static inline void xsk_umem_release_addr(struct xdp_umem *umem)
-{
-}
-
-static inline void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries)
-{
-}
-
-static inline bool xsk_umem_consume_tx(struct xdp_umem *umem,
-                                      struct xdp_desc *desc)
-{
-       return false;
-}
-
-static inline void xsk_umem_consume_tx_done(struct xdp_umem *umem)
-{
-}
-
-static inline struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries)
+static inline int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
 {
-       return NULL;
+       return -EOPNOTSUPP;
 }
 
-static inline struct xdp_umem_fq_reuse *xsk_reuseq_swap(
-       struct xdp_umem *umem,
-       struct xdp_umem_fq_reuse *newq)
-{
-       return NULL;
-}
-static inline void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq)
+static inline void __xsk_map_flush(void)
 {
 }
 
-static inline struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev,
-                                                    u16 queue_id)
+static inline struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map,
+                                                    u32 key)
 {
        return NULL;
 }
@@ -297,80 +173,6 @@ static inline u64 xsk_umem_add_offset_to_addr(u64 addr)
        return 0;
 }
 
-static inline char *xdp_umem_get_data(struct xdp_umem *umem, u64 addr)
-{
-       return NULL;
-}
-
-static inline dma_addr_t xdp_umem_get_dma(struct xdp_umem *umem, u64 addr)
-{
-       return 0;
-}
-
-static inline bool xsk_umem_has_addrs_rq(struct xdp_umem *umem, u32 cnt)
-{
-       return false;
-}
-
-static inline u64 *xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr)
-{
-       return NULL;
-}
-
-static inline void xsk_umem_release_addr_rq(struct xdp_umem *umem)
-{
-}
-
-static inline void xsk_umem_fq_reuse(struct xdp_umem *umem, u64 addr)
-{
-}
-
-static inline void xsk_set_rx_need_wakeup(struct xdp_umem *umem)
-{
-}
-
-static inline void xsk_set_tx_need_wakeup(struct xdp_umem *umem)
-{
-}
-
-static inline void xsk_clear_rx_need_wakeup(struct xdp_umem *umem)
-{
-}
-
-static inline void xsk_clear_tx_need_wakeup(struct xdp_umem *umem)
-{
-}
-
-static inline bool xsk_umem_uses_need_wakeup(struct xdp_umem *umem)
-{
-       return false;
-}
-
-static inline u64 xsk_umem_adjust_offset(struct xdp_umem *umem, u64 handle,
-                                        u64 offset)
-{
-       return 0;
-}
-
-static inline u32 xsk_umem_xdp_frame_sz(struct xdp_umem *umem)
-{
-       return 0;
-}
-
-static inline int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
-{
-       return -EOPNOTSUPP;
-}
-
-static inline void __xsk_map_flush(void)
-{
-}
-
-static inline struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map,
-                                                    u32 key)
-{
-       return NULL;
-}
 #endif /* CONFIG_XDP_SOCKETS */
 
 #endif /* _LINUX_XDP_SOCK_H */
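The need_wakeup helpers removed from this header above (xsk_set_rx_need_wakeup() and friends) are likewise part of the driver-facing interface that now lives in <net/xdp_sock_drv.h>. For reference, a sketch of how a driver typically uses them on its Rx refill path follows; it is not code from this commit, my_drv_alloc_rx_bufs() is hypothetical, and only the xsk_* calls whose prototypes appear in the hunks above are real.

/* Hypothetical Rx refill: my_drv_alloc_rx_bufs() is invented, while the
 * xsk_* helpers are the ones this commit moves to <net/xdp_sock_drv.h>.
 */
#include <net/xdp_sock_drv.h>

static bool my_drv_refill_rx(struct xdp_umem *umem, u16 count)
{
        /* pulls 'count' addresses from the fill queue and posts them to HW */
        bool ok = my_drv_alloc_rx_bufs(umem, count);

        if (!xsk_umem_uses_need_wakeup(umem))
                return ok;

        if (!ok)
                /* fill queue ran dry: ask user space to produce more entries */
                xsk_set_rx_need_wakeup(umem);
        else
                xsk_clear_rx_need_wakeup(umem);

        return ok;
}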
