 
 struct net_device;
 struct xsk_queue;
+struct xdp_buff;
 
 /* Masks for xdp_umem_page flags.
  * The low 12-bits of the addr will be 0 since this is the page address, so we
@@ -101,27 +102,9 @@ struct xdp_sock {
 	spinlock_t map_list_lock;
 };
 
-struct xdp_buff;
 #ifdef CONFIG_XDP_SOCKETS
-int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
-/* Used from netdev driver */
-bool xsk_umem_has_addrs(struct xdp_umem *umem, u32 cnt);
-bool xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr);
-void xsk_umem_release_addr(struct xdp_umem *umem);
-void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries);
-bool xsk_umem_consume_tx(struct xdp_umem *umem, struct xdp_desc *desc);
-void xsk_umem_consume_tx_done(struct xdp_umem *umem);
-struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries);
-struct xdp_umem_fq_reuse *xsk_reuseq_swap(struct xdp_umem *umem,
-					  struct xdp_umem_fq_reuse *newq);
-void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq);
-struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev, u16 queue_id);
-void xsk_set_rx_need_wakeup(struct xdp_umem *umem);
-void xsk_set_tx_need_wakeup(struct xdp_umem *umem);
-void xsk_clear_rx_need_wakeup(struct xdp_umem *umem);
-void xsk_clear_tx_need_wakeup(struct xdp_umem *umem);
-bool xsk_umem_uses_need_wakeup(struct xdp_umem *umem);
 
+int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
 int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp);
 void __xsk_map_flush(void);
 
@@ -153,131 +136,24 @@ static inline u64 xsk_umem_add_offset_to_addr(u64 addr)
 	return xsk_umem_extract_addr(addr) + xsk_umem_extract_offset(addr);
 }
 
-static inline char *xdp_umem_get_data(struct xdp_umem *umem, u64 addr)
-{
-	unsigned long page_addr;
-
-	addr = xsk_umem_add_offset_to_addr(addr);
-	page_addr = (unsigned long)umem->pages[addr >> PAGE_SHIFT].addr;
-
-	return (char *)(page_addr & PAGE_MASK) + (addr & ~PAGE_MASK);
-}
-
-static inline dma_addr_t xdp_umem_get_dma(struct xdp_umem *umem, u64 addr)
-{
-	addr = xsk_umem_add_offset_to_addr(addr);
-
-	return umem->pages[addr >> PAGE_SHIFT].dma + (addr & ~PAGE_MASK);
-}
-
-/* Reuse-queue aware version of FILL queue helpers */
-static inline bool xsk_umem_has_addrs_rq(struct xdp_umem *umem, u32 cnt)
-{
-	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;
-
-	if (rq->length >= cnt)
-		return true;
-
-	return xsk_umem_has_addrs(umem, cnt - rq->length);
-}
-
-static inline bool xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr)
-{
-	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;
-
-	if (!rq->length)
-		return xsk_umem_peek_addr(umem, addr);
-
-	*addr = rq->handles[rq->length - 1];
-	return addr;
-}
-
-static inline void xsk_umem_release_addr_rq(struct xdp_umem *umem)
-{
-	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;
-
-	if (!rq->length)
-		xsk_umem_release_addr(umem);
-	else
-		rq->length--;
-}
-
-static inline void xsk_umem_fq_reuse(struct xdp_umem *umem, u64 addr)
-{
-	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;
-
-	rq->handles[rq->length++] = addr;
-}
-
-/* Handle the offset appropriately depending on aligned or unaligned mode.
- * For unaligned mode, we store the offset in the upper 16-bits of the address.
- * For aligned mode, we simply add the offset to the address.
- */
-static inline u64 xsk_umem_adjust_offset(struct xdp_umem *umem, u64 address,
-					 u64 offset)
-{
-	if (umem->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG)
-		return address + (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT);
-	else
-		return address + offset;
-}
-
-static inline u32 xsk_umem_xdp_frame_sz(struct xdp_umem *umem)
-{
-	return umem->chunk_size_nohr;
-}
-
 #else
+
 static inline int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
 {
 	return -ENOTSUPP;
 }
 
-static inline bool xsk_umem_has_addrs(struct xdp_umem *umem, u32 cnt)
-{
-	return false;
-}
-
-static inline u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr)
-{
-	return NULL;
-}
-
-static inline void xsk_umem_release_addr(struct xdp_umem *umem)
-{
-}
-
-static inline void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries)
-{
-}
-
-static inline bool xsk_umem_consume_tx(struct xdp_umem *umem,
-				       struct xdp_desc *desc)
-{
-	return false;
-}
-
-static inline void xsk_umem_consume_tx_done(struct xdp_umem *umem)
-{
-}
-
-static inline struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries)
+static inline int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
 {
-	return NULL;
+	return -EOPNOTSUPP;
 }
 
-static inline struct xdp_umem_fq_reuse *xsk_reuseq_swap(
-	struct xdp_umem *umem,
-	struct xdp_umem_fq_reuse *newq)
-{
-	return NULL;
-}
-static inline void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq)
+static inline void __xsk_map_flush(void)
 {
 }
 
-static inline struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev,
-						     u16 queue_id)
+static inline struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map,
+						     u32 key)
 {
 	return NULL;
 }
@@ -297,80 +173,6 @@ static inline u64 xsk_umem_add_offset_to_addr(u64 addr)
 	return 0;
 }
 
-static inline char *xdp_umem_get_data(struct xdp_umem *umem, u64 addr)
-{
-	return NULL;
-}
-
-static inline dma_addr_t xdp_umem_get_dma(struct xdp_umem *umem, u64 addr)
-{
-	return 0;
-}
-
-static inline bool xsk_umem_has_addrs_rq(struct xdp_umem *umem, u32 cnt)
-{
-	return false;
-}
-
-static inline u64 *xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr)
-{
-	return NULL;
-}
-
-static inline void xsk_umem_release_addr_rq(struct xdp_umem *umem)
-{
-}
-
-static inline void xsk_umem_fq_reuse(struct xdp_umem *umem, u64 addr)
-{
-}
-
-static inline void xsk_set_rx_need_wakeup(struct xdp_umem *umem)
-{
-}
-
-static inline void xsk_set_tx_need_wakeup(struct xdp_umem *umem)
-{
-}
-
-static inline void xsk_clear_rx_need_wakeup(struct xdp_umem *umem)
-{
-}
-
-static inline void xsk_clear_tx_need_wakeup(struct xdp_umem *umem)
-{
-}
-
-static inline bool xsk_umem_uses_need_wakeup(struct xdp_umem *umem)
-{
-	return false;
-}
-
-static inline u64 xsk_umem_adjust_offset(struct xdp_umem *umem, u64 handle,
-					 u64 offset)
-{
-	return 0;
-}
-
-static inline u32 xsk_umem_xdp_frame_sz(struct xdp_umem *umem)
-{
-	return 0;
-}
-
-static inline int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
-{
-	return -EOPNOTSUPP;
-}
-
-static inline void __xsk_map_flush(void)
-{
-}
-
-static inline struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map,
-						     u32 key)
-{
-	return NULL;
-}
 #endif /* CONFIG_XDP_SOCKETS */
 
 #endif /* _LINUX_XDP_SOCK_H */
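
After this change, the CONFIG_XDP_SOCKETS=n branch of xdp_sock.h only stubs out the remaining entry points (xsk_generic_rcv(), __xsk_map_redirect(), __xsk_map_flush(), __xsk_map_lookup_elem()). The fragment below is a minimal, self-contained sketch of that #ifdef/#else stub pattern in plain C, not kernel code; MY_CONFIG_FEATURE and feature_rcv() are hypothetical stand-ins. It illustrates why callers can invoke these helpers unconditionally: when the feature is compiled out, a static inline stub with the same signature fails cleanly and costs nothing.

#include <errno.h>
#include <stdio.h>

/* Uncomment to mimic CONFIG_XDP_SOCKETS=y; the real body would then live
 * in a separately compiled .c file.
 */
/* #define MY_CONFIG_FEATURE 1 */

#ifdef MY_CONFIG_FEATURE
int feature_rcv(const void *frame);	/* real implementation elsewhere */
#else
/* Compiled-out stub: same signature, reports "not supported" cleanly. */
static inline int feature_rcv(const void *frame)
{
	(void)frame;
	return -EOPNOTSUPP;
}
#endif

int main(void)
{
	/* The caller needs no #ifdef of its own. */
	int err = feature_rcv("frame");

	if (err)
		printf("feature unavailable: %d\n", err);
	return 0;
}

Keeping the disabled path as a static inline stub lets the compiler fold the call away entirely, which is the same reason the kernel header pairs each declaration with an #else stub rather than forcing every caller to carry its own #ifdef.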