@@ -7,6 +7,7 @@
 #define _LINUX_XDP_SOCK_DRV_H
 
 #include <net/xdp_sock.h>
+#include <net/xsk_buff_pool.h>
 
 #ifdef CONFIG_XDP_SOCKETS
 
@@ -101,6 +102,94 @@ static inline u32 xsk_umem_xdp_frame_sz(struct xdp_umem *umem)
 	return umem->chunk_size_nohr;
 }
 
+static inline u32 xsk_umem_get_headroom(struct xdp_umem *umem)
+{
+	return XDP_PACKET_HEADROOM + umem->headroom;
+}
+
+static inline u32 xsk_umem_get_chunk_size(struct xdp_umem *umem)
+{
+	return umem->chunk_size;
+}
+
+static inline u32 xsk_umem_get_rx_frame_size(struct xdp_umem *umem)
+{
+	return xsk_umem_get_chunk_size(umem) - xsk_umem_get_headroom(umem);
+}
+
+static inline void xsk_buff_set_rxq_info(struct xdp_umem *umem,
+					 struct xdp_rxq_info *rxq)
+{
+	xp_set_rxq_info(umem->pool, rxq);
+}
+
+static inline void xsk_buff_dma_unmap(struct xdp_umem *umem,
+				      unsigned long attrs)
+{
+	xp_dma_unmap(umem->pool, attrs);
+}
+
+static inline int xsk_buff_dma_map(struct xdp_umem *umem, struct device *dev,
+				   unsigned long attrs)
+{
+	return xp_dma_map(umem->pool, dev, attrs, umem->pgs, umem->npgs);
+}
+
+static inline dma_addr_t xsk_buff_xdp_get_dma(struct xdp_buff *xdp)
+{
+	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
+
+	return xp_get_dma(xskb);
+}
+
+static inline dma_addr_t xsk_buff_xdp_get_frame_dma(struct xdp_buff *xdp)
+{
+	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
+
+	return xp_get_frame_dma(xskb);
+}
+
+static inline struct xdp_buff *xsk_buff_alloc(struct xdp_umem *umem)
+{
+	return xp_alloc(umem->pool);
+}
+
+static inline bool xsk_buff_can_alloc(struct xdp_umem *umem, u32 count)
+{
+	return xp_can_alloc(umem->pool, count);
+}
+
+static inline void xsk_buff_free(struct xdp_buff *xdp)
+{
+	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
+
+	xp_free(xskb);
+}
+
+static inline dma_addr_t xsk_buff_raw_get_dma(struct xdp_umem *umem, u64 addr)
+{
+	return xp_raw_get_dma(umem->pool, addr);
+}
+
+static inline void *xsk_buff_raw_get_data(struct xdp_umem *umem, u64 addr)
+{
+	return xp_raw_get_data(umem->pool, addr);
+}
+
+static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp)
+{
+	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
+
+	xp_dma_sync_for_cpu(xskb);
+}
+
+static inline void xsk_buff_raw_dma_sync_for_device(struct xdp_umem *umem,
+						    dma_addr_t dma,
+						    size_t size)
+{
+	xp_dma_sync_for_device(umem->pool, dma, size);
+}
+
 #else
 
 static inline bool xsk_umem_has_addrs(struct xdp_umem *umem, u32 cnt)
@@ -212,6 +301,81 @@ static inline u32 xsk_umem_xdp_frame_sz(struct xdp_umem *umem)
 	return 0;
 }
 
+static inline u32 xsk_umem_get_headroom(struct xdp_umem *umem)
+{
+	return 0;
+}
+
+static inline u32 xsk_umem_get_chunk_size(struct xdp_umem *umem)
+{
+	return 0;
+}
+
+static inline u32 xsk_umem_get_rx_frame_size(struct xdp_umem *umem)
+{
+	return 0;
+}
+
+static inline void xsk_buff_set_rxq_info(struct xdp_umem *umem,
+					 struct xdp_rxq_info *rxq)
+{
+}
+
+static inline void xsk_buff_dma_unmap(struct xdp_umem *umem,
+				      unsigned long attrs)
+{
+}
+
+static inline int xsk_buff_dma_map(struct xdp_umem *umem, struct device *dev,
+				   unsigned long attrs)
+{
+	return 0;
+}
+
+static inline dma_addr_t xsk_buff_xdp_get_dma(struct xdp_buff *xdp)
+{
+	return 0;
+}
+
+static inline dma_addr_t xsk_buff_xdp_get_frame_dma(struct xdp_buff *xdp)
+{
+	return 0;
+}
+
+static inline struct xdp_buff *xsk_buff_alloc(struct xdp_umem *umem)
+{
+	return NULL;
+}
+
+static inline bool xsk_buff_can_alloc(struct xdp_umem *umem, u32 count)
+{
+	return false;
+}
+
+static inline void xsk_buff_free(struct xdp_buff *xdp)
+{
+}
+
+static inline dma_addr_t xsk_buff_raw_get_dma(struct xdp_umem *umem, u64 addr)
+{
+	return 0;
+}
+
+static inline void *xsk_buff_raw_get_data(struct xdp_umem *umem, u64 addr)
+{
+	return NULL;
+}
+
+static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp)
+{
+}
+
+static inline void xsk_buff_raw_dma_sync_for_device(struct xdp_umem *umem,
+						    dma_addr_t dma,
+						    size_t size)
+{
+}
+
 #endif /* CONFIG_XDP_SOCKETS */
 
 #endif /* _LINUX_XDP_SOCK_DRV_H */
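
For readers new to this API, the sketch below shows how a zero-copy driver might wire these helpers into its Rx and Tx paths. It is a minimal illustration under stated assumptions, not code from any real driver: struct my_ring and the my_*() helpers are hypothetical placeholders, and error handling, ring wraparound, and the XDP program run are elided.

/*
 * Hypothetical driver glue; only the xsk_*() calls come from the header
 * above. Assumes the umem was already DMA-mapped with xsk_buff_dma_map()
 * and its rxq registered via xsk_buff_set_rxq_info() at setup time.
 */
struct my_ring {
	struct xdp_umem *umem;
	struct xdp_buff **bufs;	/* shadow ring of posted Rx buffers */
	u32 next;		/* next free slot; wraparound elided */
};

/* Rx refill: hand fresh umem-backed buffers to the NIC. */
static void my_rx_refill(struct my_ring *ring, u32 budget)
{
	struct xdp_buff *xdp;

	if (!xsk_buff_can_alloc(ring->umem, budget))
		return;	/* fill queue cannot back this many buffers */

	while (budget--) {
		xdp = xsk_buff_alloc(ring->umem);
		if (!xdp)
			break;
		/* Post the frame's DMA address and writable length. */
		my_post_rx_desc(ring, xsk_buff_xdp_get_dma(xdp),
				xsk_umem_get_rx_frame_size(ring->umem));
		ring->bufs[ring->next++] = xdp;
	}
}

/* Rx completion: make the frame CPU-visible, then hand it to XDP. */
static void my_rx_complete(struct my_ring *ring, u32 idx, u32 len)
{
	struct xdp_buff *xdp = ring->bufs[idx];

	xdp->data_end = xdp->data + len;
	xsk_buff_dma_sync_for_cpu(xdp);
	if (my_run_xdp(xdp) == XDP_DROP)
		xsk_buff_free(xdp);	/* recycle back to the pool */
}

/* Tx: AF_XDP Tx descriptors carry raw umem addresses, not xdp_buffs. */
static void my_xmit(struct my_ring *ring, u64 addr, u32 len)
{
	dma_addr_t dma = xsk_buff_raw_get_dma(ring->umem, addr);

	xsk_buff_raw_dma_sync_for_device(ring->umem, dma, len);
	my_post_tx_desc(ring, dma, len);
}

Note the asymmetry: the Rx path consumes xdp_buffs handed out by xsk_buff_alloc(), while Tx works on raw u64 addresses taken straight from the user-space Tx ring, which is why the raw variants take a umem and an address instead of an xdp_buff.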