
Commit 57f7f8b

magnus-karlsson authored and borkmann committed
ice: Use xdp_buf instead of rx_buf for xsk zero-copy
In order to use the new xsk batched buffer allocation interface, a pointer to an array of struct xdp_buff pointers needs to be provided so that the function can put the result of the allocation there.

In the ice driver, we already have a ring that stores pointers to xdp_buffs. This is only used for the xsk zero-copy driver and is a union with the structure that is used for the regular non zero-copy path. Unfortunately, that structure is larger than the xdp_buff pointers, which means that there will be a stride (of 24 bytes) between each xdp_buff pointer. Feeding this into the xsk_buff_alloc_batch interface will not work, since it assumes a regular array of xdp_buff pointers (each 8 bytes with 0 bytes in-between them on a 64-bit system).

To fix this, remove the xdp_buff pointer from the rx_buf union and move it one step higher, to the union above, which only has pointers to arrays in it. This solves the problem, and the SW ring of xdp_buff pointers can be fed straight into the allocation function in the next patch, when that interface is used. This will improve performance.

Signed-off-by: Magnus Karlsson <[email protected]>
Signed-off-by: Daniel Borkmann <[email protected]>
Link: https://lore.kernel.org/bpf/[email protected]
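To see why the stride matters, here is a minimal, hedged userspace sketch; the struct below is an illustrative stand-in for struct ice_rx_buf, not the kernel definition:

/* stride.c - illustrates the stride problem described above */
#include <stdio.h>

struct xdp_buff;        /* opaque; only pointers to it are stored */

/* Old layout: the xdp pointer shares a union with a larger struct,
 * so every union element inherits that struct's size.
 */
struct old_rx_buf {
        union {
                struct {
                        unsigned long long dma; /* stand-in for dma_addr_t */
                        void *page;
                        unsigned int page_offset;
                        unsigned short pagecnt_bias;
                };
                struct {
                        struct xdp_buff *xdp;
                };
        };
};

int main(void)
{
        /* Walking old_rx_buf[i].xdp advances by the full union size per
         * element, so the ring is not a flat array of xdp_buff pointers
         * and cannot be handed to a batch allocator that expects one.
         */
        printf("stride via old union:   %zu bytes\n", sizeof(struct old_rx_buf));
        printf("stride of a flat array: %zu bytes\n", sizeof(struct xdp_buff *));
        return 0;
}

On a typical 64-bit system the first line prints 24 and the second prints 8, which is exactly the mismatch this patch removes.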
1 parent 47e4075 · commit 57f7f8b

2 files changed, 33 insertions(+), 39 deletions(-)

drivers/net/ethernet/intel/ice/ice_txrx.h

Lines changed: 5 additions & 11 deletions
@@ -164,17 +164,10 @@ struct ice_tx_offload_params {
 };
 
 struct ice_rx_buf {
-        union {
-                struct {
-                        dma_addr_t dma;
-                        struct page *page;
-                        unsigned int page_offset;
-                        u16 pagecnt_bias;
-                };
-                struct {
-                        struct xdp_buff *xdp;
-                };
-        };
+        dma_addr_t dma;
+        struct page *page;
+        unsigned int page_offset;
+        u16 pagecnt_bias;
 };
 
 struct ice_q_stats {
@@ -270,6 +263,7 @@ struct ice_ring {
         union {
                 struct ice_tx_buf *tx_buf;
                 struct ice_rx_buf *rx_buf;
+                struct xdp_buff **xdp_buf;
         };
         /* CL2 - 2nd cacheline starts here */
         u16 q_index;            /* Queue number of ring */

drivers/net/ethernet/intel/ice/ice_xsk.c

Lines changed: 28 additions & 28 deletions
@@ -364,34 +364,34 @@ bool ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, u16 count)
 {
         union ice_32b_rx_flex_desc *rx_desc;
         u16 ntu = rx_ring->next_to_use;
-        struct ice_rx_buf *rx_buf;
+        struct xdp_buff **xdp;
         bool ok = true;
         dma_addr_t dma;
 
         if (!count)
                 return true;
 
         rx_desc = ICE_RX_DESC(rx_ring, ntu);
-        rx_buf = &rx_ring->rx_buf[ntu];
+        xdp = &rx_ring->xdp_buf[ntu];
 
         do {
-                rx_buf->xdp = xsk_buff_alloc(rx_ring->xsk_pool);
-                if (!rx_buf->xdp) {
+                *xdp = xsk_buff_alloc(rx_ring->xsk_pool);
+                if (!xdp) {
                         ok = false;
                         break;
                 }
 
-                dma = xsk_buff_xdp_get_dma(rx_buf->xdp);
+                dma = xsk_buff_xdp_get_dma(*xdp);
                 rx_desc->read.pkt_addr = cpu_to_le64(dma);
                 rx_desc->wb.status_error0 = 0;
 
                 rx_desc++;
-                rx_buf++;
+                xdp++;
                 ntu++;
 
                 if (unlikely(ntu == rx_ring->count)) {
                         rx_desc = ICE_RX_DESC(rx_ring, 0);
-                        rx_buf = rx_ring->rx_buf;
+                        xdp = rx_ring->xdp_buf;
                         ntu = 0;
                 }
         } while (--count);
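For context, the parent commit (47e4075) adds the xsk_buff_alloc_batch() interface that this loop is being prepared for. Below is a hedged sketch of how a follow-up might feed the now-flat SW ring straight into it; the helper name and elided descriptor setup are illustrative, not the actual next patch:

/* Hypothetical helper, sketched against the xsk_buff_alloc_batch()
 * interface from parent commit 47e4075; programming the Rx descriptors
 * for the returned buffers is elided.
 */
static u16 ice_alloc_rx_bufs_zc_batched(struct ice_ring *rx_ring, u16 count)
{
        u16 ntu = rx_ring->next_to_use;
        struct xdp_buff **xdp = &rx_ring->xdp_buf[ntu];
        u16 nb_buffs;

        /* One call fills consecutive 8-byte slots of the SW ring, which
         * only works now that the ring is a flat array of xdp_buff
         * pointers rather than a strided union.
         */
        nb_buffs = min_t(u16, count, rx_ring->count - ntu);
        nb_buffs = xsk_buff_alloc_batch(rx_ring->xsk_pool, xdp, nb_buffs);

        /* ... write one Rx descriptor per allocated xdp_buff ... */

        return nb_buffs;
}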
@@ -421,33 +421,33 @@ static void ice_bump_ntc(struct ice_ring *rx_ring)
 /**
  * ice_construct_skb_zc - Create an sk_buff from zero-copy buffer
  * @rx_ring: Rx ring
- * @rx_buf: zero-copy Rx buffer
+ * @xdp_arr: Pointer to the SW ring of xdp_buff pointers
  *
  * This function allocates a new skb from a zero-copy Rx buffer.
  *
  * Returns the skb on success, NULL on failure.
  */
 static struct sk_buff *
-ice_construct_skb_zc(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
+ice_construct_skb_zc(struct ice_ring *rx_ring, struct xdp_buff **xdp_arr)
 {
-        unsigned int metasize = rx_buf->xdp->data - rx_buf->xdp->data_meta;
-        unsigned int datasize = rx_buf->xdp->data_end - rx_buf->xdp->data;
-        unsigned int datasize_hard = rx_buf->xdp->data_end -
-                                     rx_buf->xdp->data_hard_start;
+        struct xdp_buff *xdp = *xdp_arr;
+        unsigned int metasize = xdp->data - xdp->data_meta;
+        unsigned int datasize = xdp->data_end - xdp->data;
+        unsigned int datasize_hard = xdp->data_end - xdp->data_hard_start;
         struct sk_buff *skb;
 
         skb = __napi_alloc_skb(&rx_ring->q_vector->napi, datasize_hard,
                                GFP_ATOMIC | __GFP_NOWARN);
         if (unlikely(!skb))
                 return NULL;
 
-        skb_reserve(skb, rx_buf->xdp->data - rx_buf->xdp->data_hard_start);
-        memcpy(__skb_put(skb, datasize), rx_buf->xdp->data, datasize);
+        skb_reserve(skb, xdp->data - xdp->data_hard_start);
+        memcpy(__skb_put(skb, datasize), xdp->data, datasize);
         if (metasize)
                 skb_metadata_set(skb, metasize);
 
-        xsk_buff_free(rx_buf->xdp);
-        rx_buf->xdp = NULL;
+        xsk_buff_free(xdp);
+        *xdp_arr = NULL;
         return skb;
 }
 
@@ -521,7 +521,7 @@ int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget)
         while (likely(total_rx_packets < (unsigned int)budget)) {
                 union ice_32b_rx_flex_desc *rx_desc;
                 unsigned int size, xdp_res = 0;
-                struct ice_rx_buf *rx_buf;
+                struct xdp_buff **xdp;
                 struct sk_buff *skb;
                 u16 stat_err_bits;
                 u16 vlan_tag = 0;
@@ -544,18 +544,18 @@ int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget)
                 if (!size)
                         break;
 
-                rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
-                rx_buf->xdp->data_end = rx_buf->xdp->data + size;
-                xsk_buff_dma_sync_for_cpu(rx_buf->xdp, rx_ring->xsk_pool);
+                xdp = &rx_ring->xdp_buf[rx_ring->next_to_clean];
+                (*xdp)->data_end = (*xdp)->data + size;
+                xsk_buff_dma_sync_for_cpu(*xdp, rx_ring->xsk_pool);
 
-                xdp_res = ice_run_xdp_zc(rx_ring, rx_buf->xdp);
+                xdp_res = ice_run_xdp_zc(rx_ring, *xdp);
                 if (xdp_res) {
                         if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR))
                                 xdp_xmit |= xdp_res;
                         else
-                                xsk_buff_free(rx_buf->xdp);
+                                xsk_buff_free(*xdp);
 
-                        rx_buf->xdp = NULL;
+                        *xdp = NULL;
                         total_rx_bytes += size;
                         total_rx_packets++;
                         cleaned_count++;
@@ -565,7 +565,7 @@ int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget)
                 }
 
                 /* XDP_PASS path */
-                skb = ice_construct_skb_zc(rx_ring, rx_buf);
+                skb = ice_construct_skb_zc(rx_ring, xdp);
                 if (!skb) {
                         rx_ring->rx_stats.alloc_buf_failed++;
                         break;
@@ -813,12 +813,12 @@ void ice_xsk_clean_rx_ring(struct ice_ring *rx_ring)
         u16 i;
 
         for (i = 0; i < rx_ring->count; i++) {
-                struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];
+                struct xdp_buff **xdp = &rx_ring->xdp_buf[i];
 
-                if (!rx_buf->xdp)
+                if (!xdp)
                         continue;
 
-                rx_buf->xdp = NULL;
+                *xdp = NULL;
         }
 }
