Commit db804cf

magnus-karlsson authored and borkmann committed
ice: Use the xsk batched rx allocation interface
Use the new xsk batched rx allocation interface for the zero-copy data path. As the array of struct xdp_buff pointers kept by the driver is really a ring that wraps, the allocation routine is modified to detect a wrap and in that case call the allocation function twice. The allocation function cannot deal with wrapped rings, only arrays. As we now know exactly how many buffers we get and that there is no wrapping, the allocation function can be simplified even more as all if-statements in the allocation loop can be removed, improving performance.

Signed-off-by: Magnus Karlsson <[email protected]>
Signed-off-by: Daniel Borkmann <[email protected]>
Link: https://lore.kernel.org/bpf/[email protected]
1 parent 57f7f8b commit db804cf
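
The wrap handling the commit message describes can be sketched generically: a batch allocator that only fills contiguous arrays can still serve a ring if the caller splits any request that crosses the end of the ring into two calls. The sketch below is illustrative only; struct ring, its fields, and alloc_batch() are hypothetical stand-ins, not the driver's ring state or the XSK API.

/* Hypothetical sketch: fill a wrapping ring from an allocator that
 * can only fill contiguous arrays. None of these names are the
 * actual driver or XSK identifiers.
 */
static u32 ring_fill_batch(struct ring *r, u32 count)
{
        u32 to_end = min(count, r->size - r->next_to_use);
        u32 got;

        /* First call covers next_to_use up to the end of the array. */
        got = alloc_batch(r->pool, &r->bufs[r->next_to_use], to_end);
        if (got < to_end)
                return got;     /* pool ran dry before the wrap */

        /* Request wrapped: second call restarts at index 0. */
        got += alloc_batch(r->pool, &r->bufs[0], count - to_end);
        return got;
}

In the ice hunk below, ice_alloc_rx_bufs_zc() takes an even simpler route: it caps each request at the end of the ring with min_t() and reports a short allocation through its boolean return value, effectively leaving the remainder to a subsequent refill.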

File tree: 1 file changed (+19, -25)

drivers/net/ethernet/intel/ice/ice_xsk.c

@@ -365,44 +365,38 @@ bool ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, u16 count)
 	union ice_32b_rx_flex_desc *rx_desc;
 	u16 ntu = rx_ring->next_to_use;
 	struct xdp_buff **xdp;
-	bool ok = true;
+	u32 nb_buffs, i;
 	dma_addr_t dma;
 
-	if (!count)
-		return true;
-
 	rx_desc = ICE_RX_DESC(rx_ring, ntu);
 	xdp = &rx_ring->xdp_buf[ntu];
 
-	do {
-		*xdp = xsk_buff_alloc(rx_ring->xsk_pool);
-		if (!xdp) {
-			ok = false;
-			break;
-		}
+	nb_buffs = min_t(u16, count, rx_ring->count - ntu);
+	nb_buffs = xsk_buff_alloc_batch(rx_ring->xsk_pool, xdp, nb_buffs);
+	if (!nb_buffs)
+		return false;
 
+	i = nb_buffs;
+	while (i--) {
 		dma = xsk_buff_xdp_get_dma(*xdp);
 		rx_desc->read.pkt_addr = cpu_to_le64(dma);
-		rx_desc->wb.status_error0 = 0;
 
 		rx_desc++;
 		xdp++;
-		ntu++;
-
-		if (unlikely(ntu == rx_ring->count)) {
-			rx_desc = ICE_RX_DESC(rx_ring, 0);
-			xdp = rx_ring->xdp_buf;
-			ntu = 0;
-		}
-	} while (--count);
+	}
 
-	if (rx_ring->next_to_use != ntu) {
-		/* clear the status bits for the next_to_use descriptor */
-		rx_desc->wb.status_error0 = 0;
-		ice_release_rx_desc(rx_ring, ntu);
+	ntu += nb_buffs;
+	if (ntu == rx_ring->count) {
+		rx_desc = ICE_RX_DESC(rx_ring, 0);
+		xdp = rx_ring->xdp_buf;
+		ntu = 0;
 	}
 
-	return ok;
+	/* clear the status bits for the next_to_use descriptor */
+	rx_desc->wb.status_error0 = 0;
+	ice_release_rx_desc(rx_ring, ntu);
+
+	return count == nb_buffs ? true : false;
 }
 
 /**
@@ -545,7 +539,7 @@ int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget)
 			break;
 
 		xdp = &rx_ring->xdp_buf[rx_ring->next_to_clean];
-		(*xdp)->data_end = (*xdp)->data + size;
+		xsk_buff_set_size(*xdp, size);
 		xsk_buff_dma_sync_for_cpu(*xdp, rx_ring->xsk_pool);
 
 		xdp_res = ice_run_xdp_zc(rx_ring, *xdp);
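
The second hunk replaces the open-coded data_end computation with xsk_buff_set_size(). Buffers coming out of the batched allocator do not necessarily have their packet pointers pre-initialized per buffer, so the driver sets them once the frame length is known. As an assumption about the helper's shape around this series (check include/net/xdp_sock_drv.h in the matching tree for the authoritative version), it amounts to:

/* Assumed sketch of the helper the driver now calls; it derives all
 * three packet pointers from data_hard_start rather than relying on
 * per-buffer setup at allocation time.
 */
static inline void xsk_buff_set_size(struct xdp_buff *xdp, u32 size)
{
        xdp->data = xdp->data_hard_start + XDP_PACKET_HEADROOM;
        xdp->data_meta = xdp->data;
        xdp->data_end = xdp->data + size;
}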
