
Commit 6aab0bb

magnus-karlsson authored and borkmann committed
i40e: Use the xsk batched rx allocation interface
Use the new xsk batched rx allocation interface for the zero-copy data path. As the array of struct xdp_buff pointers kept by the driver is really a ring that wraps, the allocation routine is modified to detect a wrap and in that case call the allocation function twice. The allocation function cannot deal with wrapped rings, only arrays. As we now know exactly how many buffers we get and that there is no wrapping, the allocation function can be simplified even more as all if-statements in the allocation loop can be removed, improving performance.

Signed-off-by: Magnus Karlsson <[email protected]>
Signed-off-by: Daniel Borkmann <[email protected]>
Link: https://lore.kernel.org/bpf/[email protected]
1 parent db804cf commit 6aab0bb
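
To make the ring-wrap handling concrete, here is a minimal, self-contained user-space sketch of the pattern visible in the diff below: the batch allocator only fills a linear array, so the caller caps each request at the distance to the end of the ring and wraps next_to_use only after the batch has been placed. This is illustrative code, not the driver: toy_ring, toy_alloc_batch and toy_fill are made-up stand-ins for the driver's xdp_buff ring, xsk_buff_alloc_batch() and i40e_alloc_rx_buffers_zc().

```c
#include <stdio.h>

#define RING_SIZE 8

/* Toy ring of buffer pointers, standing in for the driver's xdp_buff ring. */
struct toy_ring {
	void *bufs[RING_SIZE];
	unsigned int next_to_use;
};

/*
 * Stand-in for the batch allocator: fills a *linear* array of slots and
 * returns how many buffers it actually provided. It knows nothing about
 * ring wrapping.
 */
static unsigned int toy_alloc_batch(void **slots, unsigned int max)
{
	static char backing[64];
	static unsigned int next;
	unsigned int i;

	for (i = 0; i < max && next < sizeof(backing); i++)
		slots[i] = &backing[next++];
	return i;
}

/*
 * Mirrors the driver pattern: cap the request at the distance to the end
 * of the ring so the batch allocator never has to wrap, place the batch,
 * then wrap next_to_use afterwards if the end of the ring was reached.
 */
static unsigned int toy_fill(struct toy_ring *ring, unsigned int count)
{
	unsigned int ntu = ring->next_to_use;
	unsigned int room = RING_SIZE - ntu;
	unsigned int nb = count < room ? count : room;

	nb = toy_alloc_batch(&ring->bufs[ntu], nb);

	ntu += nb;
	if (ntu == RING_SIZE)
		ntu = 0;
	ring->next_to_use = ntu;
	return nb;
}

int main(void)
{
	struct toy_ring ring = { .next_to_use = 6 };

	/* Near the end of the ring, a request for 5 yields only 2 buffers. */
	printf("filled %u slots\n", toy_fill(&ring, 5));
	/* The next call starts again at slot 0 and can place the rest. */
	printf("filled %u slots\n", toy_fill(&ring, 3));
	return 0;
}
```

Unlike this sketch, the real i40e_alloc_rx_buffers_zc() reports a shortfall to its caller through its bool return value (count == nb_buffs in the diff below) rather than returning a count.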

File tree

1 file changed (+25, −27 lines)

drivers/net/ethernet/intel/i40e/i40e_xsk.c

Lines changed: 25 additions & 27 deletions
```diff
@@ -193,42 +193,40 @@ bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count)
 {
         u16 ntu = rx_ring->next_to_use;
         union i40e_rx_desc *rx_desc;
-        struct xdp_buff **bi, *xdp;
+        struct xdp_buff **xdp;
+        u32 nb_buffs, i;
         dma_addr_t dma;
-        bool ok = true;
 
         rx_desc = I40E_RX_DESC(rx_ring, ntu);
-        bi = i40e_rx_bi(rx_ring, ntu);
-        do {
-                xdp = xsk_buff_alloc(rx_ring->xsk_pool);
-                if (!xdp) {
-                        ok = false;
-                        goto no_buffers;
-                }
-                *bi = xdp;
-                dma = xsk_buff_xdp_get_dma(xdp);
+        xdp = i40e_rx_bi(rx_ring, ntu);
+
+        nb_buffs = min_t(u16, count, rx_ring->count - ntu);
+        nb_buffs = xsk_buff_alloc_batch(rx_ring->xsk_pool, xdp, nb_buffs);
+        if (!nb_buffs)
+                return false;
+
+        i = nb_buffs;
+        while (i--) {
+                dma = xsk_buff_xdp_get_dma(*xdp);
                 rx_desc->read.pkt_addr = cpu_to_le64(dma);
                 rx_desc->read.hdr_addr = 0;
 
                 rx_desc++;
-                bi++;
-                ntu++;
-
-                if (unlikely(ntu == rx_ring->count)) {
-                        rx_desc = I40E_RX_DESC(rx_ring, 0);
-                        bi = i40e_rx_bi(rx_ring, 0);
-                        ntu = 0;
-                }
-        } while (--count);
+                xdp++;
+        }
 
-no_buffers:
-        if (rx_ring->next_to_use != ntu) {
-                /* clear the status bits for the next_to_use descriptor */
-                rx_desc->wb.qword1.status_error_len = 0;
-                i40e_release_rx_desc(rx_ring, ntu);
+        ntu += nb_buffs;
+        if (ntu == rx_ring->count) {
+                rx_desc = I40E_RX_DESC(rx_ring, 0);
+                xdp = i40e_rx_bi(rx_ring, 0);
+                ntu = 0;
         }
 
-        return ok;
+        /* clear the status bits for the next_to_use descriptor */
+        rx_desc->wb.qword1.status_error_len = 0;
+        i40e_release_rx_desc(rx_ring, ntu);
+
+        return count == nb_buffs ? true : false;
 }
 
 /**
@@ -365,7 +363,7 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
                         break;
 
                 bi = *i40e_rx_bi(rx_ring, next_to_clean);
-                bi->data_end = bi->data + size;
+                xsk_buff_set_size(bi, size);
                 xsk_buff_dma_sync_for_cpu(bi, rx_ring->xsk_pool);
 
                 xdp_res = i40e_run_xdp_zc(rx_ring, bi);
```

0 commit comments