Skip to content

Commit 2d0de67

Browse files
Yunsheng Lin authored and kuba-moo committed
net: veth: use newly added page pool API for veth with xdp
Use page_pool_alloc() API to allocate memory with least memory utilization and performance penalty. Signed-off-by: Yunsheng Lin <[email protected]> CC: Lorenzo Bianconi <[email protected]> CC: Alexander Duyck <[email protected]> CC: Liang Chen <[email protected]> CC: Alexander Lobakin <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Jakub Kicinski <[email protected]>
1 parent 8ab32fa commit 2d0de67

File tree

1 file changed

+16
-9
lines changed

1 file changed

+16
-9
lines changed

drivers/net/veth.c

Lines changed: 16 additions & 9 deletions
Original file line number | Diff line number | Diff line change
@@ -737,10 +737,11 @@ static int veth_convert_skb_to_xdp_buff(struct veth_rq *rq,
737737
if (skb_shared(skb) || skb_head_is_locked(skb) ||
738738
skb_shinfo(skb)->nr_frags ||
739739
skb_headroom(skb) < XDP_PACKET_HEADROOM) {
740-
u32 size, len, max_head_size, off;
740+
u32 size, len, max_head_size, off, truesize, page_offset;
741741
struct sk_buff *nskb;
742742
struct page *page;
743743
int i, head_off;
744+
void *va;
744745

745746
/* We need a private copy of the skb and data buffers since
746747
* the ebpf program can modify it. We segment the original skb
@@ -753,22 +754,24 @@ static int veth_convert_skb_to_xdp_buff(struct veth_rq *rq,
753754
if (skb->len > PAGE_SIZE * MAX_SKB_FRAGS + max_head_size)
754755
goto drop;
755756

757+
size = min_t(u32, skb->len, max_head_size);
758+
truesize = SKB_HEAD_ALIGN(size) + VETH_XDP_HEADROOM;
759+
756760
/* Allocate skb head */
757-
page = page_pool_dev_alloc_pages(rq->page_pool);
758-
if (!page)
761+
va = page_pool_dev_alloc_va(rq->page_pool, &truesize);
762+
if (!va)
759763
goto drop;
760764

761-
nskb = napi_build_skb(page_address(page), PAGE_SIZE);
765+
nskb = napi_build_skb(va, truesize);
762766
if (!nskb) {
763-
page_pool_put_full_page(rq->page_pool, page, true);
767+
page_pool_free_va(rq->page_pool, va, true);
764768
goto drop;
765769
}
766770

767771
skb_reserve(nskb, VETH_XDP_HEADROOM);
768772
skb_copy_header(nskb, skb);
769773
skb_mark_for_recycle(nskb);
770774

771-
size = min_t(u32, skb->len, max_head_size);
772775
if (skb_copy_bits(skb, 0, nskb->data, size)) {
773776
consume_skb(nskb);
774777
goto drop;
@@ -783,14 +786,18 @@ static int veth_convert_skb_to_xdp_buff(struct veth_rq *rq,
783786
len = skb->len - off;
784787

785788
for (i = 0; i < MAX_SKB_FRAGS && off < skb->len; i++) {
786-
page = page_pool_dev_alloc_pages(rq->page_pool);
789+
size = min_t(u32, len, PAGE_SIZE);
790+
truesize = size;
791+
792+
page = page_pool_dev_alloc(rq->page_pool, &page_offset,
793+
&truesize);
787794
if (!page) {
788795
consume_skb(nskb);
789796
goto drop;
790797
}
791798

792-
size = min_t(u32, len, PAGE_SIZE);
793-
skb_add_rx_frag(nskb, i, page, 0, size, PAGE_SIZE);
799+
skb_add_rx_frag(nskb, i, page, page_offset, size,
800+
truesize);
794801
if (skb_copy_bits(skb, off, page_address(page),
795802
size)) {
796803
consume_skb(nskb);

0 commit comments

Comments
 (0)