Commit e8db37b

net: mana: Switch to page pool for jumbo frames
jira LE-4385
Rebuild_History Non-Buildable kernel-5.14.0-570.52.1.el9_6
commit-author Haiyang Zhang <[email protected]>
commit fa37a88

Frag allocators, such as netdev_alloc_frag(), were not designed to work
for fragsz > PAGE_SIZE. So, switch to page pool for jumbo frames instead
of using page frag allocators. This driver is using page pool for
smaller MTUs already.

Cc: [email protected]
Fixes: 80f6215 ("net: mana: Add support for jumbo frame")
Signed-off-by: Haiyang Zhang <[email protected]>
Reviewed-by: Long Li <[email protected]>
Reviewed-by: Shradha Gupta <[email protected]>
Link: https://patch.msgid.link/[email protected]
Signed-off-by: Jakub Kicinski <[email protected]>
(cherry picked from commit fa37a88)
Signed-off-by: Jonathan Maple <[email protected]>
1 parent a8781f1 commit e8db37b
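
As a rough sketch of the approach described in the commit message above (not the driver's actual code; the function name, pool depth, and header paths are illustrative), a page pool created with a non-zero order hands out compound pages of the requested size, which the frag allocators cannot guarantee once fragsz exceeds PAGE_SIZE:

/* Minimal sketch: configure a per-queue page pool whose page order is
 * derived from the RX buffer size, so jumbo-frame buffers come from the
 * pool as compound pages rather than from netdev_alloc_frag() or
 * napi_alloc_frag(). Header locations vary by kernel version.
 */
#include <linux/netdevice.h>
#include <net/page_pool/types.h>
#include <net/page_pool/helpers.h>

static struct page_pool *rx_pool_create(struct napi_struct *napi,
					struct net_device *ndev,
					u32 alloc_size, int nid)
{
	struct page_pool_params pprm = {};

	pprm.pool_size = 256;			/* illustrative depth */
	pprm.nid = nid;
	pprm.napi = napi;
	pprm.netdev = ndev;
	pprm.order = get_order(alloc_size);	/* > 0 for jumbo frames */

	return page_pool_create(&pprm);		/* ERR_PTR() on failure */
}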

1 file changed: +9, -37 lines changed


drivers/net/ethernet/microsoft/mana/mana_en.c

Lines changed: 9 additions & 37 deletions
@@ -659,30 +659,16 @@ int mana_pre_alloc_rxbufs(struct mana_port_context *mpc, int new_mtu, int num_qu
 	mpc->rxbpre_total = 0;
 
 	for (i = 0; i < num_rxb; i++) {
-		if (mpc->rxbpre_alloc_size > PAGE_SIZE) {
-			va = netdev_alloc_frag(mpc->rxbpre_alloc_size);
-			if (!va)
-				goto error;
-
-			page = virt_to_head_page(va);
-			/* Check if the frag falls back to single page */
-			if (compound_order(page) <
-			    get_order(mpc->rxbpre_alloc_size)) {
-				put_page(page);
-				goto error;
-			}
-		} else {
-			page = dev_alloc_page();
-			if (!page)
-				goto error;
+		page = dev_alloc_pages(get_order(mpc->rxbpre_alloc_size));
+		if (!page)
+			goto error;
 
-			va = page_to_virt(page);
-		}
+		va = page_to_virt(page);
 
 		da = dma_map_single(dev, va + mpc->rxbpre_headroom,
 				    mpc->rxbpre_datasize, DMA_FROM_DEVICE);
 		if (dma_mapping_error(dev, da)) {
-			put_page(virt_to_head_page(va));
+			put_page(page);
 			goto error;
 		}
 
@@ -1674,7 +1660,7 @@ static void mana_rx_skb(void *buf_va, bool from_pool,
 }
 
 static void *mana_get_rxfrag(struct mana_rxq *rxq, struct device *dev,
-			     dma_addr_t *da, bool *from_pool, bool is_napi)
+			     dma_addr_t *da, bool *from_pool)
 {
 	struct page *page;
 	void *va;
@@ -1685,21 +1671,6 @@ static void *mana_get_rxfrag(struct mana_rxq *rxq, struct device *dev,
 	if (rxq->xdp_save_va) {
 		va = rxq->xdp_save_va;
 		rxq->xdp_save_va = NULL;
-	} else if (rxq->alloc_size > PAGE_SIZE) {
-		if (is_napi)
-			va = napi_alloc_frag(rxq->alloc_size);
-		else
-			va = netdev_alloc_frag(rxq->alloc_size);
-
-		if (!va)
-			return NULL;
-
-		page = virt_to_head_page(va);
-		/* Check if the frag falls back to single page */
-		if (compound_order(page) < get_order(rxq->alloc_size)) {
-			put_page(page);
-			return NULL;
-		}
 	} else {
 		page = page_pool_dev_alloc_pages(rxq->page_pool);
 		if (!page)
@@ -1732,7 +1703,7 @@ static void mana_refill_rx_oob(struct device *dev, struct mana_rxq *rxq,
 	dma_addr_t da;
 	void *va;
 
-	va = mana_get_rxfrag(rxq, dev, &da, &from_pool, true);
+	va = mana_get_rxfrag(rxq, dev, &da, &from_pool);
 	if (!va)
 		return;
 
@@ -2174,7 +2145,7 @@ static int mana_fill_rx_oob(struct mana_recv_buf_oob *rx_oob, u32 mem_key,
 	if (mpc->rxbufs_pre)
 		va = mana_get_rxbuf_pre(rxq, &da);
 	else
-		va = mana_get_rxfrag(rxq, dev, &da, &from_pool, false);
+		va = mana_get_rxfrag(rxq, dev, &da, &from_pool);
 
 	if (!va)
 		return -ENOMEM;
@@ -2260,6 +2231,7 @@ static int mana_create_page_pool(struct mana_rxq *rxq, struct gdma_context *gc)
 	pprm.nid = gc->numa_node;
 	pprm.napi = &rxq->rx_cq.napi;
 	pprm.netdev = rxq->ndev;
+	pprm.order = get_order(rxq->alloc_size);
 
 	rxq->page_pool = page_pool_create(&pprm);
 