
Commit 7e47fd8

gclement authored and davem330 committed
net: mvneta: Allocate page for the descriptor
Instead of trying to allocate the exact amount of memory for each descriptor, use a page for each of them; this simplifies the allocation management and increases the performance of the driver.

Based on the work of Yelena Krivosheev <[email protected]>

Signed-off-by: Gregory CLEMENT <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
1 parent 17a96da commit 7e47fd8
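For illustration, here is a minimal sketch of the page-per-descriptor refill pattern this commit switches to, written outside the driver: demo_rx_desc, demo_rx_refill() and the dma_dev parameter are hypothetical stand-ins, not the mvneta definitions, while the __dev_alloc_page() / dma_map_page() / __free_page() calls mirror the new mvneta_rx_refill() in the diff below.

/*
 * Illustrative sketch only: back one RX descriptor with a whole page.
 * demo_rx_desc and demo_rx_refill() are simplified stand-ins for the
 * mvneta structures; the allocation and mapping calls match the new code.
 */
#include <linux/gfp.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>

struct demo_rx_desc {
	dma_addr_t buf_phys_addr;	/* DMA address handed to the NIC */
	void *buf_cookie;		/* driver-side handle: the struct page */
};

static int demo_rx_refill(struct device *dma_dev, struct demo_rx_desc *desc,
			  gfp_t gfp_mask)
{
	struct page *page;
	dma_addr_t phys_addr;

	/* One full page per descriptor: no per-buffer size bookkeeping. */
	page = __dev_alloc_page(gfp_mask);
	if (!page)
		return -ENOMEM;

	/* Map the whole page for device-to-memory (RX) DMA. */
	phys_addr = dma_map_page(dma_dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(dma_dev, phys_addr)) {
		__free_page(page);
		return -ENOMEM;
	}

	desc->buf_phys_addr = phys_addr;
	desc->buf_cookie = page;	/* page_address(page) gives the CPU view */
	return 0;
}

On the receive side the stored page is turned back into a CPU address with page_address() before the header is prefetched and the skb is built, as the mvneta_rx_swbm() hunks below show.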

File tree

2 files changed: +24 −41 lines changed

drivers/net/ethernet/marvell/mvneta.c

Lines changed: 24 additions & 38 deletions
@@ -1795,47 +1795,30 @@ static void mvneta_txq_done(struct mvneta_port *pp,
 	}
 }
 
-void *mvneta_frag_alloc(unsigned int frag_size)
-{
-	if (likely(frag_size <= PAGE_SIZE))
-		return netdev_alloc_frag(frag_size);
-	else
-		return kmalloc(frag_size, GFP_ATOMIC);
-}
-EXPORT_SYMBOL_GPL(mvneta_frag_alloc);
-
-void mvneta_frag_free(unsigned int frag_size, void *data)
-{
-	if (likely(frag_size <= PAGE_SIZE))
-		skb_free_frag(data);
-	else
-		kfree(data);
-}
-EXPORT_SYMBOL_GPL(mvneta_frag_free);
-
 /* Refill processing for SW buffer management */
+/* Allocate page per descriptor */
 static int mvneta_rx_refill(struct mvneta_port *pp,
 			    struct mvneta_rx_desc *rx_desc,
-			    struct mvneta_rx_queue *rxq)
-
+			    struct mvneta_rx_queue *rxq,
+			    gfp_t gfp_mask)
 {
 	dma_addr_t phys_addr;
-	void *data;
+	struct page *page;
 
-	data = mvneta_frag_alloc(pp->frag_size);
-	if (!data)
+	page = __dev_alloc_page(gfp_mask);
+	if (!page)
 		return -ENOMEM;
 
-	phys_addr = dma_map_single(pp->dev->dev.parent, data,
-				   MVNETA_RX_BUF_SIZE(pp->pkt_size),
-				   DMA_FROM_DEVICE);
+	/* map page for use */
+	phys_addr = dma_map_page(pp->dev->dev.parent, page, 0, PAGE_SIZE,
+				 DMA_FROM_DEVICE);
 	if (unlikely(dma_mapping_error(pp->dev->dev.parent, phys_addr))) {
-		mvneta_frag_free(pp->frag_size, data);
+		__free_page(page);
 		return -ENOMEM;
 	}
 
 	phys_addr += pp->rx_offset_correction;
-	mvneta_rx_desc_fill(rx_desc, phys_addr, data, rxq);
+	mvneta_rx_desc_fill(rx_desc, phys_addr, page, rxq);
 	return 0;
 }
 
@@ -1901,7 +1884,7 @@ static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
 
 		dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr,
 				 MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);
-		mvneta_frag_free(pp->frag_size, data);
+		__free_page(data);
 	}
 }
 
@@ -1928,6 +1911,7 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
 		struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
 		struct sk_buff *skb;
 		unsigned char *data;
+		struct page *page;
 		dma_addr_t phys_addr;
 		u32 rx_status, frag_size;
 		int rx_bytes, err, index;
@@ -1936,7 +1920,10 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
 		rx_status = rx_desc->status;
 		rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE);
 		index = rx_desc - rxq->descs;
-		data = rxq->buf_virt_addr[index];
+		page = (struct page *)rxq->buf_virt_addr[index];
+		data = page_address(page);
+		/* Prefetch header */
+		prefetch(data);
 		phys_addr = rx_desc->buf_phys_addr - pp->rx_offset_correction;
 
 		if (!mvneta_rxq_desc_is_first_last(rx_status) ||
@@ -1979,7 +1966,7 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
 		}
 
 		/* Refill processing */
-		err = mvneta_rx_refill(pp, rx_desc, rxq);
+		err = mvneta_rx_refill(pp, rx_desc, rxq, GFP_KERNEL);
 		if (err) {
 			netdev_err(dev, "Linux processing - Can't refill\n");
 			rxq->refill_err++;
@@ -2773,9 +2760,11 @@ static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
 
 	for (i = 0; i < num; i++) {
 		memset(rxq->descs + i, 0, sizeof(struct mvneta_rx_desc));
-		if (mvneta_rx_refill(pp, rxq->descs + i, rxq) != 0) {
-			netdev_err(pp->dev, "%s:rxq %d, %d of %d buffs filled\n",
-				   __func__, rxq->id, i, num);
+		if (mvneta_rx_refill(pp, rxq->descs + i, rxq,
+				     GFP_KERNEL) != 0) {
+			netdev_err(pp->dev,
+				   "%s:rxq %d, %d of %d buffs filled\n",
+				   __func__, rxq->id, i, num);
 			break;
 		}
 	}
@@ -3189,8 +3178,6 @@ static int mvneta_change_mtu(struct net_device *dev, int mtu)
 		mvneta_bm_update_mtu(pp, mtu);
 
 	pp->pkt_size = MVNETA_RX_PKT_SIZE(dev->mtu);
-	pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
-			SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 
 	ret = mvneta_setup_rxqs(pp);
 	if (ret) {
@@ -3677,8 +3664,7 @@ static int mvneta_open(struct net_device *dev)
 	int ret;
 
 	pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
-	pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
-			SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+	pp->frag_size = PAGE_SIZE;
 
 	ret = mvneta_setup_rxqs(pp);
 	if (ret)

drivers/net/ethernet/marvell/mvneta_bm.h

Lines changed: 0 additions & 3 deletions
@@ -130,9 +130,6 @@ struct mvneta_bm_pool {
 };
 
 /* Declarations and definitions */
-void *mvneta_frag_alloc(unsigned int frag_size);
-void mvneta_frag_free(unsigned int frag_size, void *data);
-
 #if IS_ENABLED(CONFIG_MVNETA_BM)
 struct mvneta_bm *mvneta_bm_get(struct device_node *node);
 void mvneta_bm_put(struct mvneta_bm *priv);
