
Commit de0b90e

elvinongbl authored and davem330 committed
net: stmmac: rearrange RX and TX desc init into per-queue basis
The functions below are made per-queue in preparation for XDP ZC:

  __init_dma_rx_desc_rings(struct stmmac_priv *priv, u32 queue, gfp_t flags)
  __init_dma_tx_desc_rings(struct stmmac_priv *priv, u32 queue)

The original functions below remain maintained for all-queue usage:

  init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
  init_dma_tx_desc_rings(struct net_device *dev)

Signed-off-by: Ong Boon Leong <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
1 parent da5ec7f commit de0b90e
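With the init path split per queue, a later XDP zero-copy setup can reinitialize one queue's rings in isolation instead of tearing down every ring. The caller below is not part of this commit; it is a minimal sketch of the intended per-queue usage, assuming the queue has already been stopped and its old buffers freed, and the function name is hypothetical:

/* Hypothetical caller (not in this commit): reinit one queue's RX and
 * TX rings, e.g. when attaching XDP ZC to that queue. Assumes the
 * caller already stopped the queue and released its old buffers.
 */
static int stmmac_reinit_queue_rings(struct stmmac_priv *priv, u32 queue)
{
	int ret;

	/* Per-queue RX init: clear descriptors, register the page_pool
	 * memory model, refill buffers for this queue only.
	 */
	ret = __init_dma_rx_desc_rings(priv, queue, GFP_KERNEL);
	if (ret)
		return ret;

	/* Per-queue TX init: clear descriptors, reset ring state. */
	return __init_dma_tx_desc_rings(priv, queue);
}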

File tree

1 file changed: +100 -80 lines changed


drivers/net/ethernet/stmicro/stmmac/stmmac_main.c

Lines changed: 100 additions & 80 deletions
@@ -1575,60 +1575,70 @@ static void stmmac_reinit_rx_buffers(struct stmmac_priv *priv)
 }
 
 /**
- * init_dma_rx_desc_rings - init the RX descriptor rings
- * @dev: net device structure
+ * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
+ * @priv: driver private structure
+ * @queue: RX queue index
  * @flags: gfp flag.
  * Description: this function initializes the DMA RX descriptors
  * and allocates the socket buffers. It supports the chained and ring
  * modes.
  */
-static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
+static int __init_dma_rx_desc_rings(struct stmmac_priv *priv, u32 queue, gfp_t flags)
 {
-	struct stmmac_priv *priv = netdev_priv(dev);
-	u32 rx_count = priv->plat->rx_queues_to_use;
-	int ret = -ENOMEM;
-	int queue;
+	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+	int ret;
 
-	/* RX INITIALIZATION */
 	netif_dbg(priv, probe, priv->dev,
-		  "SKB addresses:\nskb\t\tskb data\tdma data\n");
+		  "(%s) dma_rx_phy=0x%08x\n", __func__,
+		  (u32)rx_q->dma_rx_phy);
 
-	for (queue = 0; queue < rx_count; queue++) {
-		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+	stmmac_clear_rx_descriptors(priv, queue);
 
+	WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
+					   MEM_TYPE_PAGE_POOL,
+					   rx_q->page_pool));
 
-		netif_dbg(priv, probe, priv->dev,
-			  "(%s) dma_rx_phy=0x%08x\n", __func__,
-			  (u32)rx_q->dma_rx_phy);
+	netdev_info(priv->dev,
+		    "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
+		    rx_q->queue_index);
 
-		stmmac_clear_rx_descriptors(priv, queue);
+	ret = stmmac_alloc_rx_buffers(priv, queue, flags);
+	if (ret < 0)
+		return -ENOMEM;
 
-		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
-						   MEM_TYPE_PAGE_POOL,
-						   rx_q->page_pool));
+	rx_q->cur_rx = 0;
+	rx_q->dirty_rx = 0;
 
-		netdev_info(priv->dev,
-			    "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
-			    rx_q->queue_index);
+	/* Setup the chained descriptor addresses */
+	if (priv->mode == STMMAC_CHAIN_MODE) {
+		if (priv->extend_desc)
+			stmmac_mode_init(priv, rx_q->dma_erx,
+					 rx_q->dma_rx_phy,
+					 priv->dma_rx_size, 1);
+		else
+			stmmac_mode_init(priv, rx_q->dma_rx,
+					 rx_q->dma_rx_phy,
+					 priv->dma_rx_size, 0);
+	}
 
-		ret = stmmac_alloc_rx_buffers(priv, queue, flags);
-		if (ret < 0)
-			goto err_init_rx_buffers;
+	return 0;
+}
 
-		rx_q->cur_rx = 0;
-		rx_q->dirty_rx = 0;
+static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
+{
+	struct stmmac_priv *priv = netdev_priv(dev);
+	u32 rx_count = priv->plat->rx_queues_to_use;
+	u32 queue;
+	int ret;
 
-		/* Setup the chained descriptor addresses */
-		if (priv->mode == STMMAC_CHAIN_MODE) {
-			if (priv->extend_desc)
-				stmmac_mode_init(priv, rx_q->dma_erx,
-						 rx_q->dma_rx_phy,
-						 priv->dma_rx_size, 1);
-			else
-				stmmac_mode_init(priv, rx_q->dma_rx,
-						 rx_q->dma_rx_phy,
-						 priv->dma_rx_size, 0);
-		}
+	/* RX INITIALIZATION */
+	netif_dbg(priv, probe, priv->dev,
+		  "SKB addresses:\nskb\t\tskb data\tdma data\n");
+
+	for (queue = 0; queue < rx_count; queue++) {
+		ret = __init_dma_rx_desc_rings(priv, queue, flags);
+		if (ret)
+			goto err_init_rx_buffers;
 	}
 
 	return 0;
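The per-queue RX helper now owns the MEM_TYPE_PAGE_POOL registration that previously sat in the all-queue loop. For reference, this is the generic kernel pattern for binding a page_pool to a queue's xdp_rxq_info; the fragment below is an illustrative sketch with invented names, not stmmac code:

#include <net/page_pool.h>
#include <net/xdp.h>

/* Illustrative sketch: after xdp_rxq_info_reg() has registered the
 * queue, declare that its buffers come from a page_pool so XDP
 * frames can be recycled back into the pool. 'xdp_rxq' and 'pool'
 * are placeholder names.
 */
static int example_bind_page_pool(struct xdp_rxq_info *xdp_rxq,
				  struct page_pool *pool)
{
	return xdp_rxq_info_reg_mem_model(xdp_rxq, MEM_TYPE_PAGE_POOL, pool);
}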
@@ -1647,63 +1657,73 @@ static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
 }
 
 /**
- * init_dma_tx_desc_rings - init the TX descriptor rings
- * @dev: net device structure.
+ * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
+ * @priv: driver private structure
+ * @queue : TX queue index
  * Description: this function initializes the DMA TX descriptors
  * and allocates the socket buffers. It supports the chained and ring
  * modes.
  */
-static int init_dma_tx_desc_rings(struct net_device *dev)
+static int __init_dma_tx_desc_rings(struct stmmac_priv *priv, u32 queue)
 {
-	struct stmmac_priv *priv = netdev_priv(dev);
-	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
-	u32 queue;
+	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
 	int i;
 
-	for (queue = 0; queue < tx_queue_cnt; queue++) {
-		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
-
-		netif_dbg(priv, probe, priv->dev,
-			  "(%s) dma_tx_phy=0x%08x\n", __func__,
-			  (u32)tx_q->dma_tx_phy);
-
-		/* Setup the chained descriptor addresses */
-		if (priv->mode == STMMAC_CHAIN_MODE) {
-			if (priv->extend_desc)
-				stmmac_mode_init(priv, tx_q->dma_etx,
-						 tx_q->dma_tx_phy,
-						 priv->dma_tx_size, 1);
-			else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
-				stmmac_mode_init(priv, tx_q->dma_tx,
-						 tx_q->dma_tx_phy,
-						 priv->dma_tx_size, 0);
-		}
+	netif_dbg(priv, probe, priv->dev,
+		  "(%s) dma_tx_phy=0x%08x\n", __func__,
+		  (u32)tx_q->dma_tx_phy);
 
-		for (i = 0; i < priv->dma_tx_size; i++) {
-			struct dma_desc *p;
-			if (priv->extend_desc)
-				p = &((tx_q->dma_etx + i)->basic);
-			else if (tx_q->tbs & STMMAC_TBS_AVAIL)
-				p = &((tx_q->dma_entx + i)->basic);
-			else
-				p = tx_q->dma_tx + i;
+	/* Setup the chained descriptor addresses */
+	if (priv->mode == STMMAC_CHAIN_MODE) {
+		if (priv->extend_desc)
+			stmmac_mode_init(priv, tx_q->dma_etx,
+					 tx_q->dma_tx_phy,
+					 priv->dma_tx_size, 1);
+		else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
+			stmmac_mode_init(priv, tx_q->dma_tx,
+					 tx_q->dma_tx_phy,
+					 priv->dma_tx_size, 0);
+	}
 
-			stmmac_clear_desc(priv, p);
+	for (i = 0; i < priv->dma_tx_size; i++) {
+		struct dma_desc *p;
 
-			tx_q->tx_skbuff_dma[i].buf = 0;
-			tx_q->tx_skbuff_dma[i].map_as_page = false;
-			tx_q->tx_skbuff_dma[i].len = 0;
-			tx_q->tx_skbuff_dma[i].last_segment = false;
-			tx_q->tx_skbuff[i] = NULL;
-		}
+		if (priv->extend_desc)
+			p = &((tx_q->dma_etx + i)->basic);
+		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
+			p = &((tx_q->dma_entx + i)->basic);
+		else
+			p = tx_q->dma_tx + i;
 
-		tx_q->dirty_tx = 0;
-		tx_q->cur_tx = 0;
-		tx_q->mss = 0;
+		stmmac_clear_desc(priv, p);
 
-		netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
+		tx_q->tx_skbuff_dma[i].buf = 0;
+		tx_q->tx_skbuff_dma[i].map_as_page = false;
+		tx_q->tx_skbuff_dma[i].len = 0;
+		tx_q->tx_skbuff_dma[i].last_segment = false;
+		tx_q->tx_skbuff[i] = NULL;
 	}
 
+	tx_q->dirty_tx = 0;
+	tx_q->cur_tx = 0;
+	tx_q->mss = 0;
+
+	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
+
+	return 0;
+}
+
+static int init_dma_tx_desc_rings(struct net_device *dev)
+{
+	struct stmmac_priv *priv = netdev_priv(dev);
+	u32 tx_queue_cnt;
+	u32 queue;
+
+	tx_queue_cnt = priv->plat->tx_queues_to_use;
+
+	for (queue = 0; queue < tx_queue_cnt; queue++)
+		__init_dma_tx_desc_rings(priv, queue);
+
 	return 0;
 }
 
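Note that the per-queue TX helper keeps the netdev_tx_reset_queue() call alongside the ring-pointer reset, so stale byte-queue-limit accounting cannot stall the freshly reset queue. A minimal sketch of that generic pairing, with placeholder names rather than stmmac code:

#include <linux/netdevice.h>

/* Illustrative sketch: whenever a TX ring's cur/dirty pointers are
 * reset, the matching software queue state (BQL, xmit counters) must
 * be reset too, or completions will be mis-accounted. 'dev' and
 * 'queue' are placeholder names.
 */
static void example_reset_tx_queue_state(struct net_device *dev, u32 queue)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue);

	netdev_tx_reset_queue(txq);
}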