
Commit b0a43db

LorenzoBianconi authored and davem330 committed
net: mvneta: add XDP_TX support
Implement XDP_TX verdict and ndo_xdp_xmit net_device_ops function pointer

Signed-off-by: Lorenzo Bianconi <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
1 parent 9e58c8b commit b0a43db
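
For context (not part of this commit): the XDP_TX verdict this patch implements is what a reflector-style BPF program returns to bounce a frame back out the receiving port. A minimal sketch, assuming the usual clang -target bpf build and the libbpf helper headers:

/* Reflect every frame back out the ingress port. Swapping the MAC
 * addresses keeps the bounced frame addressable on the wire.
 */
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int xdp_tx_prog(struct xdp_md *ctx)
{
	void *data = (void *)(long)ctx->data;
	void *data_end = (void *)(long)ctx->data_end;
	struct ethhdr *eth = data;
	unsigned char tmp[ETH_ALEN];

	if (data + sizeof(*eth) > data_end)
		return XDP_PASS;

	__builtin_memcpy(tmp, eth->h_source, ETH_ALEN);
	__builtin_memcpy(eth->h_source, eth->h_dest, ETH_ALEN);
	__builtin_memcpy(eth->h_dest, tmp, ETH_ALEN);

	return XDP_TX;
}

char _license[] SEC("license") = "GPL";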

1 file changed (+121, −7)

drivers/net/ethernet/marvell/mvneta.c

Lines changed: 121 additions & 7 deletions
@@ -1800,16 +1800,19 @@ static void mvneta_txq_bufs_free(struct mvneta_port *pp,
 
 		mvneta_txq_inc_get(txq);
 
-		if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
+		if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr) &&
+		    buf->type != MVNETA_TYPE_XDP_TX)
 			dma_unmap_single(pp->dev->dev.parent,
 					 tx_desc->buf_phys_addr,
 					 tx_desc->data_size, DMA_TO_DEVICE);
-		if (!buf->skb)
-			continue;
-
-		bytes_compl += buf->skb->len;
-		pkts_compl++;
-		dev_kfree_skb_any(buf->skb);
+		if (buf->type == MVNETA_TYPE_SKB && buf->skb) {
+			bytes_compl += buf->skb->len;
+			pkts_compl++;
+			dev_kfree_skb_any(buf->skb);
+		} else if (buf->type == MVNETA_TYPE_XDP_TX ||
+			   buf->type == MVNETA_TYPE_XDP_NDO) {
+			xdp_return_frame(buf->xdpf);
+		}
 	}
 
 	netdev_tx_completed_queue(nq, pkts_compl, bytes_compl);
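
The buf->type checks above rely on the tx-buffer bookkeeping introduced by the parent commit 9e58c8b; for reference, mvneta.c carries definitions along these lines (reproduced as a reading aid, not part of this diff):

enum mvneta_tx_buf_type {
	MVNETA_TYPE_SKB,
	MVNETA_TYPE_XDP_TX,	/* page_pool page: pool owns the DMA mapping */
	MVNETA_TYPE_XDP_NDO,	/* ndo_xdp_xmit frame: mapped per transmit */
};

struct mvneta_tx_buf {
	enum mvneta_tx_buf_type type;
	union {
		struct xdp_frame *xdpf;
		struct sk_buff *skb;
	};
};

This is why MVNETA_TYPE_XDP_TX buffers skip dma_unmap_single() on completion: their pages stay mapped by the page_pool, whereas MVNETA_TYPE_XDP_NDO frames were mapped in mvneta_xdp_submit_frame() and are unmapped here before xdp_return_frame().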
@@ -1973,6 +1976,111 @@ int mvneta_rx_refill_queue(struct mvneta_port *pp, struct mvneta_rx_queue *rxq)
 	return i;
 }
 
+static int
+mvneta_xdp_submit_frame(struct mvneta_port *pp, struct mvneta_tx_queue *txq,
+			struct xdp_frame *xdpf, bool dma_map)
+{
+	struct mvneta_tx_desc *tx_desc;
+	struct mvneta_tx_buf *buf;
+	dma_addr_t dma_addr;
+
+	if (txq->count >= txq->tx_stop_threshold)
+		return MVNETA_XDP_DROPPED;
+
+	tx_desc = mvneta_txq_next_desc_get(txq);
+
+	buf = &txq->buf[txq->txq_put_index];
+	if (dma_map) {
+		/* ndo_xdp_xmit */
+		dma_addr = dma_map_single(pp->dev->dev.parent, xdpf->data,
+					  xdpf->len, DMA_TO_DEVICE);
+		if (dma_mapping_error(pp->dev->dev.parent, dma_addr)) {
+			mvneta_txq_desc_put(txq);
+			return MVNETA_XDP_DROPPED;
+		}
+		buf->type = MVNETA_TYPE_XDP_NDO;
+	} else {
+		struct page *page = virt_to_page(xdpf->data);
+
+		dma_addr = page_pool_get_dma_addr(page) +
+			   sizeof(*xdpf) + xdpf->headroom;
+		dma_sync_single_for_device(pp->dev->dev.parent, dma_addr,
+					   xdpf->len, DMA_BIDIRECTIONAL);
+		buf->type = MVNETA_TYPE_XDP_TX;
+	}
+	buf->xdpf = xdpf;
+
+	tx_desc->command = MVNETA_TXD_FLZ_DESC;
+	tx_desc->buf_phys_addr = dma_addr;
+	tx_desc->data_size = xdpf->len;
+
+	mvneta_update_stats(pp, 1, xdpf->len, true);
+	mvneta_txq_inc_put(txq);
+	txq->pending++;
+	txq->count++;
+
+	return MVNETA_XDP_TX;
+}
+
+static int
+mvneta_xdp_xmit_back(struct mvneta_port *pp, struct xdp_buff *xdp)
+{
+	struct mvneta_tx_queue *txq;
+	struct netdev_queue *nq;
+	struct xdp_frame *xdpf;
+	int cpu;
+	u32 ret;
+
+	xdpf = convert_to_xdp_frame(xdp);
+	if (unlikely(!xdpf))
+		return MVNETA_XDP_DROPPED;
+
+	cpu = smp_processor_id();
+	txq = &pp->txqs[cpu % txq_number];
+	nq = netdev_get_tx_queue(pp->dev, txq->id);
+
+	__netif_tx_lock(nq, cpu);
+	ret = mvneta_xdp_submit_frame(pp, txq, xdpf, false);
+	if (ret == MVNETA_XDP_TX)
+		mvneta_txq_pend_desc_add(pp, txq, 0);
+	__netif_tx_unlock(nq);
+
+	return ret;
+}
+
+static int
+mvneta_xdp_xmit(struct net_device *dev, int num_frame,
+		struct xdp_frame **frames, u32 flags)
+{
+	struct mvneta_port *pp = netdev_priv(dev);
+	int cpu = smp_processor_id();
+	struct mvneta_tx_queue *txq;
+	struct netdev_queue *nq;
+	int i, drops = 0;
+	u32 ret;
+
+	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+		return -EINVAL;
+
+	txq = &pp->txqs[cpu % txq_number];
+	nq = netdev_get_tx_queue(pp->dev, txq->id);
+
+	__netif_tx_lock(nq, cpu);
+	for (i = 0; i < num_frame; i++) {
+		ret = mvneta_xdp_submit_frame(pp, txq, frames[i], true);
+		if (ret != MVNETA_XDP_TX) {
+			xdp_return_frame_rx_napi(frames[i]);
+			drops++;
+		}
+	}
+
+	if (unlikely(flags & XDP_XMIT_FLUSH))
+		mvneta_txq_pend_desc_add(pp, txq, 0);
+	__netif_tx_unlock(nq);
+
+	return num_frame - drops;
+}
+
 static int
 mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
 	       struct bpf_prog *prog, struct xdp_buff *xdp)
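
Note that mvneta_xdp_xmit() is never called from within the driver: the core invokes it when another XDP program redirects frames to this port, typically through a devmap. A sketch of such a program, in the map-definition style of this era (names illustrative; user space is assumed to fill slot 0 with the egress ifindex):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct bpf_map_def SEC("maps") tx_port = {
	.type		= BPF_MAP_TYPE_DEVMAP,
	.key_size	= sizeof(__u32),
	.value_size	= sizeof(__u32),
	.max_entries	= 1,
};

SEC("xdp")
int xdp_redirect_prog(struct xdp_md *ctx)
{
	/* redirect to the device stored in slot 0; the target
	 * driver's ndo_xdp_xmit (mvneta_xdp_xmit here) does the send
	 */
	return bpf_redirect_map(&tx_port, 0, 0);
}

char _license[] SEC("license") = "GPL";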
@@ -1995,6 +2103,11 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
 		}
 		break;
 	}
+	case XDP_TX:
+		ret = mvneta_xdp_xmit_back(pp, xdp);
+		if (ret != MVNETA_XDP_TX)
+			xdp_return_buff(xdp);
+		break;
 	default:
 		bpf_warn_invalid_xdp_action(act);
 		/* fall through */
@@ -4534,6 +4647,7 @@ static const struct net_device_ops mvneta_netdev_ops = {
 	.ndo_get_stats64	= mvneta_get_stats64,
 	.ndo_do_ioctl		= mvneta_ioctl,
 	.ndo_bpf		= mvneta_xdp,
+	.ndo_xdp_xmit		= mvneta_xdp_xmit,
 };
 
 static const struct ethtool_ops mvneta_eth_tool_ops = {
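
To exercise either path end to end, a program has to be attached in native (driver) mode so that mvneta_run_xdp() sees it. A minimal loader sketch using the pre-1.0 libbpf entry points of this era (interface name and object file are placeholders):

#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include <linux/if_link.h>
#include <net/if.h>

int main(void)
{
	struct bpf_object *obj;
	int prog_fd;
	int ifindex = if_nametoindex("eth0");	/* placeholder interface */

	if (!ifindex)
		return 1;
	/* "xdp_tx_kern.o" is a placeholder object name */
	if (bpf_prog_load("xdp_tx_kern.o", BPF_PROG_TYPE_XDP, &obj, &prog_fd))
		return 1;
	/* native attach; mvneta accepts this through its ndo_bpf hook */
	return bpf_set_link_xdp_fd(ifindex, prog_fd, XDP_FLAGS_DRV_MODE);
}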
