
Commit edd53ee

fengidri authored and gregkh committed
virtio_net: xsk: bind/unbind xsk for tx
[ Upstream commit 21a4e3c ]

This patch implements the logic to bind/unbind an xsk pool to the sq and rq.

Signed-off-by: Xuan Zhuo <[email protected]>
Acked-by: Jason Wang <[email protected]>
Link: https://patch.msgid.link/[email protected]
Signed-off-by: Jakub Kicinski <[email protected]>
Stable-dep-of: 4397684 ("virtio-net: free xsk_buffs on error in virtnet_xsk_pool_enable()")
Signed-off-by: Sasha Levin <[email protected]>
1 parent 98cd7ed commit edd53ee
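
The enable/disable paths below are reached through the driver's ndo_bpf hook when userspace binds or unbinds an AF_XDP socket. As context, a minimal sketch of that dispatcher (introduced by the earlier rq-bind patch, not by this commit; reproduced from memory, so treat the exact shape as an assumption):

static int virtnet_xsk_pool_setup(struct net_device *dev,
				  struct netdev_bpf *xdp)
{
	/* XDP_SETUP_XSK_POOL carries a pool to bind, or NULL to unbind. */
	if (xdp->xsk.pool)
		return virtnet_xsk_pool_enable(dev, xdp->xsk.pool,
					       xdp->xsk.queue_id);
	else
		return virtnet_xsk_pool_disable(dev, xdp->xsk.queue_id);
}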

1 file changed: +53 −0 lines

drivers/net/virtio_net.c

@@ -298,6 +298,10 @@ struct send_queue {
 
 	/* Record whether sq is in reset state. */
 	bool reset;
+
+	struct xsk_buff_pool *xsk_pool;
+
+	dma_addr_t xsk_hdr_dma_addr;
 };
 
 /* Internal representation of a receive virtqueue */
@@ -501,6 +505,8 @@ struct virtio_net_common_hdr {
 	};
 };
 
+static struct virtio_net_common_hdr xsk_hdr;
+
 static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
 static void virtnet_sq_free_unused_buf_done(struct virtqueue *vq);
 static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp,
@@ -5556,6 +5562,29 @@ static int virtnet_rq_bind_xsk_pool(struct virtnet_info *vi, struct receive_queu
 	return err;
 }
 
+static int virtnet_sq_bind_xsk_pool(struct virtnet_info *vi,
+				    struct send_queue *sq,
+				    struct xsk_buff_pool *pool)
+{
+	int err, qindex;
+
+	qindex = sq - vi->sq;
+
+	virtnet_tx_pause(vi, sq);
+
+	err = virtqueue_reset(sq->vq, virtnet_sq_free_unused_buf);
+	if (err) {
+		netdev_err(vi->dev, "reset tx fail: tx queue index: %d err: %d\n", qindex, err);
+		pool = NULL;
+	}
+
+	sq->xsk_pool = pool;
+
+	virtnet_tx_resume(vi, sq);
+
+	return err;
+}
+
 static int virtnet_xsk_pool_enable(struct net_device *dev,
 				   struct xsk_buff_pool *pool,
 				   u16 qid)
@@ -5564,6 +5593,7 @@ static int virtnet_xsk_pool_enable(struct net_device *dev,
 	struct receive_queue *rq;
 	struct device *dma_dev;
 	struct send_queue *sq;
+	dma_addr_t hdr_dma;
 	int err, size;
 
 	if (vi->hdr_len > xsk_pool_get_headroom(pool))
@@ -5601,6 +5631,11 @@ static int virtnet_xsk_pool_enable(struct net_device *dev,
 	if (!rq->xsk_buffs)
 		return -ENOMEM;
 
+	hdr_dma = virtqueue_dma_map_single_attrs(sq->vq, &xsk_hdr, vi->hdr_len,
+						 DMA_TO_DEVICE, 0);
+	if (virtqueue_dma_mapping_error(sq->vq, hdr_dma))
+		return -ENOMEM;
+
 	err = xsk_pool_dma_map(pool, dma_dev, 0);
 	if (err)
 		goto err_xsk_map;
@@ -5609,11 +5644,24 @@ static int virtnet_xsk_pool_enable(struct net_device *dev,
 	if (err)
 		goto err_rq;
 
+	err = virtnet_sq_bind_xsk_pool(vi, sq, pool);
+	if (err)
+		goto err_sq;
+
+	/* Now, we do not support tx offload(such as tx csum), so all the tx
+	 * virtnet hdr is zero. So all the tx packets can share a single hdr.
+	 */
+	sq->xsk_hdr_dma_addr = hdr_dma;
+
 	return 0;
 
+err_sq:
+	virtnet_rq_bind_xsk_pool(vi, rq, NULL);
 err_rq:
 	xsk_pool_dma_unmap(pool, 0);
 err_xsk_map:
+	virtqueue_dma_unmap_single_attrs(rq->vq, hdr_dma, vi->hdr_len,
+					 DMA_TO_DEVICE, 0);
 	return err;
 }
 

@@ -5622,19 +5670,24 @@ static int virtnet_xsk_pool_disable(struct net_device *dev, u16 qid)
 	struct virtnet_info *vi = netdev_priv(dev);
 	struct xsk_buff_pool *pool;
 	struct receive_queue *rq;
+	struct send_queue *sq;
 	int err;
 
 	if (qid >= vi->curr_queue_pairs)
 		return -EINVAL;
 
+	sq = &vi->sq[qid];
 	rq = &vi->rq[qid];
 
 	pool = rq->xsk_pool;
 
 	err = virtnet_rq_bind_xsk_pool(vi, rq, NULL);
+	err |= virtnet_sq_bind_xsk_pool(vi, sq, NULL);
 
 	xsk_pool_dma_unmap(pool, 0);
 
+	virtqueue_dma_unmap_single_attrs(sq->vq, sq->xsk_hdr_dma_addr,
+					 vi->hdr_len, DMA_TO_DEVICE, 0);
 	kvfree(rq->xsk_buffs);
 
 	return err;
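
The shared header mapped at bind time pays off in the follow-up xmit patch: since no tx offloads are supported, every XSK frame can reuse the same all-zero virtio-net header. A hypothetical sketch (not part of this commit; virtnet_xsk_xmit_one_sketch is an illustrative name, and it assumes the queue has been switched to premapped DMA mode) of how a tx path can chain that header in front of each frame:

static int virtnet_xsk_xmit_one_sketch(struct virtnet_info *vi,
				       struct send_queue *sq,
				       struct xdp_desc *desc)
{
	struct scatterlist sg[2];
	dma_addr_t addr;

	/* XSK frames were already DMA-mapped by xsk_pool_dma_map(). */
	addr = xsk_buff_raw_get_dma(sq->xsk_pool, desc->addr);
	xsk_buff_raw_dma_sync_for_device(sq->xsk_pool, addr, desc->len);

	sg_init_table(sg, 2);

	/* Entry 0: the single shared virtio-net header mapped at bind time. */
	sg_dma_address(&sg[0]) = sq->xsk_hdr_dma_addr;
	sg_dma_len(&sg[0]) = vi->hdr_len;

	/* Entry 1: the XSK payload itself. */
	sg_dma_address(&sg[1]) = addr;
	sg_dma_len(&sg[1]) = desc->len;

	/* Assumes a premapped queue, so the virtio core consumes these DMA
	 * addresses instead of mapping pages; the data token encoding the
	 * length is likewise illustrative.
	 */
	return virtqueue_add_outbuf(sq->vq, sg, 2, (void *)(long)desc->len,
				    GFP_ATOMIC);
}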

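To exercise both paths end to end, userspace only has to create and destroy an AF_XDP socket on the queue. A minimal sketch using libxdp's xsk helpers (bind_xsk_tx is a hypothetical name; error handling trimmed):

#include <stdlib.h>
#include <sys/mman.h>
#include <xdp/xsk.h>

#define NUM_FRAMES	4096
#define FRAME_SIZE	XSK_UMEM__DEFAULT_FRAME_SIZE

int bind_xsk_tx(const char *ifname)
{
	struct xsk_ring_prod fill, tx;
	struct xsk_ring_cons comp, rx;
	struct xsk_umem *umem;
	struct xsk_socket *xsk;
	size_t len = NUM_FRAMES * FRAME_SIZE;
	void *area;

	area = mmap(NULL, len, PROT_READ | PROT_WRITE,
		    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (area == MAP_FAILED)
		return -1;

	/* Registers the UMEM; the kernel builds the xsk_buff_pool from it. */
	if (xsk_umem__create(&umem, area, len, &fill, &comp, NULL))
		return -1;

	/* Triggers ndo_bpf(XDP_SETUP_XSK_POOL) on the driver, i.e.
	 * virtnet_xsk_pool_enable() for queue 0.
	 */
	if (xsk_socket__create(&xsk, ifname, 0, umem, &rx, &tx, NULL))
		return -1;

	/* ... produce tx descriptors, kick with sendto() ... */

	xsk_socket__delete(xsk);	/* -> virtnet_xsk_pool_disable() */
	xsk_umem__delete(umem);
	munmap(area, len);
	return 0;
}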