@@ -298,6 +298,10 @@ struct send_queue {
 
 	/* Record whether sq is in reset state. */
 	bool reset;
+
+	struct xsk_buff_pool *xsk_pool;
+
+	dma_addr_t xsk_hdr_dma_addr;
 };
 
 /* Internal representation of a receive virtqueue */
@@ -501,6 +505,8 @@ struct virtio_net_common_hdr {
 	};
 };
 
+static struct virtio_net_common_hdr xsk_hdr;
+
 static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
 static void virtnet_sq_free_unused_buf_done(struct virtqueue *vq);
 static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp,
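
Note on the new file-scope xsk_hdr above: because no tx offloads are used for XSK (see the comment added later in virtnet_xsk_pool_enable()), the virtio-net header sent with every XSK frame is identical and all zero, so one statically allocated header can be DMA-mapped once and shared by all tx packets. Illustratively (not part of the patch), an all-zero header is equivalent to:

	struct virtio_net_hdr hdr = {
		.flags    = 0,                       /* no VIRTIO_NET_HDR_F_NEEDS_CSUM */
		.gso_type = VIRTIO_NET_HDR_GSO_NONE, /* GSO_NONE is defined as 0 */
	};
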
@@ -5556,6 +5562,29 @@ static int virtnet_rq_bind_xsk_pool(struct virtnet_info *vi, struct receive_queu
 	return err;
 }
 
+static int virtnet_sq_bind_xsk_pool(struct virtnet_info *vi,
+				    struct send_queue *sq,
+				    struct xsk_buff_pool *pool)
+{
+	int err, qindex;
+
+	qindex = sq - vi->sq;
+
+	virtnet_tx_pause(vi, sq);
+
+	err = virtqueue_reset(sq->vq, virtnet_sq_free_unused_buf);
+	if (err) {
+		netdev_err(vi->dev, "reset tx fail: tx queue index: %d err: %d\n", qindex, err);
+		pool = NULL;
+	}
+
+	sq->xsk_pool = pool;
+
+	virtnet_tx_resume(vi, sq);
+
+	return err;
+}
+
 static int virtnet_xsk_pool_enable(struct net_device *dev,
 				   struct xsk_buff_pool *pool,
 				   u16 qid)
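
The helper added above follows the same pattern as the existing rx bind: pause the tx queue, reset the virtqueue so unused buffers are recycled via virtnet_sq_free_unused_buf(), record the pool pointer, then resume. If the reset fails, pool is forced to NULL so the queue comes back up in normal (non-XSK) mode rather than half-bound. Passing NULL is also how unbinding works, which the disable path below relies on. A hypothetical caller, for illustration only (the real entry points are virtnet_xsk_pool_enable()/virtnet_xsk_pool_disable()):

	struct send_queue *sq = &vi->sq[qid];
	int err;

	/* attach: tx is paused, the vq is reset, then the pool is recorded */
	err = virtnet_sq_bind_xsk_pool(vi, sq, pool);
	if (err)
		return err;

	/* detach later: NULL runs the same pause/reset/resume sequence */
	virtnet_sq_bind_xsk_pool(vi, sq, NULL);
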
@@ -5564,6 +5593,7 @@ static int virtnet_xsk_pool_enable(struct net_device *dev,
 	struct receive_queue *rq;
 	struct device *dma_dev;
 	struct send_queue *sq;
+	dma_addr_t hdr_dma;
 	int err, size;
 
 	if (vi->hdr_len > xsk_pool_get_headroom(pool))
@@ -5601,6 +5631,11 @@ static int virtnet_xsk_pool_enable(struct net_device *dev,
 	if (!rq->xsk_buffs)
 		return -ENOMEM;
 
+	hdr_dma = virtqueue_dma_map_single_attrs(sq->vq, &xsk_hdr, vi->hdr_len,
+						 DMA_TO_DEVICE, 0);
+	if (virtqueue_dma_mapping_error(sq->vq, hdr_dma))
+		return -ENOMEM;
+
 	err = xsk_pool_dma_map(pool, dma_dev, 0);
 	if (err)
 		goto err_xsk_map;
@@ -5609,11 +5644,24 @@ static int virtnet_xsk_pool_enable(struct net_device *dev,
 	if (err)
 		goto err_rq;
 
+	err = virtnet_sq_bind_xsk_pool(vi, sq, pool);
+	if (err)
+		goto err_sq;
+
+	/* Now, we do not support tx offload(such as tx csum), so all the tx
+	 * virtnet hdr is zero. So all the tx packets can share a single hdr.
+	 */
+	sq->xsk_hdr_dma_addr = hdr_dma;
+
 	return 0;
 
+err_sq:
+	virtnet_rq_bind_xsk_pool(vi, rq, NULL);
 err_rq:
 	xsk_pool_dma_unmap(pool, 0);
 err_xsk_map:
+	virtqueue_dma_unmap_single_attrs(rq->vq, hdr_dma, vi->hdr_len,
+					 DMA_TO_DEVICE, 0);
 	return err;
 }
 
@@ -5622,19 +5670,24 @@ static int virtnet_xsk_pool_disable(struct net_device *dev, u16 qid)
 	struct virtnet_info *vi = netdev_priv(dev);
 	struct xsk_buff_pool *pool;
 	struct receive_queue *rq;
+	struct send_queue *sq;
 	int err;
 
 	if (qid >= vi->curr_queue_pairs)
 		return -EINVAL;
 
+	sq = &vi->sq[qid];
 	rq = &vi->rq[qid];
 
 	pool = rq->xsk_pool;
 
 	err = virtnet_rq_bind_xsk_pool(vi, rq, NULL);
+	err |= virtnet_sq_bind_xsk_pool(vi, sq, NULL);
 
 	xsk_pool_dma_unmap(pool, 0);
 
+	virtqueue_dma_unmap_single_attrs(sq->vq, sq->xsk_hdr_dma_addr,
+					 vi->hdr_len, DMA_TO_DEVICE, 0);
 	kvfree(rq->xsk_buffs);
 
 	return err;
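
Not covered by this diff: the XSK transmit path that actually consumes sq->xsk_hdr_dma_addr. A rough, illustrative-only sketch of how each tx descriptor chain could reuse the premapped shared header follows; the helper names are assumptions (the real xmit code lands in a separate patch of this series), and plain virtqueue_add_outbuf() stands in for the driver's premapped submission path.

	static void sketch_fill_dma(struct scatterlist *sg, dma_addr_t addr, u32 len)
	{
		sg->dma_address = addr;	/* buffers are already DMA-mapped */
		sg->length = len;
	}

	static int sketch_xsk_xmit_one(struct send_queue *sq,
				       struct xsk_buff_pool *pool,
				       struct xdp_desc *desc, u32 hdr_len)
	{
		struct scatterlist sg[2];
		dma_addr_t addr;

		addr = xsk_buff_raw_get_dma(pool, desc->addr);
		xsk_buff_raw_dma_sync_for_device(pool, addr, desc->len);

		sg_init_table(sg, 2);
		/* slot 0: the single all-zero header mapped at pool-enable time */
		sketch_fill_dma(&sg[0], sq->xsk_hdr_dma_addr, hdr_len);
		/* slot 1: the AF_XDP frame itself, straight from the umem pool */
		sketch_fill_dma(&sg[1], addr, desc->len);

		/* token simplified here; the real driver encodes the frame length */
		return virtqueue_add_outbuf(sq->vq, sg, 2, sq, GFP_ATOMIC);
	}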