@@ -126,6 +126,14 @@ static const struct virtnet_stat_desc virtnet_rq_stats_desc[] = {
 #define VIRTNET_SQ_STATS_LEN	ARRAY_SIZE(virtnet_sq_stats_desc)
 #define VIRTNET_RQ_STATS_LEN	ARRAY_SIZE(virtnet_rq_stats_desc)
 
+/* DMA information for the pages allocated at one time. */
+struct virtnet_rq_dma {
+	dma_addr_t addr;
+	u32 ref;
+	u16 len;
+	u16 need_sync;
+};
+
 /* Internal representation of a send virtqueue */
 struct send_queue {
 	/* Virtqueue associated with this send queue */
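This struct is stored at the head of every page the receive path allocates, so any buffer carved from the page can find its DMA metadata by walking back to the page head, which is what virtnet_rq_unmap() below relies on. A minimal sketch of that recovery step (the helper name is hypothetical, not part of the patch):

	/* Recover the per-page DMA metadata from any buffer carved out of a
	 * premapped page frag: struct virtnet_rq_dma occupies the first bytes
	 * of the page, and the mapped region of dma->len bytes starts
	 * immediately after it.
	 */
	static inline struct virtnet_rq_dma *buf_to_rq_dma(void *buf)
	{
		return (struct virtnet_rq_dma *)page_address(virt_to_head_page(buf));
	}
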
@@ -175,6 +183,12 @@ struct receive_queue {
 	char name[16];
 
 	struct xdp_rxq_info xdp_rxq;
+
+	/* Record the last dma info to free after new pages are allocated. */
+	struct virtnet_rq_dma *last_dma;
+
+	/* Do the DMA mapping ourselves */
+	bool do_dma;
 };
 
 /* This structure can contain rss message with maximum settings for indirection table and keysize
@@ -562,6 +576,156 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
 	return skb;
 }
 
+static void virtnet_rq_unmap(struct receive_queue *rq, void *buf, u32 len)
+{
+	struct page *page = virt_to_head_page(buf);
+	struct virtnet_rq_dma *dma;
+	void *head;
+	int offset;
+
+	head = page_address(page);
+
+	dma = head;
+
+	--dma->ref;
+
+	if (dma->ref) {
+		if (dma->need_sync && len) {
+			offset = buf - (head + sizeof(*dma));
+
+			virtqueue_dma_sync_single_range_for_cpu(rq->vq, dma->addr, offset,
+								len, DMA_FROM_DEVICE);
+		}
+
+		return;
+	}
+
+	virtqueue_dma_unmap_single_attrs(rq->vq, dma->addr, dma->len,
+					 DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+	put_page(page);
+}
+
+static void *virtnet_rq_get_buf(struct receive_queue *rq, u32 *len, void **ctx)
+{
+	void *buf;
+
+	buf = virtqueue_get_buf_ctx(rq->vq, len, ctx);
+	if (buf && rq->do_dma)
+		virtnet_rq_unmap(rq, buf, *len);
+
+	return buf;
+}
+
+static void *virtnet_rq_detach_unused_buf(struct receive_queue *rq)
+{
+	void *buf;
+
+	buf = virtqueue_detach_unused_buf(rq->vq);
+	if (buf && rq->do_dma)
+		virtnet_rq_unmap(rq, buf, 0);
+
+	return buf;
+}
+
+static void virtnet_rq_init_one_sg(struct receive_queue *rq, void *buf, u32 len)
+{
+	struct virtnet_rq_dma *dma;
+	dma_addr_t addr;
+	u32 offset;
+	void *head;
+
+	if (!rq->do_dma) {
+		sg_init_one(rq->sg, buf, len);
+		return;
+	}
+
+	head = page_address(rq->alloc_frag.page);
+
+	offset = buf - head;
+
+	dma = head;
+
+	addr = dma->addr - sizeof(*dma) + offset;
+
+	sg_init_table(rq->sg, 1);
+	rq->sg[0].dma_address = addr;
+	rq->sg[0].length = len;
+}
+
+static void *virtnet_rq_alloc(struct receive_queue *rq, u32 size, gfp_t gfp)
+{
+	struct page_frag *alloc_frag = &rq->alloc_frag;
+	struct virtnet_rq_dma *dma;
+	void *buf, *head;
+	dma_addr_t addr;
+
+	if (unlikely(!skb_page_frag_refill(size, alloc_frag, gfp)))
+		return NULL;
+
+	head = page_address(alloc_frag->page);
+
+	if (rq->do_dma) {
+		dma = head;
+
+		/* A zero offset means this is a freshly allocated page. */
+		if (!alloc_frag->offset) {
+			if (rq->last_dma) {
+				/* Now that a new page has been allocated,
+				 * the previous dma will no longer be used,
+				 * so it can be unmapped once its ref drops
+				 * to 0.
+				 */
+				virtnet_rq_unmap(rq, rq->last_dma, 0);
+				rq->last_dma = NULL;
+			}
+
+			dma->len = alloc_frag->size - sizeof(*dma);
+
+			addr = virtqueue_dma_map_single_attrs(rq->vq, dma + 1,
+							      dma->len, DMA_FROM_DEVICE, 0);
+			if (virtqueue_dma_mapping_error(rq->vq, addr))
+				return NULL;
+
+			dma->addr = addr;
+			dma->need_sync = virtqueue_dma_need_sync(rq->vq, addr);
+
+			/* Take a reference on dma to keep the mapping alive
+			 * through error handling. It is dropped once the
+			 * page is no longer in use.
+			 */
+			get_page(alloc_frag->page);
+			dma->ref = 1;
+			alloc_frag->offset = sizeof(*dma);
+
+			rq->last_dma = dma;
+		}
+
+		++dma->ref;
+	}
+
+	buf = head + alloc_frag->offset;
+
+	get_page(alloc_frag->page);
+	alloc_frag->offset += size;
+
+	return buf;
+}
+
+static void virtnet_rq_set_premapped(struct virtnet_info *vi)
+{
+	int i;
+
+	/* disable for big mode */
+	if (!vi->mergeable_rx_bufs && vi->big_packets)
+		return;
+
+	for (i = 0; i < vi->max_queue_pairs; i++) {
+		if (virtqueue_set_dma_premapped(vi->rq[i].vq))
+			continue;
+
+		vi->rq[i].do_dma = true;
+	}
+}
+
 static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi)
 {
 	unsigned int len;
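Two details of the block above deserve a note. First, the address arithmetic in virtnet_rq_init_one_sg(): dma->addr maps the region that begins at head + sizeof(*dma) (the mapping was created for dma + 1 in virtnet_rq_alloc()), so the device address of a buffer at head + offset is dma->addr + offset - sizeof(*dma), which is exactly what the function computes before storing it directly into rq->sg[0].dma_address for the premapped virtqueue. Second, virtnet_rq_alloc() and virtnet_rq_unmap() together maintain a reference-count invariant: dma->ref equals the number of buffers from the page still in flight, plus one for rq->last_dma while the page remains the active allocation frag. A minimal sketch of that invariant as a debug check (the helper and its 'outstanding' argument are hypothetical, not part of the patch):

	static inline bool virtnet_rq_dma_ref_ok(struct receive_queue *rq,
						 struct virtnet_rq_dma *dma,
						 u32 outstanding)
	{
		/* The extra reference belongs to rq->last_dma; it is dropped
		 * in virtnet_rq_alloc() when a new page replaces this one,
		 * or in free_receive_page_frags() on teardown.
		 */
		return dma->ref == outstanding + (rq->last_dma == dma ? 1 : 0);
	}
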
@@ -917,7 +1081,7 @@ static struct page *xdp_linearize_page(struct receive_queue *rq,
 	void *buf;
 	int off;
 
-	buf = virtqueue_get_buf(rq->vq, &buflen);
+	buf = virtnet_rq_get_buf(rq, &buflen, NULL);
 	if (unlikely(!buf))
 		goto err_buf;
 
@@ -1137,7 +1301,7 @@ static void mergeable_buf_free(struct receive_queue *rq, int num_buf,
 	int len;
 
 	while (num_buf-- > 1) {
-		buf = virtqueue_get_buf(rq->vq, &len);
+		buf = virtnet_rq_get_buf(rq, &len, NULL);
 		if (unlikely(!buf)) {
 			pr_debug("%s: rx error: %d buffers missing\n",
 				 dev->name, num_buf);
@@ -1245,7 +1409,7 @@ static int virtnet_build_xdp_buff_mrg(struct net_device *dev,
 		return -EINVAL;
 
 	while (--*num_buf > 0) {
-		buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx);
+		buf = virtnet_rq_get_buf(rq, &len, &ctx);
 		if (unlikely(!buf)) {
 			pr_debug("%s: rx error: %d buffers out of %d missing\n",
 				 dev->name, *num_buf,
@@ -1474,7 +1638,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 	while (--num_buf) {
 		int num_skb_frags;
 
-		buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx);
+		buf = virtnet_rq_get_buf(rq, &len, &ctx);
 		if (unlikely(!buf)) {
 			pr_debug("%s: rx error: %d buffers out of %d missing\n",
 				 dev->name, num_buf,
@@ -1633,7 +1797,6 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
 static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
 			     gfp_t gfp)
 {
-	struct page_frag *alloc_frag = &rq->alloc_frag;
 	char *buf;
 	unsigned int xdp_headroom = virtnet_get_headroom(vi);
 	void *ctx = (void *)(unsigned long)xdp_headroom;
@@ -1642,17 +1805,21 @@ static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
 
 	len = SKB_DATA_ALIGN(len) +
 	      SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
-	if (unlikely(!skb_page_frag_refill(len, alloc_frag, gfp)))
+
+	buf = virtnet_rq_alloc(rq, len, gfp);
+	if (unlikely(!buf))
 		return -ENOMEM;
 
-	buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
-	get_page(alloc_frag->page);
-	alloc_frag->offset += len;
-	sg_init_one(rq->sg, buf + VIRTNET_RX_PAD + xdp_headroom,
-		    vi->hdr_len + GOOD_PACKET_LEN);
+	virtnet_rq_init_one_sg(rq, buf + VIRTNET_RX_PAD + xdp_headroom,
+			       vi->hdr_len + GOOD_PACKET_LEN);
+
 	err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
-	if (err < 0)
+	if (err < 0) {
+		if (rq->do_dma)
+			virtnet_rq_unmap(rq, buf, 0);
 		put_page(virt_to_head_page(buf));
+	}
+
 	return err;
 }
 
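The reworked error path above has to drop two references per buffer: virtnet_rq_alloc() took one page reference and one dma->ref, and both must be released when virtqueue_add_inbuf_ctx() fails. The same pattern repeats in add_recvbuf_mergeable() below; condensed as a sketch (hypothetical helper, the patch open-codes it):

	/* Release a buffer that was never posted to the virtqueue. len == 0
	 * because the device never wrote the buffer, so virtnet_rq_unmap()
	 * can skip the sync-for-cpu.
	 */
	static void virtnet_rq_put_buf(struct receive_queue *rq, void *buf)
	{
		if (rq->do_dma)
			virtnet_rq_unmap(rq, buf, 0);	/* drops dma->ref */
		put_page(virt_to_head_page(buf));	/* drops the page reference */
	}
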
@@ -1729,23 +1896,22 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi,
 	unsigned int headroom = virtnet_get_headroom(vi);
 	unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
 	unsigned int room = SKB_DATA_ALIGN(headroom + tailroom);
-	char *buf;
+	unsigned int len, hole;
 	void *ctx;
+	char *buf;
 	int err;
-	unsigned int len, hole;
 
 	/* Extra tailroom is needed to satisfy XDP's assumption. This
 	 * means rx frags coalescing won't work, but consider we've
 	 * disabled GSO for XDP, it won't be a big issue.
 	 */
 	len = get_mergeable_buf_len(rq, &rq->mrg_avg_pkt_len, room);
-	if (unlikely(!skb_page_frag_refill(len + room, alloc_frag, gfp)))
+
+	buf = virtnet_rq_alloc(rq, len + room, gfp);
+	if (unlikely(!buf))
 		return -ENOMEM;
 
-	buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
 	buf += headroom; /* advance address leaving hole at front of pkt */
-	get_page(alloc_frag->page);
-	alloc_frag->offset += len + room;
 	hole = alloc_frag->size - alloc_frag->offset;
 	if (hole < len + room) {
 		/* To avoid internal fragmentation, if there is very likely not
@@ -1759,11 +1925,15 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi,
 		alloc_frag->offset += hole;
 	}
 
-	sg_init_one(rq->sg, buf, len);
+	virtnet_rq_init_one_sg(rq, buf, len);
+
 	ctx = mergeable_len_to_ctx(len + room, headroom);
 	err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
-	if (err < 0)
+	if (err < 0) {
+		if (rq->do_dma)
+			virtnet_rq_unmap(rq, buf, 0);
 		put_page(virt_to_head_page(buf));
+	}
 
 	return err;
 }
@@ -1884,13 +2054,13 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
 		void *ctx;
 
 		while (stats.packets < budget &&
-		       (buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx))) {
+		       (buf = virtnet_rq_get_buf(rq, &len, &ctx))) {
 			receive_buf(vi, rq, buf, len, ctx, xdp_xmit, &stats);
 			stats.packets++;
 		}
 	} else {
 		while (stats.packets < budget &&
-		       (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
+		       (buf = virtnet_rq_get_buf(rq, &len, NULL)) != NULL) {
 			receive_buf(vi, rq, buf, len, NULL, xdp_xmit, &stats);
 			stats.packets++;
 		}
@@ -3662,8 +3832,11 @@ static void free_receive_page_frags(struct virtnet_info *vi)
 {
 	int i;
 	for (i = 0; i < vi->max_queue_pairs; i++)
-		if (vi->rq[i].alloc_frag.page)
+		if (vi->rq[i].alloc_frag.page) {
+			if (vi->rq[i].do_dma && vi->rq[i].last_dma)
+				virtnet_rq_unmap(&vi->rq[i], vi->rq[i].last_dma, 0);
 			put_page(vi->rq[i].alloc_frag.page);
+		}
 }
 
 static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf)
@@ -3700,9 +3873,10 @@ static void free_unused_bufs(struct virtnet_info *vi)
 	}
 
 	for (i = 0; i < vi->max_queue_pairs; i++) {
-		struct virtqueue *vq = vi->rq[i].vq;
-		while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
-			virtnet_rq_free_unused_buf(vq, buf);
+		struct receive_queue *rq = &vi->rq[i];
+
+		while ((buf = virtnet_rq_detach_unused_buf(rq)) != NULL)
+			virtnet_rq_free_unused_buf(rq->vq, buf);
 		cond_resched();
 	}
 }
@@ -3876,6 +4050,8 @@ static int init_vqs(struct virtnet_info *vi)
 	if (ret)
 		goto err_free;
 
+	virtnet_rq_set_premapped(vi);
+
 	cpus_read_lock();
 	virtnet_set_affinity(vi);
 	cpus_read_unlock();
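Placement matters here: virtqueue_set_dma_premapped() is expected to succeed only on a ring that does not yet contain any buffers (as understood from the virtio core API), so virtnet_rq_set_premapped() runs right after the virtqueues are created and before the first receive fill. Queues for which the call fails simply keep do_dma == false and continue to use the pre-patch mapping path.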