@@ -15,6 +15,13 @@ static int ionic_maybe_stop_tx(struct ionic_queue *q, int ndescs);
 static dma_addr_t ionic_tx_map_single(struct ionic_queue *q,
 				      void *data, size_t len);
 
+static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q,
+				    const skb_frag_t *frag,
+				    size_t offset, size_t len);
+
+static void ionic_tx_desc_unmap_bufs(struct ionic_queue *q,
+				     struct ionic_desc_info *desc_info);
+
 static void ionic_tx_clean(struct ionic_queue *q,
 			   struct ionic_desc_info *desc_info,
 			   struct ionic_cq_info *cq_info,
@@ -313,6 +320,7 @@ static void ionic_xdp_tx_desc_clean(struct ionic_queue *q,
 	unsigned int nbufs = desc_info->nbufs;
 	struct ionic_buf_info *buf_info;
 	struct device *dev = q->dev;
+	int i;
 
 	if (!nbufs)
 		return;
@@ -324,6 +332,15 @@ static void ionic_xdp_tx_desc_clean(struct ionic_queue *q,
 		__free_pages(buf_info->page, 0);
 	buf_info->page = NULL;
 
+	buf_info++;
+	for (i = 1; i < nbufs + 1 && buf_info->page; i++, buf_info++) {
+		dma_unmap_page(dev, buf_info->dma_addr,
+			       buf_info->len, DMA_TO_DEVICE);
+		if (desc_info->act == XDP_TX)
+			__free_pages(buf_info->page, 0);
+		buf_info->page = NULL;
+	}
+
 	if (desc_info->act == XDP_REDIRECT)
 		xdp_return_frame(desc_info->xdpf);
 
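The new loop extends the existing single-buffer cleanup in ionic_xdp_tx_desc_clean(): slot 0 of the buffer array is the linear buffer handled just above the hunk, while slots 1 onward hold frag pages that are unmapped and, for XDP_TX, also freed; a NULL page terminates the walk alongside the count. A minimal userspace sketch of that walk, with `struct buf` and the printed stubs standing in for ionic_buf_info and the real DMA/page calls:

```c
#include <stdio.h>
#include <stddef.h>

/* Hypothetical stand-in for ionic_buf_info: a page pointer marks a live slot. */
struct buf { void *page; unsigned int len; };

/* Walk frag slots starting at 1; a NULL page ends the walk early, mirroring
 * the "i < nbufs + 1 && buf_info->page" condition in the driver's loop. */
static void clean_frags(struct buf *bufs, unsigned int nbufs, int is_tx)
{
	struct buf *b = &bufs[1];
	unsigned int i;

	for (i = 1; i < nbufs + 1 && b->page; i++, b++) {
		printf("unmap frag %u, len %u\n", i, b->len);	/* dma_unmap_page() */
		if (is_tx)
			printf("free page %u\n", i);		/* __free_pages() */
		b->page = NULL;
	}
}

int main(void)
{
	char p0, p1;
	struct buf bufs[3] = { { &p0, 1500 }, { &p1, 800 }, { NULL, 0 } };

	clean_frags(bufs, 2, 1);	/* linear buffer + one frag, XDP_TX case */
	return 0;
}
```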
@@ -364,8 +381,38 @@ static int ionic_xdp_post_frame(struct net_device *netdev,
 	desc_info->xdpf = frame;
 	desc_info->act = act;
 
+	if (xdp_frame_has_frags(frame)) {
+		struct ionic_txq_sg_elem *elem;
+		struct skb_shared_info *sinfo;
+		struct ionic_buf_info *bi;
+		skb_frag_t *frag;
+		int i;
+
+		bi = &buf_info[1];
+		sinfo = xdp_get_shared_info_from_frame(frame);
+		frag = sinfo->frags;
+		elem = desc_info->txq_sg_desc->elems;
+		for (i = 0; i < sinfo->nr_frags; i++, frag++, bi++) {
+			dma_addr = ionic_tx_map_frag(q, frag, 0, skb_frag_size(frag));
+			if (dma_mapping_error(q->dev, dma_addr)) {
+				stats->dma_map_err++;
+				ionic_tx_desc_unmap_bufs(q, desc_info);
+				return -EIO;
+			}
+			bi->dma_addr = dma_addr;
+			bi->len = skb_frag_size(frag);
+			bi->page = skb_frag_page(frag);
+
+			elem->addr = cpu_to_le64(bi->dma_addr);
+			elem->len = cpu_to_le16(bi->len);
+			elem++;
+
+			desc_info->nbufs++;
+		}
+	}
+
 	cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_CSUM_NONE,
-				  0, 0, buf_info->dma_addr);
+				  0, (desc_info->nbufs - 1), buf_info->dma_addr);
 	desc->cmd = cpu_to_le64(cmd);
 	desc->len = cpu_to_le16(len);
 	desc->csum_start = 0;
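For a multi-frag frame, the first buffer's address still rides in the main TX descriptor; each additional frag is DMA-mapped and mirrored into a scatter-gather element, and the command word encodes `desc_info->nbufs - 1` as the SG element count (everything except the linear buffer). A hedged userspace sketch of that fill pattern, using glibc's htole64()/htole16() in place of cpu_to_le64()/cpu_to_le16() and a simplified stand-in for the ionic SG element layout:

```c
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical, simplified stand-in for struct ionic_txq_sg_elem. */
struct sg_elem { uint64_t addr; uint16_t len; };

/* Fill SG elements for buffers 1..nbufs-1; slot 0 (the linear buffer)
 * goes into the main descriptor, so the command encodes nbufs - 1 as
 * the SG count, matching (desc_info->nbufs - 1) in the hunk above. */
static unsigned int fill_sg(struct sg_elem *elems,
			    const uint64_t *dma, const uint16_t *len,
			    unsigned int nbufs)
{
	unsigned int i;

	for (i = 1; i < nbufs; i++) {
		elems[i - 1].addr = htole64(dma[i]);	/* cpu_to_le64() */
		elems[i - 1].len = htole16(len[i]);	/* cpu_to_le16() */
	}
	return nbufs - 1;	/* SG count encoded into the descriptor cmd */
}

int main(void)
{
	uint64_t dma[3] = { 0x1000, 0x2000, 0x3000 };
	uint16_t len[3] = { 1500, 1024, 512 };
	struct sg_elem elems[2];
	unsigned int nsge = fill_sg(elems, dma, len, 3);

	printf("%u sg elems, first addr 0x%llx\n", nsge,
	       (unsigned long long)le64toh(elems[0].addr));
	return 0;
}
```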
@@ -449,18 +496,58 @@ static bool ionic_run_xdp(struct ionic_rx_stats *stats,
 	struct ionic_queue *txq;
 	struct netdev_queue *nq;
 	struct xdp_frame *xdpf;
+	int remain_len;
+	int frag_len;
 	int err = 0;
 
 	xdp_init_buff(&xdp_buf, IONIC_PAGE_SIZE, rxq->xdp_rxq_info);
+	frag_len = min_t(u16, len, IONIC_XDP_MAX_LINEAR_MTU + VLAN_ETH_HLEN);
 	xdp_prepare_buff(&xdp_buf, ionic_rx_buf_va(buf_info),
-			 XDP_PACKET_HEADROOM, len, false);
+			 XDP_PACKET_HEADROOM, frag_len, false);
 
 	dma_sync_single_range_for_cpu(rxq->dev, ionic_rx_buf_pa(buf_info),
 				      XDP_PACKET_HEADROOM, len,
 				      DMA_FROM_DEVICE);
 
 	prefetchw(&xdp_buf.data_hard_start);
 
+	/* We limit MTU size to one buffer if !xdp_has_frags, so
+	 * if the recv len is bigger than one buffer
+	 * then we know we have frag info to gather
+	 */
+	remain_len = len - frag_len;
+	if (remain_len) {
+		struct skb_shared_info *sinfo;
+		struct ionic_buf_info *bi;
+		skb_frag_t *frag;
+
+		bi = buf_info;
+		sinfo = xdp_get_shared_info_from_buff(&xdp_buf);
+		sinfo->nr_frags = 0;
+		sinfo->xdp_frags_size = 0;
+		xdp_buff_set_frags_flag(&xdp_buf);
+
+		do {
+			if (unlikely(sinfo->nr_frags >= MAX_SKB_FRAGS)) {
+				err = -ENOSPC;
+				goto out_xdp_abort;
+			}
+
+			frag = &sinfo->frags[sinfo->nr_frags];
+			sinfo->nr_frags++;
+			bi++;
+			frag_len = min_t(u16, remain_len, ionic_rx_buf_size(bi));
+			dma_sync_single_range_for_cpu(rxq->dev, ionic_rx_buf_pa(bi),
+						      0, frag_len, DMA_FROM_DEVICE);
+			skb_frag_fill_page_desc(frag, bi->page, 0, frag_len);
+			sinfo->xdp_frags_size += frag_len;
+			remain_len -= frag_len;
+
+			if (page_is_pfmemalloc(bi->page))
+				xdp_buff_set_frag_pfmemalloc(&xdp_buf);
+		} while (remain_len > 0);
+	}
+
 	xdp_action = bpf_prog_run_xdp(xdp_prog, &xdp_buf);
 
 	switch (xdp_action) {
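The RX gather path caps the linear part of the xdp_buff at IONIC_XDP_MAX_LINEAR_MTU + VLAN_ETH_HLEN, then carves the remaining length into frags, each bounded by the per-buffer size, bailing out with -ENOSPC once MAX_SKB_FRAGS is reached. A small standalone sketch of just that arithmetic; the constants here are illustrative stand-ins, not the driver's real values:

```c
#include <stdio.h>

/* Stand-in values for illustration only; the real numbers come from
 * IONIC_XDP_MAX_LINEAR_MTU, ionic_rx_buf_size() and MAX_SKB_FRAGS. */
#define LINEAR_CAP	3498
#define BUF_SIZE	2048
#define MAX_FRAGS	17

static int min_int(int a, int b) { return a < b ? a : b; }

/* Mirror the gather loop: cap the linear part, then carve the remainder
 * into frags until nothing is left or the frag slots run out. */
static int split(int len)
{
	int frag_len = min_int(len, LINEAR_CAP);
	int remain_len = len - frag_len;
	int nr_frags = 0;

	printf("linear: %d\n", frag_len);
	while (remain_len > 0) {
		if (nr_frags >= MAX_FRAGS)
			return -1;	/* -ENOSPC in the driver */
		frag_len = min_int(remain_len, BUF_SIZE);
		printf("frag %d: %d\n", nr_frags, frag_len);
		nr_frags++;
		remain_len -= frag_len;
	}
	return nr_frags;
}

int main(void)
{
	split(9000);	/* e.g. a jumbo frame: one linear part + 3 frags */
	return 0;
}
```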