@@ -187,24 +187,28 @@ static void igc_clean_tx_ring(struct igc_ring *tx_ring)
 {
 	u16 i = tx_ring->next_to_clean;
 	struct igc_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
+	u32 xsk_frames = 0;
 
 	while (i != tx_ring->next_to_use) {
 		union igc_adv_tx_desc *eop_desc, *tx_desc;
 
 		switch (tx_buffer->type) {
+		case IGC_TX_BUFFER_TYPE_XSK:
+			xsk_frames++;
+			break;
 		case IGC_TX_BUFFER_TYPE_XDP:
 			xdp_return_frame(tx_buffer->xdpf);
+			igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
 			break;
 		case IGC_TX_BUFFER_TYPE_SKB:
 			dev_kfree_skb_any(tx_buffer->skb);
+			igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
 			break;
 		default:
 			netdev_warn_once(tx_ring->netdev, "Unknown Tx buffer type\n");
 			break;
 		}
 
-		igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
-
 		/* check for eop_desc to determine the end of the packet */
 		eop_desc = tx_buffer->next_to_watch;
 		tx_desc = IGC_TX_DESC(tx_ring, i);
@@ -234,6 +238,9 @@ static void igc_clean_tx_ring(struct igc_ring *tx_ring)
 		}
 	}
 
+	if (tx_ring->xsk_pool && xsk_frames)
+		xsk_tx_completed(tx_ring->xsk_pool, xsk_frames);
+
 	/* reset BQL for queue */
 	netdev_tx_reset_queue(txring_txq(tx_ring));
 
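Taken together, these hunks add AF_XDP zero-copy transmit support to igc. In this first one, igc_clean_tx_ring() learns about the new IGC_TX_BUFFER_TYPE_XSK buffer type: such frames live in the socket's umem, whose DMA mapping belongs to the xsk_buff_pool rather than to the driver, so the formerly unconditional igc_unmap_tx_buffer() call moves into the XDP and skb cases only. XSK frames are merely counted, and the count is reported back to the socket in one xsk_tx_completed() batch once the ring walk is done.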
@@ -676,6 +683,8 @@ static void igc_configure_tx_ring(struct igc_adapter *adapter,
 	u64 tdba = ring->dma;
 	u32 txdctl = 0;
 
+	ring->xsk_pool = igc_get_xsk_pool(adapter, ring);
+
 	/* disable the queue */
 	wr32(IGC_TXDCTL(reg_idx), 0);
 	wrfl();
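igc_configure_tx_ring() now resolves the queue's XSK pool before programming the hardware, so whichever pool is bound to this queue index is picked up every time the ring is (re)configured. The helper itself is outside this diff; a minimal sketch of the shape such a lookup usually takes, where igc_xdp_is_enabled() and the IGC_RING_FLAG_AF_XDP_ZC flag are assumptions about the surrounding driver:

/* Sketch only: return the XSK pool bound to this ring's queue id, or
 * NULL when zero-copy is not in use for it.
 */
static struct xsk_buff_pool *igc_get_xsk_pool(struct igc_adapter *adapter,
					      struct igc_ring *ring)
{
	/* Zero-copy needs an XDP program and a ring opted in to ZC. */
	if (!igc_xdp_is_enabled(adapter) ||
	    !test_bit(IGC_RING_FLAG_AF_XDP_ZC, &ring->flags))
		return NULL;

	/* Core helper: the pool a socket registered for this queue. */
	return xsk_get_pool_from_qid(adapter->netdev, ring->queue_index);
}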
@@ -2509,6 +2518,65 @@ static void igc_update_tx_stats(struct igc_q_vector *q_vector,
 	q_vector->tx.total_packets += packets;
 }
 
+static void igc_xdp_xmit_zc(struct igc_ring *ring)
+{
+	struct xsk_buff_pool *pool = ring->xsk_pool;
+	struct netdev_queue *nq = txring_txq(ring);
+	union igc_adv_tx_desc *tx_desc = NULL;
+	int cpu = smp_processor_id();
+	u16 ntu = ring->next_to_use;
+	struct xdp_desc xdp_desc;
+	u16 budget;
+
+	if (!netif_carrier_ok(ring->netdev))
+		return;
+
+	__netif_tx_lock(nq, cpu);
+
+	budget = igc_desc_unused(ring);
+
+	while (xsk_tx_peek_desc(pool, &xdp_desc) && budget--) {
+		u32 cmd_type, olinfo_status;
+		struct igc_tx_buffer *bi;
+		dma_addr_t dma;
+
+		cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT |
+			   IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD |
+			   xdp_desc.len;
+		olinfo_status = xdp_desc.len << IGC_ADVTXD_PAYLEN_SHIFT;
+
+		dma = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
+		xsk_buff_raw_dma_sync_for_device(pool, dma, xdp_desc.len);
+
+		tx_desc = IGC_TX_DESC(ring, ntu);
+		tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
+		tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
+		tx_desc->read.buffer_addr = cpu_to_le64(dma);
+
+		bi = &ring->tx_buffer_info[ntu];
+		bi->type = IGC_TX_BUFFER_TYPE_XSK;
+		bi->protocol = 0;
+		bi->bytecount = xdp_desc.len;
+		bi->gso_segs = 1;
+		bi->time_stamp = jiffies;
+		bi->next_to_watch = tx_desc;
+
+		netdev_tx_sent_queue(txring_txq(ring), xdp_desc.len);
+
+		ntu++;
+		if (ntu == ring->count)
+			ntu = 0;
+	}
+
+	ring->next_to_use = ntu;
+	if (tx_desc) {
+		igc_flush_tx_descriptors(ring);
+		xsk_tx_release(pool);
+	}
+
+	__netif_tx_unlock(nq);
+}
+
 /**
  * igc_clean_tx_irq - Reclaim resources after transmit completes
  * @q_vector: pointer to q_vector containing needed info
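igc_xdp_xmit_zc() is the new zero-copy transmit path, invoked from the completion handler below. Under the netdev queue lock it peeks descriptors off the pool's TX ring within an igc_desc_unused() budget, builds one advanced data descriptor per frame (the DMA address comes straight out of the umem via xsk_buff_raw_get_dma(), so nothing is mapped here), tags the slot in tx_buffer_info as IGC_TX_BUFFER_TYPE_XSK for the completion path, and accounts the bytes for BQL with netdev_tx_sent_queue(). The tail is bumped once, after the loop, and xsk_tx_release() kicks the socket. igc_flush_tx_descriptors() is an existing helper not shown in this diff; presumably along these lines:

/* Sketch, assuming the usual pattern: order the descriptor writes
 * before hardware can observe the new tail value.
 */
static void igc_flush_tx_descriptors(struct igc_ring *ring)
{
	wmb();
	writel(ring->next_to_use, ring->tail);
}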
@@ -2525,6 +2593,7 @@ static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget)
 	unsigned int i = tx_ring->next_to_clean;
 	struct igc_tx_buffer *tx_buffer;
 	union igc_adv_tx_desc *tx_desc;
+	u32 xsk_frames = 0;
 
 	if (test_bit(__IGC_DOWN, &adapter->state))
 		return true;
@@ -2555,19 +2624,22 @@ static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget)
 		total_packets += tx_buffer->gso_segs;
 
 		switch (tx_buffer->type) {
+		case IGC_TX_BUFFER_TYPE_XSK:
+			xsk_frames++;
+			break;
 		case IGC_TX_BUFFER_TYPE_XDP:
 			xdp_return_frame(tx_buffer->xdpf);
+			igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
 			break;
 		case IGC_TX_BUFFER_TYPE_SKB:
 			napi_consume_skb(tx_buffer->skb, napi_budget);
+			igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
 			break;
 		default:
 			netdev_warn_once(tx_ring->netdev, "Unknown Tx buffer type\n");
 			break;
 		}
 
-		igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
-
 		/* clear last DMA location and unmap remaining buffers */
 		while (tx_desc != eop_desc) {
 			tx_buffer++;
@@ -2609,6 +2681,14 @@ static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget)
 
 	igc_update_tx_stats(q_vector, total_packets, total_bytes);
 
+	if (tx_ring->xsk_pool) {
+		if (xsk_frames)
+			xsk_tx_completed(tx_ring->xsk_pool, xsk_frames);
+		if (xsk_uses_need_wakeup(tx_ring->xsk_pool))
+			xsk_set_tx_need_wakeup(tx_ring->xsk_pool);
+		igc_xdp_xmit_zc(tx_ring);
+	}
+
 	if (test_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) {
 		struct igc_hw *hw = &adapter->hw;
 
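igc_clean_tx_irq() mirrors the igc_clean_tx_ring() change: completed XSK frames are counted rather than unmapped, and the unmap call moves into the XDP and skb cases. After the stats update, the batch of completed frames is returned with xsk_tx_completed(), the need_wakeup flag is set so userspace knows it must kick the socket for further TX, and igc_xdp_xmit_zc() runs to drain any descriptors that queued up in the meantime, so zero-copy transmission is driven from the same NAPI context that handles its completions.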
@@ -6336,6 +6416,31 @@ void igc_enable_rx_ring(struct igc_ring *ring)
 	igc_alloc_rx_buffers(ring, igc_desc_unused(ring));
 }
 
+static void igc_disable_tx_ring_hw(struct igc_ring *ring)
+{
+	struct igc_hw *hw = &ring->q_vector->adapter->hw;
+	u8 idx = ring->reg_idx;
+	u32 txdctl;
+
+	txdctl = rd32(IGC_TXDCTL(idx));
+	txdctl &= ~IGC_TXDCTL_QUEUE_ENABLE;
+	txdctl |= IGC_TXDCTL_SWFLUSH;
+	wr32(IGC_TXDCTL(idx), txdctl);
+}
+
+void igc_disable_tx_ring(struct igc_ring *ring)
+{
+	igc_disable_tx_ring_hw(ring);
+	igc_clean_tx_ring(ring);
+}
+
+void igc_enable_tx_ring(struct igc_ring *ring)
+{
+	struct igc_adapter *adapter = ring->q_vector->adapter;
+
+	igc_configure_tx_ring(adapter, ring);
+}
+
 /**
  * igc_init_module - Driver Registration Routine
  *
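The last hunk exposes igc_disable_tx_ring()/igc_enable_tx_ring() so code outside this file (presumably the AF_XDP pool setup path) can quiesce a TX queue: disable clears the queue-enable bit and sets the SWFLUSH bit in TXDCTL, then igc_clean_tx_ring() reclaims anything still in flight; enable simply re-runs igc_configure_tx_ring(), which after this patch also refreshes ring->xsk_pool. A hypothetical caller bracketing a pool change might look like:

/* Hypothetical usage: quiesce the queue across an XSK pool bind/unbind
 * so igc_configure_tx_ring() re-reads ring->xsk_pool on the way up.
 */
igc_disable_tx_ring(tx_ring);
/* ...attach or detach the xsk_buff_pool for this queue index... */
igc_enable_tx_ring(tx_ring);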