@@ -7,6 +7,7 @@
 #include <linux/mm.h>
 #include <linux/bpf_trace.h>
 #include <net/xdp.h>
+#include "ice_txrx_lib.h"
 #include "ice_lib.h"
 #include "ice.h"
 #include "ice_dcb_lib.h"
@@ -396,37 +397,6 @@ int ice_setup_rx_ring(struct ice_ring *rx_ring)
 	return -ENOMEM;
 }
 
-/**
- * ice_release_rx_desc - Store the new tail and head values
- * @rx_ring: ring to bump
- * @val: new head index
- */
-static void ice_release_rx_desc(struct ice_ring *rx_ring, u32 val)
-{
-	u16 prev_ntu = rx_ring->next_to_use;
-
-	rx_ring->next_to_use = val;
-
-	/* update next to alloc since we have filled the ring */
-	rx_ring->next_to_alloc = val;
-
-	/* QRX_TAIL will be updated with any tail value, but hardware ignores
-	 * the lower 3 bits. This makes it so we only bump tail on meaningful
-	 * boundaries. Also, this allows us to bump tail on intervals of 8 up to
-	 * the budget depending on the current traffic load.
-	 */
-	val &= ~0x7;
-	if (prev_ntu != val) {
-		/* Force memory writes to complete before letting h/w
-		 * know there are new descriptors to fetch. (Only
-		 * applicable for weak-ordered memory model archs,
-		 * such as IA-64).
-		 */
-		wmb();
-		writel(val, rx_ring->tail);
-	}
-}
-
 /**
  * ice_rx_offset - Return expected offset into page to access data
  * @rx_ring: Ring we are requesting offset of
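
The `val &= ~0x7` step in the helper removed above is the interesting bit: QRX_TAIL ignores the low three bits, so the tail register is only written when next_to_use crosses an 8-descriptor boundary, coalescing MMIO writes under load. A minimal userspace sketch of that coalescing behavior (simplified to compare masked values, whereas the driver compares the masked value against the raw previous next_to_use):

```c
#include <stdio.h>
#include <stdint.h>

/* Standalone model of the tail coalescing in ice_release_rx_desc():
 * posting 64 descriptors one at a time triggers only 8 tail writes,
 * because the tail value is rounded down to a multiple of 8.
 */
int main(void)
{
	uint16_t prev = 0;
	unsigned int mmio_writes = 0;

	for (uint16_t ntu = 1; ntu <= 64; ntu++) {
		uint16_t val = ntu & ~0x7;	/* low 3 bits ignored by HW */

		if ((prev & ~0x7) != val)
			mmio_writes++;		/* writel(val, tail) here */
		prev = ntu;
	}
	printf("64 descriptors posted, %u tail writes\n", mmio_writes);
	return 0;
}
```
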
@@ -438,89 +408,6 @@ static unsigned int ice_rx_offset(struct ice_ring *rx_ring)
 	return ice_is_xdp_ena_vsi(rx_ring->vsi) ? XDP_PACKET_HEADROOM : 0;
 }
 
-/**
- * ice_xdp_ring_update_tail - Updates the XDP Tx ring tail register
- * @xdp_ring: XDP Tx ring
- *
- * This function updates the XDP Tx ring tail register.
- */
-static void ice_xdp_ring_update_tail(struct ice_ring *xdp_ring)
-{
-	/* Force memory writes to complete before letting h/w
-	 * know there are new descriptors to fetch.
-	 */
-	wmb();
-	writel_relaxed(xdp_ring->next_to_use, xdp_ring->tail);
-}
-
-/**
- * ice_xmit_xdp_ring - submit single packet to XDP ring for transmission
- * @data: packet data pointer
- * @size: packet data size
- * @xdp_ring: XDP ring for transmission
- */
-static int ice_xmit_xdp_ring(void *data, u16 size, struct ice_ring *xdp_ring)
-{
-	u16 i = xdp_ring->next_to_use;
-	struct ice_tx_desc *tx_desc;
-	struct ice_tx_buf *tx_buf;
-	dma_addr_t dma;
-
-	if (!unlikely(ICE_DESC_UNUSED(xdp_ring))) {
-		xdp_ring->tx_stats.tx_busy++;
-		return ICE_XDP_CONSUMED;
-	}
-
-	dma = dma_map_single(xdp_ring->dev, data, size, DMA_TO_DEVICE);
-	if (dma_mapping_error(xdp_ring->dev, dma))
-		return ICE_XDP_CONSUMED;
-
-	tx_buf = &xdp_ring->tx_buf[i];
-	tx_buf->bytecount = size;
-	tx_buf->gso_segs = 1;
-	tx_buf->raw_buf = data;
-
-	/* record length, and DMA address */
-	dma_unmap_len_set(tx_buf, len, size);
-	dma_unmap_addr_set(tx_buf, dma, dma);
-
-	tx_desc = ICE_TX_DESC(xdp_ring, i);
-	tx_desc->buf_addr = cpu_to_le64(dma);
-	tx_desc->cmd_type_offset_bsz = build_ctob(ICE_TXD_LAST_DESC_CMD, 0,
-						  size, 0);
-
-	/* Make certain all of the status bits have been updated
-	 * before next_to_watch is written.
-	 */
-	smp_wmb();
-
-	i++;
-	if (i == xdp_ring->count)
-		i = 0;
-
-	tx_buf->next_to_watch = tx_desc;
-	xdp_ring->next_to_use = i;
-
-	return ICE_XDP_TX;
-}
-
-/**
- * ice_xmit_xdp_buff - convert an XDP buffer to an XDP frame and send it
- * @xdp: XDP buffer
- * @xdp_ring: XDP Tx ring
- *
- * Returns negative on failure, 0 on success.
- */
-static int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_ring *xdp_ring)
-{
-	struct xdp_frame *xdpf = convert_to_xdp_frame(xdp);
-
-	if (unlikely(!xdpf))
-		return ICE_XDP_CONSUMED;
-
-	return ice_xmit_xdp_ring(xdpf->data, xdpf->len, xdp_ring);
-}
-
 /**
  * ice_run_xdp - Executes an XDP program on initialized xdp_buff
  * @rx_ring: Rx ring
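
For context, the Tx helpers removed above are driven from ice_run_xdp() (whose kernel-doc appears in the trailing context): the BPF program's verdict decides whether the buffer is passed up the stack, sent on the paired XDP Tx ring, or redirected. A condensed sketch of that dispatch, mirroring the per-queue xdp_rings[q_index] lookup seen in ice_finalize_xdp_rx() below; this is illustrative, not the verbatim driver body:

```c
/* Condensed sketch of the XDP verdict dispatch in ice_run_xdp();
 * tracing and abort handling elided, so this is illustrative only.
 */
static int ice_run_xdp_sketch(struct ice_ring *rx_ring, struct xdp_buff *xdp,
			      struct bpf_prog *xdp_prog)
{
	u32 act = bpf_prog_run_xdp(xdp_prog, xdp);

	switch (act) {
	case XDP_PASS:
		return ICE_XDP_PASS;	/* build an skb as usual */
	case XDP_TX: {
		/* one XDP Tx ring per Rx queue, looked up by q_index */
		struct ice_ring *xdp_ring =
			rx_ring->vsi->xdp_rings[rx_ring->q_index];

		return ice_xmit_xdp_buff(xdp, xdp_ring);
	}
	case XDP_REDIRECT:
		return xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog) ?
		       ICE_XDP_CONSUMED : ICE_XDP_REDIR;
	default:
		return ICE_XDP_CONSUMED;	/* drop */
	}
}
```
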
@@ -612,29 +499,6 @@ ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
 	return n - drops;
 }
 
-/**
- * ice_finalize_xdp_rx - Bump XDP Tx tail and/or flush redirect map
- * @rx_ring: Rx ring
- * @xdp_res: Result of the receive batch
- *
- * This function bumps XDP Tx tail and/or flush redirect map, and
- * should be called when a batch of packets has been processed in the
- * napi loop.
- */
-static void
-ice_finalize_xdp_rx(struct ice_ring *rx_ring, unsigned int xdp_res)
-{
-	if (xdp_res & ICE_XDP_REDIR)
-		xdp_do_flush_map();
-
-	if (xdp_res & ICE_XDP_TX) {
-		struct ice_ring *xdp_ring =
-			rx_ring->vsi->xdp_rings[rx_ring->q_index];
-
-		ice_xdp_ring_update_tail(xdp_ring);
-	}
-}
-
 /**
  * ice_alloc_mapped_page - recycle or make a new page
  * @rx_ring: ring to use
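
The point of ice_finalize_xdp_rx() removed above is batching: per-packet verdicts are OR-ed into one bitmask across a NAPI poll, and the tail bump / redirect-map flush happens once at the end rather than per packet. A rough sketch of that pattern, condensed from the shape of ice_clean_rx_irq(); the descriptor fetch and skb paths are elided, and the rx_ring->xdp_prog field is assumed from the driver of this era:

```c
/* Rough shape of the XDP batching in the NAPI Rx poll; illustrative only. */
static int ice_rx_poll_sketch(struct ice_ring *rx_ring, int budget)
{
	struct bpf_prog *xdp_prog = READ_ONCE(rx_ring->xdp_prog);
	unsigned int total_pkts = 0, xdp_xmit = 0;
	struct xdp_buff xdp;

	while (total_pkts < (unsigned int)budget) {
		unsigned int xdp_res;

		/* ... fetch descriptor, point xdp at the Rx buffer ... */
		xdp_res = ice_run_xdp(rx_ring, &xdp, xdp_prog);

		/* ICE_XDP_TX and ICE_XDP_REDIR are bit flags; collect them */
		if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR))
			xdp_xmit |= xdp_res;

		total_pkts++;
	}

	/* one tail write and/or one xdp_do_flush_map() per batch */
	ice_finalize_xdp_rx(rx_ring, xdp_xmit);

	return total_pkts;
}
```
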
@@ -1031,23 +895,6 @@ static bool ice_cleanup_headers(struct sk_buff *skb)
 	return false;
 }
 
-/**
- * ice_test_staterr - tests bits in Rx descriptor status and error fields
- * @rx_desc: pointer to receive descriptor (in le64 format)
- * @stat_err_bits: value to mask
- *
- * This function does some fast chicanery in order to return the
- * value of the mask which is really only used for boolean tests.
- * The status_error_len doesn't need to be shifted because it begins
- * at offset zero.
- */
-static bool
-ice_test_staterr(union ice_32b_rx_flex_desc *rx_desc, const u16 stat_err_bits)
-{
-	return !!(rx_desc->wb.status_error0 &
-		  cpu_to_le16(stat_err_bits));
-}
-
 /**
  * ice_is_non_eop - process handling of non-EOP buffers
  * @rx_ring: Rx ring being processed
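
ice_test_staterr() removed above works because status_error0 occupies the low bits of the descriptor writeback, so callers pass unshifted bit masks straight through. For instance, the end-of-packet check that ice_is_non_eop (documented next) relies on reduces to something like the sketch below; the EOF status bit name is taken from the flex descriptor layout, and the wrapper itself is illustrative:

```c
/* Illustrative wrapper: test the end-of-frame status bit directly. */
static bool ice_desc_is_eop(union ice_32b_rx_flex_desc *rx_desc)
{
	/* status_error0 starts at bit 0, so the mask needs no shifting */
	return ice_test_staterr(rx_desc,
				BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S));
}
```
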
@@ -1073,154 +920,6 @@ ice_is_non_eop(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
 	return true;
 }
 
-/**
- * ice_ptype_to_htype - get a hash type
- * @ptype: the ptype value from the descriptor
- *
- * Returns a hash type to be used by skb_set_hash
- */
-static enum pkt_hash_types ice_ptype_to_htype(u8 __always_unused ptype)
-{
-	return PKT_HASH_TYPE_NONE;
-}
-
-/**
- * ice_rx_hash - set the hash value in the skb
- * @rx_ring: descriptor ring
- * @rx_desc: specific descriptor
- * @skb: pointer to current skb
- * @rx_ptype: the ptype value from the descriptor
- */
-static void
-ice_rx_hash(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
-	    struct sk_buff *skb, u8 rx_ptype)
-{
-	struct ice_32b_rx_flex_desc_nic *nic_mdid;
-	u32 hash;
-
-	if (!(rx_ring->netdev->features & NETIF_F_RXHASH))
-		return;
-
-	if (rx_desc->wb.rxdid != ICE_RXDID_FLEX_NIC)
-		return;
-
-	nic_mdid = (struct ice_32b_rx_flex_desc_nic *)rx_desc;
-	hash = le32_to_cpu(nic_mdid->rss_hash);
-	skb_set_hash(skb, hash, ice_ptype_to_htype(rx_ptype));
-}
-
-/**
- * ice_rx_csum - Indicate in skb if checksum is good
- * @ring: the ring we care about
- * @skb: skb currently being received and modified
- * @rx_desc: the receive descriptor
- * @ptype: the packet type decoded by hardware
- *
- * skb->protocol must be set before this function is called
- */
-static void
-ice_rx_csum(struct ice_ring *ring, struct sk_buff *skb,
-	    union ice_32b_rx_flex_desc *rx_desc, u8 ptype)
-{
-	struct ice_rx_ptype_decoded decoded;
-	u32 rx_error, rx_status;
-	bool ipv4, ipv6;
-
-	rx_status = le16_to_cpu(rx_desc->wb.status_error0);
-	rx_error = rx_status;
-
-	decoded = ice_decode_rx_desc_ptype(ptype);
-
-	/* Start with CHECKSUM_NONE and by default csum_level = 0 */
-	skb->ip_summed = CHECKSUM_NONE;
-	skb_checksum_none_assert(skb);
-
-	/* check if Rx checksum is enabled */
-	if (!(ring->netdev->features & NETIF_F_RXCSUM))
-		return;
-
-	/* check if HW has decoded the packet and checksum */
-	if (!(rx_status & BIT(ICE_RX_FLEX_DESC_STATUS0_L3L4P_S)))
-		return;
-
-	if (!(decoded.known && decoded.outer_ip))
-		return;
-
-	ipv4 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
-	       (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV4);
-	ipv6 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
-	       (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV6);
-
-	if (ipv4 && (rx_error & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) |
-				 BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S))))
-		goto checksum_fail;
-	else if (ipv6 && (rx_status &
-		 (BIT(ICE_RX_FLEX_DESC_STATUS0_IPV6EXADD_S))))
-		goto checksum_fail;
-
-	/* check for L4 errors and handle packets that were not able to be
-	 * checksummed due to arrival speed
-	 */
-	if (rx_error & BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S))
-		goto checksum_fail;
-
-	/* Only report checksum unnecessary for TCP, UDP, or SCTP */
-	switch (decoded.inner_prot) {
-	case ICE_RX_PTYPE_INNER_PROT_TCP:
-	case ICE_RX_PTYPE_INNER_PROT_UDP:
-	case ICE_RX_PTYPE_INNER_PROT_SCTP:
-		skb->ip_summed = CHECKSUM_UNNECESSARY;
-	default:
-		break;
-	}
-	return;
-
-checksum_fail:
-	ring->vsi->back->hw_csum_rx_error++;
-}
-
-/**
- * ice_process_skb_fields - Populate skb header fields from Rx descriptor
- * @rx_ring: Rx descriptor ring packet is being transacted on
- * @rx_desc: pointer to the EOP Rx descriptor
- * @skb: pointer to current skb being populated
- * @ptype: the packet type decoded by hardware
- *
- * This function checks the ring, descriptor, and packet information in
- * order to populate the hash, checksum, VLAN, protocol, and
- * other fields within the skb.
- */
-static void
-ice_process_skb_fields(struct ice_ring *rx_ring,
-		       union ice_32b_rx_flex_desc *rx_desc,
-		       struct sk_buff *skb, u8 ptype)
-{
-	ice_rx_hash(rx_ring, rx_desc, skb, ptype);
-
-	/* modifies the skb - consumes the enet header */
-	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
-
-	ice_rx_csum(rx_ring, skb, rx_desc, ptype);
-}
-
-/**
- * ice_receive_skb - Send a completed packet up the stack
- * @rx_ring: Rx ring in play
- * @skb: packet to send up
- * @vlan_tag: VLAN tag for packet
- *
- * This function sends the completed packet (via. skb) up the stack using
- * gro receive functions (with/without VLAN tag)
- */
-static void
-ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag)
-{
-	if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
-	    (vlan_tag & VLAN_VID_MASK))
-		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
-	napi_gro_receive(&rx_ring->q_vector->napi, skb);
-}
-
 /**
  * ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
  * @rx_ring: Rx descriptor ring to transact packets on
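
To see how the removed skb helpers fit together at the tail of the clean loop: ice_process_skb_fields() must run before ice_receive_skb(), because ice_rx_csum() needs skb->protocol, which eth_type_trans() sets. A condensed sketch of that hand-off; the L2TAG1P bit and l2tag1 field names are assumptions from the flex descriptor layout, and this is not the verbatim ice_clean_rx_irq() body:

```c
/* Condensed hand-off of a completed packet up the stack; illustrative only. */
static void ice_deliver_skb_sketch(struct ice_ring *rx_ring,
				   union ice_32b_rx_flex_desc *rx_desc,
				   struct sk_buff *skb, u8 rx_ptype)
{
	u16 vlan_tag = 0;

	/* L2TAG1P set means HW stripped a VLAN tag into l2tag1 */
	if (rx_desc->wb.status_error0 &
	    cpu_to_le16(BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S)))
		vlan_tag = le16_to_cpu(rx_desc->wb.l2tag1);

	/* hash, then eth_type_trans() (sets skb->protocol), then csum */
	ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);

	/* GRO delivery, attaching the VLAN tag when one was stripped */
	ice_receive_skb(rx_ring, skb, vlan_tag);
}
```
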