3434#include <net/tc_act/tc_mirred.h>
3535#include <net/vxlan.h>
3636#include <net/mpls.h>
37+ #include <net/xdp_sock.h>
3738
3839#include "ixgbe.h"
3940#include "ixgbe_common.h"
@@ -3176,7 +3177,10 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
31763177 per_ring_budget = budget ;
31773178
31783179 ixgbe_for_each_ring (ring , q_vector -> rx ) {
3179- int cleaned = ixgbe_clean_rx_irq (q_vector , ring ,
3180+ int cleaned = ring -> xsk_umem ?
3181+ ixgbe_clean_rx_irq_zc (q_vector , ring ,
3182+ per_ring_budget ) :
3183+ ixgbe_clean_rx_irq (q_vector , ring ,
31803184 per_ring_budget );
31813185
31823186 work_done += cleaned ;
@@ -3704,10 +3708,27 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
37043708 srrctl = IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT ;
37053709
37063710 /* configure the packet buffer length */
3707- if (test_bit (__IXGBE_RX_3K_BUFFER , & rx_ring -> state ))
3711+ if (rx_ring -> xsk_umem ) {
3712+ u32 xsk_buf_len = rx_ring -> xsk_umem -> chunk_size_nohr -
3713+ XDP_PACKET_HEADROOM ;
3714+
3715+ /* If the MAC supports setting RXDCTL.RLPML, then
3716+ * SRRCTL[n].BSIZEPKT is set to PAGE_SIZE and
3717+ * RXDCTL.RLPML is set to the actual UMEM buffer
3718+ * size. If not, then we are stuck with a 1k buffer
3719+ * size resolution. In that case, frames larger than
3720+ * the UMEM buffer size (as seen at 1k resolution)
3721+ * will be dropped.
3722+ */
3723+ if (hw -> mac .type != ixgbe_mac_82599EB )
3724+ srrctl |= PAGE_SIZE >> IXGBE_SRRCTL_BSIZEPKT_SHIFT ;
3725+ else
3726+ srrctl |= xsk_buf_len >> IXGBE_SRRCTL_BSIZEPKT_SHIFT ;
3727+ } else if (test_bit (__IXGBE_RX_3K_BUFFER , & rx_ring -> state )) {
37083728 srrctl |= IXGBE_RXBUFFER_3K >> IXGBE_SRRCTL_BSIZEPKT_SHIFT ;
3709- else
3729+ } else {
37103730 srrctl |= IXGBE_RXBUFFER_2K >> IXGBE_SRRCTL_BSIZEPKT_SHIFT ;
3731+ }
37113732
37123733 /* configure descriptor type */
37133734 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF ;
@@ -4030,6 +4051,19 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
40304051 u32 rxdctl ;
40314052 u8 reg_idx = ring -> reg_idx ;
40324053
4054+ xdp_rxq_info_unreg_mem_model (& ring -> xdp_rxq );
4055+ ring -> xsk_umem = ixgbe_xsk_umem (adapter , ring );
4056+ if (ring -> xsk_umem ) {
4057+ ring -> zca .free = ixgbe_zca_free ;
4058+ WARN_ON (xdp_rxq_info_reg_mem_model (& ring -> xdp_rxq ,
4059+ MEM_TYPE_ZERO_COPY ,
4060+ & ring -> zca ));
4061+
4062+ } else {
4063+ WARN_ON (xdp_rxq_info_reg_mem_model (& ring -> xdp_rxq ,
4064+ MEM_TYPE_PAGE_SHARED , NULL ));
4065+ }
4066+
40334067 /* disable queue to avoid use of these values while updating state */
40344068 rxdctl = IXGBE_READ_REG (hw , IXGBE_RXDCTL (reg_idx ));
40354069 rxdctl &= ~IXGBE_RXDCTL_ENABLE ;
@@ -4079,6 +4113,17 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
40794113#endif
40804114 }
40814115
4116+ if (ring -> xsk_umem && hw -> mac .type != ixgbe_mac_82599EB ) {
4117+ u32 xsk_buf_len = ring -> xsk_umem -> chunk_size_nohr -
4118+ XDP_PACKET_HEADROOM ;
4119+
4120+ rxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK |
4121+ IXGBE_RXDCTL_RLPML_EN );
4122+ rxdctl |= xsk_buf_len | IXGBE_RXDCTL_RLPML_EN ;
4123+
4124+ ring -> rx_buf_len = xsk_buf_len ;
4125+ }
4126+
40824127 /* initialize rx_buffer_info */
40834128 memset (ring -> rx_buffer_info , 0 ,
40844129 sizeof (struct ixgbe_rx_buffer ) * ring -> count );
@@ -4092,7 +4137,10 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
40924137 IXGBE_WRITE_REG (hw , IXGBE_RXDCTL (reg_idx ), rxdctl );
40934138
40944139 ixgbe_rx_desc_queue_enable (adapter , ring );
4095- ixgbe_alloc_rx_buffers (ring , ixgbe_desc_unused (ring ));
4140+ if (ring -> xsk_umem )
4141+ ixgbe_alloc_rx_buffers_zc (ring , ixgbe_desc_unused (ring ));
4142+ else
4143+ ixgbe_alloc_rx_buffers (ring , ixgbe_desc_unused (ring ));
40964144}
40974145
40984146static void ixgbe_setup_psrtype (struct ixgbe_adapter * adapter )
@@ -5206,6 +5254,11 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
52065254 u16 i = rx_ring -> next_to_clean ;
52075255 struct ixgbe_rx_buffer * rx_buffer = & rx_ring -> rx_buffer_info [i ];
52085256
5257+ if (rx_ring -> xsk_umem ) {
5258+ ixgbe_xsk_clean_rx_ring (rx_ring );
5259+ goto skip_free ;
5260+ }
5261+
52095262 /* Free all the Rx ring sk_buffs */
52105263 while (i != rx_ring -> next_to_alloc ) {
52115264 if (rx_buffer -> skb ) {
@@ -5244,6 +5297,7 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
52445297 }
52455298 }
52465299
5300+ skip_free :
52475301 rx_ring -> next_to_alloc = 0 ;
52485302 rx_ring -> next_to_clean = 0 ;
52495303 rx_ring -> next_to_use = 0 ;
@@ -6439,7 +6493,7 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
64396493 struct device * dev = rx_ring -> dev ;
64406494 int orig_node = dev_to_node (dev );
64416495 int ring_node = -1 ;
6442- int size , err ;
6496+ int size ;
64436497
64446498 size = sizeof (struct ixgbe_rx_buffer ) * rx_ring -> count ;
64456499
@@ -6476,13 +6530,6 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
64766530 rx_ring -> queue_index ) < 0 )
64776531 goto err ;
64786532
6479- err = xdp_rxq_info_reg_mem_model (& rx_ring -> xdp_rxq ,
6480- MEM_TYPE_PAGE_SHARED , NULL );
6481- if (err ) {
6482- xdp_rxq_info_unreg (& rx_ring -> xdp_rxq );
6483- goto err ;
6484- }
6485-
64866533 rx_ring -> xdp_prog = adapter -> xdp_prog ;
64876534
64886535 return 0 ;
@@ -10198,6 +10245,13 @@ static int ixgbe_xdp(struct net_device *dev, struct netdev_bpf *xdp)
1019810245 xdp -> prog_id = adapter -> xdp_prog ?
1019910246 adapter -> xdp_prog -> aux -> id : 0 ;
1020010247 return 0 ;
10248+ case XDP_QUERY_XSK_UMEM :
10249+ return ixgbe_xsk_umem_query (adapter , & xdp -> xsk .umem ,
10250+ xdp -> xsk .queue_id );
10251+ case XDP_SETUP_XSK_UMEM :
10252+ return ixgbe_xsk_umem_setup (adapter , xdp -> xsk .umem ,
10253+ xdp -> xsk .queue_id );
10254+
1020110255 default :
1020210256 return - EINVAL ;
1020310257 }
0 commit comments