@@ -447,6 +447,7 @@ static void ice_vsi_free(struct ice_vsi *vsi)
 
 	ice_vsi_free_stats(vsi);
 	ice_vsi_free_arrays(vsi);
+	mutex_destroy(&vsi->xdp_state_lock);
 	mutex_unlock(&pf->sw_mutex);
 	devm_kfree(dev, vsi);
 }
@@ -626,6 +627,8 @@ static struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf)
 	pf->next_vsi = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi,
 					 pf->next_vsi);
 
+	mutex_init(&vsi->xdp_state_lock);
+
 unlock_pf:
 	mutex_unlock(&pf->sw_mutex);
 	return vsi;
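
These two hunks tie the lock's lifetime to the VSI object: mutex_init() runs while the VSI is allocated under pf->sw_mutex, and mutex_destroy() runs just before the VSI memory is returned. Below is a minimal sketch of that init/destroy pairing; the demo_vsi type and helpers are invented for illustration and are not part of the patch.

#include <linux/mutex.h>
#include <linux/slab.h>

/* Illustrative only: a lock embedded in an object is initialized when the
 * object is created and destroyed (while not held) right before the object's
 * memory is freed, mirroring the ice_vsi_alloc()/ice_vsi_free() pairing above.
 */
struct demo_vsi {
	struct mutex xdp_state_lock;	/* serializes XDP (re)configuration */
	/* ... rings, vectors, counters ... */
};

static struct demo_vsi *demo_vsi_alloc(void)
{
	struct demo_vsi *vsi = kzalloc(sizeof(*vsi), GFP_KERNEL);

	if (!vsi)
		return NULL;
	mutex_init(&vsi->xdp_state_lock);
	return vsi;
}

static void demo_vsi_free(struct demo_vsi *vsi)
{
	mutex_destroy(&vsi->xdp_state_lock);	/* must not be held here */
	kfree(vsi);
}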
@@ -2286,9 +2289,6 @@ static int ice_vsi_cfg_def(struct ice_vsi *vsi)
 
 		ice_vsi_map_rings_to_vectors(vsi);
 
-		/* Associate q_vector rings to napi */
-		ice_vsi_set_napi_queues(vsi);
-
 		vsi->stat_offsets_loaded = false;
 
 		/* ICE_VSI_CTRL does not need RSS so skip RSS processing */
@@ -2426,7 +2426,7 @@ void ice_vsi_decfg(struct ice_vsi *vsi)
 		dev_err(ice_pf_to_dev(pf), "Failed to remove RDMA scheduler config for VSI %u, err %d\n",
 			vsi->vsi_num, err);
 
-	if (ice_is_xdp_ena_vsi(vsi))
+	if (vsi->xdp_rings)
 		/* return value check can be skipped here, it always returns
 		 * 0 if reset is in progress
 		 */
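
This hunk, and the matching one in ice_vsi_release_msix() below, replaces the XDP-enabled flag check with a test on vsi->xdp_rings, presumably so teardown follows what is actually allocated rather than a state bit that can be out of step during reconfiguration. A hedged sketch of that general pattern, with invented names:

/* Illustrative only: key teardown off the resource pointer, not a flag. */
struct demo_ring;

struct demo_xdp_cfg {
	bool xdp_enabled;		/* user-visible intent */
	struct demo_ring **xdp_rings;	/* what is actually allocated */
};

static void demo_teardown_xdp(struct demo_xdp_cfg *cfg)
{
	if (!cfg->xdp_rings)	/* nothing allocated, nothing to undo */
		return;
	/* ... unregister and free the XDP Tx rings ... */
}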
@@ -2528,7 +2528,7 @@ static void ice_vsi_release_msix(struct ice_vsi *vsi)
 		for (q = 0; q < q_vector->num_ring_tx; q++) {
 			ice_write_itr(&q_vector->tx, 0);
 			wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), 0);
-			if (ice_is_xdp_ena_vsi(vsi)) {
+			if (vsi->xdp_rings) {
 				u32 xdp_txq = txq + vsi->num_xdp_txq;
 
 				wr32(hw, QINT_TQCTL(vsi->txq_map[xdp_txq]), 0);
@@ -2628,6 +2628,7 @@ void ice_vsi_close(struct ice_vsi *vsi)
 	if (!test_and_set_bit(ICE_VSI_DOWN, vsi->state))
 		ice_down(vsi);
 
+	ice_vsi_clear_napi_queues(vsi);
 	ice_vsi_free_irq(vsi);
 	ice_vsi_free_tx_rings(vsi);
 	ice_vsi_free_rx_rings(vsi);
@@ -2671,143 +2672,78 @@ int ice_ena_vsi(struct ice_vsi *vsi, bool locked)
  */
 void ice_dis_vsi(struct ice_vsi *vsi, bool locked)
 {
-	if (test_bit(ICE_VSI_DOWN, vsi->state))
-		return;
+	bool already_down = test_bit(ICE_VSI_DOWN, vsi->state);
 
 	set_bit(ICE_VSI_NEEDS_RESTART, vsi->state);
 
 	if (vsi->type == ICE_VSI_PF && vsi->netdev) {
 		if (netif_running(vsi->netdev)) {
 			if (!locked)
 				rtnl_lock();
-
-			ice_vsi_close(vsi);
+			already_down = test_bit(ICE_VSI_DOWN, vsi->state);
+			if (!already_down)
+				ice_vsi_close(vsi);
 
 			if (!locked)
 				rtnl_unlock();
-		} else {
+		} else if (!already_down) {
 			ice_vsi_close(vsi);
 		}
-	} else if (vsi->type == ICE_VSI_CTRL) {
+	} else if (vsi->type == ICE_VSI_CTRL && !already_down) {
 		ice_vsi_close(vsi);
 	}
 }
 
 /**
- * __ice_queue_set_napi - Set the napi instance for the queue
- * @dev: device to which NAPI and queue belong
- * @queue_index: Index of queue
- * @type: queue type as RX or TX
- * @napi: NAPI context
- * @locked: is the rtnl_lock already held
- *
- * Set the napi instance for the queue. Caller indicates the lock status.
- */
-static void
-__ice_queue_set_napi(struct net_device *dev, unsigned int queue_index,
-		     enum netdev_queue_type type, struct napi_struct *napi,
-		     bool locked)
-{
-	if (!locked)
-		rtnl_lock();
-	netif_queue_set_napi(dev, queue_index, type, napi);
-	if (!locked)
-		rtnl_unlock();
-}
-
-/**
- * ice_queue_set_napi - Set the napi instance for the queue
- * @vsi: VSI being configured
- * @queue_index: Index of queue
- * @type: queue type as RX or TX
- * @napi: NAPI context
+ * ice_vsi_set_napi_queues - associate netdev queues with napi
+ * @vsi: VSI pointer
  *
- * Set the napi instance for the queue. The rtnl lock state is derived from the
- * execution path.
+ * Associate queue[s] with napi for all vectors.
+ * The caller must hold rtnl_lock.
  */
-void
-ice_queue_set_napi(struct ice_vsi *vsi, unsigned int queue_index,
-		   enum netdev_queue_type type, struct napi_struct *napi)
+void ice_vsi_set_napi_queues(struct ice_vsi *vsi)
 {
-	struct ice_pf *pf = vsi->back;
+	struct net_device *netdev = vsi->netdev;
+	int q_idx, v_idx;
 
-	if (!vsi->netdev)
+	if (!netdev)
 		return;
 
-	if (current_work() == &pf->serv_task ||
-	    test_bit(ICE_PREPARED_FOR_RESET, pf->state) ||
-	    test_bit(ICE_DOWN, pf->state) ||
-	    test_bit(ICE_SUSPENDED, pf->state))
-		__ice_queue_set_napi(vsi->netdev, queue_index, type, napi,
-				     false);
-	else
-		__ice_queue_set_napi(vsi->netdev, queue_index, type, napi,
-				     true);
-}
+	ice_for_each_rxq(vsi, q_idx)
+		netif_queue_set_napi(netdev, q_idx, NETDEV_QUEUE_TYPE_RX,
+				     &vsi->rx_rings[q_idx]->q_vector->napi);
 
-/**
- * __ice_q_vector_set_napi_queues - Map queue[s] associated with the napi
- * @q_vector: q_vector pointer
- * @locked: is the rtnl_lock already held
- *
- * Associate the q_vector napi with all the queue[s] on the vector.
- * Caller indicates the lock status.
- */
-void __ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector, bool locked)
-{
-	struct ice_rx_ring *rx_ring;
-	struct ice_tx_ring *tx_ring;
-
-	ice_for_each_rx_ring(rx_ring, q_vector->rx)
-		__ice_queue_set_napi(q_vector->vsi->netdev, rx_ring->q_index,
-				     NETDEV_QUEUE_TYPE_RX, &q_vector->napi,
-				     locked);
-
-	ice_for_each_tx_ring(tx_ring, q_vector->tx)
-		__ice_queue_set_napi(q_vector->vsi->netdev, tx_ring->q_index,
-				     NETDEV_QUEUE_TYPE_TX, &q_vector->napi,
-				     locked);
+	ice_for_each_txq(vsi, q_idx)
+		netif_queue_set_napi(netdev, q_idx, NETDEV_QUEUE_TYPE_TX,
+				     &vsi->tx_rings[q_idx]->q_vector->napi);
 	/* Also set the interrupt number for the NAPI */
-	netif_napi_set_irq(&q_vector->napi, q_vector->irq.virq);
-}
-
-/**
- * ice_q_vector_set_napi_queues - Map queue[s] associated with the napi
- * @q_vector: q_vector pointer
- *
- * Associate the q_vector napi with all the queue[s] on the vector
- */
-void ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector)
-{
-	struct ice_rx_ring *rx_ring;
-	struct ice_tx_ring *tx_ring;
-
-	ice_for_each_rx_ring(rx_ring, q_vector->rx)
-		ice_queue_set_napi(q_vector->vsi, rx_ring->q_index,
-				   NETDEV_QUEUE_TYPE_RX, &q_vector->napi);
+	ice_for_each_q_vector(vsi, v_idx) {
+		struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
 
-	ice_for_each_tx_ring(tx_ring, q_vector->tx)
-		ice_queue_set_napi(q_vector->vsi, tx_ring->q_index,
-				   NETDEV_QUEUE_TYPE_TX, &q_vector->napi);
-	/* Also set the interrupt number for the NAPI */
-	netif_napi_set_irq(&q_vector->napi, q_vector->irq.virq);
+		netif_napi_set_irq(&q_vector->napi, q_vector->irq.virq);
+	}
 }
 
 /**
- * ice_vsi_set_napi_queues
+ * ice_vsi_clear_napi_queues - dissociate netdev queues from napi
  * @vsi: VSI pointer
  *
- * Associate queue[s] with napi for all vectors
+ * Clear the association between all VSI queue[s] and napi.
+ * The caller must hold rtnl_lock.
  */
-void ice_vsi_set_napi_queues(struct ice_vsi *vsi)
+void ice_vsi_clear_napi_queues(struct ice_vsi *vsi)
 {
-	int i;
+	struct net_device *netdev = vsi->netdev;
+	int q_idx;
 
-	if (!vsi->netdev)
+	if (!netdev)
 		return;
 
-	ice_for_each_q_vector(vsi, i)
-		ice_q_vector_set_napi_queues(vsi->q_vectors[i]);
+	ice_for_each_txq(vsi, q_idx)
+		netif_queue_set_napi(netdev, q_idx, NETDEV_QUEUE_TYPE_TX, NULL);
+
+	ice_for_each_rxq(vsi, q_idx)
+		netif_queue_set_napi(netdev, q_idx, NETDEV_QUEUE_TYPE_RX, NULL);
 }
 
 /**
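
With the per-queue and per-vector helpers folded into ice_vsi_set_napi_queues() and ice_vsi_clear_napi_queues(), the rtnl decision moves to the callers; both kernel-doc comments now require rtnl_lock to be held. A hedged usage sketch for a caller that is not already on an rtnl-protected path (the wrapper name is invented and the driver headers are assumed):

/* Illustrative only: netif_queue_set_napi() expects rtnl protection, so a
 * caller outside ndo_open/ndo_stop style paths takes rtnl_lock around the
 * VSI-wide helpers from this patch.
 */
static void demo_refresh_napi_mapping(struct ice_vsi *vsi)
{
	rtnl_lock();
	ice_vsi_clear_napi_queues(vsi);	/* drop any stale queue<->napi links */
	ice_vsi_set_napi_queues(vsi);	/* re-associate after reconfiguration */
	rtnl_unlock();
}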
@@ -3039,42 +2975,47 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags)
 	if (WARN_ON(vsi->type == ICE_VSI_VF && !vsi->vf))
 		return -EINVAL;
 
+	mutex_lock(&vsi->xdp_state_lock);
+
 	ret = ice_vsi_realloc_stat_arrays(vsi);
 	if (ret)
-		goto err_vsi_cfg;
+		goto unlock;
 
 	ice_vsi_decfg(vsi);
 	ret = ice_vsi_cfg_def(vsi);
 	if (ret)
-		goto err_vsi_cfg;
+		goto unlock;
 
 	coalesce = kcalloc(vsi->num_q_vectors,
 			   sizeof(struct ice_coalesce_stored), GFP_KERNEL);
-	if (!coalesce)
-		return -ENOMEM;
+	if (!coalesce) {
+		ret = -ENOMEM;
+		goto decfg;
+	}
 
 	prev_num_q_vectors = ice_vsi_rebuild_get_coalesce(vsi, coalesce);
 
 	ret = ice_vsi_cfg_tc_lan(pf, vsi);
 	if (ret) {
 		if (vsi_flags & ICE_VSI_FLAG_INIT) {
 			ret = -EIO;
-			goto err_vsi_cfg_tc_lan;
+			goto free_coalesce;
 		}
 
-		kfree(coalesce);
-		return ice_schedule_reset(pf, ICE_RESET_PFR);
+		ret = ice_schedule_reset(pf, ICE_RESET_PFR);
+		goto free_coalesce;
 	}
 
 	ice_vsi_rebuild_set_coalesce(vsi, coalesce, prev_num_q_vectors);
-	kfree(coalesce);
-
-	return 0;
+	clear_bit(ICE_VSI_REBUILD_PENDING, vsi->state);
 
-err_vsi_cfg_tc_lan:
-	ice_vsi_decfg(vsi);
+free_coalesce:
 	kfree(coalesce);
-err_vsi_cfg:
+decfg:
+	if (ret)
+		ice_vsi_decfg(vsi);
+unlock:
+	mutex_unlock(&vsi->xdp_state_lock);
 	return ret;
 }
 
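ice_vsi_rebuild() now holds vsi->xdp_state_lock for its whole duration and unwinds through one label ladder, so every early exit releases the lock and undoes only what was actually set up. A generic sketch of that unwind idiom, reusing the demo_vsi type from the earlier sketch and invented placeholder steps:

/* Illustrative only: take the lock, then unwind in reverse order of setup;
 * each failure jumps to the label that releases everything done so far.
 * The demo_step_* helpers are placeholders, not driver functions.
 */
static int demo_step_one(struct demo_vsi *vsi) { return 0; }	/* e.g. realloc stat arrays */
static int demo_step_two(struct demo_vsi *vsi) { return 0; }	/* e.g. reconfigure rings */
static void demo_undo_step_one(struct demo_vsi *vsi) { }

static int demo_rebuild(struct demo_vsi *vsi)
{
	int ret;

	mutex_lock(&vsi->xdp_state_lock);

	ret = demo_step_one(vsi);
	if (ret)
		goto unlock;

	ret = demo_step_two(vsi);
	if (ret)
		goto undo_one;

	mutex_unlock(&vsi->xdp_state_lock);
	return 0;

undo_one:
	demo_undo_step_one(vsi);
unlock:
	mutex_unlock(&vsi->xdp_state_lock);
	return ret;
}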