1414#define MLX5_MAX_IRQ_NAME (32)
1515
/* One MSI-X vector owned by the driver. Allocated per-vector on first
 * request and freed when the last reference is dropped (see irq_release()).
 */
struct mlx5_irq {
	u32 index;			/* slot allocated in table->irqs (xa_alloc) */
	struct atomic_notifier_head nh;	/* notifier chain for this vector; also
					 * used as the dev_id cookie for
					 * request/free_irq
					 */
	cpumask_var_t mask;		/* affinity mask applied via
					 * irq_set_affinity_hint()
					 */
	char name[MLX5_MAX_IRQ_NAME];
	struct kref kref;		/* users of this IRQ; release tears it down */
	int irqn;			/* Linux IRQ number from pci_irq_vector() */
	struct mlx5_irq_table *table;	/* back-pointer so irq_release() can
					 * erase this entry from the xarray
					 */
};
2325
/* Per-device IRQ bookkeeping: vectors are now allocated dynamically and
 * tracked in an xarray instead of a fixed kcalloc'd array.
 */
struct mlx5_irq_table {
	struct xarray irqs;	/* vecidx -> struct mlx5_irq *; initialized with
				 * XA_FLAGS_ALLOC in mlx5_irq_table_create()
				 */
	int nvec;		/* number of MSI-X vectors backing this table */
};
2830
@@ -54,13 +56,6 @@ int mlx5_irq_get_num_comp(struct mlx5_irq_table *table)
5456 return table -> nvec - MLX5_IRQ_VEC_COMP_BASE ;
5557}
5658
57- static struct mlx5_irq * mlx5_irq_get (struct mlx5_core_dev * dev , int vecidx )
58- {
59- struct mlx5_irq_table * irq_table = mlx5_irq_table_get (dev );
60-
61- return & irq_table -> irq [vecidx ];
62- }
63-
6459/**
6560 * mlx5_get_default_msix_vec_count - Get the default number of MSI-X vectors
6661 * to be ssigned to each VF.
@@ -149,14 +144,17 @@ int mlx5_set_msix_vec_count(struct mlx5_core_dev *dev, int function_id,
/* kref release callback: final teardown of a single dynamically allocated
 * IRQ. The xarray entry is erased first so the table no longer refers to
 * the dying object, then the vector's resources are unwound in the order
 * free_irq() requires.
 */
static void irq_release(struct kref *kref)
{
	struct mlx5_irq *irq = container_of(kref, struct mlx5_irq, kref);
	struct mlx5_irq_table *table = irq->table;

	xa_erase(&table->irqs, irq->index);
	/* free_irq requires that affinity and rmap will be cleared
	 * before calling it. This is why there is asymmetry with set_rmap
	 * which should be called after alloc_irq but before request_irq.
	 */
	irq_set_affinity_hint(irq->irqn, NULL);
	free_cpumask_var(irq->mask);
	free_irq(irq->irqn, &irq->nh);
	kfree(irq);	/* matches the kzalloc() in irq_request() */
}
161159
162160static void irq_put (struct mlx5_irq * irq )
@@ -203,13 +201,17 @@ static void irq_set_name(char *name, int vecidx)
203201 vecidx - MLX5_IRQ_VEC_COMP_BASE );
204202}
205203
206- static int irq_request (struct mlx5_core_dev * dev , int i )
204+ static struct mlx5_irq * irq_request (struct mlx5_core_dev * dev , int i )
207205{
206+ struct mlx5_irq_table * table = mlx5_irq_table_get (dev );
208207 char name [MLX5_MAX_IRQ_NAME ];
208+ struct xa_limit xa_num_irqs ;
209209 struct mlx5_irq * irq ;
210210 int err ;
211211
212- irq = mlx5_irq_get (dev , i );
212+ irq = kzalloc (sizeof (* irq ), GFP_KERNEL );
213+ if (!irq )
214+ return ERR_PTR (- ENOMEM );
213215 irq -> irqn = pci_irq_vector (dev -> pdev , i );
214216 irq_set_name (name , i );
215217 ATOMIC_INIT_NOTIFIER_HEAD (& irq -> nh );
@@ -226,15 +228,25 @@ static int irq_request(struct mlx5_core_dev *dev, int i)
226228 err = - ENOMEM ;
227229 goto err_cpumask ;
228230 }
231+ xa_num_irqs .min = 0 ;
232+ xa_num_irqs .max = table -> nvec ;
233+ err = xa_alloc (& table -> irqs , & irq -> index , irq , xa_num_irqs ,
234+ GFP_KERNEL );
235+ if (err ) {
236+ mlx5_core_err (dev , "Failed to alloc xa entry for irq(%u). err = %d\n" ,
237+ irq -> index , err );
238+ goto err_xa ;
239+ }
240+ irq -> table = table ;
229241 kref_init (& irq -> kref );
230- return 0 ;
231-
242+ return irq ;
243+ err_xa :
244+ free_cpumask_var (irq -> mask );
232245err_cpumask :
233246 free_irq (irq -> irqn , & irq -> nh );
234247err_req_irq :
235- if (i != 0 )
236- irq_set_affinity_notifier (irq -> irqn , NULL );
237- return err ;
248+ kfree (irq );
249+ return ERR_PTR (err );
238250}
239251
240252/**
@@ -259,25 +271,25 @@ void mlx5_irq_release(struct mlx5_irq *irq)
259271struct mlx5_irq * mlx5_irq_request (struct mlx5_core_dev * dev , int vecidx ,
260272 struct cpumask * affinity )
261273{
262- struct mlx5_irq_table * table = mlx5_irq_table_get (dev );
263- struct mlx5_irq * irq = & table -> irq [vecidx ];
264- int ret ;
274+ struct mlx5_irq_table * irq_table = mlx5_irq_table_get (dev );
275+ struct mlx5_irq * irq ;
265276
266- ret = kref_get_unless_zero (& irq -> kref );
267- if (ret )
277+ irq = xa_load (& irq_table -> irqs , vecidx );
278+ if (irq ) {
279+ kref_get (& irq -> kref );
280+ return irq ;
281+ }
282+ irq = irq_request (dev , vecidx );
283+ if (IS_ERR (irq ))
268284 return irq ;
269- ret = irq_request (dev , vecidx );
270- if (ret )
271- return ERR_PTR (ret );
272285 cpumask_copy (irq -> mask , affinity );
273286 irq_set_affinity_hint (irq -> irqn , irq -> mask );
274287 return irq ;
275288}
276289
/* Return the affinity mask currently associated with @irq.
 * The mask is owned by the IRQ; callers must not free it.
 */
struct cpumask *mlx5_irq_get_affinity_mask(struct mlx5_irq *irq)
{
	return irq->mask;
}
282294
283295int mlx5_irq_table_create (struct mlx5_core_dev * dev )
@@ -299,9 +311,7 @@ int mlx5_irq_table_create(struct mlx5_core_dev *dev)
299311 if (nvec <= MLX5_IRQ_VEC_COMP_BASE )
300312 return - ENOMEM ;
301313
302- table -> irq = kcalloc (nvec , sizeof (* table -> irq ), GFP_KERNEL );
303- if (!table -> irq )
304- return - ENOMEM ;
314+ xa_init_flags (& table -> irqs , XA_FLAGS_ALLOC );
305315
306316 nvec = pci_alloc_irq_vectors (dev -> pdev , MLX5_IRQ_VEC_COMP_BASE + 1 ,
307317 nvec , PCI_IRQ_MSIX );
@@ -315,19 +325,26 @@ int mlx5_irq_table_create(struct mlx5_core_dev *dev)
315325 return 0 ;
316326
317327err_free_irq :
318- kfree ( table -> irq );
328+ xa_destroy ( & table -> irqs );
319329 return err ;
320330}
321331
/* Tear down the device's IRQ table: force-release every remaining IRQ,
 * return the MSI-X vectors to the PCI core and destroy the xarray.
 * No-op for SF devices (they bail out on mlx5_core_is_sf()).
 */
void mlx5_irq_table_destroy(struct mlx5_core_dev *dev)
{
	struct mlx5_irq_table *table = dev->priv.irq_table;
	struct mlx5_irq *irq;
	unsigned long index;

	if (mlx5_core_is_sf(dev))
		return;

	/* There are cases where IRQs are still in use when we reach here.
	 * Hence, make sure all the IRQs are released by calling
	 * irq_release() directly, bypassing the kref count.
	 */
	xa_for_each(&table->irqs, index, irq)
		irq_release(&irq->kref);
	pci_free_irq_vectors(dev->pdev);
	xa_destroy(&table->irqs);
}
332349
333350struct mlx5_irq_table * mlx5_irq_table_get (struct mlx5_core_dev * dev )
0 commit comments