@@ -96,16 +96,15 @@ static void idpf_ctlq_init_rxq_bufs(struct idpf_ctlq_info *cq)
  */
 static void idpf_ctlq_shutdown(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
 {
-	mutex_lock(&cq->cq_lock);
+	spin_lock(&cq->cq_lock);
 
 	/* free ring buffers and the ring itself */
 	idpf_ctlq_dealloc_ring_res(hw, cq);
 
 	/* Set ring_size to 0 to indicate uninitialized queue */
 	cq->ring_size = 0;
 
-	mutex_unlock(&cq->cq_lock);
-	mutex_destroy(&cq->cq_lock);
+	spin_unlock(&cq->cq_lock);
 }
 
 /**
@@ -173,7 +172,7 @@ int idpf_ctlq_add(struct idpf_hw *hw,
 
 	idpf_ctlq_init_regs(hw, cq, is_rxq);
 
-	mutex_init(&cq->cq_lock);
+	spin_lock_init(&cq->cq_lock);
 
 	list_add(&cq->cq_list, &hw->cq_list_head);
 
@@ -272,7 +271,7 @@ int idpf_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
 	int err = 0;
 	int i;
 
-	mutex_lock(&cq->cq_lock);
+	spin_lock(&cq->cq_lock);
 
 	/* Ensure there are enough descriptors to send all messages */
 	num_desc_avail = IDPF_CTLQ_DESC_UNUSED(cq);
@@ -332,7 +331,7 @@ int idpf_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
 	wr32(hw, cq->reg.tail, cq->next_to_use);
 
 err_unlock:
-	mutex_unlock(&cq->cq_lock);
+	spin_unlock(&cq->cq_lock);
 
 	return err;
 }
@@ -364,7 +363,7 @@ int idpf_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count,
 	if (*clean_count > cq->ring_size)
 		return -EBADR;
 
-	mutex_lock(&cq->cq_lock);
+	spin_lock(&cq->cq_lock);
 
 	ntc = cq->next_to_clean;
 
@@ -394,7 +393,7 @@ int idpf_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count,
 
 	cq->next_to_clean = ntc;
 
-	mutex_unlock(&cq->cq_lock);
+	spin_unlock(&cq->cq_lock);
 
 	/* Return number of descriptors actually cleaned */
 	*clean_count = i;
@@ -432,7 +431,7 @@ int idpf_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
 	if (*buff_count > 0)
 		buffs_avail = true;
 
-	mutex_lock(&cq->cq_lock);
+	spin_lock(&cq->cq_lock);
 
 	if (tbp >= cq->ring_size)
 		tbp = 0;
@@ -521,7 +520,7 @@ int idpf_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
 		wr32(hw, cq->reg.tail, cq->next_to_post);
 	}
 
-	mutex_unlock(&cq->cq_lock);
+	spin_unlock(&cq->cq_lock);
 
 	/* return the number of buffers that were not posted */
 	*buff_count = *buff_count - i;
@@ -549,7 +548,7 @@ int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
 	u16 i;
 
 	/* take the lock before we start messing with the ring */
-	mutex_lock(&cq->cq_lock);
+	spin_lock(&cq->cq_lock);
 
 	ntc = cq->next_to_clean;
 
@@ -608,7 +607,7 @@ int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
 
 	cq->next_to_clean = ntc;
 
-	mutex_unlock(&cq->cq_lock);
+	spin_unlock(&cq->cq_lock);
 
 	*num_q_msg = i;
 	if (*num_q_msg == 0)
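
For reference, the locking pattern the hunks above converge on looks roughly like the sketch below. The names (my_ctlq, my_ctlq_setup, my_ctlq_advance) are hypothetical, not taken from the idpf driver; the point is that a spinlock is set up with spin_lock_init() and, unlike a mutex, has no destroy counterpart, which is why the shutdown hunk simply drops the mutex_destroy() call.

/* Minimal sketch of the spinlock pattern adopted above (hypothetical names). */
#include <linux/spinlock.h>
#include <linux/types.h>

struct my_ctlq {
	spinlock_t lock;	/* replaces: struct mutex lock */
	u16 next_to_use;
	u16 ring_size;
};

static void my_ctlq_setup(struct my_ctlq *cq, u16 ring_size)
{
	cq->ring_size = ring_size;
	cq->next_to_use = 0;
	spin_lock_init(&cq->lock);	/* replaces mutex_init(); no teardown needed later */
}

static void my_ctlq_advance(struct my_ctlq *cq)
{
	spin_lock(&cq->lock);		/* critical section must not sleep */
	cq->next_to_use = (cq->next_to_use + 1) % cq->ring_size;
	spin_unlock(&cq->lock);
}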