@@ -48,7 +48,7 @@ snic_wq_cmpl_frame_send(struct vnic_wq *wq,
4848 SNIC_TRC (snic -> shost -> host_no , 0 , 0 ,
4949 ((ulong )(buf -> os_buf ) - sizeof (struct snic_req_info )), 0 , 0 ,
5050 0 );
51- pci_unmap_single ( snic -> pdev , buf -> dma_addr , buf -> len , PCI_DMA_TODEVICE );
51+
5252 buf -> os_buf = NULL ;
5353}
5454
@@ -137,13 +137,36 @@ snic_select_wq(struct snic *snic)
137137 return 0 ;
138138}
139139
+ /*
+ * snic_wqdesc_avail - number of WQ descriptors free for a new request
+ * on queue q_num, computed as the configured descriptor count minus the
+ * requests currently active in firmware. Returns -1 for q_num > 0,
+ * since per-WQ active-request accounting is not implemented (the stats
+ * counter read below is global, not per queue).
+ */
140+ static int
141+ snic_wqdesc_avail (struct snic * snic , int q_num , int req_type )
142+ {
143+ int nr_wqdesc = snic -> config .wq_enet_desc_count ;
144+
145+ if (q_num > 0 ) {
146+ /*
147+ * Multi Queue case, additional care is required.
148+ * Per WQ active requests need to be maintained.
149+ */
150+ SNIC_HOST_INFO (snic -> shost , "desc_avail: Multi Queue case.\n" );
151+ SNIC_BUG_ON (q_num > 0 );
152+
153+ return -1 ;
154+ }
155+
156+ nr_wqdesc -= atomic64_read (& snic -> s_stats .fw .actv_reqs );
157+
+ /*
+ * Hold one descriptor in reserve so an HBA reset request can always
+ * be queued; only a SNIC_REQ_HBA_RESET may consume the last slot.
+ */
158+ return ((req_type == SNIC_REQ_HBA_RESET ) ? nr_wqdesc : nr_wqdesc - 1 );
159+ }
160+
140161int
141162snic_queue_wq_desc (struct snic * snic , void * os_buf , u16 len )
142163{
143164 dma_addr_t pa = 0 ;
144165 unsigned long flags ;
145166 struct snic_fw_stats * fwstats = & snic -> s_stats .fw ;
167+ struct snic_host_req * req = (struct snic_host_req * ) os_buf ;
146168 long act_reqs ;
169+ long desc_avail = 0 ;
147170 int q_num = 0 ;
148171
149172 snic_print_desc (__func__ , os_buf , len );
@@ -156,11 +179,15 @@ snic_queue_wq_desc(struct snic *snic, void *os_buf, u16 len)
156179 return - ENOMEM ;
157180 }
158181
182+ req -> req_pa = (ulong )pa ;
183+
159184 q_num = snic_select_wq (snic );
160185
161186 spin_lock_irqsave (& snic -> wq_lock [q_num ], flags );
162- if (!svnic_wq_desc_avail (snic -> wq )) {
187+ desc_avail = snic_wqdesc_avail (snic , q_num , req -> hdr .type );
188+ if (desc_avail <= 0 ) {
163189 pci_unmap_single (snic -> pdev , pa , len , PCI_DMA_TODEVICE );
190+ req -> req_pa = 0 ;
164191 spin_unlock_irqrestore (& snic -> wq_lock [q_num ], flags );
165192 atomic64_inc (& snic -> s_stats .misc .wq_alloc_fail );
166193 SNIC_DBG ("host = %d, WQ is Full\n" , snic -> shost -> host_no );
@@ -169,10 +196,13 @@ snic_queue_wq_desc(struct snic *snic, void *os_buf, u16 len)
169196 }
170197
171198 snic_queue_wq_eth_desc (& snic -> wq [q_num ], os_buf , pa , len , 0 , 0 , 1 );
199+ /*
200+ * Update stats
201+ * note: when multi queue enabled, fw actv_reqs should be per queue.
202+ */
203+ act_reqs = atomic64_inc_return (& fwstats -> actv_reqs );
172204 spin_unlock_irqrestore (& snic -> wq_lock [q_num ], flags );
173205
174- /* Update stats */
175- act_reqs = atomic64_inc_return (& fwstats -> actv_reqs );
176206 if (act_reqs > atomic64_read (& fwstats -> max_actv_reqs ))
177207 atomic64_set (& fwstats -> max_actv_reqs , act_reqs );
178208
@@ -318,11 +348,31 @@ snic_req_free(struct snic *snic, struct snic_req_info *rqi)
318348 "Req_free:rqi %p:ioreq %p:abt %p:dr %p\n" ,
319349 rqi , rqi -> req , rqi -> abort_req , rqi -> dr_req );
320350
321- if (rqi -> abort_req )
351+ if (rqi -> abort_req ) {
352+ if (rqi -> abort_req -> req_pa )
353+ pci_unmap_single (snic -> pdev ,
354+ rqi -> abort_req -> req_pa ,
355+ sizeof (struct snic_host_req ),
356+ PCI_DMA_TODEVICE );
357+
322358 mempool_free (rqi -> abort_req , snic -> req_pool [SNIC_REQ_TM_CACHE ]);
359+ }
360+
361+ if (rqi -> dr_req ) {
362+ if (rqi -> dr_req -> req_pa )
363+ pci_unmap_single (snic -> pdev ,
364+ rqi -> dr_req -> req_pa ,
365+ sizeof (struct snic_host_req ),
366+ PCI_DMA_TODEVICE );
323367
324- if (rqi -> dr_req )
325368 mempool_free (rqi -> dr_req , snic -> req_pool [SNIC_REQ_TM_CACHE ]);
369+ }
370+
371+ if (rqi -> req -> req_pa )
372+ pci_unmap_single (snic -> pdev ,
373+ rqi -> req -> req_pa ,
374+ rqi -> req_len ,
375+ PCI_DMA_TODEVICE );
326376
327377 mempool_free (rqi , snic -> req_pool [rqi -> rq_pool_type ]);
328378}
0 commit comments