Commit c00f62e

jsmart-gh authored and martinkpetersen committed
scsi: lpfc: Merge per-protocol WQ/CQ pairs into single per-cpu pair
Currently, each hardware queue, typically allocated per CPU, consists of a WQ/CQ pair per protocol, so if both SCSI and NVMe are supported, two WQ/CQ pairs exist for each hardware queue. Separate queues are unnecessary. The current implementation wastes memory backing the second set of queues, and using double the SLI-4 WQ/CQs means fewer hardware queues can be supported, so there may not always be enough to have a pair per CPU. With only one pair per CPU, more CPUs may get their own WQ/CQ.

Rework the implementation so that both protocols share a single WQ/CQ pair per hardware queue.

Signed-off-by: Dick Kennedy <[email protected]>
Signed-off-by: James Smart <[email protected]>
Signed-off-by: Martin K. Petersen <[email protected]>
1 parent 0d8af09 commit c00f62e
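
As a rough, self-contained illustration of the structural change described above (this is not the lpfc code; the mock types and main() are invented for the example, and only the io_wq/io_cq naming mirrors the patch), each per-CPU hardware queue now owns a single WQ/CQ pair shared by SCSI and NVMe:

/* Standalone sketch: one WQ/CQ pair per hardware queue, shared by both
 * protocols. Field names io_wq/io_cq mirror the patch; everything else
 * (mock_queue, NCPU, main) is invented for illustration.
 */
#include <stdio.h>

struct mock_queue {
	int queue_id;
};

struct hdw_queue {
	struct mock_queue io_wq;	/* single work queue: FCP + NVMe */
	struct mock_queue io_cq;	/* single completion queue, 1:1 with io_wq */
};

enum { NCPU = 4 };

int main(void)
{
	struct hdw_queue hdwq[NCPU];
	int i;

	/* Before: two WQ/CQ pairs per hdwq (fcp_* + nvme_*) = 4 SLI-4 queues.
	 * After:  one WQ/CQ pair per hdwq = 2 SLI-4 queues, so the same
	 * hardware queue budget stretches across more CPUs.
	 */
	for (i = 0; i < NCPU; i++) {
		hdwq[i].io_wq.queue_id = 2 * i;
		hdwq[i].io_cq.queue_id = 2 * i + 1;
		printf("hdwq[%d]: io_wq qid %d completes into io_cq qid %d\n",
		       i, hdwq[i].io_wq.queue_id, hdwq[i].io_cq.queue_id);
	}
	return 0;
}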

File tree

10 files changed, +218 -557 lines changed


drivers/scsi/lpfc/lpfc.h

Lines changed: 1 addition & 2 deletions
@@ -734,14 +734,13 @@ struct lpfc_hba {
 #define HBA_AER_ENABLED 0x1000 /* AER enabled with HBA */
 #define HBA_DEVLOSS_TMO 0x2000 /* HBA in devloss timeout */
 #define HBA_RRQ_ACTIVE 0x4000 /* process the rrq active list */
-#define HBA_FCP_IOQ_FLUSH 0x8000 /* FCP I/O queues being flushed */
+#define HBA_IOQ_FLUSH 0x8000 /* FCP/NVME I/O queues being flushed */
 #define HBA_FW_DUMP_OP 0x10000 /* Skips fn reset before FW dump */
 #define HBA_RECOVERABLE_UE 0x20000 /* Firmware supports recoverable UE */
 #define HBA_FORCED_LINK_SPEED 0x40000 /*
                                        * Firmware supports Forced Link Speed
                                        * capability
                                        */
-#define HBA_NVME_IOQ_FLUSH 0x80000 /* NVME IO queues flushed. */
 #define HBA_FLOGI_ISSUED 0x100000 /* FLOGI was issued */

 uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/
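
A minimal sketch of what the flag consolidation above buys at a check site, assuming a hypothetical caller that tests the flush state; the bit value matches the header change, while the wrapper struct and check function are illustrative only:

/* Standalone sketch of the flag merge. The bit value follows the header
 * change; struct hba_state and io_queues_flushing() are invented for the
 * example and are not lpfc code.
 */
#include <stdio.h>

#define HBA_IOQ_FLUSH	0x8000	/* FCP/NVME I/O queues being flushed */

struct hba_state {
	unsigned int hba_flag;
};

/* Previously a caller had to test HBA_FCP_IOQ_FLUSH or HBA_NVME_IOQ_FLUSH
 * depending on protocol; one bit now covers both I/O paths. */
static int io_queues_flushing(const struct hba_state *hba)
{
	return (hba->hba_flag & HBA_IOQ_FLUSH) != 0;
}

int main(void)
{
	struct hba_state hba = { .hba_flag = HBA_IOQ_FLUSH };

	printf("I/O queues flushing: %d\n", io_queues_flushing(&hba));
	return 0;
}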

drivers/scsi/lpfc/lpfc_crtn.h

Lines changed: 1 addition & 1 deletion
@@ -326,7 +326,7 @@ void lpfc_sli_bemem_bcopy(void *, void *, uint32_t);
 void lpfc_sli_abort_iocb_ring(struct lpfc_hba *, struct lpfc_sli_ring *);
 void lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba);
 void lpfc_sli_hba_iocb_abort(struct lpfc_hba *);
-void lpfc_sli_flush_fcp_rings(struct lpfc_hba *);
+void lpfc_sli_flush_io_rings(struct lpfc_hba *phba);
 int lpfc_sli_ringpostbuf_put(struct lpfc_hba *, struct lpfc_sli_ring *,
 struct lpfc_dmabuf *);
 struct lpfc_dmabuf *lpfc_sli_ringpostbuf_get(struct lpfc_hba *,

drivers/scsi/lpfc/lpfc_debugfs.c

Lines changed: 11 additions & 81 deletions
@@ -416,8 +416,7 @@ lpfc_debugfs_commonxripools_data(struct lpfc_hba *phba, char *buf, int size)
 qp = &phba->sli4_hba.hdwq[lpfc_debugfs_last_xripool];

 len += scnprintf(buf + len, size - len, "HdwQ %d Info ", i);
-spin_lock_irqsave(&qp->abts_scsi_buf_list_lock, iflag);
-spin_lock(&qp->abts_nvme_buf_list_lock);
+spin_lock_irqsave(&qp->abts_io_buf_list_lock, iflag);
 spin_lock(&qp->io_buf_list_get_lock);
 spin_lock(&qp->io_buf_list_put_lock);
 out = qp->total_io_bufs - (qp->get_io_bufs + qp->put_io_bufs +
@@ -430,8 +429,7 @@ lpfc_debugfs_commonxripools_data(struct lpfc_hba *phba, char *buf, int size)
 qp->abts_nvme_io_bufs, out);
 spin_unlock(&qp->io_buf_list_put_lock);
 spin_unlock(&qp->io_buf_list_get_lock);
-spin_unlock(&qp->abts_nvme_buf_list_lock);
-spin_unlock_irqrestore(&qp->abts_scsi_buf_list_lock, iflag);
+spin_unlock_irqrestore(&qp->abts_io_buf_list_lock, iflag);

 lpfc_debugfs_last_xripool++;
 if (lpfc_debugfs_last_xripool >= phba->cfg_hdw_queue)
@@ -533,9 +531,7 @@ lpfc_debugfs_multixripools_data(struct lpfc_hba *phba, char *buf, int size)
 continue;
 pbl_pool = &multixri_pool->pbl_pool;
 pvt_pool = &multixri_pool->pvt_pool;
-txcmplq_cnt = qp->fcp_wq->pring->txcmplq_cnt;
-if (qp->nvme_wq)
-txcmplq_cnt += qp->nvme_wq->pring->txcmplq_cnt;
+txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;

 scnprintf(tmp, sizeof(tmp),
 "%03d: %4d %4d %4d %4d | %10d %10d ",
@@ -3786,23 +3782,13 @@ lpfc_idiag_wqs_for_cq(struct lpfc_hba *phba, char *wqtype, char *pbuffer,
 int qidx;

 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
-qp = phba->sli4_hba.hdwq[qidx].fcp_wq;
+qp = phba->sli4_hba.hdwq[qidx].io_wq;
 if (qp->assoc_qid != cq_id)
 continue;
 *len = __lpfc_idiag_print_wq(qp, wqtype, pbuffer, *len);
 if (*len >= max_cnt)
 return 1;
 }
-if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
-for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
-qp = phba->sli4_hba.hdwq[qidx].nvme_wq;
-if (qp->assoc_qid != cq_id)
-continue;
-*len = __lpfc_idiag_print_wq(qp, wqtype, pbuffer, *len);
-if (*len >= max_cnt)
-return 1;
-}
-}
 return 0;
 }

@@ -3868,38 +3854,21 @@ lpfc_idiag_cqs_for_eq(struct lpfc_hba *phba, char *pbuffer,
 struct lpfc_queue *qp;
 int rc;

-qp = phba->sli4_hba.hdwq[eqidx].fcp_cq;
+qp = phba->sli4_hba.hdwq[eqidx].io_cq;

-*len = __lpfc_idiag_print_cq(qp, "FCP", pbuffer, *len);
+*len = __lpfc_idiag_print_cq(qp, "IO", pbuffer, *len);

 /* Reset max counter */
 qp->CQ_max_cqe = 0;

 if (*len >= max_cnt)
 return 1;

-rc = lpfc_idiag_wqs_for_cq(phba, "FCP", pbuffer, len,
+rc = lpfc_idiag_wqs_for_cq(phba, "IO", pbuffer, len,
 max_cnt, qp->queue_id);
 if (rc)
 return 1;

-if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
-qp = phba->sli4_hba.hdwq[eqidx].nvme_cq;
-
-*len = __lpfc_idiag_print_cq(qp, "NVME", pbuffer, *len);
-
-/* Reset max counter */
-qp->CQ_max_cqe = 0;
-
-if (*len >= max_cnt)
-return 1;
-
-rc = lpfc_idiag_wqs_for_cq(phba, "NVME", pbuffer, len,
-max_cnt, qp->queue_id);
-if (rc)
-return 1;
-}
-
 if ((eqidx < phba->cfg_nvmet_mrq) && phba->nvmet_support) {
 /* NVMET CQset */
 qp = phba->sli4_hba.nvmet_cqset[eqidx];
@@ -4348,7 +4317,7 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
 if (phba->sli4_hba.hdwq) {
 for (qidx = 0; qidx < phba->cfg_hdw_queue;
 qidx++) {
-qp = phba->sli4_hba.hdwq[qidx].fcp_cq;
+qp = phba->sli4_hba.hdwq[qidx].io_cq;
 if (qp && qp->queue_id == queid) {
 /* Sanity check */
 rc = lpfc_idiag_que_param_check(
@@ -4360,22 +4329,6 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
 }
 }
 }
-/* NVME complete queue */
-if (phba->sli4_hba.hdwq) {
-qidx = 0;
-do {
-qp = phba->sli4_hba.hdwq[qidx].nvme_cq;
-if (qp && qp->queue_id == queid) {
-/* Sanity check */
-rc = lpfc_idiag_que_param_check(
-qp, index, count);
-if (rc)
-goto error_out;
-idiag.ptr_private = qp;
-goto pass_check;
-}
-} while (++qidx < phba->cfg_hdw_queue);
-}
 goto error_out;
 break;
 case LPFC_IDIAG_MQ:
@@ -4419,20 +4372,7 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
 if (phba->sli4_hba.hdwq) {
 /* FCP/SCSI work queue */
 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
-qp = phba->sli4_hba.hdwq[qidx].fcp_wq;
-if (qp && qp->queue_id == queid) {
-/* Sanity check */
-rc = lpfc_idiag_que_param_check(
-qp, index, count);
-if (rc)
-goto error_out;
-idiag.ptr_private = qp;
-goto pass_check;
-}
-}
-/* NVME work queue */
-for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
-qp = phba->sli4_hba.hdwq[qidx].nvme_wq;
+qp = phba->sli4_hba.hdwq[qidx].io_wq;
 if (qp && qp->queue_id == queid) {
 /* Sanity check */
 rc = lpfc_idiag_que_param_check(
@@ -6442,12 +6382,7 @@ lpfc_debug_dump_all_queues(struct lpfc_hba *phba)
 lpfc_debug_dump_wq(phba, DUMP_NVMELS, 0);

 for (idx = 0; idx < phba->cfg_hdw_queue; idx++)
-lpfc_debug_dump_wq(phba, DUMP_FCP, idx);
-
-if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
-for (idx = 0; idx < phba->cfg_hdw_queue; idx++)
-lpfc_debug_dump_wq(phba, DUMP_NVME, idx);
-}
+lpfc_debug_dump_wq(phba, DUMP_IO, idx);

 lpfc_debug_dump_hdr_rq(phba);
 lpfc_debug_dump_dat_rq(phba);
@@ -6459,12 +6394,7 @@ lpfc_debug_dump_all_queues(struct lpfc_hba *phba)
 lpfc_debug_dump_cq(phba, DUMP_NVMELS, 0);

 for (idx = 0; idx < phba->cfg_hdw_queue; idx++)
-lpfc_debug_dump_cq(phba, DUMP_FCP, idx);
-
-if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
-for (idx = 0; idx < phba->cfg_hdw_queue; idx++)
-lpfc_debug_dump_cq(phba, DUMP_NVME, idx);
-}
+lpfc_debug_dump_cq(phba, DUMP_IO, idx);

 /*
 * Dump Event Queues (EQs)
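
The debugfs hunks above repeatedly collapse a two-pass walk (the fcp_* queues, then the nvme_* queues when NVMe is enabled) into a single pass over the per-hdwq io_* pair. Below is a standalone sketch of that iteration shape; the mock types and the printf are invented stand-ins for the debugfs print helpers, not the lpfc code:

/* Standalone sketch: a single loop over the hardware queues reports one
 * WQ per CQ, with no second NVMe-only pass. Types and the dump helper
 * are illustrative; only the loop shape mirrors the patch.
 */
#include <stdio.h>

struct mock_wq {
	int queue_id;
	int assoc_qid;	/* CQ this WQ completes into */
};

struct hdw_queue {
	struct mock_wq io_wq;
};

static void dump_wqs_for_cq(struct hdw_queue *hdwq, int nqueues, int cq_id)
{
	int qidx;

	/* One pass is enough: each hdwq has exactly one io_wq. */
	for (qidx = 0; qidx < nqueues; qidx++) {
		if (hdwq[qidx].io_wq.assoc_qid != cq_id)
			continue;
		printf("IO WQ[Idx:%d|Qid:%d]\n",
		       qidx, hdwq[qidx].io_wq.queue_id);
	}
}

int main(void)
{
	struct hdw_queue hdwq[2] = {
		{ .io_wq = { .queue_id = 10, .assoc_qid = 100 } },
		{ .io_wq = { .queue_id = 11, .assoc_qid = 101 } },
	};

	dump_wqs_for_cq(hdwq, 2, 100);
	return 0;
}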

drivers/scsi/lpfc/lpfc_debugfs.h

Lines changed: 17 additions & 44 deletions
@@ -291,8 +291,7 @@ struct lpfc_idiag {
 #define LPFC_DUMP_MULTIXRIPOOL_SIZE 8192

 enum {
-DUMP_FCP,
-DUMP_NVME,
+DUMP_IO,
 DUMP_MBX,
 DUMP_ELS,
 DUMP_NVMELS,
@@ -415,12 +414,9 @@ lpfc_debug_dump_wq(struct lpfc_hba *phba, int qtype, int wqidx)
 struct lpfc_queue *wq;
 char *qtypestr;

-if (qtype == DUMP_FCP) {
-wq = phba->sli4_hba.hdwq[wqidx].fcp_wq;
-qtypestr = "FCP";
-} else if (qtype == DUMP_NVME) {
-wq = phba->sli4_hba.hdwq[wqidx].nvme_wq;
-qtypestr = "NVME";
+if (qtype == DUMP_IO) {
+wq = phba->sli4_hba.hdwq[wqidx].io_wq;
+qtypestr = "IO";
 } else if (qtype == DUMP_MBX) {
 wq = phba->sli4_hba.mbx_wq;
 qtypestr = "MBX";
@@ -433,7 +429,7 @@ lpfc_debug_dump_wq(struct lpfc_hba *phba, int qtype, int wqidx)
 } else
 return;

-if (qtype == DUMP_FCP || qtype == DUMP_NVME)
+if (qtype == DUMP_IO)
 pr_err("%s WQ: WQ[Idx:%d|Qid:%d]\n",
 qtypestr, wqidx, wq->queue_id);
 else
@@ -459,17 +455,13 @@ lpfc_debug_dump_cq(struct lpfc_hba *phba, int qtype, int wqidx)
 char *qtypestr;
 int eqidx;

-/* fcp/nvme wq and cq are 1:1, thus same indexes */
+/* io wq and cq are 1:1, thus same indexes */
 eq = NULL;

-if (qtype == DUMP_FCP) {
-wq = phba->sli4_hba.hdwq[wqidx].fcp_wq;
-cq = phba->sli4_hba.hdwq[wqidx].fcp_cq;
-qtypestr = "FCP";
-} else if (qtype == DUMP_NVME) {
-wq = phba->sli4_hba.hdwq[wqidx].nvme_wq;
-cq = phba->sli4_hba.hdwq[wqidx].nvme_cq;
-qtypestr = "NVME";
+if (qtype == DUMP_IO) {
+wq = phba->sli4_hba.hdwq[wqidx].io_wq;
+cq = phba->sli4_hba.hdwq[wqidx].io_cq;
+qtypestr = "IO";
 } else if (qtype == DUMP_MBX) {
 wq = phba->sli4_hba.mbx_wq;
 cq = phba->sli4_hba.mbx_cq;
@@ -496,7 +488,7 @@ lpfc_debug_dump_cq(struct lpfc_hba *phba, int qtype, int wqidx)
 eq = phba->sli4_hba.hdwq[0].hba_eq;
 }

-if (qtype == DUMP_FCP || qtype == DUMP_NVME)
+if (qtype == DUMP_IO)
 pr_err("%s CQ: WQ[Idx:%d|Qid%d]->CQ[Idx%d|Qid%d]"
 "->EQ[Idx:%d|Qid:%d]:\n",
 qtypestr, wqidx, wq->queue_id, wqidx, cq->queue_id,
@@ -572,20 +564,11 @@ lpfc_debug_dump_wq_by_id(struct lpfc_hba *phba, int qid)
 int wq_idx;

 for (wq_idx = 0; wq_idx < phba->cfg_hdw_queue; wq_idx++)
-if (phba->sli4_hba.hdwq[wq_idx].fcp_wq->queue_id == qid)
+if (phba->sli4_hba.hdwq[wq_idx].io_wq->queue_id == qid)
 break;
 if (wq_idx < phba->cfg_hdw_queue) {
-pr_err("FCP WQ[Idx:%d|Qid:%d]\n", wq_idx, qid);
-lpfc_debug_dump_q(phba->sli4_hba.hdwq[wq_idx].fcp_wq);
-return;
-}
-
-for (wq_idx = 0; wq_idx < phba->cfg_hdw_queue; wq_idx++)
-if (phba->sli4_hba.hdwq[wq_idx].nvme_wq->queue_id == qid)
-break;
-if (wq_idx < phba->cfg_hdw_queue) {
-pr_err("NVME WQ[Idx:%d|Qid:%d]\n", wq_idx, qid);
-lpfc_debug_dump_q(phba->sli4_hba.hdwq[wq_idx].nvme_wq);
+pr_err("IO WQ[Idx:%d|Qid:%d]\n", wq_idx, qid);
+lpfc_debug_dump_q(phba->sli4_hba.hdwq[wq_idx].io_wq);
 return;
 }

@@ -654,22 +637,12 @@ lpfc_debug_dump_cq_by_id(struct lpfc_hba *phba, int qid)
 int cq_idx;

 for (cq_idx = 0; cq_idx < phba->cfg_hdw_queue; cq_idx++)
-if (phba->sli4_hba.hdwq[cq_idx].fcp_cq->queue_id == qid)
-break;
-
-if (cq_idx < phba->cfg_hdw_queue) {
-pr_err("FCP CQ[Idx:%d|Qid:%d]\n", cq_idx, qid);
-lpfc_debug_dump_q(phba->sli4_hba.hdwq[cq_idx].fcp_cq);
-return;
-}
-
-for (cq_idx = 0; cq_idx < phba->cfg_hdw_queue; cq_idx++)
-if (phba->sli4_hba.hdwq[cq_idx].nvme_cq->queue_id == qid)
+if (phba->sli4_hba.hdwq[cq_idx].io_cq->queue_id == qid)
 break;

 if (cq_idx < phba->cfg_hdw_queue) {
-pr_err("NVME CQ[Idx:%d|Qid:%d]\n", cq_idx, qid);
-lpfc_debug_dump_q(phba->sli4_hba.hdwq[cq_idx].nvme_cq);
+pr_err("IO CQ[Idx:%d|Qid:%d]\n", cq_idx, qid);
+lpfc_debug_dump_q(phba->sli4_hba.hdwq[cq_idx].io_cq);
 return;
 }
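
The header changes above reduce the qtype dispatch in the dump helpers to a single DUMP_IO case. Below is a self-contained sketch of that selection logic; the enum shape follows the patch, while the mock types and the print call are illustrative only, not the lpfc helpers:

/* Standalone sketch of the consolidated qtype dispatch: DUMP_IO selects
 * the per-hdwq io_wq where DUMP_FCP/DUMP_NVME used to select separate
 * queues. Everything except the enum shape is invented for the example.
 */
#include <stdio.h>

enum { DUMP_IO, DUMP_MBX, DUMP_ELS, DUMP_NVMELS };

struct mock_wq { int queue_id; };

struct hdw_queue { struct mock_wq io_wq; };

static void debug_dump_wq(struct hdw_queue *hdwq, struct mock_wq *mbx_wq,
			  int qtype, int wqidx)
{
	struct mock_wq *wq;
	const char *qtypestr;

	if (qtype == DUMP_IO) {		/* one branch covers FCP and NVMe */
		wq = &hdwq[wqidx].io_wq;
		qtypestr = "IO";
	} else if (qtype == DUMP_MBX) {
		wq = mbx_wq;
		qtypestr = "MBX";
	} else {
		return;
	}

	printf("%s WQ: WQ[Idx:%d|Qid:%d]\n", qtypestr, wqidx, wq->queue_id);
}

int main(void)
{
	struct hdw_queue hdwq[1] = { { .io_wq = { .queue_id = 42 } } };
	struct mock_wq mbx = { .queue_id = 7 };

	debug_dump_wq(hdwq, &mbx, DUMP_IO, 0);
	debug_dump_wq(hdwq, &mbx, DUMP_MBX, 0);
	return 0;
}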
