Commit 7bf5853

Christoph Hellwig authored and Jens Axboe (axboe) committed
nvme: don't pass the full CQE to nvme_complete_async_event
We only need the status and result fields, and passing them explicitly
makes life a lot easier for the Fibre Channel transport, which doesn't
have a full CQE for the fast path case.

Signed-off-by: Christoph Hellwig <[email protected]>
Reviewed-by: Keith Busch <[email protected]>
Signed-off-by: Jens Axboe <[email protected]>
1 parent d49187e commit 7bf5853
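The Fibre Channel transport mentioned in the message is not part of this
commit, so as a minimal sketch of why the new signature helps: a transport
whose fast path never materializes a full struct nvme_completion can
synthesize just the two fields the AEN handler needs. The caller below is
hypothetical (illustrative name, assumes drivers/nvme/host/nvme.h is in
scope):

	/*
	 * Hypothetical transport fast path, not from this commit: only a raw
	 * 16-bit status and a 32-bit result were recovered from the wire, so
	 * build a union nvme_result instead of a whole CQE.
	 */
	static void xport_handle_aen(struct nvme_ctrl *ctrl, u16 raw_status,
			u32 raw_result)
	{
		union nvme_result res;

		res.u32 = cpu_to_le32(raw_result);	/* handler does le32_to_cpu() */
		nvme_complete_async_event(ctrl, cpu_to_le16(raw_status), &res);
	}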

File tree

5 files changed: +21 -11 lines changed

	drivers/nvme/host/core.c
	drivers/nvme/host/nvme.h
	drivers/nvme/host/pci.c
	drivers/nvme/host/rdma.c
	drivers/nvme/target/loop.c

drivers/nvme/host/core.c

Lines changed: 13 additions & 6 deletions
@@ -1895,18 +1895,25 @@ static void nvme_async_event_work(struct work_struct *work)
 	spin_unlock_irq(&ctrl->lock);
 }
 
-void nvme_complete_async_event(struct nvme_ctrl *ctrl,
-		struct nvme_completion *cqe)
+void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
+		union nvme_result *res)
 {
-	u16 status = le16_to_cpu(cqe->status) >> 1;
-	u32 result = le32_to_cpu(cqe->result.u32);
+	u32 result = le32_to_cpu(res->u32);
+	bool done = true;
 
-	if (status == NVME_SC_SUCCESS || status == NVME_SC_ABORT_REQ) {
+	switch (le16_to_cpu(status) >> 1) {
+	case NVME_SC_SUCCESS:
+		done = false;
+		/*FALLTHRU*/
+	case NVME_SC_ABORT_REQ:
 		++ctrl->event_limit;
 		schedule_work(&ctrl->async_event_work);
+		break;
+	default:
+		break;
 	}
 
-	if (status != NVME_SC_SUCCESS)
+	if (done)
 		return;
 
 	switch (result & 0xff07) {
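For context, le16_to_cpu(status) >> 1 in the new code discards the phase
tag in bit 0 of the completion status word, leaving the status code that
the NVME_SC_* constants encode. The layout the callers below pull the two
fields from, reproduced for reference from include/linux/nvme.h as of the
parent commit d49187e:

	struct nvme_completion {
		union nvme_result {	/* admin/fabrics commands return data here */
			__le16	u16;
			__le32	u32;
			__le64	u64;
		} result;
		__le16	sq_head;	/* how much of this queue may be reclaimed */
		__le16	sq_id;		/* submission queue that generated this entry */
		__u16	command_id;	/* of the command which completed */
		__le16	status;		/* bit 0: phase tag; bits 15:1: status code */
	};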

drivers/nvme/host/nvme.h

Lines changed: 2 additions & 2 deletions
@@ -275,8 +275,8 @@ void nvme_queue_scan(struct nvme_ctrl *ctrl);
 void nvme_remove_namespaces(struct nvme_ctrl *ctrl);
 
 #define NVME_NR_AERS	1
-void nvme_complete_async_event(struct nvme_ctrl *ctrl,
-		struct nvme_completion *cqe);
+void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
+		union nvme_result *res);
 void nvme_queue_async_events(struct nvme_ctrl *ctrl);
 
 void nvme_stop_queues(struct nvme_ctrl *ctrl);

drivers/nvme/host/pci.c

Lines changed: 2 additions & 1 deletion
@@ -703,7 +703,8 @@ static void __nvme_process_cq(struct nvme_queue *nvmeq, unsigned int *tag)
 		 */
 		if (unlikely(nvmeq->qid == 0 &&
 				cqe.command_id >= NVME_AQ_BLKMQ_DEPTH)) {
-			nvme_complete_async_event(&nvmeq->dev->ctrl, &cqe);
+			nvme_complete_async_event(&nvmeq->dev->ctrl,
+					cqe.status, &cqe.result);
 			continue;
 		}

drivers/nvme/host/rdma.c

Lines changed: 2 additions & 1 deletion
@@ -1168,7 +1168,8 @@ static int __nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc, int tag)
 	 */
 	if (unlikely(nvme_rdma_queue_idx(queue) == 0 &&
 			cqe->command_id >= NVME_RDMA_AQ_BLKMQ_DEPTH))
-		nvme_complete_async_event(&queue->ctrl->ctrl, cqe);
+		nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
+				&cqe->result);
 	else
 		ret = nvme_rdma_process_nvme_rsp(queue, cqe, wc, tag);
 	ib_dma_sync_single_for_device(ibdev, qe->dma, len, DMA_FROM_DEVICE);

drivers/nvme/target/loop.c

Lines changed: 2 additions & 1 deletion
@@ -127,7 +127,8 @@ static void nvme_loop_queue_response(struct nvmet_req *req)
 	 */
 	if (unlikely(nvme_loop_queue_idx(iod->queue) == 0 &&
 			cqe->command_id >= NVME_LOOP_AQ_BLKMQ_DEPTH)) {
-		nvme_complete_async_event(&iod->queue->ctrl->ctrl, cqe);
+		nvme_complete_async_event(&iod->queue->ctrl->ctrl, cqe->status,
+				&cqe->result);
 	} else {
 		struct request *rq = blk_mq_rq_from_pdu(iod);