Commit d49187e

Christoph Hellwig authored and Jens Axboe committed
nvme: introduce struct nvme_request
This adds a shared per-request structure for all NVMe I/O. This structure is embedded as the first member in the request-private data of all NVMe transport drivers and allows common functionality to be implemented across the drivers.

The first use is to replace the current abuse of the SCSI command passthrough fields in struct request for NVMe command passthrough, but it will grow more fields to allow implementing things like common abort handlers in the future.

The passthrough commands are handled by having a pointer to the SQE (struct nvme_command) in struct nvme_request, together with the union of the possible result fields, which had to be turned from an anonymous into a named union for that purpose. This avoids having to pass a reference to a full CQE around and thus makes checking the result a lot more lightweight.

Signed-off-by: Christoph Hellwig <[email protected]>
Reviewed-by: Keith Busch <[email protected]>
Signed-off-by: Jens Axboe <[email protected]>
1 parent 41c9499 commit d49187e
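
As the commit message describes, synchronous passthrough callers no longer carry a full struct nvme_completion around just to read the command result; they pass __nvme_submit_sync_cmd a pointer to the (now named) result union and pick the field width they need. A minimal caller-side sketch of that convention (ctrl, fid and val stand in for the caller's own state; the real callers are in the core.c and fabrics.c hunks below):

        struct nvme_command c;
        union nvme_result res;
        int ret;

        memset(&c, 0, sizeof(c));
        c.features.opcode = nvme_admin_get_features;
        c.features.fid = cpu_to_le32(fid);

        /* The result pointer may be NULL when the caller does not need it. */
        ret = __nvme_submit_sync_cmd(ctrl->admin_q, &c, &res, NULL, 0, 0,
                        NVME_QID_ANY, 0, 0);
        if (ret >= 0)
                *val = le32_to_cpu(res.u32);    /* res.u16/res.u64 for other widths */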

File tree

11 files changed, +76 -86 lines changed


drivers/nvme/host/core.c

Lines changed: 13 additions & 15 deletions
@@ -221,8 +221,7 @@ struct request *nvme_alloc_request(struct request_queue *q,
 
         req->cmd_type = REQ_TYPE_DRV_PRIV;
         req->cmd_flags |= REQ_FAILFAST_DRIVER;
-        req->cmd = (unsigned char *)cmd;
-        req->cmd_len = sizeof(struct nvme_command);
+        nvme_req(req)->cmd = cmd;
 
         return req;
 }
@@ -321,7 +320,7 @@ int nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
         int ret = 0;
 
         if (req->cmd_type == REQ_TYPE_DRV_PRIV)
-                memcpy(cmd, req->cmd, sizeof(*cmd));
+                memcpy(cmd, nvme_req(req)->cmd, sizeof(*cmd));
         else if (req_op(req) == REQ_OP_FLUSH)
                 nvme_setup_flush(ns, cmd);
         else if (req_op(req) == REQ_OP_DISCARD)
@@ -338,7 +337,7 @@ EXPORT_SYMBOL_GPL(nvme_setup_cmd);
  *      if the result is positive, it's an NVM Express status code
  */
 int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
-                struct nvme_completion *cqe, void *buffer, unsigned bufflen,
+                union nvme_result *result, void *buffer, unsigned bufflen,
                 unsigned timeout, int qid, int at_head, int flags)
 {
         struct request *req;
@@ -349,7 +348,6 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
                 return PTR_ERR(req);
 
         req->timeout = timeout ? timeout : ADMIN_TIMEOUT;
-        req->special = cqe;
 
         if (buffer && bufflen) {
                 ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL);
@@ -358,6 +356,8 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
         }
 
         blk_execute_rq(req->q, NULL, req, at_head);
+        if (result)
+                *result = nvme_req(req)->result;
         ret = req->errors;
  out:
         blk_mq_free_request(req);
@@ -379,7 +379,6 @@ int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
                 u32 *result, unsigned timeout)
 {
         bool write = nvme_is_write(cmd);
-        struct nvme_completion cqe;
         struct nvme_ns *ns = q->queuedata;
         struct gendisk *disk = ns ? ns->disk : NULL;
         struct request *req;
@@ -392,7 +391,6 @@ int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
                 return PTR_ERR(req);
 
         req->timeout = timeout ? timeout : ADMIN_TIMEOUT;
-        req->special = &cqe;
 
         if (ubuffer && bufflen) {
                 ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen,
@@ -447,7 +445,7 @@ int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
         blk_execute_rq(req->q, disk, req, 0);
         ret = req->errors;
         if (result)
-                *result = le32_to_cpu(cqe.result);
+                *result = le32_to_cpu(nvme_req(req)->result.u32);
         if (meta && !ret && !write) {
                 if (copy_to_user(meta_buffer, meta, meta_len))
                         ret = -EFAULT;
@@ -596,37 +594,37 @@ int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid,
                 void *buffer, size_t buflen, u32 *result)
 {
         struct nvme_command c;
-        struct nvme_completion cqe;
+        union nvme_result res;
         int ret;
 
         memset(&c, 0, sizeof(c));
         c.features.opcode = nvme_admin_get_features;
         c.features.nsid = cpu_to_le32(nsid);
         c.features.fid = cpu_to_le32(fid);
 
-        ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe, buffer, buflen, 0,
+        ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res, buffer, buflen, 0,
                         NVME_QID_ANY, 0, 0);
         if (ret >= 0 && result)
-                *result = le32_to_cpu(cqe.result);
+                *result = le32_to_cpu(res.u32);
         return ret;
 }
 
 int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
                 void *buffer, size_t buflen, u32 *result)
 {
         struct nvme_command c;
-        struct nvme_completion cqe;
+        union nvme_result res;
         int ret;
 
         memset(&c, 0, sizeof(c));
         c.features.opcode = nvme_admin_set_features;
         c.features.fid = cpu_to_le32(fid);
         c.features.dword11 = cpu_to_le32(dword11);
 
-        ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe,
+        ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res,
                         buffer, buflen, 0, NVME_QID_ANY, 0, 0);
         if (ret >= 0 && result)
-                *result = le32_to_cpu(cqe.result);
+                *result = le32_to_cpu(res.u32);
         return ret;
 }
 
@@ -1901,7 +1899,7 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl,
                 struct nvme_completion *cqe)
 {
         u16 status = le16_to_cpu(cqe->status) >> 1;
-        u32 result = le32_to_cpu(cqe->result);
+        u32 result = le32_to_cpu(cqe->result.u32);
 
         if (status == NVME_SC_SUCCESS || status == NVME_SC_ABORT_REQ) {
                 ++ctrl->event_limit;

drivers/nvme/host/fabrics.c

Lines changed: 13 additions & 13 deletions
@@ -161,19 +161,19 @@ EXPORT_SYMBOL_GPL(nvmf_get_subsysnqn);
 int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
 {
         struct nvme_command cmd;
-        struct nvme_completion cqe;
+        union nvme_result res;
         int ret;
 
         memset(&cmd, 0, sizeof(cmd));
         cmd.prop_get.opcode = nvme_fabrics_command;
         cmd.prop_get.fctype = nvme_fabrics_type_property_get;
         cmd.prop_get.offset = cpu_to_le32(off);
 
-        ret = __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, &cqe, NULL, 0, 0,
+        ret = __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, &res, NULL, 0, 0,
                         NVME_QID_ANY, 0, 0);
 
         if (ret >= 0)
-                *val = le64_to_cpu(cqe.result64);
+                *val = le64_to_cpu(res.u64);
         if (unlikely(ret != 0))
                 dev_err(ctrl->device,
                         "Property Get error: %d, offset %#x\n",
@@ -207,7 +207,7 @@ EXPORT_SYMBOL_GPL(nvmf_reg_read32);
 int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
 {
         struct nvme_command cmd;
-        struct nvme_completion cqe;
+        union nvme_result res;
         int ret;
 
         memset(&cmd, 0, sizeof(cmd));
@@ -216,11 +216,11 @@ int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
         cmd.prop_get.attrib = 1;
         cmd.prop_get.offset = cpu_to_le32(off);
 
-        ret = __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, &cqe, NULL, 0, 0,
+        ret = __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, &res, NULL, 0, 0,
                         NVME_QID_ANY, 0, 0);
 
         if (ret >= 0)
-                *val = le64_to_cpu(cqe.result64);
+                *val = le64_to_cpu(res.u64);
         if (unlikely(ret != 0))
                 dev_err(ctrl->device,
                         "Property Get error: %d, offset %#x\n",
@@ -368,7 +368,7 @@ static void nvmf_log_connect_error(struct nvme_ctrl *ctrl,
 int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
 {
         struct nvme_command cmd;
-        struct nvme_completion cqe;
+        union nvme_result res;
         struct nvmf_connect_data *data;
         int ret;
 
@@ -400,16 +400,16 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
         strncpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE);
         strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);
 
-        ret = __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, &cqe,
+        ret = __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, &res,
                         data, sizeof(*data), 0, NVME_QID_ANY, 1,
                         BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
         if (ret) {
-                nvmf_log_connect_error(ctrl, ret, le32_to_cpu(cqe.result),
+                nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32),
                                        &cmd, data);
                 goto out_free_data;
         }
 
-        ctrl->cntlid = le16_to_cpu(cqe.result16);
+        ctrl->cntlid = le16_to_cpu(res.u16);
 
 out_free_data:
         kfree(data);
@@ -441,7 +441,7 @@ int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid)
 {
         struct nvme_command cmd;
         struct nvmf_connect_data *data;
-        struct nvme_completion cqe;
+        union nvme_result res;
         int ret;
 
         memset(&cmd, 0, sizeof(cmd));
@@ -459,11 +459,11 @@ int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid)
         strncpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE);
         strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);
 
-        ret = __nvme_submit_sync_cmd(ctrl->connect_q, &cmd, &cqe,
+        ret = __nvme_submit_sync_cmd(ctrl->connect_q, &cmd, &res,
                         data, sizeof(*data), 0, qid, 1,
                         BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
         if (ret) {
-                nvmf_log_connect_error(ctrl, ret, le32_to_cpu(cqe.result),
+                nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32),
                                        &cmd, data);
         }
         kfree(data);

drivers/nvme/host/lightnvm.c

Lines changed: 7 additions & 24 deletions
@@ -146,14 +146,6 @@ struct nvme_nvm_command {
         };
 };
 
-struct nvme_nvm_completion {
-        __le64  result;         /* Used by LightNVM to return ppa completions */
-        __le16  sq_head;        /* how much of this queue may be reclaimed */
-        __le16  sq_id;          /* submission queue that generated this entry */
-        __u16   command_id;     /* of the command which completed */
-        __le16  status;         /* did the command fail, and if so, why? */
-};
-
 #define NVME_NVM_LP_MLC_PAIRS 886
 struct nvme_nvm_lp_mlc {
         __le16 num_pairs;
@@ -481,11 +473,8 @@ static inline void nvme_nvm_rqtocmd(struct request *rq, struct nvm_rq *rqd,
 static void nvme_nvm_end_io(struct request *rq, int error)
 {
         struct nvm_rq *rqd = rq->end_io_data;
-        struct nvme_nvm_completion *cqe = rq->special;
-
-        if (cqe)
-                rqd->ppa_status = le64_to_cpu(cqe->result);
 
+        rqd->ppa_status = nvme_req(rq)->result.u64;
         nvm_end_io(rqd, error);
 
         kfree(rq->cmd);
@@ -500,20 +489,18 @@ static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
         struct bio *bio = rqd->bio;
         struct nvme_nvm_command *cmd;
 
-        rq = blk_mq_alloc_request(q, bio_data_dir(bio), 0);
-        if (IS_ERR(rq))
+        cmd = kzalloc(sizeof(struct nvme_nvm_command), GFP_KERNEL);
+        if (!cmd)
                 return -ENOMEM;
 
-        cmd = kzalloc(sizeof(struct nvme_nvm_command) +
-                                sizeof(struct nvme_nvm_completion), GFP_KERNEL);
-        if (!cmd) {
-                blk_mq_free_request(rq);
+        rq = nvme_alloc_request(q, (struct nvme_command *)cmd, 0, NVME_QID_ANY);
+        if (IS_ERR(rq)) {
+                kfree(cmd);
                 return -ENOMEM;
         }
+        rq->cmd_flags &= ~REQ_FAILFAST_DRIVER;
 
-        rq->cmd_type = REQ_TYPE_DRV_PRIV;
         rq->ioprio = bio_prio(bio);
-
         if (bio_has_data(bio))
                 rq->nr_phys_segments = bio_phys_segments(q, bio);
 
@@ -522,10 +509,6 @@ static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
 
         nvme_nvm_rqtocmd(rq, rqd, ns, cmd);
 
-        rq->cmd = (unsigned char *)cmd;
-        rq->cmd_len = sizeof(struct nvme_nvm_command);
-        rq->special = cmd + 1;
-
         rq->end_io_data = rqd;
 
         blk_execute_rq_nowait(q, NULL, rq, 0, nvme_nvm_end_io);

drivers/nvme/host/nvme.h

Lines changed: 15 additions & 1 deletion
@@ -79,6 +79,20 @@ enum nvme_quirks {
         NVME_QUIRK_DELAY_BEFORE_CHK_RDY         = (1 << 3),
 };
 
+/*
+ * Common request structure for NVMe passthrough.  All drivers must have
+ * this structure as the first member of their request-private data.
+ */
+struct nvme_request {
+        struct nvme_command     *cmd;
+        union nvme_result       result;
+};
+
+static inline struct nvme_request *nvme_req(struct request *req)
+{
+        return blk_mq_rq_to_pdu(req);
+}
+
 /* The below value is the specific amount of delay needed before checking
  * readiness in case of the PCI_DEVICE(0x1c58, 0x0003), which needs the
  * NVME_QUIRK_DELAY_BEFORE_CHK_RDY quirk enabled. The value (in ms) was
@@ -278,7 +292,7 @@ int nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
 int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
                 void *buf, unsigned bufflen);
 int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
-                struct nvme_completion *cqe, void *buffer, unsigned bufflen,
+                union nvme_result *result, void *buffer, unsigned bufflen,
                 unsigned timeout, int qid, int at_head, int flags);
 int nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
                 void __user *ubuffer, unsigned bufflen, u32 *result,
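
The nvme_req() helper added above can simply return blk_mq_rq_to_pdu() because every transport keeps struct nvme_request at offset zero of its request-private data, so the pdu pointer and the nvme_request pointer coincide. A hedged sketch of that layout and of the completion side (my_pdu and my_complete_rq are illustrative names only; the real layouts are the nvme_iod and nvme_rdma_request hunks below):

        struct my_pdu {
                struct nvme_request     req;    /* must remain the first member */
                /* transport-specific per-request state follows */
                int                     aborted;
        };

        static void my_complete_rq(struct request *rq, struct nvme_completion *cqe)
        {
                /* Stash only the result union rather than copying the whole CQE. */
                nvme_req(rq)->result = cqe->result;
                blk_mq_complete_request(rq, le16_to_cpu(cqe->status) >> 1);
        }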

drivers/nvme/host/pci.c

Lines changed: 2 additions & 2 deletions
@@ -140,6 +140,7 @@ struct nvme_queue {
  * allocated to store the PRP list.
  */
 struct nvme_iod {
+        struct nvme_request req;
         struct nvme_queue *nvmeq;
         int aborted;
         int npages;             /* In the PRP list. 0 means small pool in use */
@@ -707,8 +708,7 @@ static void __nvme_process_cq(struct nvme_queue *nvmeq, unsigned int *tag)
                 }
 
                 req = blk_mq_tag_to_rq(*nvmeq->tags, cqe.command_id);
-                if (req->cmd_type == REQ_TYPE_DRV_PRIV && req->special)
-                        memcpy(req->special, &cqe, sizeof(cqe));
+                nvme_req(req)->result = cqe.result;
                 blk_mq_complete_request(req, le16_to_cpu(cqe.status) >> 1);
 
         }

drivers/nvme/host/rdma.c

Lines changed: 3 additions & 8 deletions
@@ -66,6 +66,7 @@ struct nvme_rdma_qe {
 
 struct nvme_rdma_queue;
 struct nvme_rdma_request {
+        struct nvme_request     req;
         struct ib_mr            *mr;
         struct nvme_rdma_qe     sqe;
         struct ib_sge           sge[1 + NVME_RDMA_MAX_INLINE_SEGMENTS];
@@ -1117,13 +1118,10 @@ static void nvme_rdma_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
 static int nvme_rdma_process_nvme_rsp(struct nvme_rdma_queue *queue,
                 struct nvme_completion *cqe, struct ib_wc *wc, int tag)
 {
-        u16 status = le16_to_cpu(cqe->status);
         struct request *rq;
         struct nvme_rdma_request *req;
         int ret = 0;
 
-        status >>= 1;
-
         rq = blk_mq_tag_to_rq(nvme_rdma_tagset(queue), cqe->command_id);
         if (!rq) {
                 dev_err(queue->ctrl->ctrl.device,
@@ -1134,18 +1132,15 @@ static int nvme_rdma_process_nvme_rsp(struct nvme_rdma_queue *queue,
         }
         req = blk_mq_rq_to_pdu(rq);
 
-        if (rq->cmd_type == REQ_TYPE_DRV_PRIV && rq->special)
-                memcpy(rq->special, cqe, sizeof(*cqe));
-
         if (rq->tag == tag)
                 ret = 1;
 
         if ((wc->wc_flags & IB_WC_WITH_INVALIDATE) &&
             wc->ex.invalidate_rkey == req->mr->rkey)
                 req->mr->need_inval = false;
 
-        blk_mq_complete_request(rq, status);
-
+        req->req.result = cqe->result;
+        blk_mq_complete_request(rq, le16_to_cpu(cqe->status) >> 1);
         return ret;
 }
