@@ -208,10 +208,10 @@ struct nvme_queue {
208208};
209209
210210/*
211- * The nvme_iod describes the data in an I/O, including the list of PRP
212- * entries. You can't see it in this data structure because C doesn't let
213- * me express that. Use nvme_init_iod to ensure there's enough space
214- * allocated to store the PRP list.
211+ * The nvme_iod describes the data in an I/O.
212+ *
213+ * The sg pointer contains the list of PRP/SGL chunk allocations in addition
214+ * to the actual struct scatterlist.
215215 */
216216struct nvme_iod {
217217 	struct nvme_request req;
@@ -583,29 +583,6 @@ static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req)
583583 return true;
584584}
585585
586- static blk_status_t nvme_init_iod(struct request *rq, struct nvme_dev *dev)
587- {
588- 	struct nvme_iod *iod = blk_mq_rq_to_pdu(rq);
589- 	int nseg = blk_rq_nr_phys_segments(rq);
590- 	unsigned int size = blk_rq_payload_bytes(rq);
591-
592- 	iod->use_sgl = nvme_pci_use_sgls(dev, rq);
593-
594- 	if (nseg > NVME_INT_PAGES || size > NVME_INT_BYTES(dev)) {
595- 		iod->sg = mempool_alloc(dev->iod_mempool, GFP_ATOMIC);
596- 		if (!iod->sg)
597- 			return BLK_STS_RESOURCE;
598- 	} else {
599- 		iod->sg = iod->inline_sg;
600- 	}
601-
602- 	iod->aborted = 0;
603- 	iod->npages = -1;
604- 	iod->nents = 0;
605-
606- 	return BLK_STS_OK;
607- }
608-
609586static void nvme_free_iod(struct nvme_dev *dev, struct request *req)
610587{
611588 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
@@ -837,6 +814,17 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
837814 	blk_status_t ret = BLK_STS_IOERR;
838815 	int nr_mapped;
839816
817+ 	if (blk_rq_payload_bytes(req) > NVME_INT_BYTES(dev) ||
818+ 	    blk_rq_nr_phys_segments(req) > NVME_INT_PAGES) {
819+ 		iod->sg = mempool_alloc(dev->iod_mempool, GFP_ATOMIC);
820+ 		if (!iod->sg)
821+ 			return BLK_STS_RESOURCE;
822+ 	} else {
823+ 		iod->sg = iod->inline_sg;
824+ 	}
825+
826+ 	iod->use_sgl = nvme_pci_use_sgls(dev, req);
827+
840828 	sg_init_table(iod->sg, blk_rq_nr_phys_segments(req));
841829 	iod->nents = blk_rq_map_sg(q, req, iod->sg);
842830 	if (!iod->nents)
@@ -881,6 +869,7 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
881869out_unmap:
882870 	dma_unmap_sg(dev->dev, iod->sg, iod->nents, dma_dir);
883871out:
872+ 	nvme_free_iod(dev, req);
884873 	return ret;
885874}
886875
@@ -913,9 +902,14 @@ static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
913902 	struct nvme_queue *nvmeq = hctx->driver_data;
914903 	struct nvme_dev *dev = nvmeq->dev;
915904 	struct request *req = bd->rq;
905+ 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
916906 	struct nvme_command cmnd;
917907 	blk_status_t ret;
918908
909+ 	iod->aborted = 0;
910+ 	iod->npages = -1;
911+ 	iod->nents = 0;
912+
919913 /*
920914 * We should not need to do this, but we're still using this to
921915 * ensure we can drain requests on a dying queue.
@@ -927,21 +921,15 @@ static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
927921 	if (ret)
928922 		return ret;
929923
930- 	ret = nvme_init_iod(req, dev);
931- 	if (ret)
932- 		goto out_free_cmd;
933-
934924 	if (blk_rq_nr_phys_segments(req)) {
935925 		ret = nvme_map_data(dev, req, &cmnd);
936926 		if (ret)
937- 			goto out_cleanup_iod;
927+ 			goto out_free_cmd;
938928 	}
939929
940930 	blk_mq_start_request(req);
941931 	nvme_submit_cmd(nvmeq, &cmnd, bd->last);
942932 	return BLK_STS_OK;
943- out_cleanup_iod:
944- 	nvme_free_iod(dev, req);
945933out_free_cmd:
946934 	nvme_cleanup_cmd(req);
947935 	return ret;
0 commit comments