@@ -221,7 +221,7 @@ struct nvme_iod {
 	int npages;		/* In the PRP list. 0 means small pool in use */
 	int nents;		/* Used in scatterlist */
 	dma_addr_t first_dma;
-	struct scatterlist meta_sg; /* metadata requires single contiguous buffer */
+	dma_addr_t meta_dma;
 	struct scatterlist *sg;
 	struct scatterlist inline_sg[0];
 };
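
The scatterlist member goes away because the NVMe Metadata Pointer (MPTR) takes a single bus address, so a bare dma_addr_t is all the driver needs to remember. The dma_map_bvec() helper used below is, roughly, a thin wrapper that hands one bio_vec's page, offset, and length to dma_map_page_attrs(); a paraphrased sketch of its shape (see <linux/dma-mapping.h> for the exact definition):

/* Approximate shape of dma_map_bvec(); paraphrased, not copied verbatim. */
#define dma_map_bvec(dev, bv, dir, attrs)				\
	dma_map_page_attrs(dev, (bv)->bv_page, (bv)->bv_offset,	\
			   (bv)->bv_len, (dir), (attrs))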
@@ -592,13 +592,16 @@ static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
 	dma_addr_t dma_addr = iod->first_dma, next_dma_addr;
 	int i;
 
+	if (blk_integrity_rq(req)) {
+		dma_unmap_page(dev->dev, iod->meta_dma,
+			       rq_integrity_vec(req)->bv_len, dma_dir);
+	}
+
 	if (iod->nents) {
 		/* P2PDMA requests do not need to be unmapped */
 		if (!is_pci_p2pdma_page(sg_page(iod->sg)))
 			dma_unmap_sg(dev->dev, iod->sg, iod->nents, dma_dir);
 
-		if (blk_integrity_rq(req))
-			dma_unmap_sg(dev->dev, &iod->meta_sg, 1, dma_dir);
 	}
 
 	if (iod->npages == 0)
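
Note that the metadata unmap now runs before, and independently of, the data scatterlist teardown: iod->meta_dma is created without reference to iod->sg, and unlike P2PDMA data pages it is always an ordinary streaming mapping, so it no longer belongs inside the if (iod->nents) block.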
@@ -861,17 +864,11 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
 
 	ret = BLK_STS_IOERR;
 	if (blk_integrity_rq(req)) {
-		if (blk_rq_count_integrity_sg(q, req->bio) != 1)
-			goto out;
-
-		sg_init_table(&iod->meta_sg, 1);
-		if (blk_rq_map_integrity_sg(q, req->bio, &iod->meta_sg) != 1)
-			goto out;
-
-		if (!dma_map_sg(dev->dev, &iod->meta_sg, 1, dma_dir))
+		iod->meta_dma = dma_map_bvec(dev->dev, rq_integrity_vec(req),
+					     dma_dir, 0);
+		if (dma_mapping_error(dev->dev, iod->meta_dma))
 			goto out;
-
-		cmnd->rw.metadata = cpu_to_le64(sg_dma_address(&iod->meta_sg));
+		cmnd->rw.metadata = cpu_to_le64(iod->meta_dma);
 	}
 
 	return BLK_STS_OK;
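
Taken together, per-request metadata handling collapses to one map/unmap pair. A minimal sketch of the resulting pattern, assuming (as nvme-pci guarantees by advertising at most one integrity segment) that rq_integrity_vec() describes the entire metadata buffer; the helpers map_metadata()/unmap_metadata() are illustrative names, not the driver's:

/*
 * Sketch only: the mapped bus address goes into the command's MPTR
 * field, and the unmap must mirror the map's length and direction.
 */
static blk_status_t map_metadata(struct device *dmadev, struct request *req,
				 enum dma_data_direction dma_dir,
				 struct nvme_command *cmnd, dma_addr_t *meta_dma)
{
	/* single contiguous segment, per the queue's integrity limits */
	struct bio_vec *bv = rq_integrity_vec(req);

	*meta_dma = dma_map_bvec(dmadev, bv, dma_dir, 0);
	if (dma_mapping_error(dmadev, *meta_dma))
		return BLK_STS_IOERR;
	cmnd->rw.metadata = cpu_to_le64(*meta_dma);
	return BLK_STS_OK;
}

static void unmap_metadata(struct device *dmadev, struct request *req,
			   enum dma_data_direction dma_dir, dma_addr_t meta_dma)
{
	dma_unmap_page(dmadev, meta_dma, rq_integrity_vec(req)->bv_len, dma_dir);
}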