@@ -23,6 +23,7 @@
 #include <linux/t10-pi.h>
 #include <linux/types.h>
 #include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/io-64-nonatomic-hi-lo.h>
 #include <linux/sed-opal.h>
 #include <linux/pci-p2pdma.h>
 
@@ -542,50 +543,71 @@ static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req)
 	return true;
 }
 
-static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
+static void nvme_free_prps(struct nvme_dev *dev, struct request *req)
 {
-	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
 	const int last_prp = NVME_CTRL_PAGE_SIZE / sizeof(__le64) - 1;
-	dma_addr_t dma_addr = iod->first_dma, next_dma_addr;
+	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+	dma_addr_t dma_addr = iod->first_dma;
 	int i;
 
-	if (iod->dma_len) {
-		dma_unmap_page(dev->dev, dma_addr, iod->dma_len,
-			       rq_dma_dir(req));
-		return;
+	for (i = 0; i < iod->npages; i++) {
+		__le64 *prp_list = nvme_pci_iod_list(req)[i];
+		dma_addr_t next_dma_addr = le64_to_cpu(prp_list[last_prp]);
+
+		dma_pool_free(dev->prp_page_pool, prp_list, dma_addr);
+		dma_addr = next_dma_addr;
 	}
 
-	WARN_ON_ONCE(!iod->nents);
+}
 
-	if (is_pci_p2pdma_page(sg_page(iod->sg)))
-		pci_p2pdma_unmap_sg(dev->dev, iod->sg, iod->nents,
-				    rq_dma_dir(req));
-	else
-		dma_unmap_sg(dev->dev, iod->sg, iod->nents, rq_dma_dir(req));
+static void nvme_free_sgls(struct nvme_dev *dev, struct request *req)
+{
+	const int last_sg = SGES_PER_PAGE - 1;
+	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+	dma_addr_t dma_addr = iod->first_dma;
+	int i;
 
+	for (i = 0; i < iod->npages; i++) {
+		struct nvme_sgl_desc *sg_list = nvme_pci_iod_list(req)[i];
+		dma_addr_t next_dma_addr = le64_to_cpu((sg_list[last_sg]).addr);
 
-	if (iod->npages == 0)
-		dma_pool_free(dev->prp_small_pool, nvme_pci_iod_list(req)[0],
-			dma_addr);
+		dma_pool_free(dev->prp_page_pool, sg_list, dma_addr);
+		dma_addr = next_dma_addr;
+	}
 
-	for (i = 0; i < iod->npages; i++) {
-		void *addr = nvme_pci_iod_list(req)[i];
+}
 
-		if (iod->use_sgl) {
-			struct nvme_sgl_desc *sg_list = addr;
+static void nvme_unmap_sg(struct nvme_dev *dev, struct request *req)
+{
+	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
 
-			next_dma_addr =
-				le64_to_cpu((sg_list[SGES_PER_PAGE - 1]).addr);
-		} else {
-			__le64 *prp_list = addr;
+	if (is_pci_p2pdma_page(sg_page(iod->sg)))
+		pci_p2pdma_unmap_sg(dev->dev, iod->sg, iod->nents,
+				    rq_dma_dir(req));
+	else
+		dma_unmap_sg(dev->dev, iod->sg, iod->nents, rq_dma_dir(req));
+}
 
-			next_dma_addr = le64_to_cpu(prp_list[last_prp]);
-		}
+static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
+{
+	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
 
-		dma_pool_free(dev->prp_page_pool, addr, dma_addr);
-		dma_addr = next_dma_addr;
+	if (iod->dma_len) {
+		dma_unmap_page(dev->dev, iod->first_dma, iod->dma_len,
+			       rq_dma_dir(req));
+		return;
 	}
 
+	WARN_ON_ONCE(!iod->nents);
+
+	nvme_unmap_sg(dev, req);
+	if (iod->npages == 0)
+		dma_pool_free(dev->prp_small_pool, nvme_pci_iod_list(req)[0],
+			      iod->first_dma);
+	else if (iod->use_sgl)
+		nvme_free_sgls(dev, req);
+	else
+		nvme_free_prps(dev, req);
 	mempool_free(iod->sg, dev->iod_mempool);
 }
 
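Note on the split above: the free helpers can walk the chain from iod->first_dma and the per-request page array alone because the setup paths store the bus address of each next pool page in the final slot of the page before it. A minimal sketch of that walk (hypothetical helper, not part of the patch), mainly to show that the link must be read before the page holding it is returned to the pool:

static void free_chained_pages(struct dma_pool *pool, void **pages,
			       int npages, dma_addr_t first_dma, int last_slot)
{
	dma_addr_t dma_addr = first_dma;
	int i;

	for (i = 0; i < npages; i++) {
		__le64 *page = pages[i];
		/* fetch the link before freeing the page it lives in */
		dma_addr_t next_dma_addr = le64_to_cpu(page[last_slot]);

		dma_pool_free(pool, page, dma_addr);
		dma_addr = next_dma_addr;
	}
}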
@@ -661,7 +683,7 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
 		__le64 *old_prp_list = prp_list;
 		prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
 		if (!prp_list)
-			return BLK_STS_RESOURCE;
+			goto free_prps;
 		list[iod->npages++] = prp_list;
 		prp_list[0] = old_prp_list[i - 1];
 		old_prp_list[i - 1] = cpu_to_le64(prp_dma);
@@ -681,14 +703,14 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
 		dma_addr = sg_dma_address(sg);
 		dma_len = sg_dma_len(sg);
 	}
-
 done:
 	cmnd->dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
 	cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma);
-
 	return BLK_STS_OK;
-
-bad_sgl:
+free_prps:
+	nvme_free_prps(dev, req);
+	return BLK_STS_RESOURCE;
+bad_sgl:
 	WARN(DO_ONCE(nvme_print_sgl, iod->sg, iod->nents),
 			"Invalid SGL for payload:%d nents:%d\n",
 			blk_rq_payload_bytes(req), iod->nents);
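The goto free_prps is safe because iod->npages counts only pages that are already linked into nvme_pci_iod_list(req); when dma_pool_alloc() fails, nvme_free_prps() therefore frees exactly the partial chain built so far. The allocation fragment from the previous hunk again, annotated (comments added here, not in the patch):

	__le64 *old_prp_list = prp_list;
	prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
	if (!prp_list)
		goto free_prps;		/* frees pages 0..npages-1 */
	list[iod->npages++] = prp_list;
	/* carry the displaced last entry over into the new page ... */
	prp_list[0] = old_prp_list[i - 1];
	/* ... and turn the old last slot into the chain link that
	 * nvme_free_prps() follows */
	old_prp_list[i - 1] = cpu_to_le64(prp_dma);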
@@ -760,7 +782,7 @@ static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
 
 		sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma);
 		if (!sg_list)
-			return BLK_STS_RESOURCE;
+			goto free_sgls;
 
 		i = 0;
 		nvme_pci_iod_list(req)[iod->npages++] = sg_list;
@@ -773,6 +795,9 @@ static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
 	} while (--entries > 0);
 
 	return BLK_STS_OK;
+free_sgls:
+	nvme_free_sgls(dev, req);
+	return BLK_STS_RESOURCE;
 }
 
 static blk_status_t nvme_setup_prp_simple(struct nvme_dev *dev,
@@ -841,7 +866,7 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
 	sg_init_table(iod->sg, blk_rq_nr_phys_segments(req));
 	iod->nents = blk_rq_map_sg(req->q, req, iod->sg);
 	if (!iod->nents)
-		goto out;
+		goto out_free_sg;
 
 	if (is_pci_p2pdma_page(sg_page(iod->sg)))
 		nr_mapped = pci_p2pdma_map_sg_attrs(dev->dev, iod->sg,
@@ -850,16 +875,21 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
 		nr_mapped = dma_map_sg_attrs(dev->dev, iod->sg, iod->nents,
 					     rq_dma_dir(req), DMA_ATTR_NO_WARN);
 	if (!nr_mapped)
-		goto out;
+		goto out_free_sg;
 
 	iod->use_sgl = nvme_pci_use_sgls(dev, req);
 	if (iod->use_sgl)
 		ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw, nr_mapped);
 	else
 		ret = nvme_pci_setup_prps(dev, req, &cmnd->rw);
-out:
 	if (ret != BLK_STS_OK)
-		nvme_unmap_data(dev, req);
+		goto out_unmap_sg;
+	return BLK_STS_OK;
+
+out_unmap_sg:
+	nvme_unmap_sg(dev, req);
+out_free_sg:
+	mempool_free(iod->sg, dev->iod_mempool);
 	return ret;
 }
 
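With this, nvme_map_data() unwinds only the steps it performed itself: the scatterlist is unmapped only on paths where the DMA mapping actually succeeded, and the mempool allocation is freed on every failure. Under the old shared out: label, a mapping failure (!iod->nents or !nr_mapped) still reached nvme_unmap_data(), which would attempt to unmap a scatterlist that had never been mapped. The new tail of the function, annotated (comments added here, not in the patch):

	if (ret != BLK_STS_OK)
		goto out_unmap_sg;
	return BLK_STS_OK;	/* success returns before the labels */

out_unmap_sg:
	nvme_unmap_sg(dev, req);	/* only reached after a successful map */
out_free_sg:
	mempool_free(iod->sg, dev->iod_mempool);
	return ret;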
@@ -1795,6 +1825,9 @@ static void nvme_map_cmb(struct nvme_dev *dev)
 	if (dev->cmb_size)
 		return;
 
+	if (NVME_CAP_CMBS(dev->ctrl.cap))
+		writel(NVME_CMBMSC_CRE, dev->bar + NVME_REG_CMBMSC);
+
 	dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ);
 	if (!dev->cmbsz)
 		return;
@@ -1808,6 +1841,16 @@ static void nvme_map_cmb(struct nvme_dev *dev)
 	if (offset > bar_size)
 		return;
 
+	/*
+	 * Tell the controller about the host side address mapping the CMB,
+	 * and enable CMB decoding for the NVMe 1.4+ scheme:
+	 */
+	if (NVME_CAP_CMBS(dev->ctrl.cap)) {
+		hi_lo_writeq(NVME_CMBMSC_CRE | NVME_CMBMSC_CMSE |
+			     (pci_bus_address(pdev, bar) + offset),
+			     dev->bar + NVME_REG_CMBMSC);
+	}
+
 	/*
 	 * Controllers may support a CMB size larger than their BAR,
 	 * for example, due to being behind a bridge. Reduce the CMB to
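On the CMB side: an NVMe 1.4 controller (CAP.CMBS set) keeps the CMB disabled until the host opts in. Setting CRE first is what makes CMBSZ/CMBLOC readable at all, hence the early writel() before the readl() of NVME_REG_CMBSZ; the 64-bit write then programs the controller base address and sets CMSE so the controller starts claiming memory accesses to the CMB. The newly included io-64-nonatomic-hi-lo.h provides hi_lo_writeq(), which on platforms without a native writeq splits the store with the high dword written first:

/* include/linux/io-64-nonatomic-hi-lo.h (relevant helper) */
static inline void hi_lo_writeq(__u64 val, volatile void __iomem *addr)
{
	writel(val >> 32, addr + 4);
	writel(val, addr);
}

The high-first ordering is presumably the point of choosing this variant: CMSE sits in the low dword of CMBMSC, so the enable bit only takes effect once the upper address bits are already in place.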