@@ -2333,9 +2333,9 @@ static int
 __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
                  unsigned long phys_pfn, unsigned long nr_pages, int prot)
 {
+        struct dma_pte *first_pte = NULL, *pte = NULL;
         unsigned int largepage_lvl = 0;
         unsigned long lvl_pages = 0;
-        struct dma_pte *pte = NULL;
         phys_addr_t pteval;
         u64 attr;
 
@@ -2368,6 +2368,8 @@ __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
                         pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl);
                         if (!pte)
                                 return -ENOMEM;
+                        first_pte = pte;
+
                         /* It is large page*/
                         if (largepage_lvl > 1) {
                                 unsigned long end_pfn;
@@ -2415,14 +2417,14 @@ __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
                  * recalculate 'pte' and switch back to smaller pages for the
                  * end of the mapping, if the trailing size is not enough to
                  * use another superpage (i.e. nr_pages < lvl_pages).
-                 *
-                 * We leave clflush for the leaf pte changes to iotlb_sync_map()
-                 * callback.
                  */
                 pte++;
                 if (!nr_pages || first_pte_in_page(pte) ||
-                    (largepage_lvl > 1 && nr_pages < lvl_pages))
+                    (largepage_lvl > 1 && nr_pages < lvl_pages)) {
+                        domain_flush_cache(domain, first_pte,
+                                           (void *)pte - (void *)first_pte);
                         pte = NULL;
+                }
         }
 
         return 0;
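
For readers unfamiliar with the pattern these three hunks restore, below is a small, self-contained user-space analogue (illustration only, not kernel code): struct fake_pte, flush_range() and the sizes are invented, while first_pte_in_page() mimics the kernel helper of the same name. It shows why first_pte is tracked in __domain_mapping(): PTE writes are accumulated and the CPU cache is flushed once per run, when the mapping is finished or the next entry would land in a different page-table page, instead of once per entry. It uses _mm_clflush, so it needs a C11 x86 compiler.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>
#include <immintrin.h>

#define PTES_PER_PAGE   512     /* 4 KiB page-table page / 8-byte entry */

struct fake_pte { uint64_t val; };

/* Stand-in for domain_flush_cache(): flush every cache line covering the
 * PTEs written since the current run began. */
static void flush_range(void *first, size_t size)
{
        for (size_t off = 0; off < size; off += 64)
                _mm_clflush((char *)first + off);
        printf("flushed %zu bytes in one batch\n", size);
}

/* True when pte is the first entry of a page-table page, i.e. the run of
 * contiguous writes cannot be extended past this point. */
static int first_pte_in_page(struct fake_pte *pte)
{
        return ((uintptr_t)pte & (PTES_PER_PAGE * sizeof(*pte) - 1)) == 0;
}

int main(void)
{
        static _Alignas(4096) struct fake_pte table[2 * PTES_PER_PAGE];
        struct fake_pte *pte = &table[500], *first_pte = pte;
        unsigned long nr_pages = 40;

        while (nr_pages) {
                pte->val = 1;   /* "install" one leaf PTE */
                pte++;
                nr_pages--;
                /* Flush once per run: the mapping is complete, or the next
                 * PTE would live in a different page-table page. */
                if (!nr_pages || first_pte_in_page(pte)) {
                        flush_range(first_pte, (char *)pte - (char *)first_pte);
                        first_pte = pte;
                }
        }
        return 0;
}

Run as-is it prints two batches (96 bytes up to the page boundary, then 224 bytes for the remainder), which is exactly the batching behaviour the domain_flush_cache() call in the hunk above provides.
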
@@ -5563,39 +5565,6 @@ static bool risky_device(struct pci_dev *pdev)
         return false;
 }
 
-static void clflush_sync_map(struct dmar_domain *domain, unsigned long clf_pfn,
-                             unsigned long clf_pages)
-{
-        struct dma_pte *first_pte = NULL, *pte = NULL;
-        unsigned long lvl_pages = 0;
-        int level = 0;
-
-        while (clf_pages > 0) {
-                if (!pte) {
-                        level = 0;
-                        pte = pfn_to_dma_pte(domain, clf_pfn, &level);
-                        if (WARN_ON(!pte))
-                                return;
-                        first_pte = pte;
-                        lvl_pages = lvl_to_nr_pages(level);
-                }
-
-                if (WARN_ON(!lvl_pages || clf_pages < lvl_pages))
-                        return;
-
-                clf_pages -= lvl_pages;
-                clf_pfn += lvl_pages;
-                pte++;
-
-                if (!clf_pages || first_pte_in_page(pte) ||
-                    (level > 1 && clf_pages < lvl_pages)) {
-                        domain_flush_cache(domain, first_pte,
-                                           (void *)pte - (void *)first_pte);
-                        pte = NULL;
-                }
-        }
-}
-
 static void intel_iommu_iotlb_sync_map(struct iommu_domain *domain,
                                        unsigned long iova, size_t size)
 {
@@ -5605,9 +5574,6 @@ static void intel_iommu_iotlb_sync_map(struct iommu_domain *domain,
         struct intel_iommu *iommu;
         int iommu_id;
 
-        if (!dmar_domain->iommu_coherency)
-                clflush_sync_map(dmar_domain, pfn, pages);
-
         for_each_domain_iommu(iommu_id, dmar_domain) {
                 iommu = g_iommus[iommu_id];
                 __mapping_notify_one(iommu, dmar_domain, pfn, pages);
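
With clflush_sync_map() gone, the coherency check that guarded it in iotlb_sync_map() disappears as well, and the callback is left with only the per-IOMMU notification loop. That is safe because domain_flush_cache(), now called directly from __domain_mapping(), performs the same check itself; paraphrasing that helper as it exists elsewhere in this driver (not part of this diff), it is a no-op whenever the IOMMU snoops page-table walks:

static void domain_flush_cache(struct dmar_domain *domain,
                               void *addr, int size)
{
        /* Only non-coherent hardware needs the CPU cache flushed. */
        if (!domain->iommu_coherency)
                clflush_cache_range(addr, size);
}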