
Commit 75cc101

Lu Baolu authored and Joerg Roedel committed
iommu/vt-d: Move clflush'es from iotlb_sync_map() to map_pages()
As the Intel VT-d driver has switched to use the iommu_ops.map_pages() callback, multiple pages of the same size will be mapped in a single call. There's no need to put the clflush'es in the iotlb_sync_map() callback. Move them back into __domain_mapping() to simplify the code.

Signed-off-by: Lu Baolu <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Joerg Roedel <[email protected]>
1 parent 3f34f12 commit 75cc101
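For orientation, below is a minimal, self-contained C sketch of the flush-batching pattern this commit keeps inside __domain_mapping(): remember the first leaf PTE written in a run and issue one cache flush for the whole [first_pte, pte) range when the run ends (the next entry would start a new PTE page, or the mapping is complete), instead of re-walking the page table afterwards in iotlb_sync_map(). This is not the driver's code; PTES_PER_PAGE, flush_cache_range() and map_run() are illustrative stand-ins.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define PTES_PER_PAGE 512	/* 4 KiB page-table page / 8-byte entry */

/* Stand-in for domain_flush_cache(); the real driver clflush'es the range. */
static void flush_cache_range(void *start, size_t bytes)
{
	printf("flush %zu bytes at %p\n", bytes, start);
}

/*
 * Write nr_pages leaf entries starting at index idx of a (contiguous,
 * simplified) PTE array, flushing each batch of dirtied entries exactly once.
 */
static void map_run(uint64_t *ptes, unsigned int idx,
		    unsigned long nr_pages, uint64_t pteval)
{
	uint64_t *first_pte = NULL, *pte = NULL;

	while (nr_pages) {
		if (!pte) {
			pte = &ptes[idx];
			first_pte = pte;	/* start of a new flush batch */
		}

		*pte = pteval;			/* install the leaf entry */
		pteval += 4096;
		nr_pages--;
		idx++;
		pte++;

		/*
		 * End of the mapping, or the next entry would live in a new
		 * PTE page (mirrors first_pte_in_page()): flush the batch.
		 */
		if (!nr_pages || idx % PTES_PER_PAGE == 0) {
			flush_cache_range(first_pte,
					  (char *)pte - (char *)first_pte);
			pte = NULL;
		}
	}
}

int main(void)
{
	static uint64_t ptes[2 * PTES_PER_PAGE];

	/* A run that crosses a PTE-page boundary triggers two flushes. */
	map_run(ptes, 500, 20, 0x1000);
	return 0;
}

Batching this way costs at most one flush per page-table page touched, the same as the removed clflush_sync_map() helper, but without the second page-table walk.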

File tree: 1 file changed (+7, -41 lines)


drivers/iommu/intel/iommu.c

Lines changed: 7 additions & 41 deletions
@@ -2333,9 +2333,9 @@ static int
 __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
 		 unsigned long phys_pfn, unsigned long nr_pages, int prot)
 {
+	struct dma_pte *first_pte = NULL, *pte = NULL;
 	unsigned int largepage_lvl = 0;
 	unsigned long lvl_pages = 0;
-	struct dma_pte *pte = NULL;
 	phys_addr_t pteval;
 	u64 attr;
 
@@ -2368,6 +2368,8 @@ __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
 			pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl);
 			if (!pte)
 				return -ENOMEM;
+			first_pte = pte;
+
 			/* It is large page*/
 			if (largepage_lvl > 1) {
 				unsigned long end_pfn;
@@ -2415,14 +2417,14 @@ __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
 		 * recalculate 'pte' and switch back to smaller pages for the
 		 * end of the mapping, if the trailing size is not enough to
 		 * use another superpage (i.e. nr_pages < lvl_pages).
-		 *
-		 * We leave clflush for the leaf pte changes to iotlb_sync_map()
-		 * callback.
 		 */
 		pte++;
 		if (!nr_pages || first_pte_in_page(pte) ||
-		    (largepage_lvl > 1 && nr_pages < lvl_pages))
+		    (largepage_lvl > 1 && nr_pages < lvl_pages)) {
+			domain_flush_cache(domain, first_pte,
+					   (void *)pte - (void *)first_pte);
 			pte = NULL;
+		}
 	}
 
 	return 0;
@@ -5563,39 +5565,6 @@ static bool risky_device(struct pci_dev *pdev)
 	return false;
 }
 
-static void clflush_sync_map(struct dmar_domain *domain, unsigned long clf_pfn,
-			     unsigned long clf_pages)
-{
-	struct dma_pte *first_pte = NULL, *pte = NULL;
-	unsigned long lvl_pages = 0;
-	int level = 0;
-
-	while (clf_pages > 0) {
-		if (!pte) {
-			level = 0;
-			pte = pfn_to_dma_pte(domain, clf_pfn, &level);
-			if (WARN_ON(!pte))
-				return;
-			first_pte = pte;
-			lvl_pages = lvl_to_nr_pages(level);
-		}
-
-		if (WARN_ON(!lvl_pages || clf_pages < lvl_pages))
-			return;
-
-		clf_pages -= lvl_pages;
-		clf_pfn += lvl_pages;
-		pte++;
-
-		if (!clf_pages || first_pte_in_page(pte) ||
-		    (level > 1 && clf_pages < lvl_pages)) {
-			domain_flush_cache(domain, first_pte,
-					   (void *)pte - (void *)first_pte);
-			pte = NULL;
-		}
-	}
-}
-
 static void intel_iommu_iotlb_sync_map(struct iommu_domain *domain,
 				       unsigned long iova, size_t size)
 {
@@ -5605,9 +5574,6 @@ static void intel_iommu_iotlb_sync_map(struct iommu_domain *domain,
 	struct intel_iommu *iommu;
 	int iommu_id;
 
-	if (!dmar_domain->iommu_coherency)
-		clflush_sync_map(dmar_domain, pfn, pages);
-
 	for_each_domain_iommu(iommu_id, dmar_domain) {
 		iommu = g_iommus[iommu_id];
 		__mapping_notify_one(iommu, dmar_domain, pfn, pages);
