
Commit 4ddb4d9

mjkravetz authored and akpm00 committed
hugetlb: do not update address in huge_pmd_unshare
As an optimization for loops sequentially processing hugetlb address ranges, huge_pmd_unshare would update a passed address if it unshared a pmd. Updating a loop control variable outside the loop like this is generally a bad idea. These loops are now using hugetlb_mask_last_page to optimize scanning when non-present ptes are discovered. The same can be done when huge_pmd_unshare returns 1, indicating a pmd was unshared.

Remove the address update from huge_pmd_unshare. Change the passed argument type and update all callers. In loops sequentially processing addresses, use hugetlb_mask_last_page to update the address if a pmd is unshared.

[[email protected]: fix an unused variable warning/error]
Link: https://lkml.kernel.org/r/[email protected]
Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Mike Kravetz <[email protected]>
Signed-off-by: Stephen Rothwell <[email protected]>
Acked-by: Muchun Song <[email protected]>
Reviewed-by: Baolin Wang <[email protected]>
Cc: "Aneesh Kumar K.V" <[email protected]>
Cc: Anshuman Khandual <[email protected]>
Cc: Catalin Marinas <[email protected]>
Cc: Christian Borntraeger <[email protected]>
Cc: David Hildenbrand <[email protected]>
Cc: James Houghton <[email protected]>
Cc: kernel test robot <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Mina Almasry <[email protected]>
Cc: Naoya Horiguchi <[email protected]>
Cc: Paul Walmsley <[email protected]>
Cc: Peter Xu <[email protected]>
Cc: Rolf Eike Beer <[email protected]>
Cc: Will Deacon <[email protected]>
Cc: Stephen Rothwell <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
1 parent 1bcdb76 commit 4ddb4d9

3 files changed, 21 insertions(+), 31 deletions(-)
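Before the per-file diffs, here is a minimal sketch of the caller-side pattern this commit settles on (illustrative only, not a line-for-line excerpt of the kernel source): huge_pmd_unshare() now takes the address by value, so it can no longer move the caller's loop variable behind its back; instead, each loop skips the rest of an unshared PUD_SIZE area itself using the mask from hugetlb_mask_last_page(), the same way it already skips non-present ptes.

	unsigned long last_addr_mask = hugetlb_mask_last_page(h);
	unsigned long sz = huge_page_size(h);
	unsigned long address;
	pte_t *ptep;

	for (address = start; address < end; address += sz) {
		ptep = huge_pte_offset(mm, address, sz);
		if (!ptep) {
			/* No page table here: skip to the last page of this area. */
			address |= last_addr_mask;
			continue;
		}
		if (huge_pmd_unshare(mm, vma, address, ptep)) {
			/*
			 * A whole PUD_SIZE area was just unmapped.  Move to its
			 * last huge page so the loop increment steps past it.
			 */
			address |= last_addr_mask;
			continue;
		}
		/* ... process the present pte at 'address' ... */
	}

This is exactly the shape of the updated loops in __unmap_hugepage_range(), hugetlb_change_protection(), and move_hugetlb_page_tables() below.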

include/linux/hugetlb.h

Lines changed: 2 additions & 2 deletions
@@ -196,7 +196,7 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
 		       unsigned long addr, unsigned long sz);
 unsigned long hugetlb_mask_last_page(struct hstate *h);
 int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
-				unsigned long *addr, pte_t *ptep);
+				unsigned long addr, pte_t *ptep);
 void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
 				unsigned long *start, unsigned long *end);
 struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
@@ -243,7 +243,7 @@ static inline struct address_space *hugetlb_page_mapping_lock_write(
 
 static inline int huge_pmd_unshare(struct mm_struct *mm,
 				   struct vm_area_struct *vma,
-				   unsigned long *addr, pte_t *ptep)
+				   unsigned long addr, pte_t *ptep)
 {
 	return 0;
 }

mm/hugetlb.c

Lines changed: 17 additions & 27 deletions
@@ -4935,7 +4935,6 @@ int move_hugetlb_page_tables(struct vm_area_struct *vma,
 	struct mm_struct *mm = vma->vm_mm;
 	unsigned long old_end = old_addr + len;
 	unsigned long last_addr_mask;
-	unsigned long old_addr_copy;
 	pte_t *src_pte, *dst_pte;
 	struct mmu_notifier_range range;
 	bool shared_pmd = false;
@@ -4963,14 +4962,10 @@ int move_hugetlb_page_tables(struct vm_area_struct *vma,
 		if (huge_pte_none(huge_ptep_get(src_pte)))
 			continue;
 
-		/* old_addr arg to huge_pmd_unshare() is a pointer and so the
-		 * arg may be modified. Pass a copy instead to preserve the
-		 * value in old_addr.
-		 */
-		old_addr_copy = old_addr;
-
-		if (huge_pmd_unshare(mm, vma, &old_addr_copy, src_pte)) {
+		if (huge_pmd_unshare(mm, vma, old_addr, src_pte)) {
 			shared_pmd = true;
+			old_addr |= last_addr_mask;
+			new_addr |= last_addr_mask;
 			continue;
 		}
@@ -5035,10 +5030,11 @@ static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct
 		}
 
 		ptl = huge_pte_lock(h, mm, ptep);
-		if (huge_pmd_unshare(mm, vma, &address, ptep)) {
+		if (huge_pmd_unshare(mm, vma, address, ptep)) {
 			spin_unlock(ptl);
 			tlb_flush_pmd_range(tlb, address & PUD_MASK, PUD_SIZE);
 			force_flush = true;
+			address |= last_addr_mask;
 			continue;
 		}
 
@@ -6327,7 +6323,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
 			continue;
 		}
 		ptl = huge_pte_lock(h, mm, ptep);
-		if (huge_pmd_unshare(mm, vma, &address, ptep)) {
+		if (huge_pmd_unshare(mm, vma, address, ptep)) {
 			/*
 			 * When uffd-wp is enabled on the vma, unshare
 			 * shouldn't happen at all. Warn about it if it
@@ -6337,6 +6333,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
 			pages++;
 			spin_unlock(ptl);
 			shared_pmd = true;
+			address |= last_addr_mask;
 			continue;
 		}
 		pte = huge_ptep_get(ptep);
@@ -6759,11 +6756,11 @@ pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
  *	    0 the underlying pte page is not shared, or it is the last user
  */
 int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
-					unsigned long *addr, pte_t *ptep)
+					unsigned long addr, pte_t *ptep)
 {
-	pgd_t *pgd = pgd_offset(mm, *addr);
-	p4d_t *p4d = p4d_offset(pgd, *addr);
-	pud_t *pud = pud_offset(p4d, *addr);
+	pgd_t *pgd = pgd_offset(mm, addr);
+	p4d_t *p4d = p4d_offset(pgd, addr);
+	pud_t *pud = pud_offset(p4d, addr);
 
 	i_mmap_assert_write_locked(vma->vm_file->f_mapping);
 	BUG_ON(page_count(virt_to_page(ptep)) == 0);
@@ -6773,14 +6770,6 @@ int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
 	pud_clear(pud);
 	put_page(virt_to_page(ptep));
 	mm_dec_nr_pmds(mm);
-	/*
-	 * This update of passed address optimizes loops sequentially
-	 * processing addresses in increments of huge page size (PMD_SIZE
-	 * in this case). By clearing the pud, a PUD_SIZE area is unmapped.
-	 * Update address to the 'last page' in the cleared area so that
-	 * calling loop can move to first page past this area.
-	 */
-	*addr |= PUD_SIZE - PMD_SIZE;
 	return 1;
 }
 
@@ -6792,7 +6781,7 @@ pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
 }
 
 int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
-					unsigned long *addr, pte_t *ptep)
+					unsigned long addr, pte_t *ptep)
 {
 	return 0;
 }
@@ -6899,6 +6888,10 @@ unsigned long hugetlb_mask_last_page(struct hstate *h)
 /* See description above.  Architectures can provide their own version. */
 __weak unsigned long hugetlb_mask_last_page(struct hstate *h)
 {
+#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
+	if (huge_page_size(h) == PMD_SIZE)
+		return PUD_SIZE - PMD_SIZE;
+#endif
 	return 0UL;
 }
 
@@ -7125,14 +7118,11 @@ void hugetlb_unshare_all_pmds(struct vm_area_struct *vma)
 	mmu_notifier_invalidate_range_start(&range);
 	i_mmap_lock_write(vma->vm_file->f_mapping);
 	for (address = start; address < end; address += PUD_SIZE) {
-		unsigned long tmp = address;
-
 		ptep = huge_pte_offset(mm, address, sz);
 		if (!ptep)
 			continue;
 		ptl = huge_pte_lock(h, mm, ptep);
-		/* We don't want 'address' to be changed */
-		huge_pmd_unshare(mm, vma, &tmp, ptep);
+		huge_pmd_unshare(mm, vma, address, ptep);
 		spin_unlock(ptl);
 	}
 	flush_hugetlb_tlb_range(vma, start, end);
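A worked example of the mask arithmetic in the weak hugetlb_mask_last_page() above (the sizes are the usual x86-64 values, not something this diff specifies): with PMD_SIZE = 2 MiB and PUD_SIZE = 1 GiB, the function returns PUD_SIZE - PMD_SIZE = 0x3fe00000. ORing a PMD-aligned address with that mask yields the last 2 MiB page of its 1 GiB-aligned area, so the caller's += PMD_SIZE increment lands on the first page past the area:

	/* Standalone arithmetic check; plain userspace C, not kernel code. */
	#include <stdio.h>

	int main(void)
	{
		unsigned long pmd_size = 1UL << 21;       /* 2 MiB */
		unsigned long pud_size = 1UL << 30;       /* 1 GiB */
		unsigned long mask = pud_size - pmd_size; /* 0x3fe00000 */
		unsigned long address = 0x40a00000UL;     /* inside the area at 0x40000000 */

		address |= mask;     /* 0x7fe00000: last PMD-sized page of the area */
		address += pmd_size; /* 0x80000000: first page of the next area */
		printf("%#lx\n", address);
		return 0;
	}

This is the same skip the removed in-callee comment described, now performed by the callers.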

mm/rmap.c

Lines changed: 2 additions & 2 deletions
@@ -1559,7 +1559,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
 			 * do this outside rmap routines.
 			 */
 			VM_BUG_ON(!anon && !(flags & TTU_RMAP_LOCKED));
-			if (!anon && huge_pmd_unshare(mm, vma, &address, pvmw.pte)) {
+			if (!anon && huge_pmd_unshare(mm, vma, address, pvmw.pte)) {
 				flush_tlb_range(vma, range.start, range.end);
 				mmu_notifier_invalidate_range(mm, range.start,
 							      range.end);
@@ -1920,7 +1920,7 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
 			 * do this outside rmap routines.
 			 */
 			VM_BUG_ON(!anon && !(flags & TTU_RMAP_LOCKED));
-			if (!anon && huge_pmd_unshare(mm, vma, &address, pvmw.pte)) {
+			if (!anon && huge_pmd_unshare(mm, vma, address, pvmw.pte)) {
 				flush_tlb_range(vma, range.start, range.end);
 				mmu_notifier_invalidate_range(mm, range.start,
 							      range.end);
