@@ -4935,7 +4935,6 @@ int move_hugetlb_page_tables(struct vm_area_struct *vma,
 	struct mm_struct *mm = vma->vm_mm;
 	unsigned long old_end = old_addr + len;
 	unsigned long last_addr_mask;
-	unsigned long old_addr_copy;
 	pte_t *src_pte, *dst_pte;
 	struct mmu_notifier_range range;
 	bool shared_pmd = false;
@@ -4963,14 +4962,10 @@ int move_hugetlb_page_tables(struct vm_area_struct *vma,
 		if (huge_pte_none(huge_ptep_get(src_pte)))
 			continue;
 
-		/* old_addr arg to huge_pmd_unshare() is a pointer and so the
-		 * arg may be modified. Pass a copy instead to preserve the
-		 * value in old_addr.
-		 */
-		old_addr_copy = old_addr;
-
-		if (huge_pmd_unshare(mm, vma, &old_addr_copy, src_pte)) {
+		if (huge_pmd_unshare(mm, vma, old_addr, src_pte)) {
 			shared_pmd = true;
+			old_addr |= last_addr_mask;
+			new_addr |= last_addr_mask;
 			continue;
 		}
 
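The hunk above shows the new caller-side convention: huge_pmd_unshare() no longer advances the address through a pointer, so the loop ORs in last_addr_mask itself. A minimal sketch of that idiom (not a verbatim kernel excerpt; locking, mmu notifiers, and the dst_pte handling are omitted, and start, end, h, mm, and vma are assumed from surrounding context):

	unsigned long sz = huge_page_size(h);
	unsigned long last_addr_mask = hugetlb_mask_last_page(h);
	unsigned long addr;
	pte_t *ptep;

	for (addr = start; addr < end; addr += sz) {
		ptep = huge_pte_offset(mm, addr, sz);
		if (!ptep)
			continue;
		if (huge_pmd_unshare(mm, vma, addr, ptep)) {
			/* a whole PUD_SIZE area was just unmapped: jump to
			 * its last huge page so addr += sz steps past it */
			addr |= last_addr_mask;
			continue;
		}
		/* ... per-huge-page processing ... */
	}
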
@@ -5035,10 +5030,11 @@ static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct
 		}
 
 		ptl = huge_pte_lock(h, mm, ptep);
-		if (huge_pmd_unshare(mm, vma, &address, ptep)) {
+		if (huge_pmd_unshare(mm, vma, address, ptep)) {
 			spin_unlock(ptl);
 			tlb_flush_pmd_range(tlb, address & PUD_MASK, PUD_SIZE);
 			force_flush = true;
+			address |= last_addr_mask;
 			continue;
 		}
 
@@ -6327,7 +6323,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
 			continue;
 		}
 		ptl = huge_pte_lock(h, mm, ptep);
-		if (huge_pmd_unshare(mm, vma, &address, ptep)) {
+		if (huge_pmd_unshare(mm, vma, address, ptep)) {
 			/*
 			 * When uffd-wp is enabled on the vma, unshare
 			 * shouldn't happen at all.  Warn about it if it
@@ -6337,6 +6333,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
 			pages++;
 			spin_unlock(ptl);
 			shared_pmd = true;
+			address |= last_addr_mask;
 			continue;
 		}
 		pte = huge_ptep_get(ptep);
@@ -6759,11 +6756,11 @@ pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
  *	    0 the underlying pte page is not shared, or it is the last user
  */
 int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
-					unsigned long *addr, pte_t *ptep)
+					unsigned long addr, pte_t *ptep)
 {
-	pgd_t *pgd = pgd_offset(mm, *addr);
-	p4d_t *p4d = p4d_offset(pgd, *addr);
-	pud_t *pud = pud_offset(p4d, *addr);
+	pgd_t *pgd = pgd_offset(mm, addr);
+	p4d_t *p4d = p4d_offset(pgd, addr);
+	pud_t *pud = pud_offset(p4d, addr);
 
 	i_mmap_assert_write_locked(vma->vm_file->f_mapping);
 	BUG_ON(page_count(virt_to_page(ptep)) == 0);
@@ -6773,14 +6770,6 @@ int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
 	pud_clear(pud);
 	put_page(virt_to_page(ptep));
 	mm_dec_nr_pmds(mm);
-	/*
-	 * This update of passed address optimizes loops sequentially
-	 * processing addresses in increments of huge page size (PMD_SIZE
-	 * in this case).  By clearing the pud, a PUD_SIZE area is unmapped.
-	 * Update address to the 'last page' in the cleared area so that
-	 * calling loop can move to first page past this area.
-	 */
-	*addr |= PUD_SIZE - PMD_SIZE;
 	return 1;
 }
 
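The comment deleted above documented the OR trick while it lived in the callee; the same arithmetic now happens in the callers through hugetlb_mask_last_page(). A standalone userspace illustration of the effect, assuming x86_64 sizes (2 MiB PMD_SIZE, 1 GiB PUD_SIZE; the macro names are reused here for readability only, not pulled from kernel headers):

	#include <stdio.h>

	#define PMD_SIZE (2UL << 20)	/* 2 MiB huge page */
	#define PUD_SIZE (1UL << 30)	/* area mapped by one shared PMD page */

	int main(void)
	{
		unsigned long mask = PUD_SIZE - PMD_SIZE;	/* 0x3fe00000 */
		unsigned long addr = 0x40200000UL;	/* PMD-aligned, inside some 1 GiB area */

		addr |= mask;		/* last huge page of that area: 0x7fe00000 */
		addr += PMD_SIZE;	/* loop increment: next area, 0x80000000 */
		printf("%#lx\n", addr);
		return 0;
	}

Because the address is always huge-page aligned, ORing in the mask can only move it forward, onto the last PMD_SIZE page of the PUD_SIZE area being torn down.
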
@@ -6792,7 +6781,7 @@ pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
 }
 
 int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
-				unsigned long *addr, pte_t *ptep)
+				unsigned long addr, pte_t *ptep)
 {
 	return 0;
 }
@@ -6899,6 +6888,10 @@ unsigned long hugetlb_mask_last_page(struct hstate *h)
 /* See description above.  Architectures can provide their own version. */
 __weak unsigned long hugetlb_mask_last_page(struct hstate *h)
 {
+#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
+	if (huge_page_size(h) == PMD_SIZE)
+		return PUD_SIZE - PMD_SIZE;
+#endif
 	return 0UL;
 }
 
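The generic version above covers the common shared-PMD geometry. Because it is declared __weak, an architecture with more huge page sizes can replace it with a strong symbol at link time; a hypothetical sketch of such an override (not quoted from any real tree), following the same rule of "span covered by one upper-level entry, minus one huge page":

	unsigned long hugetlb_mask_last_page(struct hstate *h)
	{
		unsigned long hp_size = huge_page_size(h);

		if (hp_size == PUD_SIZE)	/* one PGD/P4D entry covers PGDIR_SIZE */
			return PGDIR_SIZE - PUD_SIZE;
		if (hp_size == PMD_SIZE)	/* same value as the generic version */
			return PUD_SIZE - PMD_SIZE;
		return 0UL;
	}

With this contract, "addr |= mask; addr += huge_page_size(h);" always lands on the first huge page past the area that one upper-level entry maps.
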
@@ -7125,14 +7118,11 @@ void hugetlb_unshare_all_pmds(struct vm_area_struct *vma)
 	mmu_notifier_invalidate_range_start(&range);
 	i_mmap_lock_write(vma->vm_file->f_mapping);
 	for (address = start; address < end; address += PUD_SIZE) {
-		unsigned long tmp = address;
-
 		ptep = huge_pte_offset(mm, address, sz);
 		if (!ptep)
			continue;
 		ptl = huge_pte_lock(h, mm, ptep);
-		/* We don't want 'address' to be changed */
-		huge_pmd_unshare(mm, vma, &tmp, ptep);
+		huge_pmd_unshare(mm, vma, address, ptep);
 		spin_unlock(ptl);
 	}
 	flush_hugetlb_tlb_range(vma, start, end);