Skip to content

Commit 5502ea4

Browse files
xzpeter authored and akpm00 committed
mm/hugetlb: add page_mask for hugetlb_follow_page_mask()
follow_page() doesn't need it, but we'll start to need it when unifying gup for hugetlb.

Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Peter Xu <[email protected]>
Reviewed-by: David Hildenbrand <[email protected]>
Cc: Andrea Arcangeli <[email protected]>
Cc: Hugh Dickins <[email protected]>
Cc: James Houghton <[email protected]>
Cc: Jason Gunthorpe <[email protected]>
Cc: John Hubbard <[email protected]>
Cc: Kirill A . Shutemov <[email protected]>
Cc: Lorenzo Stoakes <[email protected]>
Cc: Matthew Wilcox <[email protected]>
Cc: Mike Kravetz <[email protected]>
Cc: Mike Rapoport (IBM) <[email protected]>
Cc: Vlastimil Babka <[email protected]>
Cc: Yang Shi <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
1 parent 458568c commit 5502ea4

File tree

3 files changed

+11
-5
lines changed

3 files changed

+11
-5
lines changed

include/linux/hugetlb.h

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -131,7 +131,8 @@ int move_hugetlb_page_tables(struct vm_area_struct *vma,
131131
int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *,
132132
struct vm_area_struct *, struct vm_area_struct *);
133133
struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma,
134-
unsigned long address, unsigned int flags);
134+
unsigned long address, unsigned int flags,
135+
unsigned int *page_mask);
135136
long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
136137
struct page **, unsigned long *, unsigned long *,
137138
long, unsigned int, int *);
@@ -297,8 +298,9 @@ static inline void adjust_range_if_pmd_sharing_possible(
297298
{
298299
}
299300

300-
static inline struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma,
301-
unsigned long address, unsigned int flags)
301+
static inline struct page *hugetlb_follow_page_mask(
302+
struct vm_area_struct *vma, unsigned long address, unsigned int flags,
303+
unsigned int *page_mask)
302304
{
303305
BUILD_BUG(); /* should never be compiled in if !CONFIG_HUGETLB_PAGE*/
304306
}

mm/gup.c

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -824,7 +824,8 @@ static struct page *follow_page_mask(struct vm_area_struct *vma,
824824
* Ordinary GUP uses follow_hugetlb_page for hugetlb processing.
825825
*/
826826
if (is_vm_hugetlb_page(vma))
827-
return hugetlb_follow_page_mask(vma, address, flags);
827+
return hugetlb_follow_page_mask(vma, address, flags,
828+
&ctx->page_mask);
828829

829830
pgd = pgd_offset(mm, address);
830831

mm/hugetlb.c

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -6454,7 +6454,8 @@ static inline bool __follow_hugetlb_must_fault(struct vm_area_struct *vma,
64546454
}
64556455

64566456
struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma,
6457-
unsigned long address, unsigned int flags)
6457+
unsigned long address, unsigned int flags,
6458+
unsigned int *page_mask)
64586459
{
64596460
struct hstate *h = hstate_vma(vma);
64606461
struct mm_struct *mm = vma->vm_mm;
@@ -6504,6 +6505,8 @@ struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma,
65046505
page = ERR_PTR(ret);
65056506
goto out;
65066507
}
6508+
6509+
*page_mask = (1U << huge_page_order(h)) - 1;
65076510
}
65086511
out:
65096512
spin_unlock(ptl);

0 commit comments

Comments (0)