Skip to content

Commit d1c6095

Browse files
sidkumar99 authored and akpm00 committed
mm/hugetlb: convert hugetlb prep functions to folios
Convert prep_new_huge_page() and __prep_compound_gigantic_page() to folios. Link: https://lkml.kernel.org/r/[email protected] Signed-off-by: Sidhartha Kumar <[email protected]> Reviewed-by: Mike Kravetz <[email protected]> Cc: David Hildenbrand <[email protected]> Cc: John Hubbard <[email protected]> Cc: Matthew Wilcox <[email protected]> Cc: Miaohe Lin <[email protected]> Cc: Mina Almasry <[email protected]> Cc: Muchun Song <[email protected]> Cc: Rasmus Villemoes <[email protected]> Cc: Tarun Sahu <[email protected]> Cc: Wei Chen <[email protected]> Signed-off-by: Andrew Morton <[email protected]>
1 parent 7f325a8 commit d1c6095

File tree

1 file changed

+30
-33
lines changed

1 file changed

+30
-33
lines changed

mm/hugetlb.c

Lines changed: 30 additions & 33 deletions
Original file line number | Diff line number | Diff line change
@@ -1789,29 +1789,27 @@ static void __prep_new_hugetlb_folio(struct hstate *h, struct folio *folio)
17891789
set_hugetlb_cgroup_rsvd(folio, NULL);
17901790
}
17911791

1792-
static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
1792+
static void prep_new_hugetlb_folio(struct hstate *h, struct folio *folio, int nid)
17931793
{
1794-
struct folio *folio = page_folio(page);
1795-
17961794
__prep_new_hugetlb_folio(h, folio);
17971795
spin_lock_irq(&hugetlb_lock);
17981796
__prep_account_new_huge_page(h, nid);
17991797
spin_unlock_irq(&hugetlb_lock);
18001798
}
18011799

1802-
static bool __prep_compound_gigantic_page(struct page *page, unsigned int order,
1803-
bool demote)
1800+
static bool __prep_compound_gigantic_folio(struct folio *folio,
1801+
unsigned int order, bool demote)
18041802
{
18051803
int i, j;
18061804
int nr_pages = 1 << order;
18071805
struct page *p;
18081806

1809-
/* we rely on prep_new_huge_page to set the destructor */
1810-
set_compound_order(page, order);
1811-
__ClearPageReserved(page);
1812-
__SetPageHead(page);
1807+
/* we rely on prep_new_hugetlb_folio to set the destructor */
1808+
folio_set_compound_order(folio, order);
1809+
__folio_clear_reserved(folio);
1810+
__folio_set_head(folio);
18131811
for (i = 0; i < nr_pages; i++) {
1814-
p = nth_page(page, i);
1812+
p = folio_page(folio, i);
18151813

18161814
/*
18171815
* For gigantic hugepages allocated through bootmem at
@@ -1853,43 +1851,41 @@ static bool __prep_compound_gigantic_page(struct page *page, unsigned int order,
18531851
VM_BUG_ON_PAGE(page_count(p), p);
18541852
}
18551853
if (i != 0)
1856-
set_compound_head(p, page);
1854+
set_compound_head(p, &folio->page);
18571855
}
1858-
atomic_set(compound_mapcount_ptr(page), -1);
1859-
atomic_set(subpages_mapcount_ptr(page), 0);
1860-
atomic_set(compound_pincount_ptr(page), 0);
1856+
atomic_set(folio_mapcount_ptr(folio), -1);
1857+
atomic_set(folio_subpages_mapcount_ptr(folio), 0);
1858+
atomic_set(folio_pincount_ptr(folio), 0);
18611859
return true;
18621860

18631861
out_error:
18641862
/* undo page modifications made above */
18651863
for (j = 0; j < i; j++) {
1866-
p = nth_page(page, j);
1864+
p = folio_page(folio, j);
18671865
if (j != 0)
18681866
clear_compound_head(p);
18691867
set_page_refcounted(p);
18701868
}
18711869
/* need to clear PG_reserved on remaining tail pages */
18721870
for (; j < nr_pages; j++) {
1873-
p = nth_page(page, j);
1871+
p = folio_page(folio, j);
18741872
__ClearPageReserved(p);
18751873
}
1876-
set_compound_order(page, 0);
1877-
#ifdef CONFIG_64BIT
1878-
page[1].compound_nr = 0;
1879-
#endif
1880-
__ClearPageHead(page);
1874+
folio_set_compound_order(folio, 0);
1875+
__folio_clear_head(folio);
18811876
return false;
18821877
}
18831878

1884-
static bool prep_compound_gigantic_page(struct page *page, unsigned int order)
1879+
static bool prep_compound_gigantic_folio(struct folio *folio,
1880+
unsigned int order)
18851881
{
1886-
return __prep_compound_gigantic_page(page, order, false);
1882+
return __prep_compound_gigantic_folio(folio, order, false);
18871883
}
18881884

1889-
static bool prep_compound_gigantic_page_for_demote(struct page *page,
1885+
static bool prep_compound_gigantic_folio_for_demote(struct folio *folio,
18901886
unsigned int order)
18911887
{
1892-
return __prep_compound_gigantic_page(page, order, true);
1888+
return __prep_compound_gigantic_folio(folio, order, true);
18931889
}
18941890

18951891
/*
@@ -2041,7 +2037,7 @@ static struct page *alloc_fresh_huge_page(struct hstate *h,
20412037
return NULL;
20422038
folio = page_folio(page);
20432039
if (hstate_is_gigantic(h)) {
2044-
if (!prep_compound_gigantic_page(page, huge_page_order(h))) {
2040+
if (!prep_compound_gigantic_folio(folio, huge_page_order(h))) {
20452041
/*
20462042
* Rare failure to convert pages to compound page.
20472043
* Free pages and try again - ONCE!
@@ -2054,7 +2050,7 @@ static struct page *alloc_fresh_huge_page(struct hstate *h,
20542050
return NULL;
20552051
}
20562052
}
2057-
prep_new_huge_page(h, page, page_to_nid(page));
2053+
prep_new_hugetlb_folio(h, folio, folio_nid(folio));
20582054

20592055
return page;
20602056
}
@@ -3058,10 +3054,10 @@ static void __init gather_bootmem_prealloc(void)
30583054
struct hstate *h = m->hstate;
30593055

30603056
VM_BUG_ON(!hstate_is_gigantic(h));
3061-
WARN_ON(page_count(page) != 1);
3062-
if (prep_compound_gigantic_page(page, huge_page_order(h))) {
3063-
WARN_ON(PageReserved(page));
3064-
prep_new_huge_page(h, page, page_to_nid(page));
3057+
WARN_ON(folio_ref_count(folio) != 1);
3058+
if (prep_compound_gigantic_folio(folio, huge_page_order(h))) {
3059+
WARN_ON(folio_test_reserved(folio));
3060+
prep_new_hugetlb_folio(h, folio, folio_nid(folio));
30653061
free_huge_page(page); /* add to the hugepage allocator */
30663062
} else {
30673063
/* VERY unlikely inflated ref count on a tail page */
@@ -3480,13 +3476,14 @@ static int demote_free_huge_page(struct hstate *h, struct page *page)
34803476
for (i = 0; i < pages_per_huge_page(h);
34813477
i += pages_per_huge_page(target_hstate)) {
34823478
subpage = nth_page(page, i);
3479+
folio = page_folio(subpage);
34833480
if (hstate_is_gigantic(target_hstate))
3484-
prep_compound_gigantic_page_for_demote(subpage,
3481+
prep_compound_gigantic_folio_for_demote(folio,
34853482
target_hstate->order);
34863483
else
34873484
prep_compound_page(subpage, target_hstate->order);
34883485
set_page_private(subpage, 0);
3489-
prep_new_huge_page(target_hstate, subpage, nid);
3486+
prep_new_hugetlb_folio(target_hstate, folio, nid);
34903487
free_huge_page(subpage);
34913488
}
34923489
mutex_unlock(&target_hstate->resize_lock);

0 commit comments

Comments (0)