@@ -1361,18 +1361,20 @@ static void destroy_compound_gigantic_folio(struct folio *folio,
 	__destroy_compound_gigantic_folio(folio, order, false);
 }
 
-static void free_gigantic_page(struct page *page, unsigned int order)
+static void free_gigantic_folio(struct folio *folio, unsigned int order)
 {
 	/*
 	 * If the page isn't allocated using the cma allocator,
 	 * cma_release() returns false.
 	 */
 #ifdef CONFIG_CMA
-	if (cma_release(hugetlb_cma[page_to_nid(page)], page, 1 << order))
+	int nid = folio_nid(folio);
+
+	if (cma_release(hugetlb_cma[nid], &folio->page, 1 << order))
 		return;
 #endif
 
-	free_contig_range(page_to_pfn(page), 1 << order);
+	free_contig_range(folio_pfn(folio), 1 << order);
 }
 
 #ifdef CONFIG_CONTIG_ALLOC
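
The new free_gigantic_folio() keeps the same two-step free as before: offer the range back to its CMA area, and only if cma_release() declines it, return it with free_contig_range(). What changes is that the node id and pfn now come from folio_nid()/folio_pfn() rather than the page-based helpers. Below is a minimal userspace sketch of that try-CMA-then-fallback shape; try_cma_release() and free_range() are hypothetical stand-ins, not the kernel APIs, and the node check merely simulates a range that no CMA pool owns.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for cma_release(): pretend only node 0 has a pool. */
static bool try_cma_release(int nid, unsigned long nr_pages)
{
	return nid == 0 && nr_pages > 0;
}

/* Hypothetical stand-in for free_contig_range(). */
static void free_range(unsigned long pfn, unsigned long nr_pages)
{
	printf("freeing %lu pages at pfn %lu\n", nr_pages, pfn);
}

static void free_gigantic(int nid, unsigned long pfn, unsigned int order)
{
	/* A CMA-backed range must go back to its own pool... */
	if (try_cma_release(nid, 1UL << order))
		return;
	/* ...anything else is returned as an ordinary contiguous range. */
	free_range(pfn, 1UL << order);
}

int main(void)
{
	free_gigantic(1, 4096, 18);	/* node 1: falls through to free_range() */
	return 0;
}
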
@@ -1426,7 +1428,8 @@ static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
 {
 	return NULL;
 }
-static inline void free_gigantic_page(struct page *page, unsigned int order) { }
+static inline void free_gigantic_folio(struct folio *folio,
+						unsigned int order) { }
 static inline void destroy_compound_gigantic_folio(struct folio *folio,
 						unsigned int order) { }
 #endif
@@ -1565,7 +1568,7 @@ static void __update_and_free_page(struct hstate *h, struct page *page)
 	 * If we don't know which subpages are hwpoisoned, we can't free
 	 * the hugepage, so it's leaked intentionally.
 	 */
-	if (HPageRawHwpUnreliable(page))
+	if (folio_test_hugetlb_raw_hwp_unreliable(folio))
 		return;
 
 	if (hugetlb_vmemmap_restore(h, page)) {
@@ -1575,7 +1578,7 @@ static void __update_and_free_page(struct hstate *h, struct page *page)
 		 * page and put the page back on the hugetlb free list and treat
 		 * as a surplus page.
 		 */
-		add_hugetlb_folio(h, page_folio(page), true);
+		add_hugetlb_folio(h, folio, true);
 		spin_unlock_irq(&hugetlb_lock);
 		return;
 	}
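
If hugetlb_vmemmap_restore() fails, the struct pages backing the folio's tail pages cannot be rebuilt, so the folio is put back on the free list as a surplus page rather than freed; the hunk above simply passes the already-computed folio instead of re-deriving it with page_folio(page). A compilable sketch of that error-path shape, where restore_vmemmap() and add_back_as_surplus() are hypothetical stand-ins (the real hugetlb_vmemmap_restore() returns 0 on success, nonzero on failure):

#include <stdbool.h>
#include <stdio.h>

static bool restore_vmemmap(void)
{
	return false;	/* simulate a vmemmap allocation failure */
}

static void add_back_as_surplus(void)
{
	printf("folio re-added to the free list as surplus\n");
}

static void update_and_free(void)
{
	if (!restore_vmemmap()) {
		/* Cannot rebuild the struct pages: keep the folio alive. */
		add_back_as_surplus();
		return;
	}
	printf("folio dissolved and freed\n");
}

int main(void)
{
	update_and_free();
	return 0;
}
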
@@ -1588,7 +1591,7 @@ static void __update_and_free_page(struct hstate *h, struct page *page)
 	hugetlb_clear_page_hwpoison(&folio->page);
 
 	for (i = 0; i < pages_per_huge_page(h); i++) {
-		subpage = nth_page(page, i);
+		subpage = folio_page(folio, i);
 		subpage->flags &= ~(1 << PG_locked | 1 << PG_error |
 				1 << PG_referenced | 1 << PG_dirty |
 				1 << PG_active | 1 << PG_private |
@@ -1597,12 +1600,12 @@ static void __update_and_free_page(struct hstate *h, struct page *page)
 
 	/*
 	 * Non-gigantic pages demoted from CMA allocated gigantic pages
-	 * need to be given back to CMA in free_gigantic_page.
+	 * need to be given back to CMA in free_gigantic_folio.
	 */
 	if (hstate_is_gigantic(h) ||
 	    hugetlb_cma_folio(folio, huge_page_order(h))) {
 		destroy_compound_gigantic_folio(folio, huge_page_order(h));
-		free_gigantic_page(page, huge_page_order(h));
+		free_gigantic_folio(folio, huge_page_order(h));
 	} else {
 		__free_pages(page, huge_page_order(h));
 	}
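
Two things happen in the hunks above: folio_page(folio, i) resolves the i-th subpage directly from the folio, replacing the nth_page() walk from the head page, and the branch then picks the free path. Gigantic folios, and non-gigantic folios that were demoted from CMA-allocated gigantic ones, must go through destroy_compound_gigantic_folio()/free_gigantic_folio(); everything else returns to the page allocator. A sketch of just that decision, with is_gigantic/backed_by_cma as hypothetical stand-ins for hstate_is_gigantic()/hugetlb_cma_folio():

#include <stdbool.h>
#include <stdio.h>

static void free_via_gigantic_path(void)
{
	printf("destroy_compound + free_gigantic_folio path\n");
}

static void free_via_page_allocator(void)
{
	printf("__free_pages path\n");
}

static void free_huge(bool is_gigantic, bool backed_by_cma)
{
	if (is_gigantic || backed_by_cma)
		free_via_gigantic_path();
	else
		free_via_page_allocator();
}

int main(void)
{
	free_huge(false, true);		/* demoted from a CMA gigantic page */
	free_huge(false, false);	/* ordinary huge page */
	return 0;
}
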
@@ -2025,6 +2028,7 @@ static struct page *alloc_fresh_huge_page(struct hstate *h,
 		nodemask_t *node_alloc_noretry)
 {
 	struct page *page;
+	struct folio *folio;
 	bool retry = false;
 
 retry:
@@ -2035,14 +2039,14 @@ static struct page *alloc_fresh_huge_page(struct hstate *h,
 				nid, nmask, node_alloc_noretry);
 	if (!page)
 		return NULL;
-
+	folio = page_folio(page);
 	if (hstate_is_gigantic(h)) {
 		if (!prep_compound_gigantic_page(page, huge_page_order(h))) {
 			/*
 			 * Rare failure to convert pages to compound page.
 			 * Free pages and try again - ONCE!
 			 */
-			free_gigantic_page(page, huge_page_order(h));
+			free_gigantic_folio(folio, huge_page_order(h));
 			if (!retry) {
 				retry = true;
 				goto retry;
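
prep_compound_gigantic_page() can fail when a transient reference briefly inflates a subpage's count, so the allocation path frees the range through the new folio helper and retries exactly once. The goto-based retry-ONCE shape, sketched in userspace with try_prep() as a hypothetical stand-in that fails on its first call:

#include <stdbool.h>
#include <stdio.h>

static int attempts;

static bool try_prep(void)
{
	return ++attempts > 1;	/* fail once, then succeed */
}

static bool alloc_fresh(void)
{
	bool retry = false;
retry:
	if (!try_prep()) {
		if (!retry) {
			retry = true;
			goto retry;	/* one more attempt, then give up */
		}
		return false;
	}
	return true;
}

int main(void)
{
	printf("alloc %s after %d attempt(s)\n",
	       alloc_fresh() ? "succeeded" : "failed", attempts);
	return 0;
}
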
@@ -3050,6 +3054,7 @@ static void __init gather_bootmem_prealloc(void)
 
 	list_for_each_entry(m, &huge_boot_pages, list) {
 		struct page *page = virt_to_page(m);
+		struct folio *folio = page_folio(page);
 		struct hstate *h = m->hstate;
 
 		VM_BUG_ON(!hstate_is_gigantic(h));
@@ -3060,7 +3065,7 @@ static void __init gather_bootmem_prealloc(void)
 		free_huge_page(page); /* add to the hugepage allocator */
 	} else {
 		/* VERY unlikely inflated ref count on a tail page */
-		free_gigantic_page(page, huge_page_order(h));
+		free_gigantic_folio(folio, huge_page_order(h));
 	}
 
 	/*
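
Taken together, every hunk follows one idiom: derive the folio once with page_folio(page), then use folio_nid(), folio_pfn(), and &folio->page where the page-based helpers used to be. A mock of that mapping, under the simplifying non-kernel assumption that the page handed in is always the head page (the real page_folio() also maps tail pages to their folio):

#include <stdio.h>

struct page { unsigned long pfn; int nid; };
struct folio { struct page page; };	/* a folio embeds its head page */

static struct folio *page_folio_mock(struct page *page)
{
	return (struct folio *)page;	/* head page only, by assumption */
}

static unsigned long folio_pfn_mock(struct folio *folio)
{
	return folio->page.pfn;
}

static int folio_nid_mock(struct folio *folio)
{
	return folio->page.nid;
}

int main(void)
{
	struct folio f = { { .pfn = 4096, .nid = 0 } };
	struct page *page = &f.page;

	/* old: page_to_pfn(page)/page_to_nid(page); new: folio accessors */
	struct folio *folio = page_folio_mock(page);
	printf("pfn=%lu nid=%d\n", folio_pfn_mock(folio), folio_nid_mock(folio));
	return 0;
}
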