Commit d2d7867

Frank van der Linden authored and akpm00 committed
mm/hugetlb: enable bootmem allocation from CMA areas
If hugetlb_cma_only is enabled, we know that hugetlb pages can only be allocated from CMA. Now that there is an interface to do early reservations from a CMA area (returning memblock memory), it can be used to allocate hugetlb pages from CMA.

This also allows for doing pre-HVO on these pages (if enabled).

Make sure to initialize the page structures and associated data correctly. Create a flag to signal that a hugetlb page has been allocated from CMA to make things a little easier.

Some configurations of powerpc have a special hugetlb bootmem allocator, so introduce a boolean arch_has_huge_bootmem_alloc that returns true if such an allocator is present. In that case, CMA bootmem allocations can't be used, so check that function before trying.

Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Frank van der Linden <[email protected]>
Cc: Madhavan Srinivasan <[email protected]>
Cc: Michael Ellerman <[email protected]>
Cc: Alexander Gordeev <[email protected]>
Cc: Andy Lutomirski <[email protected]>
Cc: Arnd Bergmann <[email protected]>
Cc: Dan Carpenter <[email protected]>
Cc: Dave Hansen <[email protected]>
Cc: David Hildenbrand <[email protected]>
Cc: Heiko Carstens <[email protected]>
Cc: Joao Martins <[email protected]>
Cc: Johannes Weiner <[email protected]>
Cc: Muchun Song <[email protected]>
Cc: Oscar Salvador <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Roman Gushchin (Cruise) <[email protected]>
Cc: Usama Arif <[email protected]>
Cc: Vasily Gorbik <[email protected]>
Cc: Yu Zhao <[email protected]>
Cc: Zi Yan <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
1 parent f866cfc commit d2d7867

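Before the per-file diffs, the core gate this patch adds is worth calling out. Condensed from the mm/hugetlb.c hunk below (restated here for orientation, not new code), early CMA bootmem allocation is only attempted for gigantic hstates when hugetlb_cma_only is set and the architecture has not claimed its own bootmem allocator:

static bool __init hugetlb_early_cma(struct hstate *h)
{
        /* An arch-specific bootmem allocator (e.g. powerpc LPAR) takes precedence. */
        if (arch_has_huge_bootmem_alloc())
                return false;

        /* Only gigantic pages restricted to CMA use the early reservation path. */
        return hstate_is_gigantic(h) && hugetlb_cma_only;
}

When this returns true, the new alloc_bootmem() helper reserves the huge page from a CMA area with cma_reserve_early(), records the area in huge_bootmem_page->cma, and tags the reservation with HUGE_BOOTMEM_CMA so that later boot stages (zone validation, pre-HVO, migratetype and totalram accounting) handle it correctly.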
3 files changed: 152 additions (+), 39 deletions (-)

arch/powerpc/include/asm/book3s/64/hugetlb.h

Lines changed: 6 additions & 0 deletions

@@ -94,4 +94,10 @@ static inline int check_and_get_huge_psize(int shift)
         return mmu_psize;
 }
 
+#define arch_has_huge_bootmem_alloc arch_has_huge_bootmem_alloc
+
+static inline bool arch_has_huge_bootmem_alloc(void)
+{
+        return (firmware_has_feature(FW_FEATURE_LPAR) && !radix_enabled());
+}
 #endif

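The powerpc hunk above also shows the opt-out pattern: an architecture that allocates hugetlb bootmem pages itself defines arch_has_huge_bootmem_alloc in its hugetlb header, while the generic fallback in include/linux/hugetlb.h (next file) returns false. A minimal sketch for a hypothetical architecture (illustrative only, not part of this commit) would be:

/* Hypothetical asm/hugetlb.h fragment, mirroring the powerpc override above. */
#define arch_has_huge_bootmem_alloc arch_has_huge_bootmem_alloc

static inline bool arch_has_huge_bootmem_alloc(void)
{
        /* Return true whenever this architecture allocates hugetlb bootmem itself. */
        return true;
}
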
include/linux/hugetlb.h

Lines changed: 17 additions & 0 deletions

@@ -591,6 +591,7 @@ enum hugetlb_page_flags {
         HPG_freed,
         HPG_vmemmap_optimized,
         HPG_raw_hwp_unreliable,
+        HPG_cma,
         __NR_HPAGEFLAGS,
 };
 
@@ -650,6 +651,7 @@ HPAGEFLAG(Temporary, temporary)
 HPAGEFLAG(Freed, freed)
 HPAGEFLAG(VmemmapOptimized, vmemmap_optimized)
 HPAGEFLAG(RawHwpUnreliable, raw_hwp_unreliable)
+HPAGEFLAG(Cma, cma)
 
 #ifdef CONFIG_HUGETLB_PAGE
 
@@ -678,14 +680,18 @@ struct hstate {
         char name[HSTATE_NAME_LEN];
 };
 
+struct cma;
+
 struct huge_bootmem_page {
         struct list_head list;
         struct hstate *hstate;
         unsigned long flags;
+        struct cma *cma;
 };
 
 #define HUGE_BOOTMEM_HVO                0x0001
 #define HUGE_BOOTMEM_ZONES_VALID        0x0002
+#define HUGE_BOOTMEM_CMA                0x0004
 
 bool hugetlb_bootmem_page_zones_valid(int nid, struct huge_bootmem_page *m);
 
@@ -824,6 +830,17 @@ static inline pte_t arch_make_huge_pte(pte_t entry, unsigned int shift,
 }
 #endif
 
+#ifndef arch_has_huge_bootmem_alloc
+/*
+ * Some architectures do their own bootmem allocation, so they can't use
+ * early CMA allocation.
+ */
+static inline bool arch_has_huge_bootmem_alloc(void)
+{
+        return false;
+}
+#endif
+
 static inline struct hstate *folio_hstate(struct folio *folio)
 {
         VM_BUG_ON_FOLIO(!folio_test_hugetlb(folio), folio);

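The HPAGEFLAG(Cma, cma) line above is what generates the folio_test_hugetlb_cma()/folio_set_hugetlb_cma() helpers used throughout the mm/hugetlb.c hunks below. As a rough sketch (simplified from the existing HPAGEFLAG/TESTHPAGEFLAG macros in this header, not code added by this commit), the expansion amounts to bit operations on the folio's private word using the new HPG_cma bit:

/* Simplified sketch of the HPAGEFLAG(Cma, cma) expansion. */
static __always_inline bool folio_test_hugetlb_cma(struct folio *folio)
{
        return test_bit(HPG_cma, (unsigned long *)&folio->private);
}

static __always_inline void folio_set_hugetlb_cma(struct folio *folio)
{
        set_bit(HPG_cma, (unsigned long *)&folio->private);
}

A matching folio_clear_hugetlb_cma() helper is generated as well.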
mm/hugetlb.c

Lines changed: 129 additions & 39 deletions

@@ -131,8 +131,10 @@ static void hugetlb_free_folio(struct folio *folio)
 #ifdef CONFIG_CMA
         int nid = folio_nid(folio);
 
-        if (cma_free_folio(hugetlb_cma[nid], folio))
+        if (folio_test_hugetlb_cma(folio)) {
+                WARN_ON_ONCE(!cma_free_folio(hugetlb_cma[nid], folio));
                 return;
+        }
 #endif
         folio_put(folio);
 }

@@ -1508,6 +1510,9 @@ static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask,
                                         break;
                         }
                 }
+
+                if (folio)
+                        folio_set_hugetlb_cma(folio);
         }
 #endif
         if (!folio) {

@@ -3186,6 +3191,86 @@ struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
         return ERR_PTR(-ENOSPC);
 }
 
+static bool __init hugetlb_early_cma(struct hstate *h)
+{
+        if (arch_has_huge_bootmem_alloc())
+                return false;
+
+        return (hstate_is_gigantic(h) && hugetlb_cma_only);
+}
+
+static __init void *alloc_bootmem(struct hstate *h, int nid, bool node_exact)
+{
+        struct huge_bootmem_page *m;
+        unsigned long flags;
+        struct cma *cma;
+        int listnode = nid;
+
+#ifdef CONFIG_CMA
+        if (hugetlb_early_cma(h)) {
+                flags = HUGE_BOOTMEM_CMA;
+                cma = hugetlb_cma[nid];
+                m = cma_reserve_early(cma, huge_page_size(h));
+                if (!m) {
+                        int node;
+
+                        if (node_exact)
+                                return NULL;
+                        for_each_online_node(node) {
+                                cma = hugetlb_cma[node];
+                                if (!cma || node == nid)
+                                        continue;
+                                m = cma_reserve_early(cma, huge_page_size(h));
+                                if (m) {
+                                        listnode = node;
+                                        break;
+                                }
+                        }
+                }
+        } else
+#endif
+        {
+                flags = 0;
+                cma = NULL;
+                if (node_exact)
+                        m = memblock_alloc_exact_nid_raw(huge_page_size(h),
+                                        huge_page_size(h), 0,
+                                        MEMBLOCK_ALLOC_ACCESSIBLE, nid);
+                else {
+                        m = memblock_alloc_try_nid_raw(huge_page_size(h),
+                                        huge_page_size(h), 0,
+                                        MEMBLOCK_ALLOC_ACCESSIBLE, nid);
+                        /*
+                         * For pre-HVO to work correctly, pages need to be on
+                         * the list for the node they were actually allocated
+                         * from. That node may be different in the case of
+                         * fallback by memblock_alloc_try_nid_raw. So,
+                         * extract the actual node first.
+                         */
+                        if (m)
+                                listnode = early_pfn_to_nid(PHYS_PFN(virt_to_phys(m)));
+                }
+        }
+
+        if (m) {
+                /*
+                 * Use the beginning of the huge page to store the
+                 * huge_bootmem_page struct (until gather_bootmem
+                 * puts them into the mem_map).
+                 *
+                 * Put them into a private list first because mem_map
+                 * is not up yet.
+                 */
+                INIT_LIST_HEAD(&m->list);
+                list_add(&m->list, &huge_boot_pages[listnode]);
+                m->hstate = h;
+                m->flags = flags;
+                m->cma = cma;
+        }
+
+        return m;
+}
+
 int alloc_bootmem_huge_page(struct hstate *h, int nid)
         __attribute__ ((weak, alias("__alloc_bootmem_huge_page")));
 int __alloc_bootmem_huge_page(struct hstate *h, int nid)

@@ -3195,22 +3280,15 @@ int __alloc_bootmem_huge_page(struct hstate *h, int nid)
 
         /* do node specific alloc */
         if (nid != NUMA_NO_NODE) {
-                m = memblock_alloc_exact_nid_raw(huge_page_size(h), huge_page_size(h),
-                                0, MEMBLOCK_ALLOC_ACCESSIBLE, nid);
+                m = alloc_bootmem(h, node, true);
                 if (!m)
                         return 0;
                 goto found;
         }
+
         /* allocate from next node when distributing huge pages */
         for_each_node_mask_to_alloc(&h->next_nid_to_alloc, nr_nodes, node, &node_states[N_ONLINE]) {
-                m = memblock_alloc_try_nid_raw(
-                                huge_page_size(h), huge_page_size(h),
-                                0, MEMBLOCK_ALLOC_ACCESSIBLE, node);
-                /*
-                 * Use the beginning of the huge page to store the
-                 * huge_bootmem_page struct (until gather_bootmem
-                 * puts them into the mem_map).
-                 */
+                m = alloc_bootmem(h, node, false);
                 if (!m)
                         return 0;
                 goto found;

@@ -3228,21 +3306,6 @@ int __alloc_bootmem_huge_page(struct hstate *h, int nid)
         memblock_reserved_mark_noinit(virt_to_phys((void *)m + PAGE_SIZE),
                         huge_page_size(h) - PAGE_SIZE);
 
-        /*
-         * Put them into a private list first because mem_map is not up yet.
-         *
-         * For pre-HVO to work correctly, pages need to be on the list for
-         * the node they were actually allocated from. That node may be
-         * different in the case of fallback by memblock_alloc_try_nid_raw.
-         * So, extract the actual node first.
-         */
-        if (nid == NUMA_NO_NODE)
-                node = early_pfn_to_nid(PHYS_PFN(virt_to_phys(m)));
-
-        INIT_LIST_HEAD(&m->list);
-        list_add(&m->list, &huge_boot_pages[node]);
-        m->hstate = h;
-        m->flags = 0;
         return 1;
 }

@@ -3283,13 +3346,25 @@ static void __init hugetlb_folio_init_vmemmap(struct folio *folio,
         prep_compound_head((struct page *)folio, huge_page_order(h));
 }
 
+static bool __init hugetlb_bootmem_page_prehvo(struct huge_bootmem_page *m)
+{
+        return m->flags & HUGE_BOOTMEM_HVO;
+}
+
+static bool __init hugetlb_bootmem_page_earlycma(struct huge_bootmem_page *m)
+{
+        return m->flags & HUGE_BOOTMEM_CMA;
+}
+
 /*
  * memblock-allocated pageblocks might not have the migrate type set
  * if marked with the 'noinit' flag. Set it to the default (MIGRATE_MOVABLE)
- * here.
+ * here, or MIGRATE_CMA if this was a page allocated through an early CMA
+ * reservation.
  *
- * Note that this will not write the page struct, it is ok (and necessary)
- * to do this on vmemmap optimized folios.
+ * In case of vmemmap optimized folios, the tail vmemmap pages are mapped
+ * read-only, but that's ok - for sparse vmemmap this does not write to
+ * the page structure.
  */
 static void __init hugetlb_bootmem_init_migratetype(struct folio *folio,
                                           struct hstate *h)

@@ -3298,9 +3373,13 @@ static void __init hugetlb_bootmem_init_migratetype(struct folio *folio,
 
         WARN_ON_ONCE(!pageblock_aligned(folio_pfn(folio)));
 
-        for (i = 0; i < nr_pages; i += pageblock_nr_pages)
-                set_pageblock_migratetype(folio_page(folio, i),
+        for (i = 0; i < nr_pages; i += pageblock_nr_pages) {
+                if (folio_test_hugetlb_cma(folio))
+                        init_cma_pageblock(folio_page(folio, i));
+                else
+                        set_pageblock_migratetype(folio_page(folio, i),
                                   MIGRATE_MOVABLE);
+        }
 }
 
 static void __init prep_and_add_bootmem_folios(struct hstate *h,

@@ -3346,10 +3425,16 @@ bool __init hugetlb_bootmem_page_zones_valid(int nid,
                 return true;
         }
 
+        if (hugetlb_bootmem_page_earlycma(m)) {
+                valid = cma_validate_zones(m->cma);
+                goto out;
+        }
+
         start_pfn = virt_to_phys(m) >> PAGE_SHIFT;
 
         valid = !pfn_range_intersects_zones(nid, start_pfn,
                         pages_per_huge_page(m->hstate));
+out:
         if (!valid)
                 hstate_boot_nrinvalid[hstate_index(m->hstate)]++;
 

@@ -3378,11 +3463,6 @@ static void __init hugetlb_bootmem_free_invalid_page(int nid, struct page *page,
         }
 }
 
-static bool __init hugetlb_bootmem_page_prehvo(struct huge_bootmem_page *m)
-{
-        return (m->flags & HUGE_BOOTMEM_HVO);
-}
-
 /*
  * Put bootmem huge pages into the standard lists after mem_map is up.
  * Note: This only applies to gigantic (order > MAX_PAGE_ORDER) pages.

@@ -3432,14 +3512,21 @@ static void __init gather_bootmem_prealloc_node(unsigned long nid)
                  */
                 folio_set_hugetlb_vmemmap_optimized(folio);
 
+                if (hugetlb_bootmem_page_earlycma(m))
+                        folio_set_hugetlb_cma(folio);
+
                 list_add(&folio->lru, &folio_list);
 
                 /*
                  * We need to restore the 'stolen' pages to totalram_pages
                  * in order to fix confusing memory reports from free(1) and
                  * other side-effects, like CommitLimit going negative.
+                 *
+                 * For CMA pages, this is done in init_cma_pageblock
+                 * (via hugetlb_bootmem_init_migratetype), so skip it here.
                  */
-                adjust_managed_page_count(page, pages_per_huge_page(h));
+                if (!folio_test_hugetlb_cma(folio))
+                        adjust_managed_page_count(page, pages_per_huge_page(h));
                 cond_resched();
         }
 

@@ -3624,8 +3711,11 @@ static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
 {
         unsigned long allocated;
 
-        /* skip gigantic hugepages allocation if hugetlb_cma enabled */
-        if (hstate_is_gigantic(h) && hugetlb_cma_size) {
+        /*
+         * Skip gigantic hugepages allocation if early CMA
+         * reservations are not available.
+         */
+        if (hstate_is_gigantic(h) && hugetlb_cma_size && !hugetlb_early_cma(h)) {
                 pr_warn_once("HugeTLB: hugetlb_cma is enabled, skip boot time allocation\n");
                 return;
         }
