
Commit 84172f4

Authored by Matthew Wilcox (Oracle), committed by torvalds
mm/page_alloc: combine __alloc_pages and __alloc_pages_nodemask
There are only two callers of __alloc_pages() so prune the thicket of
alloc_page variants by combining the two functions together.  Current
callers of __alloc_pages() simply add an extra 'NULL' parameter and
current callers of __alloc_pages_nodemask() call __alloc_pages() instead.

Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Matthew Wilcox (Oracle) <[email protected]>
Acked-by: Vlastimil Babka <[email protected]>
Acked-by: Michal Hocko <[email protected]>
Cc: Mike Rapoport <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
1 parent: 6e5e0f2
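To orient the reader, here is a minimal sketch of the change from the caller's side. The declaration is the one introduced in include/linux/gfp.h below; example_alloc() is a hypothetical caller invented for illustration, not code from this commit:

	/* The single remaining entry point, as declared in include/linux/gfp.h: */
	struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
			nodemask_t *nodemask);

	/* Hypothetical caller: a site that used the old three-argument
	 * __alloc_pages(gfp, order, nid) now spells the nodemask out as NULL;
	 * a site that used __alloc_pages_nodemask() keeps its arguments as-is. */
	static struct page *example_alloc(int nid)
	{
		return __alloc_pages(GFP_KERNEL, 0, nid, NULL);
	}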

7 files changed: 13 additions, 21 deletions

Documentation/admin-guide/mm/transhuge.rst

Lines changed: 1 addition & 1 deletion

@@ -402,7 +402,7 @@ compact_fail
 but failed.
 
 It is possible to establish how long the stalls were using the function
-tracer to record how long was spent in __alloc_pages_nodemask and
+tracer to record how long was spent in __alloc_pages() and
 using the mm_page_alloc tracepoint to identify which allocations were
 for huge pages.
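The transhuge.rst text above suggests timing allocation stalls with the function tracer. As a side note, a minimal userspace sketch of arming the function_graph tracer on the renamed symbol might look like the following; it assumes tracefs is mounted at /sys/kernel/tracing and CONFIG_FUNCTION_GRAPH_TRACER is enabled, and is an illustration, not part of the commit:

	#include <stdio.h>

	/* Write a short string to a tracefs control file. */
	static int write_file(const char *path, const char *val)
	{
		FILE *f = fopen(path, "w");

		if (!f)
			return -1;
		fputs(val, f);
		return fclose(f);
	}

	int main(void)
	{
		/* Record call durations only for __alloc_pages (formerly
		 * __alloc_pages_nodemask); results accumulate in
		 * /sys/kernel/tracing/trace. */
		write_file("/sys/kernel/tracing/set_graph_function", "__alloc_pages");
		write_file("/sys/kernel/tracing/current_tracer", "function_graph");
		write_file("/sys/kernel/tracing/tracing_on", "1");
		return 0;
	}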

include/linux/gfp.h

Lines changed: 3 additions & 10 deletions

@@ -515,15 +515,8 @@ static inline int arch_make_page_accessible(struct page *page)
 }
 #endif
 
-struct page *
-__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
-							nodemask_t *nodemask);
-
-static inline struct page *
-__alloc_pages(gfp_t gfp_mask, unsigned int order, int preferred_nid)
-{
-	return __alloc_pages_nodemask(gfp_mask, order, preferred_nid, NULL);
-}
+struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
+		nodemask_t *nodemask);
 
 /*
  * Allocate pages, preferring the node given as nid. The node must be valid and
@@ -535,7 +528,7 @@ __alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order)
 	VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);
 	VM_WARN_ON((gfp_mask & __GFP_THISNODE) && !node_online(nid));
 
-	return __alloc_pages(gfp_mask, order, nid);
+	return __alloc_pages(gfp_mask, order, nid, NULL);
 }
 
 /*

mm/hugetlb.c

Lines changed: 1 addition & 1 deletion

@@ -1616,7 +1616,7 @@ static struct page *alloc_buddy_huge_page(struct hstate *h,
 	gfp_mask |= __GFP_RETRY_MAYFAIL;
 	if (nid == NUMA_NO_NODE)
 		nid = numa_mem_id();
-	page = __alloc_pages_nodemask(gfp_mask, order, nid, nmask);
+	page = __alloc_pages(gfp_mask, order, nid, nmask);
 	if (page)
 		__count_vm_event(HTLB_BUDDY_PGALLOC);
 	else

mm/internal.h

Lines changed: 2 additions & 2 deletions

@@ -145,10 +145,10 @@ extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);
  * family of functions.
  *
  * nodemask, migratetype and highest_zoneidx are initialized only once in
- * __alloc_pages_nodemask() and then never change.
+ * __alloc_pages() and then never change.
  *
  * zonelist, preferred_zone and highest_zoneidx are set first in
- * __alloc_pages_nodemask() for the fast path, and might be later changed
+ * __alloc_pages() for the fast path, and might be later changed
 * in __alloc_pages_slowpath(). All other functions pass the whole structure
 * by a const pointer.
 */

mm/mempolicy.c

Lines changed: 3 additions & 3 deletions

@@ -2140,7 +2140,7 @@ static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
 {
 	struct page *page;
 
-	page = __alloc_pages(gfp, order, nid);
+	page = __alloc_pages(gfp, order, nid, NULL);
 	/* skip NUMA_INTERLEAVE_HIT counter update if numa stats is disabled */
 	if (!static_branch_likely(&vm_numa_stat_key))
 		return page;
@@ -2237,7 +2237,7 @@ alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
 
 	nmask = policy_nodemask(gfp, pol);
 	preferred_nid = policy_node(gfp, pol, node);
-	page = __alloc_pages_nodemask(gfp, order, preferred_nid, nmask);
+	page = __alloc_pages(gfp, order, preferred_nid, nmask);
 	mpol_cond_put(pol);
 out:
 	return page;
@@ -2274,7 +2274,7 @@ struct page *alloc_pages_current(gfp_t gfp, unsigned order)
 	if (pol->mode == MPOL_INTERLEAVE)
 		page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
 	else
-		page = __alloc_pages_nodemask(gfp, order,
+		page = __alloc_pages(gfp, order,
 				policy_node(gfp, pol, numa_node_id()),
 				policy_nodemask(gfp, pol));

mm/migrate.c

Lines changed: 1 addition & 1 deletion

@@ -1617,7 +1617,7 @@ struct page *alloc_migration_target(struct page *page, unsigned long private)
 	if (is_highmem_idx(zidx) || zidx == ZONE_MOVABLE)
 		gfp_mask |= __GFP_HIGHMEM;
 
-	new_page = __alloc_pages_nodemask(gfp_mask, order, nid, mtc->nmask);
+	new_page = __alloc_pages(gfp_mask, order, nid, mtc->nmask);
 
 	if (new_page && PageTransHuge(new_page))
 		prep_transhuge_page(new_page);

mm/page_alloc.c

Lines changed: 2 additions & 3 deletions

@@ -5013,8 +5013,7 @@ static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
 /*
  * This is the 'heart' of the zoned buddy allocator.
  */
-struct page *
-__alloc_pages_nodemask(gfp_t gfp, unsigned int order, int preferred_nid,
+struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
 							nodemask_t *nodemask)
 {
 	struct page *page;
@@ -5076,7 +5075,7 @@ __alloc_pages_nodemask(gfp_t gfp, unsigned int order, int preferred_nid,
 
 	return page;
 }
-EXPORT_SYMBOL(__alloc_pages_nodemask);
+EXPORT_SYMBOL(__alloc_pages);
 
 /*
  * Common helper functions. Never use with __GFP_HIGHMEM because the returned
