
Commit 1e8ce83

Robin Holt authored and torvalds committed
mm: meminit: move page initialization into a separate function
Currently, memmap_init_zone() has all the smarts for initializing a single page. A subset of this is required for parallel page initialisation and so this patch breaks up the monolithic function in preparation. Signed-off-by: Robin Holt <[email protected]> Signed-off-by: Nathan Zimmer <[email protected]> Signed-off-by: Mel Gorman <[email protected]> Tested-by: Nate Zimmer <[email protected]> Tested-by: Waiman Long <[email protected]> Tested-by: Daniel J Blueman <[email protected]> Acked-by: Pekka Enberg <[email protected]> Cc: Robin Holt <[email protected]> Cc: Dave Hansen <[email protected]> Cc: Waiman Long <[email protected]> Cc: Scott Norton <[email protected]> Cc: "Luck, Tony" <[email protected]> Cc: Ingo Molnar <[email protected]> Cc: "H. Peter Anvin" <[email protected]> Cc: Thomas Gleixner <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
1 parent 8e7a7f8 commit 1e8ce83

File tree

1 file changed: +46 -33 lines changed


mm/page_alloc.c

Lines changed: 46 additions & 33 deletions
@@ -764,6 +764,51 @@ static int free_tail_pages_check(struct page *head_page, struct page *page)
 	return 0;
 }
 
+static void __meminit __init_single_page(struct page *page, unsigned long pfn,
+				unsigned long zone, int nid)
+{
+	struct zone *z = &NODE_DATA(nid)->node_zones[zone];
+
+	set_page_links(page, zone, nid, pfn);
+	mminit_verify_page_links(page, zone, nid, pfn);
+	init_page_count(page);
+	page_mapcount_reset(page);
+	page_cpupid_reset_last(page);
+	SetPageReserved(page);
+
+	/*
+	 * Mark the block movable so that blocks are reserved for
+	 * movable at startup. This will force kernel allocations
+	 * to reserve their blocks rather than leaking throughout
+	 * the address space during boot when many long-lived
+	 * kernel allocations are made. Later some blocks near
+	 * the start are marked MIGRATE_RESERVE by
+	 * setup_zone_migrate_reserve()
+	 *
+	 * bitmap is created for zone's valid pfn range. but memmap
+	 * can be created for invalid pages (for alignment)
+	 * check here not to call set_pageblock_migratetype() against
+	 * pfn out of zone.
+	 */
+	if ((z->zone_start_pfn <= pfn)
+	    && (pfn < zone_end_pfn(z))
+	    && !(pfn & (pageblock_nr_pages - 1)))
+		set_pageblock_migratetype(page, MIGRATE_MOVABLE);
+
+	INIT_LIST_HEAD(&page->lru);
+#ifdef WANT_PAGE_VIRTUAL
+	/* The shift won't overflow because ZONE_NORMAL is below 4G. */
+	if (!is_highmem_idx(zone))
+		set_page_address(page, __va(pfn << PAGE_SHIFT));
+#endif
+}
+
+static void __meminit __init_single_pfn(unsigned long pfn, unsigned long zone,
+					int nid)
+{
+	return __init_single_page(pfn_to_page(pfn), pfn, zone, nid);
+}
+
 static bool free_pages_prepare(struct page *page, unsigned int order)
 {
 	bool compound = PageCompound(page);
@@ -4212,7 +4257,6 @@ static void setup_zone_migrate_reserve(struct zone *zone)
 void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
 		unsigned long start_pfn, enum memmap_context context)
 {
-	struct page *page;
 	unsigned long end_pfn = start_pfn + size;
 	unsigned long pfn;
 	struct zone *z;
@@ -4233,38 +4277,7 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
 			if (!early_pfn_in_nid(pfn, nid))
 				continue;
 		}
-		page = pfn_to_page(pfn);
-		set_page_links(page, zone, nid, pfn);
-		mminit_verify_page_links(page, zone, nid, pfn);
-		init_page_count(page);
-		page_mapcount_reset(page);
-		page_cpupid_reset_last(page);
-		SetPageReserved(page);
-		/*
-		 * Mark the block movable so that blocks are reserved for
-		 * movable at startup. This will force kernel allocations
-		 * to reserve their blocks rather than leaking throughout
-		 * the address space during boot when many long-lived
-		 * kernel allocations are made. Later some blocks near
-		 * the start are marked MIGRATE_RESERVE by
-		 * setup_zone_migrate_reserve()
-		 *
-		 * bitmap is created for zone's valid pfn range. but memmap
-		 * can be created for invalid pages (for alignment)
-		 * check here not to call set_pageblock_migratetype() against
-		 * pfn out of zone.
-		 */
-		if ((z->zone_start_pfn <= pfn)
-		    && (pfn < zone_end_pfn(z))
-		    && !(pfn & (pageblock_nr_pages - 1)))
-			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
-
-		INIT_LIST_HEAD(&page->lru);
-#ifdef WANT_PAGE_VIRTUAL
-		/* The shift won't overflow because ZONE_NORMAL is below 4G. */
-		if (!is_highmem_idx(zone))
-			set_page_address(page, __va(pfn << PAGE_SHIFT));
-#endif
+		__init_single_pfn(pfn, zone, nid);
 	}
 }
