@@ -1586,9 +1586,8 @@ static inline struct page *__rmqueue_cma_fallback(struct zone *zone,
 #endif
 
 /*
- * Move the free pages in a range to the freelist tail of the requested type.
- * Note that start_page and end_pages are not aligned on a pageblock
- * boundary. If alignment is required, use move_freepages_block()
+ * Change the type of a block and move all its free pages to that
+ * type's freelist.
  */
 static int move_freepages(struct zone *zone, unsigned long start_pfn,
 			  unsigned long end_pfn, int migratetype)
@@ -1598,6 +1597,9 @@ static int move_freepages(struct zone *zone, unsigned long start_pfn,
 	unsigned int order;
 	int pages_moved = 0;
 
+	VM_WARN_ON(start_pfn & (pageblock_nr_pages - 1));
+	VM_WARN_ON(start_pfn + pageblock_nr_pages - 1 != end_pfn);
+
 	for (pfn = start_pfn; pfn <= end_pfn;) {
 		page = pfn_to_page(pfn);
 		if (!PageBuddy(page)) {
@@ -1615,6 +1617,8 @@ static int move_freepages(struct zone *zone, unsigned long start_pfn,
 		pages_moved += 1 << order;
 	}
 
+	set_pageblock_migratetype(pfn_to_page(start_pfn), migratetype);
+
 	return pages_moved;
 }
 
@@ -1842,7 +1846,6 @@ steal_suitable_fallback(struct zone *zone, struct page *page,
 	if (free_pages + alike_pages >= (1 << (pageblock_order - 1)) ||
 			page_group_by_mobility_disabled) {
 		move_freepages(zone, start_pfn, end_pfn, start_type);
-		set_pageblock_migratetype(page, start_type);
 		return __rmqueue_smallest(zone, order, start_type);
 	}
 
@@ -1916,12 +1919,10 @@ static void reserve_highatomic_pageblock(struct page *page, struct zone *zone)
 	/* Yoink! */
 	mt = get_pageblock_migratetype(page);
 	/* Only reserve normal pageblocks (i.e., they can merge with others) */
-	if (migratetype_is_mergeable(mt)) {
-		if (move_freepages_block(zone, page, MIGRATE_HIGHATOMIC) != -1) {
-			set_pageblock_migratetype(page, MIGRATE_HIGHATOMIC);
+	if (migratetype_is_mergeable(mt))
+		if (move_freepages_block(zone, page,
+					 MIGRATE_HIGHATOMIC) != -1)
 			zone->nr_reserved_highatomic += pageblock_nr_pages;
-		}
-	}
 
 out_unlock:
 	spin_unlock_irqrestore(&zone->lock, flags);
@@ -2000,7 +2001,6 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
 			 * not fail on zone boundaries.
 			 */
 			WARN_ON_ONCE(ret == -1);
-			set_pageblock_migratetype(page, ac->migratetype);
 			if (ret > 0) {
 				spin_unlock_irqrestore(&zone->lock, flags);
 				return ret;
@@ -2682,10 +2682,9 @@ int __isolate_free_page(struct page *page, unsigned int order)
 			 * Only change normal pageblocks (i.e., they can merge
 			 * with others)
 			 */
-			if (migratetype_is_mergeable(mt) &&
-			    move_freepages_block(zone, page,
-						 MIGRATE_MOVABLE) != -1)
-				set_pageblock_migratetype(page, MIGRATE_MOVABLE);
+			if (migratetype_is_mergeable(mt))
+				move_freepages_block(zone, page,
+						     MIGRATE_MOVABLE);
 		}
 	}
 
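The net effect of the hunks above is a contract change: move_freepages() now operates on exactly one pageblock (the new VM_WARN_ON()s assert the alignment) and stamps the block's migratetype itself, so callers such as steal_suitable_fallback(), reserve_highatomic_pageblock(), unreserve_highatomic_pageblock() and __isolate_free_page() drop their separate set_pageblock_migratetype() calls and only check move_freepages_block()'s -1 failure return where that matters. The following is a minimal, userspace-only sketch of that contract, not kernel code; PAGEBLOCK_ORDER, free_page_map[], block_mt[] and the toy main() are invented here purely for illustration.

/*
 * Hedged model of the new move_freepages() contract. The data structures
 * below are stand-ins, not the kernel's buddy allocator state.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

#define PAGEBLOCK_ORDER		4	/* toy value for the demo */
#define PAGEBLOCK_NR_PAGES	(1UL << PAGEBLOCK_ORDER)
#define NR_PAGES		64

enum migratetype { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_HIGHATOMIC };

static bool free_page_map[NR_PAGES];	/* toy "is this page free" state */
static enum migratetype block_mt[NR_PAGES / PAGEBLOCK_NR_PAGES];

/*
 * Model of move_freepages(): takes exactly one pageblock and, as in the
 * patch, updates the block's migratetype itself instead of leaving that
 * to the caller. Returns the number of free pages "moved".
 */
static int move_freepages(unsigned long start_pfn, unsigned long end_pfn,
			  enum migratetype mt)
{
	int pages_moved = 0;

	/* Same invariants the patch adds as VM_WARN_ON()s. */
	assert(!(start_pfn & (PAGEBLOCK_NR_PAGES - 1)));
	assert(start_pfn + PAGEBLOCK_NR_PAGES - 1 == end_pfn);

	for (unsigned long pfn = start_pfn; pfn <= end_pfn; pfn++)
		if (free_page_map[pfn])
			pages_moved++;	/* stand-in for the freelist moves */

	block_mt[start_pfn / PAGEBLOCK_NR_PAGES] = mt;
	return pages_moved;
}

/*
 * Model of move_freepages_block(): derives the block bounds from any pfn
 * inside the block; a real failure would be reported as -1.
 */
static int move_freepages_block(unsigned long pfn, enum migratetype mt)
{
	unsigned long start = pfn & ~(PAGEBLOCK_NR_PAGES - 1);

	return move_freepages(start, start + PAGEBLOCK_NR_PAGES - 1, mt);
}

int main(void)
{
	for (int i = 0; i < NR_PAGES; i++)
		free_page_map[i] = (i % 3 == 0);

	/*
	 * Caller pattern after the patch: check the return value only;
	 * no separate set_pageblock_migratetype() call is needed.
	 */
	if (move_freepages_block(20, MIGRATE_HIGHATOMIC) != -1)
		printf("block 1 is now migratetype %d\n", block_mt[1]);

	return 0;
}

One way to read the change: with the migratetype update inside the mover, the block's type and the freelists its pages sit on are updated in the same place, so they can no longer drift apart between a move and a later set_pageblock_migratetype() call, which is what the removed caller-side calls had to keep in sync by hand.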