@@ -5014,7 +5014,7 @@ static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
  * This is the 'heart' of the zoned buddy allocator.
  */
 struct page *
-__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
+__alloc_pages_nodemask(gfp_t gfp, unsigned int order, int preferred_nid,
                                                         nodemask_t *nodemask)
 {
         struct page *page;
@@ -5027,21 +5027,21 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
          * so bail out early if the request is out of bound.
          */
         if (unlikely(order >= MAX_ORDER)) {
-                WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
+                WARN_ON_ONCE(!(gfp & __GFP_NOWARN));
                 return NULL;
         }
 
-        gfp_mask &= gfp_allowed_mask;
-        alloc_gfp = gfp_mask;
-        if (!prepare_alloc_pages(gfp_mask, order, preferred_nid, nodemask, &ac,
+        gfp &= gfp_allowed_mask;
+        alloc_gfp = gfp;
+        if (!prepare_alloc_pages(gfp, order, preferred_nid, nodemask, &ac,
                         &alloc_gfp, &alloc_flags))
                 return NULL;
 
         /*
          * Forbid the first pass from falling back to types that fragment
          * memory until all local zones are considered.
          */
-        alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone, gfp_mask);
+        alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone, gfp);
 
         /* First allocation attempt */
         page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac);
@@ -5054,7 +5054,7 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
          * from a particular context which has been marked by
          * memalloc_no{fs,io}_{save,restore}.
          */
-        alloc_gfp = current_gfp_context(gfp_mask);
+        alloc_gfp = current_gfp_context(gfp);
         ac.spread_dirty_pages = false;
 
         /*
@@ -5066,8 +5066,8 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
         page = __alloc_pages_slowpath(alloc_gfp, order, &ac);
 
 out:
-        if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) && page &&
-            unlikely(__memcg_kmem_charge_page(page, gfp_mask, order) != 0)) {
+        if (memcg_kmem_enabled() && (gfp & __GFP_ACCOUNT) && page &&
+            unlikely(__memcg_kmem_charge_page(page, gfp, order) != 0)) {
                 __free_pages(page, order);
                 page = NULL;
         }
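
For context, here is a minimal caller sketch. It is hypothetical and not part of this commit: the patch only renames the parameter inside __alloc_pages_nodemask(), so call sites keep passing their gfp flags exactly as before. The demo_* names below are illustrative, not kernel functions.

/* Hypothetical caller, for illustration only; demo_* helpers do not exist in the kernel. */
#include <linux/gfp.h>
#include <linux/topology.h>

static struct page *demo_alloc_two_pages(void)
{
        /* order-1 request: two contiguous pages, charged to the memcg via __GFP_ACCOUNT */
        gfp_t flags = GFP_KERNEL | __GFP_ACCOUNT;

        /* Prefer the local node, no explicit nodemask. */
        return __alloc_pages_nodemask(flags, 1, numa_node_id(), NULL);
}

static void demo_free_two_pages(struct page *page)
{
        if (page)
                __free_pages(page, 1);   /* same order as the allocation */
}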