@@ -137,28 +137,28 @@ void vfree(const void *addr)
 }
 EXPORT_SYMBOL(vfree);
 
-void *__vmalloc(unsigned long size, gfp_t gfp_mask)
+void *__vmalloc_noprof(unsigned long size, gfp_t gfp_mask)
 {
 	/*
 	 * You can't specify __GFP_HIGHMEM with kmalloc() since kmalloc()
 	 * returns only a logical address.
 	 */
-	return kmalloc(size, (gfp_mask | __GFP_COMP) & ~__GFP_HIGHMEM);
+	return kmalloc_noprof(size, (gfp_mask | __GFP_COMP) & ~__GFP_HIGHMEM);
 }
-EXPORT_SYMBOL(__vmalloc);
+EXPORT_SYMBOL(__vmalloc_noprof);
 
-void *__vmalloc_node_range(unsigned long size, unsigned long align,
+void *__vmalloc_node_range_noprof(unsigned long size, unsigned long align,
 		unsigned long start, unsigned long end, gfp_t gfp_mask,
 		pgprot_t prot, unsigned long vm_flags, int node,
 		const void *caller)
 {
-	return __vmalloc(size, gfp_mask);
+	return __vmalloc_noprof(size, gfp_mask);
 }
 
-void *__vmalloc_node(unsigned long size, unsigned long align, gfp_t gfp_mask,
+void *__vmalloc_node_noprof(unsigned long size, unsigned long align, gfp_t gfp_mask,
 		int node, const void *caller)
 {
-	return __vmalloc(size, gfp_mask);
+	return __vmalloc_noprof(size, gfp_mask);
 }
 
 static void *__vmalloc_user_flags(unsigned long size, gfp_t flags)
@@ -179,11 +179,11 @@ static void *__vmalloc_user_flags(unsigned long size, gfp_t flags)
 	return ret;
 }
 
-void *vmalloc_user(unsigned long size)
+void *vmalloc_user_noprof(unsigned long size)
 {
 	return __vmalloc_user_flags(size, GFP_KERNEL | __GFP_ZERO);
 }
-EXPORT_SYMBOL(vmalloc_user);
+EXPORT_SYMBOL(vmalloc_user_noprof);
 
 struct page *vmalloc_to_page(const void *addr)
 {
@@ -217,13 +217,13 @@ long vread_iter(struct iov_iter *iter, const char *addr, size_t count)
  * For tight control over page level allocator and protection flags
  * use __vmalloc() instead.
  */
-void *vmalloc(unsigned long size)
+void *vmalloc_noprof(unsigned long size)
 {
-	return __vmalloc(size, GFP_KERNEL);
+	return __vmalloc_noprof(size, GFP_KERNEL);
 }
-EXPORT_SYMBOL(vmalloc);
+EXPORT_SYMBOL(vmalloc_noprof);
 
-void *vmalloc_huge(unsigned long size, gfp_t gfp_mask) __weak __alias(__vmalloc);
+void *vmalloc_huge_noprof(unsigned long size, gfp_t gfp_mask) __weak __alias(__vmalloc_noprof);
 
 /*
  * vzalloc - allocate virtually contiguous memory with zero fill
@@ -237,11 +237,11 @@ void *vmalloc_huge(unsigned long size, gfp_t gfp_mask) __weak __alias(__vmalloc)
  * For tight control over page level allocator and protection flags
  * use __vmalloc() instead.
  */
-void *vzalloc(unsigned long size)
+void *vzalloc_noprof(unsigned long size)
 {
-	return __vmalloc(size, GFP_KERNEL | __GFP_ZERO);
+	return __vmalloc_noprof(size, GFP_KERNEL | __GFP_ZERO);
 }
-EXPORT_SYMBOL(vzalloc);
+EXPORT_SYMBOL(vzalloc_noprof);
 
 /**
  * vmalloc_node - allocate memory on a specific node
@@ -254,11 +254,11 @@ EXPORT_SYMBOL(vzalloc);
  * For tight control over page level allocator and protection flags
  * use __vmalloc() instead.
  */
-void *vmalloc_node(unsigned long size, int node)
+void *vmalloc_node_noprof(unsigned long size, int node)
 {
-	return vmalloc(size);
+	return vmalloc_noprof(size);
 }
-EXPORT_SYMBOL(vmalloc_node);
+EXPORT_SYMBOL(vmalloc_node_noprof);
 
 /**
  * vzalloc_node - allocate memory on a specific node with zero fill
@@ -272,11 +272,11 @@ EXPORT_SYMBOL(vmalloc_node);
  * For tight control over page level allocator and protection flags
  * use __vmalloc() instead.
  */
-void *vzalloc_node(unsigned long size, int node)
+void *vzalloc_node_noprof(unsigned long size, int node)
 {
-	return vzalloc(size);
+	return vzalloc_noprof(size);
 }
-EXPORT_SYMBOL(vzalloc_node);
+EXPORT_SYMBOL(vzalloc_node_noprof);
 
 /**
  * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
@@ -285,11 +285,11 @@ EXPORT_SYMBOL(vzalloc_node);
  * Allocate enough 32bit PA addressable pages to cover @size from the
  * page level allocator and map them into contiguous kernel virtual space.
  */
-void *vmalloc_32(unsigned long size)
+void *vmalloc_32_noprof(unsigned long size)
 {
-	return __vmalloc(size, GFP_KERNEL);
+	return __vmalloc_noprof(size, GFP_KERNEL);
 }
-EXPORT_SYMBOL(vmalloc_32);
+EXPORT_SYMBOL(vmalloc_32_noprof);
 
 /**
  * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
@@ -301,15 +301,15 @@ EXPORT_SYMBOL(vmalloc_32);
  * VM_USERMAP is set on the corresponding VMA so that subsequent calls to
  * remap_vmalloc_range() are permissible.
  */
-void *vmalloc_32_user(unsigned long size)
+void *vmalloc_32_user_noprof(unsigned long size)
 {
 	/*
 	 * We'll have to sort out the ZONE_DMA bits for 64-bit,
 	 * but for now this can simply use vmalloc_user() directly.
 	 */
-	return vmalloc_user(size);
+	return vmalloc_user_noprof(size);
 }
-EXPORT_SYMBOL(vmalloc_32_user);
+EXPORT_SYMBOL(vmalloc_32_user_noprof);
 
 void *vmap(struct page **pages, unsigned int count, unsigned long flags, pgprot_t prot)
 {
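Note on how these renames are consumed (not part of this hunk): callers throughout the tree keep using the original names. With memory allocation profiling, the headers redefine those names as wrapper macros around the *_noprof variants so each allocation is charged to its call site; the *_noprof functions are the ones that skip that accounting, which is why only the definitions and EXPORT_SYMBOL() lines change here. Below is a minimal, simplified sketch of the header side, assuming the alloc_hooks() helper from include/linux/alloc_tag.h; declarations are trimmed for illustration.

/* Illustrative sketch of the companion include/linux/vmalloc.h change. */
void *vmalloc_noprof(unsigned long size);
#define vmalloc(...)	alloc_hooks(vmalloc_noprof(__VA_ARGS__))

void *__vmalloc_noprof(unsigned long size, gfp_t gfp_mask);
#define __vmalloc(...)	alloc_hooks(__vmalloc_noprof(__VA_ARGS__))

/*
 * A driver's vmalloc(PAGE_SIZE) thus expands to
 * alloc_hooks(vmalloc_noprof(PAGE_SIZE)), attributing the allocation to the
 * driver's own file:line rather than to mm/nommu.c.
 */

After macro expansion, modules calling vmalloc() reference the vmalloc_noprof symbol, hence the EXPORT_SYMBOL() renames above.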