@@ -224,6 +224,10 @@ static int kmemleak_error;
 static unsigned long min_addr = ULONG_MAX;
 static unsigned long max_addr;
 
+/* minimum and maximum address that may be valid per-CPU pointers */
+static unsigned long min_percpu_addr = ULONG_MAX;
+static unsigned long max_percpu_addr;
+
 static struct task_struct *scan_thread;
 /* used to avoid reporting of recently allocated objects */
 static unsigned long jiffies_min_age;
@@ -294,13 +298,20 @@ static void hex_dump_object(struct seq_file *seq,
 	const u8 *ptr = (const u8 *)object->pointer;
 	size_t len;
 
-	if (WARN_ON_ONCE(object->flags & (OBJECT_PHYS | OBJECT_PERCPU)))
+	if (WARN_ON_ONCE(object->flags & OBJECT_PHYS))
 		return;
 
+	if (object->flags & OBJECT_PERCPU)
+		ptr = (const u8 *)this_cpu_ptr((void __percpu *)object->pointer);
+
 	/* limit the number of lines to HEX_MAX_LINES */
 	len = min_t(size_t, object->size, HEX_MAX_LINES * HEX_ROW_SIZE);
 
-	warn_or_seq_printf(seq, "  hex dump (first %zu bytes):\n", len);
+	if (object->flags & OBJECT_PERCPU)
+		warn_or_seq_printf(seq, "  hex dump (first %zu bytes on cpu %d):\n",
+				   len, raw_smp_processor_id());
+	else
+		warn_or_seq_printf(seq, "  hex dump (first %zu bytes):\n", len);
 	kasan_disable_current();
 	warn_or_seq_hex_dump(seq, DUMP_PREFIX_NONE, HEX_ROW_SIZE,
 			     HEX_GROUP_SIZE, kasan_reset_tag((void *)ptr), len, HEX_ASCII);
@@ -695,10 +706,14 @@ static int __link_object(struct kmemleak_object *object, unsigned long ptr,
 
 	untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
 	/*
-	 * Only update min_addr and max_addr with object
-	 * storing virtual address.
+	 * Only update min_addr and max_addr with object storing virtual
+	 * address. And update min_percpu_addr max_percpu_addr for per-CPU
+	 * objects.
 	 */
-	if (!(objflags & (OBJECT_PHYS | OBJECT_PERCPU))) {
+	if (objflags & OBJECT_PERCPU) {
+		min_percpu_addr = min(min_percpu_addr, untagged_ptr);
+		max_percpu_addr = max(max_percpu_addr, untagged_ptr + size);
+	} else if (!(objflags & OBJECT_PHYS)) {
 		min_addr = min(min_addr, untagged_ptr);
 		max_addr = max(max_addr, untagged_ptr + size);
 	}
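The new min_percpu_addr/max_percpu_addr window mirrors the existing min_addr/max_addr one: it lets the scanner reject words that cannot possibly be per-CPU pointers before paying for any object lookup. A minimal sketch of that gate, with a hypothetical helper name (the real check is open-coded in pointer_update_refs further down):

/* Hypothetical helper, for illustration only: true when a scanned word
 * falls inside the per-CPU address window tracked above. */
static bool addr_in_percpu_window(unsigned long untagged_ptr)
{
	return untagged_ptr >= min_percpu_addr && untagged_ptr < max_percpu_addr;
}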
@@ -1055,12 +1070,8 @@ void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
 {
 	pr_debug("%s(0x%px, %zu)\n", __func__, ptr, size);
 
-	/*
-	 * Percpu allocations are only scanned and not reported as leaks
-	 * (min_count is set to 0).
-	 */
 	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
-		create_object_percpu((unsigned long)ptr, size, 0, gfp);
+		create_object_percpu((unsigned long)ptr, size, 1, gfp);
 }
 EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu);
 
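Dropping the old comment and raising min_count from 0 to 1 is the behavioural core of the patch: per-CPU allocations are no longer scan-only, they become leak candidates that need at least one referencing pointer to stay unreported. A rough illustration of what can now be flagged, using a made-up module (names and structure are hypothetical, not part of the patch):

#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/module.h>

static u64 __percpu *demo_counters;

static int __init demo_init(void)
{
	demo_counters = alloc_percpu(u64);	/* tracked via kmemleak_alloc_percpu() */
	if (!demo_counters)
		return -ENOMEM;
	demo_counters = NULL;			/* last reference lost: now reportable as a leak */
	return 0;
}
module_init(demo_init);
MODULE_LICENSE("GPL");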
@@ -1304,12 +1315,23 @@ static bool update_checksum(struct kmemleak_object *object)
 {
 	u32 old_csum = object->checksum;
 
-	if (WARN_ON_ONCE(object->flags & (OBJECT_PHYS | OBJECT_PERCPU)))
+	if (WARN_ON_ONCE(object->flags & OBJECT_PHYS))
 		return false;
 
 	kasan_disable_current();
 	kcsan_disable_current();
-	object->checksum = crc32(0, kasan_reset_tag((void *)object->pointer), object->size);
+	if (object->flags & OBJECT_PERCPU) {
+		unsigned int cpu;
+
+		object->checksum = 0;
+		for_each_possible_cpu(cpu) {
+			void *ptr = per_cpu_ptr((void __percpu *)object->pointer, cpu);
+
+			object->checksum ^= crc32(0, kasan_reset_tag((void *)ptr), object->size);
+		}
+	} else {
+		object->checksum = crc32(0, kasan_reset_tag((void *)object->pointer), object->size);
+	}
 	kasan_enable_current();
 	kcsan_enable_current();
 
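update_checksum() now folds every CPU's copy into one value by XOR-ing the per-copy CRCs, so a modification on any CPU between two scans changes the object's checksum and defers reporting, just as for ordinary objects. A standalone sketch of the same combining step, with an illustrative helper name rather than the in-tree code:

/* Illustrative only: combine the per-CPU copies into a single checksum.
 * A byte changing in any one copy changes the XOR-ed result. */
static u32 percpu_combined_crc32(const void __percpu *base, size_t size)
{
	unsigned int cpu;
	u32 csum = 0;

	for_each_possible_cpu(cpu)
		csum ^= crc32(0, per_cpu_ptr(base, cpu), size);

	return csum;
}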
@@ -1340,6 +1362,64 @@ static void update_refs(struct kmemleak_object *object)
 	}
 }
 
+static void pointer_update_refs(struct kmemleak_object *scanned,
+				unsigned long pointer, unsigned int objflags)
+{
+	struct kmemleak_object *object;
+	unsigned long untagged_ptr;
+	unsigned long excess_ref;
+
+	untagged_ptr = (unsigned long)kasan_reset_tag((void *)pointer);
+	if (objflags & OBJECT_PERCPU) {
+		if (untagged_ptr < min_percpu_addr || untagged_ptr >= max_percpu_addr)
+			return;
+	} else {
+		if (untagged_ptr < min_addr || untagged_ptr >= max_addr)
+			return;
+	}
+
+	/*
+	 * No need for get_object() here since we hold kmemleak_lock.
+	 * object->use_count cannot be dropped to 0 while the object
+	 * is still present in object_tree_root and object_list
+	 * (with updates protected by kmemleak_lock).
+	 */
+	object = __lookup_object(pointer, 1, objflags);
+	if (!object)
+		return;
+	if (object == scanned)
+		/* self referenced, ignore */
+		return;
+
+	/*
+	 * Avoid the lockdep recursive warning on object->lock being
+	 * previously acquired in scan_object(). These locks are
+	 * enclosed by scan_mutex.
+	 */
+	raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
+	/* only pass surplus references (object already gray) */
+	if (color_gray(object)) {
+		excess_ref = object->excess_ref;
+		/* no need for update_refs() if object already gray */
+	} else {
+		excess_ref = 0;
+		update_refs(object);
+	}
+	raw_spin_unlock(&object->lock);
+
+	if (excess_ref) {
+		object = lookup_object(excess_ref, 0);
+		if (!object)
+			return;
+		if (object == scanned)
+			/* circular reference, ignore */
+			return;
+		raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
+		update_refs(object);
+		raw_spin_unlock(&object->lock);
+	}
+}
+
 /*
  * Memory scanning is a long process and it needs to be interruptible. This
  * function checks whether such interrupt condition occurred.
@@ -1372,13 +1452,10 @@ static void scan_block(void *_start, void *_end,
 	unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
 	unsigned long *end = _end - (BYTES_PER_POINTER - 1);
 	unsigned long flags;
-	unsigned long untagged_ptr;
 
 	raw_spin_lock_irqsave(&kmemleak_lock, flags);
 	for (ptr = start; ptr < end; ptr++) {
-		struct kmemleak_object *object;
 		unsigned long pointer;
-		unsigned long excess_ref;
 
 		if (scan_should_stop())
 			break;
@@ -1387,50 +1464,8 @@ static void scan_block(void *_start, void *_end,
 		pointer = *(unsigned long *)kasan_reset_tag((void *)ptr);
 		kasan_enable_current();
 
-		untagged_ptr = (unsigned long)kasan_reset_tag((void *)pointer);
-		if (untagged_ptr < min_addr || untagged_ptr >= max_addr)
-			continue;
-
-		/*
-		 * No need for get_object() here since we hold kmemleak_lock.
-		 * object->use_count cannot be dropped to 0 while the object
-		 * is still present in object_tree_root and object_list
-		 * (with updates protected by kmemleak_lock).
-		 */
-		object = lookup_object(pointer, 1);
-		if (!object)
-			continue;
-		if (object == scanned)
-			/* self referenced, ignore */
-			continue;
-
-		/*
-		 * Avoid the lockdep recursive warning on object->lock being
-		 * previously acquired in scan_object(). These locks are
-		 * enclosed by scan_mutex.
-		 */
-		raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
-		/* only pass surplus references (object already gray) */
-		if (color_gray(object)) {
-			excess_ref = object->excess_ref;
-			/* no need for update_refs() if object already gray */
-		} else {
-			excess_ref = 0;
-			update_refs(object);
-		}
-		raw_spin_unlock(&object->lock);
-
-		if (excess_ref) {
-			object = lookup_object(excess_ref, 0);
-			if (!object)
-				continue;
-			if (object == scanned)
-				/* circular reference, ignore */
-				continue;
-			raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
-			update_refs(object);
-			raw_spin_unlock(&object->lock);
-		}
+		pointer_update_refs(scanned, pointer, 0);
+		pointer_update_refs(scanned, pointer, OBJECT_PERCPU);
 	}
 	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
 }
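Taken together, scan_block() now treats every aligned word as a potential pointer into either address space: pointer_update_refs(..., 0) handles ordinary virtual addresses and pointer_update_refs(..., OBJECT_PERCPU) handles per-CPU ones, so a per-CPU allocation stays unreported as long as some scanned object still holds its base pointer. A hedged usage sketch of that reachability rule (structure and names are hypothetical):

#include <linux/slab.h>
#include <linux/percpu.h>

struct demo_stats {
	int __percpu *hits;	/* this stored word is what the scanner finds */
};

static struct demo_stats *stats;

static int demo_setup(void)
{
	stats = kzalloc(sizeof(*stats), GFP_KERNEL);
	if (!stats)
		return -ENOMEM;
	stats->hits = alloc_percpu(int);
	if (!stats->hits) {
		kfree(stats);
		return -ENOMEM;
	}
	/* while 'stats' is reachable and still holds 'hits', neither is reported */
	return 0;
}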