Skip to content

Commit 60115fa

Browse files
Kefeng Wang authored and torvalds committed
mm: defer kmemleak object creation of module_alloc()
Yongqiang reports a kmemleak panic on module insmod/rmmod with KASAN enabled (without KASAN_VMALLOC) on x86[1]. When the module area allocates memory, its kmemleak object is created successfully, but the KASAN shadow memory of the module allocation is not ready yet, so when kmemleak scans the module's pointers it panics because there is no shadow memory for the KASAN check. module_alloc __vmalloc_node_range kmemleak_vmalloc kmemleak_scan update_checksum kasan_module_alloc kmemleak_ignore Note, there is no problem if KASAN_VMALLOC is enabled, since the module area's entire shadow memory is preallocated. Thus, the bug only exists on architectures which support dynamic allocation of the module area per module load; for now, only x86/arm64/s390 are involved. Add a VM_DEFER_KMEMLEAK flag to defer the kmemleak registration of the vmalloc'ed object in module_alloc(), fixing this issue. [1] https://lore.kernel.org/all/[email protected]/ [[email protected]: fix build] Link: https://lkml.kernel.org/r/[email protected] [[email protected]: simplify ifdefs, per Andrey] Link: https://lkml.kernel.org/r/CA+fCnZcnwJHUQq34VuRxpdoY6_XbJCDJ-jopksS5Eia4PijPzw@mail.gmail.com Link: https://lkml.kernel.org/r/[email protected] Fixes: 793213a ("s390/kasan: dynamic shadow mem allocation for modules") Fixes: 39d114d ("arm64: add KASAN support") Fixes: bebf56a ("kasan: enable instrumentation of global variables") Signed-off-by: Kefeng Wang <[email protected]> Reported-by: Yongqiang Liu <[email protected]> Cc: Andrey Konovalov <[email protected]> Cc: Andrey Ryabinin <[email protected]> Cc: Dmitry Vyukov <[email protected]> Cc: Catalin Marinas <[email protected]> Cc: Will Deacon <[email protected]> Cc: Heiko Carstens <[email protected]> Cc: Vasily Gorbik <[email protected]> Cc: Christian Borntraeger <[email protected]> Cc: Alexander Gordeev <[email protected]> Cc: Thomas Gleixner <[email protected]> Cc: Ingo Molnar <[email protected]> Cc: Borislav Petkov <[email protected]> Cc: Dave Hansen <[email protected]> Cc: Alexander Potapenko <[email
protected]> Cc: Kefeng Wang <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
1 parent 972fa3a commit 60115fa

File tree

7 files changed

+27
-12
lines changed

7 files changed

+27
-12
lines changed

arch/arm64/kernel/module.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -36,7 +36,7 @@ void *module_alloc(unsigned long size)
3636
module_alloc_end = MODULES_END;
3737

3838
p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
39-
module_alloc_end, gfp_mask, PAGE_KERNEL, 0,
39+
module_alloc_end, gfp_mask, PAGE_KERNEL, VM_DEFER_KMEMLEAK,
4040
NUMA_NO_NODE, __builtin_return_address(0));
4141

4242
if (!p && IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
@@ -58,7 +58,7 @@ void *module_alloc(unsigned long size)
5858
PAGE_KERNEL, 0, NUMA_NO_NODE,
5959
__builtin_return_address(0));
6060

61-
if (p && (kasan_module_alloc(p, size) < 0)) {
61+
if (p && (kasan_module_alloc(p, size, gfp_mask) < 0)) {
6262
vfree(p);
6363
return NULL;
6464
}

arch/s390/kernel/module.c

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -37,14 +37,15 @@
3737

3838
void *module_alloc(unsigned long size)
3939
{
40+
gfp_t gfp_mask = GFP_KERNEL;
4041
void *p;
4142

4243
if (PAGE_ALIGN(size) > MODULES_LEN)
4344
return NULL;
4445
p = __vmalloc_node_range(size, MODULE_ALIGN, MODULES_VADDR, MODULES_END,
45-
GFP_KERNEL, PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
46+
gfp_mask, PAGE_KERNEL_EXEC, VM_DEFER_KMEMLEAK, NUMA_NO_NODE,
4647
__builtin_return_address(0));
47-
if (p && (kasan_module_alloc(p, size) < 0)) {
48+
if (p && (kasan_module_alloc(p, size, gfp_mask) < 0)) {
4849
vfree(p);
4950
return NULL;
5051
}

arch/x86/kernel/module.c

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -67,17 +67,18 @@ static unsigned long int get_module_load_offset(void)
6767

6868
void *module_alloc(unsigned long size)
6969
{
70+
gfp_t gfp_mask = GFP_KERNEL;
7071
void *p;
7172

7273
if (PAGE_ALIGN(size) > MODULES_LEN)
7374
return NULL;
7475

7576
p = __vmalloc_node_range(size, MODULE_ALIGN,
7677
MODULES_VADDR + get_module_load_offset(),
77-
MODULES_END, GFP_KERNEL,
78-
PAGE_KERNEL, 0, NUMA_NO_NODE,
78+
MODULES_END, gfp_mask,
79+
PAGE_KERNEL, VM_DEFER_KMEMLEAK, NUMA_NO_NODE,
7980
__builtin_return_address(0));
80-
if (p && (kasan_module_alloc(p, size) < 0)) {
81+
if (p && (kasan_module_alloc(p, size, gfp_mask) < 0)) {
8182
vfree(p);
8283
return NULL;
8384
}

include/linux/kasan.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -474,12 +474,12 @@ static inline void kasan_populate_early_vm_area_shadow(void *start,
474474
* allocations with real shadow memory. With KASAN vmalloc, the special
475475
* case is unnecessary, as the work is handled in the generic case.
476476
*/
477-
int kasan_module_alloc(void *addr, size_t size);
477+
int kasan_module_alloc(void *addr, size_t size, gfp_t gfp_mask);
478478
void kasan_free_shadow(const struct vm_struct *vm);
479479

480480
#else /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */
481481

482-
static inline int kasan_module_alloc(void *addr, size_t size) { return 0; }
482+
static inline int kasan_module_alloc(void *addr, size_t size, gfp_t gfp_mask) { return 0; }
483483
static inline void kasan_free_shadow(const struct vm_struct *vm) {}
484484

485485
#endif /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */

include/linux/vmalloc.h

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -28,6 +28,13 @@ struct notifier_block; /* in notifier.h */
2828
#define VM_MAP_PUT_PAGES 0x00000200 /* put pages and free array in vfree */
2929
#define VM_NO_HUGE_VMAP 0x00000400 /* force PAGE_SIZE pte mapping */
3030

31+
#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
32+
!defined(CONFIG_KASAN_VMALLOC)
33+
#define VM_DEFER_KMEMLEAK 0x00000800 /* defer kmemleak object creation */
34+
#else
35+
#define VM_DEFER_KMEMLEAK 0
36+
#endif
37+
3138
/*
3239
* VM_KASAN is used slightly differently depending on CONFIG_KASAN_VMALLOC.
3340
*

mm/kasan/shadow.c

Lines changed: 7 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -498,7 +498,7 @@ void kasan_release_vmalloc(unsigned long start, unsigned long end,
498498

499499
#else /* CONFIG_KASAN_VMALLOC */
500500

501-
int kasan_module_alloc(void *addr, size_t size)
501+
int kasan_module_alloc(void *addr, size_t size, gfp_t gfp_mask)
502502
{
503503
void *ret;
504504
size_t scaled_size;
@@ -520,9 +520,14 @@ int kasan_module_alloc(void *addr, size_t size)
520520
__builtin_return_address(0));
521521

522522
if (ret) {
523+
struct vm_struct *vm = find_vm_area(addr);
523524
__memset(ret, KASAN_SHADOW_INIT, shadow_size);
524-
find_vm_area(addr)->flags |= VM_KASAN;
525+
vm->flags |= VM_KASAN;
525526
kmemleak_ignore(ret);
527+
528+
if (vm->flags & VM_DEFER_KMEMLEAK)
529+
kmemleak_vmalloc(vm, size, gfp_mask);
530+
526531
return 0;
527532
}
528533

mm/vmalloc.c

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -3074,7 +3074,8 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
30743074
clear_vm_uninitialized_flag(area);
30753075

30763076
size = PAGE_ALIGN(size);
3077-
kmemleak_vmalloc(area, size, gfp_mask);
3077+
if (!(vm_flags & VM_DEFER_KMEMLEAK))
3078+
kmemleak_vmalloc(area, size, gfp_mask);
30783079

30793080
return addr;
30803081

0 commit comments

Comments
 (0)