Skip to content

Commit 12a8cc7

Browse files
aryabinin and Ingo Molnar
authored and committed
x86/kasan: Use the same shadow offset for 4- and 5-level paging
We are going to support boot-time switching between 4- and 5-level paging. For KASAN it means we cannot have different KASAN_SHADOW_OFFSET for different paging modes: the constant is passed to gcc to generate code and cannot be changed at runtime. This patch changes KASAN code to use 0xdffffc0000000000 as shadow offset for both 4- and 5-level paging. For 5-level paging it means that shadow memory region is not aligned to PGD boundary anymore and we have to handle unaligned parts of the region properly. In addition, we have to exclude paravirt code from KASAN instrumentation as we now use set_pgd() before KASAN is fully ready. [[email protected]: clenaup, changelog message] Signed-off-by: Andrey Ryabinin <[email protected]> Signed-off-by: Kirill A. Shutemov <[email protected]> Cc: Andrew Morton <[email protected]> Cc: Andy Lutomirski <[email protected]> Cc: Borislav Petkov <[email protected]> Cc: Cyrill Gorcunov <[email protected]> Cc: Linus Torvalds <[email protected]> Cc: Peter Zijlstra <[email protected]> Cc: Thomas Gleixner <[email protected]> Cc: [email protected] Link: http://lkml.kernel.org/r/[email protected] Signed-off-by: Ingo Molnar <[email protected]>
1 parent 83e3c48 commit 12a8cc7

File tree

4 files changed

+83
-24
lines changed

4 files changed

+83
-24
lines changed

Documentation/x86/x86_64/mm.txt

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -34,7 +34,7 @@ ff92000000000000 - ffd1ffffffffffff (=54 bits) vmalloc/ioremap space
3434
ffd2000000000000 - ffd3ffffffffffff (=49 bits) hole
3535
ffd4000000000000 - ffd5ffffffffffff (=49 bits) virtual memory map (512TB)
3636
... unused hole ...
37-
ffd8000000000000 - fff7ffffffffffff (=53 bits) kasan shadow memory (8PB)
37+
ffdf000000000000 - fffffc0000000000 (=53 bits) kasan shadow memory (8PB)
3838
... unused hole ...
3939
ffffff0000000000 - ffffff7fffffffff (=39 bits) %esp fixup stacks
4040
... unused hole ...

arch/x86/Kconfig

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -302,7 +302,6 @@ config ARCH_SUPPORTS_DEBUG_PAGEALLOC
302302
config KASAN_SHADOW_OFFSET
303303
hex
304304
depends on KASAN
305-
default 0xdff8000000000000 if X86_5LEVEL
306305
default 0xdffffc0000000000
307306

308307
config HAVE_INTEL_TXT

arch/x86/kernel/Makefile

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -24,7 +24,8 @@ endif
2424
KASAN_SANITIZE_head$(BITS).o := n
2525
KASAN_SANITIZE_dumpstack.o := n
2626
KASAN_SANITIZE_dumpstack_$(BITS).o := n
27-
KASAN_SANITIZE_stacktrace.o := n
27+
KASAN_SANITIZE_stacktrace.o := n
28+
KASAN_SANITIZE_paravirt.o := n
2829

2930
OBJECT_FILES_NON_STANDARD_head_$(BITS).o := y
3031
OBJECT_FILES_NON_STANDARD_relocate_kernel_$(BITS).o := y

arch/x86/mm/kasan_init_64.c

Lines changed: 80 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -15,6 +15,8 @@
1515

1616
extern struct range pfn_mapped[E820_MAX_ENTRIES];
1717

18+
static p4d_t tmp_p4d_table[PTRS_PER_P4D] __initdata __aligned(PAGE_SIZE);
19+
1820
static int __init map_range(struct range *range)
1921
{
2022
unsigned long start;
@@ -30,8 +32,10 @@ static void __init clear_pgds(unsigned long start,
3032
unsigned long end)
3133
{
3234
pgd_t *pgd;
35+
/* See comment in kasan_init() */
36+
unsigned long pgd_end = end & PGDIR_MASK;
3337

34-
for (; start < end; start += PGDIR_SIZE) {
38+
for (; start < pgd_end; start += PGDIR_SIZE) {
3539
pgd = pgd_offset_k(start);
3640
/*
3741
* With folded p4d, pgd_clear() is nop, use p4d_clear()
@@ -42,29 +46,61 @@ static void __init clear_pgds(unsigned long start,
4246
else
4347
pgd_clear(pgd);
4448
}
49+
50+
pgd = pgd_offset_k(start);
51+
for (; start < end; start += P4D_SIZE)
52+
p4d_clear(p4d_offset(pgd, start));
53+
}
54+
55+
static inline p4d_t *early_p4d_offset(pgd_t *pgd, unsigned long addr)
56+
{
57+
unsigned long p4d;
58+
59+
if (!IS_ENABLED(CONFIG_X86_5LEVEL))
60+
return (p4d_t *)pgd;
61+
62+
p4d = __pa_nodebug(pgd_val(*pgd)) & PTE_PFN_MASK;
63+
p4d += __START_KERNEL_map - phys_base;
64+
return (p4d_t *)p4d + p4d_index(addr);
65+
}
66+
67+
static void __init kasan_early_p4d_populate(pgd_t *pgd,
68+
unsigned long addr,
69+
unsigned long end)
70+
{
71+
pgd_t pgd_entry;
72+
p4d_t *p4d, p4d_entry;
73+
unsigned long next;
74+
75+
if (pgd_none(*pgd)) {
76+
pgd_entry = __pgd(_KERNPG_TABLE | __pa_nodebug(kasan_zero_p4d));
77+
set_pgd(pgd, pgd_entry);
78+
}
79+
80+
p4d = early_p4d_offset(pgd, addr);
81+
do {
82+
next = p4d_addr_end(addr, end);
83+
84+
if (!p4d_none(*p4d))
85+
continue;
86+
87+
p4d_entry = __p4d(_KERNPG_TABLE | __pa_nodebug(kasan_zero_pud));
88+
set_p4d(p4d, p4d_entry);
89+
} while (p4d++, addr = next, addr != end && p4d_none(*p4d));
4590
}
4691

4792
static void __init kasan_map_early_shadow(pgd_t *pgd)
4893
{
49-
int i;
50-
unsigned long start = KASAN_SHADOW_START;
94+
/* See comment in kasan_init() */
95+
unsigned long addr = KASAN_SHADOW_START & PGDIR_MASK;
5196
unsigned long end = KASAN_SHADOW_END;
97+
unsigned long next;
5298

53-
for (i = pgd_index(start); start < end; i++) {
54-
switch (CONFIG_PGTABLE_LEVELS) {
55-
case 4:
56-
pgd[i] = __pgd(__pa_nodebug(kasan_zero_pud) |
57-
_KERNPG_TABLE);
58-
break;
59-
case 5:
60-
pgd[i] = __pgd(__pa_nodebug(kasan_zero_p4d) |
61-
_KERNPG_TABLE);
62-
break;
63-
default:
64-
BUILD_BUG();
65-
}
66-
start += PGDIR_SIZE;
67-
}
99+
pgd += pgd_index(addr);
100+
do {
101+
next = pgd_addr_end(addr, end);
102+
kasan_early_p4d_populate(pgd, addr, next);
103+
} while (pgd++, addr = next, addr != end);
68104
}
69105

70106
#ifdef CONFIG_KASAN_INLINE
@@ -101,7 +137,7 @@ void __init kasan_early_init(void)
101137
for (i = 0; i < PTRS_PER_PUD; i++)
102138
kasan_zero_pud[i] = __pud(pud_val);
103139

104-
for (i = 0; CONFIG_PGTABLE_LEVELS >= 5 && i < PTRS_PER_P4D; i++)
140+
for (i = 0; IS_ENABLED(CONFIG_X86_5LEVEL) && i < PTRS_PER_P4D; i++)
105141
kasan_zero_p4d[i] = __p4d(p4d_val);
106142

107143
kasan_map_early_shadow(early_top_pgt);
@@ -117,12 +153,35 @@ void __init kasan_init(void)
117153
#endif
118154

119155
memcpy(early_top_pgt, init_top_pgt, sizeof(early_top_pgt));
156+
157+
/*
158+
* We use the same shadow offset for 4- and 5-level paging to
159+
* facilitate boot-time switching between paging modes.
160+
* As result in 5-level paging mode KASAN_SHADOW_START and
161+
* KASAN_SHADOW_END are not aligned to PGD boundary.
162+
*
163+
* KASAN_SHADOW_START doesn't share PGD with anything else.
164+
* We claim whole PGD entry to make things easier.
165+
*
166+
* KASAN_SHADOW_END lands in the last PGD entry and it collides with
167+
* bunch of things like kernel code, modules, EFI mapping, etc.
168+
* We need to take extra steps to not overwrite them.
169+
*/
170+
if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
171+
void *ptr;
172+
173+
ptr = (void *)pgd_page_vaddr(*pgd_offset_k(KASAN_SHADOW_END));
174+
memcpy(tmp_p4d_table, (void *)ptr, sizeof(tmp_p4d_table));
175+
set_pgd(&early_top_pgt[pgd_index(KASAN_SHADOW_END)],
176+
__pgd(__pa(tmp_p4d_table) | _KERNPG_TABLE));
177+
}
178+
120179
load_cr3(early_top_pgt);
121180
__flush_tlb_all();
122181

123-
clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
182+
clear_pgds(KASAN_SHADOW_START & PGDIR_MASK, KASAN_SHADOW_END);
124183

125-
kasan_populate_zero_shadow((void *)KASAN_SHADOW_START,
184+
kasan_populate_zero_shadow((void *)(KASAN_SHADOW_START & PGDIR_MASK),
126185
kasan_mem_to_shadow((void *)PAGE_OFFSET));
127186

128187
for (i = 0; i < E820_MAX_ENTRIES; i++) {

0 commit comments

Comments (0)