Skip to content

Commit 96bc956

Browse files
Peter Zijlstra authored and Ingo Molnar committed
asm-generic/tlb, arch: Invert CONFIG_HAVE_RCU_TABLE_INVALIDATE
Make issuing a TLB invalidate for page-table pages the normal case. The reason is twofold: - too many invalidates is safer than too few, - most architectures use the linux page-tables natively and would thus require this. Make it an opt-out, instead of an opt-in. No change in behavior intended. Signed-off-by: Peter Zijlstra (Intel) <[email protected]> Acked-by: Will Deacon <[email protected]> Cc: Andrew Morton <[email protected]> Cc: Andy Lutomirski <[email protected]> Cc: Borislav Petkov <[email protected]> Cc: Dave Hansen <[email protected]> Cc: H. Peter Anvin <[email protected]> Cc: Linus Torvalds <[email protected]> Cc: Peter Zijlstra <[email protected]> Cc: Rik van Riel <[email protected]> Cc: Thomas Gleixner <[email protected]> Signed-off-by: Ingo Molnar <[email protected]>
1 parent 8b6dd0c commit 96bc956

File tree

7 files changed

+9
-8
lines changed

7 files changed

+9
-8
lines changed

arch/Kconfig

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -383,7 +383,7 @@ config HAVE_ARCH_JUMP_LABEL_RELATIVE
383383
config HAVE_RCU_TABLE_FREE
384384
bool
385385

386-
config HAVE_RCU_TABLE_INVALIDATE
386+
config HAVE_RCU_TABLE_NO_INVALIDATE
387387
bool
388388

389389
config HAVE_MMU_GATHER_PAGE_SIZE

arch/arm64/Kconfig

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -149,7 +149,6 @@ config ARM64
149149
select HAVE_PERF_USER_STACK_DUMP
150150
select HAVE_REGS_AND_STACK_ACCESS_API
151151
select HAVE_RCU_TABLE_FREE
152-
select HAVE_RCU_TABLE_INVALIDATE
153152
select HAVE_RSEQ
154153
select HAVE_STACKPROTECTOR
155154
select HAVE_SYSCALL_TRACEPOINTS

arch/powerpc/Kconfig

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -218,6 +218,7 @@ config PPC
218218
select HAVE_PERF_REGS
219219
select HAVE_PERF_USER_STACK_DUMP
220220
select HAVE_RCU_TABLE_FREE if SMP
221+
select HAVE_RCU_TABLE_NO_INVALIDATE if HAVE_RCU_TABLE_FREE
221222
select HAVE_MMU_GATHER_PAGE_SIZE
222223
select HAVE_REGS_AND_STACK_ACCESS_API
223224
select HAVE_RELIABLE_STACKTRACE if PPC_BOOK3S_64 && CPU_LITTLE_ENDIAN

arch/sparc/Kconfig

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -63,6 +63,7 @@ config SPARC64
6363
select HAVE_KRETPROBES
6464
select HAVE_KPROBES
6565
select HAVE_RCU_TABLE_FREE if SMP
66+
select HAVE_RCU_TABLE_NO_INVALIDATE if HAVE_RCU_TABLE_FREE
6667
select HAVE_MEMBLOCK_NODE_MAP
6768
select HAVE_ARCH_TRANSPARENT_HUGEPAGE
6869
select HAVE_DYNAMIC_FTRACE

arch/x86/Kconfig

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -183,7 +183,6 @@ config X86
183183
select HAVE_PERF_REGS
184184
select HAVE_PERF_USER_STACK_DUMP
185185
select HAVE_RCU_TABLE_FREE if PARAVIRT
186-
select HAVE_RCU_TABLE_INVALIDATE if HAVE_RCU_TABLE_FREE
187186
select HAVE_REGS_AND_STACK_ACCESS_API
188187
select HAVE_RELIABLE_STACKTRACE if X86_64 && (UNWINDER_FRAME_POINTER || UNWINDER_ORC) && STACK_VALIDATION
189188
select HAVE_FUNCTION_ARG_ACCESS_API

include/asm-generic/tlb.h

Lines changed: 5 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -135,11 +135,12 @@
135135
* When used, an architecture is expected to provide __tlb_remove_table()
136136
* which does the actual freeing of these pages.
137137
*
138-
* HAVE_RCU_TABLE_INVALIDATE
138+
* HAVE_RCU_TABLE_NO_INVALIDATE
139139
*
140-
* This makes HAVE_RCU_TABLE_FREE call tlb_flush_mmu_tlbonly() before freeing
141-
* the page-table pages. Required if you use HAVE_RCU_TABLE_FREE and your
142-
* architecture uses the Linux page-tables natively.
140+
* This makes HAVE_RCU_TABLE_FREE avoid calling tlb_flush_mmu_tlbonly() before
141+
* freeing the page-table pages. This can be avoided if you use
142+
* HAVE_RCU_TABLE_FREE and your architecture does _NOT_ use the Linux
143+
* page-tables natively.
143144
*
144145
* MMU_GATHER_NO_RANGE
145146
*

mm/mmu_gather.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -157,7 +157,7 @@ bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_
157157
*/
158158
static inline void tlb_table_invalidate(struct mmu_gather *tlb)
159159
{
160-
#ifdef CONFIG_HAVE_RCU_TABLE_INVALIDATE
160+
#ifndef CONFIG_HAVE_RCU_TABLE_NO_INVALIDATE
161161
/*
162162
* Invalidate page-table caches used by hardware walkers. Then we still
163163
* need to RCU-sched wait while freeing the pages because software

0 commit comments

Comments (0)