
Commit 5e5f6dc

stevecapper authored and torvalds committed
arm64: mm: enable HAVE_RCU_TABLE_FREE logic
In order to implement fast_get_user_pages we need to ensure that the page table walker is protected from page table pages being freed from under it. This patch enables HAVE_RCU_TABLE_FREE; any page table pages belonging to address spaces with multiple users will be freed via call_rcu_sched, meaning that disabling interrupts will block the free and protect the fast gup page walker.

Signed-off-by: Steve Capper <[email protected]>
Tested-by: Dann Frazier <[email protected]>
Acked-by: Catalin Marinas <[email protected]>
Cc: Hugh Dickins <[email protected]>
Cc: Russell King <[email protected]>
Cc: Mark Rutland <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: Will Deacon <[email protected]>
Cc: Christoffer Dall <[email protected]>
Cc: Andrea Arcangeli <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
1 parent b8cd51a commit 5e5f6dc
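
For context on why this ordering works: call_rcu_sched() defers its callback until every CPU has passed through a quiescent state, and a CPU with interrupts disabled cannot do so. A walker that keeps interrupts off for the duration of its walk therefore cannot see a page table page freed underneath it. Below is a minimal sketch of the lockless-walk pattern this commit enables; walk_and_pin() is a hypothetical helper, not a kernel API, and this is not the arm64 fast_gup implementation itself (that arrived in follow-up patches):

/*
 * Sketch only: illustrates the IRQ-disable pattern that
 * HAVE_RCU_TABLE_FREE makes safe. walk_and_pin() is a
 * hypothetical page-table walker, not a kernel API.
 */
#include <linux/irqflags.h>
#include <linux/sched.h>

static int fast_gup_sketch(unsigned long addr, struct page **pagep)
{
	unsigned long flags;
	int ret;

	/*
	 * While interrupts are off, this CPU cannot pass through a
	 * quiescent state, so call_rcu_sched() callbacks -- including
	 * the ones freeing page table pages -- are held off.
	 */
	local_irq_save(flags);
	ret = walk_and_pin(current->mm, addr, pagep);
	local_irq_restore(flags);

	return ret;
}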

2 files changed: 18 additions, 3 deletions

arch/arm64/Kconfig

1 addition, 0 deletions

@@ -57,6 +57,7 @@ config ARM64
 	select HAVE_PERF_EVENTS
 	select HAVE_PERF_REGS
 	select HAVE_PERF_USER_STACK_DUMP
+	select HAVE_RCU_TABLE_FREE
 	select HAVE_SYSCALL_TRACEPOINTS
 	select IRQ_DOMAIN
 	select MODULES_USE_ELF_RELA

arch/arm64/include/asm/tlb.h

17 additions, 3 deletions

@@ -23,6 +23,20 @@
 
 #include <asm-generic/tlb.h>
 
+#include <linux/pagemap.h>
+#include <linux/swap.h>
+
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+
+#define tlb_remove_entry(tlb, entry)	tlb_remove_table(tlb, entry)
+static inline void __tlb_remove_table(void *_table)
+{
+	free_page_and_swap_cache((struct page *)_table);
+}
+#else
+#define tlb_remove_entry(tlb, entry)	tlb_remove_page(tlb, entry)
+#endif /* CONFIG_HAVE_RCU_TABLE_FREE */
+
 /*
  * There's three ways the TLB shootdown code is used:
  * 1. Unmapping a range of vmas.  See zap_page_range(), unmap_region().
@@ -88,15 +102,15 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
 {
 	pgtable_page_dtor(pte);
 	tlb_add_flush(tlb, addr);
-	tlb_remove_page(tlb, pte);
+	tlb_remove_entry(tlb, pte);
 }
 
 #if CONFIG_ARM64_PGTABLE_LEVELS > 2
 static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
 				  unsigned long addr)
 {
 	tlb_add_flush(tlb, addr);
-	tlb_remove_page(tlb, virt_to_page(pmdp));
+	tlb_remove_entry(tlb, virt_to_page(pmdp));
 }
 #endif
 
@@ -105,7 +119,7 @@ static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pudp,
 				  unsigned long addr)
 {
 	tlb_add_flush(tlb, addr);
-	tlb_remove_page(tlb, virt_to_page(pudp));
+	tlb_remove_entry(tlb, virt_to_page(pudp));
 }
 #endif
 