Commit 952a31c

Martin Schwidefsky authored and Ingo Molnar committed
asm-generic/tlb: Introduce CONFIG_HAVE_MMU_GATHER_NO_GATHER=y
Add the Kconfig option HAVE_MMU_GATHER_NO_GATHER to the generic
mmu_gather code. If the option is set, the mmu_gather will not track
individual pages for delayed page free anymore. A platform that enables
the option needs to provide its own implementation of the
__tlb_remove_page_size() function to free pages.

No change in behavior intended.

Signed-off-by: Martin Schwidefsky <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Acked-by: Will Deacon <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: Andy Lutomirski <[email protected]>
Cc: Borislav Petkov <[email protected]>
Cc: Dave Hansen <[email protected]>
Cc: H. Peter Anvin <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Rik van Riel <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
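A platform opts in by selecting HAVE_MMU_GATHER_NO_GATHER from its arch Kconfig and then providing __tlb_remove_page_size() itself, typically in its asm/tlb.h. The sketch below is illustrative only and not part of this series; it assumes an architecture that, like s390, already flushes the TLB entry when the PTE is cleared, so a page handed to the gather can be freed on the spot and the caller never has to be asked for a flush.

/*
 * Hypothetical arch-side sketch (not from this commit): with
 * CONFIG_HAVE_MMU_GATHER_NO_GATHER=y the generic batching code is
 * compiled out, so the architecture supplies the page-free path.
 * Needs linux/swap.h for free_page_and_swap_cache().
 */
static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
                                          struct page *page, int page_size)
{
        free_page_and_swap_cache(page); /* free immediately, nothing is gathered */
        return false;                   /* no batch can fill up, never request a flush */
}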
1 parent 6137fed commit 952a31c

File tree

3 files changed: +70 -49 lines changed

arch/Kconfig

Lines changed: 3 additions & 0 deletions
@@ -389,6 +389,9 @@ config HAVE_RCU_TABLE_NO_INVALIDATE
 config HAVE_MMU_GATHER_PAGE_SIZE
        bool
 
+config HAVE_MMU_GATHER_NO_GATHER
+       bool
+
 config ARCH_HAVE_NMI_SAFE_CMPXCHG
        bool
 
include/asm-generic/tlb.h

Lines changed: 7 additions & 2 deletions
@@ -191,6 +191,7 @@ extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
 
 #endif
 
+#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
 /*
  * If we can't allocate a page to make a big batch of page pointers
  * to work on, then just handle a few from the on-stack structure.
@@ -215,6 +216,10 @@ struct mmu_gather_batch {
  */
 #define MAX_GATHER_BATCH_COUNT (10000UL/MAX_GATHER_BATCH)
 
+extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
+                                   int page_size);
+#endif
+
 /*
  * struct mmu_gather is an opaque type used by the mm code for passing around
  * any data needed by arch specific code for tlb_remove_page.
@@ -261,13 +266,15 @@ struct mmu_gather {
 
        unsigned int batch_count;
 
+#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
        struct mmu_gather_batch *active;
        struct mmu_gather_batch local;
        struct page *__pages[MMU_GATHER_BUNDLE];
 
 #ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
        unsigned int page_size;
 #endif
+#endif
 };
 
 void arch_tlb_gather_mmu(struct mmu_gather *tlb,
@@ -276,8 +283,6 @@ void tlb_flush_mmu(struct mmu_gather *tlb);
 void arch_tlb_finish_mmu(struct mmu_gather *tlb,
                         unsigned long start, unsigned long end, bool force);
 void tlb_flush_mmu_free(struct mmu_gather *tlb);
-extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
-                                   int page_size);
 
 static inline void __tlb_adjust_range(struct mmu_gather *tlb,
                                      unsigned long address,
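For context, the return-value convention the header keeps relying on: a true return from __tlb_remove_page_size() tells the caller the gather is full and must be flushed first. The wrappers below are reproduced approximately as they already exist in this header (they are not part of the diff above); an implementation built for HAVE_MMU_GATHER_NO_GATHER that frees pages immediately can simply always return false.

/* Approximate shape of the unchanged generic wrappers in asm-generic/tlb.h. */
static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
        return __tlb_remove_page_size(tlb, page, PAGE_SIZE);
}

static inline void tlb_remove_page_size(struct mmu_gather *tlb,
                                        struct page *page, int page_size)
{
        if (__tlb_remove_page_size(tlb, page, page_size))
                tlb_flush_mmu(tlb);     /* batch full: flush TLBs, then free the gathered pages */
}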

mm/mmu_gather.c

Lines changed: 60 additions & 47 deletions
@@ -13,6 +13,8 @@
 
 #ifdef HAVE_GENERIC_MMU_GATHER
 
+#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
+
 static bool tlb_next_batch(struct mmu_gather *tlb)
 {
        struct mmu_gather_batch *batch;
@@ -41,19 +43,72 @@ static bool tlb_next_batch(struct mmu_gather *tlb)
        return true;
 }
 
+static void tlb_batch_pages_flush(struct mmu_gather *tlb)
+{
+       struct mmu_gather_batch *batch;
+
+       for (batch = &tlb->local; batch && batch->nr; batch = batch->next) {
+               free_pages_and_swap_cache(batch->pages, batch->nr);
+               batch->nr = 0;
+       }
+       tlb->active = &tlb->local;
+}
+
+static void tlb_batch_list_free(struct mmu_gather *tlb)
+{
+       struct mmu_gather_batch *batch, *next;
+
+       for (batch = tlb->local.next; batch; batch = next) {
+               next = batch->next;
+               free_pages((unsigned long)batch, 0);
+       }
+       tlb->local.next = NULL;
+}
+
+bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_size)
+{
+       struct mmu_gather_batch *batch;
+
+       VM_BUG_ON(!tlb->end);
+
+#ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
+       VM_WARN_ON(tlb->page_size != page_size);
+#endif
+
+       batch = tlb->active;
+       /*
+        * Add the page and check if we are full. If so
+        * force a flush.
+        */
+       batch->pages[batch->nr++] = page;
+       if (batch->nr == batch->max) {
+               if (!tlb_next_batch(tlb))
+                       return true;
+               batch = tlb->active;
+       }
+       VM_BUG_ON_PAGE(batch->nr > batch->max, page);
+
+       return false;
+}
+
+#endif /* HAVE_MMU_GATHER_NO_GATHER */
+
 void arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
                         unsigned long start, unsigned long end)
 {
        tlb->mm = mm;
 
        /* Is it from 0 to ~0? */
        tlb->fullmm = !(start | (end+1));
+
+#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
        tlb->need_flush_all = 0;
        tlb->local.next = NULL;
        tlb->local.nr   = 0;
        tlb->local.max  = ARRAY_SIZE(tlb->__pages);
        tlb->active     = &tlb->local;
        tlb->batch_count = 0;
+#endif
 
 #ifdef CONFIG_HAVE_RCU_TABLE_FREE
        tlb->batch = NULL;
@@ -67,16 +122,12 @@ void arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
 
 void tlb_flush_mmu_free(struct mmu_gather *tlb)
 {
-       struct mmu_gather_batch *batch;
-
 #ifdef CONFIG_HAVE_RCU_TABLE_FREE
        tlb_table_flush(tlb);
 #endif
-       for (batch = &tlb->local; batch && batch->nr; batch = batch->next) {
-               free_pages_and_swap_cache(batch->pages, batch->nr);
-               batch->nr = 0;
-       }
-       tlb->active = &tlb->local;
+#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
+       tlb_batch_pages_flush(tlb);
+#endif
 }
 
 void tlb_flush_mmu(struct mmu_gather *tlb)
@@ -92,8 +143,6 @@ void tlb_flush_mmu(struct mmu_gather *tlb)
 void arch_tlb_finish_mmu(struct mmu_gather *tlb,
                         unsigned long start, unsigned long end, bool force)
 {
-       struct mmu_gather_batch *batch, *next;
-
        if (force) {
                __tlb_reset_range(tlb);
                __tlb_adjust_range(tlb, start, end - start);
@@ -103,45 +152,9 @@ void arch_tlb_finish_mmu(struct mmu_gather *tlb,
 
        /* keep the page table cache within bounds */
        check_pgt_cache();
-
-       for (batch = tlb->local.next; batch; batch = next) {
-               next = batch->next;
-               free_pages((unsigned long)batch, 0);
-       }
-       tlb->local.next = NULL;
-}
-
-/* __tlb_remove_page
- *     Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)), while
- *     handling the additional races in SMP caused by other CPUs caching valid
- *     mappings in their TLBs. Returns the number of free page slots left.
- *     When out of page slots we must call tlb_flush_mmu().
- *returns true if the caller should flush.
- */
-bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_size)
-{
-       struct mmu_gather_batch *batch;
-
-       VM_BUG_ON(!tlb->end);
-
-#ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
-       VM_WARN_ON(tlb->page_size != page_size);
+#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
+       tlb_batch_list_free(tlb);
 #endif
-
-       batch = tlb->active;
-       /*
-        * Add the page and check if we are full. If so
-        * force a flush.
-        */
-       batch->pages[batch->nr++] = page;
-       if (batch->nr == batch->max) {
-               if (!tlb_next_batch(tlb))
-                       return true;
-               batch = tlb->active;
-       }
-       VM_BUG_ON_PAGE(batch->nr > batch->max, page);
-
-       return false;
 }
 
 #endif /* HAVE_GENERIC_MMU_GATHER */
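For orientation, here is a heavily simplified, hypothetical caller showing where the functions touched above sit in the mmu_gather life cycle at this point in the tree; example_zap() does not exist, and real users such as unmap_region() in mm/mmap.c do considerably more work between the calls.

#include <asm/tlb.h>

/* Hypothetical caller, for illustration only. */
static void example_zap(struct mm_struct *mm, struct page *page,
                        unsigned long start, unsigned long end)
{
        struct mmu_gather tlb;

        tlb_gather_mmu(&tlb, mm, start, end);   /* ends up in arch_tlb_gather_mmu() */

        /*
         * Called for each page whose mapping was torn down: the page is
         * queued via __tlb_remove_page_size() above, or freed directly by
         * the arch when HAVE_MMU_GATHER_NO_GATHER is set.
         */
        tlb_remove_page(&tlb, page);

        /*
         * Flush the TLB, then tlb_batch_pages_flush()/tlb_batch_list_free()
         * on the generic gathering path.
         */
        tlb_finish_mmu(&tlb, start, end);
}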
