
Commit e154700

Peter Zijlstra authored and Ingo Molnar committed
ia64/tlb: Convert to generic mmu_gather
Generic mmu_gather provides everything ia64 needs (range tracking).

No change in behavior intended.

Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: Andy Lutomirski <[email protected]>
Cc: Aneesh Kumar K.V <[email protected]>
Cc: Borislav Petkov <[email protected]>
Cc: Dave Hansen <[email protected]>
Cc: H. Peter Anvin <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Nick Piggin <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Rik van Riel <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: Tony Luck <[email protected]>
Cc: Will Deacon <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
1 parent b78180b commit e154700
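For context, the generic mmu_gather in <asm-generic/tlb.h> already tracks the range being torn down, which is the only thing ia64's private copy was providing. Below is a minimal sketch (not part of this commit) of the pattern core mm code follows, assuming the tlb_gather_mmu()/tlb_finish_mmu() signatures of this kernel generation; the helper name example_unmap_one is made up for illustration:

/* Illustrative sketch only -- not part of this commit. */
#include <asm/tlb.h>	/* on ia64 this now just pulls in <asm-generic/tlb.h> */

static void example_unmap_one(struct mm_struct *mm, pte_t *ptep,
			      struct page *page,
			      unsigned long addr, unsigned long end)
{
	struct mmu_gather tlb;

	tlb_gather_mmu(&tlb, mm, addr, end);	/* start a gather for [addr, end) */

	/*
	 * Record the PTE and queue the page: the generic code widens
	 * tlb.start/tlb.end as entries are added (the "range tracking"
	 * the changelog refers to) and batches the page for freeing.
	 */
	tlb_remove_tlb_entry(&tlb, ptep, addr);
	tlb_remove_page(&tlb, page);

	tlb_finish_mmu(&tlb, addr, end);	/* flush the TLB, then free the pages */
}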

3 files changed (+47, -257 lines)

arch/ia64/include/asm/tlb.h

Lines changed: 1 addition & 255 deletions
@@ -47,262 +47,8 @@
 #include <asm/tlbflush.h>
 #include <asm/machvec.h>
 
-/*
- * If we can't allocate a page to make a big batch of page pointers
- * to work on, then just handle a few from the on-stack structure.
- */
-#define IA64_GATHER_BUNDLE 8
-
-struct mmu_gather {
-        struct mm_struct *mm;
-        unsigned int nr;
-        unsigned int max;
-        unsigned char fullmm;     /* non-zero means full mm flush */
-        unsigned char need_flush; /* really unmapped some PTEs? */
-        unsigned long start, end;
-        unsigned long start_addr;
-        unsigned long end_addr;
-        struct page **pages;
-        struct page *local[IA64_GATHER_BUNDLE];
-};
-
-struct ia64_tr_entry {
-        u64 ifa;
-        u64 itir;
-        u64 pte;
-        u64 rr;
-}; /*Record for tr entry!*/
-
-extern int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size);
-extern void ia64_ptr_entry(u64 target_mask, int slot);
-
-extern struct ia64_tr_entry *ia64_idtrs[NR_CPUS];
-
-/*
- region register macros
- */
-#define RR_TO_VE(val)   (((val) >> 0) & 0x0000000000000001)
-#define RR_VE(val)      (((val) & 0x0000000000000001) << 0)
-#define RR_VE_MASK      0x0000000000000001L
-#define RR_VE_SHIFT     0
-#define RR_TO_PS(val)   (((val) >> 2) & 0x000000000000003f)
-#define RR_PS(val)      (((val) & 0x000000000000003f) << 2)
-#define RR_PS_MASK      0x00000000000000fcL
-#define RR_PS_SHIFT     2
-#define RR_RID_MASK     0x00000000ffffff00L
-#define RR_TO_RID(val)  ((val >> 8) & 0xffffff)
-
-static inline void
-ia64_tlb_flush_mmu_tlbonly(struct mmu_gather *tlb, unsigned long start, unsigned long end)
-{
-        tlb->need_flush = 0;
-
-        if (tlb->fullmm) {
-                /*
-                 * Tearing down the entire address space.  This happens both as a result
-                 * of exit() and execve().  The latter case necessitates the call to
-                 * flush_tlb_mm() here.
-                 */
-                flush_tlb_mm(tlb->mm);
-        } else if (unlikely (end - start >= 1024*1024*1024*1024UL
-                             || REGION_NUMBER(start) != REGION_NUMBER(end - 1)))
-        {
-                /*
-                 * If we flush more than a tera-byte or across regions, we're probably
-                 * better off just flushing the entire TLB(s).  This should be very rare
-                 * and is not worth optimizing for.
-                 */
-                flush_tlb_all();
-        } else {
-                /*
-                 * flush_tlb_range() takes a vma instead of a mm pointer because
-                 * some architectures want the vm_flags for ITLB/DTLB flush.
-                 */
-                struct vm_area_struct vma = TLB_FLUSH_VMA(tlb->mm, 0);
-
-                /* flush the address range from the tlb: */
-                flush_tlb_range(&vma, start, end);
-                /* now flush the virt. page-table area mapping the address range: */
-                flush_tlb_range(&vma, ia64_thash(start), ia64_thash(end));
-        }
-
-}
-
-static inline void
-ia64_tlb_flush_mmu_free(struct mmu_gather *tlb)
-{
-        unsigned long i;
-        unsigned int nr;
-
-        /* lastly, release the freed pages */
-        nr = tlb->nr;
-
-        tlb->nr = 0;
-        tlb->start_addr = ~0UL;
-        for (i = 0; i < nr; ++i)
-                free_page_and_swap_cache(tlb->pages[i]);
-}
-
-/*
- * Flush the TLB for address range START to END and, if not in fast mode, release the
- * freed pages that where gathered up to this point.
- */
-static inline void
-ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end)
-{
-        if (!tlb->need_flush)
-                return;
-        ia64_tlb_flush_mmu_tlbonly(tlb, start, end);
-        ia64_tlb_flush_mmu_free(tlb);
-}
-
-static inline void __tlb_alloc_page(struct mmu_gather *tlb)
-{
-        unsigned long addr = __get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
-
-        if (addr) {
-                tlb->pages = (void *)addr;
-                tlb->max = PAGE_SIZE / sizeof(void *);
-        }
-}
-
-
-static inline void
-arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
-                    unsigned long start, unsigned long end)
-{
-        tlb->mm = mm;
-        tlb->max = ARRAY_SIZE(tlb->local);
-        tlb->pages = tlb->local;
-        tlb->nr = 0;
-        tlb->fullmm = !(start | (end+1));
-        tlb->start = start;
-        tlb->end = end;
-        tlb->start_addr = ~0UL;
-}
-
-/*
- * Called at the end of the shootdown operation to free up any resources that were
- * collected.
- */
-static inline void
-arch_tlb_finish_mmu(struct mmu_gather *tlb,
-                unsigned long start, unsigned long end, bool force)
-{
-        if (force)
-                tlb->need_flush = 1;
-        /*
-         * Note: tlb->nr may be 0 at this point, so we can't rely on tlb->start_addr and
-         * tlb->end_addr.
-         */
-        ia64_tlb_flush_mmu(tlb, start, end);
-
-        /* keep the page table cache within bounds */
-        check_pgt_cache();
-
-        if (tlb->pages != tlb->local)
-                free_pages((unsigned long)tlb->pages, 0);
-}
-
-/*
- * Logically, this routine frees PAGE.  On MP machines, the actual freeing of the page
- * must be delayed until after the TLB has been flushed (see comments at the beginning of
- * this file).
- */
-static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
-{
-        tlb->need_flush = 1;
-
-        if (!tlb->nr && tlb->pages == tlb->local)
-                __tlb_alloc_page(tlb);
-
-        tlb->pages[tlb->nr++] = page;
-        VM_WARN_ON(tlb->nr > tlb->max);
-        if (tlb->nr == tlb->max)
-                return true;
-        return false;
-}
-
-static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
-{
-        ia64_tlb_flush_mmu_tlbonly(tlb, tlb->start_addr, tlb->end_addr);
-}
-
-static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
-{
-        ia64_tlb_flush_mmu_free(tlb);
-}
-
-static inline void tlb_flush_mmu(struct mmu_gather *tlb)
-{
-        ia64_tlb_flush_mmu(tlb, tlb->start_addr, tlb->end_addr);
-}
-
-static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
-{
-        if (__tlb_remove_page(tlb, page))
-                tlb_flush_mmu(tlb);
-}
-
-static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
-                                          struct page *page, int page_size)
-{
-        return __tlb_remove_page(tlb, page);
-}
-
-static inline void tlb_remove_page_size(struct mmu_gather *tlb,
-                                        struct page *page, int page_size)
-{
-        return tlb_remove_page(tlb, page);
-}
-
-/*
- * Remove TLB entry for PTE mapped at virtual address ADDRESS.  This is called for any
- * PTE, not just those pointing to (normal) physical memory.
- */
-static inline void
-__tlb_remove_tlb_entry (struct mmu_gather *tlb, pte_t *ptep, unsigned long address)
-{
-        if (tlb->start_addr == ~0UL)
-                tlb->start_addr = address;
-        tlb->end_addr = address + PAGE_SIZE;
-}
-
 #define tlb_migrate_finish(mm) platform_tlb_migrate_finish(mm)
 
-#define tlb_start_vma(tlb, vma) do { } while (0)
-#define tlb_end_vma(tlb, vma) do { } while (0)
-
-#define tlb_remove_tlb_entry(tlb, ptep, addr) \
-do { \
-        tlb->need_flush = 1; \
-        __tlb_remove_tlb_entry(tlb, ptep, addr); \
-} while (0)
-
-#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address) \
-        tlb_remove_tlb_entry(tlb, ptep, address)
-
-static inline void tlb_change_page_size(struct mmu_gather *tlb,
-                                        unsigned int page_size)
-{
-}
-
-#define pte_free_tlb(tlb, ptep, address) \
-do { \
-        tlb->need_flush = 1; \
-        __pte_free_tlb(tlb, ptep, address); \
-} while (0)
-
-#define pmd_free_tlb(tlb, ptep, address) \
-do { \
-        tlb->need_flush = 1; \
-        __pmd_free_tlb(tlb, ptep, address); \
-} while (0)
-
-#define pud_free_tlb(tlb, pudp, address) \
-do { \
-        tlb->need_flush = 1; \
-        __pud_free_tlb(tlb, pudp, address); \
-} while (0)
+#include <asm-generic/tlb.h>
 
 #endif /* _ASM_IA64_TLB_H */
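Reconstructed from the context lines of the hunk above (the earlier, unchanged parts of the file -- header guard and comments -- are omitted), the tail of arch/ia64/include/asm/tlb.h now reads roughly:

#include <asm/tlbflush.h>
#include <asm/machvec.h>

#define tlb_migrate_finish(mm) platform_tlb_migrate_finish(mm)

#include <asm-generic/tlb.h>

#endif /* _ASM_IA64_TLB_H */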

arch/ia64/include/asm/tlbflush.h

Lines changed: 25 additions & 0 deletions
@@ -14,6 +14,31 @@
 #include <asm/mmu_context.h>
 #include <asm/page.h>
 
+struct ia64_tr_entry {
+        u64 ifa;
+        u64 itir;
+        u64 pte;
+        u64 rr;
+}; /*Record for tr entry!*/
+
+extern int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size);
+extern void ia64_ptr_entry(u64 target_mask, int slot);
+extern struct ia64_tr_entry *ia64_idtrs[NR_CPUS];
+
+/*
+ region register macros
+ */
+#define RR_TO_VE(val)   (((val) >> 0) & 0x0000000000000001)
+#define RR_VE(val)      (((val) & 0x0000000000000001) << 0)
+#define RR_VE_MASK      0x0000000000000001L
+#define RR_VE_SHIFT     0
+#define RR_TO_PS(val)   (((val) >> 2) & 0x000000000000003f)
+#define RR_PS(val)      (((val) & 0x000000000000003f) << 2)
+#define RR_PS_MASK      0x00000000000000fcL
+#define RR_PS_SHIFT     2
+#define RR_RID_MASK     0x00000000ffffff00L
+#define RR_TO_RID(val)  ((val >> 8) & 0xffffff)
+
 /*
  * Now for some TLB flushing routines.  This is the kind of stuff that
  * can be very expensive, so try to avoid them whenever possible.
arch/ia64/mm/tlb.c

Lines changed: 21 additions & 2 deletions
@@ -305,8 +305,8 @@ local_flush_tlb_all (void)
 	ia64_srlz_i();			/* srlz.i implies srlz.d */
 }
 
-void
-flush_tlb_range (struct vm_area_struct *vma, unsigned long start,
+static void
+__flush_tlb_range (struct vm_area_struct *vma, unsigned long start,
 		unsigned long end)
 {
 	struct mm_struct *mm = vma->vm_mm;
@@ -343,6 +343,25 @@ flush_tlb_range (struct vm_area_struct *vma, unsigned long start,
 	preempt_enable();
 	ia64_srlz_i();			/* srlz.i implies srlz.d */
 }
+
+void flush_tlb_range(struct vm_area_struct *vma,
+		unsigned long start, unsigned long end)
+{
+	if (unlikely(end - start >= 1024*1024*1024*1024UL
+			|| REGION_NUMBER(start) != REGION_NUMBER(end - 1))) {
+		/*
+		 * If we flush more than a tera-byte or across regions, we're
+		 * probably better off just flushing the entire TLB(s).  This
+		 * should be very rare and is not worth optimizing for.
+		 */
+		flush_tlb_all();
+	} else {
+		/* flush the address range from the tlb */
+		__flush_tlb_range(vma, start, end);
+		/* flush the virt. page-table area mapping the addr range */
+		__flush_tlb_range(vma, ia64_thash(start), ia64_thash(end));
+	}
+}
 EXPORT_SYMBOL(flush_tlb_range);
 
 void ia64_tlb_init(void)
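Why flush_tlb_range() absorbs the tera-byte/cross-region check and the ia64_thash() flush: with the private mmu_gather gone, the flush is driven by the generic code, whose default tlb_flush() hands the tracked range straight to the architecture's flush_tlb_range(). A simplified sketch of that generic default in this kernel generation (from memory, so treat the details as an assumption; the real code also carries exec/hugetlb hints on the stack vma):

/* Approximate shape of the asm-generic default tlb_flush() -- sketch only. */
static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (tlb->fullmm) {
		/* Whole address space is going away: one big flush. */
		flush_tlb_mm(tlb->mm);
	} else if (tlb->end) {
		/* Otherwise flush only the range the gather recorded. */
		struct vm_area_struct vma = { .vm_mm = tlb->mm };

		flush_tlb_range(&vma, tlb->start, tlb->end);
	}
}

Since the generic path only ever calls flush_tlb_range(), the special cases that ia64_tlb_flush_mmu_tlbonly() used to handle (huge or cross-region ranges, plus flushing the virtually mapped page-table area via ia64_thash()) have to live in ia64's flush_tlb_range() itself, which is exactly what the new wrapper above does.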
