Commit c5b27a8

Peter Zijlstra authored and Ingo Molnar committed
sh/tlb: Convert SH to generic mmu_gather
Generic mmu_gather provides everything SH needs (range tracking and cache coherency).

No change in behavior intended.

Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: Andy Lutomirski <[email protected]>
Cc: Aneesh Kumar K.V <[email protected]>
Cc: Borislav Petkov <[email protected]>
Cc: Dave Hansen <[email protected]>
Cc: H. Peter Anvin <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Nick Piggin <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Rich Felker <[email protected]>
Cc: Rik van Riel <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: Will Deacon <[email protected]>
Cc: Yoshinori Sato <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
1 parent e154700 commit c5b27a8
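
For context, a minimal sketch of the range tracking the generic mmu_gather supplies (simplified from asm-generic/tlb.h of this period; illustrative only, not part of this commit's diff):

/*
 * Sketch: the generic gather keeps a [start, end) range and widens it
 * as entries are removed, replacing the open-coded start/end bookkeeping
 * deleted from arch/sh below.
 */
static inline void __tlb_adjust_range(struct mmu_gather *tlb,
				      unsigned long address,
				      unsigned int range_size)
{
	tlb->start = min(tlb->start, address);
	tlb->end = max(tlb->end, address + range_size);
}

Each tlb_remove_tlb_entry()/*_free_tlb() call funnels through a helper of this kind, so the arch no longer needs its own mmu_gather structure.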

2 files changed: +10 −129 lines changed

arch/sh/include/asm/pgalloc.h

Lines changed: 9 additions & 0 deletions
@@ -70,6 +70,15 @@ do {						\
 	tlb_remove_page((tlb), (pte));		\
 } while (0)
 
+#if CONFIG_PGTABLE_LEVELS > 2
+#define __pmd_free_tlb(tlb, pmdp, addr)			\
+do {							\
+	struct page *page = virt_to_page(pmdp);		\
+	pgtable_pmd_page_dtor(page);			\
+	tlb_remove_page((tlb), page);			\
+} while (0);
+#endif
+
 static inline void check_pgt_cache(void)
 {
 	quicklist_trim(QUICK_PT, NULL, 25, 16);
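
The new __pmd_free_tlb() is only the arch-specific half; the generic header wraps it roughly as below, so the freed PMD page is accounted in the gather range before release (a simplified sketch based on asm-generic/tlb.h, not part of this diff):

#define pmd_free_tlb(tlb, pmdp, address)			\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		__pmd_free_tlb(tlb, pmdp, address);		\
	} while (0)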

arch/sh/include/asm/tlb.h

Lines changed: 1 addition & 129 deletions
@@ -11,131 +11,8 @@
 
 #ifdef CONFIG_MMU
 #include <linux/swap.h>
-#include <asm/pgalloc.h>
-#include <asm/tlbflush.h>
-#include <asm/mmu_context.h>
 
-/*
- * TLB handling. This allows us to remove pages from the page
- * tables, and efficiently handle the TLB issues.
- */
-struct mmu_gather {
-	struct mm_struct *mm;
-	unsigned int fullmm;
-	unsigned long start, end;
-};
-
-static inline void init_tlb_gather(struct mmu_gather *tlb)
-{
-	tlb->start = TASK_SIZE;
-	tlb->end = 0;
-
-	if (tlb->fullmm) {
-		tlb->start = 0;
-		tlb->end = TASK_SIZE;
-	}
-}
-
-static inline void
-arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
-		unsigned long start, unsigned long end)
-{
-	tlb->mm = mm;
-	tlb->start = start;
-	tlb->end = end;
-	tlb->fullmm = !(start | (end+1));
-
-	init_tlb_gather(tlb);
-}
-
-static inline void
-arch_tlb_finish_mmu(struct mmu_gather *tlb,
-		unsigned long start, unsigned long end, bool force)
-{
-	if (tlb->fullmm || force)
-		flush_tlb_mm(tlb->mm);
-
-	/* keep the page table cache within bounds */
-	check_pgt_cache();
-}
-
-static inline void
-tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long address)
-{
-	if (tlb->start > address)
-		tlb->start = address;
-	if (tlb->end < address + PAGE_SIZE)
-		tlb->end = address + PAGE_SIZE;
-}
-
-#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)	\
-	tlb_remove_tlb_entry(tlb, ptep, address)
-
-/*
- * In the case of tlb vma handling, we can optimise these away in the
- * case where we're doing a full MM flush. When we're doing a munmap,
- * the vmas are adjusted to only cover the region to be torn down.
- */
-static inline void
-tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
-{
-	if (!tlb->fullmm)
-		flush_cache_range(vma, vma->vm_start, vma->vm_end);
-}
-
-static inline void
-tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
-{
-	if (!tlb->fullmm && tlb->end) {
-		flush_tlb_range(vma, tlb->start, tlb->end);
-		init_tlb_gather(tlb);
-	}
-}
-
-static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
-{
-}
-
-static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
-{
-}
-
-static inline void tlb_flush_mmu(struct mmu_gather *tlb)
-{
-}
-
-static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
-{
-	free_page_and_swap_cache(page);
-	return false; /* avoid calling tlb_flush_mmu */
-}
-
-static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
-{
-	__tlb_remove_page(tlb, page);
-}
-
-static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
-					  struct page *page, int page_size)
-{
-	return __tlb_remove_page(tlb, page);
-}
-
-static inline void tlb_remove_page_size(struct mmu_gather *tlb,
-					struct page *page, int page_size)
-{
-	return tlb_remove_page(tlb, page);
-}
-
-static inline void tlb_change_page_size(struct mmu_gather *tlb, unsigned int page_size)
-{
-}
-
-#define pte_free_tlb(tlb, ptep, addr)	pte_free((tlb)->mm, ptep)
-#define pmd_free_tlb(tlb, pmdp, addr)	pmd_free((tlb)->mm, pmdp)
-#define pud_free_tlb(tlb, pudp, addr)	pud_free((tlb)->mm, pudp)
-
-#define tlb_migrate_finish(mm)		do { } while (0)
+#include <asm-generic/tlb.h>
 
 #if defined(CONFIG_CPU_SH4) || defined(CONFIG_SUPERH64)
 extern void tlb_wire_entry(struct vm_area_struct *, unsigned long, pte_t);
@@ -155,11 +32,6 @@ static inline void tlb_unwire_entry(void)
 
 #else /* CONFIG_MMU */
 
-#define tlb_start_vma(tlb, vma)				do { } while (0)
-#define tlb_end_vma(tlb, vma)				do { } while (0)
-#define __tlb_remove_tlb_entry(tlb, pte, address)	do { } while (0)
-#define tlb_flush(tlb)					do { } while (0)
-
 #include <asm-generic/tlb.h>
 
 #endif /* CONFIG_MMU */
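
With the arch-private hooks gone, the per-vma flushing also comes from the generic header; behaviourally it matches the removed SH versions, roughly along these lines (a simplified sketch assumed from asm-generic/tlb.h of the same series, not part of this diff):

static inline void tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (tlb->fullmm)
		return;
	/* same cache coherency step the SH code performed */
	flush_cache_range(vma, vma->vm_start, vma->vm_end);
}

static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (tlb->fullmm)
		return;
	/* flush the TLB range gathered for this vma and reset it */
	tlb_flush_mmu_tlbonly(tlb);
}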
