
Commit 15fa3e8

Matthew Wilcox (Oracle) authored and akpm00 committed
mips: implement the new page table range API
Rename _PFN_SHIFT to PFN_PTE_SHIFT.  Convert a few places to call set_pte()
instead of set_pte_at().  Add set_ptes(), update_mmu_cache_range(),
flush_icache_pages() and flush_dcache_folio().  Change the PG_arch_1 (aka
PG_dcache_dirty) flag from being per-page to per-folio.

Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Matthew Wilcox (Oracle) <[email protected]>
Acked-by: Mike Rapoport (IBM) <[email protected]>
Cc: Thomas Bogendoerfer <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
1 parent 27a8b94 commit 15fa3e8
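
Note (illustrative, not part of the diff): with this change MIPS can service the batched calls that generic mm code makes through the new range API. A minimal caller-side sketch, assuming a contiguous folio is being mapped at a page-aligned user address; the helper name map_folio_range and its parameters are made up for illustration.

#include <linux/mm.h>
#include <asm/cacheflush.h>

/*
 * Hypothetical sketch: establish PTEs for every page of a folio with one
 * set_ptes() call instead of looping over set_pte_at().
 */
static void map_folio_range(struct vm_area_struct *vma, unsigned long addr,
			    pte_t *ptep, struct folio *folio)
{
	unsigned int nr = folio_nr_pages(folio);
	pte_t pte = mk_pte(folio_page(folio, 0), vma->vm_page_prot);

	flush_icache_pages(vma, folio_page(folio, 0), nr);	/* no-op on MIPS */
	set_ptes(vma->vm_mm, addr, ptep, pte, nr);		/* nr consecutive PTEs */
	update_mmu_cache_range(NULL, vma, addr, ptep, nr);	/* prime the TLB */
}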

File tree: 12 files changed, +121 -86 lines


arch/mips/bcm47xx/prom.c

Lines changed: 1 addition & 1 deletion
@@ -116,7 +116,7 @@ void __init prom_init(void)
 #if defined(CONFIG_BCM47XX_BCMA) && defined(CONFIG_HIGHMEM)

 #define EXTVBASE	0xc0000000
-#define ENTRYLO(x)	((pte_val(pfn_pte((x) >> _PFN_SHIFT, PAGE_KERNEL_UNCACHED)) >> 6) | 1)
+#define ENTRYLO(x)	((pte_val(pfn_pte((x) >> PFN_PTE_SHIFT, PAGE_KERNEL_UNCACHED)) >> 6) | 1)

 #include <asm/tlbflush.h>

arch/mips/include/asm/cacheflush.h

Lines changed: 21 additions & 11 deletions
@@ -36,12 +36,12 @@
  */
 #define PG_dcache_dirty			PG_arch_1

-#define Page_dcache_dirty(page)		\
-	test_bit(PG_dcache_dirty, &(page)->flags)
-#define SetPageDcacheDirty(page)	\
-	set_bit(PG_dcache_dirty, &(page)->flags)
-#define ClearPageDcacheDirty(page)	\
-	clear_bit(PG_dcache_dirty, &(page)->flags)
+#define folio_test_dcache_dirty(folio)	\
+	test_bit(PG_dcache_dirty, &(folio)->flags)
+#define folio_set_dcache_dirty(folio)	\
+	set_bit(PG_dcache_dirty, &(folio)->flags)
+#define folio_clear_dcache_dirty(folio)	\
+	clear_bit(PG_dcache_dirty, &(folio)->flags)

 extern void (*flush_cache_all)(void);
 extern void (*__flush_cache_all)(void);
@@ -50,15 +50,24 @@ extern void (*flush_cache_mm)(struct mm_struct *mm);
 extern void (*flush_cache_range)(struct vm_area_struct *vma,
 	unsigned long start, unsigned long end);
 extern void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page, unsigned long pfn);
-extern void __flush_dcache_page(struct page *page);
+extern void __flush_dcache_pages(struct page *page, unsigned int nr);

 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
+static inline void flush_dcache_folio(struct folio *folio)
+{
+	if (cpu_has_dc_aliases)
+		__flush_dcache_pages(&folio->page, folio_nr_pages(folio));
+	else if (!cpu_has_ic_fills_f_dc)
+		folio_set_dcache_dirty(folio);
+}
+#define flush_dcache_folio flush_dcache_folio
+
 static inline void flush_dcache_page(struct page *page)
 {
 	if (cpu_has_dc_aliases)
-		__flush_dcache_page(page);
+		__flush_dcache_pages(page, 1);
 	else if (!cpu_has_ic_fills_f_dc)
-		SetPageDcacheDirty(page);
+		folio_set_dcache_dirty(page_folio(page));
 }

 #define flush_dcache_mmap_lock(mapping) do { } while (0)
@@ -73,10 +82,11 @@ static inline void flush_anon_page(struct vm_area_struct *vma,
 		__flush_anon_page(page, vmaddr);
 }

-static inline void flush_icache_page(struct vm_area_struct *vma,
-	struct page *page)
+static inline void flush_icache_pages(struct vm_area_struct *vma,
+		struct page *page, unsigned int nr)
 {
 }
+#define flush_icache_page(vma, page) flush_icache_pages(vma, page, 1)

 extern void (*flush_icache_range)(unsigned long start, unsigned long end);
 extern void (*local_flush_icache_range)(unsigned long start, unsigned long end);
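
Note (illustrative, not part of the diff): flush_dcache_folio() is the folio-sized counterpart of flush_dcache_page(). A minimal usage sketch, assuming a filesystem or driver has just written into a folio through its kernel mapping; the helper name fill_folio_done is made up.

/* Hypothetical usage sketch; fill_folio_done is a made-up helper name. */
static void fill_folio_done(struct folio *folio)
{
	/*
	 * On CPUs with aliasing D-caches this writes back every page of the
	 * folio now; otherwise it may just set the folio-wide PG_dcache_dirty
	 * bit so __update_cache() can flush lazily when the folio is mapped.
	 */
	flush_dcache_folio(folio);
}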

arch/mips/include/asm/pgtable-32.h

Lines changed: 5 additions & 5 deletions
@@ -153,15 +153,15 @@ static inline void pmd_clear(pmd_t *pmdp)
 #if defined(CONFIG_XPA)

 #define MAX_POSSIBLE_PHYSMEM_BITS 40
-#define pte_pfn(x)		(((unsigned long)((x).pte_high >> _PFN_SHIFT)) | (unsigned long)((x).pte_low << _PAGE_PRESENT_SHIFT))
+#define pte_pfn(x)		(((unsigned long)((x).pte_high >> PFN_PTE_SHIFT)) | (unsigned long)((x).pte_low << _PAGE_PRESENT_SHIFT))
 static inline pte_t
 pfn_pte(unsigned long pfn, pgprot_t prot)
 {
 	pte_t pte;

 	pte.pte_low = (pfn >> _PAGE_PRESENT_SHIFT) |
 				(pgprot_val(prot) & ~_PFNX_MASK);
-	pte.pte_high = (pfn << _PFN_SHIFT) |
+	pte.pte_high = (pfn << PFN_PTE_SHIFT) |
 				(pgprot_val(prot) & ~_PFN_MASK);
 	return pte;
 }
@@ -184,9 +184,9 @@ static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
 #else

 #define MAX_POSSIBLE_PHYSMEM_BITS 32
-#define pte_pfn(x)		((unsigned long)((x).pte >> _PFN_SHIFT))
-#define pfn_pte(pfn, prot)	__pte(((unsigned long long)(pfn) << _PFN_SHIFT) | pgprot_val(prot))
-#define pfn_pmd(pfn, prot)	__pmd(((unsigned long long)(pfn) << _PFN_SHIFT) | pgprot_val(prot))
+#define pte_pfn(x)		((unsigned long)((x).pte >> PFN_PTE_SHIFT))
+#define pfn_pte(pfn, prot)	__pte(((unsigned long long)(pfn) << PFN_PTE_SHIFT) | pgprot_val(prot))
+#define pfn_pmd(pfn, prot)	__pmd(((unsigned long long)(pfn) << PFN_PTE_SHIFT) | pgprot_val(prot))
 #endif /* defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) */

 #define pte_page(x)		pfn_to_page(pte_pfn(x))

arch/mips/include/asm/pgtable-64.h

Lines changed: 3 additions & 3 deletions
@@ -298,9 +298,9 @@ static inline void pud_clear(pud_t *pudp)

 #define pte_page(x)		pfn_to_page(pte_pfn(x))

-#define pte_pfn(x)		((unsigned long)((x).pte >> _PFN_SHIFT))
-#define pfn_pte(pfn, prot)	__pte(((pfn) << _PFN_SHIFT) | pgprot_val(prot))
-#define pfn_pmd(pfn, prot)	__pmd(((pfn) << _PFN_SHIFT) | pgprot_val(prot))
+#define pte_pfn(x)		((unsigned long)((x).pte >> PFN_PTE_SHIFT))
+#define pfn_pte(pfn, prot)	__pte(((pfn) << PFN_PTE_SHIFT) | pgprot_val(prot))
+#define pfn_pmd(pfn, prot)	__pmd(((pfn) << PFN_PTE_SHIFT) | pgprot_val(prot))

 #ifndef __PAGETABLE_PMD_FOLDED
 static inline pmd_t *pud_pgtable(pud_t pud)

arch/mips/include/asm/pgtable-bits.h

Lines changed: 3 additions & 3 deletions
@@ -182,10 +182,10 @@ enum pgtable_bits {
 #if defined(CONFIG_CPU_R3K_TLB)
 # define _CACHE_UNCACHED	(1 << _CACHE_UNCACHED_SHIFT)
 # define _CACHE_MASK		_CACHE_UNCACHED
-# define _PFN_SHIFT		PAGE_SHIFT
+# define PFN_PTE_SHIFT		PAGE_SHIFT
 #else
 # define _CACHE_MASK		(7 << _CACHE_SHIFT)
-# define _PFN_SHIFT		(PAGE_SHIFT - 12 + _CACHE_SHIFT + 3)
+# define PFN_PTE_SHIFT		(PAGE_SHIFT - 12 + _CACHE_SHIFT + 3)
 #endif

 #ifndef _PAGE_NO_EXEC
@@ -195,7 +195,7 @@ enum pgtable_bits {
 #define _PAGE_SILENT_READ	_PAGE_VALID
 #define _PAGE_SILENT_WRITE	_PAGE_DIRTY

-#define _PFN_MASK	(~((1 << (_PFN_SHIFT)) - 1))
+#define _PFN_MASK	(~((1 << (PFN_PTE_SHIFT)) - 1))

 /*
  * The final layouts of the PTE bits are:
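
Note (illustrative, not part of the diff): PFN_PTE_SHIFT is simply the bit position where the pfn field starts inside a software PTE, so pfn_pte() and pte_pfn() are inverse shifts in the common (non-XPA) layouts. A minimal sketch with a made-up pfn value:

/* Hypothetical check; the pfn value 0x1234 is made up. */
static void pfn_roundtrip_demo(void)
{
	unsigned long pfn = 0x1234;
	pte_t pte = pfn_pte(pfn, PAGE_KERNEL);	/* (pfn << PFN_PTE_SHIFT) | prot bits */

	WARN_ON(pte_pfn(pte) != pfn);		/* shifting back recovers the pfn */
}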

arch/mips/include/asm/pgtable.h

Lines changed: 41 additions & 22 deletions
@@ -66,7 +66,7 @@ extern void paging_init(void);

 static inline unsigned long pmd_pfn(pmd_t pmd)
 {
-	return pmd_val(pmd) >> _PFN_SHIFT;
+	return pmd_val(pmd) >> PFN_PTE_SHIFT;
 }

 #ifndef CONFIG_MIPS_HUGE_TLB_SUPPORT
@@ -105,9 +105,6 @@ do { \
 	} \
 } while(0)

-static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
-	pte_t *ptep, pte_t pteval);
-
 #if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)

 #ifdef CONFIG_XPA
@@ -157,7 +154,7 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *pt
 		null.pte_low = null.pte_high = _PAGE_GLOBAL;
 	}

-	set_pte_at(mm, addr, ptep, null);
+	set_pte(ptep, null);
 	htw_start();
 }
 #else
@@ -196,28 +193,41 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *pt
 #if !defined(CONFIG_CPU_R3K_TLB)
 	/* Preserve global status for the pair */
 	if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL)
-		set_pte_at(mm, addr, ptep, __pte(_PAGE_GLOBAL));
+		set_pte(ptep, __pte(_PAGE_GLOBAL));
 	else
 #endif
-		set_pte_at(mm, addr, ptep, __pte(0));
+		set_pte(ptep, __pte(0));
 	htw_start();
 }
 #endif

-static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
-	pte_t *ptep, pte_t pteval)
+static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
+		pte_t *ptep, pte_t pte, unsigned int nr)
 {
+	unsigned int i;
+	bool do_sync = false;

-	if (!pte_present(pteval))
-		goto cache_sync_done;
+	for (i = 0; i < nr; i++) {
+		if (!pte_present(pte))
+			continue;
+		if (pte_present(ptep[i]) &&
+		    (pte_pfn(ptep[i]) == pte_pfn(pte)))
+			continue;
+		do_sync = true;
+	}

-	if (pte_present(*ptep) && (pte_pfn(*ptep) == pte_pfn(pteval)))
-		goto cache_sync_done;
+	if (do_sync)
+		__update_cache(addr, pte);

-	__update_cache(addr, pteval);
-cache_sync_done:
-	set_pte(ptep, pteval);
+	for (;;) {
+		set_pte(ptep, pte);
+		if (--nr == 0)
+			break;
+		ptep++;
+		pte = __pte(pte_val(pte) + (1UL << PFN_PTE_SHIFT));
+	}
 }
+#define set_ptes set_ptes

 /*
  * (pmds are folded into puds so this doesn't get actually called,
@@ -486,7 +496,7 @@ static inline int ptep_set_access_flags(struct vm_area_struct *vma,
 					pte_t entry, int dirty)
 {
 	if (!pte_same(*ptep, entry))
-		set_pte_at(vma->vm_mm, address, ptep, entry);
+		set_pte(ptep, entry);
 	/*
 	 * update_mmu_cache will unconditionally execute, handling both
 	 * the case that the PTE changed and the spurious fault case.
@@ -568,12 +578,21 @@ static inline pte_t pte_swp_clear_exclusive(pte_t pte)
 extern void __update_tlb(struct vm_area_struct *vma, unsigned long address,
 	pte_t pte);

-static inline void update_mmu_cache(struct vm_area_struct *vma,
-	unsigned long address, pte_t *ptep)
-{
-	pte_t pte = *ptep;
-	__update_tlb(vma, address, pte);
+static inline void update_mmu_cache_range(struct vm_fault *vmf,
+		struct vm_area_struct *vma, unsigned long address,
+		pte_t *ptep, unsigned int nr)
+{
+	for (;;) {
+		pte_t pte = *ptep;
+		__update_tlb(vma, address, pte);
+		if (--nr == 0)
+			break;
+		ptep++;
+		address += PAGE_SIZE;
+	}
 }
+#define update_mmu_cache(vma, address, ptep) \
+	update_mmu_cache_range(NULL, vma, address, ptep, 1)

 #define __HAVE_ARCH_UPDATE_MMU_TLB
 #define update_mmu_tlb	update_mmu_cache
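
Note (illustrative, not part of the diff): the set_ptes() loop above advances to the next page by adding 1UL << PFN_PTE_SHIFT to pte_val(), which bumps the pfn field by exactly one while leaving the protection and cache bits untouched. A minimal sketch, with a made-up function name and starting pfn:

/* Hypothetical demonstration; pfn_stride_demo and pfn 100 are made up. */
static void pfn_stride_demo(pte_t *ptep)
{
	pte_t pte = pfn_pte(100, PAGE_KERNEL);	/* maps pfn 100 */
	unsigned int i;

	for (i = 0; i < 4; i++) {
		set_pte(ptep + i, pte);		/* pfns 100, 101, 102, 103 */
		pte = __pte(pte_val(pte) + (1UL << PFN_PTE_SHIFT));
	}
}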

arch/mips/mm/c-r4k.c

Lines changed: 3 additions & 2 deletions
@@ -568,13 +568,14 @@ static inline void local_r4k_flush_cache_page(void *args)
 	if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID))
 		vaddr = NULL;
 	else {
+		struct folio *folio = page_folio(page);
 		/*
 		 * Use kmap_coherent or kmap_atomic to do flushes for
 		 * another ASID than the current one.
 		 */
 		map_coherent = (cpu_has_dc_aliases &&
-				page_mapcount(page) &&
-				!Page_dcache_dirty(page));
+				folio_mapped(folio) &&
+				!folio_test_dcache_dirty(folio));
 		if (map_coherent)
 			vaddr = kmap_coherent(page, addr);
 		else

arch/mips/mm/cache.c

Lines changed: 28 additions & 28 deletions
@@ -99,13 +99,15 @@ SYSCALL_DEFINE3(cacheflush, unsigned long, addr, unsigned long, bytes,
 	return 0;
 }

-void __flush_dcache_page(struct page *page)
+void __flush_dcache_pages(struct page *page, unsigned int nr)
 {
-	struct address_space *mapping = page_mapping_file(page);
+	struct folio *folio = page_folio(page);
+	struct address_space *mapping = folio_flush_mapping(folio);
 	unsigned long addr;
+	unsigned int i;

 	if (mapping && !mapping_mapped(mapping)) {
-		SetPageDcacheDirty(page);
+		folio_set_dcache_dirty(folio);
 		return;
 	}

@@ -114,25 +116,21 @@ void __flush_dcache_page(struct page *page)
 	 * case is for exec env/arg pages and those are %99 certainly going to
 	 * get faulted into the tlb (and thus flushed) anyways.
 	 */
-	if (PageHighMem(page))
-		addr = (unsigned long)kmap_atomic(page);
-	else
-		addr = (unsigned long)page_address(page);
-
-	flush_data_cache_page(addr);
-
-	if (PageHighMem(page))
-		kunmap_atomic((void *)addr);
+	for (i = 0; i < nr; i++) {
+		addr = (unsigned long)kmap_local_page(page + i);
+		flush_data_cache_page(addr);
+		kunmap_local((void *)addr);
+	}
 }
-
-EXPORT_SYMBOL(__flush_dcache_page);
+EXPORT_SYMBOL(__flush_dcache_pages);

 void __flush_anon_page(struct page *page, unsigned long vmaddr)
 {
 	unsigned long addr = (unsigned long) page_address(page);
+	struct folio *folio = page_folio(page);

 	if (pages_do_alias(addr, vmaddr)) {
-		if (page_mapcount(page) && !Page_dcache_dirty(page)) {
+		if (folio_mapped(folio) && !folio_test_dcache_dirty(folio)) {
 			void *kaddr;

 			kaddr = kmap_coherent(page, vmaddr);
@@ -147,27 +145,29 @@ EXPORT_SYMBOL(__flush_anon_page);

 void __update_cache(unsigned long address, pte_t pte)
 {
-	struct page *page;
+	struct folio *folio;
 	unsigned long pfn, addr;
 	int exec = !pte_no_exec(pte) && !cpu_has_ic_fills_f_dc;
+	unsigned int i;

 	pfn = pte_pfn(pte);
 	if (unlikely(!pfn_valid(pfn)))
 		return;
-	page = pfn_to_page(pfn);
-	if (Page_dcache_dirty(page)) {
-		if (PageHighMem(page))
-			addr = (unsigned long)kmap_atomic(page);
-		else
-			addr = (unsigned long)page_address(page);
-
-		if (exec || pages_do_alias(addr, address & PAGE_MASK))
-			flush_data_cache_page(addr);

-		if (PageHighMem(page))
-			kunmap_atomic((void *)addr);
+	folio = page_folio(pfn_to_page(pfn));
+	address &= PAGE_MASK;
+	address -= offset_in_folio(folio, pfn << PAGE_SHIFT);
+
+	if (folio_test_dcache_dirty(folio)) {
+		for (i = 0; i < folio_nr_pages(folio); i++) {
+			addr = (unsigned long)kmap_local_folio(folio, i);

-		ClearPageDcacheDirty(page);
+			if (exec || pages_do_alias(addr, address))
+				flush_data_cache_page(addr);
+			kunmap_local((void *)addr);
+			address += PAGE_SIZE;
+		}
+		folio_clear_dcache_dirty(folio);
 	}
 }
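
Note (illustrative, not part of the diff): the per-folio PG_dcache_dirty bit set above is consumed by __update_cache(), which set_ptes() invokes when a present PTE with a new pfn is installed. A rough sketch of that life cycle, with a made-up helper name:

/* Hypothetical flow; page_cache_fill_done is a made-up helper name. */
static void page_cache_fill_done(struct folio *folio)
{
	/* Data was copied into the folio via its kernel mapping. */
	flush_dcache_folio(folio);	/* may only mark the folio PG_dcache_dirty */

	/*
	 * Later, a fault maps the folio: set_ptes() sees a present PTE with a
	 * new pfn and calls __update_cache(), which walks every page of a
	 * dirty folio, flushes the aliasing or executable ones, and then
	 * clears PG_dcache_dirty.
	 */
}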
