
Commit 157e118

x86/mm/highmem: Use generic kmap atomic implementation
Convert X86 to the generic kmap atomic implementation and make the
iomap_atomic() naming convention consistent while at it.

Signed-off-by: Thomas Gleixner <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
1 parent 389755c commit 157e118

File tree

12 files changed, +31 −161 lines


arch/x86/Kconfig

Lines changed: 2 additions & 1 deletion
@@ -14,10 +14,11 @@ config X86_32
 	select ARCH_WANT_IPC_PARSE_VERSION
 	select CLKSRC_I8253
 	select CLONE_BACKWARDS
+	select GENERIC_VDSO_32
 	select HAVE_DEBUG_STACKOVERFLOW
+	select KMAP_LOCAL
 	select MODULES_USE_ELF_REL
 	select OLD_SIGACTION
-	select GENERIC_VDSO_32

 config X86_64
 	def_bool y

arch/x86/include/asm/fixmap.h

Lines changed: 2 additions & 3 deletions
@@ -31,7 +31,7 @@
 #include <asm/pgtable_types.h>
 #ifdef CONFIG_X86_32
 #include <linux/threads.h>
-#include <asm/kmap_types.h>
+#include <asm/kmap_size.h>
 #else
 #include <uapi/asm/vsyscall.h>
 #endif
@@ -94,7 +94,7 @@ enum fixed_addresses {
 #endif
 #ifdef CONFIG_X86_32
 	FIX_KMAP_BEGIN,	/* reserved pte's for temporary kernel mappings */
-	FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
+	FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_MAX_IDX * NR_CPUS) - 1,
 #ifdef CONFIG_PCI_MMCONFIG
 	FIX_PCIE_MCFG,
 #endif
@@ -151,7 +151,6 @@ extern void reserve_top_address(unsigned long reserve);
 
 extern int fixmaps_set;
 
-extern pte_t *kmap_pte;
 extern pte_t *pkmap_page_table;
 
 void __native_set_fixmap(enum fixed_addresses idx, pte_t pte);
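
The window of temporary-mapping fixmap slots is now sized by KM_MAX_IDX from the new asm/kmap_size.h instead of the old KM_TYPE_NR. A minimal sketch of how a per-CPU slot resolves to a virtual address under this layout (the helper name is illustrative, not from the tree):

	/* Illustrative helper, not from the kernel tree. */
	static unsigned long kmap_slot_vaddr(int slot)
	{
		/* Each CPU owns KM_MAX_IDX consecutive fixmap slots. */
		int idx = slot + KM_MAX_IDX * smp_processor_id();

		return __fix_to_virt(FIX_KMAP_BEGIN + idx);
	}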

arch/x86/include/asm/highmem.h

Lines changed: 9 additions & 4 deletions
@@ -23,7 +23,6 @@
 
 #include <linux/interrupt.h>
 #include <linux/threads.h>
-#include <asm/kmap_types.h>
 #include <asm/tlbflush.h>
 #include <asm/paravirt.h>
 #include <asm/fixmap.h>
@@ -58,11 +57,17 @@ extern unsigned long highstart_pfn, highend_pfn;
 #define PKMAP_NR(virt)  ((virt-PKMAP_BASE) >> PAGE_SHIFT)
 #define PKMAP_ADDR(nr)  (PKMAP_BASE + ((nr) << PAGE_SHIFT))
 
-void *kmap_atomic_pfn(unsigned long pfn);
-void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot);
-
 #define flush_cache_kmaps()	do { } while (0)
 
+#define arch_kmap_local_post_map(vaddr, pteval)		\
+	arch_flush_lazy_mmu_mode()
+
+#define arch_kmap_local_post_unmap(vaddr)		\
+	do {						\
+		flush_tlb_one_kernel((vaddr));		\
+		arch_flush_lazy_mmu_mode();		\
+	} while (0)
+
 extern void add_highpages_with_active_regions(int nid, unsigned long start_pfn,
 					      unsigned long end_pfn);
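
These two hooks are the only x86-specific pieces left in the local-map path: the generic code calls arch_kmap_local_post_map() right after installing a temporary pte and arch_kmap_local_post_unmap() right after clearing one. A condensed sketch of the generic map side in mm/highmem.c that this series introduces (simplified, not the verbatim kernel source):

	void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot)
	{
		pte_t pteval = pfn_pte(pfn, prot);
		/* Pick the next free slot in this CPU's window of fixmap ptes. */
		int idx = kmap_local_idx_push() + KM_MAX_IDX * smp_processor_id();
		unsigned long vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);

		set_pte(kmap_pte - idx, pteval);		/* install temporary mapping */
		arch_kmap_local_post_map(vaddr, pteval);	/* x86: flush lazy-MMU batching */
		return (void *)vaddr;
	}

iomap_atomic_pfn_prot() in arch/x86/mm/iomap_32.c below funnels into exactly this helper.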
6873

arch/x86/include/asm/iomap.h

Lines changed: 10 additions & 8 deletions
@@ -9,19 +9,21 @@
 #include <linux/fs.h>
 #include <linux/mm.h>
 #include <linux/uaccess.h>
+#include <linux/highmem.h>
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
 
-void __iomem *
-iomap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot);
+void __iomem *iomap_atomic_pfn_prot(unsigned long pfn, pgprot_t prot);
 
-void
-iounmap_atomic(void __iomem *kvaddr);
+static inline void iounmap_atomic(void __iomem *vaddr)
+{
+	kunmap_local_indexed((void __force *)vaddr);
+	pagefault_enable();
+	preempt_enable();
+}
 
-int
-iomap_create_wc(resource_size_t base, unsigned long size, pgprot_t *prot);
+int iomap_create_wc(resource_size_t base, unsigned long size, pgprot_t *prot);
 
-void
-iomap_free(resource_size_t base, unsigned long size);
+void iomap_free(resource_size_t base, unsigned long size);
 
 #endif /* _ASM_X86_IOMAP_H */
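
Note the pairing: the map side (iomap_atomic_pfn_prot() in arch/x86/mm/iomap_32.c below) takes preempt_disable() and pagefault_disable() before handing the pfn to the generic code, and the now-inline unmap side releases them in reverse order after tearing down the mapping. The __force cast strips the __iomem address space so sparse accepts passing the pointer to kunmap_local_indexed().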

arch/x86/include/asm/kmap_types.h

Lines changed: 0 additions & 13 deletions
This file was deleted.
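
For context, the deleted header was only a thin shim around asm-generic/kmap_types.h; reconstructed below from the parent tree (the diff view does not render it), so treat it as a best-effort reproduction:

	/* SPDX-License-Identifier: GPL-2.0 */
	#ifndef _ASM_X86_KMAP_TYPES_H
	#define _ASM_X86_KMAP_TYPES_H

	#if defined(CONFIG_X86_32) && defined(CONFIG_DEBUG_HIGHMEM)
	#define  __WITH_KM_FENCE
	#endif

	#include <asm-generic/kmap_types.h>

	#undef __WITH_KM_FENCE

	#endif /* _ASM_X86_KMAP_TYPES_H */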

arch/x86/include/asm/paravirt_types.h

Lines changed: 0 additions & 1 deletion
@@ -41,7 +41,6 @@
 #ifndef __ASSEMBLY__
 
 #include <asm/desc_defs.h>
-#include <asm/kmap_types.h>
 #include <asm/pgtable_types.h>
 #include <asm/nospec-branch.h>
 

arch/x86/mm/highmem_32.c

Lines changed: 0 additions & 59 deletions
@@ -4,65 +4,6 @@
 #include <linux/swap.h> /* for totalram_pages */
 #include <linux/memblock.h>
 
-void *kmap_atomic_high_prot(struct page *page, pgprot_t prot)
-{
-	unsigned long vaddr;
-	int idx, type;
-
-	type = kmap_atomic_idx_push();
-	idx = type + KM_TYPE_NR*smp_processor_id();
-	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-	BUG_ON(!pte_none(*(kmap_pte-idx)));
-	set_pte(kmap_pte-idx, mk_pte(page, prot));
-	arch_flush_lazy_mmu_mode();
-
-	return (void *)vaddr;
-}
-EXPORT_SYMBOL(kmap_atomic_high_prot);
-
-/*
- * This is the same as kmap_atomic() but can map memory that doesn't
- * have a struct page associated with it.
- */
-void *kmap_atomic_pfn(unsigned long pfn)
-{
-	return kmap_atomic_prot_pfn(pfn, kmap_prot);
-}
-EXPORT_SYMBOL_GPL(kmap_atomic_pfn);
-
-void kunmap_atomic_high(void *kvaddr)
-{
-	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-
-	if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
-	    vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
-		int idx, type;
-
-		type = kmap_atomic_idx();
-		idx = type + KM_TYPE_NR * smp_processor_id();
-
-#ifdef CONFIG_DEBUG_HIGHMEM
-		WARN_ON_ONCE(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
-#endif
-		/*
-		 * Force other mappings to Oops if they'll try to access this
-		 * pte without first remap it. Keeping stale mappings around
-		 * is a bad idea also, in case the page changes cacheability
-		 * attributes or becomes a protected page in a hypervisor.
-		 */
-		kpte_clear_flush(kmap_pte-idx, vaddr);
-		kmap_atomic_idx_pop();
-		arch_flush_lazy_mmu_mode();
-	}
-#ifdef CONFIG_DEBUG_HIGHMEM
-	else {
-		BUG_ON(vaddr < PAGE_OFFSET);
-		BUG_ON(vaddr >= (unsigned long)high_memory);
-	}
-#endif
-}
-EXPORT_SYMBOL(kunmap_atomic_high);
-
 void __init set_highmem_pages_init(void)
 {
 	struct zone *zone;
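
The deleted functions are superseded by the generic CONFIG_KMAP_LOCAL implementation; the TLB-flush and arch_flush_lazy_mmu_mode() duties move behind the arch_kmap_local_post_unmap() hook defined in asm/highmem.h above. A condensed sketch of the generic unmap side (again simplified, not the verbatim source):

	void kunmap_local_indexed(void *kvaddr)
	{
		unsigned long addr = (unsigned long)kvaddr & PAGE_MASK;
		int idx = kmap_local_idx() + KM_MAX_IDX * smp_processor_id();

		WARN_ON_ONCE(addr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
		/* Clear the pte so stale users fault instead of reading old data. */
		pte_clear(&init_mm, addr, kmap_pte - idx);
		arch_kmap_local_post_unmap(addr);	/* x86: flush_tlb_one_kernel() + lazy MMU */
		kmap_local_idx_pop();
	}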

arch/x86/mm/init_32.c

Lines changed: 0 additions & 15 deletions
@@ -394,19 +394,6 @@ kernel_physical_mapping_init(unsigned long start,
 	return last_map_addr;
 }
 
-pte_t *kmap_pte;
-
-static void __init kmap_init(void)
-{
-	unsigned long kmap_vstart;
-
-	/*
-	 * Cache the first kmap pte:
-	 */
-	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
-	kmap_pte = virt_to_kpte(kmap_vstart);
-}
-
 #ifdef CONFIG_HIGHMEM
 static void __init permanent_kmaps_init(pgd_t *pgd_base)
 {
@@ -712,8 +699,6 @@ void __init paging_init(void)
 
 	__flush_tlb_all();
 
-	kmap_init();
-
 	/*
 	 * NOTE: at this point the bootmem allocator is fully available.
 	 */
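
The boot-time kmap_pte cache and kmap_init() can go because the generic code resolves the base pte itself on first use; roughly (a sketch assuming the mm/highmem.c added by this series):

	static pte_t *__kmap_pte;

	static pte_t *kmap_get_pte(void)
	{
		/* Look up the first kmap fixmap pte lazily and cache it. */
		if (!__kmap_pte)
			__kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
		return __kmap_pte;
	}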

arch/x86/mm/iomap_32.c

Lines changed: 5 additions & 54 deletions
@@ -44,28 +44,7 @@ void iomap_free(resource_size_t base, unsigned long size)
 }
 EXPORT_SYMBOL_GPL(iomap_free);
 
-void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
-{
-	unsigned long vaddr;
-	int idx, type;
-
-	preempt_disable();
-	pagefault_disable();
-
-	type = kmap_atomic_idx_push();
-	idx = type + KM_TYPE_NR * smp_processor_id();
-	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-	set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
-	arch_flush_lazy_mmu_mode();
-
-	return (void *)vaddr;
-}
-
-/*
- * Map 'pfn' using protections 'prot'
- */
-void __iomem *
-iomap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
+void __iomem *iomap_atomic_pfn_prot(unsigned long pfn, pgprot_t prot)
 {
 	/*
 	 * For non-PAT systems, translate non-WB request to UC- just in
@@ -81,36 +60,8 @@ iomap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
 	/* Filter out unsupported __PAGE_KERNEL* bits: */
 	pgprot_val(prot) &= __default_kernel_pte_mask;
 
-	return (void __force __iomem *) kmap_atomic_prot_pfn(pfn, prot);
-}
-EXPORT_SYMBOL_GPL(iomap_atomic_prot_pfn);
-
-void
-iounmap_atomic(void __iomem *kvaddr)
-{
-	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-
-	if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
-	    vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
-		int idx, type;
-
-		type = kmap_atomic_idx();
-		idx = type + KM_TYPE_NR * smp_processor_id();
-
-#ifdef CONFIG_DEBUG_HIGHMEM
-		WARN_ON_ONCE(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
-#endif
-		/*
-		 * Force other mappings to Oops if they'll try to access this
-		 * pte without first remap it. Keeping stale mappings around
-		 * is a bad idea also, in case the page changes cacheability
-		 * attributes or becomes a protected page in a hypervisor.
-		 */
-		kpte_clear_flush(kmap_pte-idx, vaddr);
-		kmap_atomic_idx_pop();
-	}
-
-	pagefault_enable();
-	preempt_enable();
+	preempt_disable();
+	pagefault_disable();
+	return (void __force __iomem *)__kmap_local_pfn_prot(pfn, prot);
 }
-EXPORT_SYMBOL_GPL(iounmap_atomic);
+EXPORT_SYMBOL_GPL(iomap_atomic_pfn_prot);
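
For illustration, a hypothetical caller of the renamed pair; base, size, pfn and reg_offset are made up, and real drivers normally reach this code through the io_mapping helpers in include/linux/io-mapping.h rather than calling it directly:

	/* Hypothetical driver snippet, not from the tree. */
	pgprot_t prot;
	void __iomem *va;

	if (iomap_create_wc(base, size, &prot))	/* reserve a WC protection via PAT */
		return -EIO;

	va = iomap_atomic_pfn_prot(pfn, prot);	/* was: iomap_atomic_prot_pfn() */
	writel(0x1, va + reg_offset);		/* MMIO through the temporary mapping */
	iounmap_atomic(va);			/* clears the pte, re-enables pagefaults/preemption */

	iomap_free(base, size);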

include/linux/highmem.h

Lines changed: 1 addition & 1 deletion
@@ -217,7 +217,7 @@ static inline void __kunmap_atomic(void *addr)
 #endif /* CONFIG_HIGHMEM */
 
 #if !defined(CONFIG_KMAP_LOCAL)
-#if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)
+#if defined(CONFIG_HIGHMEM)
 
 DECLARE_PER_CPU(int, __kmap_atomic_idx);
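
Dropping the || defined(CONFIG_X86_32) special case is the payoff of the Kconfig change above: X86_32 now selects KMAP_LOCAL, so the legacy per-CPU __kmap_atomic_idx bookkeeping behind this guard is no longer built on x86.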
