@@ -44,28 +44,7 @@ void iomap_free(resource_size_t base, unsigned long size)
 }
 EXPORT_SYMBOL_GPL(iomap_free);
 
-void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
-{
-	unsigned long vaddr;
-	int idx, type;
-
-	preempt_disable();
-	pagefault_disable();
-
-	type = kmap_atomic_idx_push();
-	idx = type + KM_TYPE_NR * smp_processor_id();
-	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-	set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
-	arch_flush_lazy_mmu_mode();
-
-	return (void *)vaddr;
-}
-
-/*
- * Map 'pfn' using protections 'prot'
- */
-void __iomem *
-iomap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
+void __iomem *iomap_atomic_pfn_prot(unsigned long pfn, pgprot_t prot)
 {
 	/*
 	 * For non-PAT systems, translate non-WB request to UC- just in
@@ -81,36 +60,8 @@ iomap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
 	/* Filter out unsupported __PAGE_KERNEL* bits: */
 	pgprot_val(prot) &= __default_kernel_pte_mask;
 
-	return (void __force __iomem *) kmap_atomic_prot_pfn(pfn, prot);
-}
-EXPORT_SYMBOL_GPL(iomap_atomic_prot_pfn);
-
-void
-iounmap_atomic(void __iomem *kvaddr)
-{
-	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-
-	if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
-	    vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
-		int idx, type;
-
-		type = kmap_atomic_idx();
-		idx = type + KM_TYPE_NR * smp_processor_id();
-
-#ifdef CONFIG_DEBUG_HIGHMEM
-		WARN_ON_ONCE(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
-#endif
-		/*
-		 * Force other mappings to Oops if they'll try to access this
-		 * pte without first remap it. Keeping stale mappings around
-		 * is a bad idea also, in case the page changes cacheability
-		 * attributes or becomes a protected page in a hypervisor.
-		 */
-		kpte_clear_flush(kmap_pte - idx, vaddr);
-		kmap_atomic_idx_pop();
-	}
-
-	pagefault_enable();
-	preempt_enable();
+	preempt_disable();
+	pagefault_disable();
+	return (void __force __iomem *)__kmap_local_pfn_prot(pfn, prot);
 }
-EXPORT_SYMBOL_GPL(iounmap_atomic);
+EXPORT_SYMBOL_GPL(iomap_atomic_pfn_prot);
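
For context, a minimal usage sketch (not part of this patch): drivers normally reach the atomic iomap path through the io_mapping_map_atomic_wc()/io_mapping_unmap_atomic() wrappers in include/linux/io-mapping.h rather than calling the pfn-based helper directly. The io_mapping, offset and register value below are illustrative placeholders only.

	#include <linux/io-mapping.h>
	#include <linux/io.h>

	/*
	 * Illustrative sketch only: write one 32-bit value into a
	 * write-combined device aperture through a short-lived atomic
	 * mapping.  The io_mapping is assumed to have been set up
	 * elsewhere with io_mapping_create_wc(); offset and value are
	 * placeholders, not part of this patch.
	 */
	static void example_poke_aperture(struct io_mapping *mapping,
					  unsigned long offset, u32 value)
	{
		void __iomem *vaddr;

		/* Disables pagefaults/preemption and maps one page of the aperture. */
		vaddr = io_mapping_map_atomic_wc(mapping, offset);

		writel(value, vaddr);

		/* Unmaps and re-enables pagefaults/preemption. */
		io_mapping_unmap_atomic(vaddr);
	}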