@@ -22,6 +22,7 @@
 #include <asm/page-states.h>
 #include <asm/pgalloc.h>
 #include <asm/machine.h>
+#include <asm/gmap_helpers.h>
 #include <asm/gmap.h>
 #include <asm/page.h>
 
@@ -619,63 +620,20 @@ EXPORT_SYMBOL(__gmap_link);
  */
 void __gmap_zap(struct gmap *gmap, unsigned long gaddr)
 {
-	struct vm_area_struct *vma;
 	unsigned long vmaddr;
-	spinlock_t *ptl;
-	pte_t *ptep;
+
+	mmap_assert_locked(gmap->mm);
 
 	/* Find the vm address for the guest address */
 	vmaddr = (unsigned long) radix_tree_lookup(&gmap->guest_to_host,
						   gaddr >> PMD_SHIFT);
 	if (vmaddr) {
 		vmaddr |= gaddr & ~PMD_MASK;
-
-		vma = vma_lookup(gmap->mm, vmaddr);
-		if (!vma || is_vm_hugetlb_page(vma))
-			return;
-
-		/* Get pointer to the page table entry */
-		ptep = get_locked_pte(gmap->mm, vmaddr, &ptl);
-		if (likely(ptep)) {
-			ptep_zap_unused(gmap->mm, vmaddr, ptep, 0);
-			pte_unmap_unlock(ptep, ptl);
-		}
+		gmap_helper_zap_one_page(gmap->mm, vmaddr);
 	}
 }
 EXPORT_SYMBOL_GPL(__gmap_zap);
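
The body that used to live inline here presumably moves into the new gmap_helpers code included at the top of the file. Below is a minimal sketch of what gmap_helper_zap_one_page() would contain, assuming it simply carries over the removed lines; the function name comes from the new call site, but the body is reconstructed from this diff, not quoted from the helper file:

/* Reconstructed sketch -- assumes the helper carries over the removed logic. */
void gmap_helper_zap_one_page(struct mm_struct *mm, unsigned long vmaddr)
{
	struct vm_area_struct *vma;
	spinlock_t *ptl;
	pte_t *ptep;

	mmap_assert_locked(mm);

	vma = vma_lookup(mm, vmaddr);
	if (!vma || is_vm_hugetlb_page(vma))
		return;

	/* Get a pointer to the locked page table entry */
	ptep = get_locked_pte(mm, vmaddr, &ptl);
	if (likely(ptep)) {
		ptep_zap_unused(mm, vmaddr, ptep, 0);
		pte_unmap_unlock(ptep, ptl);
	}
}

The same hunk continues below with the wholesale removal of gmap_discard().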
 
-void gmap_discard(struct gmap *gmap, unsigned long from, unsigned long to)
-{
-	unsigned long gaddr, vmaddr, size;
-	struct vm_area_struct *vma;
-
-	mmap_read_lock(gmap->mm);
-	for (gaddr = from; gaddr < to;
-	     gaddr = (gaddr + PMD_SIZE) & PMD_MASK) {
-		/* Find the vm address for the guest address */
-		vmaddr = (unsigned long)
-			radix_tree_lookup(&gmap->guest_to_host,
-					  gaddr >> PMD_SHIFT);
-		if (!vmaddr)
-			continue;
-		vmaddr |= gaddr & ~PMD_MASK;
-		/* Find vma in the parent mm */
-		vma = find_vma(gmap->mm, vmaddr);
-		if (!vma)
-			continue;
-		/*
-		 * We do not discard pages that are backed by
-		 * hugetlbfs, so we don't have to refault them.
-		 */
-		if (is_vm_hugetlb_page(vma))
-			continue;
-		size = min(to - gaddr, PMD_SIZE - (gaddr & ~PMD_MASK));
-		zap_page_range_single(vma, vmaddr, size, NULL);
-	}
-	mmap_read_unlock(gmap->mm);
-}
-EXPORT_SYMBOL_GPL(gmap_discard);
-
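
gmap_discard() leaves this file with no replacement call site shown in the diff; presumably a range-based counterpart now sits next to gmap_helper_zap_one_page(). A hedged sketch of such a helper follows. The name gmap_helper_discard() and the mm/host-address signature are assumptions, not quoted from the tree; it zaps whole VMA ranges instead of iterating PMD segments, since the guest-to-host radix tree lookup would stay with the gmap-aware caller:

/* Hypothetical helper: zap a host address range, skipping hugetlbfs VMAs. */
void gmap_helper_discard(struct mm_struct *mm, unsigned long vmaddr,
			 unsigned long vmaddr_end)
{
	struct vm_area_struct *vma;
	unsigned long start, end;

	mmap_assert_locked(mm);

	while (vmaddr < vmaddr_end) {
		vma = find_vma_intersection(mm, vmaddr, vmaddr_end);
		if (!vma)
			return;
		start = max(vmaddr, vma->vm_start);
		end = min(vmaddr_end, vma->vm_end);
		/* Hugetlbfs-backed pages are not discarded, to avoid refaults. */
		if (!is_vm_hugetlb_page(vma))
			zap_page_range_single(vma, start, end - start, NULL);
		vmaddr = vma->vm_end;
	}
}

Iterating VMAs rather than PMD-sized guest segments keeps the helper free of any gmap knowledge, which is presumably the point of the split.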
 static LIST_HEAD(gmap_notifier_list);
 static DEFINE_SPINLOCK(gmap_notifier_lock);
 
@@ -2268,138 +2226,6 @@ int s390_enable_sie(void)
 }
 EXPORT_SYMBOL_GPL(s390_enable_sie);
 
-static int find_zeropage_pte_entry(pte_t *pte, unsigned long addr,
-				   unsigned long end, struct mm_walk *walk)
-{
-	unsigned long *found_addr = walk->private;
-
-	/* Return 1 if the page is a zeropage. */
-	if (is_zero_pfn(pte_pfn(*pte))) {
-		/*
-		 * Shared zeropage in e.g., a FS DAX mapping? We cannot do the
-		 * right thing and likely don't care: FAULT_FLAG_UNSHARE
-		 * currently only works in COW mappings, which is also where
-		 * mm_forbids_zeropage() is checked.
-		 */
-		if (!is_cow_mapping(walk->vma->vm_flags))
-			return -EFAULT;
-
-		*found_addr = addr;
-		return 1;
-	}
-	return 0;
-}
-
-static const struct mm_walk_ops find_zeropage_ops = {
-	.pte_entry	= find_zeropage_pte_entry,
-	.walk_lock	= PGWALK_WRLOCK,
-};
-
-/*
- * Unshare all shared zeropages, replacing them by anonymous pages. Note that
- * we cannot simply zap all shared zeropages, because this could later
- * trigger unexpected userfaultfd missing events.
- *
- * This must be called after mm->context.allow_cow_sharing was
- * set to 0, to avoid future mappings of shared zeropages.
- *
- * The core mm contracts with s390 that, even if it were to remove a page
- * table (so that a racing walk_page_range_vma() calling
- * pte_offset_map_lock() would fail), it will never insert a page table
- * containing empty zero pages once mm_forbids_zeropage(mm), i.e.
- * mm->context.allow_cow_sharing, is set to 0.
- */
-static int __s390_unshare_zeropages(struct mm_struct *mm)
-{
-	struct vm_area_struct *vma;
-	VMA_ITERATOR(vmi, mm, 0);
-	unsigned long addr;
-	vm_fault_t fault;
-	int rc;
-
-	for_each_vma(vmi, vma) {
-		/*
-		 * We could only look at COW mappings, but it's more future
-		 * proof to catch unexpected zeropages in other mappings and
-		 * fail.
-		 */
-		if ((vma->vm_flags & VM_PFNMAP) || is_vm_hugetlb_page(vma))
-			continue;
-		addr = vma->vm_start;
-
-retry:
-		rc = walk_page_range_vma(vma, addr, vma->vm_end,
-					 &find_zeropage_ops, &addr);
-		if (rc < 0)
-			return rc;
-		else if (!rc)
-			continue;
-
-		/* addr was updated by find_zeropage_pte_entry() */
-		fault = handle_mm_fault(vma, addr,
-					FAULT_FLAG_UNSHARE | FAULT_FLAG_REMOTE,
-					NULL);
-		if (fault & VM_FAULT_OOM)
-			return -ENOMEM;
-		/*
-		 * See break_ksm(): even after handle_mm_fault() returned 0, we
-		 * must start the lookup from the current address, because
-		 * handle_mm_fault() may back out if there's any difficulty.
-		 *
-		 * VM_FAULT_SIGBUS and VM_FAULT_SIGSEGV are unexpected but
-		 * maybe they could trigger in the future on concurrent
-		 * truncation. In that case, the shared zeropage would be gone
-		 * and we can simply retry and make progress.
-		 */
-		cond_resched();
-		goto retry;
-	}
-
-	return 0;
-}
-
-static int __s390_disable_cow_sharing(struct mm_struct *mm)
-{
-	int rc;
-
-	if (!mm->context.allow_cow_sharing)
-		return 0;
-
-	mm->context.allow_cow_sharing = 0;
-
-	/* Replace all shared zeropages by anonymous pages. */
-	rc = __s390_unshare_zeropages(mm);
-	/*
-	 * Make sure to disable KSM (if enabled for the whole process or
-	 * individual VMAs). Note that nothing currently hinders user space
-	 * from re-enabling it.
-	 */
-	if (!rc)
-		rc = ksm_disable(mm);
-	if (rc)
-		mm->context.allow_cow_sharing = 1;
-	return rc;
-}
-
-/*
- * Disable most COW-sharing of memory pages for the whole process:
- * (1) Disable KSM and unmerge/unshare any KSM pages.
- * (2) Disallow shared zeropages and unshare any zeropages that are mapped.
- *
- * Note that we currently don't bother with COW-shared pages that are shared
- * with parent/child processes due to fork().
- */
-int s390_disable_cow_sharing(void)
-{
-	int rc;
-
-	mmap_write_lock(current->mm);
-	rc = __s390_disable_cow_sharing(current->mm);
-	mmap_write_unlock(current->mm);
-	return rc;
-}
-EXPORT_SYMBOL_GPL(s390_disable_cow_sharing);
-
 /*
  * Enable storage key handling from now on and initialize the storage
  * keys with the default key.
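
Taken together with the new #include <asm/gmap_helpers.h> at the top of the file, the moved interface presumably has a header along these lines. Only gmap_helper_zap_one_page() and gmap_helper_disable_cow_sharing() are confirmed by call sites in this diff; gmap_helper_discard() is the hypothetical name used in the sketch above:

/* Assumed shape of arch/s390/include/asm/gmap_helpers.h, not quoted from the tree. */
#ifndef _ASM_S390_GMAP_HELPERS_H
#define _ASM_S390_GMAP_HELPERS_H

struct mm_struct;

void gmap_helper_zap_one_page(struct mm_struct *mm, unsigned long vmaddr);
void gmap_helper_discard(struct mm_struct *mm, unsigned long vmaddr,
			 unsigned long vmaddr_end);
int gmap_helper_disable_cow_sharing(void);

#endif /* _ASM_S390_GMAP_HELPERS_H */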
@@ -2467,7 +2293,7 @@ int s390_enable_skey(void)
 		goto out_up;
 
 	mm->context.uses_skeys = 1;
-	rc = __s390_disable_cow_sharing(mm);
+	rc = gmap_helper_disable_cow_sharing();
 	if (rc) {
 		mm->context.uses_skeys = 0;
 		goto out_up;
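
One locking detail is worth noting: the removed s390_disable_cow_sharing() took mmap_write_lock itself, while this call site in s390_enable_skey() already holds it (see the out_up path). The helper therefore presumably asserts the lock instead of taking it. A sketch, assuming it otherwise mirrors the removed __s390_disable_cow_sharing() with the zeropage-unsharing walk moved along under a hypothetical name:

/* Sketch -- assumes the helper mirrors the removed __s390_disable_cow_sharing(). */
int gmap_helper_disable_cow_sharing(void)
{
	struct mm_struct *mm = current->mm;
	int rc;

	mmap_assert_write_locked(mm);

	if (!mm->context.allow_cow_sharing)
		return 0;
	mm->context.allow_cow_sharing = 0;

	/* Replace all shared zeropages by anonymous pages. */
	rc = __gmap_helper_unshare_zeropages(mm);	/* hypothetical moved copy */
	/* Disable KSM; nothing prevents user space from re-enabling it. */
	if (!rc)
		rc = ksm_disable(mm);
	if (rc)
		mm->context.allow_cow_sharing = 1;
	return rc;
}

If the helper took the write lock itself, calling it here under mmap_write_lock would deadlock, which is why the assert variant is the natural fit for this call site.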