Skip to content

Commit 26d21b1

Browse files
ioworker0 authored and akpm00 committed
mm/rmap: remove duplicated exit code in pagewalk loop
Patch series "Reclaim lazyfree THP without splitting", v8. This series adds support for reclaiming PMD-mapped THP marked as lazyfree without needing to first split the large folio via split_huge_pmd_address(). When the user no longer requires the pages, they would use madvise(MADV_FREE) to mark the pages as lazy free. Subsequently, they typically would not re-write to that memory again. During memory reclaim, if we detect that the large folio and its PMD are both still marked as clean and there are no unexpected references(such as GUP), so we can just discard the memory lazily, improving the efficiency of memory reclamation in this case. Performance Testing =================== On an Intel i5 CPU, reclaiming 1GiB of lazyfree THPs using mem_cgroup_force_empty() results in the following runtimes in seconds (shorter is better): -------------------------------------------- | Old | New | Change | -------------------------------------------- | 0.683426 | 0.049197 | -92.80% | -------------------------------------------- This patch (of 8): Introduce the labels walk_done and walk_abort as exit points to eliminate duplicated exit code in the pagewalk loop. 
Link: https://lkml.kernel.org/r/[email protected] Link: https://lkml.kernel.org/r/[email protected] Signed-off-by: Lance Yang <[email protected]> Reviewed-by: Zi Yan <[email protected]> Reviewed-by: Baolin Wang <[email protected]> Reviewed-by: David Hildenbrand <[email protected]> Reviewed-by: Barry Song <[email protected]> Cc: Bang Li <[email protected]> Cc: Fangrui Song <[email protected]> Cc: Jeff Xie <[email protected]> Cc: Kefeng Wang <[email protected]> Cc: Matthew Wilcox (Oracle) <[email protected]> Cc: Michal Hocko <[email protected]> Cc: Minchan Kim <[email protected]> Cc: Muchun Song <[email protected]> Cc: Peter Xu <[email protected]> Cc: Ryan Roberts <[email protected]> Cc: SeongJae Park <[email protected]> Cc: Yang Shi <[email protected]> Cc: Yin Fengwei <[email protected]> Cc: Zach O'Keefe <[email protected]> Signed-off-by: Andrew Morton <[email protected]>
1 parent 9ba85f5 commit 26d21b1

File tree

1 file changed

+15
-25
lines changed

1 file changed

+15
-25
lines changed

mm/rmap.c

Lines changed: 15 additions & 25 deletions
Original file line numberDiff line numberDiff line change
@@ -1681,9 +1681,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
16811681
/* Restore the mlock which got missed */
16821682
if (!folio_test_large(folio))
16831683
mlock_vma_folio(folio, vma);
1684-
page_vma_mapped_walk_done(&pvmw);
1685-
ret = false;
1686-
break;
1684+
goto walk_abort;
16871685
}
16881686

16891687
pfn = pte_pfn(ptep_get(pvmw.pte));
@@ -1721,11 +1719,8 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
17211719
*/
17221720
if (!anon) {
17231721
VM_BUG_ON(!(flags & TTU_RMAP_LOCKED));
1724-
if (!hugetlb_vma_trylock_write(vma)) {
1725-
page_vma_mapped_walk_done(&pvmw);
1726-
ret = false;
1727-
break;
1728-
}
1722+
if (!hugetlb_vma_trylock_write(vma))
1723+
goto walk_abort;
17291724
if (huge_pmd_unshare(mm, vma, address, pvmw.pte)) {
17301725
hugetlb_vma_unlock_write(vma);
17311726
flush_tlb_range(vma,
@@ -1740,8 +1735,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
17401735
* actual page and drop map count
17411736
* to zero.
17421737
*/
1743-
page_vma_mapped_walk_done(&pvmw);
1744-
break;
1738+
goto walk_done;
17451739
}
17461740
hugetlb_vma_unlock_write(vma);
17471741
}
@@ -1813,9 +1807,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
18131807
if (unlikely(folio_test_swapbacked(folio) !=
18141808
folio_test_swapcache(folio))) {
18151809
WARN_ON_ONCE(1);
1816-
ret = false;
1817-
page_vma_mapped_walk_done(&pvmw);
1818-
break;
1810+
goto walk_abort;
18191811
}
18201812

18211813
/* MADV_FREE page check */
@@ -1854,33 +1846,25 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
18541846
*/
18551847
set_pte_at(mm, address, pvmw.pte, pteval);
18561848
folio_set_swapbacked(folio);
1857-
ret = false;
1858-
page_vma_mapped_walk_done(&pvmw);
1859-
break;
1849+
goto walk_abort;
18601850
}
18611851

18621852
if (swap_duplicate(entry) < 0) {
18631853
set_pte_at(mm, address, pvmw.pte, pteval);
1864-
ret = false;
1865-
page_vma_mapped_walk_done(&pvmw);
1866-
break;
1854+
goto walk_abort;
18671855
}
18681856
if (arch_unmap_one(mm, vma, address, pteval) < 0) {
18691857
swap_free(entry);
18701858
set_pte_at(mm, address, pvmw.pte, pteval);
1871-
ret = false;
1872-
page_vma_mapped_walk_done(&pvmw);
1873-
break;
1859+
goto walk_abort;
18741860
}
18751861

18761862
/* See folio_try_share_anon_rmap(): clear PTE first. */
18771863
if (anon_exclusive &&
18781864
folio_try_share_anon_rmap_pte(folio, subpage)) {
18791865
swap_free(entry);
18801866
set_pte_at(mm, address, pvmw.pte, pteval);
1881-
ret = false;
1882-
page_vma_mapped_walk_done(&pvmw);
1883-
break;
1867+
goto walk_abort;
18841868
}
18851869
if (list_empty(&mm->mmlist)) {
18861870
spin_lock(&mmlist_lock);
@@ -1920,6 +1904,12 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
19201904
if (vma->vm_flags & VM_LOCKED)
19211905
mlock_drain_local();
19221906
folio_put(folio);
1907+
continue;
1908+
walk_abort:
1909+
ret = false;
1910+
walk_done:
1911+
page_vma_mapped_walk_done(&pvmw);
1912+
break;
19231913
}
19241914

19251915
mmu_notifier_invalidate_range_end(&range);

0 commit comments

Comments
 (0)