Skip to content

Commit 6ed31ba

Browse files
Kefeng Wang (committed by akpm00)
authored and committed
mm: memory: check userfaultfd_wp() in vmf_orig_pte_uffd_wp()
Add a userfaultfd_wp() check in vmf_orig_pte_uffd_wp() to avoid the unnecessary FAULT_FLAG_ORIG_PTE_VALID check/pte_marker_entry_uffd_wp() in most page faults. Note: the function vmf_orig_pte_uffd_wp() is not inlined in the two kernel versions; the difference is shown below (perf data). perf report -i perf.data.before | grep vmf 0.17% 0.13% lat_pagefault [kernel.kallsyms] [k] vmf_orig_pte_uffd_wp.part.0.isra.0 perf report -i perf.data.after | grep vmf lat_pagefault -W 5 -N 5 /tmp/XXX latency before after diff average(8 tests) 0.262675 0.2600375 -0.0026375 Although the gain is small, uffd_wp is a newer feature than in previous kernels; when the vma is not registered with UFFD_WP, let's avoid executing the new logic. Also add the __always_inline attribute to vmf_orig_pte_uffd_wp(), which makes set_pte_range() only check the VM_UFFD_WP flag without a function call. In addition, directly call vmf_orig_pte_uffd_wp() in do_anonymous_page() and set_pte_range() to save an uffd_wp variable. Link: https://lkml.kernel.org/r/[email protected] Signed-off-by: Kefeng Wang <[email protected]> Reviewed-by: Peter Xu <[email protected]> Signed-off-by: Andrew Morton <[email protected]>
1 parent 2d8b272 commit 6ed31ba

File tree

1 file changed

+5
-5
lines changed

1 file changed

+5
-5
lines changed

mm/memory.c

Lines changed: 5 additions & 5 deletions
Original file line number | Diff line number | Diff line change
@@ -112,8 +112,10 @@ static bool vmf_pte_changed(struct vm_fault *vmf);
112112
* Return true if the original pte was a uffd-wp pte marker (so the pte was
113113
* wr-protected).
114114
*/
115-
static bool vmf_orig_pte_uffd_wp(struct vm_fault *vmf)
115+
static __always_inline bool vmf_orig_pte_uffd_wp(struct vm_fault *vmf)
116116
{
117+
if (!userfaultfd_wp(vmf->vma))
118+
return false;
117119
if (!(vmf->flags & FAULT_FLAG_ORIG_PTE_VALID))
118120
return false;
119121

@@ -4393,7 +4395,6 @@ static struct folio *alloc_anon_folio(struct vm_fault *vmf)
43934395
*/
43944396
static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
43954397
{
4396-
bool uffd_wp = vmf_orig_pte_uffd_wp(vmf);
43974398
struct vm_area_struct *vma = vmf->vma;
43984399
unsigned long addr = vmf->address;
43994400
struct folio *folio;
@@ -4493,7 +4494,7 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
44934494
folio_add_new_anon_rmap(folio, vma, addr);
44944495
folio_add_lru_vma(folio, vma);
44954496
setpte:
4496-
if (uffd_wp)
4497+
if (vmf_orig_pte_uffd_wp(vmf))
44974498
entry = pte_mkuffd_wp(entry);
44984499
set_ptes(vma->vm_mm, addr, vmf->pte, entry, nr_pages);
44994500

@@ -4668,7 +4669,6 @@ void set_pte_range(struct vm_fault *vmf, struct folio *folio,
46684669
struct page *page, unsigned int nr, unsigned long addr)
46694670
{
46704671
struct vm_area_struct *vma = vmf->vma;
4671-
bool uffd_wp = vmf_orig_pte_uffd_wp(vmf);
46724672
bool write = vmf->flags & FAULT_FLAG_WRITE;
46734673
bool prefault = in_range(vmf->address, addr, nr * PAGE_SIZE);
46744674
pte_t entry;
@@ -4683,7 +4683,7 @@ void set_pte_range(struct vm_fault *vmf, struct folio *folio,
46834683

46844684
if (write)
46854685
entry = maybe_mkwrite(pte_mkdirty(entry), vma);
4686-
if (unlikely(uffd_wp))
4686+
if (unlikely(vmf_orig_pte_uffd_wp(vmf)))
46874687
entry = pte_mkuffd_wp(entry);
46884688
/* copy-on-write page */
46894689
if (write && !(vma->vm_flags & VM_SHARED)) {

0 commit comments

Comments
 (0)