
Commit f283d0c

Author: Rafael Aquini (committed)
mm: fix copy_vma() error handling for hugetlb mappings
JIRA: https://issues.redhat.com/browse/RHEL-101296

Conflicts:
* mm/mremap.c: differences due to RHEL-10 missing upstream commit
  b714ccb ("mm/mremap: complete refactor of move_vma()") and its
  accompanying series.

commit ee40c99
Author: Ricardo Cañuelo Navarro <[email protected]>
Date: Fri May 23 14:19:10 2025 +0200

mm: fix copy_vma() error handling for hugetlb mappings

If, during a mremap() operation for a hugetlb-backed memory mapping,
copy_vma() fails after the source vma has been duplicated and opened
(ie. vma_link() fails), the error is handled by closing the new vma.
This updates the hugetlbfs reservation counter of the reservation map
which at this point is referenced by both the source vma and the new
copy. As a result, once the new vma has been freed and copy_vma()
returns, the reservation counter for the source vma will be incorrect.

This patch addresses this corner case by clearing the hugetlb private
page reservation reference for the new vma and decrementing the
reference before closing the vma, so that vma_close() won't update the
reservation counter. This is also what copy_vma_and_data() does with
the source vma if copy_vma() succeeds, so a helper function has been
added to do the fixup in both functions.

The issue was reported by a private syzbot instance and can be
reproduced using the C reproducer in [1]. It's also a possible
duplicate of public syzbot report [2]. The WARNING report is:

============================================================
page_counter underflow: -1024 nr_pages=1024
WARNING: CPU: 0 PID: 3287 at mm/page_counter.c:61 page_counter_cancel+0xf6/0x120
Modules linked in:
CPU: 0 UID: 0 PID: 3287 Comm: repro__WARNING_ Not tainted 6.15.0-rc7+ #54 NONE
Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS rel-1.16.3-2-gc13ff2cd-prebuilt.qemu.org 04/01/2014
RIP: 0010:page_counter_cancel+0xf6/0x120
Code: ff 5b 41 5e 41 5f 5d c3 cc cc cc cc e8 f3 4f 8f ff c6 05 64 01 27 06 01 48 c7 c7 60 15 f8 85 48 89 de 4c 89 fa e8 2a a7 51 ff <0f> 0b e9 66 ff ff ff 44 89 f9 80 e1 07 38 c1 7c 9d 4c 81
RSP: 0018:ffffc900025df6a0 EFLAGS: 00010246
RAX: 2edfc409ebb44e00 RBX: fffffffffffffc00 RCX: ffff8880155f0000
RDX: 0000000000000000 RSI: 0000000000000001 RDI: 0000000000000000
RBP: dffffc0000000000 R08: ffffffff81c4a23c R09: 1ffff1100330482a
R10: dffffc0000000000 R11: ffffed100330482b R12: 0000000000000000
R13: ffff888058a882c0 R14: ffff888058a882c0 R15: 0000000000000400
FS: 0000000000000000(0000) GS:ffff88808fc53000(0000) knlGS:0000000000000000
CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
CR2: 00000000004b33e0 CR3: 00000000076d6000 CR4: 00000000000006f0
Call Trace:
 <TASK>
 page_counter_uncharge+0x33/0x80
 hugetlb_cgroup_uncharge_counter+0xcb/0x120
 hugetlb_vm_op_close+0x579/0x960
 ? __pfx_hugetlb_vm_op_close+0x10/0x10
 remove_vma+0x88/0x130
 exit_mmap+0x71e/0xe00
 ? __pfx_exit_mmap+0x10/0x10
 ? __mutex_unlock_slowpath+0x22e/0x7f0
 ? __pfx_exit_aio+0x10/0x10
 ? __up_read+0x256/0x690
 ? uprobe_clear_state+0x274/0x290
 ? mm_update_next_owner+0xa9/0x810
 __mmput+0xc9/0x370
 exit_mm+0x203/0x2f0
 ? __pfx_exit_mm+0x10/0x10
 ? taskstats_exit+0x32b/0xa60
 do_exit+0x921/0x2740
 ? do_raw_spin_lock+0x155/0x3b0
 ? __pfx_do_exit+0x10/0x10
 ? __pfx_do_raw_spin_lock+0x10/0x10
 ? _raw_spin_lock_irq+0xc5/0x100
 do_group_exit+0x20c/0x2c0
 get_signal+0x168c/0x1720
 ? __pfx_get_signal+0x10/0x10
 ? schedule+0x165/0x360
 arch_do_signal_or_restart+0x8e/0x7d0
 ? __pfx_arch_do_signal_or_restart+0x10/0x10
 ? __pfx___se_sys_futex+0x10/0x10
 syscall_exit_to_user_mode+0xb8/0x2c0
 do_syscall_64+0x75/0x120
 entry_SYSCALL_64_after_hwframe+0x76/0x7e
RIP: 0033:0x422dcd
Code: Unable to access opcode bytes at 0x422da3.
RSP: 002b:00007ff266cdb208 EFLAGS: 00000246 ORIG_RAX: 00000000000000ca
RAX: 0000000000000001 RBX: 00007ff266cdbcdc RCX: 0000000000422dcd
RDX: 00000000000f4240 RSI: 0000000000000081 RDI: 00000000004c7bec
RBP: 00007ff266cdb220 R08: 203a6362696c6720 R09: 203a6362696c6720
R10: 0000200000c00000 R11: 0000000000000246 R12: ffffffffffffffd0
R13: 0000000000000002 R14: 00007ffe1cb5f520 R15: 00007ff266cbb000
 </TASK>
============================================================

Link: https://lkml.kernel.org/r/20250523-warning_in_page_counter_cancel-v2-1-b6df1a8cfefd@igalia.com
Link: https://people.igalia.com/rcn/kernel_logs/20250422__WARNING_in_page_counter_cancel__repro.c [1]
Link: https://lore.kernel.org/all/[email protected]/ [2]
Signed-off-by: Ricardo Cañuelo Navarro <[email protected]>
Suggested-by: Lorenzo Stoakes <[email protected]>
Reviewed-by: Liam R. Howlett <[email protected]>
Cc: Florent Revest <[email protected]>
Cc: Jann Horn <[email protected]>
Cc: Oscar Salvador <[email protected]>
Cc: Vlastimil Babka <[email protected]>
Cc: <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>

Signed-off-by: Rafael Aquini <[email protected]>
1 parent 71832c7 commit f283d0c
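
The C reproducer for this issue is the one linked as [1] in the message above and is not reproduced here. As a rough illustration only, the sketch below (hypothetical userspace code, not the syzbot reproducer) shows the operation involved: a forced mremap() move of a hugetlb-backed mapping, which is what makes copy_vma() duplicate the source VMA and share its reservation map. It assumes 2 MiB default huge pages already reserved via vm.nr_hugepages and an arbitrary, presumed-free destination address; it does not by itself force the vma_link() failure that triggers the bug.

/*
 * Hypothetical sketch: move a hugetlb mapping with mremap(). This only
 * exercises the copy_vma() path; it does not force the allocation
 * failure needed to hit the buggy error branch.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#define LEN (2UL * 1024 * 1024)  /* one 2 MiB huge page (assumed default size) */

int main(void)
{
        /* Hugetlb-backed anonymous mapping; needs hugepages reserved first. */
        void *old = mmap(NULL, LEN, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
        if (old == MAP_FAILED) {
                perror("mmap(MAP_HUGETLB)");
                return 1;
        }
        memset(old, 0, LEN);  /* touch the mapping so the huge page is faulted in */

        /* MREMAP_FIXED forces a move, so copy_vma() duplicates the source VMA. */
        void *new_addr = (void *)0x700000000000UL;  /* assumed free and aligned */
        void *moved = mremap(old, LEN, LEN, MREMAP_MAYMOVE | MREMAP_FIXED, new_addr);
        if (moved == MAP_FAILED) {
                perror("mremap");
                munmap(old, LEN);
                return 1;
        }

        munmap(moved, LEN);
        return 0;
}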

File tree

4 files changed (+22, -4 lines)


include/linux/hugetlb.h

Lines changed: 5 additions & 0 deletions
@@ -272,6 +272,7 @@ long hugetlb_change_protection(struct vm_area_struct *vma,
 bool is_hugetlb_entry_migration(pte_t pte);
 bool is_hugetlb_entry_hwpoisoned(pte_t pte);
 void hugetlb_unshare_all_pmds(struct vm_area_struct *vma);
+void fixup_hugetlb_reservations(struct vm_area_struct *vma);

 #else /* !CONFIG_HUGETLB_PAGE */

@@ -465,6 +466,10 @@ static inline vm_fault_t hugetlb_fault(struct mm_struct *mm,

 static inline void hugetlb_unshare_all_pmds(struct vm_area_struct *vma) { }

+static inline void fixup_hugetlb_reservations(struct vm_area_struct *vma)
+{
+}
+
 #endif /* !CONFIG_HUGETLB_PAGE */

 #ifndef pgd_write

mm/hugetlb.c

Lines changed: 15 additions & 1 deletion
@@ -1218,7 +1218,7 @@ void hugetlb_dup_vma_private(struct vm_area_struct *vma)
 /*
  * Reset and decrement one ref on hugepage private reservation.
  * Called with mm->mmap_lock writer semaphore held.
- * This function should be only used by move_vma() and operate on
+ * This function should be only used by mremap and operate on
  * same sized vma. It should never come here with last ref on the
  * reservation.
  */
@@ -7573,6 +7573,20 @@ void hugetlb_unshare_all_pmds(struct vm_area_struct *vma)
                         ALIGN_DOWN(vma->vm_end, PUD_SIZE));
 }

+/*
+ * For hugetlb, mremap() is an odd edge case - while the VMA copying is
+ * performed, we permit both the old and new VMAs to reference the same
+ * reservation.
+ *
+ * We fix this up after the operation succeeds, or if a newly allocated VMA
+ * is closed as a result of a failure to allocate memory.
+ */
+void fixup_hugetlb_reservations(struct vm_area_struct *vma)
+{
+        if (is_vm_hugetlb_page(vma))
+                clear_vma_resv_huge_pages(vma);
+}
+
 #ifdef CONFIG_CMA
 static bool cma_reserve_called __initdata;

mm/mremap.c

Lines changed: 1 addition & 3 deletions
@@ -782,9 +782,7 @@ static unsigned long move_vma(struct vm_area_struct *vma,
                mremap_userfaultfd_prep(new_vma, uf);
        }

-       if (is_vm_hugetlb_page(vma)) {
-               clear_vma_resv_huge_pages(vma);
-       }
+       fixup_hugetlb_reservations(vma);

        /* Conceal VM_ACCOUNT so old reservation is not undone */
        if (vm_flags & VM_ACCOUNT && !(flags & MREMAP_DONTUNMAP)) {

mm/vma.c

Lines changed: 1 addition & 0 deletions
@@ -1772,6 +1772,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
        return new_vma;

 out_vma_link:
+       fixup_hugetlb_reservations(new_vma);
        vma_close(new_vma);

        if (new_vma->vm_file)
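
The hunks above are the actual fix. As a side illustration of the accounting problem it closes, here is a toy, self-contained C sketch (all names invented; this is not kernel code) of why closing the duplicated VMA while it still references the shared reservation map would drive the counter negative, producing the page_counter underflow reported in the commit message.

#include <assert.h>
#include <stdio.h>

/* Toy stand-ins for the hugetlb reservation map and vm_area_struct. */
struct toy_resv_map { long reserved; };
struct toy_vma { struct toy_resv_map *resv; };

/* Close path: uncharges the reservation if the vma still references it. */
static void toy_vma_close(struct toy_vma *vma)
{
        if (vma->resv)
                vma->resv->reserved -= 1024;
}

/* Analogue of the fixup: detach the duplicated reference before closing. */
static void toy_fixup(struct toy_vma *vma)
{
        vma->resv = NULL;
}

int main(void)
{
        struct toy_resv_map map = { .reserved = 1024 };
        struct toy_vma src = { .resv = &map };
        struct toy_vma copy = src;      /* copy_vma(): both vmas share the map */

        /* Error path: the copy failed to link, so it gets closed. */
        toy_fixup(&copy);               /* without this the map is uncharged twice */
        toy_vma_close(&copy);

        toy_vma_close(&src);            /* eventual teardown of the source mapping */
        printf("reserved = %ld\n", map.reserved);  /* 0 with the fixup, -1024 without */
        assert(map.reserved == 0);
        return 0;
}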
