
Commit c97a9e1

Nick Piggin authored and Linus Torvalds committed
mm: more rmap checking
Re-introduce rmap verification patches that Hugh removed when he removed
PG_map_lock. PG_map_lock actually isn't needed to synchronise access to
anonymous pages, because PG_locked and PTL together already do.

These checks were important in discovering and fixing a rare rmap corruption
in SLES9.

Signed-off-by: Nick Piggin <[email protected]>
Cc: Hugh Dickins <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
1 parent ea12589 commit c97a9e1
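
For readers unfamiliar with the anon-rmap layout these checks verify: an
anonymous page stores its anon_vma pointer in page->mapping, tagged with
PAGE_MAPPING_ANON in the low bit, and stores its linear offset within the vma
in page->index. A minimal sketch of that encoding, paraphrased from the
2.6.22-era <linux/mm.h> and <linux/pagemap.h> rather than quoted verbatim:

    /*
     * Sketch of the invariant the new BUG_ONs enforce; paraphrased,
     * not verbatim kernel code.
     */
    #define PAGE_MAPPING_ANON	1	/* low bit of page->mapping tags anon pages */

    /* The index an anon page must carry: its page offset within the vma,
     * plus the vma's own offset into its backing object. */
    static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
    					unsigned long address)
    {
    	pgoff_t pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
    	return pgoff + vma->vm_pgoff;
    }

Since PG_locked and the pte lock pin both fields while a mapping is added,
the checks in this patch can compare against these recomputed values directly.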

3 files changed, 62 insertions(+), 11 deletions(-)

include/linux/rmap.h

Lines changed: 5 additions & 8 deletions

@@ -74,17 +74,14 @@ void page_add_new_anon_rmap(struct page *, struct vm_area_struct *, unsigned lon
 void page_add_file_rmap(struct page *);
 void page_remove_rmap(struct page *, struct vm_area_struct *);
 
-/**
- * page_dup_rmap - duplicate pte mapping to a page
- * @page: the page to add the mapping to
- *
- * For copy_page_range only: minimal extract from page_add_rmap,
- * avoiding unnecessary tests (already checked) so it's quicker.
- */
-static inline void page_dup_rmap(struct page *page)
+#ifdef CONFIG_DEBUG_VM
+void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address);
+#else
+static inline void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address)
 {
 	atomic_inc(&page->_mapcount);
 }
+#endif
 
 /*
  * Called from mm/vmscan.c to handle paging out

mm/memory.c

Lines changed: 1 addition & 1 deletion

@@ -481,7 +481,7 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 	page = vm_normal_page(vma, addr, pte);
 	if (page) {
 		get_page(page);
-		page_dup_rmap(page);
+		page_dup_rmap(page, vma, addr);
 		rss[!!PageAnon(page)]++;
 	}
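
The vma and addr now threaded through from copy_one_pte() let a debug build
re-derive where the page ought to be mapped. Whether the anon check applies at
all is decided by PageAnon(); roughly, paraphrasing the <linux/mm.h> of this
era (an assumption about the exact form, not a quote):

    /* An anon page is recognised by the PAGE_MAPPING_ANON tag bit that
     * __page_set_anon_rmap() folds into page->mapping. */
    static inline int PageAnon(struct page *page)
    {
    	return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
    }

File-backed pages skip the anon-rmap comparison and only get the mapcount
sanity check in page_dup_rmap() below.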

mm/rmap.c

Lines changed: 56 additions & 2 deletions

@@ -529,20 +529,52 @@ static void __page_set_anon_rmap(struct page *page,
 	__inc_zone_page_state(page, NR_ANON_PAGES);
 }
 
+/**
+ * __page_check_anon_rmap - sanity check anonymous rmap addition
+ * @page: the page to add the mapping to
+ * @vma: the vm area in which the mapping is added
+ * @address: the user virtual address mapped
+ */
+static void __page_check_anon_rmap(struct page *page,
+	struct vm_area_struct *vma, unsigned long address)
+{
+#ifdef CONFIG_DEBUG_VM
+	/*
+	 * The page's anon-rmap details (mapping and index) are guaranteed to
+	 * be set up correctly at this point.
+	 *
+	 * We have exclusion against page_add_anon_rmap because the caller
+	 * always holds the page locked, except if called from page_dup_rmap,
+	 * in which case the page is already known to be setup.
+	 *
+	 * We have exclusion against page_add_new_anon_rmap because those pages
+	 * are initially only visible via the pagetables, and the pte is locked
+	 * over the call to page_add_new_anon_rmap.
+	 */
+	struct anon_vma *anon_vma = vma->anon_vma;
+	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
+	BUG_ON(page->mapping != (struct address_space *)anon_vma);
+	BUG_ON(page->index != linear_page_index(vma, address));
+#endif
+}
+
 /**
  * page_add_anon_rmap - add pte mapping to an anonymous page
  * @page: the page to add the mapping to
  * @vma: the vm area in which the mapping is added
  * @address: the user virtual address mapped
  *
- * The caller needs to hold the pte lock.
+ * The caller needs to hold the pte lock and the page must be locked.
  */
 void page_add_anon_rmap(struct page *page,
 	struct vm_area_struct *vma, unsigned long address)
 {
+	VM_BUG_ON(!PageLocked(page));
+	VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
 	if (atomic_inc_and_test(&page->_mapcount))
 		__page_set_anon_rmap(page, vma, address);
-	/* else checking page index and mapping is racy */
+	else
+		__page_check_anon_rmap(page, vma, address);
 }
 
 /*
@@ -553,10 +585,12 @@ void page_add_anon_rmap(struct page *page,
  *
  * Same as page_add_anon_rmap but must only be called on *new* pages.
  * This means the inc-and-test can be bypassed.
+ * Page does not have to be locked.
  */
 void page_add_new_anon_rmap(struct page *page,
 	struct vm_area_struct *vma, unsigned long address)
 {
+	BUG_ON(address < vma->vm_start || address >= vma->vm_end);
 	atomic_set(&page->_mapcount, 0); /* elevate count by 1 (starts at -1) */
 	__page_set_anon_rmap(page, vma, address);
 }
@@ -573,6 +607,26 @@ void page_add_file_rmap(struct page *page)
 		__inc_zone_page_state(page, NR_FILE_MAPPED);
 }
 
+#ifdef CONFIG_DEBUG_VM
+/**
+ * page_dup_rmap - duplicate pte mapping to a page
+ * @page: the page to add the mapping to
+ *
+ * For copy_page_range only: minimal extract from page_add_file_rmap /
+ * page_add_anon_rmap, avoiding unnecessary tests (already checked) so it's
+ * quicker.
+ *
+ * The caller needs to hold the pte lock.
+ */
+void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address)
+{
+	BUG_ON(page_mapcount(page) == 0);
+	if (PageAnon(page))
+		__page_check_anon_rmap(page, vma, address);
+	atomic_inc(&page->_mapcount);
+}
+#endif
+
 /**
  * page_remove_rmap - take down pte mapping from a page
  * @page: page to remove mapping from
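
Note the cost profile of this patch: VM_BUG_ON compiles to nothing unless
CONFIG_DEBUG_VM is set, and page_dup_rmap() keeps its old one-line inline form
in that case too, so production kernels pay nothing for the new checks.
Roughly, paraphrasing the VM_BUG_ON definition of this vintage:

    /* A full BUG_ON with CONFIG_DEBUG_VM, a no-op without it. */
    #ifdef CONFIG_DEBUG_VM
    #define VM_BUG_ON(cond)	BUG_ON(cond)
    #else
    #define VM_BUG_ON(cond)	do { } while (0)
    #endif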
