@@ -529,20 +529,52 @@ static void __page_set_anon_rmap(struct page *page,
 	__inc_zone_page_state(page, NR_ANON_PAGES);
 }
 
+/**
+ * __page_check_anon_rmap - sanity check anonymous rmap addition
+ * @page: the page to add the mapping to
+ * @vma: the vm area in which the mapping is added
+ * @address: the user virtual address mapped
+ */
+static void __page_check_anon_rmap(struct page *page,
+	struct vm_area_struct *vma, unsigned long address)
+{
+#ifdef CONFIG_DEBUG_VM
+	/*
+	 * The page's anon-rmap details (mapping and index) are guaranteed to
+	 * be set up correctly at this point.
+	 *
+	 * We have exclusion against page_add_anon_rmap because the caller
+	 * always holds the page locked, except if called from page_dup_rmap,
+	 * in which case the page is already known to be set up.
+	 *
+	 * We have exclusion against page_add_new_anon_rmap because those pages
+	 * are initially only visible via the pagetables, and the pte is locked
+	 * over the call to page_add_new_anon_rmap.
+	 */
+	struct anon_vma *anon_vma = vma->anon_vma;
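+	/*
+	 * Editorial note (not in the original patch): an anonymous page
+	 * stores its anon_vma in page->mapping with the PAGE_MAPPING_ANON
+	 * low bit set, so reconstruct that tagged value for the comparison.
+	 */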
+	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
+	BUG_ON(page->mapping != (struct address_space *)anon_vma);
+	BUG_ON(page->index != linear_page_index(vma, address));
+#endif
+}
+
 /**
  * page_add_anon_rmap - add pte mapping to an anonymous page
  * @page: the page to add the mapping to
  * @vma: the vm area in which the mapping is added
  * @address: the user virtual address mapped
  *
- * The caller needs to hold the pte lock.
+ * The caller needs to hold the pte lock, and the page must be locked.
  */
 void page_add_anon_rmap(struct page *page,
 	struct vm_area_struct *vma, unsigned long address)
 {
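+	/*
+	 * Editorial note (not in the original patch): these assertions
+	 * enforce the contract documented above; VM_BUG_ON compiles away
+	 * unless CONFIG_DEBUG_VM is enabled.
+	 */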
+	VM_BUG_ON(!PageLocked(page));
+	VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
 	if (atomic_inc_and_test(&page->_mapcount))
 		__page_set_anon_rmap(page, vma, address);
-	/* else checking page index and mapping is racy */
+	else
+		__page_check_anon_rmap(page, vma, address);
 }
 
 /*
@@ -553,10 +585,12 @@ void page_add_anon_rmap(struct page *page,
  *
  * Same as page_add_anon_rmap but must only be called on *new* pages.
  * This means the inc-and-test can be bypassed.
+ * Page does not have to be locked.
  */
 void page_add_new_anon_rmap(struct page *page,
 	struct vm_area_struct *vma, unsigned long address)
 {
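+	/*
+	 * Editorial note (not in the original patch): no PageLocked check is
+	 * needed here; as the comment in __page_check_anon_rmap explains, a
+	 * new page is only visible via the pagetables, and the pte lock is
+	 * held across this call.
+	 */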
+	BUG_ON(address < vma->vm_start || address >= vma->vm_end);
 	atomic_set(&page->_mapcount, 0); /* elevate count by 1 (starts at -1) */
 	__page_set_anon_rmap(page, vma, address);
 }
@@ -573,6 +607,26 @@ void page_add_file_rmap(struct page *page)
 	__inc_zone_page_state(page, NR_FILE_MAPPED);
 }
 
+#ifdef CONFIG_DEBUG_VM
+/**
+ * page_dup_rmap - duplicate pte mapping to a page
+ * @page: the page to add the mapping to
+ * @vma: the vm area into which the mapping is duplicated
+ * @address: the user virtual address mapped
+ *
+ * For copy_page_range only: minimal extract from page_add_file_rmap /
+ * page_add_anon_rmap, avoiding unnecessary tests (already checked) so it's
+ * quicker.
+ *
+ * The caller needs to hold the pte lock.
+ */
+void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address)
+{
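+	/*
+	 * Editorial note (not in the original patch): copy_page_range only
+	 * duplicates ptes that already map this page, so its mapcount must
+	 * already be positive when we get here.
+	 */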
+	BUG_ON(page_mapcount(page) == 0);
+	if (PageAnon(page))
+		__page_check_anon_rmap(page, vma, address);
+	atomic_inc(&page->_mapcount);
+}
+#endif
+
 /**
  * page_remove_rmap - take down pte mapping from a page
  * @page: page to remove mapping from
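Editorial note: the hunk above builds the out-of-line page_dup_rmap() only
under CONFIG_DEBUG_VM, so non-debug kernels are assumed to get an inline
counterpart from include/linux/rmap.h. A minimal sketch of what that header
stub would look like (the header change is not shown in this diff, so treat
the exact form as an assumption):

#ifdef CONFIG_DEBUG_VM
void page_dup_rmap(struct page *page, struct vm_area_struct *vma,
		unsigned long address);
#else
static inline void page_dup_rmap(struct page *page,
		struct vm_area_struct *vma, unsigned long address)
{
	/* Non-debug build: just take the extra reference, no checking. */
	atomic_inc(&page->_mapcount);
}
#endif

Either way, copy_page_range keeps a single call site and pays for the extra
sanity checking only when CONFIG_DEBUG_VM is enabled.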