@@ -172,10 +172,23 @@ extern pgd_t swapper_pg_dir[];
172172#define __S110 PAGE_SHARED_EXEC
173173#define __S111 PAGE_SHARED_EXEC
174174
175+ #ifdef CONFIG_TRANSPARENT_HUGEPAGE
176+ static inline int pmd_present (pmd_t pmd )
177+ {
178+ /*
179+ * Checking for _PAGE_LEAF is needed too because:
180+ * When splitting a THP, split_huge_page() will temporarily clear
181+ * the present bit, in this situation, pmd_present() and
182+ * pmd_trans_huge() still needs to return true.
183+ */
184+ return (pmd_val (pmd ) & (_PAGE_PRESENT | _PAGE_PROT_NONE | _PAGE_LEAF ));
185+ }
186+ #else
175187static inline int pmd_present (pmd_t pmd )
176188{
177189 return (pmd_val (pmd ) & (_PAGE_PRESENT | _PAGE_PROT_NONE ));
178190}
191+ #endif
179192
180193static inline int pmd_none (pmd_t pmd )
181194{
@@ -369,6 +382,14 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
369382 local_flush_tlb_page (address );
370383}
371384
385+ static inline void update_mmu_cache_pmd (struct vm_area_struct * vma ,
386+ unsigned long address , pmd_t * pmdp )
387+ {
388+ pte_t * ptep = (pte_t * )pmdp ;
389+
390+ update_mmu_cache (vma , address , ptep );
391+ }
392+
372393#define __HAVE_ARCH_PTE_SAME
373394static inline int pte_same (pte_t pte_a , pte_t pte_b )
374395{
@@ -462,6 +483,141 @@ static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
462483 return ptep_test_and_clear_young (vma , address , ptep );
463484}
464485
/*
 * Transparent-hugepage (THP) helpers.  PMD entries share the PTE bit
 * layout on riscv, so most of these recast the PMD and defer to the
 * corresponding PTE operation.
 */
489+ static inline pmd_t pte_pmd (pte_t pte )
490+ {
491+ return __pmd (pte_val (pte ));
492+ }
493+
494+ static inline pmd_t pmd_mkhuge (pmd_t pmd )
495+ {
496+ return pmd ;
497+ }
498+
499+ static inline pmd_t pmd_mkinvalid (pmd_t pmd )
500+ {
501+ return __pmd (pmd_val (pmd ) & ~(_PAGE_PRESENT |_PAGE_PROT_NONE ));
502+ }
503+
504+ #define __pmd_to_phys (pmd ) (pmd_val(pmd) >> _PAGE_PFN_SHIFT << PAGE_SHIFT)
505+
506+ static inline unsigned long pmd_pfn (pmd_t pmd )
507+ {
508+ return ((__pmd_to_phys (pmd ) & PMD_MASK ) >> PAGE_SHIFT );
509+ }
510+
511+ static inline pmd_t mk_pmd (struct page * page , pgprot_t prot )
512+ {
513+ return pfn_pmd (page_to_pfn (page ), prot );
514+ }
515+
516+ static inline pmd_t pmd_modify (pmd_t pmd , pgprot_t newprot )
517+ {
518+ return pte_pmd (pte_modify (pmd_pte (pmd ), newprot ));
519+ }
520+
521+ #define pmd_write pmd_write
522+ static inline int pmd_write (pmd_t pmd )
523+ {
524+ return pte_write (pmd_pte (pmd ));
525+ }
526+
527+ static inline int pmd_dirty (pmd_t pmd )
528+ {
529+ return pte_dirty (pmd_pte (pmd ));
530+ }
531+
532+ static inline int pmd_young (pmd_t pmd )
533+ {
534+ return pte_young (pmd_pte (pmd ));
535+ }
536+
537+ static inline pmd_t pmd_mkold (pmd_t pmd )
538+ {
539+ return pte_pmd (pte_mkold (pmd_pte (pmd )));
540+ }
541+
542+ static inline pmd_t pmd_mkyoung (pmd_t pmd )
543+ {
544+ return pte_pmd (pte_mkyoung (pmd_pte (pmd )));
545+ }
546+
547+ static inline pmd_t pmd_mkwrite (pmd_t pmd )
548+ {
549+ return pte_pmd (pte_mkwrite (pmd_pte (pmd )));
550+ }
551+
552+ static inline pmd_t pmd_wrprotect (pmd_t pmd )
553+ {
554+ return pte_pmd (pte_wrprotect (pmd_pte (pmd )));
555+ }
556+
557+ static inline pmd_t pmd_mkclean (pmd_t pmd )
558+ {
559+ return pte_pmd (pte_mkclean (pmd_pte (pmd )));
560+ }
561+
562+ static inline pmd_t pmd_mkdirty (pmd_t pmd )
563+ {
564+ return pte_pmd (pte_mkdirty (pmd_pte (pmd )));
565+ }
566+
567+ static inline void set_pmd_at (struct mm_struct * mm , unsigned long addr ,
568+ pmd_t * pmdp , pmd_t pmd )
569+ {
570+ return set_pte_at (mm , addr , (pte_t * )pmdp , pmd_pte (pmd ));
571+ }
572+
573+ #ifdef CONFIG_TRANSPARENT_HUGEPAGE
574+ static inline int pmd_trans_huge (pmd_t pmd )
575+ {
576+ return pmd_leaf (pmd );
577+ }
578+
579+ #define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
580+ static inline int pmdp_set_access_flags (struct vm_area_struct * vma ,
581+ unsigned long address , pmd_t * pmdp ,
582+ pmd_t entry , int dirty )
583+ {
584+ return ptep_set_access_flags (vma , address , (pte_t * )pmdp , pmd_pte (entry ), dirty );
585+ }
586+
587+ #define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
588+ static inline int pmdp_test_and_clear_young (struct vm_area_struct * vma ,
589+ unsigned long address , pmd_t * pmdp )
590+ {
591+ return ptep_test_and_clear_young (vma , address , (pte_t * )pmdp );
592+ }
593+
594+ #define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
595+ static inline pmd_t pmdp_huge_get_and_clear (struct mm_struct * mm ,
596+ unsigned long address , pmd_t * pmdp )
597+ {
598+ return pte_pmd (ptep_get_and_clear (mm , address , (pte_t * )pmdp ));
599+ }
600+
601+ #define __HAVE_ARCH_PMDP_SET_WRPROTECT
602+ static inline void pmdp_set_wrprotect (struct mm_struct * mm ,
603+ unsigned long address , pmd_t * pmdp )
604+ {
605+ ptep_set_wrprotect (mm , address , (pte_t * )pmdp );
606+ }
607+
608+ #define pmdp_establish pmdp_establish
609+ static inline pmd_t pmdp_establish (struct vm_area_struct * vma ,
610+ unsigned long address , pmd_t * pmdp , pmd_t pmd )
611+ {
612+ return __pmd (atomic_long_xchg ((atomic_long_t * )pmdp , pmd_val (pmd )));
613+ }
614+
615+ #define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
616+ void flush_pmd_tlb_range (struct vm_area_struct * vma , unsigned long start ,
617+ unsigned long end );
618+
619+ #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
620+
465621/*
466622 * Encode and decode a swap entry
467623 *
0 commit comments