@@ -410,6 +410,24 @@ static inline pmd_t pmdp_flush_lazy(struct mm_struct *mm,
 	return old;
 }
 
+static pmd_t *pmd_alloc_map(struct mm_struct *mm, unsigned long addr)
+{
+	pgd_t *pgd;
+	p4d_t *p4d;
+	pud_t *pud;
+	pmd_t *pmd;
+
+	pgd = pgd_offset(mm, addr);
+	p4d = p4d_alloc(mm, pgd, addr);
+	if (!p4d)
+		return NULL;
+	pud = pud_alloc(mm, p4d, addr);
+	if (!pud)
+		return NULL;
+	pmd = pmd_alloc(mm, pud, addr);
+	return pmd;
+}
+
 pmd_t pmdp_xchg_direct(struct mm_struct *mm, unsigned long addr,
 		       pmd_t *pmdp, pmd_t new)
 {
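
The new pmd_alloc_map() helper walks the page table from the pgd down to the pmd level, allocating the intermediate p4d/pud/pmd tables on demand and returning NULL if any allocation fails. The storage-key functions converted below all pair it with the same locking sequence: take the pmd lock, handle a missing or large pmd directly, and only fall back to the pte level for normally mapped memory. A compressed sketch of that caller pattern, assuming kernel context; do_huge() and do_pte() are hypothetical placeholders for the per-function work and are not part of this patch:

/*
 * Sketch of the pmd-first pattern shared by the converted callers below.
 * do_huge() and do_pte() are invented placeholders; the real functions
 * inline the per-key work instead.
 */
static int guest_key_walk(struct mm_struct *mm, unsigned long addr)
{
	spinlock_t *ptl;
	pmd_t *pmdp;
	pte_t *ptep;
	int rc;

	pmdp = pmd_alloc_map(mm, addr);		/* allocate down to the pmd */
	if (unlikely(!pmdp))
		return -EFAULT;

	ptl = pmd_lock(mm, pmdp);		/* serialize against pmd changes */
	if (!pmd_present(*pmdp)) {
		spin_unlock(ptl);
		return -EFAULT;			/* get_guest_storage_key() reports key 0 instead */
	}
	if (pmd_large(*pmdp)) {			/* 1 MB segment: operate on the real page directly */
		rc = do_huge(pmdp, addr);
		spin_unlock(ptl);
		return rc;
	}
	spin_unlock(ptl);			/* normal mapping: drop to the pte level */

	ptep = pte_alloc_map_lock(mm, pmdp, addr, &ptl);
	if (unlikely(!ptep))
		return -EFAULT;
	rc = do_pte(ptep, addr);		/* pgste/storage-key work under the pte lock */
	pte_unmap_unlock(ptep, ptl);
	return rc;
}
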
@@ -734,12 +752,36 @@ EXPORT_SYMBOL_GPL(ptep_test_and_clear_uc);
 int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
 			  unsigned char key, bool nq)
 {
-	unsigned long keyul;
+	unsigned long keyul, paddr;
 	spinlock_t *ptl;
 	pgste_t old, new;
+	pmd_t *pmdp;
 	pte_t *ptep;
 
-	ptep = get_locked_pte(mm, addr, &ptl);
+	pmdp = pmd_alloc_map(mm, addr);
+	if (unlikely(!pmdp))
+		return -EFAULT;
+
+	ptl = pmd_lock(mm, pmdp);
+	if (!pmd_present(*pmdp)) {
+		spin_unlock(ptl);
+		return -EFAULT;
+	}
+
+	if (pmd_large(*pmdp)) {
+		paddr = pmd_val(*pmdp) & HPAGE_MASK;
+		paddr |= addr & ~HPAGE_MASK;
+		/*
+		 * Huge pmds need quiescing operations, they are
+		 * always mapped.
+		 */
+		page_set_storage_key(paddr, key, 1);
+		spin_unlock(ptl);
+		return 0;
+	}
+	spin_unlock(ptl);
+
+	ptep = pte_alloc_map_lock(mm, pmdp, addr, &ptl);
 	if (unlikely(!ptep))
 		return -EFAULT;
 
@@ -750,14 +792,14 @@ int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
 	pgste_val(new) |= (keyul & (_PAGE_CHANGED | _PAGE_REFERENCED)) << 48;
 	pgste_val(new) |= (keyul & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
 	if (!(pte_val(*ptep) & _PAGE_INVALID)) {
-		unsigned long address, bits, skey;
+		unsigned long bits, skey;
 
-		address = pte_val(*ptep) & PAGE_MASK;
-		skey = (unsigned long) page_get_storage_key(address);
+		paddr = pte_val(*ptep) & PAGE_MASK;
+		skey = (unsigned long) page_get_storage_key(paddr);
 		bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
 		skey = key & (_PAGE_ACC_BITS | _PAGE_FP_BIT);
 		/* Set storage key ACC and FP */
-		page_set_storage_key(address, skey, !nq);
+		page_set_storage_key(paddr, skey, !nq);
 		/* Merge host changed & referenced into pgste */
 		pgste_val(new) |= bits << 52;
 	}
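
All the huge-pmd branches derive the physical address the same way: mask the segment origin out of the pmd value and OR in the offset of addr within the segment, so the storage-key operation hits the 4 KB block the guest actually touched. A stand-alone illustration of that arithmetic; HPAGE_SHIFT is assumed to be 20 (1 MB segments on s390) and the pmd origin value is invented:

/* Stand-alone model of the paddr computation in the pmd_large() branches. */
#include <stdio.h>

#define HPAGE_SHIFT	20			/* assumed: 1 MB segments */
#define HPAGE_SIZE	(1UL << HPAGE_SHIFT)
#define HPAGE_MASK	(~(HPAGE_SIZE - 1))

int main(void)
{
	unsigned long pmd_origin = 0x12300000UL;	/* hypothetical segment origin from the pmd */
	unsigned long addr = 0x000abcdeUL;		/* address handed to set_guest_storage_key() */
	unsigned long paddr;

	paddr = pmd_origin & HPAGE_MASK;	/* 1 MB aligned segment base */
	paddr |= addr & ~HPAGE_MASK;		/* byte offset within the segment */

	printf("paddr = 0x%lx\n", paddr);	/* prints 0x123abcde */
	return 0;
}
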
@@ -813,11 +855,32 @@ EXPORT_SYMBOL(cond_set_guest_storage_key);
 int reset_guest_reference_bit(struct mm_struct *mm, unsigned long addr)
 {
 	spinlock_t *ptl;
+	unsigned long paddr;
 	pgste_t old, new;
+	pmd_t *pmdp;
 	pte_t *ptep;
 	int cc = 0;
 
-	ptep = get_locked_pte(mm, addr, &ptl);
+	pmdp = pmd_alloc_map(mm, addr);
+	if (unlikely(!pmdp))
+		return -EFAULT;
+
+	ptl = pmd_lock(mm, pmdp);
+	if (!pmd_present(*pmdp)) {
+		spin_unlock(ptl);
+		return -EFAULT;
+	}
+
+	if (pmd_large(*pmdp)) {
+		paddr = pmd_val(*pmdp) & HPAGE_MASK;
+		paddr |= addr & ~HPAGE_MASK;
+		cc = page_reset_referenced(paddr);
+		spin_unlock(ptl);
+		return cc;
+	}
+	spin_unlock(ptl);
+
+	ptep = pte_alloc_map_lock(mm, pmdp, addr, &ptl);
 	if (unlikely(!ptep))
 		return -EFAULT;
 
@@ -826,7 +889,8 @@ int reset_guest_reference_bit(struct mm_struct *mm, unsigned long addr)
 	pgste_val(new) &= ~PGSTE_GR_BIT;
 
 	if (!(pte_val(*ptep) & _PAGE_INVALID)) {
-		cc = page_reset_referenced(pte_val(*ptep) & PAGE_MASK);
+		paddr = pte_val(*ptep) & PAGE_MASK;
+		cc = page_reset_referenced(paddr);
 		/* Merge real referenced bit into host-set */
 		pgste_val(new) |= ((unsigned long) cc << 53) & PGSTE_HR_BIT;
 	}
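
For a normally mapped page the function still issues RRBE through page_reset_referenced() and folds the returned condition code into the host referenced bit of the pgste: bit 1 of the condition code reports that the reference bit was set, and shifting it left by 53 lands exactly on PGSTE_HR_BIT (1UL << 54). A stand-alone check of that bit manipulation; the PGSTE_HR_BIT value is reproduced from the s390 headers only so the example compiles on its own:

/* Folding the RRBE condition code into the pgste host-referenced bit. */
#include <stdio.h>

#define PGSTE_HR_BIT	0x0040000000000000UL	/* host referenced bit (1UL << 54) */

int main(void)
{
	int cc;

	/* RRBE condition code: bit 1 = reference bit was set, bit 0 = change bit was set */
	for (cc = 0; cc < 4; cc++) {
		unsigned long hr = ((unsigned long) cc << 53) & PGSTE_HR_BIT;
		printf("cc=%d -> PGSTE_HR_BIT %s\n", cc, hr ? "set" : "clear");
	}
	return 0;	/* cc 2 and 3 set the bit, cc 0 and 1 do not */
}
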
@@ -845,18 +909,42 @@ EXPORT_SYMBOL(reset_guest_reference_bit);
 int get_guest_storage_key(struct mm_struct *mm, unsigned long addr,
 			  unsigned char *key)
 {
+	unsigned long paddr;
 	spinlock_t *ptl;
 	pgste_t pgste;
+	pmd_t *pmdp;
 	pte_t *ptep;
 
-	ptep = get_locked_pte(mm, addr, &ptl);
+	pmdp = pmd_alloc_map(mm, addr);
+	if (unlikely(!pmdp))
+		return -EFAULT;
+
+	ptl = pmd_lock(mm, pmdp);
+	if (!pmd_present(*pmdp)) {
+		/* Not yet mapped memory has a zero key */
+		spin_unlock(ptl);
+		*key = 0;
+		return 0;
+	}
+
+	if (pmd_large(*pmdp)) {
+		paddr = pmd_val(*pmdp) & HPAGE_MASK;
+		paddr |= addr & ~HPAGE_MASK;
+		*key = page_get_storage_key(paddr);
+		spin_unlock(ptl);
+		return 0;
+	}
+	spin_unlock(ptl);
+
+	ptep = pte_alloc_map_lock(mm, pmdp, addr, &ptl);
 	if (unlikely(!ptep))
 		return -EFAULT;
 
 	pgste = pgste_get_lock(ptep);
 	*key = (pgste_val(pgste) & (PGSTE_ACC_BITS | PGSTE_FP_BIT)) >> 56;
+	paddr = pte_val(*ptep) & PAGE_MASK;
 	if (!(pte_val(*ptep) & _PAGE_INVALID))
-		*key = page_get_storage_key(pte_val(*ptep) & PAGE_MASK);
+		*key = page_get_storage_key(paddr);
 	/* Reflect guest's logical view, not physical */
 	*key |= (pgste_val(pgste) & (PGSTE_GR_BIT | PGSTE_GC_BIT)) >> 48;
 	pgste_set_unlock(ptep, pgste);
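
get_guest_storage_key() keeps returning the guest's logical view of the key: ACC and FP come from the pgste (or from the real storage key when the page is mapped), while the guest referenced and changed bits are always taken from the pgste's GR/GC bits. A stand-alone sketch of how the key byte is assembled; the PGSTE_* constants are copied from the s390 headers only so the example compiles, and the sample pgste value is invented:

/* Assembling the key byte returned by get_guest_storage_key(). */
#include <stdio.h>

#define PGSTE_ACC_BITS	0xf000000000000000UL	/* guest access-control bits */
#define PGSTE_FP_BIT	0x0800000000000000UL	/* guest fetch-protection bit */
#define PGSTE_GR_BIT	0x0004000000000000UL	/* guest referenced bit */
#define PGSTE_GC_BIT	0x0002000000000000UL	/* guest changed bit */

int main(void)
{
	/* invented pgste: ACC = 0xa, FP = 1, GR = 1, GC = 0 */
	unsigned long pgste = (0xaUL << 60) | PGSTE_FP_BIT | PGSTE_GR_BIT;
	unsigned char key;

	key = (pgste & (PGSTE_ACC_BITS | PGSTE_FP_BIT)) >> 56;	/* ACC and F in bits 0xf8 */
	key |= (pgste & (PGSTE_GR_BIT | PGSTE_GC_BIT)) >> 48;	/* R (0x04) and C (0x02) */

	printf("key = 0x%02x\n", key);	/* 0xac: ACC=0xa, F=1, R=1, C=0 */
	return 0;
}
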