@@ -4724,6 +4724,20 @@ static void paging32E_init_context(struct kvm_vcpu *vcpu,
         paging64_init_context_common(vcpu, context, PT32E_ROOT_LEVEL);
 }
 
+static union kvm_mmu_extended_role kvm_calc_mmu_role_ext(struct kvm_vcpu *vcpu)
+{
+        union kvm_mmu_extended_role ext = {0};
+
+        ext.cr4_smep = !!kvm_read_cr4_bits(vcpu, X86_CR4_SMEP);
+        ext.cr4_smap = !!kvm_read_cr4_bits(vcpu, X86_CR4_SMAP);
+        ext.cr4_pse = !!is_pse(vcpu);
+        ext.cr4_pke = !!kvm_read_cr4_bits(vcpu, X86_CR4_PKE);
+
+        ext.valid = 1;
+
+        return ext;
+}
+
 static union kvm_mmu_page_role
 kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu)
 {
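
For reference, kvm_calc_mmu_role_ext() above fills the extended half of the new two-part role. The unions it builds on were added to arch/x86/include/asm/kvm_host.h earlier in this series; the declarations below are reconstructed from that series and may not match the merged header bit-for-bit:

union kvm_mmu_extended_role {
        u32 word;
        struct {
                unsigned int valid:1;
                unsigned int execonly:1;
                unsigned int cr4_pse:1;
                unsigned int cr4_pke:1;
                unsigned int cr4_smap:1;
                unsigned int cr4_smep:1;
        };
};

union kvm_mmu_role {
        u64 as_u64;
        struct {
                union kvm_mmu_page_role base;
                union kvm_mmu_extended_role ext;
        };
};

Since base and ext are each 32 bits wide, together they exactly fill as_u64, which is what lets the hunks below compare two complete MMU configurations with a single 64-bit load.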
@@ -4830,19 +4844,23 @@ void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu);
 
-static union kvm_mmu_page_role
-kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty)
+static union kvm_mmu_role
+kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty,
+                                   bool execonly)
 {
-        union kvm_mmu_page_role role;
+        union kvm_mmu_role role;
 
-        /* Role is inherited from root_mmu */
-        role.word = vcpu->arch.root_mmu.base_role.word;
+        /* Base role is inherited from root_mmu */
+        role.base.word = vcpu->arch.root_mmu.mmu_role.base.word;
+        role.ext = kvm_calc_mmu_role_ext(vcpu);
 
-        role.level = PT64_ROOT_4LEVEL;
-        role.direct = false;
-        role.ad_disabled = !accessed_dirty;
-        role.guest_mode = true;
-        role.access = ACC_ALL;
+        role.base.level = PT64_ROOT_4LEVEL;
+        role.base.direct = false;
+        role.base.ad_disabled = !accessed_dirty;
+        role.base.guest_mode = true;
+        role.base.access = ACC_ALL;
+
+        role.ext.execonly = execonly;
 
         return role;
 }
@@ -4851,10 +4869,16 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
                              bool accessed_dirty, gpa_t new_eptp)
 {
         struct kvm_mmu *context = vcpu->arch.mmu;
-        union kvm_mmu_page_role root_page_role =
-                kvm_calc_shadow_ept_root_page_role(vcpu, accessed_dirty);
+        union kvm_mmu_role new_role =
+                kvm_calc_shadow_ept_root_page_role(vcpu, accessed_dirty,
+                                                   execonly);
+
+        __kvm_mmu_new_cr3(vcpu, new_eptp, new_role.base, false);
+
+        new_role.base.word &= mmu_base_role_mask.word;
+        if (new_role.as_u64 == context->mmu_role.as_u64)
+                return;
 
-        __kvm_mmu_new_cr3(vcpu, new_eptp, root_page_role, false);
         context->shadow_root_level = PT64_ROOT_4LEVEL;
 
         context->nx = true;
@@ -4866,8 +4890,7 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
         context->update_pte = ept_update_pte;
         context->root_level = PT64_ROOT_4LEVEL;
         context->direct_map = false;
-        context->mmu_role.base.word =
-                root_page_role.word & mmu_base_role_mask.word;
+        context->mmu_role.as_u64 = new_role.as_u64;
 
         update_permission_bitmask(vcpu, context, true);
         update_pkru_bitmask(vcpu, context, true);
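
To see the new early-return in isolation: below is a minimal, self-contained user-space sketch (hypothetical stand-in types, not kernel code) of how packing a base role and an extended role into one 64-bit union reduces "does the MMU need reconfiguring?" to a single integer compare, mirroring the check added to kvm_init_shadow_ept_mmu() above:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for union kvm_mmu_page_role (a packed 32-bit role in KVM). */
typedef uint32_t mmu_page_role;

union ext_role {
        uint32_t word;
        struct {
                unsigned int valid:1;
                unsigned int cr4_smep:1;
                unsigned int cr4_smap:1;
        };
};

union mmu_role {
        uint64_t as_u64;
        struct {
                mmu_page_role base;     /* 32 bits */
                union ext_role ext;     /* 32 bits */
        };
};

int main(void)
{
        union mmu_role cur = { .as_u64 = 0 }, want = { .as_u64 = 0 };

        cur.ext.valid = 1;
        want.ext.valid = 1;
        want.ext.cr4_smep = 1;          /* the guest toggled CR4.SMEP */

        /* One 64-bit compare covers both the base and extended role. */
        printf("reconfigure: %s\n",
               cur.as_u64 == want.as_u64 ? "no" : "yes");
        return 0;
}

Compiled with any C11 compiler this prints "reconfigure: yes"; drop the cr4_smep line and it prints "no", which is exactly the case where the patched kvm_init_shadow_ept_mmu() now returns early instead of rebuilding the whole context.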