@@ -1239,6 +1239,11 @@ static inline bool cpu_has_vmx_invvpid_global(void)
 	return vmx_capability.vpid & VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT;
 }
 
+static inline bool cpu_has_vmx_invvpid(void)
+{
+	return vmx_capability.vpid & VMX_VPID_INVVPID_BIT;
+}
+
 static inline bool cpu_has_vmx_ept(void)
 {
 	return vmcs_config.cpu_based_2nd_exec_ctrl &
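Note: the vpid capability word tested by these helpers is the high half of the IA32_VMX_EPT_VPID_CAP MSR, which setup_vmcs_config() reads into vmx_capability.vpid. A recalled sketch of the bit definitions involved (from asm/vmx.h of this era; bit positions should be verified against the tree and the Intel SDM):

	/* INVVPID capability bits, relative to vmx_capability.vpid */
	#define VMX_VPID_INVVPID_BIT                 (1ull << 0)  /* INVVPID supported */
	#define VMX_VPID_EXTENT_INDIVIDUAL_ADDR_BIT  (1ull << 8)  /* individual-address */
	#define VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT   (1ull << 9)  /* single-context */
	#define VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT   (1ull << 10) /* all-context */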
@@ -2753,7 +2758,6 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
 		SECONDARY_EXEC_RDTSCP |
 		SECONDARY_EXEC_DESC |
 		SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
-		SECONDARY_EXEC_ENABLE_VPID |
 		SECONDARY_EXEC_APIC_REGISTER_VIRT |
 		SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
 		SECONDARY_EXEC_WBINVD_EXITING |
@@ -2781,10 +2785,12 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
 	 * though it is treated as global context.  The alternative is
 	 * not failing the single-context invvpid, and it is worse.
 	 */
-	if (enable_vpid)
+	if (enable_vpid) {
+		vmx->nested.nested_vmx_secondary_ctls_high |=
+			SECONDARY_EXEC_ENABLE_VPID;
 		vmx->nested.nested_vmx_vpid_caps = VMX_VPID_INVVPID_BIT |
 			VMX_VPID_EXTENT_SUPPORTED_MASK;
-	else
+	} else
 		vmx->nested.nested_vmx_vpid_caps = 0;
 
 	if (enable_unrestricted_guest)
@@ -4024,6 +4030,12 @@ static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
 	__vmx_flush_tlb(vcpu, to_vmx(vcpu)->vpid);
 }
 
+static void vmx_flush_tlb_ept_only(struct kvm_vcpu *vcpu)
+{
+	if (enable_ept)
+		vmx_flush_tlb(vcpu);
+}
+
 static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
 {
 	ulong cr0_guest_owned_bits = vcpu->arch.cr0_guest_owned_bits;
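Note: the new wrapper flushes only under EPT because its callers below invalidate guest-physical mappings of the APIC-access page; without EPT there is nothing comparable to flush at those sites. For reference, a recalled sketch of the underlying helper in this file (not part of the diff; verify against the surrounding source):

	static inline void __vmx_flush_tlb(struct kvm_vcpu *vcpu, int vpid)
	{
		/* invalidate combined mappings tagged with this VPID */
		vpid_sync_context(vpid);
		if (enable_ept) {
			if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
				return;
			/* also drop guest-physical mappings for the current EPTP */
			ept_sync_context(construct_eptp(vcpu->arch.mmu.root_hpa));
		}
	}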
@@ -6517,8 +6529,10 @@ static __init int hardware_setup(void)
 	if (boot_cpu_has(X86_FEATURE_NX))
 		kvm_enable_efer_bits(EFER_NX);
 
-	if (!cpu_has_vmx_vpid())
+	if (!cpu_has_vmx_vpid() || !cpu_has_vmx_invvpid() ||
+	    !(cpu_has_vmx_invvpid_single() || cpu_has_vmx_invvpid_global()))
 		enable_vpid = 0;
+
 	if (!cpu_has_vmx_shadow_vmcs())
 		enable_shadow_vmcs = 0;
 	if (enable_shadow_vmcs)
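Note: the stricter gate encodes that VPID is only safe when it can actually be invalidated. The flush path in this file falls back from single-context to global INVVPID, roughly as below (recalled sketch; verify locally), so if INVVPID itself or both invalidation types are missing, stale VPID-tagged translations could never be flushed and enable_vpid must be cleared:

	static inline void vpid_sync_context(int vpid)
	{
		if (cpu_has_vmx_invvpid_single())
			vpid_sync_vcpu_single(vpid);
		else
			vpid_sync_vcpu_global();
	}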
@@ -8501,7 +8515,8 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
 	    && kvm_vmx_exit_handlers[exit_reason])
 		return kvm_vmx_exit_handlers[exit_reason](vcpu);
 	else {
-		WARN_ONCE(1, "vmx: unexpected exit reason 0x%x\n", exit_reason);
+		vcpu_unimpl(vcpu, "vmx: unexpected exit reason 0x%x\n",
+				exit_reason);
 		kvm_queue_exception(vcpu, UD_VECTOR);
 		return 1;
 	}
@@ -8547,6 +8562,7 @@ static void vmx_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set)
 	} else {
 		sec_exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
 		sec_exec_control |= SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
+		vmx_flush_tlb_ept_only(vcpu);
 	}
 	vmcs_write32(SECONDARY_VM_EXEC_CONTROL, sec_exec_control);
 
@@ -8572,8 +8588,10 @@ static void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu, hpa_t hpa)
 	 */
 	if (!is_guest_mode(vcpu) ||
 	    !nested_cpu_has2(get_vmcs12(&vmx->vcpu),
-			     SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
+			     SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
 		vmcs_write64(APIC_ACCESS_ADDR, hpa);
+		vmx_flush_tlb_ept_only(vcpu);
+	}
 }
 
 static void vmx_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr)
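Note: once APIC_ACCESS_ADDR is rewritten (the backing page moved, or x2APIC mode was toggled in the previous hunk), EPT may still hold cached translations for the old page, hence the flush. A hypothetical condensed view of the ordering, with an illustrative helper name that is not in the source:

	/* set_l1_apic_access_page() is hypothetical; it only shows the order */
	static void set_l1_apic_access_page(struct kvm_vcpu *vcpu, hpa_t hpa)
	{
		vmcs_write64(APIC_ACCESS_ADDR, hpa);	/* retarget the access page */
		vmx_flush_tlb_ept_only(vcpu);		/* no-op unless enable_ept */
	}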
@@ -9974,7 +9992,6 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	u32 exec_control;
-	bool nested_ept_enabled = false;
 
 	vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector);
 	vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector);
@@ -10121,8 +10138,6 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
 			vmcs12->guest_intr_status);
 	}
 
-	nested_ept_enabled = (exec_control & SECONDARY_EXEC_ENABLE_EPT) != 0;
-
 	/*
 	 * Write an illegal value to APIC_ACCESS_ADDR. Later,
 	 * nested_get_vmcs12_pages will either fix it up or
@@ -10255,6 +10270,9 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
 	if (nested_cpu_has_ept(vmcs12)) {
 		kvm_mmu_unload(vcpu);
 		nested_ept_init_mmu_context(vcpu);
+	} else if (nested_cpu_has2(vmcs12,
+				   SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
+		vmx_flush_tlb_ept_only(vcpu);
 	}
 
 	/*
@@ -10282,12 +10300,10 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
 	vmx_set_efer(vcpu, vcpu->arch.efer);
 
 	/* Shadow page tables on either EPT or shadow page tables. */
-	if (nested_vmx_load_cr3(vcpu, vmcs12->guest_cr3, nested_ept_enabled,
+	if (nested_vmx_load_cr3(vcpu, vmcs12->guest_cr3, nested_cpu_has_ept(vmcs12),
 				entry_failure_code))
 		return 1;
 
-	kvm_mmu_reset_context(vcpu);
-
 	if (!enable_ept)
 		vcpu->arch.walk_mmu->inject_page_fault = vmx_inject_page_fault_nested;
 
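Note: two related cleanups land here. The nested_ept_enabled local is replaced by querying vmcs12 directly, and the explicit kvm_mmu_reset_context() is dropped because nested_vmx_load_cr3() already resets the MMU context when it installs the new CR3. Recalled sketches of the vmcs12 helpers relied on, as defined elsewhere in this file (verify locally):

	static inline bool nested_cpu_has2(struct vmcs12 *vmcs12, u32 bit)
	{
		return (vmcs12->cpu_based_vm_exec_control &
				CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) &&
			(vmcs12->secondary_vm_exec_control & bit);
	}

	static inline bool nested_cpu_has_ept(struct vmcs12 *vmcs12)
	{
		return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_EPT);
	}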
@@ -11056,6 +11072,10 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
 		vmx->nested.change_vmcs01_virtual_x2apic_mode = false;
 		vmx_set_virtual_x2apic_mode(vcpu,
 				vcpu->arch.apic_base & X2APIC_ENABLE);
+	} else if (!nested_cpu_has_ept(vmcs12) &&
+		   nested_cpu_has2(vmcs12,
+				   SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
+		vmx_flush_tlb_ept_only(vcpu);
 	}
 
 	/* This is needed for same reason as it was needed in prepare_vmcs02 */