 #include "mmu.h"
 #include "cpuid.h"
 #include "lapic.h"
+#include "hyperv.h"
 
 #include <linux/kvm_host.h>
 #include <linux/module.h>
@@ -889,6 +890,8 @@ struct nested_vmx {
 		bool guest_mode;
 	} smm;
 
+	gpa_t hv_evmcs_vmptr;
+	struct page *hv_evmcs_page;
 	struct hv_enlightened_vmcs *hv_evmcs;
 };
 
@@ -8111,11 +8114,13 @@ static int nested_vmx_failInvalid(struct kvm_vcpu *vcpu)
 static int nested_vmx_failValid(struct kvm_vcpu *vcpu,
 				u32 vm_instruction_error)
 {
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+
 	/*
 	 * failValid writes the error number to the current VMCS, which
 	 * can't be done if there isn't a current VMCS.
 	 */
-	if (to_vmx(vcpu)->nested.current_vmptr == -1ull)
+	if (vmx->nested.current_vmptr == -1ull && !vmx->nested.hv_evmcs)
 		return nested_vmx_failInvalid(vcpu);
 
 	vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
@@ -8441,6 +8446,20 @@ static void vmx_disable_shadow_vmcs(struct vcpu_vmx *vmx)
 	vmcs_write64(VMCS_LINK_POINTER, -1ull);
 }
 
+static inline void nested_release_evmcs(struct kvm_vcpu *vcpu)
+{
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+	if (!vmx->nested.hv_evmcs)
+		return;
+
+	kunmap(vmx->nested.hv_evmcs_page);
+	kvm_release_page_dirty(vmx->nested.hv_evmcs_page);
+	vmx->nested.hv_evmcs_vmptr = -1ull;
+	vmx->nested.hv_evmcs_page = NULL;
+	vmx->nested.hv_evmcs = NULL;
+}
+
 static inline void nested_release_vmcs12(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -8509,6 +8528,8 @@ static void free_nested(struct kvm_vcpu *vcpu)
 
 	kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);
 
+	nested_release_evmcs(vcpu);
+
 	free_loaded_vmcs(&vmx->nested.vmcs02);
 }
 
@@ -8542,12 +8563,18 @@ static int handle_vmclear(struct kvm_vcpu *vcpu)
 		return nested_vmx_failValid(vcpu,
 					    VMXERR_VMCLEAR_VMXON_POINTER);
 
-	if (vmptr == vmx->nested.current_vmptr)
-		nested_release_vmcs12(vcpu);
+	if (vmx->nested.hv_evmcs_page) {
+		if (vmptr == vmx->nested.hv_evmcs_vmptr)
+			nested_release_evmcs(vcpu);
+	} else {
+		if (vmptr == vmx->nested.current_vmptr)
+			nested_release_vmcs12(vcpu);
 
-	kvm_vcpu_write_guest(vcpu,
-			     vmptr + offsetof(struct vmcs12, launch_state),
-			     &zero, sizeof(zero));
+		kvm_vcpu_write_guest(vcpu,
+				     vmptr + offsetof(struct vmcs12,
+						      launch_state),
+				     &zero, sizeof(zero));
+	}
 
 	return nested_vmx_succeed(vcpu);
 }
@@ -8637,6 +8664,8 @@ static int copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx)
 	struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12;
 	struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs;
 
+	vmcs12->hdr.revision_id = evmcs->revision_id;
+
 	/* HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE */
 	vmcs12->tpr_threshold = evmcs->tpr_threshold;
 	vmcs12->guest_rip = evmcs->guest_rip;
@@ -9268,6 +9297,10 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
 		return nested_vmx_failValid(vcpu,
 					    VMXERR_VMPTRLD_VMXON_POINTER);
 
+	/* Forbid normal VMPTRLD if Enlightened version was used */
+	if (vmx->nested.hv_evmcs)
+		return 1;
+
 	if (vmx->nested.current_vmptr != vmptr) {
 		struct vmcs12 *new_vmcs12;
 		struct page *page;
@@ -9301,6 +9334,68 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
 	return nested_vmx_succeed(vcpu);
 }
 
+/*
+ * This is an equivalent of the nested hypervisor executing the vmptrld
+ * instruction.
+ */
+static int nested_vmx_handle_enlightened_vmptrld(struct kvm_vcpu *vcpu)
+{
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+	struct hv_vp_assist_page assist_page;
+
+	if (likely(!vmx->nested.enlightened_vmcs_enabled))
+		return 1;
+
+	if (unlikely(!kvm_hv_get_assist_page(vcpu, &assist_page)))
+		return 1;
+
+	if (unlikely(!assist_page.enlighten_vmentry))
+		return 1;
+
+	if (unlikely(assist_page.current_nested_vmcs !=
+		     vmx->nested.hv_evmcs_vmptr)) {
+
+		if (!vmx->nested.hv_evmcs)
+			vmx->nested.current_vmptr = -1ull;
+
+		nested_release_evmcs(vcpu);
+
+		vmx->nested.hv_evmcs_page = kvm_vcpu_gpa_to_page(
+			vcpu, assist_page.current_nested_vmcs);
+
+		if (unlikely(is_error_page(vmx->nested.hv_evmcs_page)))
+			return 0;
+
+		vmx->nested.hv_evmcs = kmap(vmx->nested.hv_evmcs_page);
+
+		if (vmx->nested.hv_evmcs->revision_id != VMCS12_REVISION) {
+			nested_release_evmcs(vcpu);
+			return 0;
+		}
+
+		vmx->nested.dirty_vmcs12 = true;
+		/*
+		 * As we keep L2 state for one guest only 'hv_clean_fields' mask
+		 * can't be used when we switch between them. Reset it here for
+		 * simplicity.
+		 */
+		vmx->nested.hv_evmcs->hv_clean_fields &=
+			~HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL;
+		vmx->nested.hv_evmcs_vmptr = assist_page.current_nested_vmcs;
+
+		/*
+		 * Unlike normal vmcs12, enlightened vmcs12 is not fully
+		 * reloaded from guest's memory (read only fields, fields not
+		 * present in struct hv_enlightened_vmcs, ...). Make sure there
+		 * are no leftovers.
+		 */
+		memset(vmx->nested.cached_vmcs12, 0,
+		       sizeof(*vmx->nested.cached_vmcs12));
+
+	}
+	return 1;
+}
+
 /* Emulate the VMPTRST instruction */
 static int handle_vmptrst(struct kvm_vcpu *vcpu)
 {
@@ -9313,6 +9408,9 @@ static int handle_vmptrst(struct kvm_vcpu *vcpu)
 	if (!nested_vmx_check_permission(vcpu))
 		return 1;
 
+	if (unlikely(to_vmx(vcpu)->nested.hv_evmcs))
+		return 1;
+
 	if (get_vmx_mem_address(vcpu, exit_qual, instr_info, true, &gva))
 		return 1;
 	/* *_system ok, nested_vmx_check_permission has verified cpl=0 */
@@ -13314,7 +13412,10 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
 	if (!nested_vmx_check_permission(vcpu))
 		return 1;
 
-	if (vmx->nested.current_vmptr == -1ull)
+	if (!nested_vmx_handle_enlightened_vmptrld(vcpu))
+		return 1;
+
+	if (!vmx->nested.hv_evmcs && vmx->nested.current_vmptr == -1ull)
 		return nested_vmx_failInvalid(vcpu);
 
 	vmcs12 = get_vmcs12(vcpu);
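For context, the enlightened VMCS page is handled with KVM's usual pin/map/unmap pattern for guest pages. The sketch below is illustrative only and not part of the patch; map_evmcs() and unmap_evmcs() are made-up helper names that condense what nested_vmx_handle_enlightened_vmptrld() and nested_release_evmcs() do with the backing page:

/*
 * Illustrative sketch (not part of the patch): the page lifecycle used for
 * the enlightened VMCS, assuming kernel context (<linux/kvm_host.h>).
 * map_evmcs()/unmap_evmcs() are hypothetical names.
 */
static struct hv_enlightened_vmcs *map_evmcs(struct kvm_vcpu *vcpu, gpa_t gpa,
					     struct page **page)
{
	*page = kvm_vcpu_gpa_to_page(vcpu, gpa);	/* pin the guest page */
	if (is_error_page(*page))
		return NULL;
	return kmap(*page);				/* map it into the kernel */
}

static void unmap_evmcs(struct page *page)
{
	kunmap(page);			/* undo kmap() first ... */
	kvm_release_page_dirty(page);	/* ... then unpin and mark the page dirty */
}

The same ordering constraint appears in nested_release_evmcs(): kunmap() precedes kvm_release_page_dirty(), and the cached pointers are only cleared afterwards.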