@@ -2880,18 +2880,15 @@ static void setup_msrs(struct vcpu_vmx *vmx)
 	vmx_update_msr_bitmap(&vmx->vcpu);
 }
 
-/*
- * reads and returns guest's timestamp counter "register"
- * guest_tsc = (host_tsc * tsc multiplier) >> 48 + tsc_offset
- *    -- Intel TSC Scaling for Virtualization White Paper, sec 1.3
- */
-static u64 guest_read_tsc(struct kvm_vcpu *vcpu)
+static u64 vmx_read_l1_tsc_offset(struct kvm_vcpu *vcpu)
 {
-	u64 host_tsc, tsc_offset;
+	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
 
-	host_tsc = rdtsc();
-	tsc_offset = vmcs_read64(TSC_OFFSET);
-	return kvm_scale_tsc(vcpu, host_tsc) + tsc_offset;
+	if (is_guest_mode(vcpu) &&
+	    (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING))
+		return vcpu->arch.tsc_offset - vmcs12->tsc_offset;
+
+	return vcpu->arch.tsc_offset;
 }
 
 /*
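A quick way to sanity-check the arithmetic introduced above and reused in the enter/exit hunks further down: vcpu->arch.tsc_offset now tracks the offset of whichever guest is currently running, so while L2 runs it holds L1's offset plus vmcs12->tsc_offset, and vmx_read_l1_tsc_offset() undoes that addition. A minimal userspace sketch, not kernel code, with made-up numeric values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t l1_tsc_offset = 1000;     /* offset L0 programs for L1 (assumed value) */
	uint64_t vmcs12_tsc_offset = 250;  /* offset L1 requests for L2 (assumed value) */

	/* enter_vmx_non_root_mode(): arch.tsc_offset now describes the running L2. */
	uint64_t arch_tsc_offset = l1_tsc_offset + vmcs12_tsc_offset;

	/* vmx_read_l1_tsc_offset() while in guest mode: recover L1's view. */
	uint64_t l1_view = arch_tsc_offset - vmcs12_tsc_offset;

	/* nested_vmx_vmexit(): the same subtraction restores L1's offset. */
	arch_tsc_offset -= vmcs12_tsc_offset;

	printf("L1 offset %llu, recovered %llu, after exit %llu\n",
	       (unsigned long long)l1_tsc_offset,
	       (unsigned long long)l1_view,
	       (unsigned long long)arch_tsc_offset);
	return 0;
}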
@@ -3524,9 +3521,6 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 #endif
 	case MSR_EFER:
 		return kvm_get_msr_common(vcpu, msr_info);
-	case MSR_IA32_TSC:
-		msr_info->data = guest_read_tsc(vcpu);
-		break;
 	case MSR_IA32_SPEC_CTRL:
 		if (!msr_info->host_initiated &&
 		    !guest_cpuid_has(vcpu, X86_FEATURE_IBRS) &&
@@ -3646,9 +3640,6 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 			return 1;
 		vmcs_write64(GUEST_BNDCFGS, data);
 		break;
-	case MSR_IA32_TSC:
-		kvm_write_tsc(vcpu, msr_info);
-		break;
 	case MSR_IA32_SPEC_CTRL:
 		if (!msr_info->host_initiated &&
 		    !guest_cpuid_has(vcpu, X86_FEATURE_IBRS) &&
@@ -10608,6 +10599,16 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
 	return true;
 }
 
+static int nested_vmx_check_apic_access_controls(struct kvm_vcpu *vcpu,
+						  struct vmcs12 *vmcs12)
+{
+	if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) &&
+	    !page_address_valid(vcpu, vmcs12->apic_access_addr))
+		return -EINVAL;
+	else
+		return 0;
+}
+
 static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu,
 					   struct vmcs12 *vmcs12)
 {
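The new nested_vmx_check_apic_access_controls() defers the actual address validation to page_address_valid(). As a rough illustration, the userspace sketch below models the kind of test that helper is assumed to perform, namely page alignment and the guest physical-address-width limit; the helper name, the maxphyaddr value, and the test addresses are illustrative only:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096ULL

/* Illustrative stand-in for the kernel's page_address_valid() check. */
static bool apic_access_addr_ok(uint64_t gpa, unsigned int maxphyaddr)
{
	return (gpa % PAGE_SIZE) == 0 && (gpa >> maxphyaddr) == 0;
}

int main(void)
{
	unsigned int maxphyaddr = 46;	/* assumed guest physical address width */

	printf("0x1000 ok: %d\n", apic_access_addr_ok(0x1000, maxphyaddr));       /* aligned, in range */
	printf("0x1001 ok: %d\n", apic_access_addr_ok(0x1001, maxphyaddr));       /* misaligned */
	printf("1<<47 ok: %d\n", apic_access_addr_ok(1ULL << 47, maxphyaddr));    /* beyond maxphyaddr */
	return 0;
}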
@@ -11176,11 +11177,8 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
 		vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
 	}
 
-	if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
-		vmcs_write64(TSC_OFFSET,
-			vcpu->arch.tsc_offset + vmcs12->tsc_offset);
-	else
-		vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
+	vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
+
 	if (kvm_has_tsc_control)
 		decache_tsc_multiplier(vmx);
 
@@ -11299,6 +11297,9 @@ static int check_vmentry_prereqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 	if (nested_vmx_check_msr_bitmap_controls(vcpu, vmcs12))
 		return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
 
+	if (nested_vmx_check_apic_access_controls(vcpu, vmcs12))
+		return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+
 	if (nested_vmx_check_tpr_shadow_controls(vcpu, vmcs12))
 		return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
 
@@ -11420,6 +11421,7 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry)
 	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
 	u32 msr_entry_idx;
 	u32 exit_qual;
+	int r;
 
 	enter_guest_mode(vcpu);
 
@@ -11429,26 +11431,21 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry)
 	vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02);
 	vmx_segment_cache_clear(vmx);
 
-	if (prepare_vmcs02(vcpu, vmcs12, from_vmentry, &exit_qual)) {
-		leave_guest_mode(vcpu);
-		vmx_switch_vmcs(vcpu, &vmx->vmcs01);
-		nested_vmx_entry_failure(vcpu, vmcs12,
-					 EXIT_REASON_INVALID_STATE, exit_qual);
-		return 1;
-	}
+	if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
+		vcpu->arch.tsc_offset += vmcs12->tsc_offset;
+
+	r = EXIT_REASON_INVALID_STATE;
+	if (prepare_vmcs02(vcpu, vmcs12, from_vmentry, &exit_qual))
+		goto fail;
 
 	nested_get_vmcs12_pages(vcpu, vmcs12);
 
+	r = EXIT_REASON_MSR_LOAD_FAIL;
 	msr_entry_idx = nested_vmx_load_msr(vcpu,
 					    vmcs12->vm_entry_msr_load_addr,
 					    vmcs12->vm_entry_msr_load_count);
-	if (msr_entry_idx) {
-		leave_guest_mode(vcpu);
-		vmx_switch_vmcs(vcpu, &vmx->vmcs01);
-		nested_vmx_entry_failure(vcpu, vmcs12,
-					 EXIT_REASON_MSR_LOAD_FAIL, msr_entry_idx);
-		return 1;
-	}
+	if (msr_entry_idx)
+		goto fail;
 
 	/*
 	 * Note no nested_vmx_succeed or nested_vmx_fail here. At this point
@@ -11457,6 +11454,14 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry)
 	 * the success flag) when L2 exits (see nested_vmx_vmexit()).
 	 */
 	return 0;
+
+fail:
+	if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
+		vcpu->arch.tsc_offset -= vmcs12->tsc_offset;
+	leave_guest_mode(vcpu);
+	vmx_switch_vmcs(vcpu, &vmx->vmcs01);
+	nested_vmx_entry_failure(vcpu, vmcs12, r, exit_qual);
+	return 1;
 }
 
 /*
@@ -12028,6 +12033,9 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
 
 	leave_guest_mode(vcpu);
 
+	if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
+		vcpu->arch.tsc_offset -= vmcs12->tsc_offset;
+
 	if (likely(!vmx->fail)) {
 		if (exit_reason == -1)
 			sync_vmcs12(vcpu, vmcs12);
@@ -12224,10 +12232,16 @@ static inline int u64_shl_div_u64(u64 a, unsigned int shift,
 
 static int vmx_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc)
 {
-	struct vcpu_vmx *vmx = to_vmx(vcpu);
-	u64 tscl = rdtsc();
-	u64 guest_tscl = kvm_read_l1_tsc(vcpu, tscl);
-	u64 delta_tsc = max(guest_deadline_tsc, guest_tscl) - guest_tscl;
+	struct vcpu_vmx *vmx;
+	u64 tscl, guest_tscl, delta_tsc;
+
+	if (kvm_mwait_in_guest(vcpu->kvm))
+		return -EOPNOTSUPP;
+
+	vmx = to_vmx(vcpu);
+	tscl = rdtsc();
+	guest_tscl = kvm_read_l1_tsc(vcpu, tscl);
+	delta_tsc = max(guest_deadline_tsc, guest_tscl) - guest_tscl;
 
 	/* Convert to host delta tsc if tsc scaling is enabled */
 	if (vcpu->arch.tsc_scaling_ratio != kvm_default_tsc_scaling_ratio &&
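For reference, the trailing context above is the guest-to-host delta conversion that runs after the new kvm_mwait_in_guest() early return. A minimal sketch of that fixed-point scaling; the 48 fractional bits follow the ">> 48" in the comment removed in the first hunk, while the ratio and delta values are assumptions and this is not the patch's exact code:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Assumed format: 48 fractional bits, as in the VMX TSC multiplier. */
	unsigned int frac_bits = 48;
	uint64_t ratio = 3ULL << (frac_bits - 1);	/* 1.5: guest TSC runs at 1.5x host */
	uint64_t guest_delta = 1500000;

	/* host_delta = (guest_delta << frac_bits) / ratio, in 128 bits to avoid overflow */
	uint64_t host_delta =
		(uint64_t)(((unsigned __int128)guest_delta << frac_bits) / ratio);

	printf("guest delta %llu -> host delta %llu\n",
	       (unsigned long long)guest_delta, (unsigned long long)host_delta);
	return 0;
}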
@@ -12533,7 +12547,7 @@ static int vmx_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
 		vcpu_info.pi_desc_addr = __pa(vcpu_to_pi_desc(vcpu));
 		vcpu_info.vector = irq.vector;
 
-		trace_kvm_pi_irte_update(vcpu->vcpu_id, host_irq, e->gsi,
+		trace_kvm_pi_irte_update(host_irq, vcpu->vcpu_id, e->gsi,
 					 vcpu_info.vector, vcpu_info.pi_desc_addr, set);
 
 		if (set)
@@ -12712,6 +12726,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
 
 	.has_wbinvd_exit = cpu_has_vmx_wbinvd_exit,
 
+	.read_l1_tsc_offset = vmx_read_l1_tsc_offset,
 	.write_tsc_offset = vmx_write_tsc_offset,
 
 	.set_tdp_cr3 = vmx_set_cr3,