Skip to content

Commit 9ed38ff

Browse files
ladipro authored and bonzini committed
KVM: nVMX: introduce nested_vmx_load_cr3 and call it on vmentry
Loading CR3 as part of emulating vmentry is different from regular CR3 loads, as implemented in kvm_set_cr3, in several ways. * different rules are followed to check CR3 and it is desirable for the caller to distinguish between the possible failures * PDPTRs are not loaded if PAE paging and nested EPT are both enabled * many MMU operations are not necessary This patch introduces nested_vmx_load_cr3 suitable for CR3 loads as part of nested vmentry and vmexit, and makes use of it on the nested vmentry path. Signed-off-by: Ladi Prosek <[email protected]> Signed-off-by: Radim Krčmář <[email protected]>
1 parent ee146c1 commit 9ed38ff

File tree

3 files changed

+45
-16
lines changed

3 files changed

+45
-16
lines changed

arch/x86/include/asm/kvm_host.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1071,6 +1071,7 @@ unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
10711071
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);
10721072

10731073
int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3);
1074+
bool pdptrs_changed(struct kvm_vcpu *vcpu);
10741075

10751076
int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
10761077
const void *val, int bytes);

arch/x86/kvm/vmx.c

Lines changed: 42 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -9968,6 +9968,44 @@ static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
99689968
return 0;
99699969
}
99709970

9971+
/*
9972+
* Load guest's/host's cr3 at nested entry/exit. nested_ept is true if we are
9973+
* emulating VM entry into a guest with EPT enabled.
9974+
* Returns 0 on success, 1 on failure. Invalid state exit qualification code
9975+
* is assigned to entry_failure_code on failure.
9976+
*/
9977+
static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, bool nested_ept,
9978+
unsigned long *entry_failure_code)
9979+
{
9980+
unsigned long invalid_mask;
9981+
9982+
if (cr3 != kvm_read_cr3(vcpu) || (!nested_ept && pdptrs_changed(vcpu))) {
9983+
invalid_mask = (~0ULL) << cpuid_maxphyaddr(vcpu);
9984+
if (cr3 & invalid_mask) {
9985+
*entry_failure_code = ENTRY_FAIL_DEFAULT;
9986+
return 1;
9987+
}
9988+
9989+
/*
9990+
* If PAE paging and EPT are both on, CR3 is not used by the CPU and
9991+
* must not be dereferenced.
9992+
*/
9993+
if (!is_long_mode(vcpu) && is_pae(vcpu) && is_paging(vcpu) &&
9994+
!nested_ept) {
9995+
if (!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3)) {
9996+
*entry_failure_code = ENTRY_FAIL_PDPTE;
9997+
return 1;
9998+
}
9999+
}
10000+
10001+
vcpu->arch.cr3 = cr3;
10002+
__set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
10003+
}
10004+
10005+
kvm_mmu_reset_context(vcpu);
10006+
return 0;
10007+
}
10008+
997110009
/*
997210010
* prepare_vmcs02 is called when the L1 guest hypervisor runs its nested
997310011
* L2 guest. L1 has a vmcs for L2 (vmcs12), and this function "merges" it
@@ -10300,21 +10338,10 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
1030010338
/* Note: modifies VM_ENTRY/EXIT_CONTROLS and GUEST/HOST_IA32_EFER */
1030110339
vmx_set_efer(vcpu, vcpu->arch.efer);
1030210340

10303-
/*
10304-
* Shadow page tables on either EPT or shadow page tables.
10305-
* If PAE and EPT are both on, CR3 is not used by the CPU and must not
10306-
* be dereferenced.
10307-
*/
10308-
if (is_pae(vcpu) && is_paging(vcpu) && !is_long_mode(vcpu) &&
10309-
nested_ept_enabled) {
10310-
vcpu->arch.cr3 = vmcs12->guest_cr3;
10311-
__set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
10312-
} else {
10313-
if (kvm_set_cr3(vcpu, vmcs12->guest_cr3)) {
10314-
*entry_failure_code = ENTRY_FAIL_DEFAULT;
10315-
return 1;
10316-
}
10317-
}
10341+
/* Shadow page tables on either EPT or shadow page tables. */
10342+
if (nested_vmx_load_cr3(vcpu, vmcs12->guest_cr3, nested_ept_enabled,
10343+
entry_failure_code))
10344+
return 1;
1031810345

1031910346
kvm_mmu_reset_context(vcpu);
1032010347

arch/x86/kvm/x86.c

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -566,7 +566,7 @@ int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3)
566566
}
567567
EXPORT_SYMBOL_GPL(load_pdptrs);
568568

569-
static bool pdptrs_changed(struct kvm_vcpu *vcpu)
569+
bool pdptrs_changed(struct kvm_vcpu *vcpu)
570570
{
571571
u64 pdpte[ARRAY_SIZE(vcpu->arch.walk_mmu->pdptrs)];
572572
bool changed = true;
@@ -592,6 +592,7 @@ static bool pdptrs_changed(struct kvm_vcpu *vcpu)
592592

593593
return changed;
594594
}
595+
EXPORT_SYMBOL_GPL(pdptrs_changed);
595596

596597
int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
597598
{

0 commit comments

Comments
 (0)