
Commit b38d0a9

KVM: x86: do not report a vCPU as preempted outside instruction boundaries
jira VULN-3802
cve CVE-2022-39189
commit-author Paolo Bonzini <[email protected]>
commit 6cd8824

If a vCPU is outside guest mode and is scheduled out, it might be in the
process of making a memory access. A problem occurs if another vCPU uses
the PV TLB flush feature during the period when the vCPU is scheduled
out, and a virtual address has already been translated but has not yet
been accessed, because this is equivalent to using a stale TLB entry.

To avoid this, only report a vCPU as preempted if sure that the guest is
at an instruction boundary. A rescheduling request will be delivered to
the host physical CPU as an external interrupt, so for simplicity
consider any vmexit *not* to be on an instruction boundary except for
external interrupts.

It would in principle be okay to report the vCPU as preempted also if it
is sleeping in kvm_vcpu_block(): a TLB flush IPI will incur the
vmentry/vmexit overhead unnecessarily, and optimistic spinning is also
unlikely to succeed. However, leave it for later because right now
kvm_vcpu_check_block() is doing memory accesses. Even though the TLB
flush issue only applies to virtual memory addresses, it's very much
preferable to be conservative.

Reported-by: Jann Horn <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
(cherry picked from commit 6cd8824)
Signed-off-by: Brett Mastbergen <[email protected]>
1 parent 0e0fb04 commit b38d0a9
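For context on the bug being closed (the code below is not part of this commit): the guest-side PV TLB flush path is the consumer of the "preempted" flag. When the host reports a destination vCPU as preempted, the guest skips the flush IPI to it and instead asks the host to flush that vCPU's TLB before its next VM-entry. A simplified sketch, loosely based on kvm_flush_tlb_multi() in arch/x86/kernel/kvm.c (function and variable names vary across kernel versions), shows why reporting a vCPU as preempted while it is still mid-instruction is unsafe:

static void kvm_flush_tlb_multi(const struct cpumask *cpumask,
				const struct flush_tlb_info *info)
{
	u8 state;
	int cpu;
	struct kvm_steal_time *src;
	struct cpumask *flushmask = this_cpu_cpumask_var_ptr(__pv_cpu_mask);

	cpumask_copy(flushmask, cpumask);
	for_each_cpu(cpu, flushmask) {
		/*
		 * If the host reported the target vCPU as preempted, do not
		 * send it a flush IPI; set KVM_VCPU_FLUSH_TLB in its
		 * steal-time area so the host flushes its TLB on the next
		 * VM-entry.  If that vCPU was actually in the middle of an
		 * emulated memory access, a translation it already performed
		 * is never flushed, which is equivalent to using a stale TLB
		 * entry.
		 */
		src = &per_cpu(steal_time, cpu);
		state = READ_ONCE(src->preempted);
		if ((state & KVM_VCPU_PREEMPTED)) {
			if (try_cmpxchg(&src->preempted, &state,
					state | KVM_VCPU_FLUSH_TLB))
				__cpumask_clear_cpu(cpu, flushmask);
		}
	}

	/* IPI only the vCPUs that are actually running in guest mode. */
	native_flush_tlb_multi(flushmask, info);
}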

4 files changed, 28 insertions(+), 0 deletions(-)

arch/x86/include/asm/kvm_host.h

Lines changed: 3 additions & 0 deletions
@@ -639,6 +639,7 @@ struct kvm_vcpu_arch {
 	u64 ia32_misc_enable_msr;
 	u64 smbase;
 	u64 smi_count;
+	bool at_instruction_boundary;
 	bool tpr_access_reporting;
 	bool xsaves_enabled;
 	bool xfd_no_write_intercept;
@@ -1256,6 +1257,8 @@ struct kvm_vcpu_stat {
 	u64 nested_run;
 	u64 directed_yield_attempted;
 	u64 directed_yield_successful;
+	u64 preemption_reported;
+	u64 preemption_other;
 	u64 guest_mode;
 };
 

arch/x86/kvm/svm/svm.c

Lines changed: 2 additions & 0 deletions
@@ -4255,6 +4255,8 @@ static int svm_check_intercept(struct kvm_vcpu *vcpu,
 
 static void svm_handle_exit_irqoff(struct kvm_vcpu *vcpu)
 {
+	if (to_svm(vcpu)->vmcb->control.exit_code == SVM_EXIT_INTR)
+		vcpu->arch.at_instruction_boundary = true;
 }
 
 static void svm_sched_in(struct kvm_vcpu *vcpu, int cpu)

arch/x86/kvm/vmx/vmx.c

Lines changed: 1 addition & 0 deletions
@@ -6521,6 +6521,7 @@ static void handle_external_interrupt_irqoff(struct kvm_vcpu *vcpu)
 		return;
 
 	handle_interrupt_nmi_irqoff(vcpu, gate_offset(desc));
+	vcpu->arch.at_instruction_boundary = true;
 }
 
 static void vmx_handle_exit_irqoff(struct kvm_vcpu *vcpu)
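For reference (not part of this diff), handle_external_interrupt_irqoff() is reached only for external-interrupt VM-exits. A simplified sketch of its caller, vmx_handle_exit_irqoff() (field names such as exit_reason.basic vary between kernel versions), shows why setting the flag here mirrors the SVM_EXIT_INTR check on the SVM side:

static void vmx_handle_exit_irqoff(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (vmx->exit_reason.basic == EXIT_REASON_EXCEPTION_NMI)
		handle_exception_nmi_irqoff(vmx);
	else if (vmx->exit_reason.basic == EXIT_REASON_EXTERNAL_INTERRUPT)
		/* Only this exit is treated as an instruction boundary. */
		handle_external_interrupt_irqoff(vcpu);
}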

arch/x86/kvm/x86.c

Lines changed: 22 additions & 0 deletions
@@ -278,6 +278,8 @@ const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
 	STATS_DESC_COUNTER(VCPU, nested_run),
 	STATS_DESC_COUNTER(VCPU, directed_yield_attempted),
 	STATS_DESC_COUNTER(VCPU, directed_yield_successful),
+	STATS_DESC_COUNTER(VCPU, preemption_reported),
+	STATS_DESC_COUNTER(VCPU, preemption_other),
 	STATS_DESC_ICOUNTER(VCPU, guest_mode)
 };
 
@@ -4462,6 +4464,19 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
 	struct kvm_host_map map;
 	struct kvm_steal_time *st;
 
+	/*
+	 * The vCPU can be marked preempted if and only if the VM-Exit was on
+	 * an instruction boundary and will not trigger guest emulation of any
+	 * kind (see vcpu_run). Vendor specific code controls (conservatively)
+	 * when this is true, for example allowing the vCPU to be marked
+	 * preempted if and only if the VM-Exit was due to a host interrupt.
+	 */
+	if (!vcpu->arch.at_instruction_boundary) {
+		vcpu->stat.preemption_other++;
+		return;
+	}
+
+	vcpu->stat.preemption_reported++;
 	if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
 		return;
 
@@ -10040,6 +10055,13 @@ static int vcpu_run(struct kvm_vcpu *vcpu)
 	vcpu->arch.l1tf_flush_l1d = true;
 
 	for (;;) {
+		/*
+		 * If another guest vCPU requests a PV TLB flush in the middle
+		 * of instruction emulation, the rest of the emulation could
+		 * use a stale page translation. Assume that any code after
+		 * this point can start executing an instruction.
+		 */
+		vcpu->arch.at_instruction_boundary = false;
 		if (kvm_vcpu_running(vcpu)) {
 			r = vcpu_enter_guest(vcpu);
 		} else {
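To see where the new check takes effect (the code below is not part of this diff): kvm_steal_time_set_preempted() runs when the vCPU thread is scheduled out, from kvm_arch_vcpu_put(). A simplified sketch of that call site, with surrounding details omitted and version-dependent:

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	int idx;

	/*
	 * The steal-time area lives in guest memory, so take the SRCU read
	 * lock around the memslot access before publishing the preempted
	 * flag; with this commit, the flag is published only if the last
	 * VM-exit happened on an instruction boundary.
	 */
	idx = srcu_read_lock(&vcpu->kvm->srcu);
	kvm_steal_time_set_preempted(vcpu);
	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	/* ... vendor vcpu_put and TSC bookkeeping follow ... */
}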
