
Commit de63ad4

Longpeng(Mike) authored and bonzini committed
KVM: X86: implement the logic for spinlock optimization
get_cpl requires vcpu_load, so we must cache the result (whether the
vcpu was preempted when its cpl=0) in kvm_vcpu_arch.

Signed-off-by: Longpeng(Mike) <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
1 parent 199b576 commit de63ad4
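
For context, the hint added here is consumed by the generic directed-yield loop in virt/kvm/kvm_main.c, introduced by the parent commit 199b576. The sketch below is heavily simplified (the boost heuristics and bookkeeping of the real loop are omitted), but it shows why the CPL has to be cached: the loop examines *other* vCPUs, which cannot be vcpu_load()ed from this context.

/* Simplified sketch of kvm_vcpu_on_spin() -- illustrative, not verbatim. */
void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
{
	struct kvm *kvm = me->kvm;
	struct kvm_vcpu *vcpu;
	int i;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (vcpu == me)
			continue;
		if (swait_active(&vcpu->wq))
			continue;
		/*
		 * The candidate runs (or ran) on another pCPU; we cannot
		 * vcpu_load() it here to read its CPL, so the arch hook
		 * returns the value cached at preemption time.
		 */
		if (yield_to_kernel_mode && !kvm_arch_vcpu_in_kernel(vcpu))
			continue;
		if (kvm_vcpu_yield_to(vcpu) > 0)
			break;
	}
}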

File tree

arch/x86/include/asm/kvm_host.h
arch/x86/kvm/hyperv.c
arch/x86/kvm/svm.c
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c

5 files changed: +21 -4 lines


arch/x86/include/asm/kvm_host.h

Lines changed: 3 additions & 0 deletions
@@ -688,6 +688,9 @@ struct kvm_vcpu_arch {
 
 	/* GPA available (AMD only) */
 	bool gpa_available;
+
+	/* be preempted when it's in kernel-mode(cpl=0) */
+	bool preempted_in_kernel;
 };
 
 struct kvm_lpage_info {

arch/x86/kvm/hyperv.c

Lines changed: 1 addition & 1 deletion
@@ -1274,7 +1274,7 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
 
 	switch (code) {
 	case HVCALL_NOTIFY_LONG_SPIN_WAIT:
-		kvm_vcpu_on_spin(vcpu, false);
+		kvm_vcpu_on_spin(vcpu, true);
 		break;
 	case HVCALL_POST_MESSAGE:
 	case HVCALL_SIGNAL_EVENT:
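
A note on the hard-coded true above: per the Hyper-V spec, hypercalls such as HVCALL_NOTIFY_LONG_SPIN_WAIT may only be issued from guest kernel mode (ring 0), so a vCPU reporting a long spin wait is already known to be in kernel mode and no CPL query is needed.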

arch/x86/kvm/svm.c

Lines changed: 4 additions & 1 deletion
@@ -3749,7 +3749,10 @@ static int interrupt_window_interception(struct vcpu_svm *svm)
 
 static int pause_interception(struct vcpu_svm *svm)
 {
-	kvm_vcpu_on_spin(&svm->vcpu, false);
+	struct kvm_vcpu *vcpu = &svm->vcpu;
+	bool in_kernel = (svm_get_cpl(vcpu) == 0);
+
+	kvm_vcpu_on_spin(vcpu, in_kernel);
 	return 1;
 }
 
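
Unlike VMX (see the next file), AMD's PAUSE intercept fires regardless of guest privilege level, so pause_interception queries the CPL itself; this is cheap because the vCPU is loaded at intercept time. Roughly, svm_get_cpl reduces to reading the CPL that SVM hardware tracks in the VMCB save area (a simplified sketch, not the full accessor):

static int svm_get_cpl(struct kvm_vcpu *vcpu)
{
	struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;

	/* SVM keeps the current privilege level directly in the
	 * VMCB save area, so no segment decoding is required. */
	return save->cpl;
}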

arch/x86/kvm/vmx.c

Lines changed: 7 additions & 1 deletion
@@ -6781,7 +6781,13 @@ static int handle_pause(struct kvm_vcpu *vcpu)
 	if (ple_gap)
 		grow_ple_window(vcpu);
 
-	kvm_vcpu_on_spin(vcpu, false);
+	/*
+	 * Intel sdm vol3 ch-25.1.3 says: The "PAUSE-loop exiting"
+	 * VM-execution control is ignored if CPL > 0. OTOH, KVM
+	 * never set PAUSE_EXITING and just set PLE if supported,
+	 * so the vcpu must be CPL=0 if it gets a PAUSE exit.
+	 */
+	kvm_vcpu_on_spin(vcpu, true);
 	return kvm_skip_emulated_instruction(vcpu);
 }
 

arch/x86/kvm/x86.c

Lines changed: 6 additions & 1 deletion
@@ -2873,6 +2873,10 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 {
 	int idx;
+
+	if (vcpu->preempted)
+		vcpu->arch.preempted_in_kernel = !kvm_x86_ops->get_cpl(vcpu);
+
 	/*
 	 * Disable page faults because we're in atomic context here.
 	 * kvm_write_guest_offset_cached() would call might_fault()
@@ -7985,6 +7989,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 	kvm_pmu_init(vcpu);
 
 	vcpu->arch.pending_external_vector = -1;
+	vcpu->arch.preempted_in_kernel = false;
 
 	kvm_hv_vcpu_init(vcpu);
 
@@ -8434,7 +8439,7 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
 
 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
 {
-	return false;
+	return vcpu->arch.preempted_in_kernel;
 }
 
 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
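
A last bit of context on the kvm_arch_vcpu_put() hunk: the preempt notifier marks the vCPU preempted before arch state is saved, which is why the CPL can still be sampled there while the vCPU is loaded. Simplified from virt/kvm/kvm_main.c (a sketch; details may differ by kernel version):

static void kvm_sched_out(struct preempt_notifier *pn,
			  struct task_struct *next)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	/* Runnable but scheduled out => genuinely preempted. */
	if (current->state == TASK_RUNNING)
		vcpu->preempted = true;
	/* Arch put runs next; x86 now caches the CPL here. */
	kvm_arch_vcpu_put(vcpu);
}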
