
Commit 199b576

Longpeng(Mike) authored and bonzini committed
KVM: add spinlock optimization framework
If a vcpu exits because it is spinning on a user mode spinlock, then the spinlock holder may be preempted in user mode or kernel mode. (Note that not all architectures trap spin loops in user mode; only AMD x86 and ARM/ARM64 currently do.)

But if a vcpu exits in kernel mode, then the holder must be preempted in kernel mode, so we should choose a vcpu in kernel mode as a more likely candidate for the lock holder.

This introduces kvm_arch_vcpu_in_kernel() to decide whether the vcpu is in kernel mode when it is preempted. kvm_vcpu_on_spin's new argument says the same of the spinning VCPU.

Signed-off-by: Longpeng(Mike) <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
1 parent 1b4d56b commit 199b576
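The directed-yield code that consumes these hints lives in common KVM code, which is not among the hunks shown below. The following is therefore only a minimal standalone model of the filtering heuristic the commit message describes; struct toy_vcpu, pick_yield_target() and the sample data are illustrative names for this sketch, not the kernel's data structures or APIs.

/*
 * Standalone model of the candidate-filtering heuristic described above.
 * Builds as ordinary user space C; it is not kernel code.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct toy_vcpu {
        int id;
        bool preempted;         /* descheduled while runnable */
        bool in_kernel;         /* what kvm_arch_vcpu_in_kernel() would report */
};

/*
 * If the spinning vcpu was in kernel mode, a holder preempted in user
 * mode cannot be the one it is waiting for, so skip such candidates.
 */
static struct toy_vcpu *pick_yield_target(struct toy_vcpu *vcpus, size_t n,
                                          bool spinner_in_kernel)
{
        for (size_t i = 0; i < n; i++) {
                struct toy_vcpu *v = &vcpus[i];

                if (!v->preempted)
                        continue;
                if (spinner_in_kernel && !v->in_kernel)
                        continue;
                return v;       /* first plausible lock holder */
        }
        return NULL;
}

int main(void)
{
        struct toy_vcpu vcpus[] = {
                { .id = 0, .preempted = true,  .in_kernel = false },
                { .id = 1, .preempted = true,  .in_kernel = true  },
                { .id = 2, .preempted = false, .in_kernel = true  },
        };
        struct toy_vcpu *t = pick_yield_target(vcpus, 3, true);

        printf("yield to vcpu %d\n", t ? t->id : -1);    /* prints 1 */
        return 0;
}

When the spinning vcpu was itself in kernel mode, candidates preempted in user mode are skipped, which is the preference the commit message argues for; passing false leaves the candidate set unfiltered, matching the call sites changed below.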

File tree

13 files changed: +36 -8 lines changed


arch/arm/kvm/handle_exit.c

Lines changed: 1 addition & 1 deletion
@@ -67,7 +67,7 @@ static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	if (kvm_vcpu_get_hsr(vcpu) & HSR_WFI_IS_WFE) {
 		trace_kvm_wfx(*vcpu_pc(vcpu), true);
 		vcpu->stat.wfe_exit_stat++;
-		kvm_vcpu_on_spin(vcpu);
+		kvm_vcpu_on_spin(vcpu, false);
 	} else {
 		trace_kvm_wfx(*vcpu_pc(vcpu), false);
 		vcpu->stat.wfi_exit_stat++;

arch/arm64/kvm/handle_exit.c

Lines changed: 1 addition & 1 deletion
@@ -84,7 +84,7 @@ static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	if (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WFx_ISS_WFE) {
 		trace_kvm_wfx_arm64(*vcpu_pc(vcpu), true);
 		vcpu->stat.wfe_exit_stat++;
-		kvm_vcpu_on_spin(vcpu);
+		kvm_vcpu_on_spin(vcpu, false);
 	} else {
 		trace_kvm_wfx_arm64(*vcpu_pc(vcpu), false);
 		vcpu->stat.wfi_exit_stat++;

arch/mips/kvm/mips.c

Lines changed: 5 additions & 0 deletions
@@ -98,6 +98,11 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
 	return !!(vcpu->arch.pending_exceptions);
 }
 
+bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
+{
+	return false;
+}
+
 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
 {
 	return 1;

arch/powerpc/kvm/powerpc.c

Lines changed: 5 additions & 0 deletions
@@ -58,6 +58,11 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
 	return !!(v->arch.pending_exceptions) || kvm_request_pending(v);
 }
 
+bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
+{
+	return false;
+}
+
 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
 {
 	return 1;

arch/s390/kvm/diag.c

Lines changed: 1 addition & 1 deletion
@@ -150,7 +150,7 @@ static int __diag_time_slice_end(struct kvm_vcpu *vcpu)
 {
 	VCPU_EVENT(vcpu, 5, "%s", "diag time slice end");
 	vcpu->stat.diagnose_44++;
-	kvm_vcpu_on_spin(vcpu);
+	kvm_vcpu_on_spin(vcpu, false);
 	return 0;
 }

arch/s390/kvm/kvm-s390.c

Lines changed: 5 additions & 0 deletions
@@ -2447,6 +2447,11 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
 	return kvm_s390_vcpu_has_irq(vcpu, 0);
 }
 
+bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
+{
+	return false;
+}
+
 void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
 {
 	atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);

arch/x86/kvm/hyperv.c

Lines changed: 1 addition & 1 deletion
@@ -1274,7 +1274,7 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
 
 	switch (code) {
 	case HVCALL_NOTIFY_LONG_SPIN_WAIT:
-		kvm_vcpu_on_spin(vcpu);
+		kvm_vcpu_on_spin(vcpu, false);
 		break;
 	case HVCALL_POST_MESSAGE:
 	case HVCALL_SIGNAL_EVENT:

arch/x86/kvm/svm.c

Lines changed: 1 addition & 1 deletion
@@ -3749,7 +3749,7 @@ static int interrupt_window_interception(struct vcpu_svm *svm)
 
 static int pause_interception(struct vcpu_svm *svm)
 {
-	kvm_vcpu_on_spin(&(svm->vcpu));
+	kvm_vcpu_on_spin(&svm->vcpu, false);
 	return 1;
 }

arch/x86/kvm/vmx.c

Lines changed: 1 addition & 1 deletion
@@ -6781,7 +6781,7 @@ static int handle_pause(struct kvm_vcpu *vcpu)
 	if (ple_gap)
 		grow_ple_window(vcpu);
 
-	kvm_vcpu_on_spin(vcpu);
+	kvm_vcpu_on_spin(vcpu, false);
 	return kvm_skip_emulated_instruction(vcpu);
 }

arch/x86/kvm/x86.c

Lines changed: 5 additions & 0 deletions
@@ -8432,6 +8432,11 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
 	return kvm_vcpu_running(vcpu) || kvm_vcpu_has_events(vcpu);
 }
 
+bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
+{
+	return false;
+}
+
 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
 {
 	return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
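Every kvm_arch_vcpu_in_kernel() stub above returns false, so this commit only installs the framework; the kernel-mode hint has no effect until each architecture fills the hook in. As a hedged sketch of how x86 could later do so, assuming the existing kvm_x86_ops->get_cpl callback (this code is not part of this commit):

bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
	/* CPL 0 means the guest was executing in kernel mode when preempted. */
	return kvm_x86_ops->get_cpl(vcpu) == 0;
}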
