Skip to content

Commit d6321d4

Browse files
rkrcmar authored and bonzini committed
KVM: x86: generalize guest_cpuid_has_ helpers
This patch turns guest_cpuid_has_XYZ(cpuid) into guest_cpuid_has(cpuid, X86_FEATURE_XYZ), which gets rid of many very similar helpers. When seeing a X86_FEATURE_*, we can know which cpuid it belongs to, but this information isn't in common code, so we recreate it for KVM. Add some BUILD_BUG_ONs to make sure that it runs nicely. Signed-off-by: Radim Krčmář <[email protected]> Reviewed-by: David Hildenbrand <[email protected]> Signed-off-by: Paolo Bonzini <[email protected]>
1 parent c6bd180 commit d6321d4

File tree

6 files changed

+95
-150
lines changed

6 files changed

+95
-150
lines changed

arch/x86/kvm/cpuid.h

Lines changed: 57 additions & 113 deletions
Original file line numberDiff line numberDiff line change
@@ -3,6 +3,7 @@
33

44
#include "x86.h"
55
#include <asm/cpu.h>
6+
#include <asm/processor.h>
67

78
int kvm_update_cpuid(struct kvm_vcpu *vcpu);
89
bool kvm_mpx_supported(void);
@@ -29,95 +30,78 @@ static inline int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
2930
return vcpu->arch.maxphyaddr;
3031
}
3132

32-
static inline bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu)
33-
{
34-
struct kvm_cpuid_entry2 *best;
33+
struct cpuid_reg {
34+
u32 function;
35+
u32 index;
36+
int reg;
37+
};
3538

36-
if (!static_cpu_has(X86_FEATURE_XSAVE))
37-
return false;
38-
39-
best = kvm_find_cpuid_entry(vcpu, 1, 0);
40-
return best && (best->ecx & bit(X86_FEATURE_XSAVE));
41-
}
39+
static const struct cpuid_reg reverse_cpuid[] = {
40+
[CPUID_1_EDX] = { 1, 0, CPUID_EDX},
41+
[CPUID_8000_0001_EDX] = {0x80000001, 0, CPUID_EDX},
42+
[CPUID_8086_0001_EDX] = {0x80860001, 0, CPUID_EDX},
43+
[CPUID_1_ECX] = { 1, 0, CPUID_ECX},
44+
[CPUID_C000_0001_EDX] = {0xc0000001, 0, CPUID_EDX},
45+
[CPUID_8000_0001_ECX] = {0xc0000001, 0, CPUID_ECX},
46+
[CPUID_7_0_EBX] = { 7, 0, CPUID_EBX},
47+
[CPUID_D_1_EAX] = { 0xd, 1, CPUID_EAX},
48+
[CPUID_F_0_EDX] = { 0xf, 0, CPUID_EDX},
49+
[CPUID_F_1_EDX] = { 0xf, 1, CPUID_EDX},
50+
[CPUID_8000_0008_EBX] = {0x80000008, 0, CPUID_EBX},
51+
[CPUID_6_EAX] = { 6, 0, CPUID_EAX},
52+
[CPUID_8000_000A_EDX] = {0x8000000a, 0, CPUID_EDX},
53+
[CPUID_7_ECX] = { 7, 0, CPUID_ECX},
54+
[CPUID_8000_0007_EBX] = {0x80000007, 0, CPUID_EBX},
55+
};
4256

43-
static inline bool guest_cpuid_has_mtrr(struct kvm_vcpu *vcpu)
57+
static __always_inline struct cpuid_reg x86_feature_cpuid(unsigned x86_feature)
4458
{
45-
struct kvm_cpuid_entry2 *best;
59+
unsigned x86_leaf = x86_feature / 32;
4660

47-
best = kvm_find_cpuid_entry(vcpu, 1, 0);
48-
return best && (best->edx & bit(X86_FEATURE_MTRR));
49-
}
61+
BUILD_BUG_ON(!__builtin_constant_p(x86_leaf));
62+
BUILD_BUG_ON(x86_leaf >= ARRAY_SIZE(reverse_cpuid));
63+
BUILD_BUG_ON(reverse_cpuid[x86_leaf].function == 0);
5064

51-
static inline bool guest_cpuid_has_tsc_adjust(struct kvm_vcpu *vcpu)
52-
{
53-
struct kvm_cpuid_entry2 *best;
54-
55-
best = kvm_find_cpuid_entry(vcpu, 7, 0);
56-
return best && (best->ebx & bit(X86_FEATURE_TSC_ADJUST));
57-
}
58-
59-
static inline bool guest_cpuid_has_smep(struct kvm_vcpu *vcpu)
60-
{
61-
struct kvm_cpuid_entry2 *best;
62-
63-
best = kvm_find_cpuid_entry(vcpu, 7, 0);
64-
return best && (best->ebx & bit(X86_FEATURE_SMEP));
65+
return reverse_cpuid[x86_leaf];
6566
}
6667

67-
static inline bool guest_cpuid_has_smap(struct kvm_vcpu *vcpu)
68+
static __always_inline int *guest_cpuid_get_register(struct kvm_vcpu *vcpu, unsigned x86_feature)
6869
{
69-
struct kvm_cpuid_entry2 *best;
70-
71-
best = kvm_find_cpuid_entry(vcpu, 7, 0);
72-
return best && (best->ebx & bit(X86_FEATURE_SMAP));
73-
}
74-
75-
static inline bool guest_cpuid_has_fsgsbase(struct kvm_vcpu *vcpu)
76-
{
77-
struct kvm_cpuid_entry2 *best;
78-
79-
best = kvm_find_cpuid_entry(vcpu, 7, 0);
80-
return best && (best->ebx & bit(X86_FEATURE_FSGSBASE));
81-
}
82-
83-
static inline bool guest_cpuid_has_pku(struct kvm_vcpu *vcpu)
84-
{
85-
struct kvm_cpuid_entry2 *best;
86-
87-
best = kvm_find_cpuid_entry(vcpu, 7, 0);
88-
return best && (best->ecx & bit(X86_FEATURE_PKU));
89-
}
90-
91-
static inline bool guest_cpuid_has_longmode(struct kvm_vcpu *vcpu)
92-
{
93-
struct kvm_cpuid_entry2 *best;
94-
95-
best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
96-
return best && (best->edx & bit(X86_FEATURE_LM));
97-
}
70+
struct kvm_cpuid_entry2 *entry;
71+
const struct cpuid_reg cpuid = x86_feature_cpuid(x86_feature);
9872

99-
static inline bool guest_cpuid_has_osvw(struct kvm_vcpu *vcpu)
100-
{
101-
struct kvm_cpuid_entry2 *best;
73+
entry = kvm_find_cpuid_entry(vcpu, cpuid.function, cpuid.index);
74+
if (!entry)
75+
return NULL;
10276

103-
best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
104-
return best && (best->ecx & bit(X86_FEATURE_OSVW));
77+
switch (cpuid.reg) {
78+
case CPUID_EAX:
79+
return &entry->eax;
80+
case CPUID_EBX:
81+
return &entry->ebx;
82+
case CPUID_ECX:
83+
return &entry->ecx;
84+
case CPUID_EDX:
85+
return &entry->edx;
86+
default:
87+
BUILD_BUG();
88+
return NULL;
89+
}
10590
}
10691

107-
static inline bool guest_cpuid_has_pcid(struct kvm_vcpu *vcpu)
92+
/*
 * Test whether the guest's CPUID advertises @x86_feature.
 *
 * Replaces the old family of guest_cpuid_has_xyz() helpers; the
 * feature constant alone identifies the leaf/register via
 * reverse_cpuid.  XSAVE keeps its historical extra check: a guest is
 * never considered to have XSAVE unless the host CPU does too.
 */
static __always_inline bool guest_cpuid_has(struct kvm_vcpu *vcpu, unsigned x86_feature)
{
	int *reg;

	if (x86_feature == X86_FEATURE_XSAVE &&
	    !static_cpu_has(X86_FEATURE_XSAVE))
		return false;

	reg = guest_cpuid_get_register(vcpu, x86_feature);
	if (!reg)
		return false;

	return *reg & bit(x86_feature);
}
122106

123107
static inline bool guest_cpuid_is_amd(struct kvm_vcpu *vcpu)
@@ -128,46 +112,6 @@ static inline bool guest_cpuid_is_amd(struct kvm_vcpu *vcpu)
128112
return best && best->ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx;
129113
}
130114

131-
static inline bool guest_cpuid_has_gbpages(struct kvm_vcpu *vcpu)
132-
{
133-
struct kvm_cpuid_entry2 *best;
134-
135-
best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
136-
return best && (best->edx & bit(X86_FEATURE_GBPAGES));
137-
}
138-
139-
static inline bool guest_cpuid_has_rtm(struct kvm_vcpu *vcpu)
140-
{
141-
struct kvm_cpuid_entry2 *best;
142-
143-
best = kvm_find_cpuid_entry(vcpu, 7, 0);
144-
return best && (best->ebx & bit(X86_FEATURE_RTM));
145-
}
146-
147-
static inline bool guest_cpuid_has_mpx(struct kvm_vcpu *vcpu)
148-
{
149-
struct kvm_cpuid_entry2 *best;
150-
151-
best = kvm_find_cpuid_entry(vcpu, 7, 0);
152-
return best && (best->ebx & bit(X86_FEATURE_MPX));
153-
}
154-
155-
static inline bool guest_cpuid_has_rdtscp(struct kvm_vcpu *vcpu)
156-
{
157-
struct kvm_cpuid_entry2 *best;
158-
159-
best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
160-
return best && (best->edx & bit(X86_FEATURE_RDTSCP));
161-
}
162-
163-
static inline bool guest_cpuid_has_nrips(struct kvm_vcpu *vcpu)
164-
{
165-
struct kvm_cpuid_entry2 *best;
166-
167-
best = kvm_find_cpuid_entry(vcpu, 0x8000000a, 0);
168-
return best && (best->edx & bit(X86_FEATURE_NRIPS));
169-
}
170-
171115
static inline int guest_cpuid_family(struct kvm_vcpu *vcpu)
172116
{
173117
struct kvm_cpuid_entry2 *best;

arch/x86/kvm/mmu.c

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -4052,7 +4052,8 @@ static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
40524052
{
40534053
__reset_rsvds_bits_mask(vcpu, &context->guest_rsvd_check,
40544054
cpuid_maxphyaddr(vcpu), context->root_level,
4055-
context->nx, guest_cpuid_has_gbpages(vcpu),
4055+
context->nx,
4056+
guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES),
40564057
is_pse(vcpu), guest_cpuid_is_amd(vcpu));
40574058
}
40584059

@@ -4114,8 +4115,8 @@ reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
41144115
__reset_rsvds_bits_mask(vcpu, &context->shadow_zero_check,
41154116
boot_cpu_data.x86_phys_bits,
41164117
context->shadow_root_level, uses_nx,
4117-
guest_cpuid_has_gbpages(vcpu), is_pse(vcpu),
4118-
true);
4118+
guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES),
4119+
is_pse(vcpu), true);
41194120
}
41204121
EXPORT_SYMBOL_GPL(reset_shadow_zero_bits_mask);
41214122

arch/x86/kvm/mtrr.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -130,7 +130,7 @@ static u8 mtrr_disabled_type(struct kvm_vcpu *vcpu)
130130
* enable MTRRs and it is obviously undesirable to run the
131131
* guest entirely with UC memory and we use WB.
132132
*/
133-
if (guest_cpuid_has_mtrr(vcpu))
133+
if (guest_cpuid_has(vcpu, X86_FEATURE_MTRR))
134134
return MTRR_TYPE_UNCACHABLE;
135135
else
136136
return MTRR_TYPE_WRBACK;

arch/x86/kvm/svm.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -5078,7 +5078,7 @@ static void svm_cpuid_update(struct kvm_vcpu *vcpu)
50785078
struct kvm_cpuid_entry2 *entry;
50795079

50805080
/* Update nrips enabled cache */
5081-
svm->nrips_enabled = !!guest_cpuid_has_nrips(&svm->vcpu);
5081+
svm->nrips_enabled = !!guest_cpuid_has(&svm->vcpu, X86_FEATURE_NRIPS);
50825082

50835083
if (!kvm_vcpu_apicv_active(vcpu))
50845084
return;

arch/x86/kvm/vmx.c

Lines changed: 12 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -2611,7 +2611,7 @@ static void setup_msrs(struct vcpu_vmx *vmx)
26112611
if (index >= 0)
26122612
move_msr_up(vmx, index, save_nmsrs++);
26132613
index = __find_msr_index(vmx, MSR_TSC_AUX);
2614-
if (index >= 0 && guest_cpuid_has_rdtscp(&vmx->vcpu))
2614+
if (index >= 0 && guest_cpuid_has(&vmx->vcpu, X86_FEATURE_RDTSCP))
26152615
move_msr_up(vmx, index, save_nmsrs++);
26162616
/*
26172617
* MSR_STAR is only needed on long mode guests, and only
@@ -2671,12 +2671,6 @@ static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
26712671
}
26722672
}
26732673

2674-
static bool guest_cpuid_has_vmx(struct kvm_vcpu *vcpu)
2675-
{
2676-
struct kvm_cpuid_entry2 *best = kvm_find_cpuid_entry(vcpu, 1, 0);
2677-
return best && (best->ecx & (1 << (X86_FEATURE_VMX & 31)));
2678-
}
2679-
26802674
/*
26812675
* nested_vmx_allowed() checks whether a guest should be allowed to use VMX
26822676
* instructions and MSRs (i.e., nested VMX). Nested VMX is disabled for
@@ -2685,7 +2679,7 @@ static bool guest_cpuid_has_vmx(struct kvm_vcpu *vcpu)
26852679
*/
26862680
static inline bool nested_vmx_allowed(struct kvm_vcpu *vcpu)
26872681
{
2688-
return nested && guest_cpuid_has_vmx(vcpu);
2682+
return nested && guest_cpuid_has(vcpu, X86_FEATURE_VMX);
26892683
}
26902684

26912685
/*
@@ -3281,7 +3275,8 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
32813275
break;
32823276
case MSR_IA32_BNDCFGS:
32833277
if (!kvm_mpx_supported() ||
3284-
(!msr_info->host_initiated && !guest_cpuid_has_mpx(vcpu)))
3278+
(!msr_info->host_initiated &&
3279+
!guest_cpuid_has(vcpu, X86_FEATURE_MPX)))
32853280
return 1;
32863281
msr_info->data = vmcs_read64(GUEST_BNDCFGS);
32873282
break;
@@ -3305,7 +3300,8 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
33053300
msr_info->data = vcpu->arch.ia32_xss;
33063301
break;
33073302
case MSR_TSC_AUX:
3308-
if (!guest_cpuid_has_rdtscp(vcpu) && !msr_info->host_initiated)
3303+
if (!msr_info->host_initiated &&
3304+
!guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
33093305
return 1;
33103306
/* Otherwise falls through */
33113307
default:
@@ -3364,7 +3360,8 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
33643360
break;
33653361
case MSR_IA32_BNDCFGS:
33663362
if (!kvm_mpx_supported() ||
3367-
(!msr_info->host_initiated && !guest_cpuid_has_mpx(vcpu)))
3363+
(!msr_info->host_initiated &&
3364+
!guest_cpuid_has(vcpu, X86_FEATURE_MPX)))
33683365
return 1;
33693366
if (is_noncanonical_address(data & PAGE_MASK) ||
33703367
(data & MSR_IA32_BNDCFGS_RSVD))
@@ -3427,7 +3424,8 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
34273424
clear_atomic_switch_msr(vmx, MSR_IA32_XSS);
34283425
break;
34293426
case MSR_TSC_AUX:
3430-
if (!guest_cpuid_has_rdtscp(vcpu) && !msr_info->host_initiated)
3427+
if (!msr_info->host_initiated &&
3428+
!guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
34313429
return 1;
34323430
/* Check reserved bit, higher 32 bits should be zero */
34333431
if ((data >> 32) != 0)
@@ -9622,7 +9620,7 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
96229620
u32 secondary_exec_ctl = vmx_secondary_exec_control(vmx);
96239621

96249622
if (vmx_rdtscp_supported()) {
9625-
bool rdtscp_enabled = guest_cpuid_has_rdtscp(vcpu);
9623+
bool rdtscp_enabled = guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP);
96269624
if (!rdtscp_enabled)
96279625
secondary_exec_ctl &= ~SECONDARY_EXEC_RDTSCP;
96289626

@@ -9641,7 +9639,7 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
96419639
struct kvm_cpuid_entry2 *best = kvm_find_cpuid_entry(vcpu, 0x7, 0);
96429640
bool invpcid_enabled =
96439641
best && best->ebx & bit(X86_FEATURE_INVPCID) &&
9644-
guest_cpuid_has_pcid(vcpu);
9642+
guest_cpuid_has(vcpu, X86_FEATURE_PCID);
96459643

96469644
if (!invpcid_enabled) {
96479645
secondary_exec_ctl &= ~SECONDARY_EXEC_ENABLE_INVPCID;

0 commit comments

Comments
 (0)