Skip to content

Commit ceef7d1

Browse files
vittyvk authored and bonzini committed
KVM: x86: VMX: hyper-v: Enlightened MSR-Bitmap support
Enlightened MSR-Bitmap is a natural extension of Enlightened VMCS: Hyper-V Top Level Functional Specification states: "The L1 hypervisor may collaborate with the L0 hypervisor to make MSR accesses more efficient. It can enable enlightened MSR bitmaps by setting the corresponding field in the enlightened VMCS to 1. When enabled, the L0 hypervisor does not monitor the MSR bitmaps for changes. Instead, the L1 hypervisor must invalidate the corresponding clean field after making changes to one of the MSR bitmaps."

I reached out to Hyper-V team for additional details and I got the following information: "Current Hyper-V implementation works as following: If the enlightened MSR bitmap is not enabled: - All MSR accesses of L2 guests cause physical VM-Exits. If the enlightened MSR bitmap is enabled: - Physical VM-Exits for L2 accesses to certain MSRs (currently FS_BASE, GS_BASE and KERNEL_GS_BASE) are avoided, thus making these MSR accesses faster."

I tested my series with a tight rdmsrl loop in L2, for KERNEL_GS_BASE the results are:

Without Enlightened MSR-Bitmap: 1300 cycles/read
With Enlightened MSR-Bitmap: 120 cycles/read

Signed-off-by: Vitaly Kuznetsov <[email protected]>
Tested-by: Lan Tianyu <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
1 parent 74b566e commit ceef7d1

File tree

2 files changed

+33
-1
lines changed

2 files changed

+33
-1
lines changed

arch/x86/include/asm/hyperv-tlfs.h

Lines changed: 8 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -303,6 +303,9 @@ struct ms_hyperv_tsc_page {
303303
/* TSC emulation after migration */
304304
#define HV_X64_MSR_REENLIGHTENMENT_CONTROL 0x40000106
305305

306+
/* Nested features (CPUID 0x4000000A) EAX */
307+
#define HV_X64_NESTED_MSR_BITMAP BIT(19)
308+
306309
struct hv_reenlightenment_control {
307310
__u64 vector:8;
308311
__u64 reserved1:8;
@@ -668,7 +671,11 @@ struct hv_enlightened_vmcs {
668671
u32 hv_clean_fields;
669672
u32 hv_padding_32;
670673
u32 hv_synthetic_controls;
671-
u32 hv_enlightenments_control;
674+
struct {
675+
u32 nested_flush_hypercall:1;
676+
u32 msr_bitmap:1;
677+
u32 reserved:30;
678+
} hv_enlightenments_control;
672679
u32 hv_vp_id;
673680

674681
u64 hv_vm_id;

arch/x86/kvm/vmx.c

Lines changed: 25 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1089,6 +1089,16 @@ static inline u16 evmcs_read16(unsigned long field)
10891089
return *(u16 *)((char *)current_evmcs + offset);
10901090
}
10911091

1092+
/*
 * Invalidate the MSR-bitmap clean field of the current eVMCS.
 *
 * Per the Hyper-V TLFS (quoted in the commit message above): with the
 * enlightened MSR bitmap enabled, L0 no longer monitors the MSR bitmaps
 * for changes, so L1 must invalidate the corresponding clean field after
 * modifying a bitmap.  Call this before any MSR-bitmap write.
 *
 * No-op when no eVMCS is loaded, or when the msr_bitmap enlightenment
 * bit was never set (i.e. the feature is unsupported/disabled).
 */
static inline void evmcs_touch_msr_bitmap(void)
1093+
{
1094+
if (unlikely(!current_evmcs))
return;
1095+
1096+
if (current_evmcs->hv_enlightenments_control.msr_bitmap)
1097+
current_evmcs->hv_clean_fields &=
1098+
~HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP;
1099+
1100+
}
1101+
10921102
static void evmcs_load(u64 phys_addr)
10931103
{
10941104
struct hv_vp_assist_page *vp_ap =
@@ -1173,6 +1183,7 @@ static inline u32 evmcs_read32(unsigned long field) { return 0; }
11731183
static inline u16 evmcs_read16(unsigned long field) { return 0; }
11741184
static inline void evmcs_load(u64 phys_addr) {}
11751185
static inline void evmcs_sanitize_exec_ctrls(struct vmcs_config *vmcs_conf) {}
1186+
static inline void evmcs_touch_msr_bitmap(void) {}
11761187
#endif /* IS_ENABLED(CONFIG_HYPERV) */
11771188

11781189
static inline bool is_exception_n(u32 intr_info, u8 vector)
@@ -4219,6 +4230,14 @@ static int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs)
42194230
if (!loaded_vmcs->msr_bitmap)
42204231
goto out_vmcs;
42214232
memset(loaded_vmcs->msr_bitmap, 0xff, PAGE_SIZE);
4233+
4234+
if (static_branch_unlikely(&enable_evmcs) &&
4235+
(ms_hyperv.nested_features & HV_X64_NESTED_MSR_BITMAP)) {
4236+
struct hv_enlightened_vmcs *evmcs =
4237+
(struct hv_enlightened_vmcs *)loaded_vmcs->vmcs;
4238+
4239+
evmcs->hv_enlightenments_control.msr_bitmap = 1;
4240+
}
42224241
}
42234242
return 0;
42244243

@@ -5332,6 +5351,9 @@ static void __always_inline vmx_disable_intercept_for_msr(unsigned long *msr_bit
53325351
if (!cpu_has_vmx_msr_bitmap())
53335352
return;
53345353

5354+
if (static_branch_unlikely(&enable_evmcs))
5355+
evmcs_touch_msr_bitmap();
5356+
53355357
/*
53365358
* See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals
53375359
* have the write-low and read-high bitmap offsets the wrong way round.
@@ -5367,6 +5389,9 @@ static void __always_inline vmx_enable_intercept_for_msr(unsigned long *msr_bitm
53675389
if (!cpu_has_vmx_msr_bitmap())
53685390
return;
53695391

5392+
if (static_branch_unlikely(&enable_evmcs))
5393+
evmcs_touch_msr_bitmap();
5394+
53705395
/*
53715396
* See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals
53725397
* have the write-low and read-high bitmap offsets the wrong way round.

0 commit comments

Comments
 (0)