Skip to content

Commit 1018faa

Browse files
joergroedel authored and Ingo Molnar committed
perf/x86/kvm: Fix Host-Only/Guest-Only counting with SVM disabled
It turned out that a performance counter on AMD does not count at all when the GO or HO bit is set in the control register and SVM is disabled in EFER. This patch works around this issue by masking out the HO bit in the performance counter control register when SVM is not enabled. The GO bit is not touched because it is only set when the user wants to count in guest-mode only. So when SVM is disabled the counter should not run at all and the not-counting is the intended behaviour. Signed-off-by: Joerg Roedel <[email protected]> Signed-off-by: Peter Zijlstra <[email protected]> Cc: Avi Kivity <[email protected]> Cc: Stephane Eranian <[email protected]> Cc: David Ahern <[email protected]> Cc: Gleb Natapov <[email protected]> Cc: Robert Richter <[email protected]> Cc: [email protected] # v3.2 Link: http://lkml.kernel.org/r/[email protected] Signed-off-by: Ingo Molnar <[email protected]>
1 parent 5d85d97 commit 1018faa

File tree

4 files changed

+54
-4
lines changed

4 files changed

+54
-4
lines changed

arch/x86/include/asm/perf_event.h

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -242,4 +242,12 @@ static inline void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
242242
static inline void perf_events_lapic_init(void) { }
243243
#endif
244244

245+
#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)
246+
extern void amd_pmu_enable_virt(void);
247+
extern void amd_pmu_disable_virt(void);
248+
#else
249+
static inline void amd_pmu_enable_virt(void) { }
250+
static inline void amd_pmu_disable_virt(void) { }
251+
#endif
252+
245253
#endif /* _ASM_X86_PERF_EVENT_H */

arch/x86/kernel/cpu/perf_event.h

Lines changed: 6 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -147,7 +147,9 @@ struct cpu_hw_events {
147147
/*
148148
* AMD specific bits
149149
*/
150-
struct amd_nb *amd_nb;
150+
struct amd_nb *amd_nb;
151+
/* Inverted mask of bits to clear in the perf_ctr ctrl registers */
152+
u64 perf_ctr_virt_mask;
151153

152154
void *kfree_on_online;
153155
};
@@ -417,9 +419,11 @@ void x86_pmu_disable_all(void);
417419
static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
418420
u64 enable_mask)
419421
{
422+
u64 disable_mask = __this_cpu_read(cpu_hw_events.perf_ctr_virt_mask);
423+
420424
if (hwc->extra_reg.reg)
421425
wrmsrl(hwc->extra_reg.reg, hwc->extra_reg.config);
422-
wrmsrl(hwc->config_base, hwc->config | enable_mask);
426+
wrmsrl(hwc->config_base, (hwc->config | enable_mask) & ~disable_mask);
423427
}
424428

425429
void x86_pmu_enable_all(int added);

arch/x86/kernel/cpu/perf_event_amd.c

Lines changed: 35 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,5 @@
11
#include <linux/perf_event.h>
2+
#include <linux/export.h>
23
#include <linux/types.h>
34
#include <linux/init.h>
45
#include <linux/slab.h>
@@ -357,7 +358,9 @@ static void amd_pmu_cpu_starting(int cpu)
357358
struct amd_nb *nb;
358359
int i, nb_id;
359360

360-
if (boot_cpu_data.x86_max_cores < 2)
361+
cpuc->perf_ctr_virt_mask = AMD_PERFMON_EVENTSEL_HOSTONLY;
362+
363+
if (boot_cpu_data.x86_max_cores < 2 || boot_cpu_data.x86 == 0x15)
361364
return;
362365

363366
nb_id = amd_get_nb_id(cpu);
@@ -587,9 +590,9 @@ static __initconst const struct x86_pmu amd_pmu_f15h = {
587590
.put_event_constraints = amd_put_event_constraints,
588591

589592
.cpu_prepare = amd_pmu_cpu_prepare,
590-
.cpu_starting = amd_pmu_cpu_starting,
591593
.cpu_dead = amd_pmu_cpu_dead,
592594
#endif
595+
.cpu_starting = amd_pmu_cpu_starting,
593596
};
594597

595598
__init int amd_pmu_init(void)
@@ -621,3 +624,33 @@ __init int amd_pmu_init(void)
621624

622625
return 0;
623626
}
627+
628+
void amd_pmu_enable_virt(void)
629+
{
630+
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
631+
632+
cpuc->perf_ctr_virt_mask = 0;
633+
634+
/* Reload all events */
635+
x86_pmu_disable_all();
636+
x86_pmu_enable_all(0);
637+
}
638+
EXPORT_SYMBOL_GPL(amd_pmu_enable_virt);
639+
640+
void amd_pmu_disable_virt(void)
641+
{
642+
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
643+
644+
/*
645+
* We only mask out the Host-only bit so that host-only counting works
646+
* when SVM is disabled. If someone sets up a guest-only counter when
647+
* SVM is disabled the Guest-only bits still gets set and the counter
648+
* will not count anything.
649+
*/
650+
cpuc->perf_ctr_virt_mask = AMD_PERFMON_EVENTSEL_HOSTONLY;
651+
652+
/* Reload all events */
653+
x86_pmu_disable_all();
654+
x86_pmu_enable_all(0);
655+
}
656+
EXPORT_SYMBOL_GPL(amd_pmu_disable_virt);

arch/x86/kvm/svm.c

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -29,6 +29,7 @@
2929
#include <linux/ftrace_event.h>
3030
#include <linux/slab.h>
3131

32+
#include <asm/perf_event.h>
3233
#include <asm/tlbflush.h>
3334
#include <asm/desc.h>
3435
#include <asm/kvm_para.h>
@@ -575,6 +576,8 @@ static void svm_hardware_disable(void *garbage)
575576
wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT);
576577

577578
cpu_svm_disable();
579+
580+
amd_pmu_disable_virt();
578581
}
579582

580583
static int svm_hardware_enable(void *garbage)
@@ -622,6 +625,8 @@ static int svm_hardware_enable(void *garbage)
622625

623626
svm_init_erratum_383();
624627

628+
amd_pmu_enable_virt();
629+
625630
return 0;
626631
}
627632

0 commit comments

Comments (0)