@@ -1730,9 +1730,11 @@ static __initconst const u64 knl_hw_cache_extra_regs
  * disabled state if called consecutively.
  *
  * During consecutive calls, the same disable value will be written to related
- * registers, so the PMU state remains unchanged. hw.state in
- * intel_bts_disable_local will remain PERF_HES_STOPPED too in consecutive
- * calls.
+ * registers, so the PMU state remains unchanged.
+ *
+ * intel_bts events don't coexist with intel PMU's BTS events because of
+ * x86_add_exclusive(x86_lbr_exclusive_lbr); there's no need to keep them
+ * disabled around intel PMU's event batching etc, only inside the PMI handler.
  */
 static void __intel_pmu_disable_all(void)
 {
@@ -1742,8 +1744,6 @@ static void __intel_pmu_disable_all(void)
 
 	if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask))
 		intel_pmu_disable_bts();
-	else
-		intel_bts_disable_local();
 
 	intel_pmu_pebs_disable_all();
 }
@@ -1771,8 +1771,7 @@ static void __intel_pmu_enable_all(int added, bool pmi)
 			return;
 
 		intel_pmu_enable_bts(event->hw.config);
-	} else
-		intel_bts_enable_local();
+	}
 }
 
 static void intel_pmu_enable_all(int added)
@@ -2073,6 +2072,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
 	 */
 	if (!x86_pmu.late_ack)
 		apic_write(APIC_LVTPC, APIC_DM_NMI);
+	intel_bts_disable_local();
 	__intel_pmu_disable_all();
 	handled = intel_pmu_drain_bts_buffer();
 	handled += intel_bts_interrupt();
@@ -2172,6 +2172,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
 	/* Only restore PMU state when it's active. See x86_pmu_disable(). */
 	if (cpuc->enabled)
 		__intel_pmu_enable_all(0, true);
+	intel_bts_enable_local();
 
 	/*
 	 * Only unmask the NMI after the overflow counters
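
The comment added in the first hunk leans on the kernel's PMU exclusivity mechanism: because intel_bts events and the core PMU's BTS events both reserve the hardware through x86_add_exclusive(), the two kinds can never be active at the same time. A stand-alone sketch of that idea, with hypothetical names and a simplified layout rather than the kernel's actual implementation:

/* Hypothetical, simplified model of x86_add_exclusive()-style mutual
 * exclusion: a slot of one kind can be taken many times, but never
 * while a different kind holds the resource. */
#include <stdbool.h>
#include <stdio.h>

enum bts_user { USER_INTEL_BTS, USER_CORE_PMU_BTS, USER_MAX };

static int refcnt[USER_MAX];

static bool add_exclusive(enum bts_user what)
{
	for (int i = 0; i < USER_MAX; i++) {
		if (i != (int)what && refcnt[i] > 0)
			return false;	/* another kind already owns BTS */
	}
	refcnt[what]++;
	return true;
}

static void del_exclusive(enum bts_user what)
{
	refcnt[what]--;
}

int main(void)
{
	printf("intel_bts: %d\n", add_exclusive(USER_INTEL_BTS));	/* 1: granted */
	printf("core BTS:  %d\n", add_exclusive(USER_CORE_PMU_BTS));	/* 0: refused */
	del_exclusive(USER_INTEL_BTS);
	printf("core BTS:  %d\n", add_exclusive(USER_CORE_PMU_BTS));	/* 1: granted */
	return 0;
}

Since the two users are mutually exclusive, __intel_pmu_disable_all() never needs to turn intel_bts off on behalf of a core-PMU BTS event; pausing it inside the PMI handler is sufficient.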
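
The net effect of the later hunks is that intel_bts tracing is now quiesced once around the whole PMI handler instead of being toggled inside __intel_pmu_disable_all()/__intel_pmu_enable_all(). A minimal stand-alone sketch of that ordering (the helpers below are stubs for illustration, not the kernel implementations):

#include <stdio.h>

/* Stubs standing in for the kernel helpers; bodies are illustrative only. */
static void intel_bts_disable_local(void)	{ puts("bts: local disable"); }
static void intel_bts_enable_local(void)	{ puts("bts: local enable"); }
static void __intel_pmu_disable_all(void)	{ puts("pmu: disable all"); }
static void __intel_pmu_enable_all(void)	{ puts("pmu: enable all"); }
static int intel_pmu_drain_bts_buffer(void)	{ puts("bts: drain buffer"); return 0; }

/* Patched ordering: BTS goes quiet first and comes back last, so it is
 * off for the entire handler, while the generic enable/disable paths
 * used outside the PMI (e.g. for event batching) no longer touch it. */
static int pmi_handler_sketch(void)
{
	int handled;

	intel_bts_disable_local();
	__intel_pmu_disable_all();
	handled = intel_pmu_drain_bts_buffer();
	/* ... counter overflow processing elided ... */
	__intel_pmu_enable_all();
	intel_bts_enable_local();
	return handled;
}

int main(void)
{
	return pmi_handler_sketch();
}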