@@ -2076,6 +2076,14 @@ static struct extra_reg intel_tnt_extra_regs[] __read_mostly = {
 	EVENT_EXTRA_END
 };
 
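+/* Offcore response and PEBS load-latency extra regs for the Gracemont (Atom) cores */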
+static struct extra_reg intel_grt_extra_regs[] __read_mostly = {
+	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
+	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0),
+	INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1),
+	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x5d0),
+	EVENT_EXTRA_END
+};
+
 #define KNL_OT_L2_HITE		BIT_ULL(19) /* Other Tile L2 Hit */
 #define KNL_OT_L2_HITF		BIT_ULL(20) /* Other Tile L2 Hit */
 #define KNL_MCDRAM_LOCAL	BIT_ULL(21)
@@ -2430,6 +2438,16 @@ static int icl_set_topdown_event_period(struct perf_event *event)
 	return 0;
 }
 
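+/* Topdown/PERF_METRICS is only supported on the Alder Lake big cores; the Atom PMU has none */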
+static int adl_set_topdown_event_period(struct perf_event *event)
+{
+	struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu);
+
+	if (pmu->cpu_type != hybrid_big)
+		return 0;
+
+	return icl_set_topdown_event_period(event);
+}
+
 static inline u64 icl_get_metrics_event_value(u64 metric, u64 slots, int idx)
 {
 	u32 val;
@@ -2570,6 +2588,17 @@ static u64 icl_update_topdown_event(struct perf_event *event)
 					x86_pmu.num_topdown_events - 1);
 }
 
+static u64 adl_update_topdown_event(struct perf_event *event)
+{
+	struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu);
+
+	if (pmu->cpu_type != hybrid_big)
+		return 0;
+
+	return icl_update_topdown_event(event);
+}
+
+
 static void intel_pmu_read_topdown_event(struct perf_event *event)
 {
 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
@@ -3655,6 +3684,17 @@ static inline bool is_mem_loads_aux_event(struct perf_event *event)
 	return (event->attr.config & INTEL_ARCH_EVENT_MASK) == X86_CONFIG(.event=0x03, .umask=0x82);
 }
 
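+/*
+ * The auxiliary MEM_LOADS_AUX event is only required when the PMU sets
+ * PMU_FL_MEM_LOADS_AUX; on a hybrid system only the big cores need it.
+ */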
+static inline bool require_mem_loads_aux_event(struct perf_event *event)
+{
+	if (!(x86_pmu.flags & PMU_FL_MEM_LOADS_AUX))
+		return false;
+
+	if (is_hybrid())
+		return hybrid_pmu(event->pmu)->cpu_type == hybrid_big;
+
+	return true;
+}
+
 static inline bool intel_pmu_has_cap(struct perf_event *event, int idx)
 {
 	union perf_capabilities *intel_cap = &hybrid(event->pmu, intel_cap);
@@ -3779,7 +3819,7 @@ static int intel_pmu_hw_config(struct perf_event *event)
 	 * event. The rule is to simplify the implementation of the check.
 	 * That's because perf cannot have a complete group at the moment.
 	 */
-	if (x86_pmu.flags & PMU_FL_MEM_LOADS_AUX &&
+	if (require_mem_loads_aux_event(event) &&
 	    (event->attr.sample_type & PERF_SAMPLE_DATA_SRC) &&
 	    is_mem_loads_event(event)) {
 		struct perf_event *leader = event->group_leader;
@@ -4056,6 +4096,39 @@ tfa_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
 	return c;
 }
 
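+/* Big cores take the Sapphire Rapids event constraints, Atom cores the Tremont ones */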
+static struct event_constraint *
+adl_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
+			  struct perf_event *event)
+{
+	struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu);
+
+	if (pmu->cpu_type == hybrid_big)
+		return spr_get_event_constraints(cpuc, idx, event);
+	else if (pmu->cpu_type == hybrid_small)
+		return tnt_get_event_constraints(cpuc, idx, event);
+
+	WARN_ON(1);
+	return &emptyconstraint;
+}
+
+static int adl_hw_config(struct perf_event *event)
+{
+	struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu);
+
+	if (pmu->cpu_type == hybrid_big)
+		return hsw_hw_config(event);
+	else if (pmu->cpu_type == hybrid_small)
+		return intel_pmu_hw_config(event);
+
+	WARN_ON(1);
+	return -EOPNOTSUPP;
+}
+
+static u8 adl_get_hybrid_cpu_type(void)
+{
+	return hybrid_big;
+}
+
 /*
  * Broadwell:
  *
@@ -4416,6 +4489,14 @@ static int intel_pmu_aux_output_match(struct perf_event *event)
 	return is_intel_pt_event(event);
 }
 
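+/* Events on a hybrid PMU may only run on the CPUs in that PMU's supported_cpus mask */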
+static int intel_pmu_filter_match(struct perf_event *event)
+{
+	struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu);
+	unsigned int cpu = smp_processor_id();
+
+	return cpumask_test_cpu(cpu, &pmu->supported_cpus);
+}
+
 PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63");
 
 PMU_FORMAT_ATTR(ldlat, "config1:0-15");
@@ -5118,6 +5199,84 @@ static const struct attribute_group *attr_update[] = {
 	NULL,
 };
 
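+/*
+ * hybrid_big_small attribute strings carry one encoding per hybrid PMU,
+ * separated by ';': the cpu_core encoding first, then the cpu_atom one.
+ */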
+EVENT_ATTR_STR_HYBRID(slots, slots_adl, "event=0x00,umask=0x4", hybrid_big);
+EVENT_ATTR_STR_HYBRID(topdown-retiring, td_retiring_adl, "event=0xc2,umask=0x0;event=0x00,umask=0x80", hybrid_big_small);
+EVENT_ATTR_STR_HYBRID(topdown-bad-spec, td_bad_spec_adl, "event=0x73,umask=0x0;event=0x00,umask=0x81", hybrid_big_small);
+EVENT_ATTR_STR_HYBRID(topdown-fe-bound, td_fe_bound_adl, "event=0x71,umask=0x0;event=0x00,umask=0x82", hybrid_big_small);
+EVENT_ATTR_STR_HYBRID(topdown-be-bound, td_be_bound_adl, "event=0x74,umask=0x0;event=0x00,umask=0x83", hybrid_big_small);
+EVENT_ATTR_STR_HYBRID(topdown-heavy-ops, td_heavy_ops_adl, "event=0x00,umask=0x84", hybrid_big);
+EVENT_ATTR_STR_HYBRID(topdown-br-mispredict, td_br_mis_adl, "event=0x00,umask=0x85", hybrid_big);
+EVENT_ATTR_STR_HYBRID(topdown-fetch-lat, td_fetch_lat_adl, "event=0x00,umask=0x86", hybrid_big);
+EVENT_ATTR_STR_HYBRID(topdown-mem-bound, td_mem_bound_adl, "event=0x00,umask=0x87", hybrid_big);
+
+static struct attribute *adl_hybrid_events_attrs[] = {
+	EVENT_PTR(slots_adl),
+	EVENT_PTR(td_retiring_adl),
+	EVENT_PTR(td_bad_spec_adl),
+	EVENT_PTR(td_fe_bound_adl),
+	EVENT_PTR(td_be_bound_adl),
+	EVENT_PTR(td_heavy_ops_adl),
+	EVENT_PTR(td_br_mis_adl),
+	EVENT_PTR(td_fetch_lat_adl),
+	EVENT_PTR(td_mem_bound_adl),
+	NULL,
+};
+
+/* Must be in IDX order */
+EVENT_ATTR_STR_HYBRID(mem-loads, mem_ld_adl, "event=0xd0,umask=0x5,ldlat=3;event=0xcd,umask=0x1,ldlat=3", hybrid_big_small);
+EVENT_ATTR_STR_HYBRID(mem-stores, mem_st_adl, "event=0xd0,umask=0x6;event=0xcd,umask=0x2", hybrid_big_small);
+EVENT_ATTR_STR_HYBRID(mem-loads-aux, mem_ld_aux_adl, "event=0x03,umask=0x82", hybrid_big);
+
+static struct attribute *adl_hybrid_mem_attrs[] = {
+	EVENT_PTR(mem_ld_adl),
+	EVENT_PTR(mem_st_adl),
+	EVENT_PTR(mem_ld_aux_adl),
+	NULL,
+};
+
+EVENT_ATTR_STR_HYBRID(tx-start, tx_start_adl, "event=0xc9,umask=0x1", hybrid_big);
+EVENT_ATTR_STR_HYBRID(tx-commit, tx_commit_adl, "event=0xc9,umask=0x2", hybrid_big);
+EVENT_ATTR_STR_HYBRID(tx-abort, tx_abort_adl, "event=0xc9,umask=0x4", hybrid_big);
+EVENT_ATTR_STR_HYBRID(tx-conflict, tx_conflict_adl, "event=0x54,umask=0x1", hybrid_big);
+EVENT_ATTR_STR_HYBRID(cycles-t, cycles_t_adl, "event=0x3c,in_tx=1", hybrid_big);
+EVENT_ATTR_STR_HYBRID(cycles-ct, cycles_ct_adl, "event=0x3c,in_tx=1,in_tx_cp=1", hybrid_big);
+EVENT_ATTR_STR_HYBRID(tx-capacity-read, tx_capacity_read_adl, "event=0x54,umask=0x80", hybrid_big);
+EVENT_ATTR_STR_HYBRID(tx-capacity-write, tx_capacity_write_adl, "event=0x54,umask=0x2", hybrid_big);
+
+static struct attribute *adl_hybrid_tsx_attrs[] = {
+	EVENT_PTR(tx_start_adl),
+	EVENT_PTR(tx_abort_adl),
+	EVENT_PTR(tx_commit_adl),
+	EVENT_PTR(tx_capacity_read_adl),
+	EVENT_PTR(tx_capacity_write_adl),
+	EVENT_PTR(tx_conflict_adl),
+	EVENT_PTR(cycles_t_adl),
+	EVENT_PTR(cycles_ct_adl),
+	NULL,
+};
+
+FORMAT_ATTR_HYBRID(in_tx, hybrid_big);
+FORMAT_ATTR_HYBRID(in_tx_cp, hybrid_big);
+FORMAT_ATTR_HYBRID(offcore_rsp, hybrid_big_small);
+FORMAT_ATTR_HYBRID(ldlat, hybrid_big_small);
+FORMAT_ATTR_HYBRID(frontend, hybrid_big);
+
+static struct attribute *adl_hybrid_extra_attr_rtm[] = {
+	FORMAT_HYBRID_PTR(in_tx),
+	FORMAT_HYBRID_PTR(in_tx_cp),
+	FORMAT_HYBRID_PTR(offcore_rsp),
+	FORMAT_HYBRID_PTR(ldlat),
+	FORMAT_HYBRID_PTR(frontend),
+	NULL,
+};
+
+static struct attribute *adl_hybrid_extra_attr[] = {
+	FORMAT_HYBRID_PTR(offcore_rsp),
+	FORMAT_HYBRID_PTR(ldlat),
+	FORMAT_HYBRID_PTR(frontend),
+	NULL,
+};
+
 static bool is_attr_for_this_pmu(struct kobject *kobj, struct attribute *attr)
 {
 	struct device *dev = kobj_to_dev(kobj);
@@ -5347,6 +5506,7 @@ __init int intel_pmu_init(void)
 	bool pmem = false;
 	int version, i;
 	char *name;
+	struct x86_hybrid_pmu *pmu;
 
 	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
 		switch (boot_cpu_data.x86) {
@@ -5941,6 +6101,99 @@ __init int intel_pmu_init(void)
 		name = "sapphire_rapids";
 		break;
 
+	case INTEL_FAM6_ALDERLAKE:
+	case INTEL_FAM6_ALDERLAKE_L:
+		/*
+		 * Alder Lake has 2 types of CPU, core and atom.
+		 *
+		 * Initialize the common PerfMon capabilities here.
+		 */
+		x86_pmu.hybrid_pmu = kcalloc(X86_HYBRID_NUM_PMUS,
+					     sizeof(struct x86_hybrid_pmu),
+					     GFP_KERNEL);
+		if (!x86_pmu.hybrid_pmu)
+			return -ENOMEM;
+		static_branch_enable(&perf_is_hybrid);
+		x86_pmu.num_hybrid_pmus = X86_HYBRID_NUM_PMUS;
+
+		x86_pmu.late_ack = true;
+		x86_pmu.pebs_aliases = NULL;
+		x86_pmu.pebs_prec_dist = true;
+		x86_pmu.pebs_block = true;
+		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
+		x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
+		x86_pmu.flags |= PMU_FL_PEBS_ALL;
+		x86_pmu.flags |= PMU_FL_INSTR_LATENCY;
+		x86_pmu.flags |= PMU_FL_MEM_LOADS_AUX;
+		x86_pmu.lbr_pt_coexist = true;
+		intel_pmu_pebs_data_source_skl(false);
+		x86_pmu.num_topdown_events = 8;
+		x86_pmu.update_topdown_event = adl_update_topdown_event;
+		x86_pmu.set_topdown_event_period = adl_set_topdown_event_period;
+
+		x86_pmu.filter_match = intel_pmu_filter_match;
+		x86_pmu.get_event_constraints = adl_get_event_constraints;
+		x86_pmu.hw_config = adl_hw_config;
+		x86_pmu.limit_period = spr_limit_period;
+		x86_pmu.get_hybrid_cpu_type = adl_get_hybrid_cpu_type;
+		/*
+		 * The rtm_abort_event is used to check whether to enable GPRs
+		 * for the RTM abort event. Atom doesn't have the RTM abort
+		 * event. There is no harm in setting it in the common
+		 * x86_pmu.rtm_abort_event.
+		 */
+		x86_pmu.rtm_abort_event = X86_CONFIG(.event=0xc9, .umask=0x04);
+
+		td_attr = adl_hybrid_events_attrs;
+		mem_attr = adl_hybrid_mem_attrs;
+		tsx_attr = adl_hybrid_tsx_attrs;
+		extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
+			adl_hybrid_extra_attr_rtm : adl_hybrid_extra_attr;
+
+		/* Initialize big core specific PerfMon capabilities.*/
+		pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX];
+		pmu->name = "cpu_core";
+		pmu->cpu_type = hybrid_big;
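+		/*
+		 * The big cores have two more GP counters and one more fixed
+		 * counter than the common counts enumerated in x86_pmu.
+		 */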
+		pmu->num_counters = x86_pmu.num_counters + 2;
+		pmu->num_counters_fixed = x86_pmu.num_counters_fixed + 1;
+		pmu->max_pebs_events = min_t(unsigned, MAX_PEBS_EVENTS, pmu->num_counters);
+		pmu->unconstrained = (struct event_constraint)
+					__EVENT_CONSTRAINT(0, (1ULL << pmu->num_counters) - 1,
+							   0, pmu->num_counters, 0, 0);
+		pmu->intel_cap.capabilities = x86_pmu.intel_cap.capabilities;
+		pmu->intel_cap.perf_metrics = 1;
+		pmu->intel_cap.pebs_output_pt_available = 0;
+
+		memcpy(pmu->hw_cache_event_ids, spr_hw_cache_event_ids, sizeof(pmu->hw_cache_event_ids));
+		memcpy(pmu->hw_cache_extra_regs, spr_hw_cache_extra_regs, sizeof(pmu->hw_cache_extra_regs));
+		pmu->event_constraints = intel_spr_event_constraints;
+		pmu->pebs_constraints = intel_spr_pebs_event_constraints;
+		pmu->extra_regs = intel_spr_extra_regs;
+
+		/* Initialize Atom core specific PerfMon capabilities.*/
+		pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_ATOM_IDX];
+		pmu->name = "cpu_atom";
+		pmu->cpu_type = hybrid_small;
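+		/*
+		 * The Atom cores use the enumerated counter counts unchanged;
+		 * they have no perf metrics but keep PEBS output to Intel PT.
+		 */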
+		pmu->num_counters = x86_pmu.num_counters;
+		pmu->num_counters_fixed = x86_pmu.num_counters_fixed;
+		pmu->max_pebs_events = x86_pmu.max_pebs_events;
+		pmu->unconstrained = (struct event_constraint)
+					__EVENT_CONSTRAINT(0, (1ULL << pmu->num_counters) - 1,
+							   0, pmu->num_counters, 0, 0);
+		pmu->intel_cap.capabilities = x86_pmu.intel_cap.capabilities;
+		pmu->intel_cap.perf_metrics = 0;
+		pmu->intel_cap.pebs_output_pt_available = 1;
+
+		memcpy(pmu->hw_cache_event_ids, glp_hw_cache_event_ids, sizeof(pmu->hw_cache_event_ids));
+		memcpy(pmu->hw_cache_extra_regs, tnt_hw_cache_extra_regs, sizeof(pmu->hw_cache_extra_regs));
+		pmu->hw_cache_event_ids[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1;
+		pmu->event_constraints = intel_slm_event_constraints;
+		pmu->pebs_constraints = intel_grt_pebs_event_constraints;
+		pmu->extra_regs = intel_grt_extra_regs;
+		pr_cont("Alderlake Hybrid events, ");
+		name = "alderlake_hybrid";
+		break;
+
 	default:
 		switch (x86_pmu.version) {
 		case 1: