@@ -64,27 +64,25 @@ static inline bool constraint_match(struct event_constraint *c, u64 ecode)
 	return ((ecode & c->cmask) - c->code) <= (u64)c->size;
 }
 
+#define PERF_ARCH(name, val)	\
+	PERF_X86_EVENT_##name = val,
+
 /*
  * struct hw_perf_event.flags flags
  */
-#define PERF_X86_EVENT_PEBS_LDLAT	0x00001 /* ld+ldlat data address sampling */
-#define PERF_X86_EVENT_PEBS_ST		0x00002 /* st data address sampling */
-#define PERF_X86_EVENT_PEBS_ST_HSW	0x00004 /* haswell style datala, store */
-#define PERF_X86_EVENT_PEBS_LD_HSW	0x00008 /* haswell style datala, load */
-#define PERF_X86_EVENT_PEBS_NA_HSW	0x00010 /* haswell style datala, unknown */
-#define PERF_X86_EVENT_EXCL		0x00020 /* HT exclusivity on counter */
-#define PERF_X86_EVENT_DYNAMIC		0x00040 /* dynamic alloc'd constraint */
-
-#define PERF_X86_EVENT_EXCL_ACCT	0x00100 /* accounted EXCL event */
-#define PERF_X86_EVENT_AUTO_RELOAD	0x00200 /* use PEBS auto-reload */
-#define PERF_X86_EVENT_LARGE_PEBS	0x00400 /* use large PEBS */
-#define PERF_X86_EVENT_PEBS_VIA_PT	0x00800 /* use PT buffer for PEBS */
-#define PERF_X86_EVENT_PAIR		0x01000 /* Large Increment per Cycle */
-#define PERF_X86_EVENT_LBR_SELECT	0x02000 /* Save/Restore MSR_LBR_SELECT */
-#define PERF_X86_EVENT_TOPDOWN		0x04000 /* Count Topdown slots/metrics events */
-#define PERF_X86_EVENT_PEBS_STLAT	0x08000 /* st+stlat data address sampling */
-#define PERF_X86_EVENT_AMD_BRS		0x10000 /* AMD Branch Sampling */
-#define PERF_X86_EVENT_PEBS_LAT_HYBRID	0x20000 /* ld and st lat for hybrid */
+enum {
+#include "perf_event_flags.h"
+};
+
+#undef PERF_ARCH
+
+#define PERF_ARCH(name, val)	\
+	static_assert((PERF_X86_EVENT_##name & PERF_EVENT_FLAG_ARCH) == \
+		      PERF_X86_EVENT_##name);
+
+#include "perf_event_flags.h"
+
+#undef PERF_ARCH
 
 static inline bool is_topdown_count(struct perf_event *event)
 {
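
For context: the patch replaces the open-coded flag #defines with an X-macro expansion of perf_event_flags.h, so each flag is written exactly once and expanded twice. The first #include, with PERF_ARCH defined as an enumerator, generates the flag constants; the second, with PERF_ARCH redefined, generates a compile-time check that every flag fits inside PERF_EVENT_FLAG_ARCH, the mask of hw_perf_event.flags bits the generic core reserves for architecture code. The standalone sketch below shows the same pattern; DEMO_FLAG_LIST stands in for the #include of perf_event_flags.h so the example fits in one file, and DEMO_FLAG_ARCH is an illustrative mask, not the real PERF_EVENT_FLAG_ARCH value.

#include <assert.h>	/* static_assert (C11; the kernel's wrapper allows omitting the message) */

/*
 * Stand-in for perf_event_flags.h: one PERF_ARCH() entry per flag.
 * The names and values are a subset of those in the patch.
 */
#define DEMO_FLAG_LIST			\
	PERF_ARCH(PEBS_LDLAT, 0x00001)	\
	PERF_ARCH(PEBS_ST,    0x00002)	\
	PERF_ARCH(AMD_BRS,    0x10000)

/* Illustrative stand-in for PERF_EVENT_FLAG_ARCH from <linux/perf_event.h>. */
#define DEMO_FLAG_ARCH	0xfffff

/* First expansion: each entry becomes an enumerator, replacing the old #defines. */
#define PERF_ARCH(name, val)	PERF_X86_EVENT_##name = val,
enum {
	DEMO_FLAG_LIST
};
#undef PERF_ARCH

/*
 * Second expansion: each entry becomes a compile-time check that the
 * flag only uses bits inside the arch-reserved mask, so a flag that
 * collides with generic bits breaks the build instead of misbehaving
 * at runtime.
 */
#define PERF_ARCH(name, val)						\
	static_assert((PERF_X86_EVENT_##name & DEMO_FLAG_ARCH) ==	\
		      PERF_X86_EVENT_##name, "flag outside arch mask");
DEMO_FLAG_LIST
#undef PERF_ARCH

int main(void)
{
	/* The enumerators behave just like the old #defined constants. */
	return PERF_X86_EVENT_AMD_BRS == 0x10000 ? 0 : 1;
}

Because the list is written once, a new flag cannot be added without automatically being covered by the assertion pass as well.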