Commit 88081cf

Anshuman Khandual authored and Peter Zijlstra committed
x86/perf: Assert all platform event flags are within PERF_EVENT_FLAG_ARCH
Ensure all platform specific event flags are within PERF_EVENT_FLAG_ARCH.

Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: James Clark <james.clark@arm.com>
Link: https://lkml.kernel.org/r/20220907091924.439193-5-anshuman.khandual@arm.com
1 parent 91207f6 commit 88081cf

2 files changed: 38 additions, 18 deletions

arch/x86/events/perf_event.h

Lines changed: 16 additions & 18 deletions
@@ -64,27 +64,25 @@ static inline bool constraint_match(struct event_constraint *c, u64 ecode)
 	return ((ecode & c->cmask) - c->code) <= (u64)c->size;
 }
 
+#define PERF_ARCH(name, val)	\
+	PERF_X86_EVENT_##name = val,
+
 /*
  * struct hw_perf_event.flags flags
  */
-#define PERF_X86_EVENT_PEBS_LDLAT	0x00001 /* ld+ldlat data address sampling */
-#define PERF_X86_EVENT_PEBS_ST		0x00002 /* st data address sampling */
-#define PERF_X86_EVENT_PEBS_ST_HSW	0x00004 /* haswell style datala, store */
-#define PERF_X86_EVENT_PEBS_LD_HSW	0x00008 /* haswell style datala, load */
-#define PERF_X86_EVENT_PEBS_NA_HSW	0x00010 /* haswell style datala, unknown */
-#define PERF_X86_EVENT_EXCL		0x00020 /* HT exclusivity on counter */
-#define PERF_X86_EVENT_DYNAMIC		0x00040 /* dynamic alloc'd constraint */
-
-#define PERF_X86_EVENT_EXCL_ACCT	0x00100 /* accounted EXCL event */
-#define PERF_X86_EVENT_AUTO_RELOAD	0x00200 /* use PEBS auto-reload */
-#define PERF_X86_EVENT_LARGE_PEBS	0x00400 /* use large PEBS */
-#define PERF_X86_EVENT_PEBS_VIA_PT	0x00800 /* use PT buffer for PEBS */
-#define PERF_X86_EVENT_PAIR		0x01000 /* Large Increment per Cycle */
-#define PERF_X86_EVENT_LBR_SELECT	0x02000 /* Save/Restore MSR_LBR_SELECT */
-#define PERF_X86_EVENT_TOPDOWN		0x04000 /* Count Topdown slots/metrics events */
-#define PERF_X86_EVENT_PEBS_STLAT	0x08000 /* st+stlat data address sampling */
-#define PERF_X86_EVENT_AMD_BRS		0x10000 /* AMD Branch Sampling */
-#define PERF_X86_EVENT_PEBS_LAT_HYBRID	0x20000 /* ld and st lat for hybrid */
+enum {
+#include "perf_event_flags.h"
+};
+
+#undef PERF_ARCH
+
+#define PERF_ARCH(name, val)	\
+	static_assert((PERF_X86_EVENT_##name & PERF_EVENT_FLAG_ARCH) == \
+		      PERF_X86_EVENT_##name);
+
+#include "perf_event_flags.h"
+
+#undef PERF_ARCH
 
 static inline bool is_topdown_count(struct perf_event *event)
 {

arch/x86/events/perf_event_flags.h

Lines changed: 22 additions & 0 deletions
@@ -0,0 +1,22 @@
+
+/*
+ * struct hw_perf_event.flags flags
+ */
+PERF_ARCH(PEBS_LDLAT,		0x00001) /* ld+ldlat data address sampling */
+PERF_ARCH(PEBS_ST,		0x00002) /* st data address sampling */
+PERF_ARCH(PEBS_ST_HSW,		0x00004) /* haswell style datala, store */
+PERF_ARCH(PEBS_LD_HSW,		0x00008) /* haswell style datala, load */
+PERF_ARCH(PEBS_NA_HSW,		0x00010) /* haswell style datala, unknown */
+PERF_ARCH(EXCL,			0x00020) /* HT exclusivity on counter */
+PERF_ARCH(DYNAMIC,		0x00040) /* dynamic alloc'd constraint */
+			/*	0x00080	*/
+PERF_ARCH(EXCL_ACCT,		0x00100) /* accounted EXCL event */
+PERF_ARCH(AUTO_RELOAD,		0x00200) /* use PEBS auto-reload */
+PERF_ARCH(LARGE_PEBS,		0x00400) /* use large PEBS */
+PERF_ARCH(PEBS_VIA_PT,		0x00800) /* use PT buffer for PEBS */
+PERF_ARCH(PAIR,			0x01000) /* Large Increment per Cycle */
+PERF_ARCH(LBR_SELECT,		0x02000) /* Save/Restore MSR_LBR_SELECT */
+PERF_ARCH(TOPDOWN,		0x04000) /* Count Topdown slots/metrics events */
+PERF_ARCH(PEBS_STLAT,		0x08000) /* st+stlat data address sampling */
+PERF_ARCH(AMD_BRS,		0x10000) /* AMD Branch Sampling */
+PERF_ARCH(PEBS_LAT_HYBRID,	0x20000) /* ld and st lat for hybrid */
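
Taken together, the two hunks turn the x86 event flags into an X-macro list that is expanded twice: once to declare the PERF_X86_EVENT_* enumerators, and once to static_assert that every value fits inside PERF_EVENT_FLAG_ARCH. Below is a minimal standalone sketch of the same pattern for illustration only: the DEMO_* names and the DEMO_FLAG_ARCH value are made up, and the flag list is collapsed into a single list macro so the example compiles as one file, whereas the kernel keeps the list in perf_event_flags.h and #includes it twice.

/* demo_flags.c -- X-macro list expanded twice: enum, then compile-time checks.
 * All DEMO_* names and values are hypothetical stand-ins, not kernel code.
 */
#include <assert.h>	/* static_assert (C11) */

#define DEMO_FLAG_ARCH	0x000fffff	/* stand-in for PERF_EVENT_FLAG_ARCH */

/* Single source of truth for the flag list, analogous to perf_event_flags.h. */
#define DEMO_FLAG_LIST(X)		\
	X(PEBS_LDLAT,	0x00001)	\
	X(PEBS_ST,	0x00002)	\
	X(AMD_BRS,	0x10000)

/* First expansion: build the enumerators, like PERF_X86_EVENT_##name = val. */
#define DEMO_ENUM(name, val)	DEMO_EVENT_##name = val,
enum { DEMO_FLAG_LIST(DEMO_ENUM) };
#undef DEMO_ENUM

/* Second expansion: assert each flag stays within the architecture mask. */
#define DEMO_ASSERT(name, val)						\
	static_assert((DEMO_EVENT_##name & DEMO_FLAG_ARCH) ==		\
		      DEMO_EVENT_##name, #name " escapes DEMO_FLAG_ARCH");
DEMO_FLAG_LIST(DEMO_ASSERT)
#undef DEMO_ASSERT

int main(void) { return 0; }

If a flag were ever added past the mask, the second expansion would fail to compile instead of silently overlapping the generic perf flag bits, which is the property this commit adds for the x86 flags.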
