Commit a5df70c

Andi Kleen authored and Ingo Molnar committed
perf/x86: Only show format attributes when supported
Only show the Intel format attributes in sysfs when the feature is
actually supported with the current model numbers. This allows programs
to probe which format attributes are available, and to give users a
sensible error message if they are not.

This handles nearly all cases for Intel attributes since Nehalem, except
the (obscure) case when the model number is known but PEBS is disabled
in PERF_CAPABILITIES.

Signed-off-by: Andi Kleen <ak@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/20170822185201.9261-2-andi@firstfloor.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
1 parent d061841 commit a5df70c
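
The commit message notes that this change lets programs probe which format attributes are available. As a rough illustration only (not part of this commit), a userspace tool could check the corresponding sysfs file before building an event string. The sketch below assumes the core PMU is registered as "cpu" under /sys/bus/event_source/devices, and the helper name is invented for the example.

/*
 * Illustrative sketch, not part of the commit: probe whether a perf
 * format attribute is exported by the core PMU before using it.
 * Assumes the usual sysfs layout /sys/bus/event_source/devices/cpu/format/.
 */
#include <stdio.h>
#include <unistd.h>

static int pmu_format_supported(const char *attr)
{
	char path[256];

	snprintf(path, sizeof(path),
		 "/sys/bus/event_source/devices/cpu/format/%s", attr);
	return access(path, R_OK) == 0;	/* 1 if the attribute is exported */
}

int main(void)
{
	if (!pmu_format_supported("ldlat"))
		fprintf(stderr, "PEBS load latency (ldlat) not supported on this CPU\n");
	return 0;
}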

1 file changed: +41, −7 lines changed

arch/x86/events/intel/core.c

Lines changed: 41 additions & 7 deletions
@@ -3415,12 +3415,26 @@ static struct attribute *intel_arch3_formats_attr[] = {
 	&format_attr_any.attr,
 	&format_attr_inv.attr,
 	&format_attr_cmask.attr,
+	NULL,
+};
+
+static struct attribute *hsw_format_attr[] = {
 	&format_attr_in_tx.attr,
 	&format_attr_in_tx_cp.attr,
+	&format_attr_offcore_rsp.attr,
+	&format_attr_ldlat.attr,
+	NULL
+};
 
-	&format_attr_offcore_rsp.attr, /* XXX do NHM/WSM + SNB breakout */
-	&format_attr_ldlat.attr, /* PEBS load latency */
-	NULL,
+static struct attribute *nhm_format_attr[] = {
+	&format_attr_offcore_rsp.attr,
+	&format_attr_ldlat.attr,
+	NULL
+};
+
+static struct attribute *slm_format_attr[] = {
+	&format_attr_offcore_rsp.attr,
+	NULL
 };
 
 static struct attribute *skl_format_attr[] = {
@@ -3795,6 +3809,7 @@ __init int intel_pmu_init(void)
 	unsigned int unused;
 	struct extra_reg *er;
 	int version, i;
+	struct attribute **extra_attr = NULL;
 
 	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
 		switch (boot_cpu_data.x86) {
@@ -3906,6 +3921,7 @@ __init int intel_pmu_init(void)
 		intel_pmu_pebs_data_source_nhm();
 		x86_add_quirk(intel_nehalem_quirk);
 		x86_pmu.pebs_no_tlb = 1;
+		extra_attr = nhm_format_attr;
 
 		pr_cont("Nehalem events, ");
 		break;
@@ -3941,6 +3957,7 @@ __init int intel_pmu_init(void)
 		x86_pmu.extra_regs = intel_slm_extra_regs;
 		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
 		x86_pmu.cpu_events = slm_events_attrs;
+		extra_attr = slm_format_attr;
 		pr_cont("Silvermont events, ");
 		break;
 
@@ -3966,6 +3983,7 @@ __init int intel_pmu_init(void)
 		x86_pmu.lbr_pt_coexist = true;
 		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
 		x86_pmu.cpu_events = glm_events_attrs;
+		extra_attr = slm_format_attr;
 		pr_cont("Goldmont events, ");
 		break;
 
@@ -3992,6 +4010,7 @@ __init int intel_pmu_init(void)
 		x86_pmu.cpu_events = glm_events_attrs;
 		/* Goldmont Plus has 4-wide pipeline */
 		event_attr_td_total_slots_scale_glm.event_str = "4";
+		extra_attr = slm_format_attr;
 		pr_cont("Goldmont plus events, ");
 		break;
 
@@ -4021,6 +4040,7 @@ __init int intel_pmu_init(void)
 			X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);
 
 		intel_pmu_pebs_data_source_nhm();
+		extra_attr = nhm_format_attr;
 		pr_cont("Westmere events, ");
 		break;
 
@@ -4057,6 +4077,8 @@ __init int intel_pmu_init(void)
 		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
 			X86_CONFIG(.event=0xb1, .umask=0x01, .inv=1, .cmask=1);
 
+		extra_attr = nhm_format_attr;
+
 		pr_cont("SandyBridge events, ");
 		break;
 
@@ -4091,6 +4113,8 @@ __init int intel_pmu_init(void)
 		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
 			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
 
+		extra_attr = nhm_format_attr;
+
 		pr_cont("IvyBridge events, ");
 		break;
 
@@ -4119,6 +4143,8 @@ __init int intel_pmu_init(void)
 		x86_pmu.get_event_constraints = hsw_get_event_constraints;
 		x86_pmu.cpu_events = hsw_events_attrs;
 		x86_pmu.lbr_double_abort = true;
+		extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
+			hsw_format_attr : nhm_format_attr;
 		pr_cont("Haswell events, ");
 		break;
 
@@ -4155,6 +4181,8 @@ __init int intel_pmu_init(void)
 		x86_pmu.get_event_constraints = hsw_get_event_constraints;
 		x86_pmu.cpu_events = hsw_events_attrs;
 		x86_pmu.limit_period = bdw_limit_period;
+		extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
+			hsw_format_attr : nhm_format_attr;
 		pr_cont("Broadwell events, ");
 		break;
 
@@ -4173,7 +4201,7 @@ __init int intel_pmu_init(void)
 		/* all extra regs are per-cpu when HT is on */
 		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
 		x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
-
+		extra_attr = slm_format_attr;
 		pr_cont("Knights Landing/Mill events, ");
 		break;
 
@@ -4204,9 +4232,9 @@ __init int intel_pmu_init(void)
 
 		x86_pmu.hw_config = hsw_hw_config;
 		x86_pmu.get_event_constraints = hsw_get_event_constraints;
-		x86_pmu.format_attrs = merge_attr(intel_arch3_formats_attr,
-						  skl_format_attr);
-		WARN_ON(!x86_pmu.format_attrs);
+		extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
+			hsw_format_attr : nhm_format_attr;
+		extra_attr = merge_attr(extra_attr, skl_format_attr);
 		x86_pmu.cpu_events = hsw_events_attrs;
 		intel_pmu_pebs_data_source_skl(
 			boot_cpu_data.x86_model == INTEL_FAM6_SKYLAKE_X);
@@ -4229,6 +4257,12 @@ __init int intel_pmu_init(void)
 		}
 	}
 
+	if (version >= 2 && extra_attr) {
+		x86_pmu.format_attrs = merge_attr(intel_arch3_formats_attr,
+						  extra_attr);
+		WARN_ON(!x86_pmu.format_attrs);
+	}
+
 	if (x86_pmu.num_counters > INTEL_PMC_MAX_GENERIC) {
 		WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
 			x86_pmu.num_counters, INTEL_PMC_MAX_GENERIC);
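
For context on the hunks above: merge_attr() is a pre-existing helper in the x86 events code (not part of this diff) that concatenates two NULL-terminated attribute arrays into a newly allocated one, which is how the model-specific extra_attr list gets appended to intel_arch3_formats_attr at the end of intel_pmu_init(). The sketch below only illustrates that idea using plain malloc(); it is not the kernel's implementation.

/*
 * Illustrative sketch only: merge two NULL-terminated attribute arrays
 * into a freshly allocated, NULL-terminated array. Plain malloc() stands
 * in for the kernel's allocator.
 */
#include <stdlib.h>

struct attribute;	/* opaque for the purposes of this sketch */

static struct attribute **merge_attr_sketch(struct attribute **a,
					     struct attribute **b)
{
	struct attribute **merged;
	int i, j, k;

	for (i = 0; a[i]; i++)		/* count entries in a */
		;
	for (j = 0; b[j]; j++)		/* count entries in b */
		;

	merged = malloc((i + j + 1) * sizeof(struct attribute *));
	if (!merged)
		return NULL;

	for (k = 0; k < i; k++)
		merged[k] = a[k];
	for (k = 0; k < j; k++)
		merged[i + k] = b[k];
	merged[i + j] = NULL;		/* keep the result NULL-terminated */

	return merged;
}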
