@@ -3415,12 +3415,26 @@ static struct attribute *intel_arch3_formats_attr[] = {
 	&format_attr_any.attr,
 	&format_attr_inv.attr,
 	&format_attr_cmask.attr,
+	NULL,
+};
+
+static struct attribute *hsw_format_attr[] = {
 	&format_attr_in_tx.attr,
 	&format_attr_in_tx_cp.attr,
+	&format_attr_offcore_rsp.attr,
+	&format_attr_ldlat.attr,
+	NULL
+};
 
-	&format_attr_offcore_rsp.attr, /* XXX do NHM/WSM + SNB breakout */
-	&format_attr_ldlat.attr, /* PEBS load latency */
-	NULL,
+static struct attribute *nhm_format_attr[] = {
+	&format_attr_offcore_rsp.attr,
+	&format_attr_ldlat.attr,
+	NULL
+};
+
+static struct attribute *slm_format_attr[] = {
+	&format_attr_offcore_rsp.attr,
+	NULL
 };
 
 static struct attribute *skl_format_attr[] = {
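The arrays above follow the usual sysfs convention: NULL-terminated lists of struct attribute pointers, now split into per-microarchitecture chunks (hsw/nhm/slm) so that a format attribute is only exported on CPUs that actually support it. A minimal stand-alone sketch of that convention, using a stripped-down stand-in for struct attribute (everything below is illustrative, not the kernel's definitions):

#include <stdio.h>

/* Stripped-down stand-in for the kernel's struct attribute. */
struct attribute {
	const char *name;
};

static struct attribute format_attr_offcore_rsp = { .name = "offcore_rsp" };
static struct attribute format_attr_ldlat       = { .name = "ldlat" };

/* Same shape as nhm_format_attr in the hunk above: pointers, NULL sentinel. */
static struct attribute *nhm_format_attr[] = {
	&format_attr_offcore_rsp,
	&format_attr_ldlat,
	NULL
};

int main(void)
{
	/* Consumers walk the array until they hit the NULL terminator. */
	for (struct attribute **attr = nhm_format_attr; *attr; attr++)
		printf("format/%s\n", (*attr)->name);
	return 0;
}

The NULL sentinel is what lets helpers like merge_attr() and the sysfs core walk these arrays without carrying a separate length field.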
@@ -3795,6 +3809,7 @@ __init int intel_pmu_init(void)
 	unsigned int unused;
 	struct extra_reg *er;
 	int version, i;
+	struct attribute **extra_attr = NULL;
 
 	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
 		switch (boot_cpu_data.x86) {
@@ -3906,6 +3921,7 @@ __init int intel_pmu_init(void)
 		intel_pmu_pebs_data_source_nhm();
 		x86_add_quirk(intel_nehalem_quirk);
 		x86_pmu.pebs_no_tlb = 1;
+		extra_attr = nhm_format_attr;
 
 		pr_cont("Nehalem events, ");
 		break;
@@ -3941,6 +3957,7 @@ __init int intel_pmu_init(void)
 		x86_pmu.extra_regs = intel_slm_extra_regs;
 		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
 		x86_pmu.cpu_events = slm_events_attrs;
+		extra_attr = slm_format_attr;
 		pr_cont("Silvermont events, ");
 		break;
 
@@ -3966,6 +3983,7 @@ __init int intel_pmu_init(void)
 		x86_pmu.lbr_pt_coexist = true;
 		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
 		x86_pmu.cpu_events = glm_events_attrs;
+		extra_attr = slm_format_attr;
 		pr_cont("Goldmont events, ");
 		break;
 
@@ -3992,6 +4010,7 @@ __init int intel_pmu_init(void)
 		x86_pmu.cpu_events = glm_events_attrs;
 		/* Goldmont Plus has 4-wide pipeline */
 		event_attr_td_total_slots_scale_glm.event_str = "4";
+		extra_attr = slm_format_attr;
 		pr_cont("Goldmont plus events, ");
 		break;
 
@@ -4021,6 +4040,7 @@ __init int intel_pmu_init(void)
 			X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);
 
 		intel_pmu_pebs_data_source_nhm();
+		extra_attr = nhm_format_attr;
 		pr_cont("Westmere events, ");
 		break;
 
@@ -4057,6 +4077,8 @@ __init int intel_pmu_init(void)
 		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
 			X86_CONFIG(.event=0xb1, .umask=0x01, .inv=1, .cmask=1);
 
+		extra_attr = nhm_format_attr;
+
 		pr_cont("SandyBridge events, ");
 		break;
 
@@ -4091,6 +4113,8 @@ __init int intel_pmu_init(void)
 		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
 			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
 
+		extra_attr = nhm_format_attr;
+
 		pr_cont("IvyBridge events, ");
 		break;
 
@@ -4119,6 +4143,8 @@ __init int intel_pmu_init(void)
 		x86_pmu.get_event_constraints = hsw_get_event_constraints;
 		x86_pmu.cpu_events = hsw_events_attrs;
 		x86_pmu.lbr_double_abort = true;
+		extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
+			hsw_format_attr : nhm_format_attr;
 		pr_cont("Haswell events, ");
 		break;
 
@@ -4155,6 +4181,8 @@ __init int intel_pmu_init(void)
 		x86_pmu.get_event_constraints = hsw_get_event_constraints;
 		x86_pmu.cpu_events = hsw_events_attrs;
 		x86_pmu.limit_period = bdw_limit_period;
+		extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
+			hsw_format_attr : nhm_format_attr;
 		pr_cont("Broadwell events, ");
 		break;
 
@@ -4173,7 +4201,7 @@ __init int intel_pmu_init(void)
 		/* all extra regs are per-cpu when HT is on */
 		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
 		x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
-
+		extra_attr = slm_format_attr;
 		pr_cont("Knights Landing/Mill events, ");
 		break;
 
@@ -4204,9 +4232,9 @@ __init int intel_pmu_init(void)
 
 		x86_pmu.hw_config = hsw_hw_config;
 		x86_pmu.get_event_constraints = hsw_get_event_constraints;
-		x86_pmu.format_attrs = merge_attr(intel_arch3_formats_attr,
-						  skl_format_attr);
-		WARN_ON(!x86_pmu.format_attrs);
+		extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
+			hsw_format_attr : nhm_format_attr;
+		extra_attr = merge_attr(extra_attr, skl_format_attr);
 		x86_pmu.cpu_events = hsw_events_attrs;
 		intel_pmu_pebs_data_source_skl(
 			boot_cpu_data.x86_model == INTEL_FAM6_SKYLAKE_X);
@@ -4229,6 +4257,12 @@ __init int intel_pmu_init(void)
 		}
 	}
 
+	if (version >= 2 && extra_attr) {
+		x86_pmu.format_attrs = merge_attr(intel_arch3_formats_attr,
+						  extra_attr);
+		WARN_ON(!x86_pmu.format_attrs);
+	}
+
 	if (x86_pmu.num_counters > INTEL_PMC_MAX_GENERIC) {
 		WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
 		     x86_pmu.num_counters, INTEL_PMC_MAX_GENERIC);
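This tail of intel_pmu_init() now builds x86_pmu.format_attrs in one place: only on architectural perfmon version >= 2, and only when the model-specific code picked an extra_attr array. merge_attr() concatenates two NULL-terminated attribute arrays into a newly allocated one; below is a user-space sketch of that concatenation, assuming that behavior, with malloc standing in for kmalloc (merge_attr_sketch is a hypothetical name, not the kernel function):

#include <stdlib.h>

struct attribute;	/* opaque here; only pointers are stored */

/*
 * merge_attr()-style concatenation: count both NULL-terminated
 * arrays, allocate room for every entry plus the sentinel, and
 * copy a followed by b.
 */
static struct attribute **merge_attr_sketch(struct attribute **a,
					    struct attribute **b)
{
	size_t na = 0, nb = 0, j = 0, i;
	struct attribute **new;

	while (a[na])
		na++;
	while (b[nb])
		nb++;

	new = malloc((na + nb + 1) * sizeof(*new));
	if (!new)
		return NULL;	/* mirrored by WARN_ON(!x86_pmu.format_attrs) */

	for (i = 0; i < na; i++)
		new[j++] = a[i];
	for (i = 0; i < nb; i++)
		new[j++] = b[i];
	new[j] = NULL;

	return new;
}

Note that the Skylake branch merges twice: first hsw/nhm with skl_format_attr, then the result with intel_arch3_formats_attr here. The intermediate array from the first merge appears to stay allocated, which is harmless for a one-shot __init path.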