
Commit 706460a

sandip4n authored and Peter Zijlstra committed
perf/x86/amd/core: Add generic branch record interfaces
AMD processors that are capable of recording branches support either Branch Sampling (BRS) or Last Branch Record (LBR). In preparation for adding Last Branch Record Extension Version 2 (LbrExtV2) support, introduce new static calls which act as gateways to call into the feature-dependent functions based on what is available on the processor.

Signed-off-by: Sandipan Das <sandipan.das@amd.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/b75dbc32663cb395f0d701167e952c6a6b0445a3.1660211399.git.sandipan.das@amd.com
1 parent: 9603aa7
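The mechanism this commit introduces is the kernel's static_call() interface: each branch-record operation gets a call site with a safe default target (a NULL or return-0 trampoline), and initialization code later retargets it to whichever backend the processor supports. Unlike a plain function pointer, a static call is patched into a direct call at update time on architectures with static-call support, so the per-event fast paths (hw_config, reset, add, del) avoid indirect-branch overhead. Below is a minimal sketch of that gateway pattern; the identifiers branch_hw_config, my_brs_hw_config, core_hw_config and wire_up_brs are illustrative stand-ins, not the names added by this commit (those appear in the diff further down).

/*
 * Sketch of the static_call() gateway pattern; all identifiers here are
 * illustrative and do not come from arch/x86/events/amd/core.c.
 */
#include <linux/perf_event.h>
#include <linux/static_call.h>

typedef int (branch_hw_config_t)(struct perf_event *event);

/* Default target behaves as "return 0" until a backend is wired in. */
DEFINE_STATIC_CALL_RET0(branch_hw_config, branch_hw_config_t);

/* Stand-in for a feature-specific backend such as BRS. */
static int my_brs_hw_config(struct perf_event *event)
{
	/* Backend-specific event setup would go here. */
	return 0;
}

/* Callers always go through the gateway, never a backend directly. */
static int core_hw_config(struct perf_event *event)
{
	if (has_branch_stack(event))
		return static_call(branch_hw_config)(event);

	return 0;
}

/* Init code retargets the gateway once feature detection succeeds. */
static void wire_up_brs(void)
{
	static_call_update(branch_hw_config, my_brs_hw_config);
}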

1 file changed: 22 additions, 12 deletions

arch/x86/events/amd/core.c

@@ -330,6 +330,8 @@ static inline bool amd_is_pair_event_code(struct hw_perf_event *hwc)
 	}
 }
 
+DEFINE_STATIC_CALL_RET0(amd_pmu_branch_hw_config, *x86_pmu.hw_config);
+
 static int amd_core_hw_config(struct perf_event *event)
 {
 	if (event->attr.exclude_host && event->attr.exclude_guest)
@@ -349,7 +351,7 @@ static int amd_core_hw_config(struct perf_event *event)
 		event->hw.flags |= PERF_X86_EVENT_PAIR;
 
 	if (has_branch_stack(event))
-		return amd_brs_hw_config(event);
+		return static_call(amd_pmu_branch_hw_config)(event);
 
 	return 0;
 }
@@ -518,8 +520,14 @@ static struct amd_nb *amd_alloc_nb(int cpu)
 	return nb;
 }
 
+typedef void (amd_pmu_branch_reset_t)(void);
+DEFINE_STATIC_CALL_NULL(amd_pmu_branch_reset, amd_pmu_branch_reset_t);
+
 static void amd_pmu_cpu_reset(int cpu)
 {
+	if (x86_pmu.lbr_nr)
+		static_call(amd_pmu_branch_reset)();
+
 	if (x86_pmu.version < 2)
 		return;
 
@@ -576,7 +584,6 @@ static void amd_pmu_cpu_starting(int cpu)
 	cpuc->amd_nb->nb_id = nb_id;
 	cpuc->amd_nb->refcnt++;
 
-	amd_brs_reset();
 	amd_pmu_cpu_reset(cpu);
 }
 
@@ -771,16 +778,20 @@ static void amd_pmu_v2_disable_all(void)
 	amd_pmu_check_overflow();
 }
 
+DEFINE_STATIC_CALL_NULL(amd_pmu_branch_add, *x86_pmu.add);
+
 static void amd_pmu_add_event(struct perf_event *event)
 {
 	if (needs_branch_stack(event))
-		amd_pmu_brs_add(event);
+		static_call(amd_pmu_branch_add)(event);
 }
 
+DEFINE_STATIC_CALL_NULL(amd_pmu_branch_del, *x86_pmu.del);
+
 static void amd_pmu_del_event(struct perf_event *event)
 {
 	if (needs_branch_stack(event))
-		amd_pmu_brs_del(event);
+		static_call(amd_pmu_branch_del)(event);
 }
 
 /*
@@ -1184,13 +1195,6 @@ static ssize_t amd_event_sysfs_show(char *page, u64 config)
 	return x86_event_sysfs_show(page, config, event);
 }
 
-static void amd_pmu_sched_task(struct perf_event_context *ctx,
-			       bool sched_in)
-{
-	if (sched_in && x86_pmu.lbr_nr)
-		amd_pmu_brs_sched_task(ctx, sched_in);
-}
-
 static u64 amd_pmu_limit_period(struct perf_event *event, u64 left)
 {
 	/*
@@ -1375,8 +1379,14 @@ static int __init amd_core_pmu_init(void)
 	 */
 	if (boot_cpu_data.x86 >= 0x19 && !amd_brs_init()) {
 		x86_pmu.get_event_constraints = amd_get_event_constraints_f19h;
-		x86_pmu.sched_task = amd_pmu_sched_task;
+		x86_pmu.sched_task = amd_pmu_brs_sched_task;
 		x86_pmu.limit_period = amd_pmu_limit_period;
+
+		static_call_update(amd_pmu_branch_hw_config, amd_brs_hw_config);
+		static_call_update(amd_pmu_branch_reset, amd_brs_reset);
+		static_call_update(amd_pmu_branch_add, amd_pmu_brs_add);
+		static_call_update(amd_pmu_branch_del, amd_pmu_brs_del);
+
 		/*
 		 * put_event_constraints callback same as Fam17h, set above
 		 */
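As the commit message notes, these gateways are preparation for an LbrExtV2 backend that can be selected at runtime instead of BRS. Purely as a speculative illustration of how a later backend could reuse them, assuming hypothetical amd_pmu_lbr_* functions that this commit does not add:

	/*
	 * Hypothetical follow-up wiring for an LbrExtV2 backend; the
	 * amd_pmu_lbr_* symbols are assumptions, not part of this commit.
	 */
	if (x86_pmu.lbr_nr) {
		static_call_update(amd_pmu_branch_hw_config, amd_pmu_lbr_hw_config);
		static_call_update(amd_pmu_branch_reset, amd_pmu_lbr_reset);
		static_call_update(amd_pmu_branch_add, amd_pmu_lbr_add);
		static_call_update(amd_pmu_branch_del, amd_pmu_lbr_del);
	}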
