
Commit 0912037

melver authored and Peter Zijlstra committed
perf/hw_breakpoint: Reduce contention with large number of tasks
While optimizing task_bp_pinned()'s runtime complexity to O(1) on average helps reduce time spent in the critical section, we still suffer due to serializing everything via 'nr_bp_mutex'. Indeed, a profile shows that now contention is the biggest issue:

    95.93%  [kernel]  [k] osq_lock
     0.70%  [kernel]  [k] mutex_spin_on_owner
     0.22%  [kernel]  [k] smp_cfm_core_cond
     0.18%  [kernel]  [k] task_bp_pinned
     0.18%  [kernel]  [k] rhashtable_jhash2
     0.15%  [kernel]  [k] queued_spin_lock_slowpath

when running the breakpoint benchmark with (system with 256 CPUs):

 | $> perf bench -r 30 breakpoint thread -b 4 -p 64 -t 64
 | # Running 'breakpoint/thread' benchmark:
 | # Created/joined 30 threads with 4 breakpoints and 64 parallelism
 |      Total time: 0.207 [sec]
 |
 |      108.267188 usecs/op
 |     6929.100000 usecs/op/cpu

The main concern for synchronizing the breakpoint constraints data is that a consistent snapshot of the per-CPU and per-task data is observed. The access pattern is as follows:

 1. If the target is a task: the task's pinned breakpoints are counted, checked for space, and then appended to; only bp_cpuinfo::cpu_pinned is used to check for conflicts with CPU-only breakpoints; bp_cpuinfo::tsk_pinned are incremented/decremented, but otherwise unused.

 2. If the target is a CPU: bp_cpuinfo::cpu_pinned are counted, along with bp_cpuinfo::tsk_pinned; after a successful check, cpu_pinned is incremented. No per-task breakpoints are checked.

Since rhltable safely synchronizes insertions/deletions, we can allow concurrency as follows:

 1. If the target is a task: independent tasks may update and check the constraints concurrently, but same-task target calls need to be serialized; since bp_cpuinfo::tsk_pinned is only updated, but not checked, these modifications can happen concurrently by switching tsk_pinned to atomic_t.

 2. If the target is a CPU: access to the per-CPU constraints needs to be serialized with other CPU-target and task-target callers (to stabilize the bp_cpuinfo::tsk_pinned snapshot).

We can allow the above concurrency by introducing a per-CPU constraints data reader-writer lock (bp_cpuinfo_sem), and per-task mutexes (reuses task_struct::perf_event_mutex):

 1. If the target is a task: acquires perf_event_mutex, and acquires bp_cpuinfo_sem as a reader. The choice of percpu-rwsem minimizes contention in the presence of many read-lock but few write-lock acquisitions: we assume many orders of magnitude more task target breakpoints creations/destructions than CPU target breakpoints.

 2. If the target is a CPU: acquires bp_cpuinfo_sem as a writer.

With these changes, contention with thousands of tasks is reduced to the point where waiting on locking no longer dominates the profile:

 | $> perf bench -r 30 breakpoint thread -b 4 -p 64 -t 64
 | # Running 'breakpoint/thread' benchmark:
 | # Created/joined 30 threads with 4 breakpoints and 64 parallelism
 |      Total time: 0.077 [sec]
 |
 |       40.201563 usecs/op
 |     2572.900000 usecs/op/cpu

    21.54%  [kernel]  [k] task_bp_pinned
    20.18%  [kernel]  [k] rhashtable_jhash2
     6.81%  [kernel]  [k] toggle_bp_slot
     5.47%  [kernel]  [k] queued_spin_lock_slowpath
     3.75%  [kernel]  [k] smp_cfm_core_cond
     3.48%  [kernel]  [k] bcmp

On this particular setup that's a speedup of 2.7x.
We're also getting closer to the theoretical ideal performance through optimizations in hw_breakpoint.c -- constraints accounting disabled:

 | perf bench -r 30 breakpoint thread -b 4 -p 64 -t 64
 | # Running 'breakpoint/thread' benchmark:
 | # Created/joined 30 threads with 4 breakpoints and 64 parallelism
 |      Total time: 0.067 [sec]
 |
 |       35.286458 usecs/op
 |     2258.333333 usecs/op/cpu

Which means the current implementation is ~12% slower than the theoretical ideal.

For reference, performance without any breakpoints:

 | $> bench -r 30 breakpoint thread -b 0 -p 64 -t 64
 | # Running 'breakpoint/thread' benchmark:
 | # Created/joined 30 threads with 0 breakpoints and 64 parallelism
 |      Total time: 0.060 [sec]
 |
 |      31.365625 usecs/op
 |     2007.400000 usecs/op/cpu

On a system with 256 CPUs, the theoretical ideal is only ~12% slower than no breakpoints at all; the current implementation is ~28% slower.

Signed-off-by: Marco Elver <elver@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Dmitry Vyukov <dvyukov@google.com>
Acked-by: Ian Rogers <irogers@google.com>
Link: https://lore.kernel.org/r/20220829124719.675715-12-elver@google.com
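For intuition, the two-level locking scheme the message describes can be sketched in user-space terms. The snippet below is an illustrative analogue only, not the kernel code: the names (BP_SLOTS, bp_cpuinfo_lock, struct task, task_target_update, cpu_target_max_pinned) are hypothetical, pthreads and C11 atomics stand in for the kernel's percpu-rwsem, task_struct::perf_event_mutex and atomic_t, and the kernel additionally keeps tsk_pinned per CPU. Task-target updates take a per-task mutex plus the shared lock in read mode, so unrelated tasks proceed concurrently and their atomic histogram updates cannot be lost; CPU-target checks take the shared lock in write mode to obtain a stable snapshot.

  /* Illustrative user-space analogue of the scheme above -- not kernel code. */
  #include <pthread.h>
  #include <stdatomic.h>

  #define BP_SLOTS 4

  static pthread_rwlock_t bp_cpuinfo_lock = PTHREAD_RWLOCK_INITIALIZER; /* ~ bp_cpuinfo_sem */
  static atomic_int tsk_pinned[BP_SLOTS]; /* ~ bp_cpuinfo::tsk_pinned (per CPU in the kernel) */

  struct task {                           /* ~ task_struct */
  	pthread_mutex_t perf_event_mutex;
  };

  /* Task target: serialize same-task callers, run concurrently across tasks. */
  void task_target_update(struct task *tsk, int idx, int delta)
  {
  	pthread_mutex_lock(&tsk->perf_event_mutex);
  	pthread_rwlock_rdlock(&bp_cpuinfo_lock);
  	atomic_fetch_add(&tsk_pinned[idx], delta); /* atomic update: safe under the read lock */
  	pthread_rwlock_unlock(&bp_cpuinfo_lock);
  	pthread_mutex_unlock(&tsk->perf_event_mutex);
  }

  /* CPU target: exclusive access, so tsk_pinned cannot change while being read. */
  int cpu_target_max_pinned(void)
  {
  	int max = 0;

  	pthread_rwlock_wrlock(&bp_cpuinfo_lock);
  	for (int i = BP_SLOTS - 1; i >= 0; i--) {
  		if (atomic_load(&tsk_pinned[i]) > 0) {
  			max = i + 1;
  			break;
  		}
  	}
  	pthread_rwlock_unlock(&bp_cpuinfo_lock);
  	return max;
  }

As the message notes, the kernel uses a percpu-rwsem rather than a plain reader-writer lock because task-target (read-lock) acquisitions are expected to outnumber CPU-target (write-lock) acquisitions by orders of magnitude.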
1 parent 01fe8a3 commit 0912037

1 file changed: +133 -28 lines changed


kernel/events/hw_breakpoint.c

Lines changed: 133 additions & 28 deletions
@@ -19,6 +19,7 @@
 
 #include <linux/hw_breakpoint.h>
 
+#include <linux/atomic.h>
 #include <linux/bug.h>
 #include <linux/cpu.h>
 #include <linux/export.h>
@@ -28,6 +29,7 @@
 #include <linux/kernel.h>
 #include <linux/mutex.h>
 #include <linux/notifier.h>
+#include <linux/percpu-rwsem.h>
 #include <linux/percpu.h>
 #include <linux/rhashtable.h>
 #include <linux/sched.h>
@@ -41,9 +43,9 @@ struct bp_cpuinfo {
 	unsigned int	cpu_pinned;
 	/* tsk_pinned[n] is the number of tasks having n+1 breakpoints */
 #ifdef hw_breakpoint_slots
-	unsigned int	tsk_pinned[hw_breakpoint_slots(0)];
+	atomic_t	tsk_pinned[hw_breakpoint_slots(0)];
 #else
-	unsigned int	*tsk_pinned;
+	atomic_t	*tsk_pinned;
 #endif
 };
 
@@ -65,8 +67,79 @@ static const struct rhashtable_params task_bps_ht_params = {
 
 static bool constraints_initialized __ro_after_init;
 
-/* Serialize accesses to the above constraints */
-static DEFINE_MUTEX(nr_bp_mutex);
+/*
+ * Synchronizes accesses to the per-CPU constraints; the locking rules are:
+ *
+ *   1. Atomic updates to bp_cpuinfo::tsk_pinned only require a held read-lock
+ *      (due to bp_slots_histogram::count being atomic, no update are lost).
+ *
+ *   2. Holding a write-lock is required for computations that require a
+ *      stable snapshot of all bp_cpuinfo::tsk_pinned.
+ *
+ *   3. In all other cases, non-atomic accesses require the appropriately held
+ *      lock (read-lock for read-only accesses; write-lock for reads/writes).
+ */
+DEFINE_STATIC_PERCPU_RWSEM(bp_cpuinfo_sem);
+
+/*
+ * Return mutex to serialize accesses to per-task lists in task_bps_ht. Since
+ * rhltable synchronizes concurrent insertions/deletions, independent tasks may
+ * insert/delete concurrently; therefore, a mutex per task is sufficient.
+ *
+ * Uses task_struct::perf_event_mutex, to avoid extending task_struct with a
+ * hw_breakpoint-only mutex, which may be infrequently used. The caveat here is
+ * that hw_breakpoint may contend with per-task perf event list management. The
+ * assumption is that perf usecases involving hw_breakpoints are very unlikely
+ * to result in unnecessary contention.
+ */
+static inline struct mutex *get_task_bps_mutex(struct perf_event *bp)
+{
+	struct task_struct *tsk = bp->hw.target;
+
+	return tsk ? &tsk->perf_event_mutex : NULL;
+}
+
+static struct mutex *bp_constraints_lock(struct perf_event *bp)
+{
+	struct mutex *tsk_mtx = get_task_bps_mutex(bp);
+
+	if (tsk_mtx) {
+		mutex_lock(tsk_mtx);
+		percpu_down_read(&bp_cpuinfo_sem);
+	} else {
+		percpu_down_write(&bp_cpuinfo_sem);
+	}
+
+	return tsk_mtx;
+}
+
+static void bp_constraints_unlock(struct mutex *tsk_mtx)
+{
+	if (tsk_mtx) {
+		percpu_up_read(&bp_cpuinfo_sem);
+		mutex_unlock(tsk_mtx);
+	} else {
+		percpu_up_write(&bp_cpuinfo_sem);
+	}
+}
+
+static bool bp_constraints_is_locked(struct perf_event *bp)
+{
+	struct mutex *tsk_mtx = get_task_bps_mutex(bp);
+
+	return percpu_is_write_locked(&bp_cpuinfo_sem) ||
+	       (tsk_mtx ? mutex_is_locked(tsk_mtx) :
+			  percpu_is_read_locked(&bp_cpuinfo_sem));
+}
+
+static inline void assert_bp_constraints_lock_held(struct perf_event *bp)
+{
+	struct mutex *tsk_mtx = get_task_bps_mutex(bp);
+
+	if (tsk_mtx)
+		lockdep_assert_held(tsk_mtx);
+	lockdep_assert_held(&bp_cpuinfo_sem);
+}
 
 #ifdef hw_breakpoint_slots
 /*
@@ -97,7 +170,7 @@ static __init int init_breakpoint_slots(void)
 		for (i = 0; i < TYPE_MAX; i++) {
 			struct bp_cpuinfo *info = get_bp_info(cpu, i);
 
-			info->tsk_pinned = kcalloc(__nr_bp_slots[i], sizeof(int), GFP_KERNEL);
+			info->tsk_pinned = kcalloc(__nr_bp_slots[i], sizeof(atomic_t), GFP_KERNEL);
 			if (!info->tsk_pinned)
 				goto err;
 		}
@@ -137,11 +210,19 @@ static inline enum bp_type_idx find_slot_idx(u64 bp_type)
  */
 static unsigned int max_task_bp_pinned(int cpu, enum bp_type_idx type)
 {
-	unsigned int *tsk_pinned = get_bp_info(cpu, type)->tsk_pinned;
+	atomic_t *tsk_pinned = get_bp_info(cpu, type)->tsk_pinned;
 	int i;
 
+	/*
+	 * At this point we want to have acquired the bp_cpuinfo_sem as a
+	 * writer to ensure that there are no concurrent writers in
+	 * toggle_bp_task_slot() to tsk_pinned, and we get a stable snapshot.
+	 */
+	lockdep_assert_held_write(&bp_cpuinfo_sem);
+
 	for (i = hw_breakpoint_slots_cached(type) - 1; i >= 0; i--) {
-		if (tsk_pinned[i] > 0)
+		ASSERT_EXCLUSIVE_WRITER(tsk_pinned[i]); /* Catch unexpected writers. */
+		if (atomic_read(&tsk_pinned[i]) > 0)
 			return i + 1;
 	}
 
@@ -158,6 +239,11 @@ static int task_bp_pinned(int cpu, struct perf_event *bp, enum bp_type_idx type)
 	struct perf_event *iter;
 	int count = 0;
 
+	/*
+	 * We need a stable snapshot of the per-task breakpoint list.
+	 */
+	assert_bp_constraints_lock_held(bp);
+
 	rcu_read_lock();
 	head = rhltable_lookup(&task_bps_ht, &bp->hw.target, task_bps_ht_params);
 	if (!head)
@@ -214,16 +300,25 @@ max_bp_pinned_slots(struct perf_event *bp, enum bp_type_idx type)
 static void toggle_bp_task_slot(struct perf_event *bp, int cpu,
 				enum bp_type_idx type, int weight)
 {
-	unsigned int *tsk_pinned = get_bp_info(cpu, type)->tsk_pinned;
+	atomic_t *tsk_pinned = get_bp_info(cpu, type)->tsk_pinned;
 	int old_idx, new_idx;
 
+	/*
+	 * If bp->hw.target, tsk_pinned is only modified, but not used
+	 * otherwise. We can permit concurrent updates as long as there are no
+	 * other uses: having acquired bp_cpuinfo_sem as a reader allows
+	 * concurrent updates here. Uses of tsk_pinned will require acquiring
+	 * bp_cpuinfo_sem as a writer to stabilize tsk_pinned's value.
+	 */
+	lockdep_assert_held_read(&bp_cpuinfo_sem);
+
 	old_idx = task_bp_pinned(cpu, bp, type) - 1;
 	new_idx = old_idx + weight;
 
 	if (old_idx >= 0)
-		tsk_pinned[old_idx]--;
+		atomic_dec(&tsk_pinned[old_idx]);
 	if (new_idx >= 0)
-		tsk_pinned[new_idx]++;
+		atomic_inc(&tsk_pinned[new_idx]);
 }
 
 /*
@@ -241,6 +336,7 @@ toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type,
 
 	/* Pinned counter cpu profiling */
 	if (!bp->hw.target) {
+		lockdep_assert_held_write(&bp_cpuinfo_sem);
 		get_bp_info(bp->cpu, type)->cpu_pinned += weight;
 		return 0;
 	}
@@ -249,6 +345,11 @@ toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type,
 	for_each_cpu(cpu, cpumask)
 		toggle_bp_task_slot(bp, cpu, type, weight);
 
+	/*
+	 * Readers want a stable snapshot of the per-task breakpoint list.
+	 */
+	assert_bp_constraints_lock_held(bp);
+
 	if (enable)
 		return rhltable_insert(&task_bps_ht, &bp->hw.bp_list, task_bps_ht_params);
 	else
@@ -354,14 +455,10 @@ static int __reserve_bp_slot(struct perf_event *bp, u64 bp_type)
 
 int reserve_bp_slot(struct perf_event *bp)
 {
-	int ret;
-
-	mutex_lock(&nr_bp_mutex);
-
-	ret = __reserve_bp_slot(bp, bp->attr.bp_type);
-
-	mutex_unlock(&nr_bp_mutex);
+	struct mutex *mtx = bp_constraints_lock(bp);
+	int ret = __reserve_bp_slot(bp, bp->attr.bp_type);
 
+	bp_constraints_unlock(mtx);
 	return ret;
 }
 
@@ -379,12 +476,11 @@ static void __release_bp_slot(struct perf_event *bp, u64 bp_type)
 
 void release_bp_slot(struct perf_event *bp)
 {
-	mutex_lock(&nr_bp_mutex);
+	struct mutex *mtx = bp_constraints_lock(bp);
 
 	arch_unregister_hw_breakpoint(bp);
 	__release_bp_slot(bp, bp->attr.bp_type);
-
-	mutex_unlock(&nr_bp_mutex);
+	bp_constraints_unlock(mtx);
 }
 
 static int __modify_bp_slot(struct perf_event *bp, u64 old_type, u64 new_type)
@@ -411,11 +507,10 @@ static int __modify_bp_slot(struct perf_event *bp, u64 old_type, u64 new_type)
 
 static int modify_bp_slot(struct perf_event *bp, u64 old_type, u64 new_type)
 {
-	int ret;
+	struct mutex *mtx = bp_constraints_lock(bp);
+	int ret = __modify_bp_slot(bp, old_type, new_type);
 
-	mutex_lock(&nr_bp_mutex);
-	ret = __modify_bp_slot(bp, old_type, new_type);
-	mutex_unlock(&nr_bp_mutex);
+	bp_constraints_unlock(mtx);
 	return ret;
 }
 
@@ -426,18 +521,28 @@ static int modify_bp_slot(struct perf_event *bp, u64 old_type, u64 new_type)
  */
 int dbg_reserve_bp_slot(struct perf_event *bp)
 {
-	if (mutex_is_locked(&nr_bp_mutex))
+	int ret;
+
+	if (bp_constraints_is_locked(bp))
 		return -1;
 
-	return __reserve_bp_slot(bp, bp->attr.bp_type);
+	/* Locks aren't held; disable lockdep assert checking. */
+	lockdep_off();
+	ret = __reserve_bp_slot(bp, bp->attr.bp_type);
+	lockdep_on();
+
+	return ret;
 }
 
 int dbg_release_bp_slot(struct perf_event *bp)
 {
-	if (mutex_is_locked(&nr_bp_mutex))
+	if (bp_constraints_is_locked(bp))
 		return -1;
 
+	/* Locks aren't held; disable lockdep assert checking. */
+	lockdep_off();
 	__release_bp_slot(bp, bp->attr.bp_type);
+	lockdep_on();
 
 	return 0;
 }
@@ -663,7 +768,7 @@ bool hw_breakpoint_is_used(void)
 				return true;
 
 			for (int slot = 0; slot < hw_breakpoint_slots_cached(type); ++slot) {
-				if (info->tsk_pinned[slot])
+				if (atomic_read(&info->tsk_pinned[slot]))
 					return true;
 			}
 		}
