
Commit f95e5a3

Marco Elver (melver) authored and Peter Zijlstra committed
powerpc/hw_breakpoint: Avoid relying on caller synchronization

Internal data structures (cpu_bps, task_bps) of powerpc's hw_breakpoint
implementation have relied on nr_bp_mutex serializing access to them. Before
overhauling synchronization of kernel/events/hw_breakpoint.c, introduce 2
spinlocks to synchronize cpu_bps and task_bps respectively, thus avoiding
reliance on callers synchronizing powerpc's hw_breakpoint.

Reported-by: Dmitry Vyukov <dvyukov@google.com>
Signed-off-by: Marco Elver <elver@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Dmitry Vyukov <dvyukov@google.com>
Acked-by: Ian Rogers <irogers@google.com>
Link: https://lore.kernel.org/r/20220829124719.675715-10-elver@google.com
1 parent 24198ad commit f95e5a3
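In practice the change guards every reader and writer of the two data structures with a dedicated spinlock instead of assuming the caller holds nr_bp_mutex; early returns inside the locked region become a ret/break pair so every path goes through a single unlock. A simplified sketch of that pattern (not the exact kernel code; it uses the task_bps_lock, task_bps and can_co_exist() names from the diff below and needs the usual kernel headers around it):

/* Simplified sketch of the locking pattern the patch applies; the real
 * functions live in arch/powerpc/kernel/hw_breakpoint.c. */
static DEFINE_SPINLOCK(task_bps_lock);
static LIST_HEAD(task_bps);

static bool same_task_bps_check(struct perf_event *bp)
{
	struct breakpoint *tmp;
	bool ret = false;

	/* Take the new lock instead of relying on nr_bp_mutex in the caller. */
	spin_lock(&task_bps_lock);
	list_for_each_entry(tmp, &task_bps, list) {
		if (tmp->bp->hw.target == bp->hw.target && !can_co_exist(tmp, bp)) {
			ret = true;	/* remember the result ... */
			break;		/* ... and leave the loop */
		}
	}
	spin_unlock(&task_bps_lock);	/* single unlock on the common exit path */
	return ret;
}

The same transformation is applied symmetrically to the cpu_bps accessors with cpu_bps_lock, as the diff shows.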

File tree: 1 file changed, +40 −13 lines

arch/powerpc/kernel/hw_breakpoint.c

40 additions, 13 deletions

@@ -15,6 +15,7 @@
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <linux/smp.h>
+#include <linux/spinlock.h>
 #include <linux/debugfs.h>
 #include <linux/init.h>
 
@@ -129,7 +130,14 @@ struct breakpoint {
 	bool ptrace_bp;
 };
 
+/*
+ * While kernel/events/hw_breakpoint.c does its own synchronization, we cannot
+ * rely on it safely synchronizing internals here; however, we can rely on it
+ * not requesting more breakpoints than available.
+ */
+static DEFINE_SPINLOCK(cpu_bps_lock);
 static DEFINE_PER_CPU(struct breakpoint *, cpu_bps[HBP_NUM_MAX]);
+static DEFINE_SPINLOCK(task_bps_lock);
 static LIST_HEAD(task_bps);
 
 static struct breakpoint *alloc_breakpoint(struct perf_event *bp)
@@ -174,14 +182,17 @@ static int task_bps_add(struct perf_event *bp)
 	if (IS_ERR(tmp))
 		return PTR_ERR(tmp);
 
+	spin_lock(&task_bps_lock);
 	list_add(&tmp->list, &task_bps);
+	spin_unlock(&task_bps_lock);
 	return 0;
 }
 
 static void task_bps_remove(struct perf_event *bp)
 {
 	struct list_head *pos, *q;
 
+	spin_lock(&task_bps_lock);
 	list_for_each_safe(pos, q, &task_bps) {
 		struct breakpoint *tmp = list_entry(pos, struct breakpoint, list);
 
@@ -191,6 +202,7 @@ static void task_bps_remove(struct perf_event *bp)
 			break;
 		}
 	}
+	spin_unlock(&task_bps_lock);
 }
 
 /*
@@ -200,12 +212,17 @@ static void task_bps_remove(struct perf_event *bp)
 static bool all_task_bps_check(struct perf_event *bp)
 {
 	struct breakpoint *tmp;
+	bool ret = false;
 
+	spin_lock(&task_bps_lock);
 	list_for_each_entry(tmp, &task_bps, list) {
-		if (!can_co_exist(tmp, bp))
-			return true;
+		if (!can_co_exist(tmp, bp)) {
+			ret = true;
+			break;
+		}
 	}
-	return false;
+	spin_unlock(&task_bps_lock);
+	return ret;
 }
 
 /*
@@ -215,13 +232,18 @@ static bool all_task_bps_check(struct perf_event *bp)
 static bool same_task_bps_check(struct perf_event *bp)
 {
 	struct breakpoint *tmp;
+	bool ret = false;
 
+	spin_lock(&task_bps_lock);
 	list_for_each_entry(tmp, &task_bps, list) {
 		if (tmp->bp->hw.target == bp->hw.target &&
-		    !can_co_exist(tmp, bp))
-			return true;
+		    !can_co_exist(tmp, bp)) {
+			ret = true;
+			break;
+		}
 	}
-	return false;
+	spin_unlock(&task_bps_lock);
+	return ret;
 }
 
 static int cpu_bps_add(struct perf_event *bp)
@@ -234,13 +256,15 @@ static int cpu_bps_add(struct perf_event *bp)
 	if (IS_ERR(tmp))
 		return PTR_ERR(tmp);
 
+	spin_lock(&cpu_bps_lock);
 	cpu_bp = per_cpu_ptr(cpu_bps, bp->cpu);
 	for (i = 0; i < nr_wp_slots(); i++) {
 		if (!cpu_bp[i]) {
 			cpu_bp[i] = tmp;
 			break;
 		}
 	}
+	spin_unlock(&cpu_bps_lock);
 	return 0;
 }
 
@@ -249,6 +273,7 @@ static void cpu_bps_remove(struct perf_event *bp)
 	struct breakpoint **cpu_bp;
 	int i = 0;
 
+	spin_lock(&cpu_bps_lock);
 	cpu_bp = per_cpu_ptr(cpu_bps, bp->cpu);
 	for (i = 0; i < nr_wp_slots(); i++) {
 		if (!cpu_bp[i])
@@ -260,19 +285,25 @@ static void cpu_bps_remove(struct perf_event *bp)
 			break;
 		}
 	}
+	spin_unlock(&cpu_bps_lock);
 }
 
 static bool cpu_bps_check(int cpu, struct perf_event *bp)
 {
 	struct breakpoint **cpu_bp;
+	bool ret = false;
 	int i;
 
+	spin_lock(&cpu_bps_lock);
 	cpu_bp = per_cpu_ptr(cpu_bps, cpu);
 	for (i = 0; i < nr_wp_slots(); i++) {
-		if (cpu_bp[i] && !can_co_exist(cpu_bp[i], bp))
-			return true;
+		if (cpu_bp[i] && !can_co_exist(cpu_bp[i], bp)) {
+			ret = true;
+			break;
+		}
 	}
-	return false;
+	spin_unlock(&cpu_bps_lock);
+	return ret;
 }
 
 static bool all_cpu_bps_check(struct perf_event *bp)
@@ -286,10 +317,6 @@ static bool all_cpu_bps_check(struct perf_event *bp)
 	return false;
 }
 
-/*
- * We don't use any locks to serialize accesses to cpu_bps or task_bps
- * because are already inside nr_bp_mutex.
- */
 int arch_reserve_bp_slot(struct perf_event *bp)
 {
 	int ret;