 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <linux/smp.h>
+#include <linux/spinlock.h>
 #include <linux/debugfs.h>
 #include <linux/init.h>
 
@@ -129,7 +130,14 @@ struct breakpoint {
 	bool ptrace_bp;
 };
 
+/*
+ * While kernel/events/hw_breakpoint.c does its own synchronization, we cannot
+ * rely on it safely synchronizing internals here; however, we can rely on it
+ * not requesting more breakpoints than available.
+ */
+static DEFINE_SPINLOCK(cpu_bps_lock);
 static DEFINE_PER_CPU(struct breakpoint *, cpu_bps[HBP_NUM_MAX]);
+static DEFINE_SPINLOCK(task_bps_lock);
 static LIST_HEAD(task_bps);
 
 static struct breakpoint *alloc_breakpoint(struct perf_event *bp)
@@ -174,14 +182,17 @@ static int task_bps_add(struct perf_event *bp)
 	if (IS_ERR(tmp))
 		return PTR_ERR(tmp);
 
+	spin_lock(&task_bps_lock);
 	list_add(&tmp->list, &task_bps);
+	spin_unlock(&task_bps_lock);
 	return 0;
 }
 
 static void task_bps_remove(struct perf_event *bp)
 {
 	struct list_head *pos, *q;
 
+	spin_lock(&task_bps_lock);
 	list_for_each_safe(pos, q, &task_bps) {
 		struct breakpoint *tmp = list_entry(pos, struct breakpoint, list);
 
@@ -191,6 +202,7 @@ static void task_bps_remove(struct perf_event *bp)
 			break;
 		}
 	}
+	spin_unlock(&task_bps_lock);
 }
 
 /*
@@ -200,12 +212,17 @@ static void task_bps_remove(struct perf_event *bp)
 static bool all_task_bps_check(struct perf_event *bp)
 {
 	struct breakpoint *tmp;
+	bool ret = false;
 
+	spin_lock(&task_bps_lock);
 	list_for_each_entry(tmp, &task_bps, list) {
-		if (!can_co_exist(tmp, bp))
-			return true;
+		if (!can_co_exist(tmp, bp)) {
+			ret = true;
+			break;
+		}
 	}
-	return false;
+	spin_unlock(&task_bps_lock);
+	return ret;
 }
 
 /*
@@ -215,13 +232,18 @@ static bool all_task_bps_check(struct perf_event *bp)
 static bool same_task_bps_check(struct perf_event *bp)
 {
 	struct breakpoint *tmp;
+	bool ret = false;
 
+	spin_lock(&task_bps_lock);
 	list_for_each_entry(tmp, &task_bps, list) {
 		if (tmp->bp->hw.target == bp->hw.target &&
-		    !can_co_exist(tmp, bp))
-			return true;
+		    !can_co_exist(tmp, bp)) {
+			ret = true;
+			break;
+		}
 	}
-	return false;
+	spin_unlock(&task_bps_lock);
+	return ret;
 }
 
 static int cpu_bps_add(struct perf_event *bp)
@@ -234,13 +256,15 @@ static int cpu_bps_add(struct perf_event *bp)
 	if (IS_ERR(tmp))
 		return PTR_ERR(tmp);
 
+	spin_lock(&cpu_bps_lock);
 	cpu_bp = per_cpu_ptr(cpu_bps, bp->cpu);
 	for (i = 0; i < nr_wp_slots(); i++) {
 		if (!cpu_bp[i]) {
 			cpu_bp[i] = tmp;
 			break;
 		}
 	}
+	spin_unlock(&cpu_bps_lock);
 	return 0;
 }
 
@@ -249,6 +273,7 @@ static void cpu_bps_remove(struct perf_event *bp)
 	struct breakpoint **cpu_bp;
 	int i = 0;
 
+	spin_lock(&cpu_bps_lock);
 	cpu_bp = per_cpu_ptr(cpu_bps, bp->cpu);
 	for (i = 0; i < nr_wp_slots(); i++) {
 		if (!cpu_bp[i])
@@ -260,19 +285,25 @@ static void cpu_bps_remove(struct perf_event *bp)
 			break;
 		}
 	}
+	spin_unlock(&cpu_bps_lock);
 }
 
 static bool cpu_bps_check(int cpu, struct perf_event *bp)
 {
 	struct breakpoint **cpu_bp;
+	bool ret = false;
 	int i;
 
+	spin_lock(&cpu_bps_lock);
 	cpu_bp = per_cpu_ptr(cpu_bps, cpu);
 	for (i = 0; i < nr_wp_slots(); i++) {
-		if (cpu_bp[i] && !can_co_exist(cpu_bp[i], bp))
-			return true;
+		if (cpu_bp[i] && !can_co_exist(cpu_bp[i], bp)) {
+			ret = true;
+			break;
+		}
 	}
-	return false;
+	spin_unlock(&cpu_bps_lock);
+	return ret;
 }
 
 static bool all_cpu_bps_check(struct perf_event *bp)
@@ -286,10 +317,6 @@ static bool all_cpu_bps_check(struct perf_event *bp)
 	return false;
 }
 
-/*
- * We don't use any locks to serialize accesses to cpu_bps or task_bps
- * because are already inside nr_bp_mutex.
- */
 int arch_reserve_bp_slot(struct perf_event *bp)
 {
 	int ret;
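To make the intent of the new locking concrete, here is a minimal, self-contained userspace sketch of the same pattern the patch applies to cpu_bps: every scan or update of the shared slot array happens under a lock, and loops record a result and break rather than returning with the lock held. This is not part of the patch; the names (slot_lock, slots, NR_SLOTS, slot_add, slot_present) are hypothetical stand-ins for cpu_bps_lock, cpu_bps, nr_wp_slots(), cpu_bps_add() and cpu_bps_check(), and a pthread mutex stands in for the kernel spinlock.

/* Illustrative sketch only -- not kernel code; all identifiers are hypothetical. */
#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

#define NR_SLOTS 8	/* stand-in for nr_wp_slots() */

static pthread_mutex_t slot_lock = PTHREAD_MUTEX_INITIALIZER;	/* stand-in for cpu_bps_lock */
static void *slots[NR_SLOTS];					/* stand-in for cpu_bps */

/* Mirrors cpu_bps_add(): the whole scan-and-insert runs under the lock. */
static bool slot_add(void *item)
{
	bool added = false;
	size_t i;

	pthread_mutex_lock(&slot_lock);
	for (i = 0; i < NR_SLOTS; i++) {
		if (!slots[i]) {
			slots[i] = item;
			added = true;
			break;
		}
	}
	pthread_mutex_unlock(&slot_lock);
	return added;
}

/*
 * Mirrors cpu_bps_check(): instead of returning from inside the loop with
 * the lock held, record the result, break, unlock once, then return.
 */
static bool slot_present(void *item)
{
	bool ret = false;
	size_t i;

	pthread_mutex_lock(&slot_lock);
	for (i = 0; i < NR_SLOTS; i++) {
		if (slots[i] == item) {
			ret = true;
			break;
		}
	}
	pthread_mutex_unlock(&slot_lock);
	return ret;
}

int main(void)
{
	int x = 42;

	slot_add(&x);
	return slot_present(&x) ? 0 : 1;	/* expect 0: the item was stored */
}

The single unlock-before-return structure is the same transformation the patch applies to all_task_bps_check(), same_task_bps_check() and cpu_bps_check() above, so no path can leave the function while still holding the lock.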