Blame view
kernel/events/hw_breakpoint.c
15.5 KB
62a038d34 hw-breakpoints: i... |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 |
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * * Copyright (C) 2007 Alan Stern * Copyright (C) IBM Corporation, 2009 |
24f1e32c6 hw-breakpoints: R... |
18 |
* Copyright (C) 2009, Frederic Weisbecker <fweisbec@gmail.com> |
ba1c813a6 hw-breakpoints: A... |
19 20 |
* * Thanks to Ingo Molnar for his many suggestions. |
ba6909b71 hw-breakpoint: At... |
21 22 23 24 |
* * Authors: Alan Stern <stern@rowland.harvard.edu> * K.Prasad <prasad@linux.vnet.ibm.com> * Frederic Weisbecker <fweisbec@gmail.com> |
62a038d34 hw-breakpoints: i... |
25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 |
*/ /* * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility, * using the CPU's debug registers. * This file contains the arch-independent routines. */ #include <linux/irqflags.h> #include <linux/kallsyms.h> #include <linux/notifier.h> #include <linux/kprobes.h> #include <linux/kdebug.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/percpu.h> #include <linux/sched.h> #include <linux/init.h> |
feef47d0c hw-breakpoints: G... |
43 |
#include <linux/slab.h> |
45a73372e hw_breakpoints: F... |
44 |
#include <linux/list.h> |
88f7a890d ksym_tracer: Fix ... |
45 |
#include <linux/cpu.h> |
62a038d34 hw-breakpoints: i... |
46 |
#include <linux/smp.h> |
24f1e32c6 hw-breakpoints: R... |
47 |
#include <linux/hw_breakpoint.h> |
0102752e4 hw-breakpoints: S... |
48 |
|
ba1c813a6 hw-breakpoints: A... |
49 50 51 |
/*
 * Constraints data
 */

/* Number of pinned cpu breakpoints in a cpu */
static DEFINE_PER_CPU(unsigned int, nr_cpu_bp_pinned[TYPE_MAX]);

/* Number of pinned task breakpoints in a cpu */
static DEFINE_PER_CPU(unsigned int *, nr_task_bp_pinned[TYPE_MAX]);

/* Number of non-pinned cpu/task breakpoints in a cpu */
static DEFINE_PER_CPU(unsigned int, nr_bp_flexible[TYPE_MAX]);

/* Slots available per breakpoint type, filled at boot by hw_breakpoint_slots() */
static int nr_slots[TYPE_MAX];

/* Keep track of the breakpoints attached to tasks */
static LIST_HEAD(bp_task_head);

/* Non-zero once the per-cpu constraint tables were allocated at boot */
static int constraints_initialized;

/* Gather the number of total pinned and un-pinned bp in a cpuset */
struct bp_busy_slots {
	unsigned int pinned;
	unsigned int flexible;
};

/* Serialize accesses to the above constraints */
static DEFINE_MUTEX(nr_bp_mutex);
f93a20541 hw-breakpoints: H... |
74 75 76 77 |
__weak int hw_breakpoint_weight(struct perf_event *bp) { return 1; } |
0102752e4 hw-breakpoints: S... |
78 79 80 81 82 83 84 |
static inline enum bp_type_idx find_slot_idx(struct perf_event *bp) { if (bp->attr.bp_type & HW_BREAKPOINT_RW) return TYPE_DATA; return TYPE_INST; } |
ba1c813a6 hw-breakpoints: A... |
85 86 87 88 |
/* * Report the maximum number of pinned breakpoints a task * have in this cpu */ |
0102752e4 hw-breakpoints: S... |
89 |
static unsigned int max_task_bp_pinned(int cpu, enum bp_type_idx type) |
62a038d34 hw-breakpoints: i... |
90 |
{ |
ba1c813a6 hw-breakpoints: A... |
91 |
int i; |
0102752e4 hw-breakpoints: S... |
92 |
unsigned int *tsk_pinned = per_cpu(nr_task_bp_pinned[type], cpu); |
62a038d34 hw-breakpoints: i... |
93 |
|
feef47d0c hw-breakpoints: G... |
94 |
for (i = nr_slots[type] - 1; i >= 0; i--) { |
ba1c813a6 hw-breakpoints: A... |
95 96 |
if (tsk_pinned[i] > 0) return i + 1; |
62a038d34 hw-breakpoints: i... |
97 |
} |
24f1e32c6 hw-breakpoints: R... |
98 |
return 0; |
62a038d34 hw-breakpoints: i... |
99 |
} |
45a73372e hw_breakpoints: F... |
100 101 102 103 104 |
/*
 * Count the number of breakpoints of the same type and same task.
 * The given event must be not on the list.
 */
static int task_bp_pinned(struct perf_event *bp, enum bp_type_idx type)
{
	struct task_struct *tsk = bp->hw.bp_target;
	struct perf_event *iter;
	int count = 0;

	/*
	 * Walk every task-bound breakpoint and accumulate the weight of
	 * those targeting the same task with the same slot type.
	 */
	list_for_each_entry(iter, &bp_task_head, hw.bp_list) {
		if (iter->hw.bp_target == tsk && find_slot_idx(iter) == type)
			count += hw_breakpoint_weight(iter);
	}

	return count;
}
ba1c813a6 hw-breakpoints: A... |
115 116 117 118 |
/*
 * Report the number of pinned/un-pinned breakpoints we have in
 * a given cpu (cpu > -1) or in all of them (cpu = -1).
 */
static void
fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp,
		    enum bp_type_idx type)
{
	int cpu = bp->cpu;
	struct task_struct *tsk = bp->hw.bp_target;

	/* Single-cpu breakpoint: only that cpu's constraints matter. */
	if (cpu >= 0) {
		slots->pinned = per_cpu(nr_cpu_bp_pinned[type], cpu);
		if (!tsk)
			slots->pinned += max_task_bp_pinned(cpu, type);
		else
			slots->pinned += task_bp_pinned(bp, type);
		slots->flexible = per_cpu(nr_bp_flexible[type], cpu);

		return;
	}

	/* Wide breakpoint: report the worst (maximum) case over all cpus. */
	for_each_online_cpu(cpu) {
		unsigned int nr;

		nr = per_cpu(nr_cpu_bp_pinned[type], cpu);
		if (!tsk)
			nr += max_task_bp_pinned(cpu, type);
		else
			nr += task_bp_pinned(bp, type);

		if (nr > slots->pinned)
			slots->pinned = nr;

		nr = per_cpu(nr_bp_flexible[type], cpu);

		if (nr > slots->flexible)
			slots->flexible = nr;
	}
}

/*
 * For now, continue to consider flexible as pinned, until we can
 * ensure no flexible event can ever be scheduled before a pinned event
 * in a same cpu.
 */
static void fetch_this_slot(struct bp_busy_slots *slots, int weight)
{
	slots->pinned += weight;
}

/*
 * Add a pinned breakpoint for the given task in our constraint table
 */
45a73372e hw_breakpoints: F... |
168 |
static void toggle_bp_task_slot(struct perf_event *bp, int cpu, bool enable,
				enum bp_type_idx type, int weight)
{
	unsigned int *tsk_pinned;
	int old_count = 0;
	int old_idx = 0;
	int idx = 0;

	/*
	 * NOTE(review): on disable, toggle_bp_slot() has already removed bp
	 * from bp_task_head, so task_bp_pinned() counts the task's remaining
	 * breakpoints here — confirm against the caller's ordering.
	 */
	old_count = task_bp_pinned(bp, type);
	old_idx = old_count - 1;
	idx = old_idx + weight;

	/* tsk_pinned[n] is the number of tasks having n breakpoints */
	tsk_pinned = per_cpu(nr_task_bp_pinned[type], cpu);
	if (enable) {
		/* The task moves up by 'weight' breakpoints. */
		tsk_pinned[idx]++;
		if (old_count > 0)
			tsk_pinned[old_idx]--;
	} else {
		/* The task moves back down by 'weight' breakpoints. */
		tsk_pinned[idx]--;
		if (old_count > 0)
			tsk_pinned[old_idx]++;
	}
}

/*
 * Add/remove the given breakpoint in our constraint table
 */
} } /* * Add/remove the given breakpoint in our constraint table */ |
0102752e4 hw-breakpoints: S... |
196 |
static void
toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type,
	       int weight)
{
	int cpu = bp->cpu;
	struct task_struct *tsk = bp->hw.bp_target;

	/* Pinned counter cpu profiling */
	if (!tsk) {

		if (enable)
			per_cpu(nr_cpu_bp_pinned[type], bp->cpu) += weight;
		else
			per_cpu(nr_cpu_bp_pinned[type], bp->cpu) -= weight;
		return;
	}

	/* Pinned counter task profiling */

	/*
	 * Remove from the list before re-counting so task_bp_pinned()
	 * does not see the breakpoint being disabled.
	 */
	if (!enable)
		list_del(&bp->hw.bp_list);

	if (cpu >= 0) {
		toggle_bp_task_slot(bp, cpu, enable, type, weight);
	} else {
		for_each_online_cpu(cpu)
			toggle_bp_task_slot(bp, cpu, enable, type, weight);
	}

	if (enable)
		list_add_tail(&bp->hw.bp_list, &bp_task_head);
}

/*
 * Function to perform processor-specific cleanup during unregistration
 */
__weak void arch_unregister_hw_breakpoint(struct perf_event *bp)
{
	/*
	 * A weak stub function here for those archs that don't define
	 * it inside arch/.../kernel/hw_breakpoint.c
	 */
}

/*
 * Constraints to check before allowing this new breakpoint counter:
 *
 *  == Non-pinned counter == (Considered as pinned for now)
 *
 *   - If attached to a single cpu, check:
 *
 *       (per_cpu(nr_bp_flexible, cpu) || (per_cpu(nr_cpu_bp_pinned, cpu)
 *           + max(per_cpu(nr_task_bp_pinned, cpu)))) < HBP_NUM
 *
 *       -> If there are already non-pinned counters in this cpu, it means
 *          there is already a free slot for them.
 *          Otherwise, we check that the maximum number of per task
 *          breakpoints (for this cpu) plus the number of per cpu breakpoint
 *          (for this cpu) doesn't cover every registers.
 *
 *   - If attached to every cpus, check:
 *
 *       (per_cpu(nr_bp_flexible, *) || (max(per_cpu(nr_cpu_bp_pinned, *))
 *           + max(per_cpu(nr_task_bp_pinned, *)))) < HBP_NUM
 *
 *       -> This is roughly the same, except we check the number of per cpu
 *          bp for every cpu and we keep the max one. Same for the per tasks
 *          breakpoints.
 *
 *
 *  == Pinned counter ==
 *
 *   - If attached to a single cpu, check:
 *
 *       ((per_cpu(nr_bp_flexible, cpu) > 1) + per_cpu(nr_cpu_bp_pinned, cpu)
 *            + max(per_cpu(nr_task_bp_pinned, cpu))) < HBP_NUM
 *
 *       -> Same checks as before. But now the nr_bp_flexible, if any, must keep
 *          one register at least (or they will never be fed).
 *
 *   - If attached to every cpus, check:
 *
 *       ((per_cpu(nr_bp_flexible, *) > 1) + max(per_cpu(nr_cpu_bp_pinned, *))
 *            + max(per_cpu(nr_task_bp_pinned, *))) < HBP_NUM
 */
5352ae638 perf, hw_breakpoi... |
279 |
/*
 * Try to account the breakpoint in the constraint tables.
 * Returns 0 on success, -ENOSPC if no slot is left for this type,
 * -ENOMEM if the tables were never allocated, -EINVAL on a bad type.
 * Caller must hold nr_bp_mutex (or be the debugger — see dbg_* variants).
 */
static int __reserve_bp_slot(struct perf_event *bp)
{
	struct bp_busy_slots slots = {0};
	enum bp_type_idx type;
	int weight;

	/* We couldn't initialize breakpoint constraints on boot */
	if (!constraints_initialized)
		return -ENOMEM;

	/* Basic checks */
	if (bp->attr.bp_type == HW_BREAKPOINT_EMPTY ||
	    bp->attr.bp_type == HW_BREAKPOINT_INVALID)
		return -EINVAL;

	type = find_slot_idx(bp);
	weight = hw_breakpoint_weight(bp);

	fetch_bp_busy_slots(&slots, bp, type);
	/*
	 * Simulate the addition of this breakpoint to the constraints
	 * and see the result.
	 */
	fetch_this_slot(&slots, weight);

	/* Flexible counters need to keep at least one slot */
	if (slots.pinned + (!!slots.flexible) > nr_slots[type])
		return -ENOSPC;

	toggle_bp_slot(bp, true, type, weight);

	return 0;
}

int reserve_bp_slot(struct perf_event *bp)
{
	int ret;

	mutex_lock(&nr_bp_mutex);

	ret = __reserve_bp_slot(bp);

	mutex_unlock(&nr_bp_mutex);

	return ret;
}
5352ae638 perf, hw_breakpoi... |
322 323 |
/* Undo the accounting done by __reserve_bp_slot(). Caller holds nr_bp_mutex. */
static void __release_bp_slot(struct perf_event *bp)
{
	enum bp_type_idx type;
	int weight;

	type = find_slot_idx(bp);
	weight = hw_breakpoint_weight(bp);
	toggle_bp_slot(bp, false, type, weight);
}

void release_bp_slot(struct perf_event *bp)
{
	mutex_lock(&nr_bp_mutex);

	/* Let the arch drop any per-bp state before we release the slot. */
	arch_unregister_hw_breakpoint(bp);
	__release_bp_slot(bp);

	mutex_unlock(&nr_bp_mutex);
}
5352ae638 perf, hw_breakpoi... |
339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 |
/* * Allow the kernel debugger to reserve breakpoint slots without * taking a lock using the dbg_* variant of for the reserve and * release breakpoint slots. */ int dbg_reserve_bp_slot(struct perf_event *bp) { if (mutex_is_locked(&nr_bp_mutex)) return -1; return __reserve_bp_slot(bp); } int dbg_release_bp_slot(struct perf_event *bp) { if (mutex_is_locked(&nr_bp_mutex)) return -1; __release_bp_slot(bp); return 0; } |
ba1c813a6 hw-breakpoints: A... |
361 |
|
b2812d031 hw-breakpoints: C... |
362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 |
static int validate_hw_breakpoint(struct perf_event *bp) { int ret; ret = arch_validate_hwbkpt_settings(bp); if (ret) return ret; if (arch_check_bp_in_kernelspace(bp)) { if (bp->attr.exclude_kernel) return -EINVAL; /* * Don't let unprivileged users set a breakpoint in the trap * path to avoid trap recursion attacks. */ if (!capable(CAP_SYS_ADMIN)) return -EPERM; } return 0; } |
b326e9560 hw-breakpoints: U... |
383 |
/* Reserve a constraint slot and validate the breakpoint settings. */
int register_perf_hw_breakpoint(struct perf_event *bp)
{
	int ret = reserve_bp_slot(bp);

	if (!ret) {
		ret = validate_hw_breakpoint(bp);
		/* give the slot back if validation failed */
		if (ret)
			release_bp_slot(bp);
	}

	return ret;
}
62a038d34 hw-breakpoints: i... |
398 |
|
62a038d34 hw-breakpoints: i... |
399 400 |
/**
 * register_user_hw_breakpoint - register a hardware breakpoint for user space
 * @attr: breakpoint attributes
 * @triggered: callback to trigger when we hit the breakpoint
 * @context: context data passed to the @triggered callback
 * @tsk: pointer to 'task_struct' of the process to which the address belongs
 */
struct perf_event *
register_user_hw_breakpoint(struct perf_event_attr *attr,
			    perf_overflow_handler_t triggered,
			    void *context,
			    struct task_struct *tsk)
{
	return perf_event_create_kernel_counter(attr, -1, tsk, triggered,
						context);
}
EXPORT_SYMBOL_GPL(register_user_hw_breakpoint);

/**
 * modify_user_hw_breakpoint - modify a user-space hardware breakpoint
 * @bp: the breakpoint structure to modify
 * @attr: new breakpoint attributes
 */
44234adcd hw-breakpoints: M... |
423 |
/*
 * Apply the new address/type/len from @attr to @bp, re-validating the
 * settings and rolling back to the old ones on failure.
 */
int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr)
{
	u64 old_addr = bp->attr.bp_addr;
	u64 old_len = bp->attr.bp_len;
	int old_type = bp->attr.bp_type;
	int err = 0;

	/* Keep the breakpoint uninstalled while its settings change. */
	perf_event_disable(bp);

	bp->attr.bp_addr = attr->bp_addr;
	bp->attr.bp_type = attr->bp_type;
	bp->attr.bp_len = attr->bp_len;

	if (attr->disabled)
		goto end;

	err = validate_hw_breakpoint(bp);
	if (!err)
		perf_event_enable(bp);

	if (err) {
		/* Roll back to the previous, known-good settings. */
		bp->attr.bp_addr = old_addr;
		bp->attr.bp_type = old_type;
		bp->attr.bp_len = old_len;
		if (!bp->attr.disabled)
			perf_event_enable(bp);

		return err;
	}

end:
	bp->attr.disabled = attr->disabled;

	return 0;
}
EXPORT_SYMBOL_GPL(modify_user_hw_breakpoint);
/**
 * unregister_hw_breakpoint - unregister a user-space hardware breakpoint
 * @bp: the breakpoint structure to unregister
 */
void unregister_hw_breakpoint(struct perf_event *bp)
{
	if (bp)
		perf_event_release_kernel(bp);
}
EXPORT_SYMBOL_GPL(unregister_hw_breakpoint);
62a038d34 hw-breakpoints: i... |
471 |
/**
 * register_wide_hw_breakpoint - register a wide breakpoint in the kernel
 * @attr: breakpoint attributes
 * @triggered: callback to trigger when we hit the breakpoint
 * @context: context data passed to the @triggered callback
 *
 * @return a set of per_cpu pointers to perf events
 */
struct perf_event * __percpu *
register_wide_hw_breakpoint(struct perf_event_attr *attr,
			    perf_overflow_handler_t triggered,
			    void *context)
{
	struct perf_event * __percpu *cpu_events, **pevent, *bp;
	long err;
	int cpu;

	cpu_events = alloc_percpu(typeof(*cpu_events));
	if (!cpu_events)
		return (void __percpu __force *)ERR_PTR(-ENOMEM);

	/* Hold off cpu hotplug while we create one counter per cpu. */
	get_online_cpus();
	for_each_online_cpu(cpu) {
		pevent = per_cpu_ptr(cpu_events, cpu);
		bp = perf_event_create_kernel_counter(attr, cpu, NULL,
						      triggered, context);

		/* Store the result (even an error) so the fail path can stop. */
		*pevent = bp;

		if (IS_ERR(bp)) {
			err = PTR_ERR(bp);
			goto fail;
		}
	}
	put_online_cpus();

	return cpu_events;

fail:
	/* Unwind the counters created so far; stop at the failed slot. */
	for_each_online_cpu(cpu) {
		pevent = per_cpu_ptr(cpu_events, cpu);
		if (IS_ERR(*pevent))
			break;
		unregister_hw_breakpoint(*pevent);
	}
	put_online_cpus();

	free_percpu(cpu_events);
	return (void __percpu __force *)ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(register_wide_hw_breakpoint);
62a038d34 hw-breakpoints: i... |
520 521 |
/** |
24f1e32c6 hw-breakpoints: R... |
522 523 |
* unregister_wide_hw_breakpoint - unregister a wide breakpoint in the kernel * @cpu_events: the per cpu set of events to unregister |
62a038d34 hw-breakpoints: i... |
524 |
*/ |
44ee63587 percpu: Add __per... |
525 |
void unregister_wide_hw_breakpoint(struct perf_event * __percpu *cpu_events) |
62a038d34 hw-breakpoints: i... |
526 |
{ |
24f1e32c6 hw-breakpoints: R... |
527 528 |
int cpu; struct perf_event **pevent; |
62a038d34 hw-breakpoints: i... |
529 |
|
24f1e32c6 hw-breakpoints: R... |
530 531 532 |
for_each_possible_cpu(cpu) { pevent = per_cpu_ptr(cpu_events, cpu); unregister_hw_breakpoint(*pevent); |
62a038d34 hw-breakpoints: i... |
533 |
} |
24f1e32c6 hw-breakpoints: R... |
534 |
free_percpu(cpu_events); |
62a038d34 hw-breakpoints: i... |
535 |
} |
f60d24d2a hw-breakpoints: F... |
536 |
EXPORT_SYMBOL_GPL(unregister_wide_hw_breakpoint); |
62a038d34 hw-breakpoints: i... |
537 538 539 540 541 542 |
static struct notifier_block hw_breakpoint_exceptions_nb = { .notifier_call = hw_breakpoint_exceptions_notify, /* we need to be notified first */ .priority = 0x7fffffff }; |
b0a873ebb perf: Register PM... |
543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 |
static void bp_perf_event_destroy(struct perf_event *event) { release_bp_slot(event); } static int hw_breakpoint_event_init(struct perf_event *bp) { int err; if (bp->attr.type != PERF_TYPE_BREAKPOINT) return -ENOENT; err = register_perf_hw_breakpoint(bp); if (err) return err; bp->destroy = bp_perf_event_destroy; return 0; } |
a4eaf7f14 perf: Rework the ... |
563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 |
static int hw_breakpoint_add(struct perf_event *bp, int flags) { if (!(flags & PERF_EF_START)) bp->hw.state = PERF_HES_STOPPED; return arch_install_hw_breakpoint(bp); } static void hw_breakpoint_del(struct perf_event *bp, int flags) { arch_uninstall_hw_breakpoint(bp); } static void hw_breakpoint_start(struct perf_event *bp, int flags) { bp->hw.state = 0; } static void hw_breakpoint_stop(struct perf_event *bp, int flags) { bp->hw.state = PERF_HES_STOPPED; } |
b0a873ebb perf: Register PM... |
585 |
/* The breakpoint PMU, registered at boot by init_hw_breakpoint(). */
static struct pmu perf_breakpoint = {
	.task_ctx_nr	= perf_sw_context, /* could eventually get its own */

	.event_init	= hw_breakpoint_event_init,
	.add		= hw_breakpoint_add,
	.del		= hw_breakpoint_del,
	.start		= hw_breakpoint_start,
	.stop		= hw_breakpoint_stop,
	.read		= hw_breakpoint_pmu_read,
};
3c502e7a0 perf,hw_breakpoin... |
594 |
/*
 * Allocate the per-cpu constraint tables, register the breakpoint PMU
 * and hook the debug-exception notifier. Returns 0 or a negative errno.
 */
int __init init_hw_breakpoint(void)
{
	unsigned int **task_bp_pinned;
	int cpu, err_cpu;
	int i;

	for (i = 0; i < TYPE_MAX; i++)
		nr_slots[i] = hw_breakpoint_slots(i);

	for_each_possible_cpu(cpu) {
		for (i = 0; i < TYPE_MAX; i++) {
			task_bp_pinned = &per_cpu(nr_task_bp_pinned[i], cpu);
			*task_bp_pinned = kzalloc(sizeof(int) * nr_slots[i],
						  GFP_KERNEL);
			if (!*task_bp_pinned)
				goto err_alloc;
		}
	}

	constraints_initialized = 1;

	perf_pmu_register(&perf_breakpoint, "breakpoint", PERF_TYPE_BREAKPOINT);

	return register_die_notifier(&hw_breakpoint_exceptions_nb);

 err_alloc:
	for_each_possible_cpu(err_cpu) {
		/*
		 * Free each visited cpu's arrays exactly once (was: kfree of
		 * the failing cpu's arrays on every iteration — double-free
		 * plus a leak of the earlier cpus). kfree(NULL) is a no-op
		 * for the types that were never allocated on the failing cpu.
		 */
		for (i = 0; i < TYPE_MAX; i++)
			kfree(per_cpu(nr_task_bp_pinned[i], err_cpu));
		/* Break only after freeing the cpu whose allocation failed. */
		if (err_cpu == cpu)
			break;
	}

	return -ENOMEM;
}
24f1e32c6 hw-breakpoints: R... |
628 |