/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) 2007 Alan Stern
 * Copyright (C) IBM Corporation, 2009
 * Copyright (C) 2009, Frederic Weisbecker <fweisbec@gmail.com>
 *
 * Thanks to Ingo Molnar for his many suggestions.
 *
 * Authors: Alan Stern <stern@rowland.harvard.edu>
 *          K.Prasad <prasad@linux.vnet.ibm.com>
 *          Frederic Weisbecker <fweisbec@gmail.com>
 */

/*
 * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
 * using the CPU's debug registers.
 * This file contains the arch-independent routines.
 */

#include <linux/irqflags.h>
#include <linux/kallsyms.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/smp.h>

#include <linux/hw_breakpoint.h>

/* Number of pinned cpu breakpoints in a cpu */
static DEFINE_PER_CPU(unsigned int, nr_cpu_bp_pinned[TYPE_MAX]);

/* Number of pinned task breakpoints in a cpu */
static DEFINE_PER_CPU(unsigned int *, nr_task_bp_pinned[TYPE_MAX]);

/* Number of non-pinned cpu/task breakpoints in a cpu */
static DEFINE_PER_CPU(unsigned int, nr_bp_flexible[TYPE_MAX]);

static int nr_slots[TYPE_MAX];

static int constraints_initialized;

/* Gather the number of total pinned and un-pinned bp in a cpuset */
struct bp_busy_slots {
        unsigned int pinned;
        unsigned int flexible;
};

/* Serialize accesses to the above constraints */
static DEFINE_MUTEX(nr_bp_mutex);

/*
 * Default weight: each breakpoint consumes a single slot unless the
 * architecture overrides this.
 */
__weak int hw_breakpoint_weight(struct perf_event *bp)
{
        return 1;
}

static inline enum bp_type_idx find_slot_idx(struct perf_event *bp)
{
        if (bp->attr.bp_type & HW_BREAKPOINT_RW)
                return TYPE_DATA;

        return TYPE_INST;
}

/*
 * Report the maximum number of pinned breakpoints a task has on this cpu.
 */
static unsigned int max_task_bp_pinned(int cpu, enum bp_type_idx type)
{
        int i;
        unsigned int *tsk_pinned = per_cpu(nr_task_bp_pinned[type], cpu);

        /* tsk_pinned[i] is the number of tasks holding i + 1 breakpoints */
        for (i = nr_slots[type] - 1; i >= 0; i--) {
                if (tsk_pinned[i] > 0)
                        return i + 1;
        }

        return 0;
}

/*
 * Count the pinned breakpoints of the given type held by @tsk.
 */
static int task_bp_pinned(struct task_struct *tsk, enum bp_type_idx type)
{
        struct perf_event_context *ctx = tsk->perf_event_ctxp;
        struct list_head *list;
        struct perf_event *bp;
        unsigned long flags;
        int count = 0;

        if (WARN_ONCE(!ctx, "No perf context for this task"))
                return 0;

        list = &ctx->event_list;

        raw_spin_lock_irqsave(&ctx->lock, flags);

        /*
         * The current breakpoint counter is not included in the list
         * at the open() callback time
         */
        list_for_each_entry(bp, list, event_entry) {
                if (bp->attr.type == PERF_TYPE_BREAKPOINT)
                        if (find_slot_idx(bp) == type)
                                count += hw_breakpoint_weight(bp);
        }

        raw_spin_unlock_irqrestore(&ctx->lock, flags);

        return count;
}

/*
 * Report the number of pinned/un-pinned breakpoints we have in
 * a given cpu (cpu > -1) or in all of them (cpu = -1).
 */
static void
fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp,
                    enum bp_type_idx type)
{
        int cpu = bp->cpu;
        struct task_struct *tsk = bp->ctx->task;

        if (cpu >= 0) {
                slots->pinned = per_cpu(nr_cpu_bp_pinned[type], cpu);
                if (!tsk)
                        slots->pinned += max_task_bp_pinned(cpu, type);
                else
                        slots->pinned += task_bp_pinned(tsk, type);
                slots->flexible = per_cpu(nr_bp_flexible[type], cpu);

                return;
        }

        for_each_online_cpu(cpu) {
                unsigned int nr;

                nr = per_cpu(nr_cpu_bp_pinned[type], cpu);
                if (!tsk)
                        nr += max_task_bp_pinned(cpu, type);
                else
                        nr += task_bp_pinned(tsk, type);

                if (nr > slots->pinned)
                        slots->pinned = nr;

                nr = per_cpu(nr_bp_flexible[type], cpu);

                if (nr > slots->flexible)
                        slots->flexible = nr;
        }
}

/*
 * For now, continue to consider flexible as pinned, until we can
 * ensure no flexible event can ever be scheduled before a pinned event.
 */
static void
fetch_this_slot(struct bp_busy_slots *slots, int weight)
{
        slots->pinned += weight;
}

/*
 * Add a pinned breakpoint for the given task in our constraint table
 */
static void toggle_bp_task_slot(struct task_struct *tsk, int cpu, bool enable,
                                enum bp_type_idx type, int weight)
{
        unsigned int *tsk_pinned;
        int old_count = 0;
        int old_idx = 0;
        int idx = 0;

        old_count = task_bp_pinned(tsk, type);
        old_idx = old_count - 1;
        idx = old_idx + weight;

        tsk_pinned = per_cpu(nr_task_bp_pinned[type], cpu);
        if (enable) {
                tsk_pinned[idx]++;
                if (old_count > 0)
                        tsk_pinned[old_idx]--;
        } else {
                tsk_pinned[idx]--;
                if (old_count > 0)
                        tsk_pinned[old_idx]++;
        }
}

/*
 * Add/remove the given breakpoint in our constraint table
 */
static void
toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type,
               int weight)
{
        int cpu = bp->cpu;
        struct task_struct *tsk = bp->ctx->task;

        /* Pinned counter task profiling */
        if (tsk) {
                if (cpu >= 0) {
                        toggle_bp_task_slot(tsk, cpu, enable, type, weight);
                        return;
                }

                for_each_online_cpu(cpu)
                        toggle_bp_task_slot(tsk, cpu, enable, type, weight);
                return;
        }

        /* Pinned counter cpu profiling */
        if (enable)
                per_cpu(nr_cpu_bp_pinned[type], bp->cpu) += weight;
        else
                per_cpu(nr_cpu_bp_pinned[type], bp->cpu) -= weight;
}

/*
 * Function to perform processor-specific cleanup during unregistration
 */
__weak void arch_unregister_hw_breakpoint(struct perf_event *bp)
{
        /*
         * A weak stub function here for those archs that don't define
         * it inside arch/.../kernel/hw_breakpoint.c
         */
}

/*
 * Constraints to check before allowing this new breakpoint counter:
 *
 *  == Non-pinned counter == (Considered as pinned for now)
 *
 *   - If attached to a single cpu, check:
 *
 *       (per_cpu(nr_bp_flexible, cpu) || (per_cpu(nr_cpu_bp_pinned, cpu)
 *           + max(per_cpu(nr_task_bp_pinned, cpu)))) < HBP_NUM
 *
 *       -> If there are already non-pinned counters in this cpu, it means
 *          there is already a free slot for them.
 *          Otherwise, we check that the maximum number of per task
 *          breakpoints (for this cpu) plus the number of per cpu breakpoints
 *          (for this cpu) doesn't use up every register.
 *
 *   - If attached to every cpu, check:
 *
 *       (per_cpu(nr_bp_flexible, *) || (max(per_cpu(nr_cpu_bp_pinned, *))
 *           + max(per_cpu(nr_task_bp_pinned, *)))) < HBP_NUM
 *
 *       -> This is roughly the same, except we check the number of per cpu
 *          bp for every cpu and we keep the max one. Same for the per task
 *          breakpoints.
 *
 *
 * == Pinned counter ==
 *
 *   - If attached to a single cpu, check:
 *
 *       ((per_cpu(nr_bp_flexible, cpu) > 1) + per_cpu(nr_cpu_bp_pinned, cpu)
 *           + max(per_cpu(nr_task_bp_pinned, cpu))) < HBP_NUM
 *
 *       -> Same checks as before. But now the nr_bp_flexible, if any, must
 *          keep at least one register (or the flexible counters will never
 *          be fed).
 *
 *   - If attached to every cpu, check:
 *
 *       ((per_cpu(nr_bp_flexible, *) > 1) + max(per_cpu(nr_cpu_bp_pinned, *))
 *           + max(per_cpu(nr_task_bp_pinned, *))) < HBP_NUM
 */
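/*
 * Illustrative worked example of the constraint above (the numbers are
 * hypothetical, not taken from any architecture): assume nr_slots[type] == 4
 * for data breakpoints.  A cpu-bound pinned request of weight 1 arrives
 * while that cpu already has 2 cpu-pinned breakpoints, its busiest task
 * holds 1 pinned breakpoint, and 1 flexible counter exists:
 *
 *      slots.pinned   = 2 + 1          (fetch_bp_busy_slots)
 *      slots.pinned  += 1              (fetch_this_slot, this request)
 *      slots.flexible = 1
 *
 * The check "slots.pinned + (!!slots.flexible) > nr_slots[type]" becomes
 * 4 + 1 > 4, so the reservation is rejected: the flexible counter must keep
 * at least one debug register to ever get scheduled.
 */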
static int __reserve_bp_slot(struct perf_event *bp)
{
        struct bp_busy_slots slots = {0};
        enum bp_type_idx type;
        int weight;

        /* We couldn't initialize breakpoint constraints on boot */
        if (!constraints_initialized)
                return -ENOMEM;

        /* Basic checks */
        if (bp->attr.bp_type == HW_BREAKPOINT_EMPTY ||
            bp->attr.bp_type == HW_BREAKPOINT_INVALID)
                return -EINVAL;

        type = find_slot_idx(bp);
        weight = hw_breakpoint_weight(bp);

        fetch_bp_busy_slots(&slots, bp, type);
        fetch_this_slot(&slots, weight);

        /* Flexible counters need to keep at least one slot */
        if (slots.pinned + (!!slots.flexible) > nr_slots[type])
                return -ENOSPC;

        toggle_bp_slot(bp, true, type, weight);

        return 0;
}

int reserve_bp_slot(struct perf_event *bp)
{
        int ret;

        mutex_lock(&nr_bp_mutex);

        ret = __reserve_bp_slot(bp);

        mutex_unlock(&nr_bp_mutex);

        return ret;
}

static void __release_bp_slot(struct perf_event *bp)
{
        enum bp_type_idx type;
        int weight;

        type = find_slot_idx(bp);
        weight = hw_breakpoint_weight(bp);
        toggle_bp_slot(bp, false, type, weight);
}

void release_bp_slot(struct perf_event *bp)
{
        mutex_lock(&nr_bp_mutex);

        arch_unregister_hw_breakpoint(bp);
        __release_bp_slot(bp);

        mutex_unlock(&nr_bp_mutex);
}

/*
 * Allow the kernel debugger to reserve breakpoint slots without
 * taking a lock, using the dbg_* variants of the reserve and
 * release breakpoint slot calls.
 */
int dbg_reserve_bp_slot(struct perf_event *bp)
{
        if (mutex_is_locked(&nr_bp_mutex))
                return -1;

        return __reserve_bp_slot(bp);
}

int dbg_release_bp_slot(struct perf_event *bp)
{
        if (mutex_is_locked(&nr_bp_mutex))
                return -1;

        __release_bp_slot(bp);

        return 0;
}

static int validate_hw_breakpoint(struct perf_event *bp)
{
        int ret;

        ret = arch_validate_hwbkpt_settings(bp);
        if (ret)
                return ret;

        if (arch_check_bp_in_kernelspace(bp)) {
                if (bp->attr.exclude_kernel)
                        return -EINVAL;
                /*
                 * Don't let unprivileged users set a breakpoint in the trap
                 * path to avoid trap recursion attacks.
                 */
                if (!capable(CAP_SYS_ADMIN))
                        return -EPERM;
        }

        return 0;
}

int register_perf_hw_breakpoint(struct perf_event *bp)
{
        int ret;

        ret = reserve_bp_slot(bp);
        if (ret)
                return ret;

        ret = validate_hw_breakpoint(bp);

        /* if arch_validate_hwbkpt_settings() fails then release bp slot */
        if (ret)
                release_bp_slot(bp);

        return ret;
}

/**
 * register_user_hw_breakpoint - register a hardware breakpoint for user space
 * @attr: breakpoint attributes
 * @triggered: callback to trigger when we hit the breakpoint
 * @tsk: pointer to 'task_struct' of the process to which the address belongs
 */
struct perf_event *
register_user_hw_breakpoint(struct perf_event_attr *attr,
                            perf_overflow_handler_t triggered,
                            struct task_struct *tsk)
{
        return perf_event_create_kernel_counter(attr, -1, tsk->pid, triggered);
}
EXPORT_SYMBOL_GPL(register_user_hw_breakpoint);

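/*
 * A minimal usage sketch (illustrative only; the handler, task and address
 * below are hypothetical, not defined in this file):
 *
 *      struct perf_event_attr attr;
 *      struct perf_event *bp;
 *
 *      hw_breakpoint_init(&attr);
 *      attr.bp_addr = user_addr;       // address in the traced task's space
 *      attr.bp_len  = HW_BREAKPOINT_LEN_4;
 *      attr.bp_type = HW_BREAKPOINT_W;
 *
 *      bp = register_user_hw_breakpoint(&attr, my_trigger_handler, tsk);
 *      if (IS_ERR(bp))
 *              return PTR_ERR(bp);
 */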
/**
 * modify_user_hw_breakpoint - modify a user-space hardware breakpoint
 * @bp: the breakpoint structure to modify
 * @attr: new breakpoint attributes
 */
int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr)
{
        u64 old_addr = bp->attr.bp_addr;
        u64 old_len = bp->attr.bp_len;
        int old_type = bp->attr.bp_type;
        int err = 0;

        perf_event_disable(bp);

        bp->attr.bp_addr = attr->bp_addr;
        bp->attr.bp_type = attr->bp_type;
        bp->attr.bp_len = attr->bp_len;

        if (attr->disabled)
                goto end;

        err = validate_hw_breakpoint(bp);
        if (!err)
                perf_event_enable(bp);

        if (err) {
                /* Validation failed: restore the old settings */
                bp->attr.bp_addr = old_addr;
                bp->attr.bp_type = old_type;
                bp->attr.bp_len = old_len;
                if (!bp->attr.disabled)
                        perf_event_enable(bp);

                return err;
        }

end:
        bp->attr.disabled = attr->disabled;

        return 0;
}
EXPORT_SYMBOL_GPL(modify_user_hw_breakpoint);

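/*
 * Illustrative sketch (the new address is hypothetical): move an existing
 * user breakpoint to a different address while keeping its type and length,
 * letting the function re-validate and roll back on failure:
 *
 *      struct perf_event_attr attr = bp->attr;
 *
 *      attr.bp_addr = new_addr;
 *      err = modify_user_hw_breakpoint(bp, &attr);
 */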
/**
 * unregister_hw_breakpoint - unregister a user-space hardware breakpoint
 * @bp: the breakpoint structure to unregister
 */
void unregister_hw_breakpoint(struct perf_event *bp)
{
        if (!bp)
                return;
        perf_event_release_kernel(bp);
}
EXPORT_SYMBOL_GPL(unregister_hw_breakpoint);

/**
 * register_wide_hw_breakpoint - register a wide breakpoint in the kernel
 * @attr: breakpoint attributes
 * @triggered: callback to trigger when we hit the breakpoint
 *
 * @return a set of per_cpu pointers to perf events
 */
struct perf_event * __percpu *
register_wide_hw_breakpoint(struct perf_event_attr *attr,
                            perf_overflow_handler_t triggered)
{
        struct perf_event * __percpu *cpu_events, **pevent, *bp;
        long err;
        int cpu;

        cpu_events = alloc_percpu(typeof(*cpu_events));
        if (!cpu_events)
                return (void __percpu __force *)ERR_PTR(-ENOMEM);

        get_online_cpus();
        for_each_online_cpu(cpu) {
                pevent = per_cpu_ptr(cpu_events, cpu);
                bp = perf_event_create_kernel_counter(attr, cpu, -1, triggered);

                *pevent = bp;

                if (IS_ERR(bp)) {
                        err = PTR_ERR(bp);
                        goto fail;
                }
        }
        put_online_cpus();

        return cpu_events;

fail:
        for_each_online_cpu(cpu) {
                pevent = per_cpu_ptr(cpu_events, cpu);
                if (IS_ERR(*pevent))
                        break;
                unregister_hw_breakpoint(*pevent);
        }
        put_online_cpus();

        free_percpu(cpu_events);
        return (void __percpu __force *)ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(register_wide_hw_breakpoint);

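/*
 * Illustrative sketch, modeled on a typical in-kernel caller (the symbol
 * name and handler are examples, not taken from this file): watch writes
 * to a kernel variable on every cpu.
 *
 *      struct perf_event_attr attr;
 *      struct perf_event * __percpu *wp;
 *
 *      hw_breakpoint_init(&attr);
 *      attr.bp_addr = kallsyms_lookup_name("pid_max");
 *      attr.bp_len  = HW_BREAKPOINT_LEN_4;
 *      attr.bp_type = HW_BREAKPOINT_W;
 *
 *      wp = register_wide_hw_breakpoint(&attr, my_wide_handler);
 *      if (IS_ERR((void __force *)wp))
 *              return PTR_ERR((void __force *)wp);
 *      ...
 *      unregister_wide_hw_breakpoint(wp);
 */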
/**
 * unregister_wide_hw_breakpoint - unregister a wide breakpoint in the kernel
 * @cpu_events: the per cpu set of events to unregister
 */
void unregister_wide_hw_breakpoint(struct perf_event * __percpu *cpu_events)
{
        int cpu;
        struct perf_event **pevent;

        for_each_possible_cpu(cpu) {
                pevent = per_cpu_ptr(cpu_events, cpu);
                unregister_hw_breakpoint(*pevent);
        }
        free_percpu(cpu_events);
}
EXPORT_SYMBOL_GPL(unregister_wide_hw_breakpoint);

static struct notifier_block hw_breakpoint_exceptions_nb = {
        .notifier_call = hw_breakpoint_exceptions_notify,
        /* we need to be notified first */
        .priority = 0x7fffffff
};

static int __init init_hw_breakpoint(void)
{
        unsigned int **task_bp_pinned;
        int cpu, err_cpu;
        int i;

        for (i = 0; i < TYPE_MAX; i++)
                nr_slots[i] = hw_breakpoint_slots(i);

        for_each_possible_cpu(cpu) {
                for (i = 0; i < TYPE_MAX; i++) {
                        task_bp_pinned = &per_cpu(nr_task_bp_pinned[i], cpu);
                        *task_bp_pinned = kzalloc(sizeof(int) * nr_slots[i],
                                                  GFP_KERNEL);
                        if (!*task_bp_pinned)
                                goto err_alloc;
                }
        }

        constraints_initialized = 1;

        return register_die_notifier(&hw_breakpoint_exceptions_nb);

 err_alloc:
        /* Free what was allocated, up to and including the failing cpu */
        for_each_possible_cpu(err_cpu) {
                for (i = 0; i < TYPE_MAX; i++)
                        kfree(per_cpu(nr_task_bp_pinned[i], err_cpu));
                if (err_cpu == cpu)
                        break;
        }

        return -ENOMEM;
}
core_initcall(init_hw_breakpoint);

struct pmu perf_ops_bp = {
        .enable         = arch_install_hw_breakpoint,
        .disable        = arch_uninstall_hw_breakpoint,
        .read           = hw_breakpoint_pmu_read,
};