/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) 2007 Alan Stern
 * Copyright (C) IBM Corporation, 2009
 * Copyright (C) 2009, Frederic Weisbecker <fweisbec@gmail.com>
 *
 * Thanks to Ingo Molnar for his many suggestions.
 *
 * Authors: Alan Stern <stern@rowland.harvard.edu>
 *          K.Prasad <prasad@linux.vnet.ibm.com>
 *          Frederic Weisbecker <fweisbec@gmail.com>
 */

/*
 * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
 * using the CPU's debug registers.
 * This file contains the arch-independent routines.
 */
#include <linux/irqflags.h>
#include <linux/kallsyms.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/smp.h>

#include <linux/hw_breakpoint.h>
/* Number of pinned cpu breakpoints in a cpu */
static DEFINE_PER_CPU(unsigned int, nr_cpu_bp_pinned);

/* Number of pinned task breakpoints in a cpu */
static DEFINE_PER_CPU(unsigned int, nr_task_bp_pinned[HBP_NUM]);

/* Number of non-pinned cpu/task breakpoints in a cpu */
static DEFINE_PER_CPU(unsigned int, nr_bp_flexible);

/* Gather the number of total pinned and un-pinned bp in a cpuset */
struct bp_busy_slots {
	unsigned int pinned;
	unsigned int flexible;
};

/* Serialize accesses to the above constraints */
static DEFINE_MUTEX(nr_bp_mutex);
/*
 * Report the maximum number of pinned breakpoints a task
 * has on this cpu
 */
static unsigned int max_task_bp_pinned(int cpu)
{
	int i;
	unsigned int *tsk_pinned = per_cpu(nr_task_bp_pinned, cpu);

	for (i = HBP_NUM - 1; i >= 0; i--) {
		if (tsk_pinned[i] > 0)
			return i + 1;
	}

	return 0;
}
static int task_bp_pinned(struct task_struct *tsk)
{
	struct perf_event_context *ctx = tsk->perf_event_ctxp;
	struct list_head *list;
	struct perf_event *bp;
	unsigned long flags;
	int count = 0;

	if (WARN_ONCE(!ctx, "No perf context for this task"))
		return 0;

	list = &ctx->event_list;

	raw_spin_lock_irqsave(&ctx->lock, flags);

	/*
	 * The current breakpoint counter is not included in the list
	 * at the open() callback time
	 */
	list_for_each_entry(bp, list, event_entry) {
		if (bp->attr.type == PERF_TYPE_BREAKPOINT)
			count++;
	}

	raw_spin_unlock_irqrestore(&ctx->lock, flags);

	return count;
}
/*
 * Report the number of pinned/un-pinned breakpoints we have in
 * a given cpu (cpu > -1) or in all of them (cpu = -1).
 */
static void
fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp)
{
	int cpu = bp->cpu;
	struct task_struct *tsk = bp->ctx->task;

	if (cpu >= 0) {
		slots->pinned = per_cpu(nr_cpu_bp_pinned, cpu);
		if (!tsk)
			slots->pinned += max_task_bp_pinned(cpu);
		else
			slots->pinned += task_bp_pinned(tsk);
		slots->flexible = per_cpu(nr_bp_flexible, cpu);

		return;
	}

	for_each_online_cpu(cpu) {
		unsigned int nr;

		nr = per_cpu(nr_cpu_bp_pinned, cpu);
		if (!tsk)
			nr += max_task_bp_pinned(cpu);
		else
			nr += task_bp_pinned(tsk);

		if (nr > slots->pinned)
			slots->pinned = nr;

		nr = per_cpu(nr_bp_flexible, cpu);

		if (nr > slots->flexible)
			slots->flexible = nr;
	}
}
/*
 * Add a pinned breakpoint for the given task in our constraint table
 */
static void toggle_bp_task_slot(struct task_struct *tsk, int cpu, bool enable)
{
	unsigned int *tsk_pinned;
	int count;

	count = task_bp_pinned(tsk);

	tsk_pinned = per_cpu(nr_task_bp_pinned, cpu);
	if (enable) {
		tsk_pinned[count]++;
		if (count > 0)
			tsk_pinned[count-1]--;
	} else {
		tsk_pinned[count]--;
		if (count > 0)
			tsk_pinned[count-1]++;
	}
}
/*
 * Add/remove the given breakpoint in our constraint table
 */
static void toggle_bp_slot(struct perf_event *bp, bool enable)
{
	int cpu = bp->cpu;
	struct task_struct *tsk = bp->ctx->task;

	/* Pinned counter task profiling */
	if (tsk) {
		if (cpu >= 0) {
			toggle_bp_task_slot(tsk, cpu, enable);
			return;
		}

		for_each_online_cpu(cpu)
			toggle_bp_task_slot(tsk, cpu, enable);
		return;
	}

	/* Pinned counter cpu profiling */
	if (enable)
		per_cpu(nr_cpu_bp_pinned, bp->cpu)++;
	else
		per_cpu(nr_cpu_bp_pinned, bp->cpu)--;
}
/*
 * Constraints to check before allowing this new breakpoint counter:
 *
 *  == Non-pinned counter == (Considered as pinned for now)
 *
 *   - If attached to a single cpu, check:
 *
 *       (per_cpu(nr_bp_flexible, cpu) || (per_cpu(nr_cpu_bp_pinned, cpu)
 *            + max(per_cpu(nr_task_bp_pinned, cpu)))) < HBP_NUM
 *
 *       -> If there are already non-pinned counters in this cpu, it means
 *          there is already a free slot for them.
 *          Otherwise, we check that the maximum number of per task
 *          breakpoints (for this cpu) plus the number of per cpu breakpoints
 *          (for this cpu) doesn't cover every register.
 *
 *   - If attached to every cpu, check:
 *
 *       (per_cpu(nr_bp_flexible, *) || (max(per_cpu(nr_cpu_bp_pinned, *))
 *           + max(per_cpu(nr_task_bp_pinned, *)))) < HBP_NUM
 *
 *       -> This is roughly the same, except we check the number of per cpu
 *          bp for every cpu and we keep the max one. Same for the per task
 *          breakpoints.
 *
 * == Pinned counter ==
 *
 *   - If attached to a single cpu, check:
 *
 *       ((per_cpu(nr_bp_flexible, cpu) > 1) + per_cpu(nr_cpu_bp_pinned, cpu)
 *            + max(per_cpu(nr_task_bp_pinned, cpu))) < HBP_NUM
 *
 *       -> Same checks as before. But now the nr_bp_flexible, if any, must
 *          keep at least one register (or they will never be fed).
 *
 *   - If attached to every cpu, check:
 *
 *       ((per_cpu(nr_bp_flexible, *) > 1) + max(per_cpu(nr_cpu_bp_pinned, *))
 *            + max(per_cpu(nr_task_bp_pinned, *))) < HBP_NUM
 */
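/*
 * Illustrative worked example (editor's sketch, not from the original
 * source), assuming x86, where HBP_NUM is 4: suppose a cpu already has 2
 * pinned per-cpu breakpoints, the busiest task on that cpu has 1 pinned
 * breakpoint, and no flexible counters are running. A new pinned,
 * single-cpu breakpoint then sees slots.pinned = 2 + 1 = 3 and
 * slots.flexible = 0, so the check in __reserve_bp_slot() below computes
 * 3 + !!0 = 3 != HBP_NUM and the reservation succeeds. Had one flexible
 * counter been running, 3 + !!1 = 4 == HBP_NUM would yield -ENOSPC,
 * keeping one debug register free so the flexible counter can still be
 * scheduled.
 */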
static int __reserve_bp_slot(struct perf_event *bp)
{
	struct bp_busy_slots slots = {0};

	fetch_bp_busy_slots(&slots, bp);

	/* Flexible counters need to keep at least one slot */
	if (slots.pinned + (!!slots.flexible) == HBP_NUM)
		return -ENOSPC;

	toggle_bp_slot(bp, true);

	return 0;
}
int reserve_bp_slot(struct perf_event *bp)
{
	int ret;

	mutex_lock(&nr_bp_mutex);

	ret = __reserve_bp_slot(bp);

	mutex_unlock(&nr_bp_mutex);

	return ret;
}
static void __release_bp_slot(struct perf_event *bp)
{
	toggle_bp_slot(bp, false);
}

void release_bp_slot(struct perf_event *bp)
{
	mutex_lock(&nr_bp_mutex);

	__release_bp_slot(bp);

	mutex_unlock(&nr_bp_mutex);
}
/*
 * Allow the kernel debugger to reserve breakpoint slots without
 * taking a lock, using the dbg_* variants of the reserve and
 * release functions.
 */
int dbg_reserve_bp_slot(struct perf_event *bp)
{
	if (mutex_is_locked(&nr_bp_mutex))
		return -1;

	return __reserve_bp_slot(bp);
}

int dbg_release_bp_slot(struct perf_event *bp)
{
	if (mutex_is_locked(&nr_bp_mutex))
		return -1;

	__release_bp_slot(bp);

	return 0;
}
int register_perf_hw_breakpoint(struct perf_event *bp)
{
	int ret;

	ret = reserve_bp_slot(bp);
	if (ret)
		return ret;

	/*
	 * Ptrace breakpoints can be temporary perf events only
	 * meant to reserve a slot. In this case, the event is created
	 * disabled and we don't want to check the params right now
	 * (as we put a null addr). But perf tools create events as
	 * disabled and we want to check the params for them.
	 * This is a quick hack that will be removed soon, once we remove
	 * the tmp breakpoints from ptrace.
	 */
	if (!bp->attr.disabled || !bp->overflow_handler)
		ret = arch_validate_hwbkpt_settings(bp, bp->ctx->task);

	/* if arch_validate_hwbkpt_settings() fails then release bp slot */
	if (ret)
		release_bp_slot(bp);

	return ret;
}
/**
 * register_user_hw_breakpoint - register a hardware breakpoint for user space
 * @attr: breakpoint attributes
 * @triggered: callback to trigger when we hit the breakpoint
 * @tsk: pointer to 'task_struct' of the process to which the address belongs
 */
struct perf_event *
register_user_hw_breakpoint(struct perf_event_attr *attr,
			    perf_overflow_handler_t triggered,
			    struct task_struct *tsk)
{
	return perf_event_create_kernel_counter(attr, -1, tsk->pid, triggered);
}
EXPORT_SYMBOL_GPL(register_user_hw_breakpoint);
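/*
 * Minimal usage sketch (editor's illustration, not part of this file):
 * hw_breakpoint_init() and the HW_BREAKPOINT_* constants come from
 * <linux/hw_breakpoint.h>; "addr", "tsk" and "my_handler" stand in for
 * caller-supplied values.
 *
 *	struct perf_event_attr attr;
 *	struct perf_event *bp;
 *
 *	hw_breakpoint_init(&attr);
 *	attr.bp_addr = addr;
 *	attr.bp_len = HW_BREAKPOINT_LEN_4;
 *	attr.bp_type = HW_BREAKPOINT_W;
 *
 *	bp = register_user_hw_breakpoint(&attr, my_handler, tsk);
 *	if (IS_ERR(bp))
 *		return PTR_ERR(bp);
 */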
/**
 * modify_user_hw_breakpoint - modify a user-space hardware breakpoint
 * @bp: the breakpoint structure to modify
 * @attr: new breakpoint attributes
 */
int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr)
{
	u64 old_addr = bp->attr.bp_addr;
	u64 old_len = bp->attr.bp_len;
	int old_type = bp->attr.bp_type;
	int err = 0;

	perf_event_disable(bp);

	bp->attr.bp_addr = attr->bp_addr;
	bp->attr.bp_type = attr->bp_type;
	bp->attr.bp_len = attr->bp_len;

	if (attr->disabled)
		goto end;

	err = arch_validate_hwbkpt_settings(bp, bp->ctx->task);
	if (!err)
		perf_event_enable(bp);

	if (err) {
		bp->attr.bp_addr = old_addr;
		bp->attr.bp_type = old_type;
		bp->attr.bp_len = old_len;
		if (!bp->attr.disabled)
			perf_event_enable(bp);

		return err;
	}

end:
	bp->attr.disabled = attr->disabled;

	return 0;
}
EXPORT_SYMBOL_GPL(modify_user_hw_breakpoint);
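/*
 * Example (editor's sketch; "new_addr" is a hypothetical caller-supplied
 * address): moving an existing watchpoint only requires an updated attr,
 * since the old settings are restored automatically when validation fails.
 *
 *	struct perf_event_attr attr = bp->attr;
 *	int err;
 *
 *	attr.bp_addr = new_addr;
 *	err = modify_user_hw_breakpoint(bp, &attr);
 *	if (err)
 *		pr_err("failed to move breakpoint: %d\n", err);
 */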
/**
 * unregister_hw_breakpoint - unregister a user-space hardware breakpoint
 * @bp: the breakpoint structure to unregister
 */
void unregister_hw_breakpoint(struct perf_event *bp)
{
	if (!bp)
		return;
	perf_event_release_kernel(bp);
}
EXPORT_SYMBOL_GPL(unregister_hw_breakpoint);
/**
 * register_wide_hw_breakpoint - register a wide breakpoint in the kernel
 * @attr: breakpoint attributes
 * @triggered: callback to trigger when we hit the breakpoint
 *
 * @return a set of per_cpu pointers to perf events
 */
struct perf_event * __percpu *
register_wide_hw_breakpoint(struct perf_event_attr *attr,
			    perf_overflow_handler_t triggered)
{
	struct perf_event * __percpu *cpu_events, **pevent, *bp;
	long err;
	int cpu;

	cpu_events = alloc_percpu(typeof(*cpu_events));
	if (!cpu_events)
		return (void __percpu __force *)ERR_PTR(-ENOMEM);

	get_online_cpus();
	for_each_online_cpu(cpu) {
		pevent = per_cpu_ptr(cpu_events, cpu);
		bp = perf_event_create_kernel_counter(attr, cpu, -1, triggered);

		*pevent = bp;

		if (IS_ERR(bp)) {
			err = PTR_ERR(bp);
			goto fail;
		}
	}
	put_online_cpus();

	return cpu_events;

fail:
	for_each_online_cpu(cpu) {
		pevent = per_cpu_ptr(cpu_events, cpu);
		if (IS_ERR(*pevent))
			break;
		unregister_hw_breakpoint(*pevent);
	}
	put_online_cpus();

	free_percpu(cpu_events);
	return (void __percpu __force *)ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(register_wide_hw_breakpoint);
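/*
 * Minimal usage sketch (editor's illustration, modeled on
 * samples/hw_breakpoint/data_breakpoint.c; "symbol_name" and
 * "wide_handler" stand in for caller-supplied values):
 *
 *	struct perf_event_attr attr;
 *	struct perf_event * __percpu *wide_bp;
 *
 *	hw_breakpoint_init(&attr);
 *	attr.bp_addr = kallsyms_lookup_name(symbol_name);
 *	attr.bp_len = HW_BREAKPOINT_LEN_4;
 *	attr.bp_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R;
 *
 *	wide_bp = register_wide_hw_breakpoint(&attr, wide_handler);
 *	if (IS_ERR((void __force *)wide_bp))
 *		return PTR_ERR((void __force *)wide_bp);
 */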
/**
 * unregister_wide_hw_breakpoint - unregister a wide breakpoint in the kernel
 * @cpu_events: the per cpu set of events to unregister
 */
void unregister_wide_hw_breakpoint(struct perf_event * __percpu *cpu_events)
{
	int cpu;
	struct perf_event **pevent;

	for_each_possible_cpu(cpu) {
		pevent = per_cpu_ptr(cpu_events, cpu);
		unregister_hw_breakpoint(*pevent);
	}
	free_percpu(cpu_events);
}
EXPORT_SYMBOL_GPL(unregister_wide_hw_breakpoint);
static struct notifier_block hw_breakpoint_exceptions_nb = {
	.notifier_call = hw_breakpoint_exceptions_notify,
	/* we need to be notified first */
	.priority = 0x7fffffff
};

static int __init init_hw_breakpoint(void)
{
	return register_die_notifier(&hw_breakpoint_exceptions_nb);
}
core_initcall(init_hw_breakpoint);
struct pmu perf_ops_bp = {
	.enable		= arch_install_hw_breakpoint,
	.disable	= arch_uninstall_hw_breakpoint,
	.read		= hw_breakpoint_pmu_read,
};