/*
 * Performance events core code:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *  Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 *  For licensing details see kernel-base/COPYING
 */
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/sysfs.h>
#include <linux/dcache.h>
#include <linux/percpu.h>
#include <linux/ptrace.h>
#include <linux/vmstat.h>
#include <linux/hardirq.h>
#include <linux/rculist.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>
#include <linux/anon_inodes.h>
#include <linux/kernel_stat.h>
#include <linux/perf_event.h>

#include <asm/irq_regs.h>
/*
 * Each CPU has a list of per CPU events:
 */
DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context);

int perf_max_events __read_mostly = 1;
static int perf_reserved_percpu __read_mostly;
static int perf_overcommit __read_mostly = 1;

static atomic_t nr_events __read_mostly;
static atomic_t nr_mmap_events __read_mostly;
static atomic_t nr_comm_events __read_mostly;
static atomic_t nr_task_events __read_mostly;
/*
 * perf event paranoia level:
 *  -1 - not paranoid at all
 *   0 - disallow raw tracepoint access for unpriv
 *   1 - disallow cpu events for unpriv
 *   2 - disallow kernel profiling for unpriv
 */
int sysctl_perf_event_paranoid __read_mostly = 1;

static inline bool perf_paranoid_tracepoint_raw(void)
{
	return sysctl_perf_event_paranoid > -1;
}

static inline bool perf_paranoid_cpu(void)
{
	return sysctl_perf_event_paranoid > 0;
}

static inline bool perf_paranoid_kernel(void)
{
	return sysctl_perf_event_paranoid > 1;
}
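/*
 * Illustrative sketch (not part of this file): how a userspace tool
 * might consult the same paranoia level before attempting a CPU-wide
 * event. The sysctl is exposed at /proc/sys/kernel/perf_event_paranoid;
 * read_paranoia_level() is a made-up helper name.
 */
#if 0
#include <stdio.h>

static int read_paranoia_level(void)
{
	FILE *f = fopen("/proc/sys/kernel/perf_event_paranoid", "r");
	int level = 2;	/* assume the most restrictive level on error */

	if (f) {
		if (fscanf(f, "%d", &level) != 1)
			level = 2;
		fclose(f);
	}
	return level;
}

/* An unprivileged CPU-wide event needs level <= 0 (otherwise
 * CAP_SYS_ADMIN), matching the perf_paranoid_cpu() check above. */
#endif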
int sysctl_perf_event_mlock __read_mostly = 512; /* 'free' kb per user */

/*
 * max perf event sample rate
 */
int sysctl_perf_event_sample_rate __read_mostly = 100000;
static atomic64_t perf_event_id;

/*
 * Lock for (sysadmin-configurable) event reservations:
 */
static DEFINE_SPINLOCK(perf_resource_lock);
/*
 * Architecture provided APIs - weak aliases:
 */
extern __weak const struct pmu *hw_perf_event_init(struct perf_event *event)
{
	return NULL;
}

void __weak hw_perf_disable(void)		{ barrier(); }
void __weak hw_perf_enable(void)		{ barrier(); }

void __weak hw_perf_event_setup(int cpu)	{ barrier(); }
void __weak hw_perf_event_setup_online(int cpu)	{ barrier(); }

int __weak
hw_perf_group_sched_in(struct perf_event *group_leader,
	       struct perf_cpu_context *cpuctx,
	       struct perf_event_context *ctx, int cpu)
{
	return 0;
}

void __weak perf_event_print_debug(void)	{ }
static DEFINE_PER_CPU(int, perf_disable_count);

void __perf_disable(void)
{
	__get_cpu_var(perf_disable_count)++;
}

bool __perf_enable(void)
{
	return !--__get_cpu_var(perf_disable_count);
}

void perf_disable(void)
{
	__perf_disable();
	hw_perf_disable();
}

void perf_enable(void)
{
	if (__perf_enable())
		hw_perf_enable();
}
static void get_ctx(struct perf_event_context *ctx)
{
	WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
}

static void free_ctx(struct rcu_head *head)
{
	struct perf_event_context *ctx;

	ctx = container_of(head, struct perf_event_context, rcu_head);
	kfree(ctx);
}

static void put_ctx(struct perf_event_context *ctx)
{
	if (atomic_dec_and_test(&ctx->refcount)) {
		if (ctx->parent_ctx)
			put_ctx(ctx->parent_ctx);
		if (ctx->task)
			put_task_struct(ctx->task);
		call_rcu(&ctx->rcu_head, free_ctx);
	}
}
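/*
 * The refcount + call_rcu() pairing above is the usual RCU deferred-free
 * pattern: readers that found the context under rcu_read_lock() may still
 * be dereferencing it, so the final free is queued and only runs after a
 * grace period. A minimal sketch of the same pattern for a generic object
 * (struct obj and its helpers are illustrative names, not kernel APIs):
 */
#if 0
struct obj {
	atomic_t refcount;
	struct rcu_head rcu_head;
};

static void obj_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct obj, rcu_head));
}

static void obj_put(struct obj *o)
{
	if (atomic_dec_and_test(&o->refcount))
		call_rcu(&o->rcu_head, obj_free_rcu); /* freed after grace period */
}
#endif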
static void unclone_ctx(struct perf_event_context *ctx)
{
	if (ctx->parent_ctx) {
		put_ctx(ctx->parent_ctx);
		ctx->parent_ctx = NULL;
	}
}
/*
 * If we inherit events we want to return the parent event id
 * to userspace.
 */
static u64 primary_event_id(struct perf_event *event)
{
	u64 id = event->id;

	if (event->parent)
		id = event->parent->id;

	return id;
}
/*
 * Get the perf_event_context for a task and lock it.
 * This has to cope with the fact that until it is locked,
 * the context could get moved to another task.
 */
static struct perf_event_context *
perf_lock_task_context(struct task_struct *task, unsigned long *flags)
{
	struct perf_event_context *ctx;

	rcu_read_lock();
retry:
	ctx = rcu_dereference(task->perf_event_ctxp);
	if (ctx) {
		/*
		 * If this context is a clone of another, it might
		 * get swapped for another underneath us by
		 * perf_event_task_sched_out, though the
		 * rcu_read_lock() protects us from any context
		 * getting freed. Lock the context and check if it
		 * got swapped before we could get the lock, and retry
		 * if so. If we locked the right context, then it
		 * can't get swapped on us any more.
		 */
		spin_lock_irqsave(&ctx->lock, *flags);
		if (ctx != rcu_dereference(task->perf_event_ctxp)) {
			spin_unlock_irqrestore(&ctx->lock, *flags);
			goto retry;
		}

		if (!atomic_inc_not_zero(&ctx->refcount)) {
			spin_unlock_irqrestore(&ctx->lock, *flags);
			ctx = NULL;
		}
	}
	rcu_read_unlock();
	return ctx;
}
/*
 * Get the context for a task and increment its pin_count so it
 * can't get swapped to another task. This also increments its
 * reference count so that the context can't get freed.
 */
static struct perf_event_context *perf_pin_task_context(struct task_struct *task)
{
	struct perf_event_context *ctx;
	unsigned long flags;

	ctx = perf_lock_task_context(task, &flags);
	if (ctx) {
		++ctx->pin_count;
		spin_unlock_irqrestore(&ctx->lock, flags);
	}
	return ctx;
}
static void perf_unpin_context(struct perf_event_context *ctx)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx->lock, flags);
	--ctx->pin_count;
	spin_unlock_irqrestore(&ctx->lock, flags);
	put_ctx(ctx);
}
/*
 * Add an event to the lists for its context.
 * Must be called with ctx->mutex and ctx->lock held.
 */
static void
list_add_event(struct perf_event *event, struct perf_event_context *ctx)
{
	struct perf_event *group_leader = event->group_leader;

	/*
	 * Depending on whether it is a standalone or sibling event,
	 * add it straight to the context's event list, or to the group
	 * leader's sibling list:
	 */
	if (group_leader == event)
		list_add_tail(&event->group_entry, &ctx->group_list);
	else {
		list_add_tail(&event->group_entry, &group_leader->sibling_list);
		group_leader->nr_siblings++;
	}

	list_add_rcu(&event->event_entry, &ctx->event_list);
	ctx->nr_events++;
	if (event->attr.inherit_stat)
		ctx->nr_stat++;
}
/*
 * Remove an event from the lists for its context.
 * Must be called with ctx->mutex and ctx->lock held.
 */
static void
list_del_event(struct perf_event *event, struct perf_event_context *ctx)
{
	struct perf_event *sibling, *tmp;

	if (list_empty(&event->group_entry))
		return;
	ctx->nr_events--;
	if (event->attr.inherit_stat)
		ctx->nr_stat--;

	list_del_init(&event->group_entry);
	list_del_rcu(&event->event_entry);

	if (event->group_leader != event)
		event->group_leader->nr_siblings--;

	/*
	 * If this was a group event with sibling events then
	 * upgrade the siblings to singleton events by adding them
	 * to the context list directly:
	 */
	list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) {
		list_move_tail(&sibling->group_entry, &ctx->group_list);
		sibling->group_leader = sibling;
	}
}
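/*
 * The two-level list structure these helpers maintain, for reference:
 *
 *   ctx->group_list --> leader A --> leader B --> ...
 *                          |
 *                          +- sibling_list --> sibling A1 --> sibling A2
 *
 * Every event (leaders and siblings alike) is also linked on the flat,
 * RCU-protected ctx->event_list. When a leader is deleted, its siblings
 * are promoted onto group_list as singleton leaders, as coded above.
 */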
static void
event_sched_out(struct perf_event *event,
		  struct perf_cpu_context *cpuctx,
		  struct perf_event_context *ctx)
{
	if (event->state != PERF_EVENT_STATE_ACTIVE)
		return;

	event->state = PERF_EVENT_STATE_INACTIVE;
	if (event->pending_disable) {
		event->pending_disable = 0;
		event->state = PERF_EVENT_STATE_OFF;
	}
	event->tstamp_stopped = ctx->time;
	event->pmu->disable(event);
	event->oncpu = -1;

	if (!is_software_event(event))
		cpuctx->active_oncpu--;
	ctx->nr_active--;
	if (event->attr.exclusive || !cpuctx->active_oncpu)
		cpuctx->exclusive = 0;
}
static void
group_sched_out(struct perf_event *group_event,
	      struct perf_cpu_context *cpuctx,
	      struct perf_event_context *ctx)
{
	struct perf_event *event;

	if (group_event->state != PERF_EVENT_STATE_ACTIVE)
		return;

	event_sched_out(group_event, cpuctx, ctx);

	/*
	 * Schedule out siblings (if any):
	 */
	list_for_each_entry(event, &group_event->sibling_list, group_entry)
		event_sched_out(event, cpuctx, ctx);

	if (group_event->attr.exclusive)
		cpuctx->exclusive = 0;
}
/*
 * Cross CPU call to remove a performance event
 *
 * We disable the event on the hardware level first. After that we
 * remove it from the context list.
 */
static void __perf_event_remove_from_context(void *info)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_event *event = info;
	struct perf_event_context *ctx = event->ctx;

	/*
	 * If this is a task context, we need to check whether it is
	 * the current task context of this cpu. If not it has been
	 * scheduled out before the smp call arrived.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)
		return;

	spin_lock(&ctx->lock);
	/*
	 * Protect the list operation against NMI by disabling the
	 * events on a global level.
	 */
	perf_disable();

	event_sched_out(event, cpuctx, ctx);

	list_del_event(event, ctx);

	if (!ctx->task) {
		/*
		 * Allow more per task events with respect to the
		 * reservation:
		 */
		cpuctx->max_pertask =
			min(perf_max_events - ctx->nr_events,
			    perf_max_events - perf_reserved_percpu);
	}

	perf_enable();
	spin_unlock(&ctx->lock);
}
/*
 * Remove the event from a task's (or a CPU's) list of events.
 *
 * Must be called with ctx->mutex held.
 *
 * CPU events are removed with a smp call. For task events we only
 * call when the task is on a CPU.
 *
 * If event->ctx is a cloned context, callers must make sure that
 * every task struct that event->ctx->task could possibly point to
 * remains valid. This is OK when called from perf_release since
 * that only calls us on the top-level context, which can't be a clone.
 * When called from perf_event_exit_task, it's OK because the
 * context has been detached from its task.
 */
static void perf_event_remove_from_context(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Per cpu events are removed via an smp call and
		 * the removal is always successful.
		 */
		smp_call_function_single(event->cpu,
					 __perf_event_remove_from_context,
					 event, 1);
		return;
	}

retry:
	task_oncpu_function_call(task, __perf_event_remove_from_context,
				 event);

	spin_lock_irq(&ctx->lock);
	/*
	 * If the context is active we need to retry the smp call.
	 */
	if (ctx->nr_active && !list_empty(&event->group_entry)) {
		spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * The lock prevents that this context is scheduled in so we
	 * can remove the event safely, if the call above did not
	 * succeed.
	 */
	if (!list_empty(&event->group_entry))
		list_del_event(event, ctx);
	spin_unlock_irq(&ctx->lock);
}
static inline u64 perf_clock(void)
{
	return cpu_clock(smp_processor_id());
}

/*
 * Update the record of the current time in a context.
 */
static void update_context_time(struct perf_event_context *ctx)
{
	u64 now = perf_clock();

	ctx->time += now - ctx->timestamp;
	ctx->timestamp = now;
}
/*
 * Update the total_time_enabled and total_time_running fields for an event.
 */
static void update_event_times(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	u64 run_end;

	if (event->state < PERF_EVENT_STATE_INACTIVE ||
	    event->group_leader->state < PERF_EVENT_STATE_INACTIVE)
		return;

	event->total_time_enabled = ctx->time - event->tstamp_enabled;

	if (event->state == PERF_EVENT_STATE_INACTIVE)
		run_end = event->tstamp_stopped;
	else
		run_end = ctx->time;

	event->total_time_running = run_end - event->tstamp_running;
}
/*
 * Update total_time_enabled and total_time_running for all events in a group.
 */
static void update_group_times(struct perf_event *leader)
{
	struct perf_event *event;

	update_event_times(leader);
	list_for_each_entry(event, &leader->sibling_list, group_entry)
		update_event_times(event);
}
/*
 * Cross CPU call to disable a performance event
 */
static void __perf_event_disable(void *info)
{
	struct perf_event *event = info;
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_event_context *ctx = event->ctx;

	/*
	 * If this is a per-task event, need to check whether this
	 * event's task is the current task on this cpu.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)
		return;

	spin_lock(&ctx->lock);

	/*
	 * If the event is on, turn it off.
	 * If it is in error state, leave it in error state.
	 */
	if (event->state >= PERF_EVENT_STATE_INACTIVE) {
		update_context_time(ctx);
		update_group_times(event);
		if (event == event->group_leader)
			group_sched_out(event, cpuctx, ctx);
		else
			event_sched_out(event, cpuctx, ctx);
		event->state = PERF_EVENT_STATE_OFF;
	}

	spin_unlock(&ctx->lock);
}
/*
 * If event->ctx is a cloned context, callers must make sure that
 * every task struct that event->ctx->task could possibly point to
 * remains valid. This condition is satisfied when called through
 * perf_event_for_each_child or perf_event_for_each because they
 * hold the top-level event's child_mutex, so any descendant that
 * goes to exit will block in sync_child_event.
 * When called from perf_pending_event it's OK because event->ctx
 * is the current context on this CPU and preemption is disabled,
 * hence we can't get into perf_event_task_sched_out for this context.
 */
static void perf_event_disable(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Disable the event on the cpu that it's on
		 */
		smp_call_function_single(event->cpu, __perf_event_disable,
					 event, 1);
		return;
	}

retry:
	task_oncpu_function_call(task, __perf_event_disable, event);

	spin_lock_irq(&ctx->lock);
	/*
	 * If the event is still active, we need to retry the cross-call.
	 */
	if (event->state == PERF_EVENT_STATE_ACTIVE) {
		spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * Since we have the lock this context can't be scheduled
	 * in, so we can change the state safely.
	 */
	if (event->state == PERF_EVENT_STATE_INACTIVE) {
		update_group_times(event);
		event->state = PERF_EVENT_STATE_OFF;
	}

	spin_unlock_irq(&ctx->lock);
}
static int
event_sched_in(struct perf_event *event,
		 struct perf_cpu_context *cpuctx,
		 struct perf_event_context *ctx,
		 int cpu)
{
	if (event->state <= PERF_EVENT_STATE_OFF)
		return 0;

	event->state = PERF_EVENT_STATE_ACTIVE;
	event->oncpu = cpu;	/* TODO: put 'cpu' into cpuctx->cpu */
	/*
	 * The new state must be visible before we turn it on in the hardware:
	 */
	smp_wmb();

	if (event->pmu->enable(event)) {
		event->state = PERF_EVENT_STATE_INACTIVE;
		event->oncpu = -1;
		return -EAGAIN;
	}

	event->tstamp_running += ctx->time - event->tstamp_stopped;

	if (!is_software_event(event))
		cpuctx->active_oncpu++;
	ctx->nr_active++;

	if (event->attr.exclusive)
		cpuctx->exclusive = 1;

	return 0;
}
static int
group_sched_in(struct perf_event *group_event,
	       struct perf_cpu_context *cpuctx,
	       struct perf_event_context *ctx,
	       int cpu)
{
	struct perf_event *event, *partial_group;
	int ret;

	if (group_event->state == PERF_EVENT_STATE_OFF)
		return 0;

	ret = hw_perf_group_sched_in(group_event, cpuctx, ctx, cpu);
	if (ret)
		return ret < 0 ? ret : 0;

	if (event_sched_in(group_event, cpuctx, ctx, cpu))
		return -EAGAIN;

	/*
	 * Schedule in siblings as one group (if any):
	 */
	list_for_each_entry(event, &group_event->sibling_list, group_entry) {
		if (event_sched_in(event, cpuctx, ctx, cpu)) {
			partial_group = event;
			goto group_error;
		}
	}

	return 0;

group_error:
	/*
	 * Groups can be scheduled in as one unit only, so undo any
	 * partial group before returning:
	 */
	list_for_each_entry(event, &group_event->sibling_list, group_entry) {
		if (event == partial_group)
			break;
		event_sched_out(event, cpuctx, ctx);
	}
	event_sched_out(group_event, cpuctx, ctx);

	return -EAGAIN;
}
/*
 * Return 1 for a group consisting entirely of software events,
 * 0 if the group contains any hardware events.
 */
static int is_software_only_group(struct perf_event *leader)
{
	struct perf_event *event;

	if (!is_software_event(leader))
		return 0;

	list_for_each_entry(event, &leader->sibling_list, group_entry)
		if (!is_software_event(event))
			return 0;

	return 1;
}
/*
 * Work out whether we can put this event group on the CPU now.
 */
static int group_can_go_on(struct perf_event *event,
			   struct perf_cpu_context *cpuctx,
			   int can_add_hw)
{
	/*
	 * Groups consisting entirely of software events can always go on.
	 */
	if (is_software_only_group(event))
		return 1;
	/*
	 * If an exclusive group is already on, no other hardware
	 * events can go on.
	 */
	if (cpuctx->exclusive)
		return 0;
	/*
	 * If this group is exclusive and there are already
	 * events on the CPU, it can't go on.
	 */
	if (event->attr.exclusive && cpuctx->active_oncpu)
		return 0;
	/*
	 * Otherwise, try to add it if all previous groups were able
	 * to go on.
	 */
	return can_add_hw;
}
static void add_event_to_ctx(struct perf_event *event,
			       struct perf_event_context *ctx)
{
	list_add_event(event, ctx);
	event->tstamp_enabled = ctx->time;
	event->tstamp_running = ctx->time;
	event->tstamp_stopped = ctx->time;
}
/*
 * Cross CPU call to install and enable a performance event
 *
 * Must be called with ctx->mutex held
 */
static void __perf_install_in_context(void *info)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_event *event = info;
	struct perf_event_context *ctx = event->ctx;
	struct perf_event *leader = event->group_leader;
	int cpu = smp_processor_id();
	int err;

	/*
	 * If this is a task context, we need to check whether it is
	 * the current task context of this cpu. If not it has been
	 * scheduled out before the smp call arrived.
	 * Or possibly this is the right context but it isn't
	 * on this cpu because it had no events.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx) {
		if (cpuctx->task_ctx || ctx->task != current)
			return;
		cpuctx->task_ctx = ctx;
	}

	spin_lock(&ctx->lock);
	ctx->is_active = 1;
	update_context_time(ctx);

	/*
	 * Protect the list operation against NMI by disabling the
	 * events on a global level. NOP for non NMI based events.
	 */
	perf_disable();

	add_event_to_ctx(event, ctx);

	/*
	 * Don't put the event on if it is disabled or if
	 * it is in a group and the group isn't on.
	 */
	if (event->state != PERF_EVENT_STATE_INACTIVE ||
	    (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE))
		goto unlock;

	/*
	 * An exclusive event can't go on if there are already active
	 * hardware events, and no hardware event can go on if there
	 * is already an exclusive event on.
	 */
	if (!group_can_go_on(event, cpuctx, 1))
		err = -EEXIST;
	else
		err = event_sched_in(event, cpuctx, ctx, cpu);

	if (err) {
		/*
		 * This event couldn't go on. If it is in a group
		 * then we have to pull the whole group off.
		 * If the event group is pinned then put it in error state.
		 */
		if (leader != event)
			group_sched_out(leader, cpuctx, ctx);
		if (leader->attr.pinned) {
			update_group_times(leader);
			leader->state = PERF_EVENT_STATE_ERROR;
		}
	}

	if (!err && !ctx->task && cpuctx->max_pertask)
		cpuctx->max_pertask--;

unlock:
	perf_enable();

	spin_unlock(&ctx->lock);
}
/*
 * Attach a performance event to a context
 *
 * First we add the event to the list with the hardware enable bit
 * in event->hw_config cleared.
 *
 * If the event is attached to a task which is on a CPU we use a smp
 * call to enable it in the task context. The task might have been
 * scheduled away, but we check this in the smp call again.
 *
 * Must be called with ctx->mutex held.
 */
static void
perf_install_in_context(struct perf_event_context *ctx,
			struct perf_event *event,
			int cpu)
{
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Per cpu events are installed via an smp call and
		 * the install is always successful.
		 */
		smp_call_function_single(cpu, __perf_install_in_context,
					 event, 1);
		return;
	}

retry:
	task_oncpu_function_call(task, __perf_install_in_context,
				 event);

	spin_lock_irq(&ctx->lock);
	/*
	 * we need to retry the smp call.
	 */
	if (ctx->is_active && list_empty(&event->group_entry)) {
		spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * The lock prevents that this context is scheduled in so we
	 * can add the event safely, if the call above did not
	 * succeed.
	 */
	if (list_empty(&event->group_entry))
		add_event_to_ctx(event, ctx);
	spin_unlock_irq(&ctx->lock);
}
/*
 * Put an event into inactive state and update time fields.
 * Enabling the leader of a group effectively enables all
 * the group members that aren't explicitly disabled, so we
 * have to update their ->tstamp_enabled also.
 * Note: this works for group members as well as group leaders
 * since the non-leader members' sibling_lists will be empty.
 */
static void __perf_event_mark_enabled(struct perf_event *event,
					struct perf_event_context *ctx)
{
	struct perf_event *sub;

	event->state = PERF_EVENT_STATE_INACTIVE;
	event->tstamp_enabled = ctx->time - event->total_time_enabled;
	list_for_each_entry(sub, &event->sibling_list, group_entry)
		if (sub->state >= PERF_EVENT_STATE_INACTIVE)
			sub->tstamp_enabled =
				ctx->time - sub->total_time_enabled;
}
/*
 * Cross CPU call to enable a performance event
 */
static void __perf_event_enable(void *info)
{
	struct perf_event *event = info;
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_event_context *ctx = event->ctx;
	struct perf_event *leader = event->group_leader;
	int err;

	/*
	 * If this is a per-task event, need to check whether this
	 * event's task is the current task on this cpu.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx) {
		if (cpuctx->task_ctx || ctx->task != current)
			return;
		cpuctx->task_ctx = ctx;
	}

	spin_lock(&ctx->lock);
	ctx->is_active = 1;
	update_context_time(ctx);

	if (event->state >= PERF_EVENT_STATE_INACTIVE)
		goto unlock;
	__perf_event_mark_enabled(event, ctx);

	/*
	 * If the event is in a group and isn't the group leader,
	 * then don't put it on unless the group is on.
	 */
	if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE)
		goto unlock;

	if (!group_can_go_on(event, cpuctx, 1)) {
		err = -EEXIST;
	} else {
		perf_disable();
		if (event == leader)
			err = group_sched_in(event, cpuctx, ctx,
					     smp_processor_id());
		else
			err = event_sched_in(event, cpuctx, ctx,
					     smp_processor_id());
		perf_enable();
	}

	if (err) {
		/*
		 * If this event can't go on and it's part of a
		 * group, then the whole group has to come off.
		 */
		if (leader != event)
			group_sched_out(leader, cpuctx, ctx);
		if (leader->attr.pinned) {
			update_group_times(leader);
			leader->state = PERF_EVENT_STATE_ERROR;
		}
	}

unlock:
	spin_unlock(&ctx->lock);
}
/*
 * If event->ctx is a cloned context, callers must make sure that
 * every task struct that event->ctx->task could possibly point to
 * remains valid. This condition is satisfied when called through
 * perf_event_for_each_child or perf_event_for_each as described
 * for perf_event_disable.
 */
static void perf_event_enable(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Enable the event on the cpu that it's on
		 */
		smp_call_function_single(event->cpu, __perf_event_enable,
					 event, 1);
		return;
	}

	spin_lock_irq(&ctx->lock);
	if (event->state >= PERF_EVENT_STATE_INACTIVE)
		goto out;

	/*
	 * If the event is in error state, clear that first.
	 * That way, if we see the event in error state below, we
	 * know that it has gone back into error state, as distinct
	 * from the task having been scheduled away before the
	 * cross-call arrived.
	 */
	if (event->state == PERF_EVENT_STATE_ERROR)
		event->state = PERF_EVENT_STATE_OFF;

retry:
	spin_unlock_irq(&ctx->lock);
	task_oncpu_function_call(task, __perf_event_enable, event);

	spin_lock_irq(&ctx->lock);

	/*
	 * If the context is active and the event is still off,
	 * we need to retry the cross-call.
	 */
	if (ctx->is_active && event->state == PERF_EVENT_STATE_OFF)
		goto retry;

	/*
	 * Since we have the lock this context can't be scheduled
	 * in, so we can change the state safely.
	 */
	if (event->state == PERF_EVENT_STATE_OFF)
		__perf_event_mark_enabled(event, ctx);

out:
	spin_unlock_irq(&ctx->lock);
}
static int perf_event_refresh(struct perf_event *event, int refresh)
{
	/*
	 * not supported on inherited events
	 */
	if (event->attr.inherit)
		return -EINVAL;

	atomic_add(refresh, &event->event_limit);
	perf_event_enable(event);

	return 0;
}
void __perf_event_sched_out(struct perf_event_context *ctx,
			      struct perf_cpu_context *cpuctx)
{
	struct perf_event *event;

	spin_lock(&ctx->lock);
	ctx->is_active = 0;
	if (likely(!ctx->nr_events))
		goto out;
	update_context_time(ctx);

	perf_disable();
	if (ctx->nr_active) {
		list_for_each_entry(event, &ctx->group_list, group_entry)
			group_sched_out(event, cpuctx, ctx);
	}
	perf_enable();
out:
	spin_unlock(&ctx->lock);
}
/*
 * Test whether two contexts are equivalent, i.e. whether they
 * have both been cloned from the same version of the same context
 * and they both have the same number of enabled events.
 * If the number of enabled events is the same, then the set
 * of enabled events should be the same, because these are both
 * inherited contexts, therefore we can't access individual events
 * in them directly with an fd; we can only enable/disable all
 * events via prctl, or enable/disable all events in a family
 * via ioctl, which will have the same effect on both contexts.
 */
static int context_equiv(struct perf_event_context *ctx1,
			 struct perf_event_context *ctx2)
{
	return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx
		&& ctx1->parent_gen == ctx2->parent_gen
		&& !ctx1->pin_count && !ctx2->pin_count;
}
static void __perf_event_read(void *event);

static void __perf_event_sync_stat(struct perf_event *event,
				     struct perf_event *next_event)
{
	u64 value;

	if (!event->attr.inherit_stat)
		return;

	/*
	 * Update the event value, we cannot use perf_event_read()
	 * because we're in the middle of a context switch and have IRQs
	 * disabled, which upsets smp_call_function_single(), however
	 * we know the event must be on the current CPU, therefore we
	 * don't need to use it.
	 */
	switch (event->state) {
	case PERF_EVENT_STATE_ACTIVE:
		__perf_event_read(event);
		break;

	case PERF_EVENT_STATE_INACTIVE:
		update_event_times(event);
		break;

	default:
		break;
	}

	/*
	 * In order to keep per-task stats reliable we need to flip the event
	 * values when we flip the contexts.
	 */
	value = atomic64_read(&next_event->count);
	value = atomic64_xchg(&event->count, value);
	atomic64_set(&next_event->count, value);

	swap(event->total_time_enabled, next_event->total_time_enabled);
	swap(event->total_time_running, next_event->total_time_running);

	/*
	 * Since we swizzled the values, update the user visible data too.
	 */
	perf_event_update_userpage(event);
	perf_event_update_userpage(next_event);
}
#define list_next_entry(pos, member) \
	list_entry(pos->member.next, typeof(*pos), member)
static void perf_event_sync_stat(struct perf_event_context *ctx,
				   struct perf_event_context *next_ctx)
{
	struct perf_event *event, *next_event;

	if (!ctx->nr_stat)
		return;

	event = list_first_entry(&ctx->event_list,
				   struct perf_event, event_entry);

	next_event = list_first_entry(&next_ctx->event_list,
					struct perf_event, event_entry);

	while (&event->event_entry != &ctx->event_list &&
	       &next_event->event_entry != &next_ctx->event_list) {

		__perf_event_sync_stat(event, next_event);

		event = list_next_entry(event, event_entry);
		next_event = list_next_entry(next_event, event_entry);
	}
}
/*
 * Called from scheduler to remove the events of the current task,
 * with interrupts disabled.
 *
 * We stop each event and update the event value in event->count.
 *
 * This does not protect us against NMI, but disable()
 * sets the disabled bit in the control field of event _before_
 * accessing the event control register. If a NMI hits, then it will
 * not restart the event.
 */
void perf_event_task_sched_out(struct task_struct *task,
				 struct task_struct *next, int cpu)
{
	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
	struct perf_event_context *ctx = task->perf_event_ctxp;
	struct perf_event_context *next_ctx;
	struct perf_event_context *parent;
	struct pt_regs *regs;
	int do_switch = 1;

	regs = task_pt_regs(task);
	perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, regs, 0);

	if (likely(!ctx || !cpuctx->task_ctx))
		return;

	update_context_time(ctx);

	rcu_read_lock();
	parent = rcu_dereference(ctx->parent_ctx);
	next_ctx = next->perf_event_ctxp;
	if (parent && next_ctx &&
	    rcu_dereference(next_ctx->parent_ctx) == parent) {
		/*
		 * Looks like the two contexts are clones, so we might be
		 * able to optimize the context switch. We lock both
		 * contexts and check that they are clones under the
		 * lock (including re-checking that neither has been
		 * uncloned in the meantime). It doesn't matter which
		 * order we take the locks because no other cpu could
		 * be trying to lock both of these tasks.
		 */
		spin_lock(&ctx->lock);
		spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
		if (context_equiv(ctx, next_ctx)) {
			/*
			 * XXX do we need a memory barrier of sorts
			 * wrt to rcu_dereference() of perf_event_ctxp
			 */
			task->perf_event_ctxp = next_ctx;
			next->perf_event_ctxp = ctx;
			ctx->task = next;
			next_ctx->task = task;
			do_switch = 0;

			perf_event_sync_stat(ctx, next_ctx);
		}
		spin_unlock(&next_ctx->lock);
		spin_unlock(&ctx->lock);
	}
	rcu_read_unlock();

	if (do_switch) {
		__perf_event_sched_out(ctx, cpuctx);
		cpuctx->task_ctx = NULL;
	}
}
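/*
 * The effect of the equivalent-context optimization above, sketched for
 * tasks T1 (scheduling out) and T2 (scheduling in) whose contexts were
 * both cloned from the same parent:
 *
 *   before:  T1->perf_event_ctxp = ctx1   T2->perf_event_ctxp = ctx2
 *   after:   T1->perf_event_ctxp = ctx2   T2->perf_event_ctxp = ctx1
 *
 * The per-CPU state (cpuctx->task_ctx) is left untouched, so nothing has
 * to be reprogrammed on the PMU; only the context pointers (and, for
 * inherit_stat events, the counts via perf_event_sync_stat()) change
 * hands.
 */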
/*
 * Called with IRQs disabled
 */
static void __perf_event_task_sched_out(struct perf_event_context *ctx)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);

	if (!cpuctx->task_ctx)
		return;

	if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
		return;

	__perf_event_sched_out(ctx, cpuctx);
	cpuctx->task_ctx = NULL;
}

/*
 * Called with IRQs disabled
 */
static void perf_event_cpu_sched_out(struct perf_cpu_context *cpuctx)
{
	__perf_event_sched_out(&cpuctx->ctx, cpuctx);
}
static void
__perf_event_sched_in(struct perf_event_context *ctx,
			struct perf_cpu_context *cpuctx, int cpu)
{
	struct perf_event *event;
	int can_add_hw = 1;

	spin_lock(&ctx->lock);
	ctx->is_active = 1;
	if (likely(!ctx->nr_events))
		goto out;

	ctx->timestamp = perf_clock();

	perf_disable();

	/*
	 * First go through the list and put on any pinned groups
	 * in order to give them the best chance of going on.
	 */
	list_for_each_entry(event, &ctx->group_list, group_entry) {
		if (event->state <= PERF_EVENT_STATE_OFF ||
		    !event->attr.pinned)
			continue;
		if (event->cpu != -1 && event->cpu != cpu)
			continue;

		if (group_can_go_on(event, cpuctx, 1))
			group_sched_in(event, cpuctx, ctx, cpu);

		/*
		 * If this pinned group hasn't been scheduled,
		 * put it in error state.
		 */
		if (event->state == PERF_EVENT_STATE_INACTIVE) {
			update_group_times(event);
			event->state = PERF_EVENT_STATE_ERROR;
		}
	}

	list_for_each_entry(event, &ctx->group_list, group_entry) {
		/*
		 * Ignore events in OFF or ERROR state, and
		 * ignore pinned events since we did them already.
		 */
		if (event->state <= PERF_EVENT_STATE_OFF ||
		    event->attr.pinned)
			continue;

		/*
		 * Listen to the 'cpu' scheduling filter constraint
		 * of events:
		 */
		if (event->cpu != -1 && event->cpu != cpu)
			continue;

		if (group_can_go_on(event, cpuctx, can_add_hw))
			if (group_sched_in(event, cpuctx, ctx, cpu))
				can_add_hw = 0;
	}
	perf_enable();
out:
	spin_unlock(&ctx->lock);
}
/*
 * Called from scheduler to add the events of the current task
 * with interrupts disabled.
 *
 * We restore the event value and then enable it.
 *
 * This does not protect us against NMI, but enable()
 * sets the enabled bit in the control field of event _before_
 * accessing the event control register. If a NMI hits, then it will
 * keep the event running.
 */
void perf_event_task_sched_in(struct task_struct *task, int cpu)
{
	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
	struct perf_event_context *ctx = task->perf_event_ctxp;

	if (likely(!ctx))
		return;
	if (cpuctx->task_ctx == ctx)
		return;
	__perf_event_sched_in(ctx, cpuctx, cpu);
	cpuctx->task_ctx = ctx;
}
static void perf_event_cpu_sched_in(struct perf_cpu_context *cpuctx, int cpu)
{
	struct perf_event_context *ctx = &cpuctx->ctx;

	__perf_event_sched_in(ctx, cpuctx, cpu);
}
#define MAX_INTERRUPTS (~0ULL)

static void perf_log_throttle(struct perf_event *event, int enable);
static void perf_adjust_period(struct perf_event *event, u64 events)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 period, sample_period;
	s64 delta;

	events *= hwc->sample_period;
	period = div64_u64(events, event->attr.sample_freq);

	delta = (s64)(period - hwc->sample_period);
	delta = (delta + 7) / 8; /* low pass filter */

	sample_period = hwc->sample_period + delta;

	if (!sample_period)
		sample_period = 1;

	hwc->sample_period = sample_period;
}
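/*
 * Worked example of the adjustment above (illustrative numbers): take
 * sample_freq = 1000 Hz, HZ = 1000, a current sample_period of 500,000
 * and interrupts = 2 seen during the last tick, so the caller passes
 * events = freq * interrupts = 2000:
 *
 *   events        = 2000 * 500,000          = 1e9  (est. events/sec)
 *   period        = 1e9 / 1000              = 1,000,000
 *   delta         = 1,000,000 - 500,000     = 500,000
 *   delta         = (500,000 + 7) / 8       = 62,500   (low pass)
 *   sample_period = 500,000 + 62,500        = 562,500
 *
 * i.e. the period only moves 1/8th of the way towards its target each
 * time, smoothing out bursts.
 */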
static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
{
	struct perf_event *event;
	struct hw_perf_event *hwc;
	u64 interrupts, freq;

	spin_lock(&ctx->lock);
	list_for_each_entry(event, &ctx->group_list, group_entry) {
		if (event->state != PERF_EVENT_STATE_ACTIVE)
			continue;

		hwc = &event->hw;

		interrupts = hwc->interrupts;
		hwc->interrupts = 0;

		/*
		 * unthrottle events on the tick
		 */
		if (interrupts == MAX_INTERRUPTS) {
			perf_log_throttle(event, 1);
			event->pmu->unthrottle(event);
			interrupts = 2*sysctl_perf_event_sample_rate/HZ;
		}

		if (!event->attr.freq || !event->attr.sample_freq)
			continue;

		/*
		 * if the specified freq < HZ then we need to skip ticks
		 */
		if (event->attr.sample_freq < HZ) {
			freq = event->attr.sample_freq;

			hwc->freq_count += freq;
			hwc->freq_interrupts += interrupts;

			if (hwc->freq_count < HZ)
				continue;

			interrupts = hwc->freq_interrupts;
			hwc->freq_interrupts = 0;
			hwc->freq_count -= HZ;
		} else
			freq = HZ;

		perf_adjust_period(event, freq * interrupts);

		/*
		 * In order to avoid being stalled by an (accidental) huge
		 * sample period, force reset the sample period if we didn't
		 * get any events in this freq period.
		 */
		if (!interrupts) {
			perf_disable();
			event->pmu->disable(event);
			atomic64_set(&hwc->period_left, 0);
			event->pmu->enable(event);
			perf_enable();
		}
	}
	spin_unlock(&ctx->lock);
}
/*
 * Round-robin a context's events:
 */
static void rotate_ctx(struct perf_event_context *ctx)
{
	struct perf_event *event;

	if (!ctx->nr_events)
		return;

	spin_lock(&ctx->lock);
	/*
	 * Rotate the first entry last (works just fine for group events too):
	 */
	perf_disable();
	list_for_each_entry(event, &ctx->group_list, group_entry) {
		list_move_tail(&event->group_entry, &ctx->group_list);
		break;
	}
	perf_enable();

	spin_unlock(&ctx->lock);
}
void perf_event_task_tick(struct task_struct *curr, int cpu)
{
	struct perf_cpu_context *cpuctx;
	struct perf_event_context *ctx;

	if (!atomic_read(&nr_events))
		return;

	cpuctx = &per_cpu(perf_cpu_context, cpu);
	ctx = curr->perf_event_ctxp;

	perf_ctx_adjust_freq(&cpuctx->ctx);
	if (ctx)
		perf_ctx_adjust_freq(ctx);

	perf_event_cpu_sched_out(cpuctx);
	if (ctx)
		__perf_event_task_sched_out(ctx);

	rotate_ctx(&cpuctx->ctx);
	if (ctx)
		rotate_ctx(ctx);

	perf_event_cpu_sched_in(cpuctx, cpu);
	if (ctx)
		perf_event_task_sched_in(curr, cpu);
}
/*
 * Enable all of a task's events that have been marked enable-on-exec.
 * This expects task == current.
 */
static void perf_event_enable_on_exec(struct task_struct *task)
{
	struct perf_event_context *ctx;
	struct perf_event *event;
	unsigned long flags;
	int enabled = 0;

	local_irq_save(flags);
	ctx = task->perf_event_ctxp;
	if (!ctx || !ctx->nr_events)
		goto out;

	__perf_event_task_sched_out(ctx);

	spin_lock(&ctx->lock);

	list_for_each_entry(event, &ctx->group_list, group_entry) {
		if (!event->attr.enable_on_exec)
			continue;
		event->attr.enable_on_exec = 0;
		if (event->state >= PERF_EVENT_STATE_INACTIVE)
			continue;
		__perf_event_mark_enabled(event, ctx);
		enabled = 1;
	}

	/*
	 * Unclone this context if we enabled any event.
	 */
	if (enabled)
		unclone_ctx(ctx);

	spin_unlock(&ctx->lock);

	perf_event_task_sched_in(task, smp_processor_id());
out:
	local_irq_restore(flags);
}
/*
 * Cross CPU call to read the hardware event
 */
static void __perf_event_read(void *info)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_event *event = info;
	struct perf_event_context *ctx = event->ctx;
	unsigned long flags;

	/*
	 * If this is a task context, we need to check whether it is
	 * the current task context of this cpu. If not it has been
	 * scheduled out before the smp call arrived. In that case
	 * event->count would have been updated to a recent sample
	 * when the event was scheduled out.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)
		return;

	local_irq_save(flags);
	if (ctx->is_active)
		update_context_time(ctx);
	event->pmu->read(event);
	update_event_times(event);
	local_irq_restore(flags);
}
static u64 perf_event_read(struct perf_event *event)
{
	/*
	 * If event is enabled and currently active on a CPU, update the
	 * value in the event structure:
	 */
	if (event->state == PERF_EVENT_STATE_ACTIVE) {
		smp_call_function_single(event->oncpu,
					 __perf_event_read, event, 1);
	} else if (event->state == PERF_EVENT_STATE_INACTIVE) {
		update_event_times(event);
	}

	return atomic64_read(&event->count);
}
/*
 * Initialize the perf_event context in a task_struct:
 */
static void
__perf_event_init_context(struct perf_event_context *ctx,
			    struct task_struct *task)
{
	memset(ctx, 0, sizeof(*ctx));
	spin_lock_init(&ctx->lock);
	mutex_init(&ctx->mutex);
	INIT_LIST_HEAD(&ctx->group_list);
	INIT_LIST_HEAD(&ctx->event_list);
	atomic_set(&ctx->refcount, 1);
	ctx->task = task;
}
static struct perf_event_context *find_get_context(pid_t pid, int cpu)
{
	struct perf_event_context *ctx;
	struct perf_cpu_context *cpuctx;
	struct task_struct *task;
	unsigned long flags;
	int err;

	/*
	 * If cpu is not a wildcard then this is a percpu event:
	 */
	if (cpu != -1) {
		/* Must be root to operate on a CPU event: */
		if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
			return ERR_PTR(-EACCES);

		if (cpu < 0 || cpu > num_possible_cpus())
			return ERR_PTR(-EINVAL);

		/*
		 * We could be clever and allow attaching an event to an
		 * offline CPU and activate it when the CPU comes up, but
		 * that's for later.
		 */
		if (!cpu_isset(cpu, cpu_online_map))
			return ERR_PTR(-ENODEV);

		cpuctx = &per_cpu(perf_cpu_context, cpu);
		ctx = &cpuctx->ctx;
		get_ctx(ctx);

		return ctx;
	}

	rcu_read_lock();
	if (!pid)
		task = current;
	else
		task = find_task_by_vpid(pid);
	if (task)
		get_task_struct(task);
	rcu_read_unlock();

	if (!task)
		return ERR_PTR(-ESRCH);

	/*
	 * Can't attach events to a dying task.
	 */
	err = -ESRCH;
	if (task->flags & PF_EXITING)
		goto errout;

	/* Reuse ptrace permission checks for now. */
	err = -EACCES;
	if (!ptrace_may_access(task, PTRACE_MODE_READ))
		goto errout;

retry:
	ctx = perf_lock_task_context(task, &flags);
	if (ctx) {
		unclone_ctx(ctx);
		spin_unlock_irqrestore(&ctx->lock, flags);
	}

	if (!ctx) {
		ctx = kmalloc(sizeof(struct perf_event_context), GFP_KERNEL);
		err = -ENOMEM;
		if (!ctx)
			goto errout;
		__perf_event_init_context(ctx, task);
		get_ctx(ctx);
		if (cmpxchg(&task->perf_event_ctxp, NULL, ctx)) {
			/*
			 * We raced with some other task; use
			 * the context they set.
			 */
			kfree(ctx);
			goto retry;
		}
		get_task_struct(task);
	}

	put_task_struct(task);
	return ctx;

errout:
	put_task_struct(task);
	return ERR_PTR(err);
}
static void free_event_rcu(struct rcu_head *head)
{
	struct perf_event *event;

	event = container_of(head, struct perf_event, rcu_head);
	if (event->ns)
		put_pid_ns(event->ns);
	kfree(event);
}
static void perf_pending_sync(struct perf_event *event);

static void free_event(struct perf_event *event)
{
	perf_pending_sync(event);

	if (!event->parent) {
		atomic_dec(&nr_events);
		if (event->attr.mmap)
			atomic_dec(&nr_mmap_events);
		if (event->attr.comm)
			atomic_dec(&nr_comm_events);
		if (event->attr.task)
			atomic_dec(&nr_task_events);
	}

	if (event->output) {
		fput(event->output->filp);
		event->output = NULL;
	}

	if (event->destroy)
		event->destroy(event);

	put_ctx(event->ctx);
	call_rcu(&event->rcu_head, free_event_rcu);
}
/*
 * Called when the last reference to the file is gone.
 */
static int perf_release(struct inode *inode, struct file *file)
{
	struct perf_event *event = file->private_data;
	struct perf_event_context *ctx = event->ctx;

	file->private_data = NULL;

	WARN_ON_ONCE(ctx->parent_ctx);
	mutex_lock(&ctx->mutex);
	perf_event_remove_from_context(event);
	mutex_unlock(&ctx->mutex);

	mutex_lock(&event->owner->perf_event_mutex);
	list_del_init(&event->owner_entry);
	mutex_unlock(&event->owner->perf_event_mutex);
	put_task_struct(event->owner);

	free_event(event);

	return 0;
}
static int perf_event_read_size(struct perf_event *event)
{
	int entry = sizeof(u64); /* value */
	int size = 0;
	int nr = 1;

	if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		size += sizeof(u64);

	if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		size += sizeof(u64);

	if (event->attr.read_format & PERF_FORMAT_ID)
		entry += sizeof(u64);

	if (event->attr.read_format & PERF_FORMAT_GROUP) {
		nr += event->group_leader->nr_siblings;
		size += sizeof(u64);
	}

	size += entry * nr;

	return size;
}
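/*
 * Worked example of the sizing above: with read_format =
 * PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_ID on a non-group event,
 *
 *   entry = sizeof(u64)             (value)
 *         + sizeof(u64)             (PERF_FORMAT_ID)     = 16
 *   size  = sizeof(u64)             (TIME_ENABLED)       =  8
 *   nr    = 1
 *   total = size + entry * nr                            = 24 bytes
 *
 * which matches the { value, time_enabled, id } buffer that
 * perf_event_read_one() below writes for that format.
 */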
static u64 perf_event_read_value(struct perf_event *event)
{
	struct perf_event *child;
	u64 total = 0;

	total += perf_event_read(event);
	list_for_each_entry(child, &event->child_list, child_list)
		total += perf_event_read(child);

	return total;
}
static int perf_event_read_entry(struct perf_event *event,
				   u64 read_format, char __user *buf)
{
	int n = 0, count = 0;
	u64 values[2];

	values[n++] = perf_event_read_value(event);
	if (read_format & PERF_FORMAT_ID)
		values[n++] = primary_event_id(event);

	count = n * sizeof(u64);

	if (copy_to_user(buf, values, count))
		return -EFAULT;

	return count;
}
static int perf_event_read_group(struct perf_event *event,
				   u64 read_format, char __user *buf)
{
	struct perf_event *leader = event->group_leader, *sub;
	int n = 0, size = 0, err = -EFAULT;
	u64 values[3];

	values[n++] = 1 + leader->nr_siblings;
	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
		values[n++] = leader->total_time_enabled +
			atomic64_read(&leader->child_total_time_enabled);
	}
	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
		values[n++] = leader->total_time_running +
			atomic64_read(&leader->child_total_time_running);
	}

	size = n * sizeof(u64);

	if (copy_to_user(buf, values, size))
		return -EFAULT;

	err = perf_event_read_entry(leader, read_format, buf + size);
	if (err < 0)
		return err;

	size += err;

	list_for_each_entry(sub, &leader->sibling_list, group_entry) {
		err = perf_event_read_entry(sub, read_format,
				buf + size);
		if (err < 0)
			return err;

		size += err;
	}

	return size;
}
static int perf_event_read_one(struct perf_event *event,
				 u64 read_format, char __user *buf)
{
	u64 values[4];
	int n = 0;

	values[n++] = perf_event_read_value(event);
	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
		values[n++] = event->total_time_enabled +
			atomic64_read(&event->child_total_time_enabled);
	}
	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
		values[n++] = event->total_time_running +
			atomic64_read(&event->child_total_time_running);
	}
	if (read_format & PERF_FORMAT_ID)
		values[n++] = primary_event_id(event);

	if (copy_to_user(buf, values, n * sizeof(u64)))
		return -EFAULT;

	return n * sizeof(u64);
}
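/*
 * Illustrative userspace counterpart (not part of this file): parsing
 * the buffer that perf_event_read_one() produces for read_format =
 * PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING.
 * struct read_one and read_counter() are made-up names.
 */
#if 0
#include <stdint.h>
#include <unistd.h>

struct read_one {		/* field order mirrors the kernel side */
	uint64_t value;
	uint64_t time_enabled;
	uint64_t time_running;
};

static int read_counter(int fd, struct read_one *out)
{
	ssize_t n = read(fd, out, sizeof(*out));

	return n == sizeof(*out) ? 0 : -1;
}
#endif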
/*
 * Read the performance event - simple non blocking version for now
 */
static ssize_t
perf_read_hw(struct perf_event *event, char __user *buf, size_t count)
{
	u64 read_format = event->attr.read_format;
	int ret;

	/*
	 * Return end-of-file for a read on an event that is in
	 * error state (i.e. because it was pinned but it couldn't be
	 * scheduled on to the CPU at some point).
	 */
	if (event->state == PERF_EVENT_STATE_ERROR)
		return 0;

	if (count < perf_event_read_size(event))
		return -ENOSPC;

	WARN_ON_ONCE(event->ctx->parent_ctx);
	mutex_lock(&event->child_mutex);
	if (read_format & PERF_FORMAT_GROUP)
		ret = perf_event_read_group(event, read_format, buf);
	else
		ret = perf_event_read_one(event, read_format, buf);
	mutex_unlock(&event->child_mutex);

	return ret;
}
static ssize_t
perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	struct perf_event *event = file->private_data;

	return perf_read_hw(event, buf, count);
}
static unsigned int perf_poll(struct file *file, poll_table *wait)
{
	struct perf_event *event = file->private_data;
	struct perf_mmap_data *data;
	unsigned int events = POLL_HUP;

	rcu_read_lock();
	data = rcu_dereference(event->data);
	if (data)
		events = atomic_xchg(&data->poll, 0);
	rcu_read_unlock();

	poll_wait(file, &event->waitq, wait);

	return events;
}
static void perf_event_reset(struct perf_event *event)
{
	(void)perf_event_read(event);
	atomic64_set(&event->count, 0);
	perf_event_update_userpage(event);
}
/*
 * Holding the top-level event's child_mutex means that any
 * descendant process that has inherited this event will block
 * in sync_child_event if it goes to exit, thus satisfying the
 * task existence requirements of perf_event_enable/disable.
 */
static void perf_event_for_each_child(struct perf_event *event,
					void (*func)(struct perf_event *))
{
	struct perf_event *child;

	WARN_ON_ONCE(event->ctx->parent_ctx);
	mutex_lock(&event->child_mutex);
	func(event);
	list_for_each_entry(child, &event->child_list, child_list)
		func(child);
	mutex_unlock(&event->child_mutex);
}
static void perf_event_for_each(struct perf_event *event,
				  void (*func)(struct perf_event *))
{
	struct perf_event_context *ctx = event->ctx;
	struct perf_event *sibling;

	WARN_ON_ONCE(ctx->parent_ctx);
	mutex_lock(&ctx->mutex);
	event = event->group_leader;

	perf_event_for_each_child(event, func);
	func(event);
	list_for_each_entry(sibling, &event->sibling_list, group_entry)
		perf_event_for_each_child(sibling, func);
	mutex_unlock(&ctx->mutex);
}
static int perf_event_period(struct perf_event *event, u64 __user *arg)
{
	struct perf_event_context *ctx = event->ctx;
	int ret = 0;
	u64 value;

	if (!event->attr.sample_period)
		return -EINVAL;

	if (copy_from_user(&value, arg, sizeof(value)))
		return -EFAULT;

	if (!value)
		return -EINVAL;

	spin_lock_irq(&ctx->lock);
	if (event->attr.freq) {
		if (value > sysctl_perf_event_sample_rate) {
			ret = -EINVAL;
			goto unlock;
		}

		event->attr.sample_freq = value;
	} else {
		event->attr.sample_period = value;
		event->hw.sample_period = value;
	}
unlock:
	spin_unlock_irq(&ctx->lock);

	return ret;
}
int perf_event_set_output(struct perf_event *event, int output_fd);

static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct perf_event *event = file->private_data;
	void (*func)(struct perf_event *);
	u32 flags = arg;

	switch (cmd) {
	case PERF_EVENT_IOC_ENABLE:
		func = perf_event_enable;
		break;
	case PERF_EVENT_IOC_DISABLE:
		func = perf_event_disable;
		break;
	case PERF_EVENT_IOC_RESET:
		func = perf_event_reset;
		break;

	case PERF_EVENT_IOC_REFRESH:
		return perf_event_refresh(event, arg);

	case PERF_EVENT_IOC_PERIOD:
		return perf_event_period(event, (u64 __user *)arg);

	case PERF_EVENT_IOC_SET_OUTPUT:
		return perf_event_set_output(event, arg);

	default:
		return -ENOTTY;
	}

	if (flags & PERF_IOC_FLAG_GROUP)
		perf_event_for_each(event, func);
	else
		perf_event_for_each_child(event, func);

	return 0;
}
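/*
 * Illustrative userspace usage of the ioctls handled above (not part of
 * this file): reset and enable an entire group via its leader fd.
 * start_group() is a made-up helper name.
 */
#if 0
#include <sys/ioctl.h>
#include <linux/perf_event.h>

static void start_group(int leader_fd)
{
	ioctl(leader_fd, PERF_EVENT_IOC_RESET, PERF_IOC_FLAG_GROUP);
	ioctl(leader_fd, PERF_EVENT_IOC_ENABLE, PERF_IOC_FLAG_GROUP);
}
#endif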
int perf_event_task_enable(void)
{
	struct perf_event *event;

	mutex_lock(&current->perf_event_mutex);
	list_for_each_entry(event, &current->perf_event_list, owner_entry)
		perf_event_for_each_child(event, perf_event_enable);
	mutex_unlock(&current->perf_event_mutex);

	return 0;
}

int perf_event_task_disable(void)
{
	struct perf_event *event;

	mutex_lock(&current->perf_event_mutex);
	list_for_each_entry(event, &current->perf_event_list, owner_entry)
		perf_event_for_each_child(event, perf_event_disable);
	mutex_unlock(&current->perf_event_mutex);

	return 0;
}
#ifndef PERF_EVENT_INDEX_OFFSET
# define PERF_EVENT_INDEX_OFFSET 0
#endif

static int perf_event_index(struct perf_event *event)
{
	if (event->state != PERF_EVENT_STATE_ACTIVE)
		return 0;

	return event->hw.idx + 1 - PERF_EVENT_INDEX_OFFSET;
}
/*
 * Callers need to ensure there can be no nesting of this function, otherwise
 * the seqlock logic goes bad. We can not serialize this because the arch
 * code calls this from NMI context.
 */
void perf_event_update_userpage(struct perf_event *event)
{
	struct perf_event_mmap_page *userpg;
	struct perf_mmap_data *data;

	rcu_read_lock();
	data = rcu_dereference(event->data);
	if (!data)
		goto unlock;

	userpg = data->user_page;

	/*
	 * Disable preemption so as to not let the corresponding user-space
	 * spin too long if we get preempted.
	 */
	preempt_disable();
	++userpg->lock;
	barrier();
	userpg->index = perf_event_index(event);
	userpg->offset = atomic64_read(&event->count);
	if (event->state == PERF_EVENT_STATE_ACTIVE)
		userpg->offset -= atomic64_read(&event->hw.prev_count);

	userpg->time_enabled = event->total_time_enabled +
			atomic64_read(&event->child_total_time_enabled);

	userpg->time_running = event->total_time_running +
			atomic64_read(&event->child_total_time_running);

	barrier();
	++userpg->lock;
	preempt_enable();
unlock:
	rcu_read_unlock();
}
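/*
 * Illustrative userspace counterpart (not part of this file): a lock-free
 * read of the page updated above. userpg->lock is bumped before and after
 * the update, so an even, unchanged value brackets a consistent snapshot.
 * read_offset() is a made-up helper; __sync_synchronize() stands in for
 * whatever read barrier the target architecture needs.
 */
#if 0
#include <stdint.h>

static uint64_t read_offset(volatile struct perf_event_mmap_page *pg)
{
	uint32_t seq;
	uint64_t offset;

	do {
		seq = pg->lock;
		__sync_synchronize();	/* pairs with the kernel barrier() */
		offset = pg->offset;
		__sync_synchronize();
	} while (pg->lock != seq || (seq & 1));

	return offset;
}
#endif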
static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct perf_event *event = vma->vm_file->private_data;
	struct perf_mmap_data *data;
	int ret = VM_FAULT_SIGBUS;

	if (vmf->flags & FAULT_FLAG_MKWRITE) {
		if (vmf->pgoff == 0)
			ret = 0;
		return ret;
	}

	rcu_read_lock();
	data = rcu_dereference(event->data);
	if (!data)
		goto unlock;

	if (vmf->pgoff == 0) {
		vmf->page = virt_to_page(data->user_page);
	} else {
		int nr = vmf->pgoff - 1;

		if ((unsigned)nr > data->nr_pages)
			goto unlock;

		if (vmf->flags & FAULT_FLAG_WRITE)
			goto unlock;

		vmf->page = virt_to_page(data->data_pages[nr]);
	}

	get_page(vmf->page);
	vmf->page->mapping = vma->vm_file->f_mapping;
	vmf->page->index   = vmf->pgoff;

	ret = 0;
unlock:
	rcu_read_unlock();

	return ret;
}
static int perf_mmap_data_alloc(struct perf_event *event, int nr_pages)
{
	struct perf_mmap_data *data;
	unsigned long size;
	int i;

	WARN_ON(atomic_read(&event->mmap_count));

	size = sizeof(struct perf_mmap_data);
	size += nr_pages * sizeof(void *);

	data = kzalloc(size, GFP_KERNEL);
	if (!data)
		goto fail;

	data->user_page = (void *)get_zeroed_page(GFP_KERNEL);
	if (!data->user_page)
		goto fail_user_page;

	for (i = 0; i < nr_pages; i++) {
		data->data_pages[i] = (void *)get_zeroed_page(GFP_KERNEL);
		if (!data->data_pages[i])
			goto fail_data_pages;
	}

	data->nr_pages = nr_pages;
	atomic_set(&data->lock, -1);

	if (event->attr.watermark) {
		data->watermark = min_t(long, PAGE_SIZE * nr_pages,
				      event->attr.wakeup_watermark);
	}
	if (!data->watermark)
		data->watermark = max(PAGE_SIZE, PAGE_SIZE * nr_pages / 4);

	rcu_assign_pointer(event->data, data);

	return 0;

fail_data_pages:
	for (i--; i >= 0; i--)
		free_page((unsigned long)data->data_pages[i]);

	free_page((unsigned long)data->user_page);

fail_user_page:
	kfree(data);

fail:
	return -ENOMEM;
}
static void perf_mmap_free_page(unsigned long addr)
{
	struct page *page = virt_to_page((void *)addr);

	page->mapping = NULL;
	__free_page(page);
}

static void __perf_mmap_data_free(struct rcu_head *rcu_head)
{
	struct perf_mmap_data *data;
	int i;

	data = container_of(rcu_head, struct perf_mmap_data, rcu_head);

	perf_mmap_free_page((unsigned long)data->user_page);
	for (i = 0; i < data->nr_pages; i++)
		perf_mmap_free_page((unsigned long)data->data_pages[i]);

	kfree(data);
}

static void perf_mmap_data_free(struct perf_event *event)
{
	struct perf_mmap_data *data = event->data;

	WARN_ON(atomic_read(&event->mmap_count));

	rcu_assign_pointer(event->data, NULL);
	call_rcu(&data->rcu_head, __perf_mmap_data_free);
}
static void perf_mmap_open(struct vm_area_struct *vma)
{
	struct perf_event *event = vma->vm_file->private_data;

	atomic_inc(&event->mmap_count);
}

static void perf_mmap_close(struct vm_area_struct *vma)
{
	struct perf_event *event = vma->vm_file->private_data;

	WARN_ON_ONCE(event->ctx->parent_ctx);
	if (atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) {
		struct user_struct *user = current_user();

		atomic_long_sub(event->data->nr_pages + 1, &user->locked_vm);
		vma->vm_mm->locked_vm -= event->data->nr_locked;
		perf_mmap_data_free(event);
		mutex_unlock(&event->mmap_mutex);
	}
}
static const struct vm_operations_struct perf_mmap_vmops = {
	.open		= perf_mmap_open,
	.close		= perf_mmap_close,
	.fault		= perf_mmap_fault,
	.page_mkwrite	= perf_mmap_fault,
};
static int perf_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct perf_event *event = file->private_data;
	unsigned long user_locked, user_lock_limit;
	struct user_struct *user = current_user();
	unsigned long locked, lock_limit;
	unsigned long vma_size;
	unsigned long nr_pages;
	long user_extra, extra;
	int ret = 0;

	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma_size = vma->vm_end - vma->vm_start;
	nr_pages = (vma_size / PAGE_SIZE) - 1;

	/*
	 * If we have data pages ensure they're a power-of-two number, so we
	 * can do bitmasks instead of modulo.
	 */
	if (nr_pages != 0 && !is_power_of_2(nr_pages))
		return -EINVAL;

	if (vma_size != PAGE_SIZE * (1 + nr_pages))
		return -EINVAL;

	if (vma->vm_pgoff != 0)
		return -EINVAL;

	WARN_ON_ONCE(event->ctx->parent_ctx);
	mutex_lock(&event->mmap_mutex);
	if (event->output) {
		ret = -EINVAL;
		goto unlock;
	}

	if (atomic_inc_not_zero(&event->mmap_count)) {
		if (nr_pages != event->data->nr_pages)
			ret = -EINVAL;
		goto unlock;
	}

	user_extra = nr_pages + 1;
	user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10);

	/*
	 * Increase the limit linearly with more CPUs:
	 */
	user_lock_limit *= num_online_cpus();

	user_locked = atomic_long_read(&user->locked_vm) + user_extra;

	extra = 0;
	if (user_locked > user_lock_limit)
		extra = user_locked - user_lock_limit;

	lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
	lock_limit >>= PAGE_SHIFT;
	locked = vma->vm_mm->locked_vm + extra;

	if ((locked > lock_limit) && perf_paranoid_tracepoint_raw() &&
		!capable(CAP_IPC_LOCK)) {
		ret = -EPERM;
		goto unlock;
	}

	WARN_ON(event->data);
	ret = perf_mmap_data_alloc(event, nr_pages);
	if (ret)
		goto unlock;

	atomic_set(&event->mmap_count, 1);
	atomic_long_add(user_extra, &user->locked_vm);
	vma->vm_mm->locked_vm += extra;
	event->data->nr_locked = extra;
	if (vma->vm_flags & VM_WRITE)
		event->data->writable = 1;

unlock:
	mutex_unlock(&event->mmap_mutex);

	vma->vm_flags |= VM_RESERVED;
	vma->vm_ops = &perf_mmap_vmops;

	return ret;
}
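/*
 * Illustrative userspace counterpart (not part of this file): mapping the
 * buffer with the 1 + 2^n pages layout that perf_mmap() enforces.
 * map_perf_buffer() is a made-up helper name.
 */
#if 0
#include <sys/mman.h>
#include <unistd.h>

static void *map_perf_buffer(int fd, unsigned int data_pages /* power of 2 */)
{
	size_t len = (1 + data_pages) * sysconf(_SC_PAGESIZE);
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

	return p == MAP_FAILED ? NULL : p;	/* page 0 is the control page */
}
#endif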
static int perf_fasync(int fd, struct file *filp, int on)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct perf_event *event = filp->private_data;
	int retval;

	mutex_lock(&inode->i_mutex);
	retval = fasync_helper(fd, filp, on, &event->fasync);
	mutex_unlock(&inode->i_mutex);

	if (retval < 0)
		return retval;

	return 0;
}
static const struct file_operations perf_fops = {
	.release		= perf_release,
	.read			= perf_read,
	.poll			= perf_poll,
	.unlocked_ioctl		= perf_ioctl,
	.compat_ioctl		= perf_ioctl,
	.mmap			= perf_mmap,
	.fasync			= perf_fasync,
};
/*
 * If there's data, ensure we set the poll() state and publish everything
 * to user-space before waking everybody up.
 */
void perf_event_wakeup(struct perf_event *event)
{
	wake_up_all(&event->waitq);

	if (event->pending_kill) {
		kill_fasync(&event->fasync, SIGIO, event->pending_kill);
		event->pending_kill = 0;
	}
}
/*
 * Handle the case where we need to wake up from NMI (or rq->lock) context.
 *
 * The NMI bit means we cannot possibly take locks. Therefore, maintain a
 * single linked list and use cmpxchg() to add entries lockless.
 */

static void perf_pending_event(struct perf_pending_entry *entry)
{
	struct perf_event *event = container_of(entry,
			struct perf_event, pending);

	if (event->pending_disable) {
		event->pending_disable = 0;
		__perf_event_disable(event);
	}

	if (event->pending_wakeup) {
		event->pending_wakeup = 0;
		perf_event_wakeup(event);
	}
}
#define PENDING_TAIL ((struct perf_pending_entry *)-1UL)

static DEFINE_PER_CPU(struct perf_pending_entry *, perf_pending_head) = {
	PENDING_TAIL,
};

static void perf_pending_queue(struct perf_pending_entry *entry,
			       void (*func)(struct perf_pending_entry *))
{
	struct perf_pending_entry **head;

	if (cmpxchg(&entry->next, NULL, PENDING_TAIL) != NULL)
		return;

	entry->func = func;

	head = &get_cpu_var(perf_pending_head);

	do {
		entry->next = *head;
	} while (cmpxchg(head, entry->next, entry) != entry->next);

	set_perf_event_pending();

	put_cpu_var(perf_pending_head);
}
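/*
 * The push above in slow motion: with H the per-CPU head and E the new
 * entry (E->next already claimed via the NULL -> PENDING_TAIL cmpxchg),
 *
 *   1. E->next = *H            (snapshot the current first entry)
 *   2. cmpxchg(H, E->next, E)  (install E only if H still holds it)
 *
 * If an NMI queued another entry between steps 1 and 2, the cmpxchg
 * fails and we retry with the fresh head, so no entry is ever lost.
 * The initial cmpxchg on E->next also makes double-queueing of the
 * same entry a no-op.
 */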
static int __perf_pending_run(void)
{
	struct perf_pending_entry *list;
	int nr = 0;

	list = xchg(&__get_cpu_var(perf_pending_head), PENDING_TAIL);
	while (list != PENDING_TAIL) {
		void (*func)(struct perf_pending_entry *);
		struct perf_pending_entry *entry = list;

		list = list->next;

		func = entry->func;
		entry->next = NULL;
		/*
		 * Ensure we observe the unqueue before we issue the wakeup,
		 * so that we won't be waiting forever.
		 * -- see perf_not_pending().
		 */
		smp_wmb();

		func(entry);
		nr++;
	}

	return nr;
}
static inline int perf_not_pending(struct perf_event *event)
{
	/*
	 * If we flush on whatever cpu we run, there is a chance we don't
	 * need to wait.
	 */
	get_cpu();
	__perf_pending_run();
	put_cpu();

	/*
	 * Ensure we see the proper queue state before going to sleep
	 * so that we do not miss the wakeup. -- see perf_pending_handle()
	 */
	smp_rmb();
	return event->pending.next == NULL;
}

static void perf_pending_sync(struct perf_event *event)
{
	wait_event(event->waitq, perf_not_pending(event));
}

void perf_event_do_pending(void)
{
	__perf_pending_run();
}
/*
 * Callchain support -- arch specific
 */

__weak struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
{
	return NULL;
}
static bool perf_output_space(struct perf_mmap_data *data, unsigned long tail,
			      unsigned long offset, unsigned long head)
{
	unsigned long mask;

	if (!data->writable)
		return true;

	mask = (data->nr_pages << PAGE_SHIFT) - 1;

	offset = (offset - tail) & mask;
	head   = (head   - tail) & mask;

	if ((int)(head - offset) < 0)
		return false;

	return true;
}
static void perf_output_wakeup(struct perf_output_handle *handle)
{
	atomic_set(&handle->data->poll, POLL_IN);

	if (handle->nmi) {
		handle->event->pending_wakeup = 1;
		perf_pending_queue(&handle->event->pending,
				   perf_pending_event);
	} else
		perf_event_wakeup(handle->event);
}

/*
 * Curious locking construct.
 *
 * We need to ensure a later event_id doesn't publish a head when a former
 * event_id isn't done writing. However since we need to deal with NMIs we
 * cannot fully serialize things.
 *
 * What we do is serialize between CPUs so we only have to deal with NMI
 * nesting on a single CPU.
 *
 * We only publish the head (and generate a wakeup) when the outer-most
 * event_id completes.
 */
static void perf_output_lock(struct perf_output_handle *handle)
{
	struct perf_mmap_data *data = handle->data;
	int cpu;

	handle->locked = 0;

	local_irq_save(handle->flags);
	cpu = smp_processor_id();

	if (in_nmi() && atomic_read(&data->lock) == cpu)
		return;

	while (atomic_cmpxchg(&data->lock, -1, cpu) != -1)
		cpu_relax();

	handle->locked = 1;
}
static void perf_output_unlock(struct perf_output_handle *handle)
{
	struct perf_mmap_data *data = handle->data;
	unsigned long head;
	int cpu;

	data->done_head = data->head;

	if (!handle->locked)
		goto out;

again:
	/*
	 * The xchg implies a full barrier that ensures all writes are done
	 * before we publish the new head, matched by a rmb() in userspace when
	 * reading this position.
	 */
	while ((head = atomic_long_xchg(&data->done_head, 0)))
		data->user_page->data_head = head;

	/*
	 * NMI can happen here, which means we can miss a done_head update.
	 */

	cpu = atomic_xchg(&data->lock, -1);
	WARN_ON_ONCE(cpu != smp_processor_id());

	/*
	 * Therefore we have to validate we did not indeed do so.
	 */
	if (unlikely(atomic_long_read(&data->done_head))) {
		/*
		 * Since we had it locked, we can lock it again.
		 */
		while (atomic_cmpxchg(&data->lock, -1, cpu) != -1)
			cpu_relax();

		goto again;
	}

	if (atomic_xchg(&data->wakeup, 0))
		perf_output_wakeup(handle);
out:
	local_irq_restore(handle->flags);
}
void perf_output_copy(struct perf_output_handle *handle,
		      const void *buf, unsigned int len)
{
	unsigned int pages_mask;
	unsigned int offset;
	unsigned int size;
	void **pages;

	offset		= handle->offset;
	pages_mask	= handle->data->nr_pages - 1;
	pages		= handle->data->data_pages;

	do {
		unsigned int page_offset;
		int nr;

		nr	    = (offset >> PAGE_SHIFT) & pages_mask;
		page_offset = offset & (PAGE_SIZE - 1);
		size	    = min_t(unsigned int, PAGE_SIZE - page_offset, len);

		memcpy(pages[nr] + page_offset, buf, size);

		len	-= size;
		buf	+= size;
		offset	+= size;
	} while (len);

	handle->offset = offset;

	/*
	 * Check we didn't copy past our reservation window, taking the
	 * possible unsigned int wrap into account.
	 */
	WARN_ON_ONCE(((long)(handle->head - handle->offset)) < 0);
}
int perf_output_begin(struct perf_output_handle *handle,
		      struct perf_event *event, unsigned int size,
		      int nmi, int sample)
{
	struct perf_event *output_event;
	struct perf_mmap_data *data;
	unsigned long tail, offset, head;
	int have_lost;
	struct {
		struct perf_event_header header;
		u64			 id;
		u64			 lost;
	} lost_event;

	rcu_read_lock();
	/*
	 * For inherited events we send all the output towards the parent.
	 */
	if (event->parent)
		event = event->parent;

	output_event = rcu_dereference(event->output);
	if (output_event)
		event = output_event;

	data = rcu_dereference(event->data);
	if (!data)
		goto out;

	handle->data	= data;
	handle->event	= event;
	handle->nmi	= nmi;
	handle->sample	= sample;

	if (!data->nr_pages)
		goto fail;

	have_lost = atomic_read(&data->lost);
	if (have_lost)
		size += sizeof(lost_event);

	perf_output_lock(handle);

	do {
		/*
		 * Userspace could choose to issue a mb() before updating the
		 * tail pointer. So that all reads will be completed before the
		 * transaction is started.
		 */
		tail = ACCESS_ONCE(data->user_page->data_tail);
		smp_rmb();
		offset = head = atomic_long_read(&data->head);
		head += size;
		if (unlikely(!perf_output_space(data, tail, offset, head)))
			goto fail;
	} while (atomic_long_cmpxchg(&data->head, offset, head) != offset);

	handle->offset	= offset;
	handle->head	= head;

	if (head - tail > data->watermark)
		atomic_set(&data->wakeup, 1);

	if (have_lost) {
		lost_event.header.type = PERF_RECORD_LOST;
		lost_event.header.misc = 0;
		lost_event.header.size = sizeof(lost_event);
		lost_event.id          = event->id;
		lost_event.lost        = atomic_xchg(&data->lost, 0);

		perf_output_put(handle, lost_event);
	}

	return 0;

fail:
	atomic_inc(&data->lost);
	perf_output_unlock(handle);
out:
	rcu_read_unlock();

	return -ENOSPC;
}
void perf_output_end(struct perf_output_handle *handle)
{
	struct perf_event *event = handle->event;
	struct perf_mmap_data *data = handle->data;

	int wakeup_events = event->attr.wakeup_events;

	if (handle->sample && wakeup_events) {
		int events = atomic_inc_return(&data->events);
		if (events >= wakeup_events) {
			atomic_sub(wakeup_events, &data->events);
			atomic_set(&data->wakeup, 1);
		}
	}

	perf_output_unlock(handle);
	rcu_read_unlock();
}

static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
{
	/*
	 * only top level events have the pid namespace they were created in
	 */
	if (event->parent)
		event = event->parent;

	return task_tgid_nr_ns(p, event->ns);
}

static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
{
	/*
	 * only top level events have the pid namespace they were created in
	 */
	if (event->parent)
		event = event->parent;

	return task_pid_nr_ns(p, event->ns);
}
static void perf_output_read_one(struct perf_output_handle *handle,
				 struct perf_event *event)
{
	u64 read_format = event->attr.read_format;
	u64 values[4];
	int n = 0;

	values[n++] = atomic64_read(&event->count);
	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
		values[n++] = event->total_time_enabled +
			atomic64_read(&event->child_total_time_enabled);
	}
	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
		values[n++] = event->total_time_running +
			atomic64_read(&event->child_total_time_running);
	}
	if (read_format & PERF_FORMAT_ID)
		values[n++] = primary_event_id(event);

	perf_output_copy(handle, values, n * sizeof(u64));
}

/*
 * XXX PERF_FORMAT_GROUP vs inherited events seems difficult.
 */
static void perf_output_read_group(struct perf_output_handle *handle,
				   struct perf_event *event)
{
	struct perf_event *leader = event->group_leader, *sub;
	u64 read_format = event->attr.read_format;
	u64 values[5];
	int n = 0;

	values[n++] = 1 + leader->nr_siblings;

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		values[n++] = leader->total_time_enabled;

	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		values[n++] = leader->total_time_running;

	if (leader != event)
		leader->pmu->read(leader);

	values[n++] = atomic64_read(&leader->count);
	if (read_format & PERF_FORMAT_ID)
		values[n++] = primary_event_id(leader);

	perf_output_copy(handle, values, n * sizeof(u64));

	list_for_each_entry(sub, &leader->sibling_list, group_entry) {
		n = 0;

		if (sub != event)
			sub->pmu->read(sub);

		values[n++] = atomic64_read(&sub->count);
		if (read_format & PERF_FORMAT_ID)
			values[n++] = primary_event_id(sub);

		perf_output_copy(handle, values, n * sizeof(u64));
	}
}
static void perf_output_read(struct perf_output_handle *handle,
			     struct perf_event *event)
{
	if (event->attr.read_format & PERF_FORMAT_GROUP)
		perf_output_read_group(handle, event);
	else
		perf_output_read_one(handle, event);
}

void perf_output_sample(struct perf_output_handle *handle,
			struct perf_event_header *header,
			struct perf_sample_data *data,
			struct perf_event *event)
{
	u64 sample_type = data->type;

	perf_output_put(handle, *header);

	if (sample_type & PERF_SAMPLE_IP)
		perf_output_put(handle, data->ip);

	if (sample_type & PERF_SAMPLE_TID)
		perf_output_put(handle, data->tid_entry);

	if (sample_type & PERF_SAMPLE_TIME)
		perf_output_put(handle, data->time);

	if (sample_type & PERF_SAMPLE_ADDR)
		perf_output_put(handle, data->addr);

	if (sample_type & PERF_SAMPLE_ID)
		perf_output_put(handle, data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		perf_output_put(handle, data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		perf_output_put(handle, data->cpu_entry);

	if (sample_type & PERF_SAMPLE_PERIOD)
		perf_output_put(handle, data->period);

	if (sample_type & PERF_SAMPLE_READ)
		perf_output_read(handle, event);

	if (sample_type & PERF_SAMPLE_CALLCHAIN) {
		if (data->callchain) {
			int size = 1;

			if (data->callchain)
				size += data->callchain->nr;

			size *= sizeof(u64);

			perf_output_copy(handle, data->callchain, size);
		} else {
			u64 nr = 0;
			perf_output_put(handle, nr);
		}
	}

	if (sample_type & PERF_SAMPLE_RAW) {
		if (data->raw) {
			perf_output_put(handle, data->raw->size);
			perf_output_copy(handle, data->raw->data,
					 data->raw->size);
		} else {
			struct {
				u32	size;
				u32	data;
			} raw = {
				.size = sizeof(u32),
				.data = 0,
			};
			perf_output_put(handle, raw);
		}
	}
}
void perf_prepare_sample(struct perf_event_header *header,
			 struct perf_sample_data *data,
			 struct perf_event *event,
			 struct pt_regs *regs)
{
	u64 sample_type = event->attr.sample_type;

	data->type = sample_type;

	header->type = PERF_RECORD_SAMPLE;
	header->size = sizeof(*header);

	header->misc = 0;
	header->misc |= perf_misc_flags(regs);

	if (sample_type & PERF_SAMPLE_IP) {
		data->ip = perf_instruction_pointer(regs);

		header->size += sizeof(data->ip);
	}

	if (sample_type & PERF_SAMPLE_TID) {
		/* namespace issues */
		data->tid_entry.pid = perf_event_pid(event, current);
		data->tid_entry.tid = perf_event_tid(event, current);

		header->size += sizeof(data->tid_entry);
	}

	if (sample_type & PERF_SAMPLE_TIME) {
		data->time = perf_clock();

		header->size += sizeof(data->time);
	}

	if (sample_type & PERF_SAMPLE_ADDR)
		header->size += sizeof(data->addr);

	if (sample_type & PERF_SAMPLE_ID) {
		data->id = primary_event_id(event);

		header->size += sizeof(data->id);
	}

	if (sample_type & PERF_SAMPLE_STREAM_ID) {
		data->stream_id = event->id;

		header->size += sizeof(data->stream_id);
	}

	if (sample_type & PERF_SAMPLE_CPU) {
		data->cpu_entry.cpu		= raw_smp_processor_id();
		data->cpu_entry.reserved	= 0;

		header->size += sizeof(data->cpu_entry);
	}

	if (sample_type & PERF_SAMPLE_PERIOD)
		header->size += sizeof(data->period);

	if (sample_type & PERF_SAMPLE_READ)
		header->size += perf_event_read_size(event);

	if (sample_type & PERF_SAMPLE_CALLCHAIN) {
		int size = 1;

		data->callchain = perf_callchain(regs);

		if (data->callchain)
			size += data->callchain->nr;

		header->size += size * sizeof(u64);
	}

	if (sample_type & PERF_SAMPLE_RAW) {
		int size = sizeof(u32);

		if (data->raw)
			size += data->raw->size;
		else
			size += sizeof(u32);

		WARN_ON_ONCE(size & (sizeof(u64)-1));
		header->size += size;
	}
}
static void perf_event_output(struct perf_event *event, int nmi,
				struct perf_sample_data *data,
				struct pt_regs *regs)
{
	struct perf_output_handle handle;
	struct perf_event_header header;

	perf_prepare_sample(&header, data, event, regs);

	if (perf_output_begin(&handle, event, header.size, nmi, 1))
		return;

	perf_output_sample(&handle, &header, data, event);

	perf_output_end(&handle);
}

/*
 * read event_id
 */

struct perf_read_event {
	struct perf_event_header	header;

	u32				pid;
	u32				tid;
};

static void
perf_event_read_event(struct perf_event *event,
			struct task_struct *task)
{
	struct perf_output_handle handle;
	struct perf_read_event read_event = {
		.header = {
			.type = PERF_RECORD_READ,
			.misc = 0,
			.size = sizeof(read_event) + perf_event_read_size(event),
		},
		.pid = perf_event_pid(event, task),
		.tid = perf_event_tid(event, task),
	};
	int ret;

	ret = perf_output_begin(&handle, event, read_event.header.size, 0, 0);
	if (ret)
		return;

	perf_output_put(&handle, read_event);
	perf_output_read(&handle, event);

	perf_output_end(&handle);
}
/*
 * task tracking -- fork/exit
 *
 * enabled by: attr.comm | attr.mmap | attr.task
 */

struct perf_task_event {
	struct task_struct		*task;
	struct perf_event_context	*task_ctx;

	struct {
		struct perf_event_header	header;

		u32				pid;
		u32				ppid;
		u32				tid;
		u32				ptid;
		u64				time;
	} event_id;
};

static void perf_event_task_output(struct perf_event *event,
				     struct perf_task_event *task_event)
{
	struct perf_output_handle handle;
	int size;
	struct task_struct *task = task_event->task;
	int ret;

	size  = task_event->event_id.header.size;
	ret = perf_output_begin(&handle, event, size, 0, 0);

	if (ret)
		return;

	task_event->event_id.pid = perf_event_pid(event, task);
	task_event->event_id.ppid = perf_event_pid(event, current);

	task_event->event_id.tid = perf_event_tid(event, task);
	task_event->event_id.ptid = perf_event_tid(event, current);

	task_event->event_id.time = perf_clock();

	perf_output_put(&handle, task_event->event_id);

	perf_output_end(&handle);
}
static int perf_event_task_match(struct perf_event *event)
{
	if (event->attr.comm || event->attr.mmap || event->attr.task)
		return 1;

	return 0;
}

static void perf_event_task_ctx(struct perf_event_context *ctx,
				  struct perf_task_event *task_event)
{
	struct perf_event *event;

	if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
		if (perf_event_task_match(event))
			perf_event_task_output(event, task_event);
	}
	rcu_read_unlock();
}

static void perf_event_task_event(struct perf_task_event *task_event)
{
	struct perf_cpu_context *cpuctx;
	struct perf_event_context *ctx = task_event->task_ctx;

	cpuctx = &get_cpu_var(perf_cpu_context);
	perf_event_task_ctx(&cpuctx->ctx, task_event);
	put_cpu_var(perf_cpu_context);

	rcu_read_lock();
	if (!ctx)
		ctx = rcu_dereference(task_event->task->perf_event_ctxp);
	if (ctx)
		perf_event_task_ctx(ctx, task_event);
	rcu_read_unlock();
}

static void perf_event_task(struct task_struct *task,
			      struct perf_event_context *task_ctx,
			      int new)
{
	struct perf_task_event task_event;

	if (!atomic_read(&nr_comm_events) &&
	    !atomic_read(&nr_mmap_events) &&
	    !atomic_read(&nr_task_events))
		return;

	task_event = (struct perf_task_event){
		.task	  = task,
		.task_ctx = task_ctx,
		.event_id    = {
			.header = {
				.type = new ? PERF_RECORD_FORK : PERF_RECORD_EXIT,
				.misc = 0,
				.size = sizeof(task_event.event_id),
			},
			/* .pid  */
			/* .ppid */
			/* .tid  */
			/* .ptid */
		},
	};

	perf_event_task_event(&task_event);
}

void perf_event_fork(struct task_struct *task)
{
	perf_event_task(task, NULL, 1);
}
/*
 * comm tracking
 */

struct perf_comm_event {
	struct task_struct	*task;
	char			*comm;
	int			comm_size;

	struct {
		struct perf_event_header	header;

		u32				pid;
		u32				tid;
	} event_id;
};

static void perf_event_comm_output(struct perf_event *event,
				     struct perf_comm_event *comm_event)
{
	struct perf_output_handle handle;
	int size = comm_event->event_id.header.size;
	int ret = perf_output_begin(&handle, event, size, 0, 0);

	if (ret)
		return;

	comm_event->event_id.pid = perf_event_pid(event, comm_event->task);
	comm_event->event_id.tid = perf_event_tid(event, comm_event->task);

	perf_output_put(&handle, comm_event->event_id);
	perf_output_copy(&handle, comm_event->comm,
				   comm_event->comm_size);
	perf_output_end(&handle);
}

static int perf_event_comm_match(struct perf_event *event)
{
	if (event->attr.comm)
		return 1;

	return 0;
}

static void perf_event_comm_ctx(struct perf_event_context *ctx,
				  struct perf_comm_event *comm_event)
{
	struct perf_event *event;

	if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
		if (perf_event_comm_match(event))
			perf_event_comm_output(event, comm_event);
	}
	rcu_read_unlock();
}
static void perf_event_comm_event(struct perf_comm_event *comm_event)
{
	struct perf_cpu_context *cpuctx;
	struct perf_event_context *ctx;
	unsigned int size;
	char comm[TASK_COMM_LEN];

	memset(comm, 0, sizeof(comm));
	strncpy(comm, comm_event->task->comm, sizeof(comm));
	size = ALIGN(strlen(comm)+1, sizeof(u64));

	comm_event->comm = comm;
	comm_event->comm_size = size;

	comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;

	cpuctx = &get_cpu_var(perf_cpu_context);
	perf_event_comm_ctx(&cpuctx->ctx, comm_event);
	put_cpu_var(perf_cpu_context);

	rcu_read_lock();
	/*
	 * doesn't really matter which of the child contexts the
	 * event ends up in.
	 */
	ctx = rcu_dereference(current->perf_event_ctxp);
	if (ctx)
		perf_event_comm_ctx(ctx, comm_event);
	rcu_read_unlock();
}

void perf_event_comm(struct task_struct *task)
{
	struct perf_comm_event comm_event;

	if (task->perf_event_ctxp)
		perf_event_enable_on_exec(task);

	if (!atomic_read(&nr_comm_events))
		return;

	comm_event = (struct perf_comm_event){
		.task	= task,
		/* .comm      */
		/* .comm_size */
		.event_id  = {
			.header = {
				.type = PERF_RECORD_COMM,
				.misc = 0,
				/* .size */
			},
			/* .pid */
			/* .tid */
		},
	};

	perf_event_comm_event(&comm_event);
}
/*
 * mmap tracking
 */

struct perf_mmap_event {
	struct vm_area_struct	*vma;

	const char		*file_name;
	int			file_size;

	struct {
		struct perf_event_header	header;

		u32				pid;
		u32				tid;
		u64				start;
		u64				len;
		u64				pgoff;
	} event_id;
};

static void perf_event_mmap_output(struct perf_event *event,
				     struct perf_mmap_event *mmap_event)
{
	struct perf_output_handle handle;
	int size = mmap_event->event_id.header.size;
	int ret = perf_output_begin(&handle, event, size, 0, 0);

	if (ret)
		return;

	mmap_event->event_id.pid = perf_event_pid(event, current);
	mmap_event->event_id.tid = perf_event_tid(event, current);

	perf_output_put(&handle, mmap_event->event_id);
	perf_output_copy(&handle, mmap_event->file_name,
				   mmap_event->file_size);
	perf_output_end(&handle);
}

static int perf_event_mmap_match(struct perf_event *event,
				   struct perf_mmap_event *mmap_event)
{
	if (event->attr.mmap)
		return 1;

	return 0;
}

static void perf_event_mmap_ctx(struct perf_event_context *ctx,
				  struct perf_mmap_event *mmap_event)
{
	struct perf_event *event;

	if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
		if (perf_event_mmap_match(event, mmap_event))
			perf_event_mmap_output(event, mmap_event);
	}
	rcu_read_unlock();
}
static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
{
	struct perf_cpu_context *cpuctx;
	struct perf_event_context *ctx;
	struct vm_area_struct *vma = mmap_event->vma;
	struct file *file = vma->vm_file;
	unsigned int size;
	char tmp[16];
	char *buf = NULL;
	const char *name;

	memset(tmp, 0, sizeof(tmp));

	if (file) {
		/*
		 * d_path works from the end of the buffer backwards, so we
		 * need to add enough zero bytes after the string to handle
		 * the 64bit alignment we do later.
		 */
		buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
		if (!buf) {
			name = strncpy(tmp, "//enomem", sizeof(tmp));
			goto got_name;
		}
		name = d_path(&file->f_path, buf, PATH_MAX);
		if (IS_ERR(name)) {
			name = strncpy(tmp, "//toolong", sizeof(tmp));
			goto got_name;
		}
	} else {
		if (arch_vma_name(mmap_event->vma)) {
			name = strncpy(tmp, arch_vma_name(mmap_event->vma),
				       sizeof(tmp));
			goto got_name;
		}

		if (!vma->vm_mm) {
			name = strncpy(tmp, "[vdso]", sizeof(tmp));
			goto got_name;
		}

		name = strncpy(tmp, "//anon", sizeof(tmp));
		goto got_name;
	}

got_name:
	size = ALIGN(strlen(name)+1, sizeof(u64));

	mmap_event->file_name = name;
	mmap_event->file_size = size;

	mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;

	cpuctx = &get_cpu_var(perf_cpu_context);
	perf_event_mmap_ctx(&cpuctx->ctx, mmap_event);
	put_cpu_var(perf_cpu_context);

	rcu_read_lock();
	/*
	 * doesn't really matter which of the child contexts the
	 * event ends up in.
	 */
	ctx = rcu_dereference(current->perf_event_ctxp);
	if (ctx)
		perf_event_mmap_ctx(ctx, mmap_event);
	rcu_read_unlock();

	kfree(buf);
}

void __perf_event_mmap(struct vm_area_struct *vma)
{
	struct perf_mmap_event mmap_event;

	if (!atomic_read(&nr_mmap_events))
		return;

	mmap_event = (struct perf_mmap_event){
		.vma	= vma,
		/* .file_name */
		/* .file_size */
		.event_id  = {
			.header = {
				.type = PERF_RECORD_MMAP,
				.misc = 0,
				/* .size */
			},
			/* .pid */
			/* .tid */
			.start  = vma->vm_start,
			.len    = vma->vm_end - vma->vm_start,
			.pgoff  = vma->vm_pgoff,
		},
	};

	perf_event_mmap_event(&mmap_event);
}
/*
 * IRQ throttle logging
 */

static void perf_log_throttle(struct perf_event *event, int enable)
{
	struct perf_output_handle handle;
	int ret;

	struct {
		struct perf_event_header	header;
		u64				time;
		u64				id;
		u64				stream_id;
	} throttle_event = {
		.header = {
			.type = PERF_RECORD_THROTTLE,
			.misc = 0,
			.size = sizeof(throttle_event),
		},
		.time		= perf_clock(),
		.id		= primary_event_id(event),
		.stream_id	= event->id,
	};

	if (enable)
		throttle_event.header.type = PERF_RECORD_UNTHROTTLE;

	ret = perf_output_begin(&handle, event, sizeof(throttle_event), 1, 0);
	if (ret)
		return;

	perf_output_put(&handle, throttle_event);
	perf_output_end(&handle);
}

/*
 * Generic event overflow handling, sampling.
 */

static int __perf_event_overflow(struct perf_event *event, int nmi,
				   int throttle, struct perf_sample_data *data,
				   struct pt_regs *regs)
{
	int events = atomic_read(&event->event_limit);
	struct hw_perf_event *hwc = &event->hw;
	int ret = 0;

	throttle = (throttle && event->pmu->unthrottle != NULL);

	if (!throttle) {
		hwc->interrupts++;
	} else {
		if (hwc->interrupts != MAX_INTERRUPTS) {
			hwc->interrupts++;
			if (HZ * hwc->interrupts >
					(u64)sysctl_perf_event_sample_rate) {
				hwc->interrupts = MAX_INTERRUPTS;
				perf_log_throttle(event, 0);
				ret = 1;
			}
		} else {
			/*
			 * Keep re-disabling events even though on the previous
			 * pass we disabled it - just in case we raced with a
			 * sched-in and the event got enabled again:
			 */
			ret = 1;
		}
	}

	if (event->attr.freq) {
		u64 now = perf_clock();
		s64 delta = now - hwc->freq_stamp;

		hwc->freq_stamp = now;

		if (delta > 0 && delta < TICK_NSEC)
			perf_adjust_period(event, NSEC_PER_SEC / (int)delta);
	}

	/*
	 * XXX event_limit might not quite work as expected on inherited
	 * events
	 */

	event->pending_kill = POLL_IN;
	if (events && atomic_dec_and_test(&event->event_limit)) {
		ret = 1;
		event->pending_kill = POLL_HUP;
		if (nmi) {
			event->pending_disable = 1;
			perf_pending_queue(&event->pending,
					   perf_pending_event);
		} else
			perf_event_disable(event);
	}

	perf_event_output(event, nmi, data, regs);
	return ret;
}

int perf_event_overflow(struct perf_event *event, int nmi,
			  struct perf_sample_data *data,
			  struct pt_regs *regs)
{
	return __perf_event_overflow(event, nmi, 1, data, regs);
}
/*
 * Generic software event infrastructure
 */

/*
 * We directly increment event->count and keep a second value in
 * event->hw.period_left to count intervals. This period event
 * is kept in the range [-sample_period, 0] so that we can use the
 * sign as trigger.
 */

static u64 perf_swevent_set_period(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 period = hwc->last_period;
	u64 nr, offset;
	s64 old, val;

	hwc->last_period = hwc->sample_period;

again:
	old = val = atomic64_read(&hwc->period_left);
	if (val < 0)
		return 0;

	nr = div64_u64(period + val, period);
	offset = nr * period;
	val -= offset;
	if (atomic64_cmpxchg(&hwc->period_left, old, val) != old)
		goto again;

	return nr;
}
static void perf_swevent_overflow(struct perf_event *event,
				    int nmi, struct perf_sample_data *data,
				    struct pt_regs *regs)
{
	struct hw_perf_event *hwc = &event->hw;
	int throttle = 0;
	u64 overflow;

	data->period = event->hw.last_period;
	overflow = perf_swevent_set_period(event);

	if (hwc->interrupts == MAX_INTERRUPTS)
		return;

	for (; overflow; overflow--) {
		if (__perf_event_overflow(event, nmi, throttle,
					    data, regs)) {
			/*
			 * We inhibit the overflow from happening when
			 * hwc->interrupts == MAX_INTERRUPTS.
			 */
			break;
		}
		throttle = 1;
	}
}

static void perf_swevent_unthrottle(struct perf_event *event)
{
	/*
	 * Nothing to do, we already reset hwc->interrupts.
	 */
}

static void perf_swevent_add(struct perf_event *event, u64 nr,
			       int nmi, struct perf_sample_data *data,
			       struct pt_regs *regs)
{
	struct hw_perf_event *hwc = &event->hw;

	atomic64_add(nr, &event->count);

	if (!hwc->sample_period)
		return;

	if (!regs)
		return;

	if (!atomic64_add_negative(nr, &hwc->period_left))
		perf_swevent_overflow(event, nmi, data, regs);
}

static int perf_swevent_is_counting(struct perf_event *event)
{
	/*
	 * The event is active, we're good!
	 */
	if (event->state == PERF_EVENT_STATE_ACTIVE)
		return 1;

	/*
	 * The event is off/error, not counting.
	 */
	if (event->state != PERF_EVENT_STATE_INACTIVE)
		return 0;

	/*
	 * The event is inactive, if the context is active
	 * we're part of a group that didn't make it on the 'pmu',
	 * not counting.
	 */
	if (event->ctx->is_active)
		return 0;

	/*
	 * We're inactive and the context is too, this means the
	 * task is scheduled out, we're counting events that happen
	 * to us, like migration events.
	 */
	return 1;
}

static int perf_swevent_match(struct perf_event *event,
				enum perf_type_id type,
				u32 event_id, struct pt_regs *regs)
{
	if (!perf_swevent_is_counting(event))
		return 0;

	if (event->attr.type != type)
		return 0;
	if (event->attr.config != event_id)
		return 0;

	if (regs) {
		if (event->attr.exclude_user && user_mode(regs))
			return 0;

		if (event->attr.exclude_kernel && !user_mode(regs))
			return 0;
	}

	return 1;
}
static void perf_swevent_ctx_event(struct perf_event_context *ctx,
				     enum perf_type_id type,
				     u32 event_id, u64 nr, int nmi,
				     struct perf_sample_data *data,
				     struct pt_regs *regs)
{
	struct perf_event *event;

	if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
		if (perf_swevent_match(event, type, event_id, regs))
			perf_swevent_add(event, nr, nmi, data, regs);
	}
	rcu_read_unlock();
}

static int *perf_swevent_recursion_context(struct perf_cpu_context *cpuctx)
{
	if (in_nmi())
		return &cpuctx->recursion[3];

	if (in_irq())
		return &cpuctx->recursion[2];

	if (in_softirq())
		return &cpuctx->recursion[1];

	return &cpuctx->recursion[0];
}

static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
			       u64 nr, int nmi,
			       struct perf_sample_data *data,
			       struct pt_regs *regs)
{
	struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context);
	int *recursion = perf_swevent_recursion_context(cpuctx);
	struct perf_event_context *ctx;

	if (*recursion)
		goto out;

	(*recursion)++;
	barrier();

	perf_swevent_ctx_event(&cpuctx->ctx, type, event_id,
				 nr, nmi, data, regs);
	rcu_read_lock();
	/*
	 * doesn't really matter which of the child contexts the
	 * event ends up in.
	 */
	ctx = rcu_dereference(current->perf_event_ctxp);
	if (ctx)
		perf_swevent_ctx_event(ctx, type, event_id, nr, nmi, data, regs);
	rcu_read_unlock();

	barrier();
	(*recursion)--;

out:
	put_cpu_var(perf_cpu_context);
}

void __perf_sw_event(u32 event_id, u64 nr, int nmi,
			    struct pt_regs *regs, u64 addr)
{
	struct perf_sample_data data = {
		.addr = addr,
	};

	do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, nmi,
				&data, regs);
}
static void perf_swevent_read(struct perf_event *event)
{
}

static int perf_swevent_enable(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (hwc->sample_period) {
		hwc->last_period = hwc->sample_period;
		perf_swevent_set_period(event);
	}
	return 0;
}

static void perf_swevent_disable(struct perf_event *event)
{
}

static const struct pmu perf_ops_generic = {
	.enable		= perf_swevent_enable,
	.disable	= perf_swevent_disable,
	.read		= perf_swevent_read,
	.unthrottle	= perf_swevent_unthrottle,
};

/*
 * hrtimer based swevent callback
 */

static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
{
	enum hrtimer_restart ret = HRTIMER_RESTART;
	struct perf_sample_data data;
	struct pt_regs *regs;
	struct perf_event *event;
	u64 period;

	event	= container_of(hrtimer, struct perf_event, hw.hrtimer);
	event->pmu->read(event);

	data.addr = 0;
	regs = get_irq_regs();
	/*
	 * In case we exclude kernel IPs or are somehow not in interrupt
	 * context, provide the next best thing, the user IP.
	 */
	if ((event->attr.exclude_kernel || !regs) &&
			!event->attr.exclude_user)
		regs = task_pt_regs(current);

	if (regs) {
		if (perf_event_overflow(event, 0, &data, regs))
			ret = HRTIMER_NORESTART;
	}

	period = max_t(u64, 10000, event->hw.sample_period);
	hrtimer_forward_now(hrtimer, ns_to_ktime(period));

	return ret;
}
/*
 * Software event: cpu wall time clock
 */

static void cpu_clock_perf_event_update(struct perf_event *event)
{
	int cpu = raw_smp_processor_id();
	s64 prev;
	u64 now;

	now = cpu_clock(cpu);
	prev = atomic64_read(&event->hw.prev_count);
	atomic64_set(&event->hw.prev_count, now);
	atomic64_add(now - prev, &event->count);
}

static int cpu_clock_perf_event_enable(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int cpu = raw_smp_processor_id();

	atomic64_set(&hwc->prev_count, cpu_clock(cpu));
	hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hwc->hrtimer.function = perf_swevent_hrtimer;
	if (hwc->sample_period) {
		u64 period = max_t(u64, 10000, hwc->sample_period);
		__hrtimer_start_range_ns(&hwc->hrtimer,
				ns_to_ktime(period), 0,
				HRTIMER_MODE_REL, 0);
	}

	return 0;
}

static void cpu_clock_perf_event_disable(struct perf_event *event)
{
	if (event->hw.sample_period)
		hrtimer_cancel(&event->hw.hrtimer);
	cpu_clock_perf_event_update(event);
}

static void cpu_clock_perf_event_read(struct perf_event *event)
{
	cpu_clock_perf_event_update(event);
}

static const struct pmu perf_ops_cpu_clock = {
	.enable		= cpu_clock_perf_event_enable,
	.disable	= cpu_clock_perf_event_disable,
	.read		= cpu_clock_perf_event_read,
};

/*
 * Software event: task time clock
 */

static void task_clock_perf_event_update(struct perf_event *event, u64 now)
{
	u64 prev;
	s64 delta;

	prev = atomic64_xchg(&event->hw.prev_count, now);
	delta = now - prev;
	atomic64_add(delta, &event->count);
}

static int task_clock_perf_event_enable(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 now;

	now = event->ctx->time;

	atomic64_set(&hwc->prev_count, now);
	hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hwc->hrtimer.function = perf_swevent_hrtimer;
	if (hwc->sample_period) {
		u64 period = max_t(u64, 10000, hwc->sample_period);
		__hrtimer_start_range_ns(&hwc->hrtimer,
				ns_to_ktime(period), 0,
				HRTIMER_MODE_REL, 0);
	}

	return 0;
}

static void task_clock_perf_event_disable(struct perf_event *event)
{
	if (event->hw.sample_period)
		hrtimer_cancel(&event->hw.hrtimer);
	task_clock_perf_event_update(event, event->ctx->time);
}

static void task_clock_perf_event_read(struct perf_event *event)
{
	u64 time;

	if (!in_nmi()) {
		update_context_time(event->ctx);
		time = event->ctx->time;
	} else {
		u64 now = perf_clock();
		u64 delta = now - event->ctx->timestamp;
		time = event->ctx->time + delta;
	}

	task_clock_perf_event_update(event, time);
}

static const struct pmu perf_ops_task_clock = {
	.enable		= task_clock_perf_event_enable,
	.disable	= task_clock_perf_event_disable,
	.read		= task_clock_perf_event_read,
};
#ifdef CONFIG_EVENT_PROFILE

void perf_tp_event(int event_id, u64 addr, u64 count, void *record,
			  int entry_size)
{
	struct perf_raw_record raw = {
		.size = entry_size,
		.data = record,
	};

	struct perf_sample_data data = {
		.addr = addr,
		.raw = &raw,
	};

	struct pt_regs *regs = get_irq_regs();

	if (!regs)
		regs = task_pt_regs(current);

	do_perf_sw_event(PERF_TYPE_TRACEPOINT, event_id, count, 1,
				&data, regs);
}
EXPORT_SYMBOL_GPL(perf_tp_event);

extern int ftrace_profile_enable(int);
extern void ftrace_profile_disable(int);

static void tp_perf_event_destroy(struct perf_event *event)
{
	ftrace_profile_disable(event->attr.config);
}

static const struct pmu *tp_perf_event_init(struct perf_event *event)
{
	/*
	 * Raw tracepoint data is a severe data leak, only allow root to
	 * have these.
	 */
	if ((event->attr.sample_type & PERF_SAMPLE_RAW) &&
			perf_paranoid_tracepoint_raw() &&
			!capable(CAP_SYS_ADMIN))
		return ERR_PTR(-EPERM);

	if (ftrace_profile_enable(event->attr.config))
		return NULL;

	event->destroy = tp_perf_event_destroy;

	return &perf_ops_generic;
}
#else
static const struct pmu *tp_perf_event_init(struct perf_event *event)
{
	return NULL;
}
#endif /* CONFIG_EVENT_PROFILE */

atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];

static void sw_perf_event_destroy(struct perf_event *event)
{
	u64 event_id = event->attr.config;

	WARN_ON(event->parent);

	atomic_dec(&perf_swevent_enabled[event_id]);
}

static const struct pmu *sw_perf_event_init(struct perf_event *event)
{
	const struct pmu *pmu = NULL;
	u64 event_id = event->attr.config;

	/*
	 * Software events (currently) can't in general distinguish
	 * between user, kernel and hypervisor events.
	 * However, context switches and cpu migrations are considered
	 * to be kernel events, and page faults are never hypervisor
	 * events.
	 */
	switch (event_id) {
	case PERF_COUNT_SW_CPU_CLOCK:
		pmu = &perf_ops_cpu_clock;

		break;
	case PERF_COUNT_SW_TASK_CLOCK:
		/*
		 * If the user instantiates this as a per-cpu event,
		 * use the cpu_clock event instead.
		 */
		if (event->ctx->task)
			pmu = &perf_ops_task_clock;
		else
			pmu = &perf_ops_cpu_clock;

		break;
	case PERF_COUNT_SW_PAGE_FAULTS:
	case PERF_COUNT_SW_PAGE_FAULTS_MIN:
	case PERF_COUNT_SW_PAGE_FAULTS_MAJ:
	case PERF_COUNT_SW_CONTEXT_SWITCHES:
	case PERF_COUNT_SW_CPU_MIGRATIONS:
		if (!event->parent) {
			atomic_inc(&perf_swevent_enabled[event_id]);
			event->destroy = sw_perf_event_destroy;
		}
		pmu = &perf_ops_generic;
		break;
	}

	return pmu;
}
/*
 * Allocate and initialize an event structure
 */
static struct perf_event *
perf_event_alloc(struct perf_event_attr *attr,
		   int cpu,
		   struct perf_event_context *ctx,
		   struct perf_event *group_leader,
		   struct perf_event *parent_event,
		   gfp_t gfpflags)
{
	const struct pmu *pmu;
	struct perf_event *event;
	struct hw_perf_event *hwc;
	long err;

	event = kzalloc(sizeof(*event), gfpflags);
	if (!event)
		return ERR_PTR(-ENOMEM);

	/*
	 * Single events are their own group leaders, with an
	 * empty sibling list:
	 */
	if (!group_leader)
		group_leader = event;

	mutex_init(&event->child_mutex);
	INIT_LIST_HEAD(&event->child_list);

	INIT_LIST_HEAD(&event->group_entry);
	INIT_LIST_HEAD(&event->event_entry);
	INIT_LIST_HEAD(&event->sibling_list);
	init_waitqueue_head(&event->waitq);

	mutex_init(&event->mmap_mutex);

	event->cpu		= cpu;
	event->attr		= *attr;
	event->group_leader	= group_leader;
	event->pmu		= NULL;
	event->ctx		= ctx;
	event->oncpu		= -1;

	event->parent		= parent_event;

	event->ns		= get_pid_ns(current->nsproxy->pid_ns);
	event->id		= atomic64_inc_return(&perf_event_id);

	event->state		= PERF_EVENT_STATE_INACTIVE;

	if (attr->disabled)
		event->state = PERF_EVENT_STATE_OFF;

	pmu = NULL;

	hwc = &event->hw;
	hwc->sample_period = attr->sample_period;
	if (attr->freq && attr->sample_freq)
		hwc->sample_period = 1;
	hwc->last_period = hwc->sample_period;

	atomic64_set(&hwc->period_left, hwc->sample_period);

	/*
	 * we currently do not support PERF_FORMAT_GROUP on inherited events
	 */
	if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP))
		goto done;

	switch (attr->type) {
	case PERF_TYPE_RAW:
	case PERF_TYPE_HARDWARE:
	case PERF_TYPE_HW_CACHE:
		pmu = hw_perf_event_init(event);
		break;

	case PERF_TYPE_SOFTWARE:
		pmu = sw_perf_event_init(event);
		break;

	case PERF_TYPE_TRACEPOINT:
		pmu = tp_perf_event_init(event);
		break;

	default:
		break;
	}
done:
	err = 0;
	if (!pmu)
		err = -EINVAL;
	else if (IS_ERR(pmu))
		err = PTR_ERR(pmu);

	if (err) {
		if (event->ns)
			put_pid_ns(event->ns);
		kfree(event);
		return ERR_PTR(err);
	}

	event->pmu = pmu;

	if (!event->parent) {
		atomic_inc(&nr_events);
		if (event->attr.mmap)
			atomic_inc(&nr_mmap_events);
		if (event->attr.comm)
			atomic_inc(&nr_comm_events);
		if (event->attr.task)
			atomic_inc(&nr_task_events);
	}

	return event;
}
static int perf_copy_attr(struct perf_event_attr __user *uattr,
			  struct perf_event_attr *attr)
{
	u32 size;
	int ret;

	if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0))
		return -EFAULT;

	/*
	 * zero the full structure, so that a short copy will be nice.
	 */
	memset(attr, 0, sizeof(*attr));

	ret = get_user(size, &uattr->size);
	if (ret)
		return ret;

	if (size > PAGE_SIZE)	/* silly large */
		goto err_size;

	if (!size)		/* abi compat */
		size = PERF_ATTR_SIZE_VER0;

	if (size < PERF_ATTR_SIZE_VER0)
		goto err_size;

	/*
	 * If we're handed a bigger struct than we know of,
	 * ensure all the unknown bits are 0 - i.e. new
	 * user-space does not rely on any kernel feature
	 * extensions we don't know about yet.
	 */
	if (size > sizeof(*attr)) {
		unsigned char __user *addr;
		unsigned char __user *end;
		unsigned char val;

		addr = (void __user *)uattr + sizeof(*attr);
		end  = (void __user *)uattr + size;

		for (; addr < end; addr++) {
			ret = get_user(val, addr);
			if (ret)
				return ret;
			if (val)
				goto err_size;
		}
		size = sizeof(*attr);
	}

	ret = copy_from_user(attr, uattr, size);
	if (ret)
		return -EFAULT;

	/*
	 * If the type exists, the corresponding creation will verify
	 * the attr->config.
	 */
	if (attr->type >= PERF_TYPE_MAX)
		return -EINVAL;

	if (attr->__reserved_1 || attr->__reserved_2 || attr->__reserved_3)
		return -EINVAL;

	if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
		return -EINVAL;

	if (attr->read_format & ~(PERF_FORMAT_MAX-1))
		return -EINVAL;

out:
	return ret;

err_size:
	put_user(sizeof(*attr), &uattr->size);
	ret = -E2BIG;
	goto out;
}
*event
, int output_fd
)
4275 struct perf_event
*output_event
= NULL
;
4276 struct file
*output_file
= NULL
;
4277 struct perf_event
*old_output
;
4278 int fput_needed
= 0;
4284 output_file
= fget_light(output_fd
, &fput_needed
);
4288 if (output_file
->f_op
!= &perf_fops
)
4291 output_event
= output_file
->private_data
;
4293 /* Don't chain output fds */
4294 if (output_event
->output
)
4297 /* Don't set an output fd when we already have an output channel */
4301 atomic_long_inc(&output_file
->f_count
);
4304 mutex_lock(&event
->mmap_mutex
);
4305 old_output
= event
->output
;
4306 rcu_assign_pointer(event
->output
, output_event
);
4307 mutex_unlock(&event
->mmap_mutex
);
4311 * we need to make sure no existing perf_output_*()
4312 * is still referencing this event.
4315 fput(old_output
->filp
);
4320 fput_light(output_file
, fput_needed
);
/**
 * sys_perf_event_open - open a performance event, associate it to a task/cpu
 *
 * @attr_uptr:	event_id type attributes for monitoring/sampling
 * @pid:		target pid
 * @cpu:		target cpu
 * @group_fd:		group leader event fd
 */
SYSCALL_DEFINE5(perf_event_open,
		struct perf_event_attr __user *, attr_uptr,
		pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
{
	struct perf_event *event, *group_leader;
	struct perf_event_attr attr;
	struct perf_event_context *ctx;
	struct file *event_file = NULL;
	struct file *group_file = NULL;
	int fput_needed = 0;
	int fput_needed2 = 0;
	int err;

	/* for future expandability... */
	if (flags & ~(PERF_FLAG_FD_NO_GROUP | PERF_FLAG_FD_OUTPUT))
		return -EINVAL;

	err = perf_copy_attr(attr_uptr, &attr);
	if (err)
		return err;

	if (!attr.exclude_kernel) {
		if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
			return -EACCES;
	}

	if (attr.freq) {
		if (attr.sample_freq > sysctl_perf_event_sample_rate)
			return -EINVAL;
	}

	/*
	 * Get the target context (task or percpu):
	 */
	ctx = find_get_context(pid, cpu);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	/*
	 * Look up the group leader (we will attach this event to it):
	 */
	group_leader = NULL;
	if (group_fd != -1 && !(flags & PERF_FLAG_FD_NO_GROUP)) {
		err = -EINVAL;
		group_file = fget_light(group_fd, &fput_needed);
		if (!group_file)
			goto err_put_context;
		if (group_file->f_op != &perf_fops)
			goto err_put_context;

		group_leader = group_file->private_data;
		/*
		 * Do not allow a recursive hierarchy (this new sibling
		 * becoming part of another group-sibling):
		 */
		if (group_leader->group_leader != group_leader)
			goto err_put_context;
		/*
		 * Do not allow to attach to a group in a different
		 * task or CPU context:
		 */
		if (group_leader->ctx != ctx)
			goto err_put_context;
		/*
		 * Only a group leader can be exclusive or pinned
		 */
		if (attr.exclusive || attr.pinned)
			goto err_put_context;
	}

	event = perf_event_alloc(&attr, cpu, ctx, group_leader,
				     NULL, GFP_KERNEL);
	err = PTR_ERR(event);
	if (IS_ERR(event))
		goto err_put_context;

	err = anon_inode_getfd("[perf_event]", &perf_fops, event, 0);
	if (err < 0)
		goto err_free_put_context;

	event_file = fget_light(err, &fput_needed2);
	if (!event_file)
		goto err_free_put_context;

	if (flags & PERF_FLAG_FD_OUTPUT) {
		err = perf_event_set_output(event, group_fd);
		if (err)
			goto err_fput_free_put_context;
	}

	event->filp = event_file;
	WARN_ON_ONCE(ctx->parent_ctx);
	mutex_lock(&ctx->mutex);
	perf_install_in_context(ctx, event, cpu);
	++ctx->generation;
	mutex_unlock(&ctx->mutex);

	event->owner = current;
	get_task_struct(current);
	mutex_lock(&current->perf_event_mutex);
	list_add_tail(&event->owner_entry, &current->perf_event_list);
	mutex_unlock(&current->perf_event_mutex);

	return err;

err_fput_free_put_context:
	fput_light(event_file, fput_needed2);

err_free_put_context:
	free_event(event);

err_put_context:
	put_ctx(ctx);

	fput_light(group_file, fput_needed);

	return err;
}
/*
 * inherit an event from parent task to child task:
 */
static struct perf_event *
inherit_event(struct perf_event *parent_event,
	      struct task_struct *parent,
	      struct perf_event_context *parent_ctx,
	      struct task_struct *child,
	      struct perf_event *group_leader,
	      struct perf_event_context *child_ctx)
{
	struct perf_event *child_event;

	/*
	 * Instead of creating recursive hierarchies of events,
	 * we link inherited events back to the original parent,
	 * which has a filp for sure, which we use as the reference
	 * count:
	 */
	if (parent_event->parent)
		parent_event = parent_event->parent;

	child_event = perf_event_alloc(&parent_event->attr,
					   parent_event->cpu, child_ctx,
					   group_leader, parent_event,
					   GFP_KERNEL);
	if (IS_ERR(child_event))
		return child_event;
	get_ctx(child_ctx);

	/*
	 * Make the child state follow the state of the parent event,
	 * not its attr.disabled bit.  We hold the parent's mutex,
	 * so we won't race with perf_event_{en, dis}able_family.
	 */
	if (parent_event->state >= PERF_EVENT_STATE_INACTIVE)
		child_event->state = PERF_EVENT_STATE_INACTIVE;
	else
		child_event->state = PERF_EVENT_STATE_OFF;

	if (parent_event->attr.freq)
		child_event->hw.sample_period = parent_event->hw.sample_period;

	/*
	 * Link it up in the child's context:
	 */
	add_event_to_ctx(child_event, child_ctx);

	/*
	 * Get a reference to the parent filp - we will fput it
	 * when the child event exits. This is safe to do because
	 * we are in the parent and we know that the filp still
	 * exists and has a nonzero count:
	 */
	atomic_long_inc(&parent_event->filp->f_count);

	/*
	 * Link this into the parent event's child list
	 */
	WARN_ON_ONCE(parent_event->ctx->parent_ctx);
	mutex_lock(&parent_event->child_mutex);
	list_add_tail(&child_event->child_list, &parent_event->child_list);
	mutex_unlock(&parent_event->child_mutex);

	return child_event;
}

static int inherit_group(struct perf_event *parent_event,
	      struct task_struct *parent,
	      struct perf_event_context *parent_ctx,
	      struct task_struct *child,
	      struct perf_event_context *child_ctx)
{
	struct perf_event *leader;
	struct perf_event *sub;
	struct perf_event *child_ctr;

	leader = inherit_event(parent_event, parent, parent_ctx,
				 child, NULL, child_ctx);
	if (IS_ERR(leader))
		return PTR_ERR(leader);
	list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
		child_ctr = inherit_event(sub, parent, parent_ctx,
					    child, leader, child_ctx);
		if (IS_ERR(child_ctr))
			return PTR_ERR(child_ctr);
	}
	return 0;
}
static void sync_child_event(struct perf_event *child_event,
			       struct task_struct *child)
{
	struct perf_event *parent_event = child_event->parent;
	u64 child_val;

	if (child_event->attr.inherit_stat)
		perf_event_read_event(child_event, child);

	child_val = atomic64_read(&child_event->count);

	/*
	 * Add back the child's count to the parent's count:
	 */
	atomic64_add(child_val, &parent_event->count);
	atomic64_add(child_event->total_time_enabled,
		     &parent_event->child_total_time_enabled);
	atomic64_add(child_event->total_time_running,
		     &parent_event->child_total_time_running);

	/*
	 * Remove this event from the parent's list
	 */
	WARN_ON_ONCE(parent_event->ctx->parent_ctx);
	mutex_lock(&parent_event->child_mutex);
	list_del_init(&child_event->child_list);
	mutex_unlock(&parent_event->child_mutex);

	/*
	 * Release the parent event, if this was the last
	 * reference to it.
	 */
	fput(parent_event->filp);
}

static void
__perf_event_exit_task(struct perf_event *child_event,
			 struct perf_event_context *child_ctx,
			 struct task_struct *child)
{
	struct perf_event *parent_event;

	update_event_times(child_event);
	perf_event_remove_from_context(child_event);

	parent_event = child_event->parent;
	/*
	 * It can happen that parent exits first, and has events
	 * that are still around due to the child reference. These
	 * events need to be zapped - but otherwise linger.
	 */
	if (parent_event) {
		sync_child_event(child_event, child);
		free_event(child_event);
	}
}
/*
 * When a child task exits, feed back event values to parent events.
 */
void perf_event_exit_task(struct task_struct *child)
{
	struct perf_event *child_event, *tmp;
	struct perf_event_context *child_ctx;
	unsigned long flags;

	if (likely(!child->perf_event_ctxp)) {
		perf_event_task(child, NULL, 0);
		return;
	}

	local_irq_save(flags);
	/*
	 * We can't reschedule here because interrupts are disabled,
	 * and either child is current or it is a task that can't be
	 * scheduled, so we are now safe from rescheduling changing
	 * our context.
	 */
	child_ctx = child->perf_event_ctxp;
	__perf_event_task_sched_out(child_ctx);

	/*
	 * Take the context lock here so that if find_get_context is
	 * reading child->perf_event_ctxp, we wait until it has
	 * incremented the context's refcount before we do put_ctx below.
	 */
	spin_lock(&child_ctx->lock);
	child->perf_event_ctxp = NULL;
	/*
	 * If this context is a clone; unclone it so it can't get
	 * swapped to another process while we're removing all
	 * the events from it.
	 */
	unclone_ctx(child_ctx);
	spin_unlock_irqrestore(&child_ctx->lock, flags);

	/*
	 * Report the task dead after unscheduling the events so that we
	 * won't get any samples after PERF_RECORD_EXIT. We can however still
	 * get a few PERF_RECORD_READ events.
	 */
	perf_event_task(child, child_ctx, 0);

	/*
	 * We can recurse on the same lock type through:
	 *
	 *   __perf_event_exit_task()
	 *     sync_child_event()
	 *       fput(parent_event->filp)
	 *         perf_release()
	 *           mutex_lock(&ctx->mutex)
	 *
	 * But since it's the parent context it won't be the same instance.
	 */
	mutex_lock_nested(&child_ctx->mutex, SINGLE_DEPTH_NESTING);

again:
	list_for_each_entry_safe(child_event, tmp, &child_ctx->group_list,
				 group_entry)
		__perf_event_exit_task(child_event, child_ctx, child);

	/*
	 * If the last event was a group event, it will have appended all
	 * its siblings to the list, but we obtained 'tmp' before that which
	 * will still point to the list head terminating the iteration.
	 */
	if (!list_empty(&child_ctx->group_list))
		goto again;

	mutex_unlock(&child_ctx->mutex);

	put_ctx(child_ctx);
}

/*
 * free an unexposed, unused context as created by inheritance by
 * init_task below, used by fork() in case of fail.
 */
void perf_event_free_task(struct task_struct *task)
{
	struct perf_event_context *ctx = task->perf_event_ctxp;
	struct perf_event *event, *tmp;

	if (!ctx)
		return;

	mutex_lock(&ctx->mutex);
again:
	list_for_each_entry_safe(event, tmp, &ctx->group_list, group_entry) {
		struct perf_event *parent = event->parent;

		if (WARN_ON_ONCE(!parent))
			continue;

		mutex_lock(&parent->child_mutex);
		list_del_init(&event->child_list);
		mutex_unlock(&parent->child_mutex);

		fput(parent->filp);

		list_del_event(event, ctx);
		free_event(event);
	}

	if (!list_empty(&ctx->group_list))
		goto again;

	mutex_unlock(&ctx->mutex);

	put_ctx(ctx);
}
4716 int perf_event_init_task(struct task_struct
*child
)
4718 struct perf_event_context
*child_ctx
, *parent_ctx
;
4719 struct perf_event_context
*cloned_ctx
;
4720 struct perf_event
*event
;
4721 struct task_struct
*parent
= current
;
4722 int inherited_all
= 1;
4725 child
->perf_event_ctxp
= NULL
;
4727 mutex_init(&child
->perf_event_mutex
);
4728 INIT_LIST_HEAD(&child
->perf_event_list
);
4730 if (likely(!parent
->perf_event_ctxp
))
4734 * This is executed from the parent task context, so inherit
4735 * events that have been marked for cloning.
4736 * First allocate and initialize a context for the child.
4739 child_ctx
= kmalloc(sizeof(struct perf_event_context
), GFP_KERNEL
);
4743 __perf_event_init_context(child_ctx
, child
);
4744 child
->perf_event_ctxp
= child_ctx
;
4745 get_task_struct(child
);
4748 * If the parent's context is a clone, pin it so it won't get
4751 parent_ctx
= perf_pin_task_context(parent
);
4754 * No need to check if parent_ctx != NULL here; since we saw
4755 * it non-NULL earlier, the only reason for it to become NULL
4756 * is if we exit, and since we're currently in the middle of
4757 * a fork we can't be exiting at the same time.
4761 * Lock the parent list. No need to lock the child - not PID
4762 * hashed yet and not running, so nobody can access it.
4764 mutex_lock(&parent_ctx
->mutex
);
4767 * We dont have to disable NMIs - we are only looking at
4768 * the list, not manipulating it:
4770 list_for_each_entry(event
, &parent_ctx
->group_list
, group_entry
) {
4772 if (!event
->attr
.inherit
) {
4777 ret
= inherit_group(event
, parent
, parent_ctx
,
4785 if (inherited_all
) {
4787 * Mark the child context as a clone of the parent
4788 * context, or of whatever the parent is a clone of.
4789 * Note that if the parent is a clone, it could get
4790 * uncloned at any point, but that doesn't matter
4791 * because the list of events and the generation
4792 * count can't have changed since we took the mutex.
4794 cloned_ctx
= rcu_dereference(parent_ctx
->parent_ctx
);
4796 child_ctx
->parent_ctx
= cloned_ctx
;
4797 child_ctx
->parent_gen
= parent_ctx
->parent_gen
;
4799 child_ctx
->parent_ctx
= parent_ctx
;
4800 child_ctx
->parent_gen
= parent_ctx
->generation
;
4802 get_ctx(child_ctx
->parent_ctx
);
4805 mutex_unlock(&parent_ctx
->mutex
);
4807 perf_unpin_context(parent_ctx
);
static void __cpuinit perf_event_init_cpu(int cpu)
{
	struct perf_cpu_context *cpuctx;

	cpuctx = &per_cpu(perf_cpu_context, cpu);
	__perf_event_init_context(&cpuctx->ctx, NULL);

	spin_lock(&perf_resource_lock);
	cpuctx->max_pertask = perf_max_events - perf_reserved_percpu;
	spin_unlock(&perf_resource_lock);

	hw_perf_event_setup(cpu);
}

#ifdef CONFIG_HOTPLUG_CPU
static void __perf_event_exit_cpu(void *info)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_event_context *ctx = &cpuctx->ctx;
	struct perf_event *event, *tmp;

	list_for_each_entry_safe(event, tmp, &ctx->group_list, group_entry)
		__perf_event_remove_from_context(event);
}
static void perf_event_exit_cpu(int cpu)
{
	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
	struct perf_event_context *ctx = &cpuctx->ctx;

	mutex_lock(&ctx->mutex);
	smp_call_function_single(cpu, __perf_event_exit_cpu, NULL, 1);
	mutex_unlock(&ctx->mutex);
}
#else
static inline void perf_event_exit_cpu(int cpu) { }
#endif

static int __cpuinit
perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
{
	unsigned int cpu = (long)hcpu;

	switch (action) {

	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		perf_event_init_cpu(cpu);
		break;

	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		hw_perf_event_setup_online(cpu);
		break;

	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		perf_event_exit_cpu(cpu);
		break;

	default:
		break;
	}

	return NOTIFY_OK;
}

/*
 * This has to have a higher priority than migration_notifier in sched.c.
 */
static struct notifier_block __cpuinitdata perf_cpu_nb = {
	.notifier_call		= perf_cpu_notify,
	.priority		= 20,
};

void __init perf_event_init(void)
{
	perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE,
			(void *)(long)smp_processor_id());
	perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_ONLINE,
			(void *)(long)smp_processor_id());
	register_cpu_notifier(&perf_cpu_nb);
}
static ssize_t perf_show_reserve_percpu(struct sysdev_class *class, char *buf)
{
	return sprintf(buf, "%d\n", perf_reserved_percpu);
}

static ssize_t
perf_set_reserve_percpu(struct sysdev_class *class,
			const char *buf,
			size_t count)
{
	struct perf_cpu_context *cpuctx;
	unsigned long val;
	int err, cpu, mpt;

	err = strict_strtoul(buf, 10, &val);
	if (err)
		return err;
	if (val > perf_max_events)
		return -EINVAL;

	spin_lock(&perf_resource_lock);
	perf_reserved_percpu = val;
	for_each_online_cpu(cpu) {
		cpuctx = &per_cpu(perf_cpu_context, cpu);
		spin_lock_irq(&cpuctx->ctx.lock);
		mpt = min(perf_max_events - cpuctx->ctx.nr_events,
			  perf_max_events - perf_reserved_percpu);
		cpuctx->max_pertask = mpt;
		spin_unlock_irq(&cpuctx->ctx.lock);
	}
	spin_unlock(&perf_resource_lock);

	return count;
}

static ssize_t perf_show_overcommit(struct sysdev_class *class, char *buf)
{
	return sprintf(buf, "%d\n", perf_overcommit);
}

static ssize_t
perf_set_overcommit(struct sysdev_class *class, const char *buf, size_t count)
{
	unsigned long val;
	int err;

	err = strict_strtoul(buf, 10, &val);
	if (err)
		return err;
	if (val > 1)
		return -EINVAL;

	spin_lock(&perf_resource_lock);
	perf_overcommit = val;
	spin_unlock(&perf_resource_lock);

	return count;
}

static SYSDEV_CLASS_ATTR(
				reserve_percpu,
				0644,
				perf_show_reserve_percpu,
				perf_set_reserve_percpu
			);

static SYSDEV_CLASS_ATTR(
				overcommit,
				0644,
				perf_show_overcommit,
				perf_set_overcommit
			);

static struct attribute *perfclass_attrs[] = {
	&attr_reserve_percpu.attr,
	&attr_overcommit.attr,
	NULL
};

static struct attribute_group perfclass_attr_group = {
	.attrs			= perfclass_attrs,
	.name			= "perf_events",
};

static int __init perf_event_sysfs_init(void)
{
	return sysfs_create_group(&cpu_sysdev_class.kset.kobj,
				  &perfclass_attr_group);
}
device_initcall(perf_event_sysfs_init);