/*
 * Performance events core code:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *  Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * For licensing details see kernel-base/COPYING
 */
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/dcache.h>
#include <linux/percpu.h>
#include <linux/ptrace.h>
#include <linux/vmstat.h>
#include <linux/vmalloc.h>
#include <linux/hardirq.h>
#include <linux/rculist.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>
#include <linux/anon_inodes.h>
#include <linux/kernel_stat.h>
#include <linux/perf_event.h>
#include <linux/ftrace_event.h>
#include <linux/hw_breakpoint.h>

#include <asm/irq_regs.h>
/*
 * Each CPU has a list of per CPU events:
 */
static DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context);

int perf_max_events __read_mostly = 1;
static int perf_reserved_percpu __read_mostly;
static int perf_overcommit __read_mostly = 1;

static atomic_t nr_events __read_mostly;
static atomic_t nr_mmap_events __read_mostly;
static atomic_t nr_comm_events __read_mostly;
static atomic_t nr_task_events __read_mostly;
/*
 * perf event paranoia level:
 *  -1 - not paranoid at all
 *   0 - disallow raw tracepoint access for unpriv
 *   1 - disallow cpu events for unpriv
 *   2 - disallow kernel profiling for unpriv
 */
int sysctl_perf_event_paranoid __read_mostly = 1;

int sysctl_perf_event_mlock __read_mostly = 512; /* 'free' kb per user */

/*
 * max perf event sample rate
 */
int sysctl_perf_event_sample_rate __read_mostly = 100000;
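/*
 * Illustration (added commentary, not part of the original file): the
 * paranoia level above is consulted through small helpers, roughly of
 * this shape -- a minimal sketch, assuming the usual accessor names:
 *
 *	static inline bool perf_paranoid_cpu(void)
 *	{
 *		return sysctl_perf_event_paranoid > 0;
 *	}
 *	static inline bool perf_paranoid_kernel(void)
 *	{
 *		return sysctl_perf_event_paranoid > 1;
 *	}
 *
 * find_get_context() below, for example, uses perf_paranoid_cpu() to
 * require CAP_SYS_ADMIN for CPU-wide events when the level is above 0.
 */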
static atomic64_t perf_event_id;

/*
 * Lock for (sysadmin-configurable) event reservations:
 */
static DEFINE_SPINLOCK(perf_resource_lock);
/*
 * Architecture provided APIs - weak aliases:
 */
extern __weak const struct pmu *hw_perf_event_init(struct perf_event *event)
{
	return NULL;
}

void __weak hw_perf_disable(void)	{ barrier(); }
void __weak hw_perf_enable(void)	{ barrier(); }

int __weak
hw_perf_group_sched_in(struct perf_event *group_leader,
		       struct perf_cpu_context *cpuctx,
		       struct perf_event_context *ctx)
{
	return 0;
}

void __weak perf_event_print_debug(void)	{ }

static DEFINE_PER_CPU(int, perf_disable_count);
void perf_disable(void)
{
	if (!__get_cpu_var(perf_disable_count)++)
		hw_perf_disable();
}

void perf_enable(void)
{
	if (!--__get_cpu_var(perf_disable_count))
		hw_perf_enable();
}
static void get_ctx(struct perf_event_context *ctx)
{
	WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
}

static void free_ctx(struct rcu_head *head)
{
	struct perf_event_context *ctx;

	ctx = container_of(head, struct perf_event_context, rcu_head);
	kfree(ctx);
}

static void put_ctx(struct perf_event_context *ctx)
{
	if (atomic_dec_and_test(&ctx->refcount)) {
		if (ctx->parent_ctx)
			put_ctx(ctx->parent_ctx);
		if (ctx->task)
			put_task_struct(ctx->task);
		call_rcu(&ctx->rcu_head, free_ctx);
	}
}
static void unclone_ctx(struct perf_event_context *ctx)
{
	if (ctx->parent_ctx) {
		put_ctx(ctx->parent_ctx);
		ctx->parent_ctx = NULL;
	}
}

/*
 * If we inherit events we want to return the parent event id
 * to userspace.
 */
static u64 primary_event_id(struct perf_event *event)
{
	u64 id = event->id;

	if (event->parent)
		id = event->parent->id;

	return id;
}
/*
 * Get the perf_event_context for a task and lock it.
 * This has to cope with the fact that until it is locked,
 * the context could get moved to another task.
 */
static struct perf_event_context *
perf_lock_task_context(struct task_struct *task, unsigned long *flags)
{
	struct perf_event_context *ctx;

	rcu_read_lock();
retry:
	ctx = rcu_dereference(task->perf_event_ctxp);
	if (ctx) {
		/*
		 * If this context is a clone of another, it might
		 * get swapped for another underneath us by
		 * perf_event_task_sched_out, though the
		 * rcu_read_lock() protects us from any context
		 * getting freed. Lock the context and check if it
		 * got swapped before we could get the lock, and retry
		 * if so. If we locked the right context, then it
		 * can't get swapped on us any more.
		 */
		raw_spin_lock_irqsave(&ctx->lock, *flags);
		if (ctx != rcu_dereference(task->perf_event_ctxp)) {
			raw_spin_unlock_irqrestore(&ctx->lock, *flags);
			goto retry;
		}

		if (!atomic_inc_not_zero(&ctx->refcount)) {
			raw_spin_unlock_irqrestore(&ctx->lock, *flags);
			ctx = NULL;
		}
	}
	rcu_read_unlock();
	return ctx;
}
/*
 * Get the context for a task and increment its pin_count so it
 * can't get swapped to another task. This also increments its
 * reference count so that the context can't get freed.
 */
static struct perf_event_context *perf_pin_task_context(struct task_struct *task)
{
	struct perf_event_context *ctx;
	unsigned long flags;

	ctx = perf_lock_task_context(task, &flags);
	if (ctx) {
		++ctx->pin_count;
		raw_spin_unlock_irqrestore(&ctx->lock, flags);
	}
	return ctx;
}

static void perf_unpin_context(struct perf_event_context *ctx)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&ctx->lock, flags);
	--ctx->pin_count;
	raw_spin_unlock_irqrestore(&ctx->lock, flags);
	put_ctx(ctx);
}
static inline u64 perf_clock(void)
{
	return cpu_clock(raw_smp_processor_id());
}

/*
 * Update the record of the current time in a context.
 */
static void update_context_time(struct perf_event_context *ctx)
{
	u64 now = perf_clock();

	ctx->time += now - ctx->timestamp;
	ctx->timestamp = now;
}
/*
 * Update the total_time_enabled and total_time_running fields for an event.
 */
static void update_event_times(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	u64 run_end;

	if (event->state < PERF_EVENT_STATE_INACTIVE ||
	    event->group_leader->state < PERF_EVENT_STATE_INACTIVE)
		return;

	run_end = event->tstamp_stopped;
	event->total_time_enabled = run_end - event->tstamp_enabled;

	if (event->state == PERF_EVENT_STATE_INACTIVE)
		run_end = event->tstamp_stopped;

	event->total_time_running = run_end - event->tstamp_running;
}
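/*
 * Note (added for illustration): total_time_enabled accumulates the time
 * since the event was added to the context, while total_time_running only
 * accumulates the time it was actually programmed on the PMU. Userspace
 * tooling typically scales the raw count by running/enabled to estimate
 * what the count would have been had the event not been multiplexed.
 */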
/*
 * Update total_time_enabled and total_time_running for all events in a group.
 */
static void update_group_times(struct perf_event *leader)
{
	struct perf_event *event;

	update_event_times(leader);
	list_for_each_entry(event, &leader->sibling_list, group_entry)
		update_event_times(event);
}
static struct list_head *
ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
{
	if (event->attr.pinned)
		return &ctx->pinned_groups;

	return &ctx->flexible_groups;
}
/*
 * Add an event to the lists for its context.
 * Must be called with ctx->mutex and ctx->lock held.
 */
static void
list_add_event(struct perf_event *event, struct perf_event_context *ctx)
{
	struct perf_event *group_leader = event->group_leader;

	/*
	 * Depending on whether it is a standalone or sibling event,
	 * add it straight to the context's event list, or to the group
	 * leader's sibling list:
	 */
	if (group_leader == event) {
		struct list_head *list;

		if (is_software_event(event))
			event->group_flags |= PERF_GROUP_SOFTWARE;

		list = ctx_group_list(event, ctx);
		list_add_tail(&event->group_entry, list);
	} else {
		if (group_leader->group_flags & PERF_GROUP_SOFTWARE &&
		    !is_software_event(event))
			group_leader->group_flags &= ~PERF_GROUP_SOFTWARE;

		list_add_tail(&event->group_entry, &group_leader->sibling_list);
		group_leader->nr_siblings++;
	}

	list_add_rcu(&event->event_entry, &ctx->event_list);
	ctx->nr_events++;
	if (event->attr.inherit_stat)
		ctx->nr_stat++;
}
/*
 * Remove an event from the lists for its context.
 * Must be called with ctx->mutex and ctx->lock held.
 */
static void
list_del_event(struct perf_event *event, struct perf_event_context *ctx)
{
	if (list_empty(&event->group_entry))
		return;
	ctx->nr_events--;
	if (event->attr.inherit_stat)
		ctx->nr_stat--;

	list_del_init(&event->group_entry);
	list_del_rcu(&event->event_entry);

	if (event->group_leader != event)
		event->group_leader->nr_siblings--;

	update_group_times(event);

	/*
	 * If event was in error state, then keep it
	 * that way, otherwise bogus counts will be
	 * returned on read(). The only way to get out
	 * of error state is by explicit re-enabling
	 * of the event.
	 */
	if (event->state > PERF_EVENT_STATE_OFF)
		event->state = PERF_EVENT_STATE_OFF;
}
static void
perf_destroy_group(struct perf_event *event, struct perf_event_context *ctx)
{
	struct perf_event *sibling, *tmp;

	/*
	 * If this was a group event with sibling events then
	 * upgrade the siblings to singleton events by adding them
	 * to the context list directly:
	 */
	list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) {
		struct list_head *list;

		list = ctx_group_list(event, ctx);
		list_move_tail(&sibling->group_entry, list);
		sibling->group_leader = sibling;

		/* Inherit group flags from the previous leader */
		sibling->group_flags = event->group_flags;
	}
}
static void
event_sched_out(struct perf_event *event,
		struct perf_cpu_context *cpuctx,
		struct perf_event_context *ctx)
{
	if (event->state != PERF_EVENT_STATE_ACTIVE)
		return;

	event->state = PERF_EVENT_STATE_INACTIVE;
	if (event->pending_disable) {
		event->pending_disable = 0;
		event->state = PERF_EVENT_STATE_OFF;
	}
	event->tstamp_stopped = ctx->time;
	event->pmu->disable(event);

	if (!is_software_event(event))
		cpuctx->active_oncpu--;
	if (event->attr.exclusive || !cpuctx->active_oncpu)
		cpuctx->exclusive = 0;
}
static void
group_sched_out(struct perf_event *group_event,
		struct perf_cpu_context *cpuctx,
		struct perf_event_context *ctx)
{
	struct perf_event *event;

	if (group_event->state != PERF_EVENT_STATE_ACTIVE)
		return;

	event_sched_out(group_event, cpuctx, ctx);

	/*
	 * Schedule out siblings (if any):
	 */
	list_for_each_entry(event, &group_event->sibling_list, group_entry)
		event_sched_out(event, cpuctx, ctx);

	if (group_event->attr.exclusive)
		cpuctx->exclusive = 0;
}
/*
 * Cross CPU call to remove a performance event
 *
 * We disable the event on the hardware level first. After that we
 * remove it from the context list.
 */
static void __perf_event_remove_from_context(void *info)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_event *event = info;
	struct perf_event_context *ctx = event->ctx;

	/*
	 * If this is a task context, we need to check whether it is
	 * the current task context of this cpu. If not it has been
	 * scheduled out before the smp call arrived.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)
		return;

	raw_spin_lock(&ctx->lock);
	/*
	 * Protect the list operation against NMI by disabling the
	 * events on a global level.
	 */
	perf_disable();

	event_sched_out(event, cpuctx, ctx);

	list_del_event(event, ctx);

	if (!ctx->task) {
		/*
		 * Allow more per task events with respect to the
		 * reservation:
		 */
		cpuctx->max_pertask =
			min(perf_max_events - ctx->nr_events,
			    perf_max_events - perf_reserved_percpu);
	}

	perf_enable();
	raw_spin_unlock(&ctx->lock);
}
/*
 * Remove the event from a task's (or a CPU's) list of events.
 *
 * Must be called with ctx->mutex held.
 *
 * CPU events are removed with a smp call. For task events we only
 * call when the task is on a CPU.
 *
 * If event->ctx is a cloned context, callers must make sure that
 * every task struct that event->ctx->task could possibly point to
 * remains valid. This is OK when called from perf_release since
 * that only calls us on the top-level context, which can't be a clone.
 * When called from perf_event_exit_task, it's OK because the
 * context has been detached from its task.
 */
static void perf_event_remove_from_context(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Per cpu events are removed via an smp call and
		 * the removal is always successful.
		 */
		smp_call_function_single(event->cpu,
					 __perf_event_remove_from_context,
					 event, 1);
		return;
	}

retry:
	task_oncpu_function_call(task, __perf_event_remove_from_context,
				 event);

	raw_spin_lock_irq(&ctx->lock);
	/*
	 * If the context is active we need to retry the smp call.
	 */
	if (ctx->nr_active && !list_empty(&event->group_entry)) {
		raw_spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * The lock prevents this context from being scheduled in, so we
	 * can remove the event safely, if the call above did not
	 * succeed.
	 */
	if (!list_empty(&event->group_entry))
		list_del_event(event, ctx);
	raw_spin_unlock_irq(&ctx->lock);
}
/*
 * Cross CPU call to disable a performance event
 */
static void __perf_event_disable(void *info)
{
	struct perf_event *event = info;
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_event_context *ctx = event->ctx;

	/*
	 * If this is a per-task event, we need to check whether this
	 * event's task is the current task on this cpu.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)
		return;

	raw_spin_lock(&ctx->lock);

	/*
	 * If the event is on, turn it off.
	 * If it is in error state, leave it in error state.
	 */
	if (event->state >= PERF_EVENT_STATE_INACTIVE) {
		update_context_time(ctx);
		update_group_times(event);
		if (event == event->group_leader)
			group_sched_out(event, cpuctx, ctx);
		else
			event_sched_out(event, cpuctx, ctx);
		event->state = PERF_EVENT_STATE_OFF;
	}

	raw_spin_unlock(&ctx->lock);
}
/*
 * If event->ctx is a cloned context, callers must make sure that
 * every task struct that event->ctx->task could possibly point to
 * remains valid. This condition is satisfied when called through
 * perf_event_for_each_child or perf_event_for_each because they
 * hold the top-level event's child_mutex, so any descendant that
 * goes to exit will block in sync_child_event.
 * When called from perf_pending_event it's OK because event->ctx
 * is the current context on this CPU and preemption is disabled,
 * hence we can't get into perf_event_task_sched_out for this context.
 */
void perf_event_disable(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Disable the event on the cpu that it's on
		 */
		smp_call_function_single(event->cpu, __perf_event_disable,
					 event, 1);
		return;
	}

retry:
	task_oncpu_function_call(task, __perf_event_disable, event);

	raw_spin_lock_irq(&ctx->lock);
	/*
	 * If the event is still active, we need to retry the cross-call.
	 */
	if (event->state == PERF_EVENT_STATE_ACTIVE) {
		raw_spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * Since we have the lock this context can't be scheduled
	 * in, so we can change the state safely.
	 */
	if (event->state == PERF_EVENT_STATE_INACTIVE) {
		update_group_times(event);
		event->state = PERF_EVENT_STATE_OFF;
	}

	raw_spin_unlock_irq(&ctx->lock);
}
static int
event_sched_in(struct perf_event *event,
	       struct perf_cpu_context *cpuctx,
	       struct perf_event_context *ctx)
{
	if (event->state <= PERF_EVENT_STATE_OFF)
		return 0;

	event->state = PERF_EVENT_STATE_ACTIVE;
	event->oncpu = smp_processor_id();
	/*
	 * The new state must be visible before we turn it on in the hardware:
	 */
	smp_wmb();

	if (event->pmu->enable(event)) {
		event->state = PERF_EVENT_STATE_INACTIVE;
		return -EAGAIN;
	}

	event->tstamp_running += ctx->time - event->tstamp_stopped;

	if (!is_software_event(event))
		cpuctx->active_oncpu++;

	if (event->attr.exclusive)
		cpuctx->exclusive = 1;

	return 0;
}
static int
group_sched_in(struct perf_event *group_event,
	       struct perf_cpu_context *cpuctx,
	       struct perf_event_context *ctx)
{
	struct perf_event *event, *partial_group;
	int ret;

	if (group_event->state == PERF_EVENT_STATE_OFF)
		return 0;

	ret = hw_perf_group_sched_in(group_event, cpuctx, ctx);
	if (ret)
		return ret < 0 ? ret : 0;

	if (event_sched_in(group_event, cpuctx, ctx))
		return -EAGAIN;

	/*
	 * Schedule in siblings as one group (if any):
	 */
	list_for_each_entry(event, &group_event->sibling_list, group_entry) {
		if (event_sched_in(event, cpuctx, ctx)) {
			partial_group = event;
			goto group_error;
		}
	}

	return 0;

group_error:
	/*
	 * Groups can be scheduled in as one unit only, so undo any
	 * partial group before returning:
	 */
	list_for_each_entry(event, &group_event->sibling_list, group_entry) {
		if (event == partial_group)
			break;
		event_sched_out(event, cpuctx, ctx);
	}
	event_sched_out(group_event, cpuctx, ctx);

	return -EAGAIN;
}
/*
 * Work out whether we can put this event group on the CPU now.
 */
static int group_can_go_on(struct perf_event *event,
			   struct perf_cpu_context *cpuctx,
			   int can_add_hw)
{
	/*
	 * Groups consisting entirely of software events can always go on.
	 */
	if (event->group_flags & PERF_GROUP_SOFTWARE)
		return 1;
	/*
	 * If an exclusive group is already on, no other hardware
	 * events can go on.
	 */
	if (cpuctx->exclusive)
		return 0;
	/*
	 * If this group is exclusive and there are already
	 * events on the CPU, it can't go on.
	 */
	if (event->attr.exclusive && cpuctx->active_oncpu)
		return 0;
	/*
	 * Otherwise, try to add it if all previous groups were able
	 * to go on.
	 */
	return can_add_hw;
}
static void add_event_to_ctx(struct perf_event *event,
			     struct perf_event_context *ctx)
{
	list_add_event(event, ctx);
	event->tstamp_enabled = ctx->time;
	event->tstamp_running = ctx->time;
	event->tstamp_stopped = ctx->time;
}
/*
 * Cross CPU call to install and enable a performance event
 *
 * Must be called with ctx->mutex held
 */
732 static void __perf_install_in_context(void *info
)
734 struct perf_cpu_context
*cpuctx
= &__get_cpu_var(perf_cpu_context
);
735 struct perf_event
*event
= info
;
736 struct perf_event_context
*ctx
= event
->ctx
;
737 struct perf_event
*leader
= event
->group_leader
;
741 * If this is a task context, we need to check whether it is
742 * the current task context of this cpu. If not it has been
743 * scheduled out before the smp call arrived.
744 * Or possibly this is the right context but it isn't
745 * on this cpu because it had no events.
747 if (ctx
->task
&& cpuctx
->task_ctx
!= ctx
) {
748 if (cpuctx
->task_ctx
|| ctx
->task
!= current
)
750 cpuctx
->task_ctx
= ctx
;
753 raw_spin_lock(&ctx
->lock
);
755 update_context_time(ctx
);
758 * Protect the list operation against NMI by disabling the
759 * events on a global level. NOP for non NMI based events.
763 add_event_to_ctx(event
, ctx
);
765 if (event
->cpu
!= -1 && event
->cpu
!= smp_processor_id())
769 * Don't put the event on if it is disabled or if
770 * it is in a group and the group isn't on.
772 if (event
->state
!= PERF_EVENT_STATE_INACTIVE
||
773 (leader
!= event
&& leader
->state
!= PERF_EVENT_STATE_ACTIVE
))
777 * An exclusive event can't go on if there are already active
778 * hardware events, and no hardware event can go on if there
779 * is already an exclusive event on.
781 if (!group_can_go_on(event
, cpuctx
, 1))
784 err
= event_sched_in(event
, cpuctx
, ctx
);
788 * This event couldn't go on. If it is in a group
789 * then we have to pull the whole group off.
790 * If the event group is pinned then put it in error state.
793 group_sched_out(leader
, cpuctx
, ctx
);
794 if (leader
->attr
.pinned
) {
795 update_group_times(leader
);
796 leader
->state
= PERF_EVENT_STATE_ERROR
;
800 if (!err
&& !ctx
->task
&& cpuctx
->max_pertask
)
801 cpuctx
->max_pertask
--;
806 raw_spin_unlock(&ctx
->lock
);
/*
 * Attach a performance event to a context
 *
 * First we add the event to the list with the hardware enable bit
 * in event->hw_config cleared.
 *
 * If the event is attached to a task which is on a CPU we use an smp
 * call to enable it in the task context. The task might have been
 * scheduled away, but we check this in the smp call again.
 *
 * Must be called with ctx->mutex held.
 */
822 perf_install_in_context(struct perf_event_context
*ctx
,
823 struct perf_event
*event
,
826 struct task_struct
*task
= ctx
->task
;
830 * Per cpu events are installed via an smp call and
831 * the install is always successful.
833 smp_call_function_single(cpu
, __perf_install_in_context
,
839 task_oncpu_function_call(task
, __perf_install_in_context
,
842 raw_spin_lock_irq(&ctx
->lock
);
844 * we need to retry the smp call.
846 if (ctx
->is_active
&& list_empty(&event
->group_entry
)) {
847 raw_spin_unlock_irq(&ctx
->lock
);
	/*
	 * The lock prevents this context from being scheduled in, so we
	 * can add the event safely, if the call above did not
	 * succeed.
	 */
856 if (list_empty(&event
->group_entry
))
857 add_event_to_ctx(event
, ctx
);
858 raw_spin_unlock_irq(&ctx
->lock
);
/*
 * Put an event into inactive state and update time fields.
 * Enabling the leader of a group effectively enables all
 * the group members that aren't explicitly disabled, so we
 * have to update their ->tstamp_enabled also.
 * Note: this works for group members as well as group leaders
 * since the non-leader members' sibling_lists will be empty.
 */
869 static void __perf_event_mark_enabled(struct perf_event
*event
,
870 struct perf_event_context
*ctx
)
872 struct perf_event
*sub
;
874 event
->state
= PERF_EVENT_STATE_INACTIVE
;
875 event
->tstamp_enabled
= ctx
->time
- event
->total_time_enabled
;
876 list_for_each_entry(sub
, &event
->sibling_list
, group_entry
)
877 if (sub
->state
>= PERF_EVENT_STATE_INACTIVE
)
878 sub
->tstamp_enabled
=
879 ctx
->time
- sub
->total_time_enabled
;
883 * Cross CPU call to enable a performance event
885 static void __perf_event_enable(void *info
)
887 struct perf_event
*event
= info
;
888 struct perf_cpu_context
*cpuctx
= &__get_cpu_var(perf_cpu_context
);
889 struct perf_event_context
*ctx
= event
->ctx
;
890 struct perf_event
*leader
= event
->group_leader
;
894 * If this is a per-task event, need to check whether this
895 * event's task is the current task on this cpu.
897 if (ctx
->task
&& cpuctx
->task_ctx
!= ctx
) {
898 if (cpuctx
->task_ctx
|| ctx
->task
!= current
)
900 cpuctx
->task_ctx
= ctx
;
903 raw_spin_lock(&ctx
->lock
);
905 update_context_time(ctx
);
907 if (event
->state
>= PERF_EVENT_STATE_INACTIVE
)
909 __perf_event_mark_enabled(event
, ctx
);
911 if (event
->cpu
!= -1 && event
->cpu
!= smp_processor_id())
915 * If the event is in a group and isn't the group leader,
916 * then don't put it on unless the group is on.
918 if (leader
!= event
&& leader
->state
!= PERF_EVENT_STATE_ACTIVE
)
921 if (!group_can_go_on(event
, cpuctx
, 1)) {
926 err
= group_sched_in(event
, cpuctx
, ctx
);
928 err
= event_sched_in(event
, cpuctx
, ctx
);
934 * If this event can't go on and it's part of a
935 * group, then the whole group has to come off.
938 group_sched_out(leader
, cpuctx
, ctx
);
939 if (leader
->attr
.pinned
) {
940 update_group_times(leader
);
941 leader
->state
= PERF_EVENT_STATE_ERROR
;
946 raw_spin_unlock(&ctx
->lock
);
952 * If event->ctx is a cloned context, callers must make sure that
953 * every task struct that event->ctx->task could possibly point to
954 * remains valid. This condition is satisfied when called through
955 * perf_event_for_each_child or perf_event_for_each as described
956 * for perf_event_disable.
958 void perf_event_enable(struct perf_event
*event
)
960 struct perf_event_context
*ctx
= event
->ctx
;
961 struct task_struct
*task
= ctx
->task
;
965 * Enable the event on the cpu that it's on
967 smp_call_function_single(event
->cpu
, __perf_event_enable
,
972 raw_spin_lock_irq(&ctx
->lock
);
973 if (event
->state
>= PERF_EVENT_STATE_INACTIVE
)
977 * If the event is in error state, clear that first.
978 * That way, if we see the event in error state below, we
979 * know that it has gone back into error state, as distinct
980 * from the task having been scheduled away before the
981 * cross-call arrived.
983 if (event
->state
== PERF_EVENT_STATE_ERROR
)
984 event
->state
= PERF_EVENT_STATE_OFF
;
987 raw_spin_unlock_irq(&ctx
->lock
);
988 task_oncpu_function_call(task
, __perf_event_enable
, event
);
990 raw_spin_lock_irq(&ctx
->lock
);
993 * If the context is active and the event is still off,
994 * we need to retry the cross-call.
996 if (ctx
->is_active
&& event
->state
== PERF_EVENT_STATE_OFF
)
1000 * Since we have the lock this context can't be scheduled
1001 * in, so we can change the state safely.
1003 if (event
->state
== PERF_EVENT_STATE_OFF
)
1004 __perf_event_mark_enabled(event
, ctx
);
1007 raw_spin_unlock_irq(&ctx
->lock
);
1010 static int perf_event_refresh(struct perf_event
*event
, int refresh
)
1013 * not supported on inherited events
1015 if (event
->attr
.inherit
)
1018 atomic_add(refresh
, &event
->event_limit
);
1019 perf_event_enable(event
);
enum event_type_t {
	EVENT_FLEXIBLE = 0x1,
	EVENT_PINNED = 0x2,
	EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
};
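/*
 * Note (added commentary): ctx_sched_out()/ctx_sched_in() below take this
 * mask so callers can schedule out or in only the pinned groups, only the
 * flexible groups, or both; perf_event_task_sched_in() relies on that to
 * keep the priority order "cpu pinned, task pinned, cpu flexible, task
 * flexible" described further down.
 */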
1030 static void ctx_sched_out(struct perf_event_context
*ctx
,
1031 struct perf_cpu_context
*cpuctx
,
1032 enum event_type_t event_type
)
1034 struct perf_event
*event
;
1036 raw_spin_lock(&ctx
->lock
);
1038 if (likely(!ctx
->nr_events
))
1040 update_context_time(ctx
);
1043 if (!ctx
->nr_active
)
1046 if (event_type
& EVENT_PINNED
)
1047 list_for_each_entry(event
, &ctx
->pinned_groups
, group_entry
)
1048 group_sched_out(event
, cpuctx
, ctx
);
1050 if (event_type
& EVENT_FLEXIBLE
)
1051 list_for_each_entry(event
, &ctx
->flexible_groups
, group_entry
)
1052 group_sched_out(event
, cpuctx
, ctx
);
1057 raw_spin_unlock(&ctx
->lock
);
1061 * Test whether two contexts are equivalent, i.e. whether they
1062 * have both been cloned from the same version of the same context
1063 * and they both have the same number of enabled events.
1064 * If the number of enabled events is the same, then the set
1065 * of enabled events should be the same, because these are both
1066 * inherited contexts, therefore we can't access individual events
1067 * in them directly with an fd; we can only enable/disable all
1068 * events via prctl, or enable/disable all events in a family
1069 * via ioctl, which will have the same effect on both contexts.
1071 static int context_equiv(struct perf_event_context
*ctx1
,
1072 struct perf_event_context
*ctx2
)
1074 return ctx1
->parent_ctx
&& ctx1
->parent_ctx
== ctx2
->parent_ctx
1075 && ctx1
->parent_gen
== ctx2
->parent_gen
1076 && !ctx1
->pin_count
&& !ctx2
->pin_count
;
1079 static void __perf_event_sync_stat(struct perf_event
*event
,
1080 struct perf_event
*next_event
)
1084 if (!event
->attr
.inherit_stat
)
1088 * Update the event value, we cannot use perf_event_read()
1089 * because we're in the middle of a context switch and have IRQs
1090 * disabled, which upsets smp_call_function_single(), however
1091 * we know the event must be on the current CPU, therefore we
1092 * don't need to use it.
1094 switch (event
->state
) {
1095 case PERF_EVENT_STATE_ACTIVE
:
1096 event
->pmu
->read(event
);
1099 case PERF_EVENT_STATE_INACTIVE
:
1100 update_event_times(event
);
1108 * In order to keep per-task stats reliable we need to flip the event
1109 * values when we flip the contexts.
1111 value
= atomic64_read(&next_event
->count
);
1112 value
= atomic64_xchg(&event
->count
, value
);
1113 atomic64_set(&next_event
->count
, value
);
1115 swap(event
->total_time_enabled
, next_event
->total_time_enabled
);
1116 swap(event
->total_time_running
, next_event
->total_time_running
);
1119 * Since we swizzled the values, update the user visible data too.
1121 perf_event_update_userpage(event
);
1122 perf_event_update_userpage(next_event
);
#define list_next_entry(pos, member) \
	list_entry(pos->member.next, typeof(*pos), member)
1128 static void perf_event_sync_stat(struct perf_event_context
*ctx
,
1129 struct perf_event_context
*next_ctx
)
1131 struct perf_event
*event
, *next_event
;
1136 update_context_time(ctx
);
1138 event
= list_first_entry(&ctx
->event_list
,
1139 struct perf_event
, event_entry
);
1141 next_event
= list_first_entry(&next_ctx
->event_list
,
1142 struct perf_event
, event_entry
);
1144 while (&event
->event_entry
!= &ctx
->event_list
&&
1145 &next_event
->event_entry
!= &next_ctx
->event_list
) {
1147 __perf_event_sync_stat(event
, next_event
);
1149 event
= list_next_entry(event
, event_entry
);
1150 next_event
= list_next_entry(next_event
, event_entry
);
/*
 * Called from scheduler to remove the events of the current task,
 * with interrupts disabled.
 *
 * We stop each event and update the event value in event->count.
 *
 * This does not protect us against NMI, but disable()
 * sets the disabled bit in the control field of event _before_
 * accessing the event control register. If an NMI hits, then it will
 * not restart the event.
 */
1165 void perf_event_task_sched_out(struct task_struct
*task
,
1166 struct task_struct
*next
)
1168 struct perf_cpu_context
*cpuctx
= &__get_cpu_var(perf_cpu_context
);
1169 struct perf_event_context
*ctx
= task
->perf_event_ctxp
;
1170 struct perf_event_context
*next_ctx
;
1171 struct perf_event_context
*parent
;
1174 perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES
, 1, 1, NULL
, 0);
1176 if (likely(!ctx
|| !cpuctx
->task_ctx
))
1180 parent
= rcu_dereference(ctx
->parent_ctx
);
1181 next_ctx
= next
->perf_event_ctxp
;
1182 if (parent
&& next_ctx
&&
1183 rcu_dereference(next_ctx
->parent_ctx
) == parent
) {
1185 * Looks like the two contexts are clones, so we might be
1186 * able to optimize the context switch. We lock both
1187 * contexts and check that they are clones under the
1188 * lock (including re-checking that neither has been
1189 * uncloned in the meantime). It doesn't matter which
1190 * order we take the locks because no other cpu could
1191 * be trying to lock both of these tasks.
1193 raw_spin_lock(&ctx
->lock
);
1194 raw_spin_lock_nested(&next_ctx
->lock
, SINGLE_DEPTH_NESTING
);
1195 if (context_equiv(ctx
, next_ctx
)) {
1197 * XXX do we need a memory barrier of sorts
1198 * wrt to rcu_dereference() of perf_event_ctxp
1200 task
->perf_event_ctxp
= next_ctx
;
1201 next
->perf_event_ctxp
= ctx
;
1203 next_ctx
->task
= task
;
1206 perf_event_sync_stat(ctx
, next_ctx
);
1208 raw_spin_unlock(&next_ctx
->lock
);
1209 raw_spin_unlock(&ctx
->lock
);
1214 ctx_sched_out(ctx
, cpuctx
, EVENT_ALL
);
1215 cpuctx
->task_ctx
= NULL
;
1219 static void task_ctx_sched_out(struct perf_event_context
*ctx
,
1220 enum event_type_t event_type
)
1222 struct perf_cpu_context
*cpuctx
= &__get_cpu_var(perf_cpu_context
);
1224 if (!cpuctx
->task_ctx
)
1227 if (WARN_ON_ONCE(ctx
!= cpuctx
->task_ctx
))
1230 ctx_sched_out(ctx
, cpuctx
, event_type
);
1231 cpuctx
->task_ctx
= NULL
;
1235 * Called with IRQs disabled
1237 static void __perf_event_task_sched_out(struct perf_event_context
*ctx
)
1239 task_ctx_sched_out(ctx
, EVENT_ALL
);
1243 * Called with IRQs disabled
1245 static void cpu_ctx_sched_out(struct perf_cpu_context
*cpuctx
,
1246 enum event_type_t event_type
)
1248 ctx_sched_out(&cpuctx
->ctx
, cpuctx
, event_type
);
1252 ctx_pinned_sched_in(struct perf_event_context
*ctx
,
1253 struct perf_cpu_context
*cpuctx
)
1255 struct perf_event
*event
;
1257 list_for_each_entry(event
, &ctx
->pinned_groups
, group_entry
) {
1258 if (event
->state
<= PERF_EVENT_STATE_OFF
)
1260 if (event
->cpu
!= -1 && event
->cpu
!= smp_processor_id())
1263 if (group_can_go_on(event
, cpuctx
, 1))
1264 group_sched_in(event
, cpuctx
, ctx
);
1267 * If this pinned group hasn't been scheduled,
1268 * put it in error state.
1270 if (event
->state
== PERF_EVENT_STATE_INACTIVE
) {
1271 update_group_times(event
);
1272 event
->state
= PERF_EVENT_STATE_ERROR
;
1278 ctx_flexible_sched_in(struct perf_event_context
*ctx
,
1279 struct perf_cpu_context
*cpuctx
)
1281 struct perf_event
*event
;
1284 list_for_each_entry(event
, &ctx
->flexible_groups
, group_entry
) {
1285 /* Ignore events in OFF or ERROR state */
1286 if (event
->state
<= PERF_EVENT_STATE_OFF
)
1289 * Listen to the 'cpu' scheduling filter constraint
1292 if (event
->cpu
!= -1 && event
->cpu
!= smp_processor_id())
1295 if (group_can_go_on(event
, cpuctx
, can_add_hw
))
1296 if (group_sched_in(event
, cpuctx
, ctx
))
1302 ctx_sched_in(struct perf_event_context
*ctx
,
1303 struct perf_cpu_context
*cpuctx
,
1304 enum event_type_t event_type
)
1306 raw_spin_lock(&ctx
->lock
);
1308 if (likely(!ctx
->nr_events
))
1311 ctx
->timestamp
= perf_clock();
1316 * First go through the list and put on any pinned groups
1317 * in order to give them the best chance of going on.
1319 if (event_type
& EVENT_PINNED
)
1320 ctx_pinned_sched_in(ctx
, cpuctx
);
1322 /* Then walk through the lower prio flexible groups */
1323 if (event_type
& EVENT_FLEXIBLE
)
1324 ctx_flexible_sched_in(ctx
, cpuctx
);
1328 raw_spin_unlock(&ctx
->lock
);
1331 static void cpu_ctx_sched_in(struct perf_cpu_context
*cpuctx
,
1332 enum event_type_t event_type
)
1334 struct perf_event_context
*ctx
= &cpuctx
->ctx
;
1336 ctx_sched_in(ctx
, cpuctx
, event_type
);
1339 static void task_ctx_sched_in(struct task_struct
*task
,
1340 enum event_type_t event_type
)
1342 struct perf_cpu_context
*cpuctx
= &__get_cpu_var(perf_cpu_context
);
1343 struct perf_event_context
*ctx
= task
->perf_event_ctxp
;
1347 if (cpuctx
->task_ctx
== ctx
)
1349 ctx_sched_in(ctx
, cpuctx
, event_type
);
1350 cpuctx
->task_ctx
= ctx
;
/*
 * Called from scheduler to add the events of the current task
 * with interrupts disabled.
 *
 * We restore the event value and then enable it.
 *
 * This does not protect us against NMI, but enable()
 * sets the enabled bit in the control field of event _before_
 * accessing the event control register. If an NMI hits, then it will
 * keep the event running.
 */
1363 void perf_event_task_sched_in(struct task_struct
*task
)
1365 struct perf_cpu_context
*cpuctx
= &__get_cpu_var(perf_cpu_context
);
1366 struct perf_event_context
*ctx
= task
->perf_event_ctxp
;
1371 if (cpuctx
->task_ctx
== ctx
)
1375 * We want to keep the following priority order:
1376 * cpu pinned (that don't need to move), task pinned,
1377 * cpu flexible, task flexible.
1379 cpu_ctx_sched_out(cpuctx
, EVENT_FLEXIBLE
);
1381 ctx_sched_in(ctx
, cpuctx
, EVENT_PINNED
);
1382 cpu_ctx_sched_in(cpuctx
, EVENT_FLEXIBLE
);
1383 ctx_sched_in(ctx
, cpuctx
, EVENT_FLEXIBLE
);
1385 cpuctx
->task_ctx
= ctx
;
1388 #define MAX_INTERRUPTS (~0ULL)
1390 static void perf_log_throttle(struct perf_event
*event
, int enable
);
1392 static u64
perf_calculate_period(struct perf_event
*event
, u64 nsec
, u64 count
)
1394 u64 frequency
= event
->attr
.sample_freq
;
1395 u64 sec
= NSEC_PER_SEC
;
1396 u64 divisor
, dividend
;
1398 int count_fls
, nsec_fls
, frequency_fls
, sec_fls
;
1400 count_fls
= fls64(count
);
1401 nsec_fls
= fls64(nsec
);
1402 frequency_fls
= fls64(frequency
);
	/*
	 * We got @count in @nsec, with a target of sample_freq HZ
	 * the target period becomes:
	 *
	 *             @count * 10^9
	 * period = -------------------
	 *          @nsec * sample_freq
	 */
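	/*
	 * Worked example (added for illustration): if the event counted
	 * 2,000,000 events over the last 4ms tick (@count = 2e6,
	 * @nsec = 4e6) and sample_freq is 1000 HZ, the target period is
	 * 2e6 * 1e9 / (4e6 * 1000) = 500,000 events per sample.
	 */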
1416 * Reduce accuracy by one bit such that @a and @b converge
1417 * to a similar magnitude.
1419 #define REDUCE_FLS(a, b) \
1421 if (a##_fls > b##_fls) { \
1431 * Reduce accuracy until either term fits in a u64, then proceed with
1432 * the other, so that finally we can do a u64/u64 division.
1434 while (count_fls
+ sec_fls
> 64 && nsec_fls
+ frequency_fls
> 64) {
1435 REDUCE_FLS(nsec
, frequency
);
1436 REDUCE_FLS(sec
, count
);
1439 if (count_fls
+ sec_fls
> 64) {
1440 divisor
= nsec
* frequency
;
1442 while (count_fls
+ sec_fls
> 64) {
1443 REDUCE_FLS(count
, sec
);
1447 dividend
= count
* sec
;
1449 dividend
= count
* sec
;
1451 while (nsec_fls
+ frequency_fls
> 64) {
1452 REDUCE_FLS(nsec
, frequency
);
1456 divisor
= nsec
* frequency
;
1459 return div64_u64(dividend
, divisor
);
1462 static void perf_event_stop(struct perf_event
*event
)
1464 if (!event
->pmu
->stop
)
1465 return event
->pmu
->disable(event
);
1467 return event
->pmu
->stop(event
);
1470 static int perf_event_start(struct perf_event
*event
)
1472 if (!event
->pmu
->start
)
1473 return event
->pmu
->enable(event
);
1475 return event
->pmu
->start(event
);
1478 static void perf_adjust_period(struct perf_event
*event
, u64 nsec
, u64 count
)
1480 struct hw_perf_event
*hwc
= &event
->hw
;
1481 u64 period
, sample_period
;
1484 period
= perf_calculate_period(event
, nsec
, count
);
1486 delta
= (s64
)(period
- hwc
->sample_period
);
1487 delta
= (delta
+ 7) / 8; /* low pass filter */
1489 sample_period
= hwc
->sample_period
+ delta
;
1494 hwc
->sample_period
= sample_period
;
1496 if (atomic64_read(&hwc
->period_left
) > 8*sample_period
) {
1498 perf_event_stop(event
);
1499 atomic64_set(&hwc
->period_left
, 0);
1500 perf_event_start(event
);
1505 static void perf_ctx_adjust_freq(struct perf_event_context
*ctx
)
1507 struct perf_event
*event
;
1508 struct hw_perf_event
*hwc
;
1509 u64 interrupts
, now
;
1512 raw_spin_lock(&ctx
->lock
);
1513 list_for_each_entry_rcu(event
, &ctx
->event_list
, event_entry
) {
1514 if (event
->state
!= PERF_EVENT_STATE_ACTIVE
)
1517 if (event
->cpu
!= -1 && event
->cpu
!= smp_processor_id())
1522 interrupts
= hwc
->interrupts
;
1523 hwc
->interrupts
= 0;
1526 * unthrottle events on the tick
1528 if (interrupts
== MAX_INTERRUPTS
) {
1529 perf_log_throttle(event
, 1);
1531 event
->pmu
->unthrottle(event
);
1535 if (!event
->attr
.freq
|| !event
->attr
.sample_freq
)
1539 event
->pmu
->read(event
);
1540 now
= atomic64_read(&event
->count
);
1541 delta
= now
- hwc
->freq_count_stamp
;
1542 hwc
->freq_count_stamp
= now
;
1545 perf_adjust_period(event
, TICK_NSEC
, delta
);
1548 raw_spin_unlock(&ctx
->lock
);
1552 * Round-robin a context's events:
1554 static void rotate_ctx(struct perf_event_context
*ctx
)
1556 raw_spin_lock(&ctx
->lock
);
1558 /* Rotate the first entry last of non-pinned groups */
1559 list_rotate_left(&ctx
->flexible_groups
);
1561 raw_spin_unlock(&ctx
->lock
);
1564 void perf_event_task_tick(struct task_struct
*curr
)
1566 struct perf_cpu_context
*cpuctx
;
1567 struct perf_event_context
*ctx
;
1570 if (!atomic_read(&nr_events
))
1573 cpuctx
= &__get_cpu_var(perf_cpu_context
);
1574 if (cpuctx
->ctx
.nr_events
&&
1575 cpuctx
->ctx
.nr_events
!= cpuctx
->ctx
.nr_active
)
1578 ctx
= curr
->perf_event_ctxp
;
1579 if (ctx
&& ctx
->nr_events
&& ctx
->nr_events
!= ctx
->nr_active
)
1582 perf_ctx_adjust_freq(&cpuctx
->ctx
);
1584 perf_ctx_adjust_freq(ctx
);
1590 cpu_ctx_sched_out(cpuctx
, EVENT_FLEXIBLE
);
1592 task_ctx_sched_out(ctx
, EVENT_FLEXIBLE
);
1594 rotate_ctx(&cpuctx
->ctx
);
1598 cpu_ctx_sched_in(cpuctx
, EVENT_FLEXIBLE
);
1600 task_ctx_sched_in(curr
, EVENT_FLEXIBLE
);
1604 static int event_enable_on_exec(struct perf_event
*event
,
1605 struct perf_event_context
*ctx
)
1607 if (!event
->attr
.enable_on_exec
)
1610 event
->attr
.enable_on_exec
= 0;
1611 if (event
->state
>= PERF_EVENT_STATE_INACTIVE
)
1614 __perf_event_mark_enabled(event
, ctx
);
1620 * Enable all of a task's events that have been marked enable-on-exec.
1621 * This expects task == current.
1623 static void perf_event_enable_on_exec(struct task_struct
*task
)
1625 struct perf_event_context
*ctx
;
1626 struct perf_event
*event
;
1627 unsigned long flags
;
1631 local_irq_save(flags
);
1632 ctx
= task
->perf_event_ctxp
;
1633 if (!ctx
|| !ctx
->nr_events
)
1636 __perf_event_task_sched_out(ctx
);
1638 raw_spin_lock(&ctx
->lock
);
1640 list_for_each_entry(event
, &ctx
->pinned_groups
, group_entry
) {
1641 ret
= event_enable_on_exec(event
, ctx
);
1646 list_for_each_entry(event
, &ctx
->flexible_groups
, group_entry
) {
1647 ret
= event_enable_on_exec(event
, ctx
);
1653 * Unclone this context if we enabled any event.
1658 raw_spin_unlock(&ctx
->lock
);
1660 perf_event_task_sched_in(task
);
1662 local_irq_restore(flags
);
1666 * Cross CPU call to read the hardware event
1668 static void __perf_event_read(void *info
)
1670 struct perf_cpu_context
*cpuctx
= &__get_cpu_var(perf_cpu_context
);
1671 struct perf_event
*event
= info
;
1672 struct perf_event_context
*ctx
= event
->ctx
;
1675 * If this is a task context, we need to check whether it is
1676 * the current task context of this cpu. If not it has been
1677 * scheduled out before the smp call arrived. In that case
1678 * event->count would have been updated to a recent sample
1679 * when the event was scheduled out.
1681 if (ctx
->task
&& cpuctx
->task_ctx
!= ctx
)
1684 raw_spin_lock(&ctx
->lock
);
1685 update_context_time(ctx
);
1686 update_event_times(event
);
1687 raw_spin_unlock(&ctx
->lock
);
1689 event
->pmu
->read(event
);
1692 static u64
perf_event_read(struct perf_event
*event
)
1695 * If event is enabled and currently active on a CPU, update the
1696 * value in the event structure:
1698 if (event
->state
== PERF_EVENT_STATE_ACTIVE
) {
1699 smp_call_function_single(event
->oncpu
,
1700 __perf_event_read
, event
, 1);
1701 } else if (event
->state
== PERF_EVENT_STATE_INACTIVE
) {
1702 struct perf_event_context
*ctx
= event
->ctx
;
1703 unsigned long flags
;
1705 raw_spin_lock_irqsave(&ctx
->lock
, flags
);
1706 update_context_time(ctx
);
1707 update_event_times(event
);
1708 raw_spin_unlock_irqrestore(&ctx
->lock
, flags
);
1711 return atomic64_read(&event
->count
);
1715 * Initialize the perf_event context in a task_struct:
1718 __perf_event_init_context(struct perf_event_context
*ctx
,
1719 struct task_struct
*task
)
1721 raw_spin_lock_init(&ctx
->lock
);
1722 mutex_init(&ctx
->mutex
);
1723 INIT_LIST_HEAD(&ctx
->pinned_groups
);
1724 INIT_LIST_HEAD(&ctx
->flexible_groups
);
1725 INIT_LIST_HEAD(&ctx
->event_list
);
1726 atomic_set(&ctx
->refcount
, 1);
1730 static struct perf_event_context
*find_get_context(pid_t pid
, int cpu
)
1732 struct perf_event_context
*ctx
;
1733 struct perf_cpu_context
*cpuctx
;
1734 struct task_struct
*task
;
1735 unsigned long flags
;
1738 if (pid
== -1 && cpu
!= -1) {
1739 /* Must be root to operate on a CPU event: */
1740 if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN
))
1741 return ERR_PTR(-EACCES
);
1743 if (cpu
< 0 || cpu
>= nr_cpumask_bits
)
1744 return ERR_PTR(-EINVAL
);
	 * We could be clever and allow attaching an event to an
	 * offline CPU and activate it when the CPU comes up, but
1751 if (!cpu_online(cpu
))
1752 return ERR_PTR(-ENODEV
);
1754 cpuctx
= &per_cpu(perf_cpu_context
, cpu
);
1765 task
= find_task_by_vpid(pid
);
1767 get_task_struct(task
);
1771 return ERR_PTR(-ESRCH
);
1774 * Can't attach events to a dying task.
1777 if (task
->flags
& PF_EXITING
)
1780 /* Reuse ptrace permission checks for now. */
1782 if (!ptrace_may_access(task
, PTRACE_MODE_READ
))
1786 ctx
= perf_lock_task_context(task
, &flags
);
1789 raw_spin_unlock_irqrestore(&ctx
->lock
, flags
);
1793 ctx
= kzalloc(sizeof(struct perf_event_context
), GFP_KERNEL
);
1797 __perf_event_init_context(ctx
, task
);
1799 if (cmpxchg(&task
->perf_event_ctxp
, NULL
, ctx
)) {
1801 * We raced with some other task; use
1802 * the context they set.
1807 get_task_struct(task
);
1810 put_task_struct(task
);
1814 put_task_struct(task
);
1815 return ERR_PTR(err
);
1818 static void perf_event_free_filter(struct perf_event
*event
);
1820 static void free_event_rcu(struct rcu_head
*head
)
1822 struct perf_event
*event
;
1824 event
= container_of(head
, struct perf_event
, rcu_head
);
1826 put_pid_ns(event
->ns
);
1827 perf_event_free_filter(event
);
1831 static void perf_pending_sync(struct perf_event
*event
);
1833 static void free_event(struct perf_event
*event
)
1835 perf_pending_sync(event
);
1837 if (!event
->parent
) {
1838 atomic_dec(&nr_events
);
1839 if (event
->attr
.mmap
)
1840 atomic_dec(&nr_mmap_events
);
1841 if (event
->attr
.comm
)
1842 atomic_dec(&nr_comm_events
);
1843 if (event
->attr
.task
)
1844 atomic_dec(&nr_task_events
);
1847 if (event
->output
) {
1848 fput(event
->output
->filp
);
1849 event
->output
= NULL
;
1853 event
->destroy(event
);
1855 put_ctx(event
->ctx
);
1856 call_rcu(&event
->rcu_head
, free_event_rcu
);
1859 int perf_event_release_kernel(struct perf_event
*event
)
1861 struct perf_event_context
*ctx
= event
->ctx
;
1864 * Remove from the PMU, can't get re-enabled since we got
1865 * here because the last ref went.
1867 perf_event_disable(event
);
1869 WARN_ON_ONCE(ctx
->parent_ctx
);
1870 mutex_lock(&ctx
->mutex
);
1871 raw_spin_lock_irq(&ctx
->lock
);
1872 list_del_event(event
, ctx
);
1873 perf_destroy_group(event
, ctx
);
1874 raw_spin_unlock_irq(&ctx
->lock
);
1875 mutex_unlock(&ctx
->mutex
);
1877 mutex_lock(&event
->owner
->perf_event_mutex
);
1878 list_del_init(&event
->owner_entry
);
1879 mutex_unlock(&event
->owner
->perf_event_mutex
);
1880 put_task_struct(event
->owner
);
1886 EXPORT_SYMBOL_GPL(perf_event_release_kernel
);
1889 * Called when the last reference to the file is gone.
1891 static int perf_release(struct inode
*inode
, struct file
*file
)
1893 struct perf_event
*event
= file
->private_data
;
1895 file
->private_data
= NULL
;
1897 return perf_event_release_kernel(event
);
1900 static int perf_event_read_size(struct perf_event
*event
)
1902 int entry
= sizeof(u64
); /* value */
1906 if (event
->attr
.read_format
& PERF_FORMAT_TOTAL_TIME_ENABLED
)
1907 size
+= sizeof(u64
);
1909 if (event
->attr
.read_format
& PERF_FORMAT_TOTAL_TIME_RUNNING
)
1910 size
+= sizeof(u64
);
1912 if (event
->attr
.read_format
& PERF_FORMAT_ID
)
1913 entry
+= sizeof(u64
);
1915 if (event
->attr
.read_format
& PERF_FORMAT_GROUP
) {
1916 nr
+= event
->group_leader
->nr_siblings
;
1917 size
+= sizeof(u64
);
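/*
 * Illustration (added commentary): for a group leader read with
 * PERF_FORMAT_GROUP | PERF_FORMAT_ID | PERF_FORMAT_TOTAL_TIME_ENABLED,
 * perf_event_read_group() below fills the user buffer roughly as
 *
 *	{ nr, time_enabled, value0, id0, value1, id1, ... }
 *
 * all u64 values, which matches the size computed here:
 * sizeof(u64) * (2 + 2 * (1 + nr_siblings)) for that combination.
 */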
1925 u64
perf_event_read_value(struct perf_event
*event
, u64
*enabled
, u64
*running
)
1927 struct perf_event
*child
;
1933 mutex_lock(&event
->child_mutex
);
1934 total
+= perf_event_read(event
);
1935 *enabled
+= event
->total_time_enabled
+
1936 atomic64_read(&event
->child_total_time_enabled
);
1937 *running
+= event
->total_time_running
+
1938 atomic64_read(&event
->child_total_time_running
);
1940 list_for_each_entry(child
, &event
->child_list
, child_list
) {
1941 total
+= perf_event_read(child
);
1942 *enabled
+= child
->total_time_enabled
;
1943 *running
+= child
->total_time_running
;
1945 mutex_unlock(&event
->child_mutex
);
1949 EXPORT_SYMBOL_GPL(perf_event_read_value
);
1951 static int perf_event_read_group(struct perf_event
*event
,
1952 u64 read_format
, char __user
*buf
)
1954 struct perf_event
*leader
= event
->group_leader
, *sub
;
1955 int n
= 0, size
= 0, ret
= -EFAULT
;
1956 struct perf_event_context
*ctx
= leader
->ctx
;
1958 u64 count
, enabled
, running
;
1960 mutex_lock(&ctx
->mutex
);
1961 count
= perf_event_read_value(leader
, &enabled
, &running
);
1963 values
[n
++] = 1 + leader
->nr_siblings
;
1964 if (read_format
& PERF_FORMAT_TOTAL_TIME_ENABLED
)
1965 values
[n
++] = enabled
;
1966 if (read_format
& PERF_FORMAT_TOTAL_TIME_RUNNING
)
1967 values
[n
++] = running
;
1968 values
[n
++] = count
;
1969 if (read_format
& PERF_FORMAT_ID
)
1970 values
[n
++] = primary_event_id(leader
);
1972 size
= n
* sizeof(u64
);
1974 if (copy_to_user(buf
, values
, size
))
1979 list_for_each_entry(sub
, &leader
->sibling_list
, group_entry
) {
1982 values
[n
++] = perf_event_read_value(sub
, &enabled
, &running
);
1983 if (read_format
& PERF_FORMAT_ID
)
1984 values
[n
++] = primary_event_id(sub
);
1986 size
= n
* sizeof(u64
);
1988 if (copy_to_user(buf
+ ret
, values
, size
)) {
1996 mutex_unlock(&ctx
->mutex
);
2001 static int perf_event_read_one(struct perf_event
*event
,
2002 u64 read_format
, char __user
*buf
)
2004 u64 enabled
, running
;
2008 values
[n
++] = perf_event_read_value(event
, &enabled
, &running
);
2009 if (read_format
& PERF_FORMAT_TOTAL_TIME_ENABLED
)
2010 values
[n
++] = enabled
;
2011 if (read_format
& PERF_FORMAT_TOTAL_TIME_RUNNING
)
2012 values
[n
++] = running
;
2013 if (read_format
& PERF_FORMAT_ID
)
2014 values
[n
++] = primary_event_id(event
);
2016 if (copy_to_user(buf
, values
, n
* sizeof(u64
)))
2019 return n
* sizeof(u64
);
/*
 * Read the performance event - simple non-blocking version for now
 */
2026 perf_read_hw(struct perf_event
*event
, char __user
*buf
, size_t count
)
2028 u64 read_format
= event
->attr
.read_format
;
	/*
	 * Return end-of-file for a read on an event that is in
	 * error state (i.e. because it was pinned but it couldn't be
	 * scheduled on to the CPU at some point).
	 */
2036 if (event
->state
== PERF_EVENT_STATE_ERROR
)
2039 if (count
< perf_event_read_size(event
))
2042 WARN_ON_ONCE(event
->ctx
->parent_ctx
);
2043 if (read_format
& PERF_FORMAT_GROUP
)
2044 ret
= perf_event_read_group(event
, read_format
, buf
);
2046 ret
= perf_event_read_one(event
, read_format
, buf
);
2052 perf_read(struct file
*file
, char __user
*buf
, size_t count
, loff_t
*ppos
)
2054 struct perf_event
*event
= file
->private_data
;
2056 return perf_read_hw(event
, buf
, count
);
2059 static unsigned int perf_poll(struct file
*file
, poll_table
*wait
)
2061 struct perf_event
*event
= file
->private_data
;
2062 struct perf_mmap_data
*data
;
2063 unsigned int events
= POLL_HUP
;
2066 data
= rcu_dereference(event
->data
);
2068 events
= atomic_xchg(&data
->poll
, 0);
2071 poll_wait(file
, &event
->waitq
, wait
);
2076 static void perf_event_reset(struct perf_event
*event
)
2078 (void)perf_event_read(event
);
2079 atomic64_set(&event
->count
, 0);
2080 perf_event_update_userpage(event
);
2084 * Holding the top-level event's child_mutex means that any
2085 * descendant process that has inherited this event will block
2086 * in sync_child_event if it goes to exit, thus satisfying the
2087 * task existence requirements of perf_event_enable/disable.
2089 static void perf_event_for_each_child(struct perf_event
*event
,
2090 void (*func
)(struct perf_event
*))
2092 struct perf_event
*child
;
2094 WARN_ON_ONCE(event
->ctx
->parent_ctx
);
2095 mutex_lock(&event
->child_mutex
);
2097 list_for_each_entry(child
, &event
->child_list
, child_list
)
2099 mutex_unlock(&event
->child_mutex
);
2102 static void perf_event_for_each(struct perf_event
*event
,
2103 void (*func
)(struct perf_event
*))
2105 struct perf_event_context
*ctx
= event
->ctx
;
2106 struct perf_event
*sibling
;
2108 WARN_ON_ONCE(ctx
->parent_ctx
);
2109 mutex_lock(&ctx
->mutex
);
2110 event
= event
->group_leader
;
2112 perf_event_for_each_child(event
, func
);
2114 list_for_each_entry(sibling
, &event
->sibling_list
, group_entry
)
2115 perf_event_for_each_child(event
, func
);
2116 mutex_unlock(&ctx
->mutex
);
2119 static int perf_event_period(struct perf_event
*event
, u64 __user
*arg
)
2121 struct perf_event_context
*ctx
= event
->ctx
;
2126 if (!event
->attr
.sample_period
)
2129 size
= copy_from_user(&value
, arg
, sizeof(value
));
2130 if (size
!= sizeof(value
))
2136 raw_spin_lock_irq(&ctx
->lock
);
2137 if (event
->attr
.freq
) {
2138 if (value
> sysctl_perf_event_sample_rate
) {
2143 event
->attr
.sample_freq
= value
;
2145 event
->attr
.sample_period
= value
;
2146 event
->hw
.sample_period
= value
;
2149 raw_spin_unlock_irq(&ctx
->lock
);
2154 static int perf_event_set_output(struct perf_event
*event
, int output_fd
);
2155 static int perf_event_set_filter(struct perf_event
*event
, void __user
*arg
);
2157 static long perf_ioctl(struct file
*file
, unsigned int cmd
, unsigned long arg
)
2159 struct perf_event
*event
= file
->private_data
;
2160 void (*func
)(struct perf_event
*);
2164 case PERF_EVENT_IOC_ENABLE
:
2165 func
= perf_event_enable
;
2167 case PERF_EVENT_IOC_DISABLE
:
2168 func
= perf_event_disable
;
2170 case PERF_EVENT_IOC_RESET
:
2171 func
= perf_event_reset
;
2174 case PERF_EVENT_IOC_REFRESH
:
2175 return perf_event_refresh(event
, arg
);
2177 case PERF_EVENT_IOC_PERIOD
:
2178 return perf_event_period(event
, (u64 __user
*)arg
);
2180 case PERF_EVENT_IOC_SET_OUTPUT
:
2181 return perf_event_set_output(event
, arg
);
2183 case PERF_EVENT_IOC_SET_FILTER
:
2184 return perf_event_set_filter(event
, (void __user
*)arg
);
2190 if (flags
& PERF_IOC_FLAG_GROUP
)
2191 perf_event_for_each(event
, func
);
2193 perf_event_for_each_child(event
, func
);
2198 int perf_event_task_enable(void)
2200 struct perf_event
*event
;
2202 mutex_lock(¤t
->perf_event_mutex
);
2203 list_for_each_entry(event
, ¤t
->perf_event_list
, owner_entry
)
2204 perf_event_for_each_child(event
, perf_event_enable
);
2205 mutex_unlock(¤t
->perf_event_mutex
);
2210 int perf_event_task_disable(void)
2212 struct perf_event
*event
;
2214 mutex_lock(¤t
->perf_event_mutex
);
2215 list_for_each_entry(event
, ¤t
->perf_event_list
, owner_entry
)
2216 perf_event_for_each_child(event
, perf_event_disable
);
2217 mutex_unlock(¤t
->perf_event_mutex
);
2222 #ifndef PERF_EVENT_INDEX_OFFSET
2223 # define PERF_EVENT_INDEX_OFFSET 0
2226 static int perf_event_index(struct perf_event
*event
)
2228 if (event
->state
!= PERF_EVENT_STATE_ACTIVE
)
2231 return event
->hw
.idx
+ 1 - PERF_EVENT_INDEX_OFFSET
;
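/*
 * (Added note) The value computed here ends up in the mmap()ed
 * struct perf_event_mmap_page as ->index; an index of 0 tells userspace
 * the event is not currently on the PMU, so it must fall back to read()
 * instead of sampling the hardware counter directly.
 */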
/*
 * Callers need to ensure there can be no nesting of this function, otherwise
 * the seqlock logic goes bad. We cannot serialize this because the arch
 * code calls this from NMI context.
 */
2239 void perf_event_update_userpage(struct perf_event
*event
)
2241 struct perf_event_mmap_page
*userpg
;
2242 struct perf_mmap_data
*data
;
2245 data
= rcu_dereference(event
->data
);
2249 userpg
= data
->user_page
;
2252 * Disable preemption so as to not let the corresponding user-space
2253 * spin too long if we get preempted.
2258 userpg
->index
= perf_event_index(event
);
2259 userpg
->offset
= atomic64_read(&event
->count
);
2260 if (event
->state
== PERF_EVENT_STATE_ACTIVE
)
2261 userpg
->offset
-= atomic64_read(&event
->hw
.prev_count
);
2263 userpg
->time_enabled
= event
->total_time_enabled
+
2264 atomic64_read(&event
->child_total_time_enabled
);
2266 userpg
->time_running
= event
->total_time_running
+
2267 atomic64_read(&event
->child_total_time_running
);
2276 static unsigned long perf_data_size(struct perf_mmap_data
*data
)
2278 return data
->nr_pages
<< (PAGE_SHIFT
+ data
->data_order
);
2281 #ifndef CONFIG_PERF_USE_VMALLOC
2284 * Back perf_mmap() with regular GFP_KERNEL-0 pages.
2287 static struct page
*
2288 perf_mmap_to_page(struct perf_mmap_data
*data
, unsigned long pgoff
)
2290 if (pgoff
> data
->nr_pages
)
2294 return virt_to_page(data
->user_page
);
2296 return virt_to_page(data
->data_pages
[pgoff
- 1]);
2299 static struct perf_mmap_data
*
2300 perf_mmap_data_alloc(struct perf_event
*event
, int nr_pages
)
2302 struct perf_mmap_data
*data
;
2306 WARN_ON(atomic_read(&event
->mmap_count
));
2308 size
= sizeof(struct perf_mmap_data
);
2309 size
+= nr_pages
* sizeof(void *);
2311 data
= kzalloc(size
, GFP_KERNEL
);
2315 data
->user_page
= (void *)get_zeroed_page(GFP_KERNEL
);
2316 if (!data
->user_page
)
2317 goto fail_user_page
;
2319 for (i
= 0; i
< nr_pages
; i
++) {
2320 data
->data_pages
[i
] = (void *)get_zeroed_page(GFP_KERNEL
);
2321 if (!data
->data_pages
[i
])
2322 goto fail_data_pages
;
2325 data
->data_order
= 0;
2326 data
->nr_pages
= nr_pages
;
2331 for (i
--; i
>= 0; i
--)
2332 free_page((unsigned long)data
->data_pages
[i
]);
2334 free_page((unsigned long)data
->user_page
);
2343 static void perf_mmap_free_page(unsigned long addr
)
2345 struct page
*page
= virt_to_page((void *)addr
);
2347 page
->mapping
= NULL
;
2351 static void perf_mmap_data_free(struct perf_mmap_data
*data
)
2355 perf_mmap_free_page((unsigned long)data
->user_page
);
2356 for (i
= 0; i
< data
->nr_pages
; i
++)
2357 perf_mmap_free_page((unsigned long)data
->data_pages
[i
]);
2364 * Back perf_mmap() with vmalloc memory.
2366 * Required for architectures that have d-cache aliasing issues.
2369 static struct page
*
2370 perf_mmap_to_page(struct perf_mmap_data
*data
, unsigned long pgoff
)
2372 if (pgoff
> (1UL << data
->data_order
))
2375 return vmalloc_to_page((void *)data
->user_page
+ pgoff
* PAGE_SIZE
);
2378 static void perf_mmap_unmark_page(void *addr
)
2380 struct page
*page
= vmalloc_to_page(addr
);
2382 page
->mapping
= NULL
;
2385 static void perf_mmap_data_free_work(struct work_struct
*work
)
2387 struct perf_mmap_data
*data
;
2391 data
= container_of(work
, struct perf_mmap_data
, work
);
2392 nr
= 1 << data
->data_order
;
2394 base
= data
->user_page
;
2395 for (i
= 0; i
< nr
+ 1; i
++)
2396 perf_mmap_unmark_page(base
+ (i
* PAGE_SIZE
));
2402 static void perf_mmap_data_free(struct perf_mmap_data
*data
)
2404 schedule_work(&data
->work
);
2407 static struct perf_mmap_data
*
2408 perf_mmap_data_alloc(struct perf_event
*event
, int nr_pages
)
2410 struct perf_mmap_data
*data
;
2414 WARN_ON(atomic_read(&event
->mmap_count
));
2416 size
= sizeof(struct perf_mmap_data
);
2417 size
+= sizeof(void *);
2419 data
= kzalloc(size
, GFP_KERNEL
);
2423 INIT_WORK(&data
->work
, perf_mmap_data_free_work
);
2425 all_buf
= vmalloc_user((nr_pages
+ 1) * PAGE_SIZE
);
2429 data
->user_page
= all_buf
;
2430 data
->data_pages
[0] = all_buf
+ PAGE_SIZE
;
2431 data
->data_order
= ilog2(nr_pages
);
2445 static int perf_mmap_fault(struct vm_area_struct
*vma
, struct vm_fault
*vmf
)
2447 struct perf_event
*event
= vma
->vm_file
->private_data
;
2448 struct perf_mmap_data
*data
;
2449 int ret
= VM_FAULT_SIGBUS
;
2451 if (vmf
->flags
& FAULT_FLAG_MKWRITE
) {
2452 if (vmf
->pgoff
== 0)
2458 data
= rcu_dereference(event
->data
);
2462 if (vmf
->pgoff
&& (vmf
->flags
& FAULT_FLAG_WRITE
))
2465 vmf
->page
= perf_mmap_to_page(data
, vmf
->pgoff
);
2469 get_page(vmf
->page
);
2470 vmf
->page
->mapping
= vma
->vm_file
->f_mapping
;
2471 vmf
->page
->index
= vmf
->pgoff
;
static void
perf_mmap_data_init(struct perf_event *event, struct perf_mmap_data *data)
{
	long max_size = perf_data_size(data);

	atomic_set(&data->lock, -1);

	if (event->attr.watermark) {
		data->watermark = min_t(long, max_size,
					event->attr.wakeup_watermark);
	}

	if (!data->watermark)
		data->watermark = max_size / 2;

	rcu_assign_pointer(event->data, data);
}
static void perf_mmap_data_free_rcu(struct rcu_head *rcu_head)
{
	struct perf_mmap_data *data;

	data = container_of(rcu_head, struct perf_mmap_data, rcu_head);
	perf_mmap_data_free(data);
}

static void perf_mmap_data_release(struct perf_event *event)
{
	struct perf_mmap_data *data = event->data;

	WARN_ON(atomic_read(&event->mmap_count));

	rcu_assign_pointer(event->data, NULL);
	call_rcu(&data->rcu_head, perf_mmap_data_free_rcu);
}
static void perf_mmap_open(struct vm_area_struct *vma)
{
	struct perf_event *event = vma->vm_file->private_data;

	atomic_inc(&event->mmap_count);
}

static void perf_mmap_close(struct vm_area_struct *vma)
{
	struct perf_event *event = vma->vm_file->private_data;

	WARN_ON_ONCE(event->ctx->parent_ctx);
	if (atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) {
		unsigned long size = perf_data_size(event->data);
		struct user_struct *user = current_user();

		atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm);
		vma->vm_mm->locked_vm -= event->data->nr_locked;
		perf_mmap_data_release(event);
		mutex_unlock(&event->mmap_mutex);
	}
}

static const struct vm_operations_struct perf_mmap_vmops = {
	.open		= perf_mmap_open,
	.close		= perf_mmap_close,
	.fault		= perf_mmap_fault,
	.page_mkwrite	= perf_mmap_fault,
};
2547 static int perf_mmap(struct file
*file
, struct vm_area_struct
*vma
)
2549 struct perf_event
*event
= file
->private_data
;
2550 unsigned long user_locked
, user_lock_limit
;
2551 struct user_struct
*user
= current_user();
2552 unsigned long locked
, lock_limit
;
2553 struct perf_mmap_data
*data
;
2554 unsigned long vma_size
;
2555 unsigned long nr_pages
;
2556 long user_extra
, extra
;
2559 if (!(vma
->vm_flags
& VM_SHARED
))
2562 vma_size
= vma
->vm_end
- vma
->vm_start
;
2563 nr_pages
= (vma_size
/ PAGE_SIZE
) - 1;
2566 * If we have data pages ensure they're a power-of-two number, so we
2567 * can do bitmasks instead of modulo.
2569 if (nr_pages
!= 0 && !is_power_of_2(nr_pages
))
2572 if (vma_size
!= PAGE_SIZE
* (1 + nr_pages
))
2575 if (vma
->vm_pgoff
!= 0)
2578 WARN_ON_ONCE(event
->ctx
->parent_ctx
);
2579 mutex_lock(&event
->mmap_mutex
);
2580 if (event
->output
) {
2585 if (atomic_inc_not_zero(&event
->mmap_count
)) {
2586 if (nr_pages
!= event
->data
->nr_pages
)
2591 user_extra
= nr_pages
+ 1;
2592 user_lock_limit
= sysctl_perf_event_mlock
>> (PAGE_SHIFT
- 10);
2595 * Increase the limit linearly with more CPUs:
2597 user_lock_limit
*= num_online_cpus();
2599 user_locked
= atomic_long_read(&user
->locked_vm
) + user_extra
;
2602 if (user_locked
> user_lock_limit
)
2603 extra
= user_locked
- user_lock_limit
;
2605 lock_limit
= rlimit(RLIMIT_MEMLOCK
);
2606 lock_limit
>>= PAGE_SHIFT
;
2607 locked
= vma
->vm_mm
->locked_vm
+ extra
;
2609 if ((locked
> lock_limit
) && perf_paranoid_tracepoint_raw() &&
2610 !capable(CAP_IPC_LOCK
)) {
2615 WARN_ON(event
->data
);
2617 data
= perf_mmap_data_alloc(event
, nr_pages
);
2623 perf_mmap_data_init(event
, data
);
2625 atomic_set(&event
->mmap_count
, 1);
2626 atomic_long_add(user_extra
, &user
->locked_vm
);
2627 vma
->vm_mm
->locked_vm
+= extra
;
2628 event
->data
->nr_locked
= extra
;
2629 if (vma
->vm_flags
& VM_WRITE
)
2630 event
->data
->writable
= 1;
2633 mutex_unlock(&event
->mmap_mutex
);
2635 vma
->vm_flags
|= VM_RESERVED
;
2636 vma
->vm_ops
= &perf_mmap_vmops
;
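/*
 * Illustrative userspace sketch (not part of this file): perf_mmap() only
 * accepts a shared mapping, starting at file offset 0, covering exactly
 * 1 + 2^n pages: one control page followed by a power-of-two number of
 * data pages. Error handling is minimal and the sample settings are
 * arbitrary.
 */
#if 0 /* userspace example, not built */
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

int main(void)
{
	struct perf_event_attr attr;
	long page_size = sysconf(_SC_PAGESIZE);
	int n = 8;				/* 2^8 = 256 data pages */
	void *buf;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_TASK_CLOCK;
	attr.sample_period = 100000;
	attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID;

	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0)
		return 1;

	/* one control page plus 2^n data pages, MAP_SHARED, offset 0 */
	buf = mmap(NULL, (1 + (1 << n)) * page_size,
		   PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

	return buf == MAP_FAILED;
}
#endif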
2641 static int perf_fasync(int fd
, struct file
*filp
, int on
)
2643 struct inode
*inode
= filp
->f_path
.dentry
->d_inode
;
2644 struct perf_event
*event
= filp
->private_data
;
2647 mutex_lock(&inode
->i_mutex
);
2648 retval
= fasync_helper(fd
, filp
, on
, &event
->fasync
);
2649 mutex_unlock(&inode
->i_mutex
);
2657 static const struct file_operations perf_fops
= {
2658 .release
= perf_release
,
2661 .unlocked_ioctl
= perf_ioctl
,
2662 .compat_ioctl
= perf_ioctl
,
2664 .fasync
= perf_fasync
,
/*
 * Perf event wakeup
 *
 * If there's data, ensure we set the poll() state and publish everything
 * to user-space before waking everybody up.
 */

void perf_event_wakeup(struct perf_event *event)
{
	wake_up_all(&event->waitq);

	if (event->pending_kill) {
		kill_fasync(&event->fasync, SIGIO, event->pending_kill);
		event->pending_kill = 0;
	}
}
/*
 * Pending wakeups
 *
 * Handle the case where we need to wake up from NMI (or rq->lock) context.
 *
 * The NMI bit means we cannot possibly take locks. Therefore, maintain a
 * single linked list and use cmpxchg() to add entries lockless.
 */

static void perf_pending_event(struct perf_pending_entry *entry)
{
	struct perf_event *event = container_of(entry,
			struct perf_event, pending);

	if (event->pending_disable) {
		event->pending_disable = 0;
		__perf_event_disable(event);
	}

	if (event->pending_wakeup) {
		event->pending_wakeup = 0;
		perf_event_wakeup(event);
	}
}
#define PENDING_TAIL ((struct perf_pending_entry *)-1UL)

static DEFINE_PER_CPU(struct perf_pending_entry *, perf_pending_head) = {
	PENDING_TAIL,
};

static void perf_pending_queue(struct perf_pending_entry *entry,
			       void (*func)(struct perf_pending_entry *))
{
	struct perf_pending_entry **head;

	if (cmpxchg(&entry->next, NULL, PENDING_TAIL) != NULL)
		return;

	entry->func = func;

	head = &get_cpu_var(perf_pending_head);

	do {
		entry->next = *head;
	} while (cmpxchg(head, entry->next, entry) != entry->next);

	set_perf_event_pending();

	put_cpu_var(perf_pending_head);
}
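/*
 * The cmpxchg() publish loop above is a lockless push onto a singly
 * linked, per-CPU list, which is why it is safe from NMI context. A
 * standalone illustration of the same retry pattern is sketched below;
 * it uses GCC's __sync_val_compare_and_swap() as a stand-in for the
 * kernel's cmpxchg() and a plain NULL-terminated list instead of the
 * PENDING_TAIL sentinel, so the names and simplifications are not from
 * this file.
 */
#if 0 /* illustrative sketch, not built */
struct entry {
	struct entry *next;
};

static struct entry *list_head;

static void push(struct entry *e)
{
	struct entry *old;

	do {
		old = list_head;
		e->next = old;
		/* retry if another context pushed meanwhile */
	} while (__sync_val_compare_and_swap(&list_head, old, e) != old);
}
#endif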
2736 static int __perf_pending_run(void)
2738 struct perf_pending_entry
*list
;
2741 list
= xchg(&__get_cpu_var(perf_pending_head
), PENDING_TAIL
);
2742 while (list
!= PENDING_TAIL
) {
2743 void (*func
)(struct perf_pending_entry
*);
2744 struct perf_pending_entry
*entry
= list
;
2751 * Ensure we observe the unqueue before we issue the wakeup,
2752 * so that we won't be waiting forever.
2753 * -- see perf_not_pending().
2764 static inline int perf_not_pending(struct perf_event
*event
)
2767 * If we flush on whatever cpu we run, there is a chance we don't
2771 __perf_pending_run();
2775 * Ensure we see the proper queue state before going to sleep
2776 * so that we do not miss the wakeup. -- see perf_pending_handle()
2779 return event
->pending
.next
== NULL
;
2782 static void perf_pending_sync(struct perf_event
*event
)
2784 wait_event(event
->waitq
, perf_not_pending(event
));
2787 void perf_event_do_pending(void)
2789 __perf_pending_run();
2793 * Callchain support -- arch specific
2796 __weak
struct perf_callchain_entry
*perf_callchain(struct pt_regs
*regs
)
2802 void perf_arch_fetch_caller_regs(struct pt_regs
*regs
, unsigned long ip
, int skip
)
static bool perf_output_space(struct perf_mmap_data *data, unsigned long tail,
			      unsigned long offset, unsigned long head)
{
	unsigned long mask;

	if (!data->writable)
		return true;

	mask = perf_data_size(data) - 1;

	offset = (offset - tail) & mask;
	head   = (head   - tail) & mask;

	if ((int)(head - offset) < 0)
		return false;

	return true;
}
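/*
 * Worked example for the space check above (values arbitrary): with a
 * 16-byte data area (mask = 15), tail = 20, offset = 30, head = 38:
 *
 *	offset = (30 - 20) & 15 = 10
 *	head   = (38 - 20) & 15 =  2
 *	(int)(2 - 10) < 0
 *
 * so the proposed write would overrun the unread region and the writer
 * must bail out (the record is then accounted in data->lost).
 */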
static void perf_output_wakeup(struct perf_output_handle *handle)
{
	atomic_set(&handle->data->poll, POLL_IN);

	if (handle->nmi) {
		handle->event->pending_wakeup = 1;
		perf_pending_queue(&handle->event->pending,
				   perf_pending_event);
	} else
		perf_event_wakeup(handle->event);
}
2842 * Curious locking construct.
2844 * We need to ensure a later event_id doesn't publish a head when a former
2845 * event_id isn't done writing. However since we need to deal with NMIs we
2846 * cannot fully serialize things.
2848 * What we do is serialize between CPUs so we only have to deal with NMI
2849 * nesting on a single CPU.
2851 * We only publish the head (and generate a wakeup) when the outer-most
2852 * event_id completes.
2854 static void perf_output_lock(struct perf_output_handle
*handle
)
2856 struct perf_mmap_data
*data
= handle
->data
;
2857 int cur
, cpu
= get_cpu();
2862 cur
= atomic_cmpxchg(&data
->lock
, -1, cpu
);
2874 static void perf_output_unlock(struct perf_output_handle
*handle
)
2876 struct perf_mmap_data
*data
= handle
->data
;
2880 data
->done_head
= data
->head
;
2882 if (!handle
->locked
)
2887 * The xchg implies a full barrier that ensures all writes are done
2888 * before we publish the new head, matched by a rmb() in userspace when
2889 * reading this position.
2891 while ((head
= atomic_long_xchg(&data
->done_head
, 0)))
2892 data
->user_page
->data_head
= head
;
2895 * NMI can happen here, which means we can miss a done_head update.
2898 cpu
= atomic_xchg(&data
->lock
, -1);
2899 WARN_ON_ONCE(cpu
!= smp_processor_id());
2902 * Therefore we have to validate we did not indeed do so.
2904 if (unlikely(atomic_long_read(&data
->done_head
))) {
2906 * Since we had it locked, we can lock it again.
2908 while (atomic_cmpxchg(&data
->lock
, -1, cpu
) != -1)
2914 if (atomic_xchg(&data
->wakeup
, 0))
2915 perf_output_wakeup(handle
);
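/*
 * The data_head store above is the producer half of the user-visible
 * ring-buffer protocol; the consumer pairs it with a read barrier and,
 * for writable mappings, hands space back by advancing data_tail. A
 * hedged userspace sketch of that consumer side follows (records that
 * wrap around the end of the data area are not handled, and the helper
 * names are illustrative).
 */
#if 0 /* userspace example, not built */
#include <stdint.h>
#include <linux/perf_event.h>

static void drain(struct perf_event_mmap_page *pg, char *base,
		  uint64_t mask, long page_size)
{
	uint64_t head = pg->data_head;
	uint64_t tail = pg->data_tail;

	__sync_synchronize();		/* rmb(): pairs with the xchg above */

	while (tail < head) {
		struct perf_event_header *hdr = (void *)
			(base + page_size + (tail & mask));

		/* ... consume the record at 'hdr' ... */
		tail += hdr->size;
	}

	__sync_synchronize();		/* finish reading before releasing */
	pg->data_tail = tail;
}
#endif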
2920 void perf_output_copy(struct perf_output_handle
*handle
,
2921 const void *buf
, unsigned int len
)
2923 unsigned int pages_mask
;
2924 unsigned long offset
;
2928 offset
= handle
->offset
;
2929 pages_mask
= handle
->data
->nr_pages
- 1;
2930 pages
= handle
->data
->data_pages
;
2933 unsigned long page_offset
;
2934 unsigned long page_size
;
2937 nr
= (offset
>> PAGE_SHIFT
) & pages_mask
;
2938 page_size
= 1UL << (handle
->data
->data_order
+ PAGE_SHIFT
);
2939 page_offset
= offset
& (page_size
- 1);
2940 size
= min_t(unsigned int, page_size
- page_offset
, len
);
2942 memcpy(pages
[nr
] + page_offset
, buf
, size
);
2949 handle
->offset
= offset
;
2952 * Check we didn't copy past our reservation window, taking the
2953 * possible unsigned int wrap into account.
2955 WARN_ON_ONCE(((long)(handle
->head
- handle
->offset
)) < 0);
2958 int perf_output_begin(struct perf_output_handle
*handle
,
2959 struct perf_event
*event
, unsigned int size
,
2960 int nmi
, int sample
)
2962 struct perf_event
*output_event
;
2963 struct perf_mmap_data
*data
;
2964 unsigned long tail
, offset
, head
;
2967 struct perf_event_header header
;
2974 * For inherited events we send all the output towards the parent.
2977 event
= event
->parent
;
2979 output_event
= rcu_dereference(event
->output
);
2981 event
= output_event
;
2983 data
= rcu_dereference(event
->data
);
2987 handle
->data
= data
;
2988 handle
->event
= event
;
2990 handle
->sample
= sample
;
2992 if (!data
->nr_pages
)
2995 have_lost
= atomic_read(&data
->lost
);
2997 size
+= sizeof(lost_event
);
2999 perf_output_lock(handle
);
3003 * Userspace could choose to issue a mb() before updating the
3004 * tail pointer. So that all reads will be completed before the
3007 tail
= ACCESS_ONCE(data
->user_page
->data_tail
);
3009 offset
= head
= atomic_long_read(&data
->head
);
3011 if (unlikely(!perf_output_space(data
, tail
, offset
, head
)))
3013 } while (atomic_long_cmpxchg(&data
->head
, offset
, head
) != offset
);
3015 handle
->offset
= offset
;
3016 handle
->head
= head
;
3018 if (head
- tail
> data
->watermark
)
3019 atomic_set(&data
->wakeup
, 1);
3022 lost_event
.header
.type
= PERF_RECORD_LOST
;
3023 lost_event
.header
.misc
= 0;
3024 lost_event
.header
.size
= sizeof(lost_event
);
3025 lost_event
.id
= event
->id
;
3026 lost_event
.lost
= atomic_xchg(&data
->lost
, 0);
3028 perf_output_put(handle
, lost_event
);
3034 atomic_inc(&data
->lost
);
3035 perf_output_unlock(handle
);
3042 void perf_output_end(struct perf_output_handle
*handle
)
3044 struct perf_event
*event
= handle
->event
;
3045 struct perf_mmap_data
*data
= handle
->data
;
3047 int wakeup_events
= event
->attr
.wakeup_events
;
3049 if (handle
->sample
&& wakeup_events
) {
3050 int events
= atomic_inc_return(&data
->events
);
3051 if (events
>= wakeup_events
) {
3052 atomic_sub(wakeup_events
, &data
->events
);
3053 atomic_set(&data
->wakeup
, 1);
3057 perf_output_unlock(handle
);
static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
{
	/*
	 * only top level events have the pid namespace they were created in
	 */
	if (event->parent)
		event = event->parent;

	return task_tgid_nr_ns(p, event->ns);
}

static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
{
	/*
	 * only top level events have the pid namespace they were created in
	 */
	if (event->parent)
		event = event->parent;

	return task_pid_nr_ns(p, event->ns);
}
3083 static void perf_output_read_one(struct perf_output_handle
*handle
,
3084 struct perf_event
*event
)
3086 u64 read_format
= event
->attr
.read_format
;
3090 values
[n
++] = atomic64_read(&event
->count
);
3091 if (read_format
& PERF_FORMAT_TOTAL_TIME_ENABLED
) {
3092 values
[n
++] = event
->total_time_enabled
+
3093 atomic64_read(&event
->child_total_time_enabled
);
3095 if (read_format
& PERF_FORMAT_TOTAL_TIME_RUNNING
) {
3096 values
[n
++] = event
->total_time_running
+
3097 atomic64_read(&event
->child_total_time_running
);
3099 if (read_format
& PERF_FORMAT_ID
)
3100 values
[n
++] = primary_event_id(event
);
3102 perf_output_copy(handle
, values
, n
* sizeof(u64
));
3106 * XXX PERF_FORMAT_GROUP vs inherited events seems difficult.
3108 static void perf_output_read_group(struct perf_output_handle
*handle
,
3109 struct perf_event
*event
)
3111 struct perf_event
*leader
= event
->group_leader
, *sub
;
3112 u64 read_format
= event
->attr
.read_format
;
3116 values
[n
++] = 1 + leader
->nr_siblings
;
3118 if (read_format
& PERF_FORMAT_TOTAL_TIME_ENABLED
)
3119 values
[n
++] = leader
->total_time_enabled
;
3121 if (read_format
& PERF_FORMAT_TOTAL_TIME_RUNNING
)
3122 values
[n
++] = leader
->total_time_running
;
3124 if (leader
!= event
)
3125 leader
->pmu
->read(leader
);
3127 values
[n
++] = atomic64_read(&leader
->count
);
3128 if (read_format
& PERF_FORMAT_ID
)
3129 values
[n
++] = primary_event_id(leader
);
3131 perf_output_copy(handle
, values
, n
* sizeof(u64
));
3133 list_for_each_entry(sub
, &leader
->sibling_list
, group_entry
) {
3137 sub
->pmu
->read(sub
);
3139 values
[n
++] = atomic64_read(&sub
->count
);
3140 if (read_format
& PERF_FORMAT_ID
)
3141 values
[n
++] = primary_event_id(sub
);
3143 perf_output_copy(handle
, values
, n
* sizeof(u64
));
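/*
 * The group layout emitted above is also what read(2) on a group leader
 * returns: nr, the optional enabled/running times, then one value (and
 * optional id) per group member. A hedged userspace sketch of decoding
 * that layout, assuming read_format was PERF_FORMAT_GROUP |
 * PERF_FORMAT_ID and neither time flag:
 */
#if 0 /* userspace example, not built */
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

static void print_group(int group_fd)
{
	uint64_t buf[64];
	uint64_t i, nr;

	if (read(group_fd, buf, sizeof(buf)) <= 0)
		return;

	nr = buf[0];				/* 1 + nr_siblings */
	for (i = 0; i < nr; i++) {
		uint64_t value = buf[1 + 2 * i];
		uint64_t id    = buf[2 + 2 * i];

		printf("id %llu: %llu\n",
		       (unsigned long long)id, (unsigned long long)value);
	}
}
#endif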
static void perf_output_read(struct perf_output_handle *handle,
			     struct perf_event *event)
{
	if (event->attr.read_format & PERF_FORMAT_GROUP)
		perf_output_read_group(handle, event);
	else
		perf_output_read_one(handle, event);
}
3156 void perf_output_sample(struct perf_output_handle
*handle
,
3157 struct perf_event_header
*header
,
3158 struct perf_sample_data
*data
,
3159 struct perf_event
*event
)
3161 u64 sample_type
= data
->type
;
3163 perf_output_put(handle
, *header
);
3165 if (sample_type
& PERF_SAMPLE_IP
)
3166 perf_output_put(handle
, data
->ip
);
3168 if (sample_type
& PERF_SAMPLE_TID
)
3169 perf_output_put(handle
, data
->tid_entry
);
3171 if (sample_type
& PERF_SAMPLE_TIME
)
3172 perf_output_put(handle
, data
->time
);
3174 if (sample_type
& PERF_SAMPLE_ADDR
)
3175 perf_output_put(handle
, data
->addr
);
3177 if (sample_type
& PERF_SAMPLE_ID
)
3178 perf_output_put(handle
, data
->id
);
3180 if (sample_type
& PERF_SAMPLE_STREAM_ID
)
3181 perf_output_put(handle
, data
->stream_id
);
3183 if (sample_type
& PERF_SAMPLE_CPU
)
3184 perf_output_put(handle
, data
->cpu_entry
);
3186 if (sample_type
& PERF_SAMPLE_PERIOD
)
3187 perf_output_put(handle
, data
->period
);
3189 if (sample_type
& PERF_SAMPLE_READ
)
3190 perf_output_read(handle
, event
);
3192 if (sample_type
& PERF_SAMPLE_CALLCHAIN
) {
3193 if (data
->callchain
) {
3196 if (data
->callchain
)
3197 size
+= data
->callchain
->nr
;
3199 size
*= sizeof(u64
);
3201 perf_output_copy(handle
, data
->callchain
, size
);
3204 perf_output_put(handle
, nr
);
3208 if (sample_type
& PERF_SAMPLE_RAW
) {
3210 perf_output_put(handle
, data
->raw
->size
);
3211 perf_output_copy(handle
, data
->raw
->data
,
3218 .size
= sizeof(u32
),
3221 perf_output_put(handle
, raw
);
3226 void perf_prepare_sample(struct perf_event_header
*header
,
3227 struct perf_sample_data
*data
,
3228 struct perf_event
*event
,
3229 struct pt_regs
*regs
)
3231 u64 sample_type
= event
->attr
.sample_type
;
3233 data
->type
= sample_type
;
3235 header
->type
= PERF_RECORD_SAMPLE
;
3236 header
->size
= sizeof(*header
);
3239 header
->misc
|= perf_misc_flags(regs
);
3241 if (sample_type
& PERF_SAMPLE_IP
) {
3242 data
->ip
= perf_instruction_pointer(regs
);
3244 header
->size
+= sizeof(data
->ip
);
3247 if (sample_type
& PERF_SAMPLE_TID
) {
3248 /* namespace issues */
3249 data
->tid_entry
.pid
= perf_event_pid(event
, current
);
3250 data
->tid_entry
.tid
= perf_event_tid(event
, current
);
3252 header
->size
+= sizeof(data
->tid_entry
);
3255 if (sample_type
& PERF_SAMPLE_TIME
) {
3256 data
->time
= perf_clock();
3258 header
->size
+= sizeof(data
->time
);
3261 if (sample_type
& PERF_SAMPLE_ADDR
)
3262 header
->size
+= sizeof(data
->addr
);
3264 if (sample_type
& PERF_SAMPLE_ID
) {
3265 data
->id
= primary_event_id(event
);
3267 header
->size
+= sizeof(data
->id
);
3270 if (sample_type
& PERF_SAMPLE_STREAM_ID
) {
3271 data
->stream_id
= event
->id
;
3273 header
->size
+= sizeof(data
->stream_id
);
3276 if (sample_type
& PERF_SAMPLE_CPU
) {
3277 data
->cpu_entry
.cpu
= raw_smp_processor_id();
3278 data
->cpu_entry
.reserved
= 0;
3280 header
->size
+= sizeof(data
->cpu_entry
);
3283 if (sample_type
& PERF_SAMPLE_PERIOD
)
3284 header
->size
+= sizeof(data
->period
);
3286 if (sample_type
& PERF_SAMPLE_READ
)
3287 header
->size
+= perf_event_read_size(event
);
3289 if (sample_type
& PERF_SAMPLE_CALLCHAIN
) {
3292 data
->callchain
= perf_callchain(regs
);
3294 if (data
->callchain
)
3295 size
+= data
->callchain
->nr
;
3297 header
->size
+= size
* sizeof(u64
);
3300 if (sample_type
& PERF_SAMPLE_RAW
) {
3301 int size
= sizeof(u32
);
3304 size
+= data
->raw
->size
;
3306 size
+= sizeof(u32
);
3308 WARN_ON_ONCE(size
& (sizeof(u64
)-1));
3309 header
->size
+= size
;
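/*
 * perf_prepare_sample() grows header->size in exactly the order that
 * perf_output_sample() writes the fields, so a consumer can walk a
 * PERF_RECORD_SAMPLE with the same sample_type checks. A hedged
 * userspace sketch for the first few fields (little-endian layout of
 * the pid/tid pair assumed; the struct and helper are illustrative):
 */
#if 0 /* userspace example, not built */
#include <stdint.h>
#include <linux/perf_event.h>

struct decoded_sample {
	uint64_t ip;
	uint32_t pid, tid;
	uint64_t time;
};

/* 'p' points just past the struct perf_event_header of the record */
static void decode_sample(const uint64_t *p, uint64_t sample_type,
			  struct decoded_sample *out)
{
	if (sample_type & PERF_SAMPLE_IP)
		out->ip = *p++;
	if (sample_type & PERF_SAMPLE_TID) {
		out->pid = (uint32_t)*p;
		out->tid = (uint32_t)(*p >> 32);
		p++;
	}
	if (sample_type & PERF_SAMPLE_TIME)
		out->time = *p++;
	/* ADDR, ID, STREAM_ID, CPU, PERIOD, READ, CALLCHAIN, RAW follow */
}
#endif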
3313 static void perf_event_output(struct perf_event
*event
, int nmi
,
3314 struct perf_sample_data
*data
,
3315 struct pt_regs
*regs
)
3317 struct perf_output_handle handle
;
3318 struct perf_event_header header
;
3320 perf_prepare_sample(&header
, data
, event
, regs
);
3322 if (perf_output_begin(&handle
, event
, header
.size
, nmi
, 1))
3325 perf_output_sample(&handle
, &header
, data
, event
);
3327 perf_output_end(&handle
);
3334 struct perf_read_event
{
3335 struct perf_event_header header
;
3342 perf_event_read_event(struct perf_event
*event
,
3343 struct task_struct
*task
)
3345 struct perf_output_handle handle
;
3346 struct perf_read_event read_event
= {
3348 .type
= PERF_RECORD_READ
,
3350 .size
= sizeof(read_event
) + perf_event_read_size(event
),
3352 .pid
= perf_event_pid(event
, task
),
3353 .tid
= perf_event_tid(event
, task
),
3357 ret
= perf_output_begin(&handle
, event
, read_event
.header
.size
, 0, 0);
3361 perf_output_put(&handle
, read_event
);
3362 perf_output_read(&handle
, event
);
3364 perf_output_end(&handle
);
3368 * task tracking -- fork/exit
3370 * enabled by: attr.comm | attr.mmap | attr.task
3373 struct perf_task_event
{
3374 struct task_struct
*task
;
3375 struct perf_event_context
*task_ctx
;
3378 struct perf_event_header header
;
3388 static void perf_event_task_output(struct perf_event
*event
,
3389 struct perf_task_event
*task_event
)
3391 struct perf_output_handle handle
;
3392 struct task_struct
*task
= task_event
->task
;
3393 unsigned long flags
;
3397 * If this CPU attempts to acquire an rq lock held by a CPU spinning
3398 * in perf_output_lock() from interrupt context, it's game over.
3400 local_irq_save(flags
);
3402 size
= task_event
->event_id
.header
.size
;
3403 ret
= perf_output_begin(&handle
, event
, size
, 0, 0);
3406 local_irq_restore(flags
);
3410 task_event
->event_id
.pid
= perf_event_pid(event
, task
);
3411 task_event
->event_id
.ppid
= perf_event_pid(event
, current
);
3413 task_event
->event_id
.tid
= perf_event_tid(event
, task
);
3414 task_event
->event_id
.ptid
= perf_event_tid(event
, current
);
3416 perf_output_put(&handle
, task_event
->event_id
);
3418 perf_output_end(&handle
);
3419 local_irq_restore(flags
);
3422 static int perf_event_task_match(struct perf_event
*event
)
3424 if (event
->state
< PERF_EVENT_STATE_INACTIVE
)
3427 if (event
->cpu
!= -1 && event
->cpu
!= smp_processor_id())
3430 if (event
->attr
.comm
|| event
->attr
.mmap
|| event
->attr
.task
)
3436 static void perf_event_task_ctx(struct perf_event_context
*ctx
,
3437 struct perf_task_event
*task_event
)
3439 struct perf_event
*event
;
3441 list_for_each_entry_rcu(event
, &ctx
->event_list
, event_entry
) {
3442 if (perf_event_task_match(event
))
3443 perf_event_task_output(event
, task_event
);
3447 static void perf_event_task_event(struct perf_task_event
*task_event
)
3449 struct perf_cpu_context
*cpuctx
;
3450 struct perf_event_context
*ctx
= task_event
->task_ctx
;
3453 cpuctx
= &get_cpu_var(perf_cpu_context
);
3454 perf_event_task_ctx(&cpuctx
->ctx
, task_event
);
3456 ctx
= rcu_dereference(current
->perf_event_ctxp
);
3458 perf_event_task_ctx(ctx
, task_event
);
3459 put_cpu_var(perf_cpu_context
);
3463 static void perf_event_task(struct task_struct
*task
,
3464 struct perf_event_context
*task_ctx
,
3467 struct perf_task_event task_event
;
3469 if (!atomic_read(&nr_comm_events
) &&
3470 !atomic_read(&nr_mmap_events
) &&
3471 !atomic_read(&nr_task_events
))
3474 task_event
= (struct perf_task_event
){
3476 .task_ctx
= task_ctx
,
3479 .type
= new ? PERF_RECORD_FORK
: PERF_RECORD_EXIT
,
3481 .size
= sizeof(task_event
.event_id
),
3487 .time
= perf_clock(),
3491 perf_event_task_event(&task_event
);
3494 void perf_event_fork(struct task_struct
*task
)
3496 perf_event_task(task
, NULL
, 1);
3503 struct perf_comm_event
{
3504 struct task_struct
*task
;
3509 struct perf_event_header header
;
3516 static void perf_event_comm_output(struct perf_event
*event
,
3517 struct perf_comm_event
*comm_event
)
3519 struct perf_output_handle handle
;
3520 int size
= comm_event
->event_id
.header
.size
;
3521 int ret
= perf_output_begin(&handle
, event
, size
, 0, 0);
3526 comm_event
->event_id
.pid
= perf_event_pid(event
, comm_event
->task
);
3527 comm_event
->event_id
.tid
= perf_event_tid(event
, comm_event
->task
);
3529 perf_output_put(&handle
, comm_event
->event_id
);
3530 perf_output_copy(&handle
, comm_event
->comm
,
3531 comm_event
->comm_size
);
3532 perf_output_end(&handle
);
3535 static int perf_event_comm_match(struct perf_event
*event
)
3537 if (event
->state
< PERF_EVENT_STATE_INACTIVE
)
3540 if (event
->cpu
!= -1 && event
->cpu
!= smp_processor_id())
3543 if (event
->attr
.comm
)
3549 static void perf_event_comm_ctx(struct perf_event_context
*ctx
,
3550 struct perf_comm_event
*comm_event
)
3552 struct perf_event
*event
;
3554 list_for_each_entry_rcu(event
, &ctx
->event_list
, event_entry
) {
3555 if (perf_event_comm_match(event
))
3556 perf_event_comm_output(event
, comm_event
);
3560 static void perf_event_comm_event(struct perf_comm_event
*comm_event
)
3562 struct perf_cpu_context
*cpuctx
;
3563 struct perf_event_context
*ctx
;
3565 char comm
[TASK_COMM_LEN
];
3567 memset(comm
, 0, sizeof(comm
));
3568 strlcpy(comm
, comm_event
->task
->comm
, sizeof(comm
));
3569 size
= ALIGN(strlen(comm
)+1, sizeof(u64
));
3571 comm_event
->comm
= comm
;
3572 comm_event
->comm_size
= size
;
3574 comm_event
->event_id
.header
.size
= sizeof(comm_event
->event_id
) + size
;
3577 cpuctx
= &get_cpu_var(perf_cpu_context
);
3578 perf_event_comm_ctx(&cpuctx
->ctx
, comm_event
);
3579 ctx
= rcu_dereference(current
->perf_event_ctxp
);
3581 perf_event_comm_ctx(ctx
, comm_event
);
3582 put_cpu_var(perf_cpu_context
);
3586 void perf_event_comm(struct task_struct
*task
)
3588 struct perf_comm_event comm_event
;
3590 if (task
->perf_event_ctxp
)
3591 perf_event_enable_on_exec(task
);
3593 if (!atomic_read(&nr_comm_events
))
3596 comm_event
= (struct perf_comm_event
){
3602 .type
= PERF_RECORD_COMM
,
3611 perf_event_comm_event(&comm_event
);
3618 struct perf_mmap_event
{
3619 struct vm_area_struct
*vma
;
3621 const char *file_name
;
3625 struct perf_event_header header
;
3635 static void perf_event_mmap_output(struct perf_event
*event
,
3636 struct perf_mmap_event
*mmap_event
)
3638 struct perf_output_handle handle
;
3639 int size
= mmap_event
->event_id
.header
.size
;
3640 int ret
= perf_output_begin(&handle
, event
, size
, 0, 0);
3645 mmap_event
->event_id
.pid
= perf_event_pid(event
, current
);
3646 mmap_event
->event_id
.tid
= perf_event_tid(event
, current
);
3648 perf_output_put(&handle
, mmap_event
->event_id
);
3649 perf_output_copy(&handle
, mmap_event
->file_name
,
3650 mmap_event
->file_size
);
3651 perf_output_end(&handle
);
3654 static int perf_event_mmap_match(struct perf_event
*event
,
3655 struct perf_mmap_event
*mmap_event
)
3657 if (event
->state
< PERF_EVENT_STATE_INACTIVE
)
3660 if (event
->cpu
!= -1 && event
->cpu
!= smp_processor_id())
3663 if (event
->attr
.mmap
)
3669 static void perf_event_mmap_ctx(struct perf_event_context
*ctx
,
3670 struct perf_mmap_event
*mmap_event
)
3672 struct perf_event
*event
;
3674 list_for_each_entry_rcu(event
, &ctx
->event_list
, event_entry
) {
3675 if (perf_event_mmap_match(event
, mmap_event
))
3676 perf_event_mmap_output(event
, mmap_event
);
3680 static void perf_event_mmap_event(struct perf_mmap_event
*mmap_event
)
3682 struct perf_cpu_context
*cpuctx
;
3683 struct perf_event_context
*ctx
;
3684 struct vm_area_struct
*vma
= mmap_event
->vma
;
3685 struct file
*file
= vma
->vm_file
;
3691 memset(tmp
, 0, sizeof(tmp
));
3695 * d_path works from the end of the buffer backwards, so we
3696 * need to add enough zero bytes after the string to handle
3697 * the 64bit alignment we do later.
3699 buf
= kzalloc(PATH_MAX
+ sizeof(u64
), GFP_KERNEL
);
3701 name
= strncpy(tmp
, "//enomem", sizeof(tmp
));
3704 name
= d_path(&file
->f_path
, buf
, PATH_MAX
);
3706 name
= strncpy(tmp
, "//toolong", sizeof(tmp
));
3710 if (arch_vma_name(mmap_event
->vma
)) {
3711 name
= strncpy(tmp
, arch_vma_name(mmap_event
->vma
),
3717 name
= strncpy(tmp
, "[vdso]", sizeof(tmp
));
3721 name
= strncpy(tmp
, "//anon", sizeof(tmp
));
3726 size
= ALIGN(strlen(name
)+1, sizeof(u64
));
3728 mmap_event
->file_name
= name
;
3729 mmap_event
->file_size
= size
;
3731 mmap_event
->event_id
.header
.size
= sizeof(mmap_event
->event_id
) + size
;
3734 cpuctx
= &get_cpu_var(perf_cpu_context
);
3735 perf_event_mmap_ctx(&cpuctx
->ctx
, mmap_event
);
3736 ctx
= rcu_dereference(current
->perf_event_ctxp
);
3738 perf_event_mmap_ctx(ctx
, mmap_event
);
3739 put_cpu_var(perf_cpu_context
);
3745 void __perf_event_mmap(struct vm_area_struct
*vma
)
3747 struct perf_mmap_event mmap_event
;
3749 if (!atomic_read(&nr_mmap_events
))
3752 mmap_event
= (struct perf_mmap_event
){
3758 .type
= PERF_RECORD_MMAP
,
3764 .start
= vma
->vm_start
,
3765 .len
= vma
->vm_end
- vma
->vm_start
,
3766 .pgoff
= (u64
)vma
->vm_pgoff
<< PAGE_SHIFT
,
3770 perf_event_mmap_event(&mmap_event
);
3774 * IRQ throttle logging
3777 static void perf_log_throttle(struct perf_event
*event
, int enable
)
3779 struct perf_output_handle handle
;
3783 struct perf_event_header header
;
3787 } throttle_event
= {
3789 .type
= PERF_RECORD_THROTTLE
,
3791 .size
= sizeof(throttle_event
),
3793 .time
= perf_clock(),
3794 .id
= primary_event_id(event
),
3795 .stream_id
= event
->id
,
3799 throttle_event
.header
.type
= PERF_RECORD_UNTHROTTLE
;
3801 ret
= perf_output_begin(&handle
, event
, sizeof(throttle_event
), 1, 0);
3805 perf_output_put(&handle
, throttle_event
);
3806 perf_output_end(&handle
);
3810 * Generic event overflow handling, sampling.
3813 static int __perf_event_overflow(struct perf_event
*event
, int nmi
,
3814 int throttle
, struct perf_sample_data
*data
,
3815 struct pt_regs
*regs
)
3817 int events
= atomic_read(&event
->event_limit
);
3818 struct hw_perf_event
*hwc
= &event
->hw
;
3821 throttle
= (throttle
&& event
->pmu
->unthrottle
!= NULL
);
3826 if (hwc
->interrupts
!= MAX_INTERRUPTS
) {
3828 if (HZ
* hwc
->interrupts
>
3829 (u64
)sysctl_perf_event_sample_rate
) {
3830 hwc
->interrupts
= MAX_INTERRUPTS
;
3831 perf_log_throttle(event
, 0);
3836 * Keep re-disabling events even though on the previous
3837 * pass we disabled it - just in case we raced with a
3838 * sched-in and the event got enabled again:
3844 if (event
->attr
.freq
) {
3845 u64 now
= perf_clock();
3846 s64 delta
= now
- hwc
->freq_time_stamp
;
3848 hwc
->freq_time_stamp
= now
;
3850 if (delta
> 0 && delta
< 2*TICK_NSEC
)
3851 perf_adjust_period(event
, delta
, hwc
->last_period
);
3855 * XXX event_limit might not quite work as expected on inherited
3859 event
->pending_kill
= POLL_IN
;
3860 if (events
&& atomic_dec_and_test(&event
->event_limit
)) {
3862 event
->pending_kill
= POLL_HUP
;
3864 event
->pending_disable
= 1;
3865 perf_pending_queue(&event
->pending
,
3866 perf_pending_event
);
3868 perf_event_disable(event
);
3871 if (event
->overflow_handler
)
3872 event
->overflow_handler(event
, nmi
, data
, regs
);
3874 perf_event_output(event
, nmi
, data
, regs
);
3879 int perf_event_overflow(struct perf_event
*event
, int nmi
,
3880 struct perf_sample_data
*data
,
3881 struct pt_regs
*regs
)
3883 return __perf_event_overflow(event
, nmi
, 1, data
, regs
);
3887 * Generic software event infrastructure
3891 * We directly increment event->count and keep a second value in
3892 * event->hw.period_left to count intervals. This period event
3893 * is kept in the range [-sample_period, 0] so that we can use the
3897 static u64
perf_swevent_set_period(struct perf_event
*event
)
3899 struct hw_perf_event
*hwc
= &event
->hw
;
3900 u64 period
= hwc
->last_period
;
3904 hwc
->last_period
= hwc
->sample_period
;
3907 old
= val
= atomic64_read(&hwc
->period_left
);
3911 nr
= div64_u64(period
+ val
, period
);
3912 offset
= nr
* period
;
3914 if (atomic64_cmpxchg(&hwc
->period_left
, old
, val
) != old
)
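/*
 * Worked example for the period bookkeeping above: with a sample period
 * of 100 and period_left driven up to +250 by perf_swevent_add(),
 *
 *	nr     = (100 + 250) / 100 = 3	(three overflows are reported)
 *	offset = 3 * 100 = 300
 *	period_left becomes 250 - 300 = -50
 *
 * so 50 events are already credited toward the next period, keeping
 * period_left in the documented [-sample_period, 0] range.
 */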
3920 static void perf_swevent_overflow(struct perf_event
*event
, u64 overflow
,
3921 int nmi
, struct perf_sample_data
*data
,
3922 struct pt_regs
*regs
)
3924 struct hw_perf_event
*hwc
= &event
->hw
;
3927 data
->period
= event
->hw
.last_period
;
3929 overflow
= perf_swevent_set_period(event
);
3931 if (hwc
->interrupts
== MAX_INTERRUPTS
)
3934 for (; overflow
; overflow
--) {
3935 if (__perf_event_overflow(event
, nmi
, throttle
,
3938 * We inhibit the overflow from happening when
3939 * hwc->interrupts == MAX_INTERRUPTS.
3947 static void perf_swevent_unthrottle(struct perf_event
*event
)
3950 * Nothing to do, we already reset hwc->interrupts.
3954 static void perf_swevent_add(struct perf_event
*event
, u64 nr
,
3955 int nmi
, struct perf_sample_data
*data
,
3956 struct pt_regs
*regs
)
3958 struct hw_perf_event
*hwc
= &event
->hw
;
3960 atomic64_add(nr
, &event
->count
);
3965 if (!hwc
->sample_period
)
3968 if (nr
== 1 && hwc
->sample_period
== 1 && !event
->attr
.freq
)
3969 return perf_swevent_overflow(event
, 1, nmi
, data
, regs
);
3971 if (atomic64_add_negative(nr
, &hwc
->period_left
))
3974 perf_swevent_overflow(event
, 0, nmi
, data
, regs
);
3977 static int perf_swevent_is_counting(struct perf_event
*event
)
3980 * The event is active, we're good!
3982 if (event
->state
== PERF_EVENT_STATE_ACTIVE
)
3986 * The event is off/error, not counting.
3988 if (event
->state
!= PERF_EVENT_STATE_INACTIVE
)
3992 * The event is inactive, if the context is active
3993 * we're part of a group that didn't make it on the 'pmu',
3996 if (event
->ctx
->is_active
)
4000 * We're inactive and the context is too, this means the
4001 * task is scheduled out, we're counting events that happen
4002 * to us, like migration events.
4007 static int perf_tp_event_match(struct perf_event
*event
,
4008 struct perf_sample_data
*data
);
4010 static int perf_exclude_event(struct perf_event
*event
,
4011 struct pt_regs
*regs
)
4014 if (event
->attr
.exclude_user
&& user_mode(regs
))
4017 if (event
->attr
.exclude_kernel
&& !user_mode(regs
))
4024 static int perf_swevent_match(struct perf_event
*event
,
4025 enum perf_type_id type
,
4027 struct perf_sample_data
*data
,
4028 struct pt_regs
*regs
)
4030 if (event
->cpu
!= -1 && event
->cpu
!= smp_processor_id())
4033 if (!perf_swevent_is_counting(event
))
4036 if (event
->attr
.type
!= type
)
4039 if (event
->attr
.config
!= event_id
)
4042 if (perf_exclude_event(event
, regs
))
4045 if (event
->attr
.type
== PERF_TYPE_TRACEPOINT
&&
4046 !perf_tp_event_match(event
, data
))
4052 static void perf_swevent_ctx_event(struct perf_event_context
*ctx
,
4053 enum perf_type_id type
,
4054 u32 event_id
, u64 nr
, int nmi
,
4055 struct perf_sample_data
*data
,
4056 struct pt_regs
*regs
)
4058 struct perf_event
*event
;
4060 list_for_each_entry_rcu(event
, &ctx
->event_list
, event_entry
) {
4061 if (perf_swevent_match(event
, type
, event_id
, data
, regs
))
4062 perf_swevent_add(event
, nr
, nmi
, data
, regs
);
4066 int perf_swevent_get_recursion_context(void)
4068 struct perf_cpu_context
*cpuctx
= &get_cpu_var(perf_cpu_context
);
4075 else if (in_softirq())
4080 if (cpuctx
->recursion
[rctx
]) {
4081 put_cpu_var(perf_cpu_context
);
4085 cpuctx
->recursion
[rctx
]++;
4090 EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context
);
4092 void perf_swevent_put_recursion_context(int rctx
)
4094 struct perf_cpu_context
*cpuctx
= &__get_cpu_var(perf_cpu_context
);
4096 cpuctx
->recursion
[rctx
]--;
4097 put_cpu_var(perf_cpu_context
);
4099 EXPORT_SYMBOL_GPL(perf_swevent_put_recursion_context
);
4101 static void do_perf_sw_event(enum perf_type_id type
, u32 event_id
,
4103 struct perf_sample_data
*data
,
4104 struct pt_regs
*regs
)
4106 struct perf_cpu_context
*cpuctx
;
4107 struct perf_event_context
*ctx
;
4109 cpuctx
= &__get_cpu_var(perf_cpu_context
);
4111 perf_swevent_ctx_event(&cpuctx
->ctx
, type
, event_id
,
4112 nr
, nmi
, data
, regs
);
4114 * doesn't really matter which of the child contexts the
4115 * events ends up in.
4117 ctx
= rcu_dereference(current
->perf_event_ctxp
);
4119 perf_swevent_ctx_event(ctx
, type
, event_id
, nr
, nmi
, data
, regs
);
4123 void __perf_sw_event(u32 event_id
, u64 nr
, int nmi
,
4124 struct pt_regs
*regs
, u64 addr
)
4126 struct perf_sample_data data
;
4129 rctx
= perf_swevent_get_recursion_context();
4133 perf_sample_data_init(&data
, addr
);
4135 do_perf_sw_event(PERF_TYPE_SOFTWARE
, event_id
, nr
, nmi
, &data
, regs
);
4137 perf_swevent_put_recursion_context(rctx
);
4140 static void perf_swevent_read(struct perf_event
*event
)
4144 static int perf_swevent_enable(struct perf_event
*event
)
4146 struct hw_perf_event
*hwc
= &event
->hw
;
4148 if (hwc
->sample_period
) {
4149 hwc
->last_period
= hwc
->sample_period
;
4150 perf_swevent_set_period(event
);
4155 static void perf_swevent_disable(struct perf_event
*event
)
4159 static const struct pmu perf_ops_generic
= {
4160 .enable
= perf_swevent_enable
,
4161 .disable
= perf_swevent_disable
,
4162 .read
= perf_swevent_read
,
4163 .unthrottle
= perf_swevent_unthrottle
,
4167 * hrtimer based swevent callback
4170 static enum hrtimer_restart
perf_swevent_hrtimer(struct hrtimer
*hrtimer
)
4172 enum hrtimer_restart ret
= HRTIMER_RESTART
;
4173 struct perf_sample_data data
;
4174 struct pt_regs
*regs
;
4175 struct perf_event
*event
;
4178 event
= container_of(hrtimer
, struct perf_event
, hw
.hrtimer
);
4179 event
->pmu
->read(event
);
4181 perf_sample_data_init(&data
, 0);
4182 data
.period
= event
->hw
.last_period
;
4183 regs
= get_irq_regs();
4185 * In case we exclude kernel IPs or are somehow not in interrupt
4186 * context, provide the next best thing, the user IP.
4188 if ((event
->attr
.exclude_kernel
|| !regs
) &&
4189 !event
->attr
.exclude_user
)
4190 regs
= task_pt_regs(current
);
4193 if (!(event
->attr
.exclude_idle
&& current
->pid
== 0))
4194 if (perf_event_overflow(event
, 0, &data
, regs
))
4195 ret
= HRTIMER_NORESTART
;
4198 period
= max_t(u64
, 10000, event
->hw
.sample_period
);
4199 hrtimer_forward_now(hrtimer
, ns_to_ktime(period
));
4204 static void perf_swevent_start_hrtimer(struct perf_event
*event
)
4206 struct hw_perf_event
*hwc
= &event
->hw
;
4208 hrtimer_init(&hwc
->hrtimer
, CLOCK_MONOTONIC
, HRTIMER_MODE_REL
);
4209 hwc
->hrtimer
.function
= perf_swevent_hrtimer
;
4210 if (hwc
->sample_period
) {
4213 if (hwc
->remaining
) {
4214 if (hwc
->remaining
< 0)
4217 period
= hwc
->remaining
;
4220 period
= max_t(u64
, 10000, hwc
->sample_period
);
4222 __hrtimer_start_range_ns(&hwc
->hrtimer
,
4223 ns_to_ktime(period
), 0,
4224 HRTIMER_MODE_REL
, 0);
4228 static void perf_swevent_cancel_hrtimer(struct perf_event
*event
)
4230 struct hw_perf_event
*hwc
= &event
->hw
;
4232 if (hwc
->sample_period
) {
4233 ktime_t remaining
= hrtimer_get_remaining(&hwc
->hrtimer
);
4234 hwc
->remaining
= ktime_to_ns(remaining
);
4236 hrtimer_cancel(&hwc
->hrtimer
);
4241 * Software event: cpu wall time clock
4244 static void cpu_clock_perf_event_update(struct perf_event
*event
)
4246 int cpu
= raw_smp_processor_id();
4250 now
= cpu_clock(cpu
);
4251 prev
= atomic64_xchg(&event
->hw
.prev_count
, now
);
4252 atomic64_add(now
- prev
, &event
->count
);
4255 static int cpu_clock_perf_event_enable(struct perf_event
*event
)
4257 struct hw_perf_event
*hwc
= &event
->hw
;
4258 int cpu
= raw_smp_processor_id();
4260 atomic64_set(&hwc
->prev_count
, cpu_clock(cpu
));
4261 perf_swevent_start_hrtimer(event
);
4266 static void cpu_clock_perf_event_disable(struct perf_event
*event
)
4268 perf_swevent_cancel_hrtimer(event
);
4269 cpu_clock_perf_event_update(event
);
4272 static void cpu_clock_perf_event_read(struct perf_event
*event
)
4274 cpu_clock_perf_event_update(event
);
4277 static const struct pmu perf_ops_cpu_clock
= {
4278 .enable
= cpu_clock_perf_event_enable
,
4279 .disable
= cpu_clock_perf_event_disable
,
4280 .read
= cpu_clock_perf_event_read
,
4284 * Software event: task time clock
4287 static void task_clock_perf_event_update(struct perf_event
*event
, u64 now
)
4292 prev
= atomic64_xchg(&event
->hw
.prev_count
, now
);
4294 atomic64_add(delta
, &event
->count
);
4297 static int task_clock_perf_event_enable(struct perf_event
*event
)
4299 struct hw_perf_event
*hwc
= &event
->hw
;
4302 now
= event
->ctx
->time
;
4304 atomic64_set(&hwc
->prev_count
, now
);
4306 perf_swevent_start_hrtimer(event
);
4311 static void task_clock_perf_event_disable(struct perf_event
*event
)
4313 perf_swevent_cancel_hrtimer(event
);
4314 task_clock_perf_event_update(event
, event
->ctx
->time
);
4318 static void task_clock_perf_event_read(struct perf_event
*event
)
4323 update_context_time(event
->ctx
);
4324 time
= event
->ctx
->time
;
4326 u64 now
= perf_clock();
4327 u64 delta
= now
- event
->ctx
->timestamp
;
4328 time
= event
->ctx
->time
+ delta
;
4331 task_clock_perf_event_update(event
, time
);
4334 static const struct pmu perf_ops_task_clock
= {
4335 .enable
= task_clock_perf_event_enable
,
4336 .disable
= task_clock_perf_event_disable
,
4337 .read
= task_clock_perf_event_read
,
4340 #ifdef CONFIG_EVENT_TRACING
4342 void perf_tp_event(int event_id
, u64 addr
, u64 count
, void *record
,
4343 int entry_size
, struct pt_regs
*regs
)
4345 struct perf_sample_data data
;
4346 struct perf_raw_record raw
= {
4351 perf_sample_data_init(&data
, addr
);
4354 /* Trace events already protected against recursion */
4355 do_perf_sw_event(PERF_TYPE_TRACEPOINT
, event_id
, count
, 1,
4358 EXPORT_SYMBOL_GPL(perf_tp_event
);
4360 static int perf_tp_event_match(struct perf_event
*event
,
4361 struct perf_sample_data
*data
)
4363 void *record
= data
->raw
->data
;
4365 if (likely(!event
->filter
) || filter_match_preds(event
->filter
, record
))
4370 static void tp_perf_event_destroy(struct perf_event
*event
)
4372 perf_trace_disable(event
->attr
.config
);
4375 static const struct pmu
*tp_perf_event_init(struct perf_event
*event
)
4378 * Raw tracepoint data is a severe data leak, only allow root to
4381 if ((event
->attr
.sample_type
& PERF_SAMPLE_RAW
) &&
4382 perf_paranoid_tracepoint_raw() &&
4383 !capable(CAP_SYS_ADMIN
))
4384 return ERR_PTR(-EPERM
);
4386 if (perf_trace_enable(event
->attr
.config
))
4389 event
->destroy
= tp_perf_event_destroy
;
4391 return &perf_ops_generic
;
4394 static int perf_event_set_filter(struct perf_event
*event
, void __user
*arg
)
4399 if (event
->attr
.type
!= PERF_TYPE_TRACEPOINT
)
4402 filter_str
= strndup_user(arg
, PAGE_SIZE
);
4403 if (IS_ERR(filter_str
))
4404 return PTR_ERR(filter_str
);
4406 ret
= ftrace_profile_set_filter(event
, event
->attr
.config
, filter_str
);
4412 static void perf_event_free_filter(struct perf_event
*event
)
4414 ftrace_profile_free_filter(event
);
4419 static int perf_tp_event_match(struct perf_event
*event
,
4420 struct perf_sample_data
*data
)
4425 static const struct pmu
*tp_perf_event_init(struct perf_event
*event
)
4430 static int perf_event_set_filter(struct perf_event
*event
, void __user
*arg
)
4435 static void perf_event_free_filter(struct perf_event
*event
)
4439 #endif /* CONFIG_EVENT_TRACING */
4441 #ifdef CONFIG_HAVE_HW_BREAKPOINT
4442 static void bp_perf_event_destroy(struct perf_event
*event
)
4444 release_bp_slot(event
);
4447 static const struct pmu
*bp_perf_event_init(struct perf_event
*bp
)
4451 err
= register_perf_hw_breakpoint(bp
);
4453 return ERR_PTR(err
);
4455 bp
->destroy
= bp_perf_event_destroy
;
4457 return &perf_ops_bp
;
4460 void perf_bp_event(struct perf_event
*bp
, void *data
)
4462 struct perf_sample_data sample
;
4463 struct pt_regs
*regs
= data
;
4465 perf_sample_data_init(&sample
, bp
->attr
.bp_addr
);
4467 if (!perf_exclude_event(bp
, regs
))
4468 perf_swevent_add(bp
, 1, 1, &sample
, regs
);
4471 static const struct pmu
*bp_perf_event_init(struct perf_event
*bp
)
4476 void perf_bp_event(struct perf_event
*bp
, void *regs
)
4481 atomic_t perf_swevent_enabled
[PERF_COUNT_SW_MAX
];
4483 static void sw_perf_event_destroy(struct perf_event
*event
)
4485 u64 event_id
= event
->attr
.config
;
4487 WARN_ON(event
->parent
);
4489 atomic_dec(&perf_swevent_enabled
[event_id
]);
4492 static const struct pmu
*sw_perf_event_init(struct perf_event
*event
)
4494 const struct pmu
*pmu
= NULL
;
4495 u64 event_id
= event
->attr
.config
;
4498 * Software events (currently) can't in general distinguish
4499 * between user, kernel and hypervisor events.
4500 * However, context switches and cpu migrations are considered
4501 * to be kernel events, and page faults are never hypervisor
4505 case PERF_COUNT_SW_CPU_CLOCK
:
4506 pmu
= &perf_ops_cpu_clock
;
4509 case PERF_COUNT_SW_TASK_CLOCK
:
4511 * If the user instantiates this as a per-cpu event,
4512 * use the cpu_clock event instead.
4514 if (event
->ctx
->task
)
4515 pmu
= &perf_ops_task_clock
;
4517 pmu
= &perf_ops_cpu_clock
;
4520 case PERF_COUNT_SW_PAGE_FAULTS
:
4521 case PERF_COUNT_SW_PAGE_FAULTS_MIN
:
4522 case PERF_COUNT_SW_PAGE_FAULTS_MAJ
:
4523 case PERF_COUNT_SW_CONTEXT_SWITCHES
:
4524 case PERF_COUNT_SW_CPU_MIGRATIONS
:
4525 case PERF_COUNT_SW_ALIGNMENT_FAULTS
:
4526 case PERF_COUNT_SW_EMULATION_FAULTS
:
4527 if (!event
->parent
) {
4528 atomic_inc(&perf_swevent_enabled
[event_id
]);
4529 event
->destroy
= sw_perf_event_destroy
;
4531 pmu
= &perf_ops_generic
;
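/*
 * As the switch above shows, a per-task PERF_COUNT_SW_TASK_CLOCK event
 * gets the task clock pmu while a per-CPU instance silently falls back
 * to the cpu clock. An illustrative userspace helper (syscall use as in
 * the earlier mmap sketch; the helper name is not from this file):
 */
#if 0 /* userspace example, not built */
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static int open_sw_counter(__u64 config, pid_t pid, int cpu)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = config;

	return syscall(__NR_perf_event_open, &attr, pid, cpu, -1, 0);
}

/*
 * open_sw_counter(PERF_COUNT_SW_TASK_CLOCK, 0, -1): per task, task clock
 * open_sw_counter(PERF_COUNT_SW_TASK_CLOCK, -1, 0): per cpu, cpu clock
 */
#endif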
4539 * Allocate and initialize a event structure
4541 static struct perf_event
*
4542 perf_event_alloc(struct perf_event_attr
*attr
,
4544 struct perf_event_context
*ctx
,
4545 struct perf_event
*group_leader
,
4546 struct perf_event
*parent_event
,
4547 perf_overflow_handler_t overflow_handler
,
4550 const struct pmu
*pmu
;
4551 struct perf_event
*event
;
4552 struct hw_perf_event
*hwc
;
4555 event
= kzalloc(sizeof(*event
), gfpflags
);
4557 return ERR_PTR(-ENOMEM
);
4560 * Single events are their own group leaders, with an
4561 * empty sibling list:
4564 group_leader
= event
;
4566 mutex_init(&event
->child_mutex
);
4567 INIT_LIST_HEAD(&event
->child_list
);
4569 INIT_LIST_HEAD(&event
->group_entry
);
4570 INIT_LIST_HEAD(&event
->event_entry
);
4571 INIT_LIST_HEAD(&event
->sibling_list
);
4572 init_waitqueue_head(&event
->waitq
);
4574 mutex_init(&event
->mmap_mutex
);
4577 event
->attr
= *attr
;
4578 event
->group_leader
= group_leader
;
4583 event
->parent
= parent_event
;
4585 event
->ns
= get_pid_ns(current
->nsproxy
->pid_ns
);
4586 event
->id
= atomic64_inc_return(&perf_event_id
);
4588 event
->state
= PERF_EVENT_STATE_INACTIVE
;
4590 if (!overflow_handler
&& parent_event
)
4591 overflow_handler
= parent_event
->overflow_handler
;
4593 event
->overflow_handler
= overflow_handler
;
4596 event
->state
= PERF_EVENT_STATE_OFF
;
4601 hwc
->sample_period
= attr
->sample_period
;
4602 if (attr
->freq
&& attr
->sample_freq
)
4603 hwc
->sample_period
= 1;
4604 hwc
->last_period
= hwc
->sample_period
;
4606 atomic64_set(&hwc
->period_left
, hwc
->sample_period
);
4609 * we currently do not support PERF_FORMAT_GROUP on inherited events
4611 if (attr
->inherit
&& (attr
->read_format
& PERF_FORMAT_GROUP
))
4614 switch (attr
->type
) {
4616 case PERF_TYPE_HARDWARE
:
4617 case PERF_TYPE_HW_CACHE
:
4618 pmu
= hw_perf_event_init(event
);
4621 case PERF_TYPE_SOFTWARE
:
4622 pmu
= sw_perf_event_init(event
);
4625 case PERF_TYPE_TRACEPOINT
:
4626 pmu
= tp_perf_event_init(event
);
4629 case PERF_TYPE_BREAKPOINT
:
4630 pmu
= bp_perf_event_init(event
);
4641 else if (IS_ERR(pmu
))
4646 put_pid_ns(event
->ns
);
4648 return ERR_PTR(err
);
4653 if (!event
->parent
) {
4654 atomic_inc(&nr_events
);
4655 if (event
->attr
.mmap
)
4656 atomic_inc(&nr_mmap_events
);
4657 if (event
->attr
.comm
)
4658 atomic_inc(&nr_comm_events
);
4659 if (event
->attr
.task
)
4660 atomic_inc(&nr_task_events
);
4666 static int perf_copy_attr(struct perf_event_attr __user
*uattr
,
4667 struct perf_event_attr
*attr
)
4672 if (!access_ok(VERIFY_WRITE
, uattr
, PERF_ATTR_SIZE_VER0
))
4676 * zero the full structure, so that a short copy will be nice.
4678 memset(attr
, 0, sizeof(*attr
));
4680 ret
= get_user(size
, &uattr
->size
);
4684 if (size
> PAGE_SIZE
) /* silly large */
4687 if (!size
) /* abi compat */
4688 size
= PERF_ATTR_SIZE_VER0
;
4690 if (size
< PERF_ATTR_SIZE_VER0
)
4694 * If we're handed a bigger struct than we know of,
4695 * ensure all the unknown bits are 0 - i.e. new
4696 * user-space does not rely on any kernel feature
4697 * extensions we dont know about yet.
4699 if (size
> sizeof(*attr
)) {
4700 unsigned char __user
*addr
;
4701 unsigned char __user
*end
;
4704 addr
= (void __user
*)uattr
+ sizeof(*attr
);
4705 end
= (void __user
*)uattr
+ size
;
4707 for (; addr
< end
; addr
++) {
4708 ret
= get_user(val
, addr
);
4714 size
= sizeof(*attr
);
4717 ret
= copy_from_user(attr
, uattr
, size
);
4722 * If the type exists, the corresponding creation will verify
4725 if (attr
->type
>= PERF_TYPE_MAX
)
4728 if (attr
->__reserved_1
)
4731 if (attr
->sample_type
& ~(PERF_SAMPLE_MAX
-1))
4734 if (attr
->read_format
& ~(PERF_FORMAT_MAX
-1))
4741 put_user(sizeof(*attr
), &uattr
->size
);
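/*
 * perf_copy_attr() keeps the ABI forward and backward compatible: a zero
 * size means PERF_ATTR_SIZE_VER0, a larger-than-known struct is accepted
 * only if the extra bytes are zero, and the size actually consumed is
 * written back. On the userspace side that reduces to the sketch below
 * (illustrative only):
 */
#if 0 /* userspace example, not built */
#include <string.h>
#include <linux/perf_event.h>

static void init_attr(struct perf_event_attr *attr)
{
	/*
	 * Zero the whole struct so that, when built against headers newer
	 * than the running kernel, the bytes the kernel does not know
	 * about are zero and the attr is not rejected.
	 */
	memset(attr, 0, sizeof(*attr));
	attr->size = sizeof(*attr);
}
#endif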
4746 static int perf_event_set_output(struct perf_event
*event
, int output_fd
)
4748 struct perf_event
*output_event
= NULL
;
4749 struct file
*output_file
= NULL
;
4750 struct perf_event
*old_output
;
4751 int fput_needed
= 0;
4757 output_file
= fget_light(output_fd
, &fput_needed
);
4761 if (output_file
->f_op
!= &perf_fops
)
4764 output_event
= output_file
->private_data
;
4766 /* Don't chain output fds */
4767 if (output_event
->output
)
4770 /* Don't set an output fd when we already have an output channel */
4774 atomic_long_inc(&output_file
->f_count
);
4777 mutex_lock(&event
->mmap_mutex
);
4778 old_output
= event
->output
;
4779 rcu_assign_pointer(event
->output
, output_event
);
4780 mutex_unlock(&event
->mmap_mutex
);
4784 * we need to make sure no existing perf_output_*()
4785 * is still referencing this event.
4788 fput(old_output
->filp
);
4793 fput_light(output_file
, fput_needed
);
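/*
 * perf_event_set_output() lets several events share one buffer: either
 * at open time with PERF_FLAG_FD_OUTPUT (group_fd then names the event
 * whose buffer is reused) or later through the PERF_EVENT_IOC_SET_OUTPUT
 * ioctl; chaining output fds is refused above. A minimal userspace
 * sketch of the ioctl path (helper name illustrative):
 */
#if 0 /* userspace example, not built */
#include <sys/ioctl.h>
#include <linux/perf_event.h>

/* route records from 'fd' into the buffer already mmap()ed on 'target_fd' */
static int redirect_output(int fd, int target_fd)
{
	return ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, target_fd);
}
#endif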
4798 * sys_perf_event_open - open a performance event, associate it to a task/cpu
4800 * @attr_uptr: event_id type attributes for monitoring/sampling
4803 * @group_fd: group leader event fd
4805 SYSCALL_DEFINE5(perf_event_open
,
4806 struct perf_event_attr __user
*, attr_uptr
,
4807 pid_t
, pid
, int, cpu
, int, group_fd
, unsigned long, flags
)
4809 struct perf_event
*event
, *group_leader
;
4810 struct perf_event_attr attr
;
4811 struct perf_event_context
*ctx
;
4812 struct file
*event_file
= NULL
;
4813 struct file
*group_file
= NULL
;
4815 int fput_needed
= 0;
4818 /* for future expandability... */
4819 if (flags
& ~(PERF_FLAG_FD_NO_GROUP
| PERF_FLAG_FD_OUTPUT
))
4822 err
= perf_copy_attr(attr_uptr
, &attr
);
4826 if (!attr
.exclude_kernel
) {
4827 if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN
))
4832 if (attr
.sample_freq
> sysctl_perf_event_sample_rate
)
4836 event_fd
= get_unused_fd_flags(O_RDWR
);
4841 * Get the target context (task or percpu):
4843 ctx
= find_get_context(pid
, cpu
);
4850 * Look up the group leader (we will attach this event to it):
4852 group_leader
= NULL
;
4853 if (group_fd
!= -1 && !(flags
& PERF_FLAG_FD_NO_GROUP
)) {
4855 group_file
= fget_light(group_fd
, &fput_needed
);
4857 goto err_put_context
;
4858 if (group_file
->f_op
!= &perf_fops
)
4859 goto err_put_context
;
4861 group_leader
= group_file
->private_data
;
4863 * Do not allow a recursive hierarchy (this new sibling
4864 * becoming part of another group-sibling):
4866 if (group_leader
->group_leader
!= group_leader
)
4867 goto err_put_context
;
4869 * Do not allow to attach to a group in a different
4870 * task or CPU context:
4872 if (group_leader
->ctx
!= ctx
)
4873 goto err_put_context
;
4875 * Only a group leader can be exclusive or pinned
4877 if (attr
.exclusive
|| attr
.pinned
)
4878 goto err_put_context
;
4881 event
= perf_event_alloc(&attr
, cpu
, ctx
, group_leader
,
4882 NULL
, NULL
, GFP_KERNEL
);
4883 err
= PTR_ERR(event
);
4885 goto err_put_context
;
4887 event_file
= anon_inode_getfile("[perf_event]", &perf_fops
, event
, O_RDWR
);
4888 if (IS_ERR(event_file
)) {
4889 err
= PTR_ERR(event_file
);
4890 goto err_free_put_context
;
4893 if (flags
& PERF_FLAG_FD_OUTPUT
) {
4894 err
= perf_event_set_output(event
, group_fd
);
4896 goto err_fput_free_put_context
;
4899 event
->filp
= event_file
;
4900 WARN_ON_ONCE(ctx
->parent_ctx
);
4901 mutex_lock(&ctx
->mutex
);
4902 perf_install_in_context(ctx
, event
, cpu
);
4904 mutex_unlock(&ctx
->mutex
);
4906 event
->owner
= current
;
4907 get_task_struct(current
);
4908 mutex_lock(¤t
->perf_event_mutex
);
4909 list_add_tail(&event
->owner_entry
, ¤t
->perf_event_list
);
4910 mutex_unlock(¤t
->perf_event_mutex
);
4912 fput_light(group_file
, fput_needed
);
4913 fd_install(event_fd
, event_file
);
4916 err_fput_free_put_context
:
4918 err_free_put_context
:
4921 fput_light(group_file
, fput_needed
);
4924 put_unused_fd(event_fd
);
4929 * perf_event_create_kernel_counter
4931 * @attr: attributes of the counter to create
4932 * @cpu: cpu in which the counter is bound
4933 * @pid: task to profile
4936 perf_event_create_kernel_counter(struct perf_event_attr
*attr
, int cpu
,
4938 perf_overflow_handler_t overflow_handler
)
4940 struct perf_event
*event
;
4941 struct perf_event_context
*ctx
;
4945 * Get the target context (task or percpu):
4948 ctx
= find_get_context(pid
, cpu
);
4954 event
= perf_event_alloc(attr
, cpu
, ctx
, NULL
,
4955 NULL
, overflow_handler
, GFP_KERNEL
);
4956 if (IS_ERR(event
)) {
4957 err
= PTR_ERR(event
);
4958 goto err_put_context
;
4962 WARN_ON_ONCE(ctx
->parent_ctx
);
4963 mutex_lock(&ctx
->mutex
);
4964 perf_install_in_context(ctx
, event
, cpu
);
4966 mutex_unlock(&ctx
->mutex
);
4968 event
->owner
= current
;
4969 get_task_struct(current
);
4970 mutex_lock(¤t
->perf_event_mutex
);
4971 list_add_tail(&event
->owner_entry
, ¤t
->perf_event_list
);
4972 mutex_unlock(¤t
->perf_event_mutex
);
4979 return ERR_PTR(err
);
4981 EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter
);
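/*
 * Hedged in-kernel usage sketch (not from this file): hw_breakpoint
 * registration is the typical caller, but any kernel code can create a
 * counter this way and tear it down again with perf_event_release_kernel().
 * The callback and helper names below are illustrative.
 */
#if 0 /* illustrative sketch, not built */
static void my_overflow(struct perf_event *event, int nmi,
			struct perf_sample_data *data, struct pt_regs *regs)
{
	/* runs from the PMU overflow path; must be NMI-safe */
}

static struct perf_event *create_cycle_counter(int cpu)
{
	struct perf_event_attr attr = {
		.type		= PERF_TYPE_HARDWARE,
		.config		= PERF_COUNT_HW_CPU_CYCLES,
		.size		= sizeof(attr),
		.sample_period	= 1000000,
	};

	/* pid == -1: count on 'cpu' regardless of which task is running */
	return perf_event_create_kernel_counter(&attr, cpu, -1, my_overflow);
}
#endif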
4984 * inherit a event from parent task to child task:
4986 static struct perf_event
*
4987 inherit_event(struct perf_event
*parent_event
,
4988 struct task_struct
*parent
,
4989 struct perf_event_context
*parent_ctx
,
4990 struct task_struct
*child
,
4991 struct perf_event
*group_leader
,
4992 struct perf_event_context
*child_ctx
)
4994 struct perf_event
*child_event
;
4997 * Instead of creating recursive hierarchies of events,
4998 * we link inherited events back to the original parent,
4999 * which has a filp for sure, which we use as the reference
5002 if (parent_event
->parent
)
5003 parent_event
= parent_event
->parent
;
5005 child_event
= perf_event_alloc(&parent_event
->attr
,
5006 parent_event
->cpu
, child_ctx
,
5007 group_leader
, parent_event
,
5009 if (IS_ERR(child_event
))
5014 * Make the child state follow the state of the parent event,
5015 * not its attr.disabled bit. We hold the parent's mutex,
5016 * so we won't race with perf_event_{en, dis}able_family.
5018 if (parent_event
->state
>= PERF_EVENT_STATE_INACTIVE
)
5019 child_event
->state
= PERF_EVENT_STATE_INACTIVE
;
5021 child_event
->state
= PERF_EVENT_STATE_OFF
;
5023 if (parent_event
->attr
.freq
) {
5024 u64 sample_period
= parent_event
->hw
.sample_period
;
5025 struct hw_perf_event
*hwc
= &child_event
->hw
;
5027 hwc
->sample_period
= sample_period
;
5028 hwc
->last_period
= sample_period
;
5030 atomic64_set(&hwc
->period_left
, sample_period
);
5033 child_event
->overflow_handler
= parent_event
->overflow_handler
;
5036 * Link it up in the child's context:
5038 add_event_to_ctx(child_event
, child_ctx
);
5041 * Get a reference to the parent filp - we will fput it
5042 * when the child event exits. This is safe to do because
5043 * we are in the parent and we know that the filp still
5044 * exists and has a nonzero count:
5046 atomic_long_inc(&parent_event
->filp
->f_count
);
5049 * Link this into the parent event's child list
5051 WARN_ON_ONCE(parent_event
->ctx
->parent_ctx
);
5052 mutex_lock(&parent_event
->child_mutex
);
5053 list_add_tail(&child_event
->child_list
, &parent_event
->child_list
);
5054 mutex_unlock(&parent_event
->child_mutex
);
5059 static int inherit_group(struct perf_event
*parent_event
,
5060 struct task_struct
*parent
,
5061 struct perf_event_context
*parent_ctx
,
5062 struct task_struct
*child
,
5063 struct perf_event_context
*child_ctx
)
5065 struct perf_event
*leader
;
5066 struct perf_event
*sub
;
5067 struct perf_event
*child_ctr
;
5069 leader
= inherit_event(parent_event
, parent
, parent_ctx
,
5070 child
, NULL
, child_ctx
);
5072 return PTR_ERR(leader
);
5073 list_for_each_entry(sub
, &parent_event
->sibling_list
, group_entry
) {
5074 child_ctr
= inherit_event(sub
, parent
, parent_ctx
,
5075 child
, leader
, child_ctx
);
5076 if (IS_ERR(child_ctr
))
5077 return PTR_ERR(child_ctr
);
5082 static void sync_child_event(struct perf_event
*child_event
,
5083 struct task_struct
*child
)
5085 struct perf_event
*parent_event
= child_event
->parent
;
5088 if (child_event
->attr
.inherit_stat
)
5089 perf_event_read_event(child_event
, child
);
5091 child_val
= atomic64_read(&child_event
->count
);
5094 * Add back the child's count to the parent's count:
5096 atomic64_add(child_val
, &parent_event
->count
);
5097 atomic64_add(child_event
->total_time_enabled
,
5098 &parent_event
->child_total_time_enabled
);
5099 atomic64_add(child_event
->total_time_running
,
5100 &parent_event
->child_total_time_running
);
5103 * Remove this event from the parent's list
5105 WARN_ON_ONCE(parent_event
->ctx
->parent_ctx
);
5106 mutex_lock(&parent_event
->child_mutex
);
5107 list_del_init(&child_event
->child_list
);
5108 mutex_unlock(&parent_event
->child_mutex
);
5111 * Release the parent event, if this was the last
5114 fput(parent_event
->filp
);
5118 __perf_event_exit_task(struct perf_event
*child_event
,
5119 struct perf_event_context
*child_ctx
,
5120 struct task_struct
*child
)
5122 struct perf_event
*parent_event
;
5124 perf_event_remove_from_context(child_event
);
5126 parent_event
= child_event
->parent
;
5128 * It can happen that parent exits first, and has events
5129 * that are still around due to the child reference. These
5130 * events need to be zapped - but otherwise linger.
5133 sync_child_event(child_event
, child
);
5134 free_event(child_event
);
/*
 * When a child task exits, feed back event values to parent events.
 */
void perf_event_exit_task(struct task_struct *child)
{
	struct perf_event *child_event, *tmp;
	struct perf_event_context *child_ctx;
	unsigned long flags;

	if (likely(!child->perf_event_ctxp)) {
		perf_event_task(child, NULL, 0);
		return;
	}

	local_irq_save(flags);
	/*
	 * We can't reschedule here because interrupts are disabled,
	 * and either child is current or it is a task that can't be
	 * scheduled, so we are now safe from rescheduling changing
	 * our context.
	 */
	child_ctx = child->perf_event_ctxp;
	__perf_event_task_sched_out(child_ctx);

	/*
	 * Take the context lock here so that if find_get_context is
	 * reading child->perf_event_ctxp, we wait until it has
	 * incremented the context's refcount before we do put_ctx below.
	 */
	raw_spin_lock(&child_ctx->lock);
	child->perf_event_ctxp = NULL;
	/*
	 * If this context is a clone, unclone it so it can't get
	 * swapped to another process while we're removing all
	 * the events from it.
	 */
	unclone_ctx(child_ctx);
	update_context_time(child_ctx);
	raw_spin_unlock_irqrestore(&child_ctx->lock, flags);

	/*
	 * Report the task dead after unscheduling the events so that we
	 * won't get any samples after PERF_RECORD_EXIT. We can however still
	 * get a few PERF_RECORD_READ events.
	 */
	perf_event_task(child, child_ctx, 0);

	/*
	 * We can recurse on the same lock type through:
	 *
	 *  __perf_event_exit_task()
	 *    sync_child_event()
	 *      fput(parent_event->filp)
	 *        perf_release()
	 *          mutex_lock(&ctx->mutex)
	 *
	 * But since it's the parent context it won't be the same instance.
	 */
	mutex_lock_nested(&child_ctx->mutex, SINGLE_DEPTH_NESTING);

again:
	list_for_each_entry_safe(child_event, tmp, &child_ctx->pinned_groups,
				 group_entry)
		__perf_event_exit_task(child_event, child_ctx, child);

	list_for_each_entry_safe(child_event, tmp, &child_ctx->flexible_groups,
				 group_entry)
		__perf_event_exit_task(child_event, child_ctx, child);

	/*
	 * If the last event was a group event, it will have appended all
	 * its siblings to the list, but we obtained 'tmp' before that which
	 * will still point to the list head terminating the iteration.
	 */
	if (!list_empty(&child_ctx->pinned_groups) ||
	    !list_empty(&child_ctx->flexible_groups))
		goto again;

	mutex_unlock(&child_ctx->mutex);

	put_ctx(child_ctx);
}
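/*
 * Teardown order matters here: the child's events are unscheduled first,
 * PERF_RECORD_EXIT is emitted, and only then is each child event torn
 * down via __perf_event_exit_task(), which feeds its counts back to the
 * parent. The final put_ctx() pairs with the initial reference the
 * context has held since it was created at fork time.
 */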
static void perf_free_event(struct perf_event *event,
			    struct perf_event_context *ctx)
{
	struct perf_event *parent = event->parent;

	if (WARN_ON_ONCE(!parent))
		return;

	mutex_lock(&parent->child_mutex);
	list_del_init(&event->child_list);
	mutex_unlock(&parent->child_mutex);

	fput(parent->filp);

	list_del_event(event, ctx);
	free_event(event);
}
/*
 * Free an unexposed, unused context as created by inheritance in
 * perf_event_init_task() below; used by fork() in case of failure.
 */
void perf_event_free_task(struct task_struct *task)
{
	struct perf_event_context *ctx = task->perf_event_ctxp;
	struct perf_event *event, *tmp;

	if (!ctx)
		return;

	mutex_lock(&ctx->mutex);
again:
	list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry)
		perf_free_event(event, ctx);

	list_for_each_entry_safe(event, tmp, &ctx->flexible_groups,
				 group_entry)
		perf_free_event(event, ctx);

	if (!list_empty(&ctx->pinned_groups) ||
	    !list_empty(&ctx->flexible_groups))
		goto again;

	mutex_unlock(&ctx->mutex);

	put_ctx(ctx);
}
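/*
 * perf_event_free_task() is the fork-failure counterpart of
 * perf_event_exit_task(): the context was never exposed to userspace, so
 * its events can simply be unlinked and freed without reporting
 * PERF_RECORD_EXIT or syncing counts back to a parent.
 */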
static int
inherit_task_group(struct perf_event *event, struct task_struct *parent,
		   struct perf_event_context *parent_ctx,
		   struct task_struct *child,
		   int *inherited_all)
{
	int ret;
	struct perf_event_context *child_ctx = child->perf_event_ctxp;

	if (!event->attr.inherit) {
		*inherited_all = 0;
		return 0;
	}

	if (!child_ctx) {
		/*
		 * This is executed from the parent task context, so
		 * inherit events that have been marked for cloning.
		 * First allocate and initialize a context for the
		 * child.
		 */
		child_ctx = kzalloc(sizeof(struct perf_event_context),
				    GFP_KERNEL);
		if (!child_ctx)
			return -ENOMEM;

		__perf_event_init_context(child_ctx, child);
		child->perf_event_ctxp = child_ctx;
		get_task_struct(child);
	}

	ret = inherit_group(event, parent, parent_ctx,
			    child, child_ctx);

	if (ret)
		*inherited_all = 0;

	return ret;
}
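/*
 * Note that the child's perf_event_context is allocated lazily: it is only
 * created once the first inheritable event is seen, and *inherited_all is
 * cleared as soon as any parent event is not marked attr.inherit, which
 * later prevents the child context from being treated as a clone.
 */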
/*
 * Initialize the perf_event context in task_struct
 */
int perf_event_init_task(struct task_struct *child)
{
	struct perf_event_context *child_ctx, *parent_ctx;
	struct perf_event_context *cloned_ctx;
	struct perf_event *event;
	struct task_struct *parent = current;
	int inherited_all = 1;
	int ret = 0;

	child->perf_event_ctxp = NULL;

	mutex_init(&child->perf_event_mutex);
	INIT_LIST_HEAD(&child->perf_event_list);

	if (likely(!parent->perf_event_ctxp))
		return 0;

	/*
	 * If the parent's context is a clone, pin it so it won't get
	 * swapped under us.
	 */
	parent_ctx = perf_pin_task_context(parent);

	/*
	 * No need to check if parent_ctx != NULL here; since we saw
	 * it non-NULL earlier, the only reason for it to become NULL
	 * is if we exit, and since we're currently in the middle of
	 * a fork we can't be exiting at the same time.
	 */

	/*
	 * Lock the parent list. No need to lock the child - not PID
	 * hashed yet and not running, so nobody can access it.
	 */
	mutex_lock(&parent_ctx->mutex);

	/*
	 * We don't have to disable NMIs - we are only looking at
	 * the list, not manipulating it:
	 */
	list_for_each_entry(event, &parent_ctx->pinned_groups, group_entry) {
		ret = inherit_task_group(event, parent, parent_ctx, child,
					 &inherited_all);
		if (ret)
			break;
	}

	list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) {
		ret = inherit_task_group(event, parent, parent_ctx, child,
					 &inherited_all);
		if (ret)
			break;
	}

	child_ctx = child->perf_event_ctxp;

	if (child_ctx && inherited_all) {
		/*
		 * Mark the child context as a clone of the parent
		 * context, or of whatever the parent is a clone of.
		 * Note that if the parent is a clone, it could get
		 * uncloned at any point, but that doesn't matter
		 * because the list of events and the generation
		 * count can't have changed since we took the mutex.
		 */
		cloned_ctx = rcu_dereference(parent_ctx->parent_ctx);
		if (cloned_ctx) {
			child_ctx->parent_ctx = cloned_ctx;
			child_ctx->parent_gen = parent_ctx->parent_gen;
		} else {
			child_ctx->parent_ctx = parent_ctx;
			child_ctx->parent_gen = parent_ctx->generation;
		}
		get_ctx(child_ctx->parent_ctx);
	}

	mutex_unlock(&parent_ctx->mutex);

	perf_unpin_context(parent_ctx);

	return ret;
}
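/*
 * perf_event_init_task() is expected to run from the fork path (the
 * parent is 'current'), roughly along the lines of:
 *
 *	copy_process()
 *	  perf_event_init_task(child)	// clone inheritable events
 *	  ...
 *	  perf_event_free_task(child)	// only on a failed fork
 *
 * The exact call sites live in kernel/fork.c; the sketch above is an
 * illustration, not a quote of that file.
 */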
static void __init perf_event_init_all_cpus(void)
{
	int cpu;
	struct perf_cpu_context *cpuctx;

	for_each_possible_cpu(cpu) {
		cpuctx = &per_cpu(perf_cpu_context, cpu);
		__perf_event_init_context(&cpuctx->ctx, NULL);
	}
}
static void __cpuinit perf_event_init_cpu(int cpu)
{
	struct perf_cpu_context *cpuctx;

	cpuctx = &per_cpu(perf_cpu_context, cpu);

	spin_lock(&perf_resource_lock);
	cpuctx->max_pertask = perf_max_events - perf_reserved_percpu;
	spin_unlock(&perf_resource_lock);
}
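/*
 * max_pertask is simply the number of hardware events not held back for
 * per-CPU reservations. For example, with perf_max_events == 4 and
 * perf_reserved_percpu == 1, a newly initialized CPU would start with
 * max_pertask == 3. (The numbers are illustrative; perf_max_events is
 * set by the architecture code.)
 */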
#ifdef CONFIG_HOTPLUG_CPU
static void __perf_event_exit_cpu(void *info)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_event_context *ctx = &cpuctx->ctx;
	struct perf_event *event, *tmp;

	list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry)
		__perf_event_remove_from_context(event);
	list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, group_entry)
		__perf_event_remove_from_context(event);
}

static void perf_event_exit_cpu(int cpu)
{
	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
	struct perf_event_context *ctx = &cpuctx->ctx;

	mutex_lock(&ctx->mutex);
	smp_call_function_single(cpu, __perf_event_exit_cpu, NULL, 1);
	mutex_unlock(&ctx->mutex);
}
#else
static inline void perf_event_exit_cpu(int cpu) { }
#endif
static int __cpuinit
perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
{
	unsigned int cpu = (long)hcpu;

	switch (action) {

	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		perf_event_init_cpu(cpu);
		break;

	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		perf_event_exit_cpu(cpu);
		break;

	default:
		break;
	}

	return NOTIFY_OK;
}

/*
 * This has to have a higher priority than migration_notifier in sched.c.
 */
static struct notifier_block __cpuinitdata perf_cpu_nb = {
	.notifier_call		= perf_cpu_notify,
	.priority		= 20,
};
void __init perf_event_init(void)
{
	perf_event_init_all_cpus();
	perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE,
			(void *)(long)smp_processor_id());
	perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_ONLINE,
			(void *)(long)smp_processor_id());
	register_cpu_notifier(&perf_cpu_nb);
}
static ssize_t
perf_show_reserve_percpu(struct sysdev_class *class,
			 struct sysdev_class_attribute *attr,
			 char *buf)
{
	return sprintf(buf, "%d\n", perf_reserved_percpu);
}

static ssize_t
perf_set_reserve_percpu(struct sysdev_class *class,
			struct sysdev_class_attribute *attr,
			const char *buf,
			size_t count)
{
	struct perf_cpu_context *cpuctx;
	unsigned long val;
	int err, cpu, mpt;

	err = strict_strtoul(buf, 10, &val);
	if (err)
		return err;
	if (val > perf_max_events)
		return -EINVAL;

	spin_lock(&perf_resource_lock);
	perf_reserved_percpu = val;
	for_each_online_cpu(cpu) {
		cpuctx = &per_cpu(perf_cpu_context, cpu);
		raw_spin_lock_irq(&cpuctx->ctx.lock);
		mpt = min(perf_max_events - cpuctx->ctx.nr_events,
			  perf_max_events - perf_reserved_percpu);
		cpuctx->max_pertask = mpt;
		raw_spin_unlock_irq(&cpuctx->ctx.lock);
	}
	spin_unlock(&perf_resource_lock);

	return count;
}
static ssize_t
perf_show_overcommit(struct sysdev_class *class,
		     struct sysdev_class_attribute *attr,
		     char *buf)
{
	return sprintf(buf, "%d\n", perf_overcommit);
}

static ssize_t
perf_set_overcommit(struct sysdev_class *class,
		    struct sysdev_class_attribute *attr,
		    const char *buf, size_t count)
{
	unsigned long val;
	int err;

	err = strict_strtoul(buf, 10, &val);
	if (err)
		return err;
	if (val > 1)
		return -EINVAL;

	spin_lock(&perf_resource_lock);
	perf_overcommit = val;
	spin_unlock(&perf_resource_lock);

	return count;
}
static SYSDEV_CLASS_ATTR(
				reserve_percpu,
				0644,
				perf_show_reserve_percpu,
				perf_set_reserve_percpu
			);

static SYSDEV_CLASS_ATTR(
				overcommit,
				0644,
				perf_show_overcommit,
				perf_set_overcommit
			);

static struct attribute *perfclass_attrs[] = {
	&attr_reserve_percpu.attr,
	&attr_overcommit.attr,
	NULL
};

static struct attribute_group perfclass_attr_group = {
	.attrs			= perfclass_attrs,
	.name			= "perf_events",
};

static int __init perf_event_sysfs_init(void)
{
	return sysfs_create_group(&cpu_sysdev_class.kset.kobj,
				  &perfclass_attr_group);
}
device_initcall(perf_event_sysfs_init);
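/*
 * With the usual sysdev layout, the attribute group registered above
 * should show up under /sys/devices/system/cpu/perf_events/, e.g.:
 *
 *	# cat /sys/devices/system/cpu/perf_events/reserve_percpu
 *	# echo 1 > /sys/devices/system/cpu/perf_events/reserve_percpu
 *	# echo 0 > /sys/devices/system/cpu/perf_events/overcommit
 *
 * (Path given for illustration - it depends on where cpu_sysdev_class is
 * registered.)  Writes are validated by perf_set_reserve_percpu() and
 * perf_set_overcommit() above.
 */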