/*
 * Performance events core code:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *  Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/sysfs.h>
#include <linux/dcache.h>
#include <linux/percpu.h>
#include <linux/ptrace.h>
#include <linux/vmstat.h>
#include <linux/vmalloc.h>
#include <linux/hardirq.h>
#include <linux/rculist.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>
#include <linux/anon_inodes.h>
#include <linux/kernel_stat.h>
#include <linux/perf_event.h>
#include <linux/ftrace_event.h>

#include <asm/irq_regs.h>
atomic_t perf_task_events __read_mostly;
static atomic_t nr_mmap_events __read_mostly;
static atomic_t nr_comm_events __read_mostly;
static atomic_t nr_task_events __read_mostly;

static LIST_HEAD(pmus);
static DEFINE_MUTEX(pmus_lock);
static struct srcu_struct pmus_srcu;

/*
 * perf event paranoia level:
 *  -1 - not paranoid at all
 *   0 - disallow raw tracepoint access for unpriv
 *   1 - disallow cpu events for unpriv
 *   2 - disallow kernel profiling for unpriv
 */
int sysctl_perf_event_paranoid __read_mostly = 1;

int sysctl_perf_event_mlock __read_mostly = 512; /* 'free' kb per user */

/*
 * max perf event sample rate
 */
int sysctl_perf_event_sample_rate __read_mostly = 100000;

static atomic64_t perf_event_id;

void __weak perf_event_print_debug(void)	{ }

extern __weak const char *perf_pmu_name(void)
{
	return "pmu";
}
void perf_pmu_disable(struct pmu *pmu)
{
	int *count = this_cpu_ptr(pmu->pmu_disable_count);
	if (!(*count)++)
		pmu->pmu_disable(pmu);
}

void perf_pmu_enable(struct pmu *pmu)
{
	int *count = this_cpu_ptr(pmu->pmu_disable_count);
	if (!--(*count))
		pmu->pmu_enable(pmu);
}
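/*
 * Editor's note: pmu_disable_count is a per-cpu nesting counter, so the
 * two calls above may be nested. The hardware hook runs only on the
 * 0 -> 1 disable transition and the matching 1 -> 0 enable transition,
 * e.g.:
 *
 *	perf_pmu_disable(pmu);	// pmu->pmu_disable() runs, count = 1
 *	perf_pmu_disable(pmu);	// counter only, count = 2
 *	perf_pmu_enable(pmu);	// counter only, count = 1
 *	perf_pmu_enable(pmu);	// pmu->pmu_enable() runs, count = 0
 *
 * Every disable must therefore be balanced by exactly one enable on the
 * same cpu.
 */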
static DEFINE_PER_CPU(struct list_head, rotation_list);

/*
 * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized
 * because they're strictly cpu affine and rotate_start is called with IRQs
 * disabled, while rotate_context is called from IRQ context.
 */
static void perf_pmu_rotate_start(struct pmu *pmu)
{
	struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
	struct list_head *head = &__get_cpu_var(rotation_list);

	WARN_ON(!irqs_disabled());

	if (list_empty(&cpuctx->rotation_list))
		list_add(&cpuctx->rotation_list, head);
}

static void get_ctx(struct perf_event_context *ctx)
{
	WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
}

static void free_ctx(struct rcu_head *head)
{
	struct perf_event_context *ctx;

	ctx = container_of(head, struct perf_event_context, rcu_head);
	kfree(ctx);
}

static void put_ctx(struct perf_event_context *ctx)
{
	if (atomic_dec_and_test(&ctx->refcount)) {
		if (ctx->parent_ctx)
			put_ctx(ctx->parent_ctx);
		if (ctx->task)
			put_task_struct(ctx->task);
		call_rcu(&ctx->rcu_head, free_ctx);
	}
}

static void unclone_ctx(struct perf_event_context *ctx)
{
	if (ctx->parent_ctx) {
		put_ctx(ctx->parent_ctx);
		ctx->parent_ctx = NULL;
	}
}

/*
 * If we inherit events we want to return the parent event id
 * to userspace.
 */
static u64 primary_event_id(struct perf_event *event)
{
	u64 id = event->id;

	if (event->parent)
		id = event->parent->id;

	return id;
}

/*
 * Get the perf_event_context for a task and lock it.
 * This has to cope with the fact that until it is locked,
 * the context could get moved to another task.
 */
static struct perf_event_context *
perf_lock_task_context(struct task_struct *task, int ctxn, unsigned long *flags)
{
	struct perf_event_context *ctx;

	rcu_read_lock();
retry:
	ctx = rcu_dereference(task->perf_event_ctxp[ctxn]);
	if (ctx) {
		/*
		 * If this context is a clone of another, it might
		 * get swapped for another underneath us by
		 * perf_event_task_sched_out, though the
		 * rcu_read_lock() protects us from any context
		 * getting freed. Lock the context and check if it
		 * got swapped before we could get the lock, and retry
		 * if so. If we locked the right context, then it
		 * can't get swapped on us any more.
		 */
		raw_spin_lock_irqsave(&ctx->lock, *flags);
		if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) {
			raw_spin_unlock_irqrestore(&ctx->lock, *flags);
			goto retry;
		}

		if (!atomic_inc_not_zero(&ctx->refcount)) {
			raw_spin_unlock_irqrestore(&ctx->lock, *flags);
			ctx = NULL;
		}
	}
	rcu_read_unlock();
	return ctx;
}
/*
 * Get the context for a task and increment its pin_count so it
 * can't get swapped to another task. This also increments its
 * reference count so that the context can't get freed.
 */
static struct perf_event_context *
perf_pin_task_context(struct task_struct *task, int ctxn)
{
	struct perf_event_context *ctx;
	unsigned long flags;

	ctx = perf_lock_task_context(task, ctxn, &flags);
	if (ctx) {
		++ctx->pin_count;
		raw_spin_unlock_irqrestore(&ctx->lock, flags);
	}
	return ctx;
}

static void perf_unpin_context(struct perf_event_context *ctx)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&ctx->lock, flags);
	--ctx->pin_count;
	raw_spin_unlock_irqrestore(&ctx->lock, flags);
	put_ctx(ctx);
}

static inline u64 perf_clock(void)
{
	return local_clock();
}

/*
 * Update the record of the current time in a context.
 */
static void update_context_time(struct perf_event_context *ctx)
{
	u64 now = perf_clock();

	ctx->time += now - ctx->timestamp;
	ctx->timestamp = now;
}

/*
 * Update the total_time_enabled and total_time_running fields for an event.
 */
static void update_event_times(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	u64 run_end;

	if (event->state < PERF_EVENT_STATE_INACTIVE ||
	    event->group_leader->state < PERF_EVENT_STATE_INACTIVE)
		return;

	if (ctx->is_active)
		run_end = ctx->time;
	else
		run_end = event->tstamp_stopped;

	event->total_time_enabled = run_end - event->tstamp_enabled;

	if (event->state == PERF_EVENT_STATE_INACTIVE)
		run_end = event->tstamp_stopped;
	else
		run_end = ctx->time;

	event->total_time_running = run_end - event->tstamp_running;
}
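/*
 * Editor's sketch of the bookkeeping above: an event enabled at
 * ctx->time = 100 that last ran until tstamp_stopped = 150, read while
 * the context is still active at ctx->time = 180, reports
 *
 *	total_time_enabled = 180 - 100 = 80
 *	total_time_running = 150 - 100 = 50
 *
 * i.e. time_enabled keeps growing while the event is merely INACTIVE,
 * but time_running is frozen at tstamp_stopped.
 */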
/*
 * Update total_time_enabled and total_time_running for all events in a group.
 */
static void update_group_times(struct perf_event *leader)
{
	struct perf_event *event;

	update_event_times(leader);
	list_for_each_entry(event, &leader->sibling_list, group_entry)
		update_event_times(event);
}

static struct list_head *
ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
{
	if (event->attr.pinned)
		return &ctx->pinned_groups;
	else
		return &ctx->flexible_groups;
}

/*
 * Add an event to the lists for its context.
 * Must be called with ctx->mutex and ctx->lock held.
 */
static void
list_add_event(struct perf_event *event, struct perf_event_context *ctx)
{
	WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
	event->attach_state |= PERF_ATTACH_CONTEXT;

	/*
	 * If we're a stand alone event or group leader, we go to the context
	 * list, group events are kept attached to the group so that
	 * perf_group_detach can, at all times, locate all siblings.
	 */
	if (event->group_leader == event) {
		struct list_head *list;

		if (is_software_event(event))
			event->group_flags |= PERF_GROUP_SOFTWARE;

		list = ctx_group_list(event, ctx);
		list_add_tail(&event->group_entry, list);
	}

	list_add_rcu(&event->event_entry, &ctx->event_list);
	if (!ctx->nr_events)
		perf_pmu_rotate_start(ctx->pmu);
	ctx->nr_events++;
	if (event->attr.inherit_stat)
		ctx->nr_stat++;
}
static void perf_group_attach(struct perf_event *event)
{
	struct perf_event *group_leader = event->group_leader;

	/*
	 * We can have double attach due to group movement in perf_event_open.
	 */
	if (event->attach_state & PERF_ATTACH_GROUP)
		return;

	event->attach_state |= PERF_ATTACH_GROUP;

	if (group_leader == event)
		return;

	if (group_leader->group_flags & PERF_GROUP_SOFTWARE &&
			!is_software_event(event))
		group_leader->group_flags &= ~PERF_GROUP_SOFTWARE;

	list_add_tail(&event->group_entry, &group_leader->sibling_list);
	group_leader->nr_siblings++;
}

/*
 * Remove an event from the lists for its context.
 * Must be called with ctx->mutex and ctx->lock held.
 */
static void
list_del_event(struct perf_event *event, struct perf_event_context *ctx)
{
	/*
	 * We can have double detach due to exit/hot-unplug + close.
	 */
	if (!(event->attach_state & PERF_ATTACH_CONTEXT))
		return;

	event->attach_state &= ~PERF_ATTACH_CONTEXT;

	ctx->nr_events--;
	if (event->attr.inherit_stat)
		ctx->nr_stat--;

	list_del_rcu(&event->event_entry);

	if (event->group_leader == event)
		list_del_init(&event->group_entry);

	update_group_times(event);

	/*
	 * If the event was in error state, then keep it
	 * that way, otherwise bogus counts will be
	 * returned on read(). The only way to get out
	 * of error state is by explicit re-enabling
	 * of the event.
	 */
	if (event->state > PERF_EVENT_STATE_OFF)
		event->state = PERF_EVENT_STATE_OFF;
}
static void perf_group_detach(struct perf_event *event)
{
	struct perf_event *sibling, *tmp;
	struct list_head *list = NULL;

	/*
	 * We can have double detach due to exit/hot-unplug + close.
	 */
	if (!(event->attach_state & PERF_ATTACH_GROUP))
		return;

	event->attach_state &= ~PERF_ATTACH_GROUP;

	/*
	 * If this is a sibling, remove it from its group.
	 */
	if (event->group_leader != event) {
		list_del_init(&event->group_entry);
		event->group_leader->nr_siblings--;
		return;
	}

	if (!list_empty(&event->group_entry))
		list = &event->group_entry;

	/*
	 * If this was a group event with sibling events then
	 * upgrade the siblings to singleton events by adding them
	 * to whatever list we are on.
	 */
	list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) {
		if (list)
			list_move_tail(&sibling->group_entry, list);
		sibling->group_leader = sibling;

		/* Inherit group flags from the previous leader */
		sibling->group_flags = event->group_flags;
	}
}

static inline int
event_filter_match(struct perf_event *event)
{
	return event->cpu == -1 || event->cpu == smp_processor_id();
}
static int
__event_sched_out(struct perf_event *event,
		  struct perf_cpu_context *cpuctx,
		  struct perf_event_context *ctx)
{
	u64 delta;
	/*
	 * An event which could not be activated because of
	 * a filter mismatch still needs to have its timings
	 * maintained, otherwise bogus information is returned
	 * via read() for time_enabled, time_running:
	 */
	if (event->state == PERF_EVENT_STATE_INACTIVE
	    && !event_filter_match(event)) {
		delta = ctx->time - event->tstamp_stopped;
		event->tstamp_running += delta;
		event->tstamp_stopped = ctx->time;
	}

	if (event->state != PERF_EVENT_STATE_ACTIVE)
		return 0;

	event->state = PERF_EVENT_STATE_INACTIVE;
	if (event->pending_disable) {
		event->pending_disable = 0;
		event->state = PERF_EVENT_STATE_OFF;
	}
	event->pmu->del(event, 0);
	event->oncpu = -1;

	if (!is_software_event(event))
		cpuctx->active_oncpu--;
	ctx->nr_active--;
	if (event->attr.exclusive || !cpuctx->active_oncpu)
		cpuctx->exclusive = 0;
	return 1;
}

static void
event_sched_out(struct perf_event *event,
		struct perf_cpu_context *cpuctx,
		struct perf_event_context *ctx)
{
	int ret;

	ret = __event_sched_out(event, cpuctx, ctx);
	if (ret)
		event->tstamp_stopped = ctx->time;
}

static void
group_sched_out(struct perf_event *group_event,
		struct perf_cpu_context *cpuctx,
		struct perf_event_context *ctx)
{
	struct perf_event *event;
	int state = group_event->state;

	event_sched_out(group_event, cpuctx, ctx);

	/*
	 * Schedule out siblings (if any):
	 */
	list_for_each_entry(event, &group_event->sibling_list, group_entry)
		event_sched_out(event, cpuctx, ctx);

	if (state == PERF_EVENT_STATE_ACTIVE && group_event->attr.exclusive)
		cpuctx->exclusive = 0;
}
static inline struct perf_cpu_context *
__get_cpu_context(struct perf_event_context *ctx)
{
	return this_cpu_ptr(ctx->pmu->pmu_cpu_context);
}

/*
 * Cross CPU call to remove a performance event
 *
 * We disable the event on the hardware level first. After that we
 * remove it from the context list.
 */
static void __perf_event_remove_from_context(void *info)
{
	struct perf_event *event = info;
	struct perf_event_context *ctx = event->ctx;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);

	/*
	 * If this is a task context, we need to check whether it is
	 * the current task context of this cpu. If not it has been
	 * scheduled out before the smp call arrived.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)
		return;

	raw_spin_lock(&ctx->lock);

	event_sched_out(event, cpuctx, ctx);

	list_del_event(event, ctx);

	raw_spin_unlock(&ctx->lock);
}

/*
 * Remove the event from a task's (or a CPU's) list of events.
 *
 * Must be called with ctx->mutex held.
 *
 * CPU events are removed with a smp call. For task events we only
 * call when the task is on a CPU.
 *
 * If event->ctx is a cloned context, callers must make sure that
 * every task struct that event->ctx->task could possibly point to
 * remains valid. This is OK when called from perf_release since
 * that only calls us on the top-level context, which can't be a clone.
 * When called from perf_event_exit_task, it's OK because the
 * context has been detached from its task.
 */
static void perf_event_remove_from_context(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Per cpu events are removed via an smp call and
		 * the removal is always successful.
		 */
		smp_call_function_single(event->cpu,
					 __perf_event_remove_from_context,
					 event, 1);
		return;
	}

retry:
	task_oncpu_function_call(task, __perf_event_remove_from_context,
				 event);

	raw_spin_lock_irq(&ctx->lock);
	/*
	 * If the context is active we need to retry the smp call.
	 */
	if (ctx->nr_active && !list_empty(&event->group_entry)) {
		raw_spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * The lock prevents this context from being scheduled in, so we
	 * can remove the event safely if the call above did not succeed.
	 */
	if (!list_empty(&event->group_entry))
		list_del_event(event, ctx);
	raw_spin_unlock_irq(&ctx->lock);
}
/*
 * Cross CPU call to disable a performance event
 */
static void __perf_event_disable(void *info)
{
	struct perf_event *event = info;
	struct perf_event_context *ctx = event->ctx;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);

	/*
	 * If this is a per-task event, we need to check whether this
	 * event's task is the current task on this cpu.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)
		return;

	raw_spin_lock(&ctx->lock);

	/*
	 * If the event is on, turn it off.
	 * If it is in error state, leave it in error state.
	 */
	if (event->state >= PERF_EVENT_STATE_INACTIVE) {
		update_context_time(ctx);
		update_group_times(event);
		if (event == event->group_leader)
			group_sched_out(event, cpuctx, ctx);
		else
			event_sched_out(event, cpuctx, ctx);
		event->state = PERF_EVENT_STATE_OFF;
	}

	raw_spin_unlock(&ctx->lock);
}

/*
 * Disable an event.
 *
 * If event->ctx is a cloned context, callers must make sure that
 * every task struct that event->ctx->task could possibly point to
 * remains valid. This condition is satisfied when called through
 * perf_event_for_each_child or perf_event_for_each because they
 * hold the top-level event's child_mutex, so any descendant that
 * goes to exit will block in sync_child_event.
 * When called from perf_pending_event it's OK because event->ctx
 * is the current context on this CPU and preemption is disabled,
 * hence we can't get into perf_event_task_sched_out for this context.
 */
void perf_event_disable(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Disable the event on the cpu that it's on
		 */
		smp_call_function_single(event->cpu, __perf_event_disable,
					 event, 1);
		return;
	}

retry:
	task_oncpu_function_call(task, __perf_event_disable, event);

	raw_spin_lock_irq(&ctx->lock);
	/*
	 * If the event is still active, we need to retry the cross-call.
	 */
	if (event->state == PERF_EVENT_STATE_ACTIVE) {
		raw_spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * Since we have the lock this context can't be scheduled
	 * in, so we can change the state safely.
	 */
	if (event->state == PERF_EVENT_STATE_INACTIVE) {
		update_group_times(event);
		event->state = PERF_EVENT_STATE_OFF;
	}

	raw_spin_unlock_irq(&ctx->lock);
}
static int
__event_sched_in(struct perf_event *event,
		 struct perf_cpu_context *cpuctx,
		 struct perf_event_context *ctx)
{
	if (event->state <= PERF_EVENT_STATE_OFF)
		return 0;

	event->state = PERF_EVENT_STATE_ACTIVE;
	event->oncpu = smp_processor_id();

	/*
	 * The new state must be visible before we turn it on in the hardware:
	 */
	smp_wmb();

	if (event->pmu->add(event, PERF_EF_START)) {
		event->state = PERF_EVENT_STATE_INACTIVE;
		event->oncpu = -1;
		return -EAGAIN;
	}

	if (!is_software_event(event))
		cpuctx->active_oncpu++;
	ctx->nr_active++;

	if (event->attr.exclusive)
		cpuctx->exclusive = 1;

	return 0;
}

static inline int
event_sched_in(struct perf_event *event,
	       struct perf_cpu_context *cpuctx,
	       struct perf_event_context *ctx)
{
	int ret = __event_sched_in(event, cpuctx, ctx);
	if (ret)
		return ret;

	event->tstamp_running += ctx->time - event->tstamp_stopped;
	return 0;
}

static void
group_commit_event_sched_in(struct perf_event *group_event,
			    struct perf_cpu_context *cpuctx,
			    struct perf_event_context *ctx)
{
	struct perf_event *event;
	u64 now = ctx->time;

	group_event->tstamp_running += now - group_event->tstamp_stopped;
	/*
	 * Schedule in siblings as one group (if any):
	 */
	list_for_each_entry(event, &group_event->sibling_list, group_entry) {
		event->tstamp_running += now - event->tstamp_stopped;
	}
}
static int
group_sched_in(struct perf_event *group_event,
	       struct perf_cpu_context *cpuctx,
	       struct perf_event_context *ctx)
{
	struct perf_event *event, *partial_group = NULL;
	struct pmu *pmu = group_event->pmu;

	if (group_event->state == PERF_EVENT_STATE_OFF)
		return 0;

	pmu->start_txn(pmu);

	/*
	 * Use __event_sched_in() to delay updating tstamp_running
	 * until the transaction is committed. In case of failure
	 * we will keep an unmodified tstamp_running, which is a
	 * requirement for getting correct timing information.
	 */
	if (__event_sched_in(group_event, cpuctx, ctx)) {
		pmu->cancel_txn(pmu);
		return -EAGAIN;
	}

	/*
	 * Schedule in siblings as one group (if any):
	 */
	list_for_each_entry(event, &group_event->sibling_list, group_entry) {
		if (__event_sched_in(event, cpuctx, ctx)) {
			partial_group = event;
			goto group_error;
		}
	}

	if (!pmu->commit_txn(pmu)) {
		/* commit tstamp_running */
		group_commit_event_sched_in(group_event, cpuctx, ctx);
		return 0;
	}

group_error:
	/*
	 * Groups can only be scheduled in as one unit, so undo any
	 * partial group before returning:
	 *
	 * Use __event_sched_out() to avoid updating tstamp_stopped
	 * because the event never actually ran.
	 */
	list_for_each_entry(event, &group_event->sibling_list, group_entry) {
		if (event == partial_group)
			break;
		__event_sched_out(event, cpuctx, ctx);
	}
	__event_sched_out(group_event, cpuctx, ctx);

	pmu->cancel_txn(pmu);

	return -EAGAIN;
}
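/*
 * Editor's note on the transaction protocol used above: the group is
 * scheduled in under a PMU transaction so the hardware can validate
 * the whole group at once. In terms of the pmu methods this file
 * already relies on, the shape is roughly:
 *
 *	pmu->start_txn(pmu);
 *	for each event in group:
 *		if (__event_sched_in(event, ...))
 *			goto undo;	// unwind the partial group
 *	if (!pmu->commit_txn(pmu))
 *		return 0;		// all counters fit
 * undo:
 *	... __event_sched_out() each member already scheduled ...
 *	pmu->cancel_txn(pmu);
 *	return -EAGAIN;
 *
 * tstamp_running is only committed after commit_txn() succeeds, so a
 * failed group leaves the timestamps untouched.
 */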
/*
 * Work out whether we can put this event group on the CPU now.
 */
static int group_can_go_on(struct perf_event *event,
			   struct perf_cpu_context *cpuctx,
			   int can_add_hw)
{
	/*
	 * Groups consisting entirely of software events can always go on.
	 */
	if (event->group_flags & PERF_GROUP_SOFTWARE)
		return 1;
	/*
	 * If an exclusive group is already on, no other hardware
	 * events can go on.
	 */
	if (cpuctx->exclusive)
		return 0;
	/*
	 * If this group is exclusive and there are already
	 * events on the CPU, it can't go on.
	 */
	if (event->attr.exclusive && cpuctx->active_oncpu)
		return 0;
	/*
	 * Otherwise, try to add it if all previous groups were able
	 * to go on.
	 */
	return can_add_hw;
}

static void add_event_to_ctx(struct perf_event *event,
			     struct perf_event_context *ctx)
{
	list_add_event(event, ctx);
	perf_group_attach(event);
	event->tstamp_enabled = ctx->time;
	event->tstamp_running = ctx->time;
	event->tstamp_stopped = ctx->time;
}
/*
 * Cross CPU call to install and enable a performance event
 *
 * Must be called with ctx->mutex held
 */
static void __perf_install_in_context(void *info)
{
	struct perf_event *event = info;
	struct perf_event_context *ctx = event->ctx;
	struct perf_event *leader = event->group_leader;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
	int err;

	/*
	 * If this is a task context, we need to check whether it is
	 * the current task context of this cpu. If not it has been
	 * scheduled out before the smp call arrived.
	 * Or possibly this is the right context but it isn't
	 * on this cpu because it had no events.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx) {
		if (cpuctx->task_ctx || ctx->task != current)
			return;
		cpuctx->task_ctx = ctx;
	}

	raw_spin_lock(&ctx->lock);
	ctx->is_active = 1;
	update_context_time(ctx);

	add_event_to_ctx(event, ctx);

	if (event->cpu != -1 && event->cpu != smp_processor_id())
		goto unlock;

	/*
	 * Don't put the event on if it is disabled or if
	 * it is in a group and the group isn't on.
	 */
	if (event->state != PERF_EVENT_STATE_INACTIVE ||
	    (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE))
		goto unlock;

	/*
	 * An exclusive event can't go on if there are already active
	 * hardware events, and no hardware event can go on if there
	 * is already an exclusive event on.
	 */
	if (!group_can_go_on(event, cpuctx, 1))
		err = -EEXIST;
	else
		err = event_sched_in(event, cpuctx, ctx);

	if (err) {
		/*
		 * This event couldn't go on. If it is in a group
		 * then we have to pull the whole group off.
		 * If the event group is pinned then put it in error state.
		 */
		if (leader != event)
			group_sched_out(leader, cpuctx, ctx);
		if (leader->attr.pinned) {
			update_group_times(leader);
			leader->state = PERF_EVENT_STATE_ERROR;
		}
	}

unlock:
	raw_spin_unlock(&ctx->lock);
}
/*
 * Attach a performance event to a context
 *
 * First we add the event to the list with the hardware enable bit
 * in event->hw_config cleared.
 *
 * If the event is attached to a task which is on a CPU we use a smp
 * call to enable it in the task context. The task might have been
 * scheduled away, but we check this in the smp call again.
 *
 * Must be called with ctx->mutex held.
 */
static void
perf_install_in_context(struct perf_event_context *ctx,
			struct perf_event *event,
			int cpu)
{
	struct task_struct *task = ctx->task;

	event->ctx = ctx;

	if (!task) {
		/*
		 * Per cpu events are installed via an smp call and
		 * the install is always successful.
		 */
		smp_call_function_single(cpu, __perf_install_in_context,
					 event, 1);
		return;
	}

retry:
	task_oncpu_function_call(task, __perf_install_in_context,
				 event);

	raw_spin_lock_irq(&ctx->lock);
	/*
	 * we need to retry the smp call.
	 */
	if (ctx->is_active && list_empty(&event->group_entry)) {
		raw_spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * The lock prevents this context from being scheduled in, so we
	 * can add the event safely if the call above did not succeed.
	 */
	if (list_empty(&event->group_entry))
		add_event_to_ctx(event, ctx);
	raw_spin_unlock_irq(&ctx->lock);
}
/*
 * Put an event into inactive state and update time fields.
 * Enabling the leader of a group effectively enables all
 * the group members that aren't explicitly disabled, so we
 * have to update their ->tstamp_enabled also.
 * Note: this works for group members as well as group leaders
 * since the non-leader members' sibling_lists will be empty.
 */
static void __perf_event_mark_enabled(struct perf_event *event,
				      struct perf_event_context *ctx)
{
	struct perf_event *sub;

	event->state = PERF_EVENT_STATE_INACTIVE;
	event->tstamp_enabled = ctx->time - event->total_time_enabled;
	list_for_each_entry(sub, &event->sibling_list, group_entry) {
		if (sub->state >= PERF_EVENT_STATE_INACTIVE) {
			sub->tstamp_enabled =
				ctx->time - sub->total_time_enabled;
		}
	}
}

/*
 * Cross CPU call to enable a performance event
 */
static void __perf_event_enable(void *info)
{
	struct perf_event *event = info;
	struct perf_event_context *ctx = event->ctx;
	struct perf_event *leader = event->group_leader;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
	int err;

	/*
	 * If this is a per-task event, we need to check whether this
	 * event's task is the current task on this cpu.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx) {
		if (cpuctx->task_ctx || ctx->task != current)
			return;
		cpuctx->task_ctx = ctx;
	}

	raw_spin_lock(&ctx->lock);
	ctx->is_active = 1;
	update_context_time(ctx);

	if (event->state >= PERF_EVENT_STATE_INACTIVE)
		goto unlock;
	__perf_event_mark_enabled(event, ctx);

	if (event->cpu != -1 && event->cpu != smp_processor_id())
		goto unlock;

	/*
	 * If the event is in a group and isn't the group leader,
	 * then don't put it on unless the group is on.
	 */
	if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE)
		goto unlock;

	if (!group_can_go_on(event, cpuctx, 1)) {
		err = -EEXIST;
	} else {
		if (event == leader)
			err = group_sched_in(event, cpuctx, ctx);
		else
			err = event_sched_in(event, cpuctx, ctx);
	}

	if (err) {
		/*
		 * If this event can't go on and it's part of a
		 * group, then the whole group has to come off.
		 */
		if (leader != event)
			group_sched_out(leader, cpuctx, ctx);
		if (leader->attr.pinned) {
			update_group_times(leader);
			leader->state = PERF_EVENT_STATE_ERROR;
		}
	}

unlock:
	raw_spin_unlock(&ctx->lock);
}
/*
 * Enable an event.
 *
 * If event->ctx is a cloned context, callers must make sure that
 * every task struct that event->ctx->task could possibly point to
 * remains valid. This condition is satisfied when called through
 * perf_event_for_each_child or perf_event_for_each as described
 * for perf_event_disable.
 */
void perf_event_enable(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Enable the event on the cpu that it's on
		 */
		smp_call_function_single(event->cpu, __perf_event_enable,
					 event, 1);
		return;
	}

	raw_spin_lock_irq(&ctx->lock);
	if (event->state >= PERF_EVENT_STATE_INACTIVE)
		goto out;

	/*
	 * If the event is in error state, clear that first.
	 * That way, if we see the event in error state below, we
	 * know that it has gone back into error state, as distinct
	 * from the task having been scheduled away before the
	 * cross-call arrived.
	 */
	if (event->state == PERF_EVENT_STATE_ERROR)
		event->state = PERF_EVENT_STATE_OFF;

retry:
	raw_spin_unlock_irq(&ctx->lock);
	task_oncpu_function_call(task, __perf_event_enable, event);

	raw_spin_lock_irq(&ctx->lock);

	/*
	 * If the context is active and the event is still off,
	 * we need to retry the cross-call.
	 */
	if (ctx->is_active && event->state == PERF_EVENT_STATE_OFF)
		goto retry;

	/*
	 * Since we have the lock this context can't be scheduled
	 * in, so we can change the state safely.
	 */
	if (event->state == PERF_EVENT_STATE_OFF)
		__perf_event_mark_enabled(event, ctx);

out:
	raw_spin_unlock_irq(&ctx->lock);
}
static int perf_event_refresh(struct perf_event *event, int refresh)
{
	/*
	 * not supported on inherited events
	 */
	if (event->attr.inherit)
		return -EINVAL;

	atomic_add(refresh, &event->event_limit);
	perf_event_enable(event);

	return 0;
}
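/*
 * Editor's note: event_limit acts as a countdown of overflows the event
 * may still take before it is auto-disabled on overflow;
 * perf_event_refresh() tops that budget up by 'refresh' and re-enables
 * the event. Userspace reaches this path via the PERF_EVENT_IOC_REFRESH
 * ioctl.
 */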
enum event_type_t {
	EVENT_FLEXIBLE = 0x1,
	EVENT_PINNED = 0x2,
	EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
};

static void ctx_sched_out(struct perf_event_context *ctx,
			  struct perf_cpu_context *cpuctx,
			  enum event_type_t event_type)
{
	struct perf_event *event;

	raw_spin_lock(&ctx->lock);
	perf_pmu_disable(ctx->pmu);
	ctx->is_active = 0;
	if (likely(!ctx->nr_events))
		goto out;
	update_context_time(ctx);

	if (!ctx->nr_active)
		goto out;

	if (event_type & EVENT_PINNED) {
		list_for_each_entry(event, &ctx->pinned_groups, group_entry)
			group_sched_out(event, cpuctx, ctx);
	}

	if (event_type & EVENT_FLEXIBLE) {
		list_for_each_entry(event, &ctx->flexible_groups, group_entry)
			group_sched_out(event, cpuctx, ctx);
	}
out:
	perf_pmu_enable(ctx->pmu);
	raw_spin_unlock(&ctx->lock);
}
/*
 * Test whether two contexts are equivalent, i.e. whether they
 * have both been cloned from the same version of the same context
 * and they both have the same number of enabled events.
 * If the number of enabled events is the same, then the set
 * of enabled events should be the same, because these are both
 * inherited contexts, therefore we can't access individual events
 * in them directly with an fd; we can only enable/disable all
 * events via prctl, or enable/disable all events in a family
 * via ioctl, which will have the same effect on both contexts.
 */
static int context_equiv(struct perf_event_context *ctx1,
			 struct perf_event_context *ctx2)
{
	return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx
		&& ctx1->parent_gen == ctx2->parent_gen
		&& !ctx1->pin_count && !ctx2->pin_count;
}

static void __perf_event_sync_stat(struct perf_event *event,
				   struct perf_event *next_event)
{
	u64 value;

	if (!event->attr.inherit_stat)
		return;

	/*
	 * Update the event value, we cannot use perf_event_read()
	 * because we're in the middle of a context switch and have IRQs
	 * disabled, which upsets smp_call_function_single(); however
	 * we know the event must be on the current CPU, therefore we
	 * don't need to use it.
	 */
	switch (event->state) {
	case PERF_EVENT_STATE_ACTIVE:
		event->pmu->read(event);
		/* fall-through */

	case PERF_EVENT_STATE_INACTIVE:
		update_event_times(event);
		break;

	default:
		break;
	}

	/*
	 * In order to keep per-task stats reliable we need to flip the event
	 * values when we flip the contexts.
	 */
	value = local64_read(&next_event->count);
	value = local64_xchg(&event->count, value);
	local64_set(&next_event->count, value);

	swap(event->total_time_enabled, next_event->total_time_enabled);
	swap(event->total_time_running, next_event->total_time_running);

	/*
	 * Since we swizzled the values, update the user visible data too.
	 */
	perf_event_update_userpage(event);
	perf_event_update_userpage(next_event);
}
#define list_next_entry(pos, member) \
	list_entry(pos->member.next, typeof(*pos), member)

static void perf_event_sync_stat(struct perf_event_context *ctx,
				 struct perf_event_context *next_ctx)
{
	struct perf_event *event, *next_event;

	if (!ctx->nr_stat)
		return;

	update_context_time(ctx);

	event = list_first_entry(&ctx->event_list,
				 struct perf_event, event_entry);

	next_event = list_first_entry(&next_ctx->event_list,
				      struct perf_event, event_entry);

	while (&event->event_entry != &ctx->event_list &&
	       &next_event->event_entry != &next_ctx->event_list) {

		__perf_event_sync_stat(event, next_event);

		event = list_next_entry(event, event_entry);
		next_event = list_next_entry(next_event, event_entry);
	}
}
void perf_event_context_sched_out(struct task_struct *task, int ctxn,
				  struct task_struct *next)
{
	struct perf_event_context *ctx = task->perf_event_ctxp[ctxn];
	struct perf_event_context *next_ctx;
	struct perf_event_context *parent;
	struct perf_cpu_context *cpuctx;
	int do_switch = 1;

	if (likely(!ctx))
		return;

	cpuctx = __get_cpu_context(ctx);
	if (!cpuctx->task_ctx)
		return;

	rcu_read_lock();
	parent = rcu_dereference(ctx->parent_ctx);
	next_ctx = next->perf_event_ctxp[ctxn];
	if (parent && next_ctx &&
	    rcu_dereference(next_ctx->parent_ctx) == parent) {
		/*
		 * Looks like the two contexts are clones, so we might be
		 * able to optimize the context switch. We lock both
		 * contexts and check that they are clones under the
		 * lock (including re-checking that neither has been
		 * uncloned in the meantime). It doesn't matter which
		 * order we take the locks because no other cpu could
		 * be trying to lock both of these tasks.
		 */
		raw_spin_lock(&ctx->lock);
		raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
		if (context_equiv(ctx, next_ctx)) {
			/*
			 * XXX do we need a memory barrier of sorts
			 * wrt to rcu_dereference() of perf_event_ctxp
			 */
			task->perf_event_ctxp[ctxn] = next_ctx;
			next->perf_event_ctxp[ctxn] = ctx;
			ctx->task = next;
			next_ctx->task = task;
			do_switch = 0;

			perf_event_sync_stat(ctx, next_ctx);
		}
		raw_spin_unlock(&next_ctx->lock);
		raw_spin_unlock(&ctx->lock);
	}
	rcu_read_unlock();

	if (do_switch) {
		ctx_sched_out(ctx, cpuctx, EVENT_ALL);
		cpuctx->task_ctx = NULL;
	}
}
#define for_each_task_context_nr(ctxn)					\
	for ((ctxn) = 0; (ctxn) < perf_nr_task_contexts; (ctxn)++)

/*
 * Called from scheduler to remove the events of the current task,
 * with interrupts disabled.
 *
 * We stop each event and update the event value in event->count.
 *
 * This does not protect us against NMI, but disable()
 * sets the disabled bit in the control field of event _before_
 * accessing the event control register. If an NMI hits, then it will
 * not restart the event.
 */
void __perf_event_task_sched_out(struct task_struct *task,
				 struct task_struct *next)
{
	int ctxn;

	perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0);

	for_each_task_context_nr(ctxn)
		perf_event_context_sched_out(task, ctxn, next);
}

static void task_ctx_sched_out(struct perf_event_context *ctx,
			       enum event_type_t event_type)
{
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);

	if (!cpuctx->task_ctx)
		return;

	if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
		return;

	ctx_sched_out(ctx, cpuctx, event_type);
	cpuctx->task_ctx = NULL;
}

/*
 * Called with IRQs disabled
 */
static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
			      enum event_type_t event_type)
{
	ctx_sched_out(&cpuctx->ctx, cpuctx, event_type);
}
static void
ctx_pinned_sched_in(struct perf_event_context *ctx,
		    struct perf_cpu_context *cpuctx)
{
	struct perf_event *event;

	list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
		if (event->state <= PERF_EVENT_STATE_OFF)
			continue;
		if (event->cpu != -1 && event->cpu != smp_processor_id())
			continue;

		if (group_can_go_on(event, cpuctx, 1))
			group_sched_in(event, cpuctx, ctx);

		/*
		 * If this pinned group hasn't been scheduled,
		 * put it in error state.
		 */
		if (event->state == PERF_EVENT_STATE_INACTIVE) {
			update_group_times(event);
			event->state = PERF_EVENT_STATE_ERROR;
		}
	}
}

static void
ctx_flexible_sched_in(struct perf_event_context *ctx,
		      struct perf_cpu_context *cpuctx)
{
	struct perf_event *event;
	int can_add_hw = 1;

	list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
		/* Ignore events in OFF or ERROR state */
		if (event->state <= PERF_EVENT_STATE_OFF)
			continue;
		/*
		 * Listen to the 'cpu' scheduling filter constraint
		 * of events:
		 */
		if (event->cpu != -1 && event->cpu != smp_processor_id())
			continue;

		if (group_can_go_on(event, cpuctx, can_add_hw)) {
			if (group_sched_in(event, cpuctx, ctx))
				can_add_hw = 0;
		}
	}
}

static void
ctx_sched_in(struct perf_event_context *ctx,
	     struct perf_cpu_context *cpuctx,
	     enum event_type_t event_type)
{
	raw_spin_lock(&ctx->lock);
	ctx->is_active = 1;
	if (likely(!ctx->nr_events))
		goto out;

	ctx->timestamp = perf_clock();

	/*
	 * First go through the list and put on any pinned groups
	 * in order to give them the best chance of going on.
	 */
	if (event_type & EVENT_PINNED)
		ctx_pinned_sched_in(ctx, cpuctx);

	/* Then walk through the lower prio flexible groups */
	if (event_type & EVENT_FLEXIBLE)
		ctx_flexible_sched_in(ctx, cpuctx);

out:
	raw_spin_unlock(&ctx->lock);
}
static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
			     enum event_type_t event_type)
{
	struct perf_event_context *ctx = &cpuctx->ctx;

	ctx_sched_in(ctx, cpuctx, event_type);
}

static void task_ctx_sched_in(struct perf_event_context *ctx,
			      enum event_type_t event_type)
{
	struct perf_cpu_context *cpuctx;

	cpuctx = __get_cpu_context(ctx);
	if (cpuctx->task_ctx == ctx)
		return;

	ctx_sched_in(ctx, cpuctx, event_type);
	cpuctx->task_ctx = ctx;
}

void perf_event_context_sched_in(struct perf_event_context *ctx)
{
	struct perf_cpu_context *cpuctx;

	cpuctx = __get_cpu_context(ctx);
	if (cpuctx->task_ctx == ctx)
		return;

	perf_pmu_disable(ctx->pmu);
	/*
	 * We want to keep the following priority order:
	 * cpu pinned (that don't need to move), task pinned,
	 * cpu flexible, task flexible.
	 */
	cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);

	ctx_sched_in(ctx, cpuctx, EVENT_PINNED);
	cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE);
	ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE);

	cpuctx->task_ctx = ctx;

	/*
	 * Since these rotations are per-cpu, we need to ensure the
	 * cpu-context we got scheduled on is actually rotating.
	 */
	perf_pmu_rotate_start(ctx->pmu);
	perf_pmu_enable(ctx->pmu);
}
/*
 * Called from scheduler to add the events of the current task
 * with interrupts disabled.
 *
 * We restore the event value and then enable it.
 *
 * This does not protect us against NMI, but enable()
 * sets the enabled bit in the control field of event _before_
 * accessing the event control register. If an NMI hits, then it will
 * keep the event running.
 */
void __perf_event_task_sched_in(struct task_struct *task)
{
	struct perf_event_context *ctx;
	int ctxn;

	for_each_task_context_nr(ctxn) {
		ctx = task->perf_event_ctxp[ctxn];
		if (likely(!ctx))
			continue;

		perf_event_context_sched_in(ctx);
	}
}

#define MAX_INTERRUPTS (~0ULL)

static void perf_log_throttle(struct perf_event *event, int enable);
static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
{
	u64 frequency = event->attr.sample_freq;
	u64 sec = NSEC_PER_SEC;
	u64 divisor, dividend;

	int count_fls, nsec_fls, frequency_fls, sec_fls;

	count_fls = fls64(count);
	nsec_fls = fls64(nsec);
	frequency_fls = fls64(frequency);
	sec_fls = 30;

	/*
	 * We got @count in @nsec, with a target of sample_freq HZ
	 * the target period becomes:
	 *
	 *             @count * 10^9
	 * period = -------------------
	 *          @nsec * sample_freq
	 *
	 */

	/*
	 * Reduce accuracy by one bit such that @a and @b converge
	 * to a similar magnitude.
	 */
#define REDUCE_FLS(a, b)		\
do {					\
	if (a##_fls > b##_fls) {	\
		a >>= 1;		\
		a##_fls--;		\
	} else {			\
		b >>= 1;		\
		b##_fls--;		\
	}				\
} while (0)

	/*
	 * Reduce accuracy until either term fits in a u64, then proceed with
	 * the other, so that finally we can do a u64/u64 division.
	 */
	while (count_fls + sec_fls > 64 && nsec_fls + frequency_fls > 64) {
		REDUCE_FLS(nsec, frequency);
		REDUCE_FLS(sec, count);
	}

	if (count_fls + sec_fls > 64) {
		divisor = nsec * frequency;

		while (count_fls + sec_fls > 64) {
			REDUCE_FLS(count, sec);
			divisor >>= 1;
		}

		dividend = count * sec;
	} else {
		dividend = count * sec;

		while (nsec_fls + frequency_fls > 64) {
			REDUCE_FLS(nsec, frequency);
			dividend >>= 1;
		}

		divisor = nsec * frequency;
	}

	if (!divisor)
		return dividend;

	return div64_u64(dividend, divisor);
}
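/*
 * Editor's worked example of the formula above: with
 * sample_freq = 1000 Hz and a measured rate of count = 2,000,000 events
 * in nsec = 10^9 ns, the target period is
 *
 *	period = (2,000,000 * 10^9) / (10^9 * 1000) = 2000
 *
 * i.e. one sample every 2000 events yields ~1000 samples/sec. The
 * REDUCE_FLS() dance only drops low-order bits so that both the
 * dividend and the divisor fit in a u64 before the division.
 */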
static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count)
{
	struct hw_perf_event *hwc = &event->hw;
	s64 period, sample_period;
	s64 delta;

	period = perf_calculate_period(event, nsec, count);

	delta = (s64)(period - hwc->sample_period);
	delta = (delta + 7) / 8; /* low pass filter */

	sample_period = hwc->sample_period + delta;

	if (!sample_period)
		sample_period = 1;

	hwc->sample_period = sample_period;

	if (local64_read(&hwc->period_left) > 8*sample_period) {
		event->pmu->stop(event, PERF_EF_UPDATE);
		local64_set(&hwc->period_left, 0);
		event->pmu->start(event, PERF_EF_RELOAD);
	}
}
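/*
 * Editor's note: the (delta + 7) / 8 step above is a crude low-pass
 * filter, moving sample_period only 1/8th of the way toward the newly
 * computed period each time. E.g. going from sample_period = 1000
 * toward a computed period of 1800 adjusts by (800 + 7) / 8 = 100,
 * giving 1100 on this pass; repeated passes converge geometrically.
 * The event is only stopped and reprogrammed when period_left has
 * drifted more than 8 periods ahead.
 */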
static void perf_ctx_adjust_freq(struct perf_event_context *ctx, u64 period)
{
	struct perf_event *event;
	struct hw_perf_event *hwc;
	u64 interrupts, now;
	s64 delta;

	raw_spin_lock(&ctx->lock);
	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
		if (event->state != PERF_EVENT_STATE_ACTIVE)
			continue;

		if (event->cpu != -1 && event->cpu != smp_processor_id())
			continue;

		hwc = &event->hw;

		interrupts = hwc->interrupts;
		hwc->interrupts = 0;

		/*
		 * unthrottle events on the tick
		 */
		if (interrupts == MAX_INTERRUPTS) {
			perf_log_throttle(event, 1);
			event->pmu->start(event, 0);
		}

		if (!event->attr.freq || !event->attr.sample_freq)
			continue;

		event->pmu->read(event);
		now = local64_read(&event->count);
		delta = now - hwc->freq_count_stamp;
		hwc->freq_count_stamp = now;

		if (delta > 0)
			perf_adjust_period(event, period, delta);
	}
	raw_spin_unlock(&ctx->lock);
}

/*
 * Round-robin a context's events:
 */
static void rotate_ctx(struct perf_event_context *ctx)
{
	raw_spin_lock(&ctx->lock);

	/* Rotate the first entry last of non-pinned groups */
	list_rotate_left(&ctx->flexible_groups);

	raw_spin_unlock(&ctx->lock);
}
/*
 * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized
 * because they're strictly cpu affine and rotate_start is called with IRQs
 * disabled, while rotate_context is called from IRQ context.
 */
static void perf_rotate_context(struct perf_cpu_context *cpuctx)
{
	u64 interval = (u64)cpuctx->jiffies_interval * TICK_NSEC;
	struct perf_event_context *ctx = NULL;
	int rotate = 0, remove = 1;

	if (cpuctx->ctx.nr_events) {
		remove = 0;
		if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
			rotate = 1;
	}

	ctx = cpuctx->task_ctx;
	if (ctx && ctx->nr_events) {
		remove = 0;
		if (ctx->nr_events != ctx->nr_active)
			rotate = 1;
	}

	perf_pmu_disable(cpuctx->ctx.pmu);
	perf_ctx_adjust_freq(&cpuctx->ctx, interval);
	if (ctx)
		perf_ctx_adjust_freq(ctx, interval);

	if (!rotate)
		goto done;

	cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
	if (ctx)
		task_ctx_sched_out(ctx, EVENT_FLEXIBLE);

	rotate_ctx(&cpuctx->ctx);
	if (ctx)
		rotate_ctx(ctx);

	cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE);
	if (ctx)
		task_ctx_sched_in(ctx, EVENT_FLEXIBLE);

done:
	if (remove)
		list_del_init(&cpuctx->rotation_list);

	perf_pmu_enable(cpuctx->ctx.pmu);
}
void perf_event_task_tick(void)
{
	struct list_head *head = &__get_cpu_var(rotation_list);
	struct perf_cpu_context *cpuctx, *tmp;

	WARN_ON(!irqs_disabled());

	list_for_each_entry_safe(cpuctx, tmp, head, rotation_list) {
		if (cpuctx->jiffies_interval == 1 ||
				!(jiffies % cpuctx->jiffies_interval))
			perf_rotate_context(cpuctx);
	}
}

static int event_enable_on_exec(struct perf_event *event,
				struct perf_event_context *ctx)
{
	if (!event->attr.enable_on_exec)
		return 0;

	event->attr.enable_on_exec = 0;
	if (event->state >= PERF_EVENT_STATE_INACTIVE)
		return 0;

	__perf_event_mark_enabled(event, ctx);

	return 1;
}
/*
 * Enable all of a task's events that have been marked enable-on-exec.
 * This expects task == current.
 */
static void perf_event_enable_on_exec(struct perf_event_context *ctx)
{
	struct perf_event *event;
	unsigned long flags;
	int enabled = 0;
	int ret;

	local_irq_save(flags);
	if (!ctx || !ctx->nr_events)
		goto out;

	task_ctx_sched_out(ctx, EVENT_ALL);

	raw_spin_lock(&ctx->lock);

	list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
		ret = event_enable_on_exec(event, ctx);
		if (ret)
			enabled = 1;
	}

	list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
		ret = event_enable_on_exec(event, ctx);
		if (ret)
			enabled = 1;
	}

	/*
	 * Unclone this context if we enabled any event.
	 */
	if (enabled)
		unclone_ctx(ctx);

	raw_spin_unlock(&ctx->lock);

	perf_event_context_sched_in(ctx);
out:
	local_irq_restore(flags);
}
/*
 * Cross CPU call to read the hardware event
 */
static void __perf_event_read(void *info)
{
	struct perf_event *event = info;
	struct perf_event_context *ctx = event->ctx;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);

	/*
	 * If this is a task context, we need to check whether it is
	 * the current task context of this cpu. If not it has been
	 * scheduled out before the smp call arrived. In that case
	 * event->count would have been updated to a recent sample
	 * when the event was scheduled out.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)
		return;

	raw_spin_lock(&ctx->lock);
	update_context_time(ctx);
	update_event_times(event);
	raw_spin_unlock(&ctx->lock);

	event->pmu->read(event);
}

static inline u64 perf_event_count(struct perf_event *event)
{
	return local64_read(&event->count) + atomic64_read(&event->child_count);
}

static u64 perf_event_read(struct perf_event *event)
{
	/*
	 * If event is enabled and currently active on a CPU, update the
	 * value in the event structure:
	 */
	if (event->state == PERF_EVENT_STATE_ACTIVE) {
		smp_call_function_single(event->oncpu,
					 __perf_event_read, event, 1);
	} else if (event->state == PERF_EVENT_STATE_INACTIVE) {
		struct perf_event_context *ctx = event->ctx;
		unsigned long flags;

		raw_spin_lock_irqsave(&ctx->lock, flags);
		/*
		 * may read while context is not active
		 * (e.g., thread is blocked), in that case
		 * we cannot update context time
		 */
		if (ctx->is_active)
			update_context_time(ctx);
		update_event_times(event);
		raw_spin_unlock_irqrestore(&ctx->lock, flags);
	}

	return perf_event_count(event);
}
/*
 * Callchain support
 */

struct callchain_cpus_entries {
	struct rcu_head			rcu_head;
	struct perf_callchain_entry	*cpu_entries[0];
};

static DEFINE_PER_CPU(int, callchain_recursion[PERF_NR_CONTEXTS]);
static atomic_t nr_callchain_events;
static DEFINE_MUTEX(callchain_mutex);
struct callchain_cpus_entries *callchain_cpus_entries;

__weak void perf_callchain_kernel(struct perf_callchain_entry *entry,
				  struct pt_regs *regs)
{
}

__weak void perf_callchain_user(struct perf_callchain_entry *entry,
				struct pt_regs *regs)
{
}

static void release_callchain_buffers_rcu(struct rcu_head *head)
{
	struct callchain_cpus_entries *entries;
	int cpu;

	entries = container_of(head, struct callchain_cpus_entries, rcu_head);

	for_each_possible_cpu(cpu)
		kfree(entries->cpu_entries[cpu]);

	kfree(entries);
}

static void release_callchain_buffers(void)
{
	struct callchain_cpus_entries *entries;

	entries = callchain_cpus_entries;
	rcu_assign_pointer(callchain_cpus_entries, NULL);
	call_rcu(&entries->rcu_head, release_callchain_buffers_rcu);
}
static int alloc_callchain_buffers(void)
{
	int cpu;
	int size;
	struct callchain_cpus_entries *entries;

	/*
	 * We can't use the percpu allocation API for data that can be
	 * accessed from NMI. Use a temporary manual per cpu allocation
	 * until that gets sorted out.
	 */
	size = sizeof(*entries) + sizeof(struct perf_callchain_entry *) *
		num_possible_cpus();

	entries = kzalloc(size, GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

	size = sizeof(struct perf_callchain_entry) * PERF_NR_CONTEXTS;

	for_each_possible_cpu(cpu) {
		entries->cpu_entries[cpu] = kmalloc_node(size, GFP_KERNEL,
							 cpu_to_node(cpu));
		if (!entries->cpu_entries[cpu])
			goto fail;
	}

	rcu_assign_pointer(callchain_cpus_entries, entries);

	return 0;

fail:
	for_each_possible_cpu(cpu)
		kfree(entries->cpu_entries[cpu]);
	kfree(entries);

	return -ENOMEM;
}
static int get_callchain_buffers(void)
{
	int err = 0;
	int count;

	mutex_lock(&callchain_mutex);

	count = atomic_inc_return(&nr_callchain_events);
	if (WARN_ON_ONCE(count < 1)) {
		err = -EINVAL;
		goto exit;
	}

	if (count > 1) {
		/* If the allocation failed, give up */
		if (!callchain_cpus_entries)
			err = -ENOMEM;
		goto exit;
	}

	err = alloc_callchain_buffers();
	if (err)
		release_callchain_buffers();
exit:
	mutex_unlock(&callchain_mutex);

	return err;
}

static void put_callchain_buffers(void)
{
	if (atomic_dec_and_mutex_lock(&nr_callchain_events, &callchain_mutex)) {
		release_callchain_buffers();
		mutex_unlock(&callchain_mutex);
	}
}
static int get_recursion_context(int *recursion)
{
	int rctx;

	if (in_nmi())
		rctx = 3;
	else if (in_irq())
		rctx = 2;
	else if (in_softirq())
		rctx = 1;
	else
		rctx = 0;

	if (recursion[rctx])
		return -1;

	recursion[rctx]++;
	barrier();

	return rctx;
}
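/*
 * Editor's note: the four recursion slots map to the preemption
 * contexts: 0 = task, 1 = softirq, 2 = hardirq, 3 = NMI. An NMI
 * arriving while a task-context callchain is being captured takes
 * slot 3 and proceeds; a second event in the *same* context
 * (recursion[rctx] already set) gets -1 and is dropped instead of
 * corrupting that context's scratch entry.
 */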
static inline void put_recursion_context(int *recursion, int rctx)
{
	barrier();
	recursion[rctx]--;
}

static struct perf_callchain_entry *get_callchain_entry(int *rctx)
{
	int cpu;
	struct callchain_cpus_entries *entries;

	*rctx = get_recursion_context(__get_cpu_var(callchain_recursion));
	if (*rctx == -1)
		return NULL;

	entries = rcu_dereference(callchain_cpus_entries);
	if (!entries)
		return NULL;

	cpu = smp_processor_id();

	return &entries->cpu_entries[cpu][*rctx];
}

static void
put_callchain_entry(int rctx)
{
	put_recursion_context(__get_cpu_var(callchain_recursion), rctx);
}
static struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
{
	int rctx;
	struct perf_callchain_entry *entry;

	entry = get_callchain_entry(&rctx);
	if (rctx == -1)
		return NULL;

	if (!entry)
		goto exit_put;

	entry->nr = 0;

	if (!user_mode(regs)) {
		perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
		perf_callchain_kernel(entry, regs);
		if (current->mm)
			regs = task_pt_regs(current);
		else
			regs = NULL;
	}

	if (regs) {
		perf_callchain_store(entry, PERF_CONTEXT_USER);
		perf_callchain_user(entry, regs);
	}

exit_put:
	put_callchain_entry(rctx);

	return entry;
}
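/*
 * Editor's sketch of the resulting entry layout: kernel and user frames
 * are separated by context markers, so a mixed callchain looks like
 *
 *	PERF_CONTEXT_KERNEL, k_ip0, k_ip1, ...,
 *	PERF_CONTEXT_USER,   u_ip0, u_ip1, ...
 *
 * A pure-kernel thread (no current->mm) gets no user half at all.
 */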
/*
 * Initialize the perf_event context in a task_struct:
 */
static void __perf_event_init_context(struct perf_event_context *ctx)
{
	raw_spin_lock_init(&ctx->lock);
	mutex_init(&ctx->mutex);
	INIT_LIST_HEAD(&ctx->pinned_groups);
	INIT_LIST_HEAD(&ctx->flexible_groups);
	INIT_LIST_HEAD(&ctx->event_list);
	atomic_set(&ctx->refcount, 1);
}

static struct perf_event_context *
alloc_perf_context(struct pmu *pmu, struct task_struct *task)
{
	struct perf_event_context *ctx;

	ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL);
	if (!ctx)
		return NULL;

	__perf_event_init_context(ctx);
	if (task) {
		ctx->task = task;
		get_task_struct(task);
	}
	ctx->pmu = pmu;

	return ctx;
}

static struct task_struct *
find_lively_task_by_vpid(pid_t vpid)
{
	struct task_struct *task;
	int err;

	rcu_read_lock();
	if (!vpid)
		task = current;
	else
		task = find_task_by_vpid(vpid);
	if (task)
		get_task_struct(task);
	rcu_read_unlock();

	if (!task)
		return ERR_PTR(-ESRCH);

	/*
	 * Can't attach events to a dying task.
	 */
	err = -ESRCH;
	if (task->flags & PF_EXITING)
		goto errout;

	/* Reuse ptrace permission checks for now. */
	err = -EACCES;
	if (!ptrace_may_access(task, PTRACE_MODE_READ))
		goto errout;

	return task;
errout:
	put_task_struct(task);
	return ERR_PTR(err);
}
static struct perf_event_context *
find_get_context(struct pmu *pmu, struct task_struct *task, int cpu)
{
	struct perf_event_context *ctx;
	struct perf_cpu_context *cpuctx;
	unsigned long flags;
	int ctxn, err;

	if (!task && cpu != -1) {
		/* Must be root to operate on a CPU event: */
		if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
			return ERR_PTR(-EACCES);

		if (cpu < 0 || cpu >= nr_cpumask_bits)
			return ERR_PTR(-EINVAL);

		/*
		 * We could be clever and allow attaching an event to an
		 * offline CPU and activate it when the CPU comes up, but
		 * that's for later.
		 */
		if (!cpu_online(cpu))
			return ERR_PTR(-ENODEV);

		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
		ctx = &cpuctx->ctx;
		get_ctx(ctx);

		return ctx;
	}

	err = -EINVAL;
	ctxn = pmu->task_ctx_nr;
	if (ctxn < 0)
		goto errout;

retry:
	ctx = perf_lock_task_context(task, ctxn, &flags);
	if (ctx) {
		unclone_ctx(ctx);
		raw_spin_unlock_irqrestore(&ctx->lock, flags);
	}

	if (!ctx) {
		ctx = alloc_perf_context(pmu, task);
		err = -ENOMEM;
		if (!ctx)
			goto errout;

		get_ctx(ctx);

		if (cmpxchg(&task->perf_event_ctxp[ctxn], NULL, ctx)) {
			/*
			 * We raced with some other task; use
			 * the context they set.
			 */
			put_task_struct(task);
			kfree(ctx);
			goto retry;
		}
	}

	return ctx;

errout:
	return ERR_PTR(err);
}
static void perf_event_free_filter(struct perf_event *event);

static void free_event_rcu(struct rcu_head *head)
{
	struct perf_event *event;

	event = container_of(head, struct perf_event, rcu_head);
	if (event->ns)
		put_pid_ns(event->ns);
	perf_event_free_filter(event);
	kfree(event);
}

static void perf_buffer_put(struct perf_buffer *buffer);

static void free_event(struct perf_event *event)
{
	irq_work_sync(&event->pending);

	if (!event->parent) {
		if (event->attach_state & PERF_ATTACH_TASK)
			jump_label_dec(&perf_task_events);
		if (event->attr.mmap || event->attr.mmap_data)
			atomic_dec(&nr_mmap_events);
		if (event->attr.comm)
			atomic_dec(&nr_comm_events);
		if (event->attr.task)
			atomic_dec(&nr_task_events);
		if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
			put_callchain_buffers();
	}

	if (event->buffer) {
		perf_buffer_put(event->buffer);
		event->buffer = NULL;
	}

	if (event->destroy)
		event->destroy(event);

	if (event->ctx)
		put_ctx(event->ctx);

	call_rcu(&event->rcu_head, free_event_rcu);
}
int perf_event_release_kernel(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;

	/*
	 * Remove from the PMU, can't get re-enabled since we got
	 * here because the last ref went.
	 */
	perf_event_disable(event);

	WARN_ON_ONCE(ctx->parent_ctx);
	/*
	 * There are two ways this annotation is useful:
	 *
	 *  1) there is a lock recursion from perf_event_exit_task
	 *     see the comment there.
	 *
	 *  2) there is a lock-inversion with mmap_sem through
	 *     perf_event_read_group(), which takes faults while
	 *     holding ctx->mutex, however this is called after
	 *     the last filedesc died, so there is no possibility
	 *     to trigger the AB-BA case.
	 */
	mutex_lock_nested(&ctx->mutex, SINGLE_DEPTH_NESTING);
	raw_spin_lock_irq(&ctx->lock);
	perf_group_detach(event);
	list_del_event(event, ctx);
	raw_spin_unlock_irq(&ctx->lock);
	mutex_unlock(&ctx->mutex);

	mutex_lock(&event->owner->perf_event_mutex);
	list_del_init(&event->owner_entry);
	mutex_unlock(&event->owner->perf_event_mutex);
	put_task_struct(event->owner);

	free_event(event);

	return 0;
}
EXPORT_SYMBOL_GPL(perf_event_release_kernel);

/*
 * Called when the last reference to the file is gone.
 */
static int perf_release(struct inode *inode, struct file *file)
{
	struct perf_event *event = file->private_data;

	file->private_data = NULL;

	return perf_event_release_kernel(event);
}
static int perf_event_read_size(struct perf_event *event)
{
	int entry = sizeof(u64); /* value */
	int size = 0;
	int nr = 1;

	if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		size += sizeof(u64);

	if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		size += sizeof(u64);

	if (event->attr.read_format & PERF_FORMAT_ID)
		entry += sizeof(u64);

	if (event->attr.read_format & PERF_FORMAT_GROUP) {
		nr += event->group_leader->nr_siblings;
		size += sizeof(u64);
	}

	size += entry * nr;

	return size;
}
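
/*
 * Worked example (not from the original source): a leader with two
 * siblings read with PERF_FORMAT_GROUP | PERF_FORMAT_ID gives nr = 3
 * and entry = 16 bytes (value + id), plus 8 bytes for the leading nr
 * field, i.e. 8 + 3 * 16 = 56 bytes per read().
 */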
u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
{
	struct perf_event *child;
	u64 total = 0;

	*enabled = 0;
	*running = 0;

	mutex_lock(&event->child_mutex);
	total += perf_event_read(event);
	*enabled += event->total_time_enabled +
			atomic64_read(&event->child_total_time_enabled);
	*running += event->total_time_running +
			atomic64_read(&event->child_total_time_running);

	list_for_each_entry(child, &event->child_list, child_list) {
		total += perf_event_read(child);
		*enabled += child->total_time_enabled;
		*running += child->total_time_running;
	}
	mutex_unlock(&event->child_mutex);

	return total;
}
EXPORT_SYMBOL_GPL(perf_event_read_value);

static int perf_event_read_group(struct perf_event *event,
				   u64 read_format, char __user *buf)
{
	struct perf_event *leader = event->group_leader, *sub;
	int n = 0, size = 0, ret = -EFAULT;
	struct perf_event_context *ctx = leader->ctx;
	u64 values[5];
	u64 count, enabled, running;

	mutex_lock(&ctx->mutex);
	count = perf_event_read_value(leader, &enabled, &running);

	values[n++] = 1 + leader->nr_siblings;
	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		values[n++] = enabled;
	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		values[n++] = running;
	values[n++] = count;
	if (read_format & PERF_FORMAT_ID)
		values[n++] = primary_event_id(leader);

	size = n * sizeof(u64);

	if (copy_to_user(buf, values, size))
		goto unlock;

	ret = size;

	list_for_each_entry(sub, &leader->sibling_list, group_entry) {
		n = 0;

		values[n++] = perf_event_read_value(sub, &enabled, &running);
		if (read_format & PERF_FORMAT_ID)
			values[n++] = primary_event_id(sub);

		size = n * sizeof(u64);

		if (copy_to_user(buf + ret, values, size)) {
			ret = -EFAULT;
			goto unlock;
		}

		ret += size;
	}
unlock:
	mutex_unlock(&ctx->mutex);

	return ret;
}

static int perf_event_read_one(struct perf_event *event,
				 u64 read_format, char __user *buf)
{
	u64 enabled, running;
	u64 values[4];
	int n = 0;

	values[n++] = perf_event_read_value(event, &enabled, &running);
	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		values[n++] = enabled;
	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		values[n++] = running;
	if (read_format & PERF_FORMAT_ID)
		values[n++] = primary_event_id(event);

	if (copy_to_user(buf, values, n * sizeof(u64)))
		return -EFAULT;

	return n * sizeof(u64);
}
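
/*
 * Userspace view, as a sketch (not part of this file): with
 * PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING a
 * read() of a single event returns three u64s:
 *
 *	struct { u64 value, time_enabled, time_running; } rf;
 *	read(fd, &rf, sizeof(rf));
 *
 * value can then be scaled by time_enabled / time_running to estimate
 * the count a multiplexed event would have reached if always scheduled.
 */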
/*
 * Read the performance event - simple non blocking version for now
 */
static ssize_t
perf_read_hw(struct perf_event *event, char __user *buf, size_t count)
{
	u64 read_format = event->attr.read_format;
	int ret;

	/*
	 * Return end-of-file for a read on an event that is in
	 * error state (i.e. because it was pinned but it couldn't be
	 * scheduled on to the CPU at some point).
	 */
	if (event->state == PERF_EVENT_STATE_ERROR)
		return 0;

	if (count < perf_event_read_size(event))
		return -ENOSPC;

	WARN_ON_ONCE(event->ctx->parent_ctx);
	if (read_format & PERF_FORMAT_GROUP)
		ret = perf_event_read_group(event, read_format, buf);
	else
		ret = perf_event_read_one(event, read_format, buf);

	return ret;
}

static ssize_t
perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	struct perf_event *event = file->private_data;

	return perf_read_hw(event, buf, count);
}
static unsigned int perf_poll(struct file *file, poll_table *wait)
{
	struct perf_event *event = file->private_data;
	struct perf_buffer *buffer;
	unsigned int events = POLLHUP;

	rcu_read_lock();
	buffer = rcu_dereference(event->buffer);
	if (buffer)
		events = atomic_xchg(&buffer->poll, 0);
	rcu_read_unlock();

	poll_wait(file, &event->waitq, wait);

	return events;
}

static void perf_event_reset(struct perf_event *event)
{
	(void)perf_event_read(event);
	local64_set(&event->count, 0);
	perf_event_update_userpage(event);
}
/*
 * Holding the top-level event's child_mutex means that any
 * descendant process that has inherited this event will block
 * in sync_child_event if it goes to exit, thus satisfying the
 * task existence requirements of perf_event_enable/disable.
 */
static void perf_event_for_each_child(struct perf_event *event,
					void (*func)(struct perf_event *))
{
	struct perf_event *child;

	WARN_ON_ONCE(event->ctx->parent_ctx);
	mutex_lock(&event->child_mutex);
	func(event);
	list_for_each_entry(child, &event->child_list, child_list)
		func(child);
	mutex_unlock(&event->child_mutex);
}

static void perf_event_for_each(struct perf_event *event,
				  void (*func)(struct perf_event *))
{
	struct perf_event_context *ctx = event->ctx;
	struct perf_event *sibling;

	WARN_ON_ONCE(ctx->parent_ctx);
	mutex_lock(&ctx->mutex);
	event = event->group_leader;

	perf_event_for_each_child(event, func);
	list_for_each_entry(sibling, &event->sibling_list, group_entry)
		perf_event_for_each_child(sibling, func);
	mutex_unlock(&ctx->mutex);
}
static int perf_event_period(struct perf_event *event, u64 __user *arg)
{
	struct perf_event_context *ctx = event->ctx;
	int ret = 0;
	u64 value;

	if (!event->attr.sample_period)
		return -EINVAL;

	if (copy_from_user(&value, arg, sizeof(value)))
		return -EFAULT;

	if (!value)
		return -EINVAL;

	raw_spin_lock_irq(&ctx->lock);
	if (event->attr.freq) {
		if (value > sysctl_perf_event_sample_rate) {
			ret = -EINVAL;
			goto unlock;
		}

		event->attr.sample_freq = value;
	} else {
		event->attr.sample_period = value;
		event->hw.sample_period = value;
	}
unlock:
	raw_spin_unlock_irq(&ctx->lock);

	return ret;
}
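
/*
 * Userspace sketch (not part of this file): adjusting the period of an
 * already-open perf fd at runtime, assuming <linux/perf_event.h> and a
 * valid fd from sys_perf_event_open():
 *
 *	u64 period = 100000;
 *	if (ioctl(fd, PERF_EVENT_IOC_PERIOD, &period))
 *		perror("PERF_EVENT_IOC_PERIOD");
 */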
static const struct file_operations perf_fops;

static struct perf_event *perf_fget_light(int fd, int *fput_needed)
{
	struct file *file;

	file = fget_light(fd, fput_needed);
	if (!file)
		return ERR_PTR(-EBADF);

	if (file->f_op != &perf_fops) {
		fput_light(file, *fput_needed);
		*fput_needed = 0;
		return ERR_PTR(-EBADF);
	}

	return file->private_data;
}

static int perf_event_set_output(struct perf_event *event,
				 struct perf_event *output_event);
static int perf_event_set_filter(struct perf_event *event, void __user *arg);

static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct perf_event *event = file->private_data;
	void (*func)(struct perf_event *);
	u32 flags = arg;

	switch (cmd) {
	case PERF_EVENT_IOC_ENABLE:
		func = perf_event_enable;
		break;
	case PERF_EVENT_IOC_DISABLE:
		func = perf_event_disable;
		break;
	case PERF_EVENT_IOC_RESET:
		func = perf_event_reset;
		break;

	case PERF_EVENT_IOC_REFRESH:
		return perf_event_refresh(event, arg);

	case PERF_EVENT_IOC_PERIOD:
		return perf_event_period(event, (u64 __user *)arg);

	case PERF_EVENT_IOC_SET_OUTPUT:
	{
		struct perf_event *output_event = NULL;
		int fput_needed = 0;
		int ret;

		if (arg != -1) {
			output_event = perf_fget_light(arg, &fput_needed);
			if (IS_ERR(output_event))
				return PTR_ERR(output_event);
		}

		ret = perf_event_set_output(event, output_event);
		if (output_event)
			fput_light(output_event->filp, fput_needed);

		return ret;
	}

	case PERF_EVENT_IOC_SET_FILTER:
		return perf_event_set_filter(event, (void __user *)arg);

	default:
		return -ENOTTY;
	}

	if (flags & PERF_IOC_FLAG_GROUP)
		perf_event_for_each(event, func);
	else
		perf_event_for_each_child(event, func);

	return 0;
}

int perf_event_task_enable(void)
{
	struct perf_event *event;

	mutex_lock(&current->perf_event_mutex);
	list_for_each_entry(event, &current->perf_event_list, owner_entry)
		perf_event_for_each_child(event, perf_event_enable);
	mutex_unlock(&current->perf_event_mutex);

	return 0;
}

int perf_event_task_disable(void)
{
	struct perf_event *event;

	mutex_lock(&current->perf_event_mutex);
	list_for_each_entry(event, &current->perf_event_list, owner_entry)
		perf_event_for_each_child(event, perf_event_disable);
	mutex_unlock(&current->perf_event_mutex);

	return 0;
}

#ifndef PERF_EVENT_INDEX_OFFSET
# define PERF_EVENT_INDEX_OFFSET 0
#endif

static int perf_event_index(struct perf_event *event)
{
	if (event->hw.state & PERF_HES_STOPPED)
		return 0;

	if (event->state != PERF_EVENT_STATE_ACTIVE)
		return 0;

	return event->hw.idx + 1 - PERF_EVENT_INDEX_OFFSET;
}
/*
 * Callers need to ensure there can be no nesting of this function, otherwise
 * the seqlock logic goes bad. We can not serialize this because the arch
 * code calls this from NMI context.
 */
void perf_event_update_userpage(struct perf_event *event)
{
	struct perf_event_mmap_page *userpg;
	struct perf_buffer *buffer;

	rcu_read_lock();
	buffer = rcu_dereference(event->buffer);
	if (!buffer)
		goto unlock;

	userpg = buffer->user_page;

	/*
	 * Disable preemption so as to not let the corresponding user-space
	 * spin too long if we get preempted.
	 */
	preempt_disable();
	++userpg->lock;
	barrier();
	userpg->index = perf_event_index(event);
	userpg->offset = perf_event_count(event);
	if (event->state == PERF_EVENT_STATE_ACTIVE)
		userpg->offset -= local64_read(&event->hw.prev_count);

	userpg->time_enabled = event->total_time_enabled +
			atomic64_read(&event->child_total_time_enabled);

	userpg->time_running = event->total_time_running +
			atomic64_read(&event->child_total_time_running);

	barrier();
	++userpg->lock;
	preempt_enable();
unlock:
	rcu_read_unlock();
}
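
/*
 * Userspace counterpart, as a sketch (not part of this file): ->lock is
 * bumped before and after every update above, so a reader of the
 * mmap'ed control page retries until it sees the same value on both
 * sides of its reads, roughly:
 *
 *	struct perf_event_mmap_page *pc = base;
 *	u32 seq;
 *	do {
 *		seq = pc->lock;
 *		barrier();
 *		// read pc->index, pc->offset, pc->time_* here
 *		barrier();
 *	} while (pc->lock != seq);
 */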
static unsigned long perf_data_size(struct perf_buffer *buffer);

static void
perf_buffer_init(struct perf_buffer *buffer, long watermark, int flags)
{
	long max_size = perf_data_size(buffer);

	if (watermark)
		buffer->watermark = min(max_size, watermark);

	if (!buffer->watermark)
		buffer->watermark = max_size / 2;

	if (flags & PERF_BUFFER_WRITABLE)
		buffer->writable = 1;

	atomic_set(&buffer->refcount, 1);
}

#ifndef CONFIG_PERF_USE_VMALLOC

/*
 * Back perf_mmap() with regular GFP_KERNEL-0 pages.
 */

static struct page *
perf_mmap_to_page(struct perf_buffer *buffer, unsigned long pgoff)
{
	if (pgoff > buffer->nr_pages)
		return NULL;

	if (pgoff == 0)
		return virt_to_page(buffer->user_page);

	return virt_to_page(buffer->data_pages[pgoff - 1]);
}

static void *perf_mmap_alloc_page(int cpu)
{
	struct page *page;
	int node;

	node = (cpu == -1) ? cpu : cpu_to_node(cpu);
	page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
	if (!page)
		return NULL;

	return page_address(page);
}

static struct perf_buffer *
perf_buffer_alloc(int nr_pages, long watermark, int cpu, int flags)
{
	struct perf_buffer *buffer;
	unsigned long size;
	int i;

	size = sizeof(struct perf_buffer);
	size += nr_pages * sizeof(void *);

	buffer = kzalloc(size, GFP_KERNEL);
	if (!buffer)
		goto fail;

	buffer->user_page = perf_mmap_alloc_page(cpu);
	if (!buffer->user_page)
		goto fail_user_page;

	for (i = 0; i < nr_pages; i++) {
		buffer->data_pages[i] = perf_mmap_alloc_page(cpu);
		if (!buffer->data_pages[i])
			goto fail_data_pages;
	}

	buffer->nr_pages = nr_pages;

	perf_buffer_init(buffer, watermark, flags);

	return buffer;

fail_data_pages:
	for (i--; i >= 0; i--)
		free_page((unsigned long)buffer->data_pages[i]);

	free_page((unsigned long)buffer->user_page);

fail_user_page:
	kfree(buffer);

fail:
	return NULL;
}

static void perf_mmap_free_page(unsigned long addr)
{
	struct page *page = virt_to_page((void *)addr);

	page->mapping = NULL;
	__free_page(page);
}

static void perf_buffer_free(struct perf_buffer *buffer)
{
	int i;

	perf_mmap_free_page((unsigned long)buffer->user_page);
	for (i = 0; i < buffer->nr_pages; i++)
		perf_mmap_free_page((unsigned long)buffer->data_pages[i]);
	kfree(buffer);
}

static inline int page_order(struct perf_buffer *buffer)
{
	return 0;
}

#else

/*
 * Back perf_mmap() with vmalloc memory.
 *
 * Required for architectures that have d-cache aliasing issues.
 */

static inline int page_order(struct perf_buffer *buffer)
{
	return buffer->page_order;
}

static struct page *
perf_mmap_to_page(struct perf_buffer *buffer, unsigned long pgoff)
{
	if (pgoff > (1UL << page_order(buffer)))
		return NULL;

	return vmalloc_to_page((void *)buffer->user_page + pgoff * PAGE_SIZE);
}

static void perf_mmap_unmark_page(void *addr)
{
	struct page *page = vmalloc_to_page(addr);

	page->mapping = NULL;
}

static void perf_buffer_free_work(struct work_struct *work)
{
	struct perf_buffer *buffer;
	void *base;
	int i, nr;

	buffer = container_of(work, struct perf_buffer, work);
	nr = 1 << page_order(buffer);

	base = buffer->user_page;
	for (i = 0; i < nr + 1; i++)
		perf_mmap_unmark_page(base + (i * PAGE_SIZE));

	vfree(base);
	kfree(buffer);
}

static void perf_buffer_free(struct perf_buffer *buffer)
{
	schedule_work(&buffer->work);
}

static struct perf_buffer *
perf_buffer_alloc(int nr_pages, long watermark, int cpu, int flags)
{
	struct perf_buffer *buffer;
	unsigned long size;
	void *all_buf;

	size = sizeof(struct perf_buffer);
	size += sizeof(void *);

	buffer = kzalloc(size, GFP_KERNEL);
	if (!buffer)
		goto fail;

	INIT_WORK(&buffer->work, perf_buffer_free_work);

	all_buf = vmalloc_user((nr_pages + 1) * PAGE_SIZE);
	if (!all_buf)
		goto fail_all_buf;

	buffer->user_page = all_buf;
	buffer->data_pages[0] = all_buf + PAGE_SIZE;
	buffer->page_order = ilog2(nr_pages);
	buffer->nr_pages = 1;

	perf_buffer_init(buffer, watermark, flags);

	return buffer;

fail_all_buf:
	kfree(buffer);

fail:
	return NULL;
}

#endif

static unsigned long perf_data_size(struct perf_buffer *buffer)
{
	return buffer->nr_pages << (PAGE_SHIFT + page_order(buffer));
}

static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct perf_event *event = vma->vm_file->private_data;
	struct perf_buffer *buffer;
	int ret = VM_FAULT_SIGBUS;

	if (vmf->flags & FAULT_FLAG_MKWRITE) {
		if (vmf->pgoff == 0)
			ret = 0;
		return ret;
	}

	rcu_read_lock();
	buffer = rcu_dereference(event->buffer);
	if (!buffer)
		goto unlock;

	if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE))
		goto unlock;

	vmf->page = perf_mmap_to_page(buffer, vmf->pgoff);
	if (!vmf->page)
		goto unlock;

	get_page(vmf->page);
	vmf->page->mapping = vma->vm_file->f_mapping;
	vmf->page->index   = vmf->pgoff;

	ret = 0;
unlock:
	rcu_read_unlock();

	return ret;
}

static void perf_buffer_free_rcu(struct rcu_head *rcu_head)
{
	struct perf_buffer *buffer;

	buffer = container_of(rcu_head, struct perf_buffer, rcu_head);
	perf_buffer_free(buffer);
}

static struct perf_buffer *perf_buffer_get(struct perf_event *event)
{
	struct perf_buffer *buffer;

	rcu_read_lock();
	buffer = rcu_dereference(event->buffer);
	if (buffer) {
		if (!atomic_inc_not_zero(&buffer->refcount))
			buffer = NULL;
	}
	rcu_read_unlock();

	return buffer;
}

static void perf_buffer_put(struct perf_buffer *buffer)
{
	if (!atomic_dec_and_test(&buffer->refcount))
		return;

	call_rcu(&buffer->rcu_head, perf_buffer_free_rcu);
}

static void perf_mmap_open(struct vm_area_struct *vma)
{
	struct perf_event *event = vma->vm_file->private_data;

	atomic_inc(&event->mmap_count);
}

static void perf_mmap_close(struct vm_area_struct *vma)
{
	struct perf_event *event = vma->vm_file->private_data;

	if (atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) {
		unsigned long size = perf_data_size(event->buffer);
		struct user_struct *user = event->mmap_user;
		struct perf_buffer *buffer = event->buffer;

		atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm);
		vma->vm_mm->locked_vm -= event->mmap_locked;
		rcu_assign_pointer(event->buffer, NULL);
		mutex_unlock(&event->mmap_mutex);

		perf_buffer_put(buffer);
		free_uid(user);
	}
}

static const struct vm_operations_struct perf_mmap_vmops = {
	.open		= perf_mmap_open,
	.close		= perf_mmap_close,
	.fault		= perf_mmap_fault,
	.page_mkwrite	= perf_mmap_fault,
};
static int perf_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct perf_event *event = file->private_data;
	unsigned long user_locked, user_lock_limit;
	struct user_struct *user = current_user();
	unsigned long locked, lock_limit;
	struct perf_buffer *buffer;
	unsigned long vma_size;
	unsigned long nr_pages;
	long user_extra, extra;
	int ret = 0, flags = 0;

	/*
	 * Don't allow mmap() of inherited per-task counters. This would
	 * create a performance issue due to all children writing to the
	 * same buffer.
	 */
	if (event->cpu == -1 && event->attr.inherit)
		return -EINVAL;

	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma_size = vma->vm_end - vma->vm_start;
	nr_pages = (vma_size / PAGE_SIZE) - 1;

	/*
	 * If we have buffer pages ensure they're a power-of-two number, so we
	 * can do bitmasks instead of modulo.
	 */
	if (nr_pages != 0 && !is_power_of_2(nr_pages))
		return -EINVAL;

	if (vma_size != PAGE_SIZE * (1 + nr_pages))
		return -EINVAL;

	if (vma->vm_pgoff != 0)
		return -EINVAL;

	WARN_ON_ONCE(event->ctx->parent_ctx);
	mutex_lock(&event->mmap_mutex);
	if (event->buffer) {
		if (event->buffer->nr_pages == nr_pages)
			atomic_inc(&event->buffer->refcount);
		else
			ret = -EINVAL;
		goto unlock;
	}

	user_extra = nr_pages + 1;
	user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10);

	/*
	 * Increase the limit linearly with more CPUs:
	 */
	user_lock_limit *= num_online_cpus();

	user_locked = atomic_long_read(&user->locked_vm) + user_extra;

	extra = 0;
	if (user_locked > user_lock_limit)
		extra = user_locked - user_lock_limit;

	lock_limit = rlimit(RLIMIT_MEMLOCK);
	lock_limit >>= PAGE_SHIFT;
	locked = vma->vm_mm->locked_vm + extra;

	if ((locked > lock_limit) && perf_paranoid_tracepoint_raw() &&
		!capable(CAP_IPC_LOCK)) {
		ret = -EPERM;
		goto unlock;
	}

	WARN_ON(event->buffer);

	if (vma->vm_flags & VM_WRITE)
		flags |= PERF_BUFFER_WRITABLE;

	buffer = perf_buffer_alloc(nr_pages, event->attr.wakeup_watermark,
				   event->cpu, flags);
	if (!buffer) {
		ret = -ENOMEM;
		goto unlock;
	}
	rcu_assign_pointer(event->buffer, buffer);

	atomic_long_add(user_extra, &user->locked_vm);
	event->mmap_locked = extra;
	event->mmap_user = get_current_user();
	vma->vm_mm->locked_vm += event->mmap_locked;

unlock:
	if (!ret)
		atomic_inc(&event->mmap_count);
	mutex_unlock(&event->mmap_mutex);

	vma->vm_flags |= VM_RESERVED;
	vma->vm_ops = &perf_mmap_vmops;

	return ret;
}
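
/*
 * Userspace sketch (not part of this file): the mapping must be
 * 1 + 2^n pages -- one control page plus a power-of-two data area.
 * E.g. for n = 3 with 4K pages:
 *
 *	size_t len = (1 + 8) * 4096;
 *	void *base = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd, 0);
 */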
static int perf_fasync(int fd, struct file *filp, int on)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct perf_event *event = filp->private_data;
	int retval;

	mutex_lock(&inode->i_mutex);
	retval = fasync_helper(fd, filp, on, &event->fasync);
	mutex_unlock(&inode->i_mutex);

	if (retval < 0)
		return retval;

	return 0;
}

static const struct file_operations perf_fops = {
	.llseek			= no_llseek,
	.release		= perf_release,
	.read			= perf_read,
	.poll			= perf_poll,
	.unlocked_ioctl		= perf_ioctl,
	.compat_ioctl		= perf_ioctl,
	.mmap			= perf_mmap,
	.fasync			= perf_fasync,
};

/*
 * Perf event wakeup
 *
 * If there's data, ensure we set the poll() state and publish everything
 * to user-space before waking everybody up.
 */

void perf_event_wakeup(struct perf_event *event)
{
	wake_up_all(&event->waitq);

	if (event->pending_kill) {
		kill_fasync(&event->fasync, SIGIO, event->pending_kill);
		event->pending_kill = 0;
	}
}

static void perf_pending_event(struct irq_work *entry)
{
	struct perf_event *event = container_of(entry,
			struct perf_event, pending);

	if (event->pending_disable) {
		event->pending_disable = 0;
		__perf_event_disable(event);
	}

	if (event->pending_wakeup) {
		event->pending_wakeup = 0;
		perf_event_wakeup(event);
	}
}

/*
 * We assume there is only KVM supporting the callbacks.
 * Later on, we might change it to a list if there is
 * another virtualization implementation supporting the callbacks.
 */
struct perf_guest_info_callbacks *perf_guest_cbs;

int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
{
	perf_guest_cbs = cbs;
	return 0;
}
EXPORT_SYMBOL_GPL(perf_register_guest_info_callbacks);

int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
{
	perf_guest_cbs = NULL;
	return 0;
}
EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks);
/*
 * Output
 */
static bool perf_output_space(struct perf_buffer *buffer, unsigned long tail,
			      unsigned long offset, unsigned long head)
{
	unsigned long mask;

	if (!buffer->writable)
		return true;

	mask = perf_data_size(buffer) - 1;

	offset = (offset - tail) & mask;
	head   = (head   - tail) & mask;

	if ((int)(head - offset) < 0)
		return false;

	return true;
}

static void perf_output_wakeup(struct perf_output_handle *handle)
{
	atomic_set(&handle->buffer->poll, POLLIN);

	if (handle->nmi) {
		handle->event->pending_wakeup = 1;
		irq_work_queue(&handle->event->pending);
	} else
		perf_event_wakeup(handle->event);
}

/*
 * We need to ensure a later event_id doesn't publish a head when a former
 * event isn't done writing. However since we need to deal with NMIs we
 * cannot fully serialize things.
 *
 * We only publish the head (and generate a wakeup) when the outer-most
 * event completes.
 */
static void perf_output_get_handle(struct perf_output_handle *handle)
{
	struct perf_buffer *buffer = handle->buffer;

	preempt_disable();
	local_inc(&buffer->nest);
	handle->wakeup = local_read(&buffer->wakeup);
}

static void perf_output_put_handle(struct perf_output_handle *handle)
{
	struct perf_buffer *buffer = handle->buffer;
	unsigned long head;

again:
	head = local_read(&buffer->head);

	/*
	 * IRQ/NMI can happen here, which means we can miss a head update.
	 */

	if (!local_dec_and_test(&buffer->nest))
		goto out;

	/*
	 * Publish the known good head. Rely on the full barrier implied
	 * by atomic_dec_and_test() to order the buffer->head read and this
	 * write.
	 */
	buffer->user_page->data_head = head;

	/*
	 * Now check if we missed an update, rely on the (compiler)
	 * barrier in atomic_dec_and_test() to re-read buffer->head.
	 */
	if (unlikely(head != local_read(&buffer->head))) {
		local_inc(&buffer->nest);
		goto again;
	}

	if (handle->wakeup != local_read(&buffer->wakeup))
		perf_output_wakeup(handle);

out:
	preempt_enable();
}
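
/*
 * Illustration (not from the original source): writer A is interrupted
 * by an NMI writer B after A reads ->head but before A's final
 * local_dec_and_test().  B's decrement does not reach zero, so B
 * publishes nothing; when A's decrement does, A re-reads ->head, sees
 * the head B advanced in perf_output_begin() and loops, so the
 * outer-most writer always publishes the newest head.
 */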
__always_inline void perf_output_copy(struct perf_output_handle *handle,
		      const void *buf, unsigned int len)
{
	do {
		unsigned long size = min_t(unsigned long, handle->size, len);

		memcpy(handle->addr, buf, size);

		len -= size;
		handle->addr += size;
		buf += size;
		handle->size -= size;
		if (!handle->size) {
			struct perf_buffer *buffer = handle->buffer;

			handle->page++;
			handle->page &= buffer->nr_pages - 1;
			handle->addr = buffer->data_pages[handle->page];
			handle->size = PAGE_SIZE << page_order(buffer);
		}
	} while (len);
}

int perf_output_begin(struct perf_output_handle *handle,
		      struct perf_event *event, unsigned int size,
		      int nmi, int sample)
{
	struct perf_buffer *buffer;
	unsigned long tail, offset, head;
	int have_lost;
	struct {
		struct perf_event_header header;
		u64			 id;
		u64			 lost;
	} lost_event;

	rcu_read_lock();
	/*
	 * For inherited events we send all the output towards the parent.
	 */
	if (event->parent)
		event = event->parent;

	buffer = rcu_dereference(event->buffer);
	if (!buffer)
		goto out;

	handle->buffer	= buffer;
	handle->event	= event;
	handle->nmi	= nmi;
	handle->sample	= sample;

	if (!buffer->nr_pages)
		goto out;

	have_lost = local_read(&buffer->lost);
	if (have_lost)
		size += sizeof(lost_event);

	perf_output_get_handle(handle);

	do {
		/*
		 * Userspace could choose to issue a mb() before updating the
		 * tail pointer, so that all reads are completed before the
		 * write is issued.
		 */
		tail = ACCESS_ONCE(buffer->user_page->data_tail);
		smp_rmb();
		offset = head = local_read(&buffer->head);
		head += size;
		if (unlikely(!perf_output_space(buffer, tail, offset, head)))
			goto fail;
	} while (local_cmpxchg(&buffer->head, offset, head) != offset);

	if (head - local_read(&buffer->wakeup) > buffer->watermark)
		local_add(buffer->watermark, &buffer->wakeup);

	handle->page = offset >> (PAGE_SHIFT + page_order(buffer));
	handle->page &= buffer->nr_pages - 1;
	handle->size = offset & ((PAGE_SIZE << page_order(buffer)) - 1);
	handle->addr = buffer->data_pages[handle->page];
	handle->addr += handle->size;
	handle->size = (PAGE_SIZE << page_order(buffer)) - handle->size;

	if (have_lost) {
		lost_event.header.type = PERF_RECORD_LOST;
		lost_event.header.misc = 0;
		lost_event.header.size = sizeof(lost_event);
		lost_event.id          = event->id;
		lost_event.lost        = local_xchg(&buffer->lost, 0);

		perf_output_put(handle, lost_event);
	}

	return 0;

fail:
	local_inc(&buffer->lost);
	perf_output_put_handle(handle);
out:
	rcu_read_unlock();

	return -ENOSPC;
}
void perf_output_end(struct perf_output_handle *handle)
{
	struct perf_event *event = handle->event;
	struct perf_buffer *buffer = handle->buffer;

	int wakeup_events = event->attr.wakeup_events;

	if (handle->sample && wakeup_events) {
		int events = local_inc_return(&buffer->events);
		if (events >= wakeup_events) {
			local_sub(wakeup_events, &buffer->events);
			local_inc(&buffer->wakeup);
		}
	}

	perf_output_put_handle(handle);
	rcu_read_unlock();
}
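
/*
 * Typical in-kernel output sequence, as a sketch (this is the pattern
 * perf_event_output() below follows):
 *
 *	struct perf_output_handle handle;
 *
 *	if (perf_output_begin(&handle, event, size, nmi, 1))
 *		return;		// buffer full: a PERF_RECORD_LOST is accounted
 *	perf_output_put(&handle, record);
 *	perf_output_end(&handle);
 */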
static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
{
	/*
	 * only top level events have the pid namespace they were created in
	 */
	if (event->parent)
		event = event->parent;

	return task_tgid_nr_ns(p, event->ns);
}

static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
{
	/*
	 * only top level events have the pid namespace they were created in
	 */
	if (event->parent)
		event = event->parent;

	return task_pid_nr_ns(p, event->ns);
}
static void perf_output_read_one(struct perf_output_handle *handle,
				 struct perf_event *event)
{
	u64 read_format = event->attr.read_format;
	u64 values[4];
	int n = 0;

	values[n++] = perf_event_count(event);
	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
		values[n++] = event->total_time_enabled +
			atomic64_read(&event->child_total_time_enabled);
	}
	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
		values[n++] = event->total_time_running +
			atomic64_read(&event->child_total_time_running);
	}
	if (read_format & PERF_FORMAT_ID)
		values[n++] = primary_event_id(event);

	perf_output_copy(handle, values, n * sizeof(u64));
}

/*
 * XXX PERF_FORMAT_GROUP vs inherited events seems difficult.
 */
static void perf_output_read_group(struct perf_output_handle *handle,
			    struct perf_event *event)
{
	struct perf_event *leader = event->group_leader, *sub;
	u64 read_format = event->attr.read_format;
	u64 values[5];
	int n = 0;

	values[n++] = 1 + leader->nr_siblings;

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		values[n++] = leader->total_time_enabled;

	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		values[n++] = leader->total_time_running;

	if (leader != event)
		leader->pmu->read(leader);

	values[n++] = perf_event_count(leader);
	if (read_format & PERF_FORMAT_ID)
		values[n++] = primary_event_id(leader);

	perf_output_copy(handle, values, n * sizeof(u64));

	list_for_each_entry(sub, &leader->sibling_list, group_entry) {
		n = 0;

		if (sub != event)
			sub->pmu->read(sub);

		values[n++] = perf_event_count(sub);
		if (read_format & PERF_FORMAT_ID)
			values[n++] = primary_event_id(sub);

		perf_output_copy(handle, values, n * sizeof(u64));
	}
}

static void perf_output_read(struct perf_output_handle *handle,
			     struct perf_event *event)
{
	if (event->attr.read_format & PERF_FORMAT_GROUP)
		perf_output_read_group(handle, event);
	else
		perf_output_read_one(handle, event);
}

void perf_output_sample(struct perf_output_handle *handle,
			struct perf_event_header *header,
			struct perf_sample_data *data,
			struct perf_event *event)
{
	u64 sample_type = data->type;

	perf_output_put(handle, *header);

	if (sample_type & PERF_SAMPLE_IP)
		perf_output_put(handle, data->ip);

	if (sample_type & PERF_SAMPLE_TID)
		perf_output_put(handle, data->tid_entry);

	if (sample_type & PERF_SAMPLE_TIME)
		perf_output_put(handle, data->time);

	if (sample_type & PERF_SAMPLE_ADDR)
		perf_output_put(handle, data->addr);

	if (sample_type & PERF_SAMPLE_ID)
		perf_output_put(handle, data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		perf_output_put(handle, data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		perf_output_put(handle, data->cpu_entry);

	if (sample_type & PERF_SAMPLE_PERIOD)
		perf_output_put(handle, data->period);

	if (sample_type & PERF_SAMPLE_READ)
		perf_output_read(handle, event);

	if (sample_type & PERF_SAMPLE_CALLCHAIN) {
		if (data->callchain) {
			int size = 1;

			if (data->callchain)
				size += data->callchain->nr;

			size *= sizeof(u64);

			perf_output_copy(handle, data->callchain, size);
		} else {
			u64 nr = 0;
			perf_output_put(handle, nr);
		}
	}

	if (sample_type & PERF_SAMPLE_RAW) {
		if (data->raw) {
			perf_output_put(handle, data->raw->size);
			perf_output_copy(handle, data->raw->data,
					 data->raw->size);
		} else {
			struct {
				u32	size;
				u32	data;
			} raw = {
				.size = sizeof(u32),
				.data = 0,
			};
			perf_output_put(handle, raw);
		}
	}
}
void perf_prepare_sample(struct perf_event_header *header,
			 struct perf_sample_data *data,
			 struct perf_event *event,
			 struct pt_regs *regs)
{
	u64 sample_type = event->attr.sample_type;

	data->type = sample_type;

	header->type = PERF_RECORD_SAMPLE;
	header->size = sizeof(*header);

	header->misc = 0;
	header->misc |= perf_misc_flags(regs);

	if (sample_type & PERF_SAMPLE_IP) {
		data->ip = perf_instruction_pointer(regs);

		header->size += sizeof(data->ip);
	}

	if (sample_type & PERF_SAMPLE_TID) {
		/* namespace issues */
		data->tid_entry.pid = perf_event_pid(event, current);
		data->tid_entry.tid = perf_event_tid(event, current);

		header->size += sizeof(data->tid_entry);
	}

	if (sample_type & PERF_SAMPLE_TIME) {
		data->time = perf_clock();

		header->size += sizeof(data->time);
	}

	if (sample_type & PERF_SAMPLE_ADDR)
		header->size += sizeof(data->addr);

	if (sample_type & PERF_SAMPLE_ID) {
		data->id = primary_event_id(event);

		header->size += sizeof(data->id);
	}

	if (sample_type & PERF_SAMPLE_STREAM_ID) {
		data->stream_id = event->id;

		header->size += sizeof(data->stream_id);
	}

	if (sample_type & PERF_SAMPLE_CPU) {
		data->cpu_entry.cpu		= raw_smp_processor_id();
		data->cpu_entry.reserved	= 0;

		header->size += sizeof(data->cpu_entry);
	}

	if (sample_type & PERF_SAMPLE_PERIOD)
		header->size += sizeof(data->period);

	if (sample_type & PERF_SAMPLE_READ)
		header->size += perf_event_read_size(event);

	if (sample_type & PERF_SAMPLE_CALLCHAIN) {
		int size = 1;

		data->callchain = perf_callchain(regs);

		if (data->callchain)
			size += data->callchain->nr;

		header->size += size * sizeof(u64);
	}

	if (sample_type & PERF_SAMPLE_RAW) {
		int size = sizeof(u32);

		if (data->raw)
			size += data->raw->size;
		else
			size += sizeof(u32);

		WARN_ON_ONCE(size & (sizeof(u64)-1));
		header->size += size;
	}
}

static void perf_event_output(struct perf_event *event, int nmi,
				struct perf_sample_data *data,
				struct pt_regs *regs)
{
	struct perf_output_handle handle;
	struct perf_event_header header;

	/* protect the callchain buffers */
	rcu_read_lock();

	perf_prepare_sample(&header, data, event, regs);

	if (perf_output_begin(&handle, event, header.size, nmi, 1))
		goto exit;

	perf_output_sample(&handle, &header, data, event);

	perf_output_end(&handle);

exit:
	rcu_read_unlock();
}
/*
 * read event_id
 */

struct perf_read_event {
	struct perf_event_header	header;

	u32				pid;
	u32				tid;
};

static void
perf_event_read_event(struct perf_event *event,
			struct task_struct *task)
{
	struct perf_output_handle handle;
	struct perf_read_event read_event = {
		.header = {
			.type = PERF_RECORD_READ,
			.misc = 0,
			.size = sizeof(read_event) + perf_event_read_size(event),
		},
		.pid = perf_event_pid(event, task),
		.tid = perf_event_tid(event, task),
	};
	int ret;

	ret = perf_output_begin(&handle, event, read_event.header.size, 0, 0);
	if (ret)
		return;

	perf_output_put(&handle, read_event);
	perf_output_read(&handle, event);

	perf_output_end(&handle);
}

/*
 * task tracking -- fork/exit
 *
 * enabled by: attr.comm | attr.mmap | attr.mmap_data | attr.task
 */

struct perf_task_event {
	struct task_struct		*task;
	struct perf_event_context	*task_ctx;

	struct {
		struct perf_event_header	header;

		u32				pid;
		u32				ppid;
		u32				tid;
		u32				ptid;
		u64				time;
	} event_id;
};

static void perf_event_task_output(struct perf_event *event,
				     struct perf_task_event *task_event)
{
	struct perf_output_handle handle;
	struct task_struct *task = task_event->task;
	int size, ret;

	size = task_event->event_id.header.size;
	ret = perf_output_begin(&handle, event, size, 0, 0);

	if (ret)
		return;

	task_event->event_id.pid = perf_event_pid(event, task);
	task_event->event_id.ppid = perf_event_pid(event, current);

	task_event->event_id.tid = perf_event_tid(event, task);
	task_event->event_id.ptid = perf_event_tid(event, current);

	perf_output_put(&handle, task_event->event_id);

	perf_output_end(&handle);
}

static int perf_event_task_match(struct perf_event *event)
{
	if (event->state < PERF_EVENT_STATE_INACTIVE)
		return 0;

	if (event->cpu != -1 && event->cpu != smp_processor_id())
		return 0;

	if (event->attr.comm || event->attr.mmap ||
	    event->attr.mmap_data || event->attr.task)
		return 1;

	return 0;
}

static void perf_event_task_ctx(struct perf_event_context *ctx,
				  struct perf_task_event *task_event)
{
	struct perf_event *event;

	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
		if (perf_event_task_match(event))
			perf_event_task_output(event, task_event);
	}
}

static void perf_event_task_event(struct perf_task_event *task_event)
{
	struct perf_cpu_context *cpuctx;
	struct perf_event_context *ctx;
	struct pmu *pmu;
	int ctxn;

	rcu_read_lock();
	list_for_each_entry_rcu(pmu, &pmus, entry) {
		cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
		perf_event_task_ctx(&cpuctx->ctx, task_event);

		ctx = task_event->task_ctx;
		if (!ctx) {
			ctxn = pmu->task_ctx_nr;
			if (ctxn < 0)
				goto next;
			ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
		}
		if (ctx)
			perf_event_task_ctx(ctx, task_event);
next:
		put_cpu_ptr(pmu->pmu_cpu_context);
	}
	rcu_read_unlock();
}

static void perf_event_task(struct task_struct *task,
			      struct perf_event_context *task_ctx,
			      int new)
{
	struct perf_task_event task_event;

	if (!atomic_read(&nr_comm_events) &&
	    !atomic_read(&nr_mmap_events) &&
	    !atomic_read(&nr_task_events))
		return;

	task_event = (struct perf_task_event){
		.task	  = task,
		.task_ctx = task_ctx,
		.event_id    = {
			.header = {
				.type = new ? PERF_RECORD_FORK : PERF_RECORD_EXIT,
				.misc = 0,
				.size = sizeof(task_event.event_id),
			},
			/* .pid  */
			/* .ppid */
			/* .tid  */
			/* .ptid */
			.time = perf_clock(),
		},
	};

	perf_event_task_event(&task_event);
}

void perf_event_fork(struct task_struct *task)
{
	perf_event_task(task, NULL, 1);
}
/*
 * comm tracking
 */

struct perf_comm_event {
	struct task_struct	*task;
	char			*comm;
	int			comm_size;

	struct {
		struct perf_event_header	header;

		u32				pid;
		u32				tid;
	} event_id;
};

static void perf_event_comm_output(struct perf_event *event,
				     struct perf_comm_event *comm_event)
{
	struct perf_output_handle handle;
	int size = comm_event->event_id.header.size;
	int ret = perf_output_begin(&handle, event, size, 0, 0);

	if (ret)
		return;

	comm_event->event_id.pid = perf_event_pid(event, comm_event->task);
	comm_event->event_id.tid = perf_event_tid(event, comm_event->task);

	perf_output_put(&handle, comm_event->event_id);
	perf_output_copy(&handle, comm_event->comm,
				   comm_event->comm_size);
	perf_output_end(&handle);
}

static int perf_event_comm_match(struct perf_event *event)
{
	if (event->state < PERF_EVENT_STATE_INACTIVE)
		return 0;

	if (event->cpu != -1 && event->cpu != smp_processor_id())
		return 0;

	if (event->attr.comm)
		return 1;

	return 0;
}

static void perf_event_comm_ctx(struct perf_event_context *ctx,
				  struct perf_comm_event *comm_event)
{
	struct perf_event *event;

	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
		if (perf_event_comm_match(event))
			perf_event_comm_output(event, comm_event);
	}
}

static void perf_event_comm_event(struct perf_comm_event *comm_event)
{
	struct perf_cpu_context *cpuctx;
	struct perf_event_context *ctx;
	char comm[TASK_COMM_LEN];
	unsigned int size;
	struct pmu *pmu;
	int ctxn;

	memset(comm, 0, sizeof(comm));
	strlcpy(comm, comm_event->task->comm, sizeof(comm));
	size = ALIGN(strlen(comm)+1, sizeof(u64));

	comm_event->comm = comm;
	comm_event->comm_size = size;

	comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;

	rcu_read_lock();
	list_for_each_entry_rcu(pmu, &pmus, entry) {
		cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
		perf_event_comm_ctx(&cpuctx->ctx, comm_event);

		ctxn = pmu->task_ctx_nr;
		if (ctxn < 0)
			goto next;

		ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
		if (ctx)
			perf_event_comm_ctx(ctx, comm_event);
next:
		put_cpu_ptr(pmu->pmu_cpu_context);
	}
	rcu_read_unlock();
}

void perf_event_comm(struct task_struct *task)
{
	struct perf_comm_event comm_event;
	struct perf_event_context *ctx;
	int ctxn;

	for_each_task_context_nr(ctxn) {
		ctx = task->perf_event_ctxp[ctxn];
		if (!ctx)
			continue;

		perf_event_enable_on_exec(ctx);
	}

	if (!atomic_read(&nr_comm_events))
		return;

	comm_event = (struct perf_comm_event){
		.task	= task,
		/* .comm      */
		/* .comm_size */
		.event_id  = {
			.header = {
				.type = PERF_RECORD_COMM,
				.misc = 0,
				/* .size */
			},
			/* .pid */
			/* .tid */
		},
	};

	perf_event_comm_event(&comm_event);
}
/*
 * mmap tracking
 */

struct perf_mmap_event {
	struct vm_area_struct	*vma;

	const char		*file_name;
	int			file_size;

	struct {
		struct perf_event_header	header;

		u32				pid;
		u32				tid;
		u64				start;
		u64				len;
		u64				pgoff;
	} event_id;
};

static void perf_event_mmap_output(struct perf_event *event,
				     struct perf_mmap_event *mmap_event)
{
	struct perf_output_handle handle;
	int size = mmap_event->event_id.header.size;
	int ret = perf_output_begin(&handle, event, size, 0, 0);

	if (ret)
		return;

	mmap_event->event_id.pid = perf_event_pid(event, current);
	mmap_event->event_id.tid = perf_event_tid(event, current);

	perf_output_put(&handle, mmap_event->event_id);
	perf_output_copy(&handle, mmap_event->file_name,
				   mmap_event->file_size);
	perf_output_end(&handle);
}

static int perf_event_mmap_match(struct perf_event *event,
				   struct perf_mmap_event *mmap_event,
				   int executable)
{
	if (event->state < PERF_EVENT_STATE_INACTIVE)
		return 0;

	if (event->cpu != -1 && event->cpu != smp_processor_id())
		return 0;

	if ((!executable && event->attr.mmap_data) ||
	    (executable && event->attr.mmap))
		return 1;

	return 0;
}

static void perf_event_mmap_ctx(struct perf_event_context *ctx,
				  struct perf_mmap_event *mmap_event,
				  int executable)
{
	struct perf_event *event;

	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
		if (perf_event_mmap_match(event, mmap_event, executable))
			perf_event_mmap_output(event, mmap_event);
	}
}

static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
{
	struct perf_cpu_context *cpuctx;
	struct perf_event_context *ctx;
	struct vm_area_struct *vma = mmap_event->vma;
	struct file *file = vma->vm_file;
	unsigned int size;
	char tmp[16];
	char *buf = NULL;
	const char *name;
	struct pmu *pmu;
	int ctxn;

	memset(tmp, 0, sizeof(tmp));

	if (file) {
		/*
		 * d_path works from the end of the buffer backwards, so we
		 * need to add enough zero bytes after the string to handle
		 * the 64bit alignment we do later.
		 */
		buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
		if (!buf) {
			name = strncpy(tmp, "//enomem", sizeof(tmp));
			goto got_name;
		}
		name = d_path(&file->f_path, buf, PATH_MAX);
		if (IS_ERR(name)) {
			name = strncpy(tmp, "//toolong", sizeof(tmp));
			goto got_name;
		}
	} else {
		if (arch_vma_name(mmap_event->vma)) {
			name = strncpy(tmp, arch_vma_name(mmap_event->vma),
				       sizeof(tmp));
			goto got_name;
		}

		if (!vma->vm_mm) {
			name = strncpy(tmp, "[vdso]", sizeof(tmp));
			goto got_name;
		} else if (vma->vm_start <= vma->vm_mm->start_brk &&
				vma->vm_end >= vma->vm_mm->brk) {
			name = strncpy(tmp, "[heap]", sizeof(tmp));
			goto got_name;
		} else if (vma->vm_start <= vma->vm_mm->start_stack &&
				vma->vm_end >= vma->vm_mm->start_stack) {
			name = strncpy(tmp, "[stack]", sizeof(tmp));
			goto got_name;
		}

		name = strncpy(tmp, "//anon", sizeof(tmp));
		goto got_name;
	}

got_name:
	size = ALIGN(strlen(name)+1, sizeof(u64));

	mmap_event->file_name = name;
	mmap_event->file_size = size;

	mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;

	rcu_read_lock();
	list_for_each_entry_rcu(pmu, &pmus, entry) {
		cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
		perf_event_mmap_ctx(&cpuctx->ctx, mmap_event,
					vma->vm_flags & VM_EXEC);

		ctxn = pmu->task_ctx_nr;
		if (ctxn < 0)
			goto next;

		ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
		if (ctx) {
			perf_event_mmap_ctx(ctx, mmap_event,
					vma->vm_flags & VM_EXEC);
		}
next:
		put_cpu_ptr(pmu->pmu_cpu_context);
	}
	rcu_read_unlock();

	kfree(buf);
}

void perf_event_mmap(struct vm_area_struct *vma)
{
	struct perf_mmap_event mmap_event;

	if (!atomic_read(&nr_mmap_events))
		return;

	mmap_event = (struct perf_mmap_event){
		.vma	= vma,
		/* .file_name */
		/* .file_size */
		.event_id  = {
			.header = {
				.type = PERF_RECORD_MMAP,
				.misc = PERF_RECORD_MISC_USER,
				/* .size */
			},
			/* .pid */
			/* .tid */
			.start  = vma->vm_start,
			.len    = vma->vm_end - vma->vm_start,
			.pgoff  = (u64)vma->vm_pgoff << PAGE_SHIFT,
		},
	};

	perf_event_mmap_event(&mmap_event);
}
/*
 * IRQ throttle logging
 */

static void perf_log_throttle(struct perf_event *event, int enable)
{
	struct perf_output_handle handle;
	int ret;

	struct {
		struct perf_event_header	header;
		u64				time;
		u64				id;
		u64				stream_id;
	} throttle_event = {
		.header = {
			.type = PERF_RECORD_THROTTLE,
			.misc = 0,
			.size = sizeof(throttle_event),
		},
		.time		= perf_clock(),
		.id		= primary_event_id(event),
		.stream_id	= event->id,
	};

	if (enable)
		throttle_event.header.type = PERF_RECORD_UNTHROTTLE;

	ret = perf_output_begin(&handle, event, sizeof(throttle_event), 1, 0);
	if (ret)
		return;

	perf_output_put(&handle, throttle_event);
	perf_output_end(&handle);
}
/*
 * Generic event overflow handling, sampling.
 */

static int __perf_event_overflow(struct perf_event *event, int nmi,
				   int throttle, struct perf_sample_data *data,
				   struct pt_regs *regs)
{
	int events = atomic_read(&event->event_limit);
	struct hw_perf_event *hwc = &event->hw;
	int ret = 0;

	if (!throttle) {
		hwc->interrupts++;
	} else {
		if (hwc->interrupts != MAX_INTERRUPTS) {
			hwc->interrupts++;
			if (HZ * hwc->interrupts >
					(u64)sysctl_perf_event_sample_rate) {
				hwc->interrupts = MAX_INTERRUPTS;
				perf_log_throttle(event, 0);
				ret = 1;
			}
		} else {
			/*
			 * Keep re-disabling events even though on the previous
			 * pass we disabled it - just in case we raced with a
			 * sched-in and the event got enabled again:
			 */
			ret = 1;
		}
	}

	if (event->attr.freq) {
		u64 now = perf_clock();
		s64 delta = now - hwc->freq_time_stamp;

		hwc->freq_time_stamp = now;

		if (delta > 0 && delta < 2*TICK_NSEC)
			perf_adjust_period(event, delta, hwc->last_period);
	}

	/*
	 * XXX event_limit might not quite work as expected on inherited
	 * events
	 */

	event->pending_kill = POLL_IN;
	if (events && atomic_dec_and_test(&event->event_limit)) {
		ret = 1;
		event->pending_kill = POLL_HUP;
		if (nmi) {
			event->pending_disable = 1;
			irq_work_queue(&event->pending);
		} else
			perf_event_disable(event);
	}

	if (event->overflow_handler)
		event->overflow_handler(event, nmi, data, regs);
	else
		perf_event_output(event, nmi, data, regs);

	return ret;
}

int perf_event_overflow(struct perf_event *event, int nmi,
			  struct perf_sample_data *data,
			  struct pt_regs *regs)
{
	return __perf_event_overflow(event, nmi, 1, data, regs);
}
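
/*
 * Worked example (not from the original source): with HZ = 1000 and
 * the default sysctl_perf_event_sample_rate of 100000, the throttle
 * condition HZ * interrupts > sample_rate trips once an event takes
 * more than 100 interrupts in its accounting window, i.e. roughly
 * 100k interrupts/sec; it is then held off until unthrottled.
 */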
/*
 * Generic software event infrastructure
 */

struct swevent_htable {
	struct swevent_hlist		*swevent_hlist;
	struct mutex			hlist_mutex;
	int				hlist_refcount;

	/* Recursion avoidance in each contexts */
	int				recursion[PERF_NR_CONTEXTS];
};

static DEFINE_PER_CPU(struct swevent_htable, swevent_htable);

/*
 * We directly increment event->count and keep a second value in
 * event->hw.period_left to count intervals. This period event
 * is kept in the range [-sample_period, 0] so that we can use the
 * sign as trigger.
 */

static u64 perf_swevent_set_period(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 period = hwc->last_period;
	u64 nr, offset;
	s64 old, val;

	hwc->last_period = hwc->sample_period;

again:
	old = val = local64_read(&hwc->period_left);
	if (val < 0)
		return 0;

	nr = div64_u64(period + val, period);
	offset = nr * period;
	val -= offset;
	if (local64_cmpxchg(&hwc->period_left, old, val) != old)
		goto again;

	return nr;
}
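
/*
 * Worked example (not from the original source): with period = 100 and
 * period_left = 30, nr = (100 + 30) / 100 = 1 period has elapsed, and
 * period_left becomes 30 - 100 = -70, i.e. 70 more counts until the
 * next overflow -- keeping the value in [-sample_period, 0] as the
 * comment above describes.
 */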
static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
				    int nmi, struct perf_sample_data *data,
				    struct pt_regs *regs)
{
	struct hw_perf_event *hwc = &event->hw;
	int throttle = 0;

	data->period = event->hw.last_period;
	if (!overflow)
		overflow = perf_swevent_set_period(event);

	if (hwc->interrupts == MAX_INTERRUPTS)
		return;

	for (; overflow; overflow--) {
		if (__perf_event_overflow(event, nmi, throttle,
					    data, regs)) {
			/*
			 * We inhibit the overflow from happening when
			 * hwc->interrupts == MAX_INTERRUPTS.
			 */
			break;
		}
		throttle = 1;
	}
}

static void perf_swevent_event(struct perf_event *event, u64 nr,
				 int nmi, struct perf_sample_data *data,
				 struct pt_regs *regs)
{
	struct hw_perf_event *hwc = &event->hw;

	local64_add(nr, &event->count);

	if (!regs)
		return;

	if (!hwc->sample_period)
		return;

	if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq)
		return perf_swevent_overflow(event, 1, nmi, data, regs);

	if (local64_add_negative(nr, &hwc->period_left))
		return;

	perf_swevent_overflow(event, 0, nmi, data, regs);
}

static int perf_exclude_event(struct perf_event *event,
			      struct pt_regs *regs)
{
	if (event->hw.state & PERF_HES_STOPPED)
		return 0;

	if (regs) {
		if (event->attr.exclude_user && user_mode(regs))
			return 1;

		if (event->attr.exclude_kernel && !user_mode(regs))
			return 1;
	}

	return 0;
}

static int perf_swevent_match(struct perf_event *event,
				enum perf_type_id type,
				u32 event_id,
				struct perf_sample_data *data,
				struct pt_regs *regs)
{
	if (event->attr.type != type)
		return 0;

	if (event->attr.config != event_id)
		return 0;

	if (perf_exclude_event(event, regs))
		return 0;

	return 1;
}

static inline u64 swevent_hash(u64 type, u32 event_id)
{
	u64 val = event_id | (type << 32);

	return hash_64(val, SWEVENT_HLIST_BITS);
}

static inline struct hlist_head *
__find_swevent_head(struct swevent_hlist *hlist, u64 type, u32 event_id)
{
	u64 hash = swevent_hash(type, event_id);

	return &hlist->heads[hash];
}

/* For the read side: events when they trigger */
static inline struct hlist_head *
find_swevent_head_rcu(struct swevent_htable *swhash, u64 type, u32 event_id)
{
	struct swevent_hlist *hlist;

	hlist = rcu_dereference(swhash->swevent_hlist);
	if (!hlist)
		return NULL;

	return __find_swevent_head(hlist, type, event_id);
}

/* For the event head insertion and removal in the hlist */
static inline struct hlist_head *
find_swevent_head(struct swevent_htable *swhash, struct perf_event *event)
{
	struct swevent_hlist *hlist;
	u32 event_id = event->attr.config;
	u64 type = event->attr.type;

	/*
	 * Event scheduling is always serialized against hlist allocation
	 * and release. Which makes the protected version suitable here.
	 * The context lock guarantees that.
	 */
	hlist = rcu_dereference_protected(swhash->swevent_hlist,
					  lockdep_is_held(&event->ctx->lock));
	if (!hlist)
		return NULL;

	return __find_swevent_head(hlist, type, event_id);
}
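
/*
 * Example (not from the original source): a PERF_TYPE_SOFTWARE (1) /
 * PERF_COUNT_SW_PAGE_FAULTS (2) event hashes val = (1ULL << 32) | 2
 * down to one of the 2^SWEVENT_HLIST_BITS buckets; the trigger-time
 * lookup and the add/del paths thus always agree on the bucket head.
 */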
static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
				    u64 nr, int nmi,
				    struct perf_sample_data *data,
				    struct pt_regs *regs)
{
	struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
	struct perf_event *event;
	struct hlist_node *node;
	struct hlist_head *head;

	rcu_read_lock();
	head = find_swevent_head_rcu(swhash, type, event_id);
	if (!head)
		goto end;

	hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
		if (perf_swevent_match(event, type, event_id, data, regs))
			perf_swevent_event(event, nr, nmi, data, regs);
	}
end:
	rcu_read_unlock();
}

int perf_swevent_get_recursion_context(void)
{
	struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);

	return get_recursion_context(swhash->recursion);
}
EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);

inline void perf_swevent_put_recursion_context(int rctx)
{
	struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);

	put_recursion_context(swhash->recursion, rctx);
}

void __perf_sw_event(u32 event_id, u64 nr, int nmi,
			    struct pt_regs *regs, u64 addr)
{
	struct perf_sample_data data;
	int rctx;

	preempt_disable_notrace();
	rctx = perf_swevent_get_recursion_context();
	if (rctx < 0)
		goto fail;

	perf_sample_data_init(&data, addr);

	do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, nmi, &data, regs);

	perf_swevent_put_recursion_context(rctx);
fail:
	preempt_enable_notrace();
}
static void perf_swevent_read(struct perf_event *event)
{
}

static int perf_swevent_add(struct perf_event *event, int flags)
{
	struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
	struct hw_perf_event *hwc = &event->hw;
	struct hlist_head *head;

	if (hwc->sample_period) {
		hwc->last_period = hwc->sample_period;
		perf_swevent_set_period(event);
	}

	hwc->state = !(flags & PERF_EF_START);

	head = find_swevent_head(swhash, event);
	if (WARN_ON_ONCE(!head))
		return -EINVAL;

	hlist_add_head_rcu(&event->hlist_entry, head);

	return 0;
}

static void perf_swevent_del(struct perf_event *event, int flags)
{
	hlist_del_rcu(&event->hlist_entry);
}

static void perf_swevent_start(struct perf_event *event, int flags)
{
	event->hw.state = 0;
}

static void perf_swevent_stop(struct perf_event *event, int flags)
{
	event->hw.state = PERF_HES_STOPPED;
}

/* Deref the hlist from the update side */
static inline struct swevent_hlist *
swevent_hlist_deref(struct swevent_htable *swhash)
{
	return rcu_dereference_protected(swhash->swevent_hlist,
					 lockdep_is_held(&swhash->hlist_mutex));
}

static void swevent_hlist_release_rcu(struct rcu_head *rcu_head)
{
	struct swevent_hlist *hlist;

	hlist = container_of(rcu_head, struct swevent_hlist, rcu_head);
	kfree(hlist);
}

static void swevent_hlist_release(struct swevent_htable *swhash)
{
	struct swevent_hlist *hlist = swevent_hlist_deref(swhash);

	if (!hlist)
		return;

	rcu_assign_pointer(swhash->swevent_hlist, NULL);
	call_rcu(&hlist->rcu_head, swevent_hlist_release_rcu);
}

static void swevent_hlist_put_cpu(struct perf_event *event, int cpu)
{
	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);

	mutex_lock(&swhash->hlist_mutex);

	if (!--swhash->hlist_refcount)
		swevent_hlist_release(swhash);

	mutex_unlock(&swhash->hlist_mutex);
}

static void swevent_hlist_put(struct perf_event *event)
{
	int cpu;

	if (event->cpu != -1) {
		swevent_hlist_put_cpu(event, event->cpu);
		return;
	}

	for_each_possible_cpu(cpu)
		swevent_hlist_put_cpu(event, cpu);
}

static int swevent_hlist_get_cpu(struct perf_event *event, int cpu)
{
	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
	int err = 0;

	mutex_lock(&swhash->hlist_mutex);

	if (!swevent_hlist_deref(swhash) && cpu_online(cpu)) {
		struct swevent_hlist *hlist;

		hlist = kzalloc(sizeof(*hlist), GFP_KERNEL);
		if (!hlist) {
			err = -ENOMEM;
			goto exit;
		}
		rcu_assign_pointer(swhash->swevent_hlist, hlist);
	}
	swhash->hlist_refcount++;
exit:
	mutex_unlock(&swhash->hlist_mutex);

	return err;
}

static int swevent_hlist_get(struct perf_event *event)
{
	int err;
	int cpu, failed_cpu;

	if (event->cpu != -1)
		return swevent_hlist_get_cpu(event, event->cpu);

	get_online_cpus();
	for_each_possible_cpu(cpu) {
		err = swevent_hlist_get_cpu(event, cpu);
		if (err) {
			failed_cpu = cpu;
			goto fail;
		}
	}
	put_online_cpus();

	return 0;
fail:
	for_each_possible_cpu(cpu) {
		if (cpu == failed_cpu)
			break;
		swevent_hlist_put_cpu(event, cpu);
	}

	put_online_cpus();
	return err;
}
4662 atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];
4664 static void sw_perf_event_destroy(struct perf_event *event)
4666 u64 event_id = event->attr.config;
4668 WARN_ON(event->parent);
4670 jump_label_dec(&perf_swevent_enabled[event_id]);
4671 swevent_hlist_put(event);
4674 static int perf_swevent_init(struct perf_event *event)
4676 int event_id = event->attr.config;
4678 if (event->attr.type != PERF_TYPE_SOFTWARE)
4679 return -ENOENT;
4681 switch (event_id) {
4682 case PERF_COUNT_SW_CPU_CLOCK:
4683 case PERF_COUNT_SW_TASK_CLOCK:
4684 return -ENOENT;
4686 default:
4687 break;
4690 if (event_id >= PERF_COUNT_SW_MAX) /* MAX itself is not a valid id */
4691 return -ENOENT;
4693 if (!event->parent) {
4694 int err;
4696 err = swevent_hlist_get(event);
4697 if (err)
4698 return err;
4700 jump_label_inc(&perf_swevent_enabled[event_id]);
4701 event->destroy = sw_perf_event_destroy;
4704 return 0;
4707 static struct pmu perf_swevent = {
4708 .task_ctx_nr = perf_sw_context,
4710 .event_init = perf_swevent_init,
4711 .add = perf_swevent_add,
4712 .del = perf_swevent_del,
4713 .start = perf_swevent_start,
4714 .stop = perf_swevent_stop,
4715 .read = perf_swevent_read,
4718 #ifdef CONFIG_EVENT_TRACING
4720 static int perf_tp_filter_match(struct perf_event *event,
4721 struct perf_sample_data *data)
4723 void *record = data->raw->data;
4725 if (likely(!event->filter) || filter_match_preds(event->filter, record))
4726 return 1;
4727 return 0;
4730 static int perf_tp_event_match(struct perf_event *event,
4731 struct perf_sample_data *data,
4732 struct pt_regs *regs)
4735 * All tracepoints are from kernel-space.
4737 if (event->attr.exclude_kernel)
4738 return 0;
4740 if (!perf_tp_filter_match(event, data))
4741 return 0;
4743 return 1;
4746 void perf_tp_event(u64 addr, u64 count, void *record, int entry_size,
4747 struct pt_regs *regs, struct hlist_head *head, int rctx)
4749 struct perf_sample_data data;
4750 struct perf_event *event;
4751 struct hlist_node *node;
4753 struct perf_raw_record raw = {
4754 .size = entry_size,
4755 .data = record,
4758 perf_sample_data_init(&data, addr);
4759 data.raw = &raw;
4761 hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
4762 if (perf_tp_event_match(event, &data, regs))
4763 perf_swevent_event(event, count, 1, &data, regs);
4766 perf_swevent_put_recursion_context(rctx);
4768 EXPORT_SYMBOL_GPL(perf_tp_event);
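/*
 * Sketch of a caller: the generated TRACE_EVENT() glue ends up here roughly
 * as follows.  Illustrative only - real tracepoints go through the tracing
 * code's buffer helpers; "example_trace_handler" and its fixed-size record
 * are made up for the example.
 */
static void example_trace_handler(struct pt_regs *regs,
				  struct hlist_head *head)
{
	u64 record[2] = { 0, 0 };	/* stand-in for a tracepoint entry */
	int rctx;

	rctx = perf_swevent_get_recursion_context();
	if (rctx < 0)
		return;

	/* perf_tp_event() drops the recursion context for us */
	perf_tp_event(0 /* addr */, 1 /* count */, record, sizeof(record),
		      regs, head, rctx);
}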
4770 static void tp_perf_event_destroy(struct perf_event *event)
4772 perf_trace_destroy(event);
4775 static int perf_tp_event_init(struct perf_event *event)
4777 int err;
4779 if (event->attr.type != PERF_TYPE_TRACEPOINT)
4780 return -ENOENT;
4783 * Raw tracepoint data is a severe data leak, only allow root to
4784 * have these.
4786 if ((event->attr.sample_type & PERF_SAMPLE_RAW) &&
4787 perf_paranoid_tracepoint_raw() &&
4788 !capable(CAP_SYS_ADMIN))
4789 return -EPERM;
4791 err = perf_trace_init(event);
4792 if (err)
4793 return err;
4795 event->destroy = tp_perf_event_destroy;
4797 return 0;
4800 static struct pmu perf_tracepoint = {
4801 .task_ctx_nr = perf_sw_context,
4803 .event_init = perf_tp_event_init,
4804 .add = perf_trace_add,
4805 .del = perf_trace_del,
4806 .start = perf_swevent_start,
4807 .stop = perf_swevent_stop,
4808 .read = perf_swevent_read,
4811 static inline void perf_tp_register(void)
4813 perf_pmu_register(&perf_tracepoint);
4816 static int perf_event_set_filter(struct perf_event *event, void __user *arg)
4818 char *filter_str;
4819 int ret;
4821 if (event->attr.type != PERF_TYPE_TRACEPOINT)
4822 return -EINVAL;
4824 filter_str = strndup_user(arg, PAGE_SIZE);
4825 if (IS_ERR(filter_str))
4826 return PTR_ERR(filter_str);
4828 ret = ftrace_profile_set_filter(event, event->attr.config, filter_str);
4830 kfree(filter_str);
4831 return ret;
4834 static void perf_event_free_filter(struct perf_event *event)
4836 ftrace_profile_free_filter(event);
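/*
 * Userspace side of the filter hook above: the string reaches
 * perf_event_set_filter() through the PERF_EVENT_IOC_SET_FILTER ioctl on a
 * tracepoint event fd.  A sketch; the filter expression is illustrative
 * (a field of sched:sched_switch).
 */
#include <sys/ioctl.h>
#include <linux/perf_event.h>

/* fd must refer to a PERF_TYPE_TRACEPOINT event */
static int example_set_filter(int fd)
{
	return ioctl(fd, PERF_EVENT_IOC_SET_FILTER, "prev_pid != 0");
}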
4839 #else
4841 static inline void perf_tp_register(void)
4845 static int perf_event_set_filter(struct perf_event *event, void __user *arg)
4847 return -ENOENT;
4850 static void perf_event_free_filter(struct perf_event *event)
4854 #endif /* CONFIG_EVENT_TRACING */
4856 #ifdef CONFIG_HAVE_HW_BREAKPOINT
4857 void perf_bp_event(struct perf_event *bp, void *data)
4859 struct perf_sample_data sample;
4860 struct pt_regs *regs = data;
4862 perf_sample_data_init(&sample, bp->attr.bp_addr);
4864 if (!bp->hw.state && !perf_exclude_event(bp, regs))
4865 perf_swevent_event(bp, 1, 1, &sample, regs);
4867 #endif
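/*
 * Sketch of an in-kernel consumer of perf_bp_event(): a write watchpoint
 * registered through the hw_breakpoint layer, modeled on
 * samples/hw_breakpoint/data_breakpoint.c.  All "example_*" names are
 * hypothetical.
 */
#include <linux/hw_breakpoint.h>

static int example_watch_me;
static struct perf_event * __percpu *example_bp;

static void example_hbp_handler(struct perf_event *bp, int nmi,
				struct perf_sample_data *data,
				struct pt_regs *regs)
{
	printk(KERN_INFO "example_watch_me was written to\n");
}

static int __init example_hbp_init(void)
{
	struct perf_event_attr attr;

	hw_breakpoint_init(&attr);
	attr.bp_addr = (unsigned long)&example_watch_me;
	attr.bp_len  = HW_BREAKPOINT_LEN_4;
	attr.bp_type = HW_BREAKPOINT_W;

	example_bp = register_wide_hw_breakpoint(&attr, example_hbp_handler);
	return IS_ERR(example_bp) ? PTR_ERR(example_bp) : 0;
}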
4870 * hrtimer-based software event callback
4873 static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
4875 enum hrtimer_restart ret = HRTIMER_RESTART;
4876 struct perf_sample_data data;
4877 struct pt_regs *regs;
4878 struct perf_event *event;
4879 u64 period;
4881 event = container_of(hrtimer, struct perf_event, hw.hrtimer);
4882 event->pmu->read(event);
4884 perf_sample_data_init(&data, 0);
4885 data.period = event->hw.last_period;
4886 regs = get_irq_regs();
4888 if (regs && !perf_exclude_event(event, regs)) {
4889 if (!(event->attr.exclude_idle && current->pid == 0))
4890 if (perf_event_overflow(event, 0, &data, regs))
4891 ret = HRTIMER_NORESTART;
4894 period = max_t(u64, 10000, event->hw.sample_period);
4895 hrtimer_forward_now(hrtimer, ns_to_ktime(period));
4897 return ret;
4900 static void perf_swevent_start_hrtimer(struct perf_event *event)
4902 struct hw_perf_event *hwc = &event->hw;
4904 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
4905 hwc->hrtimer.function = perf_swevent_hrtimer;
4906 if (hwc->sample_period) {
4907 s64 period = local64_read(&hwc->period_left);
4909 if (period) {
4910 if (period < 0)
4911 period = 10000;
4913 local64_set(&hwc->period_left, 0);
4914 } else {
4915 period = max_t(u64, 10000, hwc->sample_period);
4917 __hrtimer_start_range_ns(&hwc->hrtimer,
4918 ns_to_ktime(period), 0,
4919 HRTIMER_MODE_REL_PINNED, 0);
4923 static void perf_swevent_cancel_hrtimer(struct perf_event *event)
4925 struct hw_perf_event *hwc = &event->hw;
4927 if (hwc->sample_period) {
4928 ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer);
4929 local64_set(&hwc->period_left, ktime_to_ns(remaining));
4931 hrtimer_cancel(&hwc->hrtimer);
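/*
 * Note the 10000 ns floor applied in both paths above: however small a
 * sample_period userspace requests, a timer-driven software event fires at
 * most once per 10 us (~100 kHz).  The clamp, shown in isolation:
 */
static u64 example_effective_period(u64 requested_ns)
{
	return max_t(u64, 10000, requested_ns);
}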
4936 * Software event: cpu wall time clock
4939 static void cpu_clock_event_update(struct perf_event *event)
4941 s64 prev;
4942 u64 now;
4944 now = local_clock();
4945 prev = local64_xchg(&event->hw.prev_count, now);
4946 local64_add(now - prev, &event->count);
4949 static void cpu_clock_event_start(struct perf_event *event, int flags)
4951 local64_set(&event->hw.prev_count, local_clock());
4952 perf_swevent_start_hrtimer(event);
4955 static void cpu_clock_event_stop(struct perf_event *event, int flags)
4957 perf_swevent_cancel_hrtimer(event);
4958 cpu_clock_event_update(event);
4961 static int cpu_clock_event_add(struct perf_event *event, int flags)
4963 if (flags & PERF_EF_START)
4964 cpu_clock_event_start(event, flags);
4966 return 0;
4969 static void cpu_clock_event_del(struct perf_event *event, int flags)
4971 cpu_clock_event_stop(event, flags);
4974 static void cpu_clock_event_read(struct perf_event *event)
4976 cpu_clock_event_update(event);
4979 static int cpu_clock_event_init(struct perf_event *event)
4981 if (event->attr.type != PERF_TYPE_SOFTWARE)
4982 return -ENOENT;
4984 if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK)
4985 return -ENOENT;
4987 return 0;
4990 static struct pmu perf_cpu_clock = {
4991 .task_ctx_nr = perf_sw_context,
4993 .event_init = cpu_clock_event_init,
4994 .add = cpu_clock_event_add,
4995 .del = cpu_clock_event_del,
4996 .start = cpu_clock_event_start,
4997 .stop = cpu_clock_event_stop,
4998 .read = cpu_clock_event_read,
5002 * Software event: task time clock
5005 static void task_clock_event_update(struct perf_event *event, u64 now)
5007 u64 prev;
5008 s64 delta;
5010 prev = local64_xchg(&event->hw.prev_count, now);
5011 delta = now - prev;
5012 local64_add(delta, &event->count);
5015 static void task_clock_event_start(struct perf_event *event, int flags)
5017 local64_set(&event->hw.prev_count, event->ctx->time);
5018 perf_swevent_start_hrtimer(event);
5021 static void task_clock_event_stop(struct perf_event *event, int flags)
5023 perf_swevent_cancel_hrtimer(event);
5024 task_clock_event_update(event, event->ctx->time);
5027 static int task_clock_event_add(struct perf_event *event, int flags)
5029 if (flags & PERF_EF_START)
5030 task_clock_event_start(event, flags);
5032 return 0;
5035 static void task_clock_event_del(struct perf_event *event, int flags)
5037 task_clock_event_stop(event, PERF_EF_UPDATE);
5040 static void task_clock_event_read(struct perf_event *event)
5042 u64 time;
5044 if (!in_nmi()) {
5045 update_context_time(event->ctx);
5046 time = event->ctx->time;
5047 } else {
5048 u64 now = perf_clock();
5049 u64 delta = now - event->ctx->timestamp;
5050 time = event->ctx->time + delta;
5053 task_clock_event_update(event, time);
5056 static int task_clock_event_init(struct perf_event *event)
5058 if (event->attr.type != PERF_TYPE_SOFTWARE)
5059 return -ENOENT;
5061 if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK)
5062 return -ENOENT;
5064 return 0;
5067 static struct pmu perf_task_clock = {
5068 .task_ctx_nr = perf_sw_context,
5070 .event_init = task_clock_event_init,
5071 .add = task_clock_event_add,
5072 .del = task_clock_event_del,
5073 .start = task_clock_event_start,
5074 .stop = task_clock_event_stop,
5075 .read = task_clock_event_read,
5078 static void perf_pmu_nop_void(struct pmu *pmu)
5082 static int perf_pmu_nop_int(struct pmu *pmu)
5084 return 0;
5087 static void perf_pmu_start_txn(struct pmu *pmu)
5089 perf_pmu_disable(pmu);
5092 static int perf_pmu_commit_txn(struct pmu *pmu)
5094 perf_pmu_enable(pmu);
5095 return 0;
5098 static void perf_pmu_cancel_txn(struct pmu *pmu)
5100 perf_pmu_enable(pmu);
5104 * Ensures all contexts with the same task_ctx_nr have the same
5105 * pmu_cpu_context too.
5107 static void *find_pmu_context(int ctxn)
5109 struct pmu *pmu;
5111 if (ctxn < 0)
5112 return NULL;
5114 list_for_each_entry(pmu, &pmus, entry) {
5115 if (pmu->task_ctx_nr == ctxn)
5116 return pmu->pmu_cpu_context;
5119 return NULL;
5122 static void free_pmu_context(void * __percpu cpu_context)
5124 struct pmu *pmu;
5126 mutex_lock(&pmus_lock);
5128 * Works like a crude refcount: only free the percpu context once no pmu references it.
5130 list_for_each_entry(pmu, &pmus, entry) {
5131 if (pmu->pmu_cpu_context == cpu_context)
5132 goto out;
5135 free_percpu(cpu_context);
5136 out:
5137 mutex_unlock(&pmus_lock);
5140 int perf_pmu_register(struct pmu *pmu)
5142 int cpu, ret;
5144 mutex_lock(&pmus_lock);
5145 ret = -ENOMEM;
5146 pmu->pmu_disable_count = alloc_percpu(int);
5147 if (!pmu->pmu_disable_count)
5148 goto unlock;
5150 pmu->pmu_cpu_context = find_pmu_context(pmu->task_ctx_nr);
5151 if (pmu->pmu_cpu_context)
5152 goto got_cpu_context;
5154 pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context);
5155 if (!pmu->pmu_cpu_context)
5156 goto free_pdc;
5158 for_each_possible_cpu(cpu) {
5159 struct perf_cpu_context *cpuctx;
5161 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
5162 __perf_event_init_context(&cpuctx->ctx);
5163 cpuctx->ctx.type = cpu_context;
5164 cpuctx->ctx.pmu = pmu;
5165 cpuctx->jiffies_interval = 1;
5166 INIT_LIST_HEAD(&cpuctx->rotation_list);
5169 got_cpu_context:
5170 if (!pmu->start_txn) {
5171 if (pmu->pmu_enable) {
5173 * If we have pmu_enable/pmu_disable calls, install
5174 * transaction stubs that use them to try to batch
5175 * hardware accesses.
5177 pmu->start_txn = perf_pmu_start_txn;
5178 pmu->commit_txn = perf_pmu_commit_txn;
5179 pmu->cancel_txn = perf_pmu_cancel_txn;
5180 } else {
5181 pmu->start_txn = perf_pmu_nop_void;
5182 pmu->commit_txn = perf_pmu_nop_int;
5183 pmu->cancel_txn = perf_pmu_nop_void;
5187 if (!pmu->pmu_enable) {
5188 pmu->pmu_enable = perf_pmu_nop_void;
5189 pmu->pmu_disable = perf_pmu_nop_void;
5192 list_add_rcu(&pmu->entry, &pmus);
5193 ret = 0;
5194 unlock:
5195 mutex_unlock(&pmus_lock);
5197 return ret;
5199 free_pdc:
5200 free_percpu(pmu->pmu_disable_count);
5201 goto unlock;
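/*
 * Minimal sketch of a pmu as consumed by perf_pmu_register() above.  The
 * registration code fills in transaction and pmu_enable/disable stubs, so
 * only the callbacks below are needed.  All "example_*" names are
 * hypothetical; note that event_init() must return -ENOENT for events that
 * belong to another pmu, so perf_init_event() keeps iterating.
 */
static int example_event_init(struct perf_event *event)
{
	if (event->attr.type != PERF_TYPE_RAW)	/* illustrative claim */
		return -ENOENT;
	return 0;
}

static int example_add(struct perf_event *event, int flags)	{ return 0; }
static void example_del(struct perf_event *event, int flags)	{ }
static void example_start(struct perf_event *event, int flags)	{ }
static void example_stop(struct perf_event *event, int flags)	{ }
static void example_read(struct perf_event *event)		{ }

static struct pmu example_pmu = {
	.task_ctx_nr	= perf_sw_context,
	.event_init	= example_event_init,
	.add		= example_add,
	.del		= example_del,
	.start		= example_start,
	.stop		= example_stop,
	.read		= example_read,
};

static int __init example_pmu_register(void)
{
	return perf_pmu_register(&example_pmu);
}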
5204 void perf_pmu_unregister(struct pmu *pmu)
5206 mutex_lock(&pmus_lock);
5207 list_del_rcu(&pmu->entry);
5208 mutex_unlock(&pmus_lock);
5211 * We dereference the pmu list under both SRCU and regular RCU, so
5212 * synchronize against both of those.
5214 synchronize_srcu(&pmus_srcu);
5215 synchronize_rcu();
5217 free_percpu(pmu->pmu_disable_count);
5218 free_pmu_context(pmu->pmu_cpu_context);
5221 struct pmu *perf_init_event(struct perf_event *event)
5223 struct pmu *pmu = NULL;
5224 int idx;
5226 idx = srcu_read_lock(&pmus_srcu);
5227 list_for_each_entry_rcu(pmu, &pmus, entry) {
5228 int ret = pmu->event_init(event);
5229 if (!ret)
5230 goto unlock;
5232 if (ret != -ENOENT) {
5233 pmu = ERR_PTR(ret);
5234 goto unlock;
5237 pmu = ERR_PTR(-ENOENT);
5238 unlock:
5239 srcu_read_unlock(&pmus_srcu, idx);
5241 return pmu;
5245 * Allocate and initialize an event structure
5247 static struct perf_event *
5248 perf_event_alloc(struct perf_event_attr *attr, int cpu,
5249 struct task_struct *task,
5250 struct perf_event *group_leader,
5251 struct perf_event *parent_event,
5252 perf_overflow_handler_t overflow_handler)
5254 struct pmu *pmu;
5255 struct perf_event *event;
5256 struct hw_perf_event *hwc;
5257 long err;
5259 event = kzalloc(sizeof(*event), GFP_KERNEL);
5260 if (!event)
5261 return ERR_PTR(-ENOMEM);
5264 * Single events are their own group leaders, with an
5265 * empty sibling list:
5267 if (!group_leader)
5268 group_leader = event;
5270 mutex_init(&event->child_mutex);
5271 INIT_LIST_HEAD(&event->child_list);
5273 INIT_LIST_HEAD(&event->group_entry);
5274 INIT_LIST_HEAD(&event->event_entry);
5275 INIT_LIST_HEAD(&event->sibling_list);
5276 init_waitqueue_head(&event->waitq);
5277 init_irq_work(&event->pending, perf_pending_event);
5279 mutex_init(&event->mmap_mutex);
5281 event->cpu = cpu;
5282 event->attr = *attr;
5283 event->group_leader = group_leader;
5284 event->pmu = NULL;
5285 event->oncpu = -1;
5287 event->parent = parent_event;
5289 event->ns = get_pid_ns(current->nsproxy->pid_ns);
5290 event->id = atomic64_inc_return(&perf_event_id);
5292 event->state = PERF_EVENT_STATE_INACTIVE;
5294 if (task) {
5295 event->attach_state = PERF_ATTACH_TASK;
5296 #ifdef CONFIG_HAVE_HW_BREAKPOINT
5298 * hw_breakpoint is a bit difficult here.
5300 if (attr->type == PERF_TYPE_BREAKPOINT)
5301 event->hw.bp_target = task;
5302 #endif
5305 if (!overflow_handler && parent_event)
5306 overflow_handler = parent_event->overflow_handler;
5308 event->overflow_handler = overflow_handler;
5310 if (attr->disabled)
5311 event->state = PERF_EVENT_STATE_OFF;
5313 pmu = NULL;
5315 hwc = &event->hw;
5316 hwc->sample_period = attr->sample_period;
5317 if (attr->freq && attr->sample_freq)
5318 hwc->sample_period = 1;
5319 hwc->last_period = hwc->sample_period;
5321 local64_set(&hwc->period_left, hwc->sample_period);
5324 * we currently do not support PERF_FORMAT_GROUP on inherited events
5326 if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP))
5327 goto done;
5329 pmu = perf_init_event(event);
5331 done:
5332 err = 0;
5333 if (!pmu)
5334 err = -EINVAL;
5335 else if (IS_ERR(pmu))
5336 err = PTR_ERR(pmu);
5338 if (err) {
5339 if (event->ns)
5340 put_pid_ns(event->ns);
5341 kfree(event);
5342 return ERR_PTR(err);
5345 event->pmu = pmu;
5347 if (!event->parent) {
5348 if (event->attach_state & PERF_ATTACH_TASK)
5349 jump_label_inc(&perf_task_events);
5350 if (event->attr.mmap || event->attr.mmap_data)
5351 atomic_inc(&nr_mmap_events);
5352 if (event->attr.comm)
5353 atomic_inc(&nr_comm_events);
5354 if (event->attr.task)
5355 atomic_inc(&nr_task_events);
5356 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
5357 err = get_callchain_buffers();
5358 if (err) {
5359 free_event(event);
5360 return ERR_PTR(err);
5365 return event;
5368 static int perf_copy_attr(struct perf_event_attr __user *uattr,
5369 struct perf_event_attr *attr)
5371 u32 size;
5372 int ret;
5374 if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0))
5375 return -EFAULT;
5378 * zero the full structure, so that a short copy leaves the tail zeroed.
5380 memset(attr, 0, sizeof(*attr));
5382 ret = get_user(size, &uattr->size);
5383 if (ret)
5384 return ret;
5386 if (size > PAGE_SIZE) /* silly large */
5387 goto err_size;
5389 if (!size) /* abi compat */
5390 size = PERF_ATTR_SIZE_VER0;
5392 if (size < PERF_ATTR_SIZE_VER0)
5393 goto err_size;
5396 * If we're handed a bigger struct than we know of,
5397 * ensure all the unknown bits are 0 - i.e. new
5398 * user-space does not rely on any kernel feature
5399 * extensions we don't know about yet.
5401 if (size > sizeof(*attr)) {
5402 unsigned char __user *addr;
5403 unsigned char __user *end;
5404 unsigned char val;
5406 addr = (void __user *)uattr + sizeof(*attr);
5407 end = (void __user *)uattr + size;
5409 for (; addr < end; addr++) {
5410 ret = get_user(val, addr);
5411 if (ret)
5412 return ret;
5413 if (val)
5414 goto err_size;
5416 size = sizeof(*attr);
5419 ret = copy_from_user(attr, uattr, size);
5420 if (ret)
5421 return -EFAULT;
5424 * If the type is valid, the pmu's event_init() will verify
5425 * attr->config.
5427 if (attr->type >= PERF_TYPE_MAX)
5428 return -EINVAL;
5430 if (attr->__reserved_1)
5431 return -EINVAL;
5433 if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
5434 return -EINVAL;
5436 if (attr->read_format & ~(PERF_FORMAT_MAX-1))
5437 return -EINVAL;
5439 out:
5440 return ret;
5442 err_size:
5443 put_user(sizeof(*attr), &uattr->size);
5444 ret = -E2BIG;
5445 goto out;
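/*
 * Userspace counterpart of perf_copy_attr(): zero the whole structure and
 * set attr.size, so that both older kernels (short copy) and newer kernels
 * (zero-probe of the unknown tail) accept it.  A sketch:
 */
#include <string.h>
#include <linux/perf_event.h>

static void example_init_attr(struct perf_event_attr *attr)
{
	memset(attr, 0, sizeof(*attr));	/* unknown tail bytes must be 0 */
	attr->size   = sizeof(*attr);	/* ABI version userspace was built with */
	attr->type   = PERF_TYPE_HARDWARE;
	attr->config = PERF_COUNT_HW_CPU_CYCLES;
}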
5448 static int
5449 perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
5451 struct perf_buffer *buffer = NULL, *old_buffer = NULL;
5452 int ret = -EINVAL;
5454 if (!output_event)
5455 goto set;
5457 /* don't allow circular references */
5458 if (event == output_event)
5459 goto out;
5462 * Don't allow cross-cpu buffers
5464 if (output_event->cpu != event->cpu)
5465 goto out;
5468 * If it's not a per-cpu buffer, it must be the same task.
5470 if (output_event->cpu == -1 && output_event->ctx != event->ctx)
5471 goto out;
5473 set:
5474 mutex_lock(&event->mmap_mutex);
5475 /* Can't redirect output if we've got an active mmap() */
5476 if (atomic_read(&event->mmap_count))
5477 goto unlock;
5479 if (output_event) {
5480 /* get the buffer we want to redirect to */
5481 buffer = perf_buffer_get(output_event);
5482 if (!buffer)
5483 goto unlock;
5486 old_buffer = event->buffer;
5487 rcu_assign_pointer(event->buffer, buffer);
5488 ret = 0;
5489 unlock:
5490 mutex_unlock(&event->mmap_mutex);
5492 if (old_buffer)
5493 perf_buffer_put(old_buffer);
5494 out:
5495 return ret;
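/*
 * Userspace reaches perf_event_set_output() through the
 * PERF_EVENT_IOC_SET_OUTPUT ioctl: samples from 'fd' are redirected into
 * target_fd's buffer, subject to the same-cpu/same-task and
 * not-yet-mmap()ed checks above.  A sketch:
 */
#include <sys/ioctl.h>
#include <linux/perf_event.h>

static int example_redirect_output(int fd, int target_fd)
{
	return ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, target_fd);
}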
5499 * sys_perf_event_open - open a performance event, associate it to a task/cpu
5501 * @attr_uptr: event attributes for monitoring/sampling
5502 * @pid: target pid
5503 * @cpu: target cpu
5504 * @group_fd: group leader event fd
* @flags: PERF_FLAG_* modifiers
5506 SYSCALL_DEFINE5(perf_event_open,
5507 struct perf_event_attr __user *, attr_uptr,
5508 pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
5510 struct perf_event *group_leader = NULL, *output_event = NULL;
5511 struct perf_event *event, *sibling;
5512 struct perf_event_attr attr;
5513 struct perf_event_context *ctx;
5514 struct file *event_file = NULL;
5515 struct file *group_file = NULL;
5516 struct task_struct *task = NULL;
5517 struct pmu *pmu;
5518 int event_fd;
5519 int move_group = 0;
5520 int fput_needed = 0;
5521 int err;
5523 /* for future expandability... */
5524 if (flags & ~(PERF_FLAG_FD_NO_GROUP | PERF_FLAG_FD_OUTPUT))
5525 return -EINVAL;
5527 err = perf_copy_attr(attr_uptr, &attr);
5528 if (err)
5529 return err;
5531 if (!attr.exclude_kernel) {
5532 if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
5533 return -EACCES;
5536 if (attr.freq) {
5537 if (attr.sample_freq > sysctl_perf_event_sample_rate)
5538 return -EINVAL;
5541 event_fd = get_unused_fd_flags(O_RDWR);
5542 if (event_fd < 0)
5543 return event_fd;
5545 if (group_fd != -1) {
5546 group_leader = perf_fget_light(group_fd, &fput_needed);
5547 if (IS_ERR(group_leader)) {
5548 err = PTR_ERR(group_leader);
5549 goto err_fd;
5551 group_file = group_leader->filp;
5552 if (flags & PERF_FLAG_FD_OUTPUT)
5553 output_event = group_leader;
5554 if (flags & PERF_FLAG_FD_NO_GROUP)
5555 group_leader = NULL;
5558 if (pid != -1) {
5559 task = find_lively_task_by_vpid(pid);
5560 if (IS_ERR(task)) {
5561 err = PTR_ERR(task);
5562 goto err_group_fd;
5566 event = perf_event_alloc(&attr, cpu, task, group_leader, NULL, NULL);
5567 if (IS_ERR(event)) {
5568 err = PTR_ERR(event);
5569 goto err_task;
5573 * Special case software events and allow them to be part of
5574 * any hardware group.
5576 pmu = event->pmu;
5578 if (group_leader &&
5579 (is_software_event(event) != is_software_event(group_leader))) {
5580 if (is_software_event(event)) {
5582 * The new event is a software event, but the group
5583 * leader is not (it is on some hardware pmu).
5585 * Allow the addition of software events to !software
5586 * groups; this is safe because software events never
5587 * fail to schedule.
5589 pmu = group_leader->pmu;
5590 } else if (is_software_event(group_leader) &&
5591 (group_leader->group_flags & PERF_GROUP_SOFTWARE)) {
5593 * In case the group is a pure software group, and we
5594 * try to add a hardware event, move the whole group to
5595 * the hardware context.
5597 move_group = 1;
5602 * Get the target context (task or percpu):
5604 ctx = find_get_context(pmu, task, cpu);
5605 if (IS_ERR(ctx)) {
5606 err = PTR_ERR(ctx);
5607 goto err_alloc;
5611 * Look up the group leader (we will attach this event to it):
5613 if (group_leader) {
5614 err = -EINVAL;
5617 * Do not allow a recursive hierarchy (this new sibling
5618 * becoming part of another group-sibling):
5620 if (group_leader->group_leader != group_leader)
5621 goto err_context;
5623 * Do not allow attaching to a group in a different
5624 * task or CPU context:
5626 if (move_group) {
5627 if (group_leader->ctx->type != ctx->type)
5628 goto err_context;
5629 } else {
5630 if (group_leader->ctx != ctx)
5631 goto err_context;
5635 * Only a group leader can be exclusive or pinned
5637 if (attr.exclusive || attr.pinned)
5638 goto err_context;
5641 if (output_event) {
5642 err = perf_event_set_output(event, output_event);
5643 if (err)
5644 goto err_context;
5647 event_file = anon_inode_getfile("[perf_event]", &perf_fops, event, O_RDWR);
5648 if (IS_ERR(event_file)) {
5649 err = PTR_ERR(event_file);
5650 goto err_context;
5653 if (move_group) {
5654 struct perf_event_context *gctx = group_leader->ctx;
5656 mutex_lock(&gctx->mutex);
5657 perf_event_remove_from_context(group_leader);
5658 list_for_each_entry(sibling, &group_leader->sibling_list,
5659 group_entry) {
5660 perf_event_remove_from_context(sibling);
5661 put_ctx(gctx);
5663 mutex_unlock(&gctx->mutex);
5664 put_ctx(gctx);
5667 event->filp = event_file;
5668 WARN_ON_ONCE(ctx->parent_ctx);
5669 mutex_lock(&ctx->mutex);
5671 if (move_group) {
5672 perf_install_in_context(ctx, group_leader, cpu);
5673 get_ctx(ctx);
5674 list_for_each_entry(sibling, &group_leader->sibling_list,
5675 group_entry) {
5676 perf_install_in_context(ctx, sibling, cpu);
5677 get_ctx(ctx);
5681 perf_install_in_context(ctx, event, cpu);
5682 ++ctx->generation;
5683 mutex_unlock(&ctx->mutex);
5685 event->owner = current;
5686 get_task_struct(current);
5687 mutex_lock(&current->perf_event_mutex);
5688 list_add_tail(&event->owner_entry, &current->perf_event_list);
5689 mutex_unlock(&current->perf_event_mutex);
5692 * Drop the reference on the group leader's file after placing the
5693 * new event on the sibling_list. This ensures destruction
5694 * of the group leader will find the pointer to itself in
5695 * perf_group_detach().
5697 fput_light(group_file, fput_needed);
5698 fd_install(event_fd, event_file);
5699 return event_fd;
5701 err_context:
5702 put_ctx(ctx);
5703 err_alloc:
5704 free_event(event);
5705 err_task:
5706 if (task)
5707 put_task_struct(task);
5708 err_group_fd:
5709 fput_light(group_file, fput_needed);
5710 err_fd:
5711 put_unused_fd(event_fd);
5712 return err;
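/*
 * Userspace sketch of the syscall above: count user-mode instructions for
 * the calling thread.  There is no glibc wrapper, hence syscall(2).
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static long sys_perf_event_open_wrap(struct perf_event_attr *attr, pid_t pid,
				     int cpu, int group_fd,
				     unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	long long count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_INSTRUCTIONS;
	attr.disabled = 1;
	attr.exclude_kernel = 1;

	fd = sys_perf_event_open_wrap(&attr, 0 /* self */, -1 /* any cpu */,
				      -1 /* no group */, 0);
	if (fd < 0)
		return 1;

	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... workload under measurement ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("instructions: %lld\n", count);
	close(fd);
	return 0;
}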
5716 * perf_event_create_kernel_counter
5718 * @attr: attributes of the counter to create
5719 * @cpu: cpu to which the counter is bound
5720 * @task: task to profile (NULL for percpu)
* @overflow_handler: callback invoked when the counter overflows
5722 struct perf_event *
5723 perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
5724 struct task_struct *task,
5725 perf_overflow_handler_t overflow_handler)
5727 struct perf_event_context *ctx;
5728 struct perf_event *event;
5729 int err;
5732 * Get the target context (task or percpu):
5735 event = perf_event_alloc(attr, cpu, task, NULL, NULL, overflow_handler);
5736 if (IS_ERR(event)) {
5737 err = PTR_ERR(event);
5738 goto err;
5741 ctx = find_get_context(event->pmu, task, cpu);
5742 if (IS_ERR(ctx)) {
5743 err = PTR_ERR(ctx);
5744 goto err_free;
5747 event->filp = NULL;
5748 WARN_ON_ONCE(ctx->parent_ctx);
5749 mutex_lock(&ctx->mutex);
5750 perf_install_in_context(ctx, event, cpu);
5751 ++ctx->generation;
5752 mutex_unlock(&ctx->mutex);
5754 event->owner = current;
5755 get_task_struct(current);
5756 mutex_lock(&current->perf_event_mutex);
5757 list_add_tail(&event->owner_entry, &current->perf_event_list);
5758 mutex_unlock(&current->perf_event_mutex);
5760 return event;
5762 err_free:
5763 free_event(event);
5764 err:
5765 return ERR_PTR(err);
5767 EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter);
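/*
 * In-kernel sketch of the helper above: a percpu cycle counter with an
 * overflow callback.  "example_*" names are hypothetical; task = NULL
 * binds the counter to the cpu, as documented above.
 */
static struct perf_event *example_counter;

static void example_overflow(struct perf_event *event, int nmi,
			     struct perf_sample_data *data,
			     struct pt_regs *regs)
{
	/* runs once every .sample_period counted cycles */
}

static int example_start_counter(int cpu)
{
	struct perf_event_attr attr = {
		.type		= PERF_TYPE_HARDWARE,
		.config		= PERF_COUNT_HW_CPU_CYCLES,
		.size		= sizeof(attr),
		.sample_period	= 1000000,
	};

	example_counter = perf_event_create_kernel_counter(&attr, cpu, NULL,
							   example_overflow);
	if (IS_ERR(example_counter))
		return PTR_ERR(example_counter);
	return 0;
}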
5769 static void sync_child_event(struct perf_event *child_event,
5770 struct task_struct *child)
5772 struct perf_event *parent_event = child_event->parent;
5773 u64 child_val;
5775 if (child_event->attr.inherit_stat)
5776 perf_event_read_event(child_event, child);
5778 child_val = perf_event_count(child_event);
5781 * Add back the child's count to the parent's count:
5783 atomic64_add(child_val, &parent_event->child_count);
5784 atomic64_add(child_event->total_time_enabled,
5785 &parent_event->child_total_time_enabled);
5786 atomic64_add(child_event->total_time_running,
5787 &parent_event->child_total_time_running);
5790 * Remove this event from the parent's list
5792 WARN_ON_ONCE(parent_event->ctx->parent_ctx);
5793 mutex_lock(&parent_event->child_mutex);
5794 list_del_init(&child_event->child_list);
5795 mutex_unlock(&parent_event->child_mutex);
5798 * Release the parent event, if this was the last
5799 * reference to it.
5801 fput(parent_event->filp);
5804 static void
5805 __perf_event_exit_task(struct perf_event *child_event,
5806 struct perf_event_context *child_ctx,
5807 struct task_struct *child)
5809 struct perf_event *parent_event;
5811 perf_event_remove_from_context(child_event);
5813 parent_event = child_event->parent;
5815 * It can happen that the parent exits first, and has events
5816 * that are still around due to the child reference. These
5817 * events need to be zapped - but otherwise linger.
5819 if (parent_event) {
5820 sync_child_event(child_event, child);
5821 free_event(child_event);
5825 static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
5827 struct perf_event *child_event, *tmp;
5828 struct perf_event_context *child_ctx;
5829 unsigned long flags;
5831 if (likely(!child->perf_event_ctxp[ctxn])) {
5832 perf_event_task(child, NULL, 0);
5833 return;
5836 local_irq_save(flags);
5838 * We can't reschedule here because interrupts are disabled,
5839 * and either child is current or it is a task that can't be
5840 * scheduled, so we are now safe from rescheduling changing
5841 * our context.
5843 child_ctx = child->perf_event_ctxp[ctxn];
5844 task_ctx_sched_out(child_ctx, EVENT_ALL);
5847 * Take the context lock here so that if find_get_context is
5848 * reading child->perf_event_ctxp, we wait until it has
5849 * incremented the context's refcount before we do put_ctx below.
5851 raw_spin_lock(&child_ctx->lock);
5852 child->perf_event_ctxp[ctxn] = NULL;
5854 * If this context is a clone, unclone it so it can't get
5855 * swapped to another process while we're removing all
5856 * the events from it.
5858 unclone_ctx(child_ctx);
5859 update_context_time(child_ctx);
5860 raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
5863 * Report the task dead after unscheduling the events so that we
5864 * won't get any samples after PERF_RECORD_EXIT. We can however still
5865 * get a few PERF_RECORD_READ events.
5867 perf_event_task(child, child_ctx, 0);
5870 * We can recurse on the same lock type through:
5872 * __perf_event_exit_task()
5873 * sync_child_event()
5874 * fput(parent_event->filp)
5875 * perf_release()
5876 * mutex_lock(&ctx->mutex)
5878 * But since it's the parent context it won't be the same instance.
5880 mutex_lock(&child_ctx->mutex);
5882 again:
5883 list_for_each_entry_safe(child_event, tmp, &child_ctx->pinned_groups,
5884 group_entry)
5885 __perf_event_exit_task(child_event, child_ctx, child);
5887 list_for_each_entry_safe(child_event, tmp, &child_ctx->flexible_groups,
5888 group_entry)
5889 __perf_event_exit_task(child_event, child_ctx, child);
5892 * If the last event was a group event, it will have appended all
5893 * its siblings to the list, but we obtained 'tmp' before that,
5894 * so it still points to the list head terminating the iteration.
5896 if (!list_empty(&child_ctx->pinned_groups) ||
5897 !list_empty(&child_ctx->flexible_groups))
5898 goto again;
5900 mutex_unlock(&child_ctx->mutex);
5902 put_ctx(child_ctx);
5906 * When a child task exits, feed back event values to parent events.
5908 void perf_event_exit_task(struct task_struct *child)
5910 int ctxn;
5912 for_each_task_context_nr(ctxn)
5913 perf_event_exit_task_context(child, ctxn);
5916 static void perf_free_event(struct perf_event *event,
5917 struct perf_event_context *ctx)
5919 struct perf_event *parent = event->parent;
5921 if (WARN_ON_ONCE(!parent))
5922 return;
5924 mutex_lock(&parent->child_mutex);
5925 list_del_init(&event->child_list);
5926 mutex_unlock(&parent->child_mutex);
5928 fput(parent->filp);
5930 perf_group_detach(event);
5931 list_del_event(event, ctx);
5932 free_event(event);
5936 * free an unexposed, unused context as created by inheritance by
5937 * perf_event_init_task() below; used by fork() if it fails.
5939 void perf_event_free_task(struct task_struct *task)
5941 struct perf_event_context *ctx;
5942 struct perf_event *event, *tmp;
5943 int ctxn;
5945 for_each_task_context_nr(ctxn) {
5946 ctx = task->perf_event_ctxp[ctxn];
5947 if (!ctx)
5948 continue;
5950 mutex_lock(&ctx->mutex);
5951 again:
5952 list_for_each_entry_safe(event, tmp, &ctx->pinned_groups,
5953 group_entry)
5954 perf_free_event(event, ctx);
5956 list_for_each_entry_safe(event, tmp, &ctx->flexible_groups,
5957 group_entry)
5958 perf_free_event(event, ctx);
5960 if (!list_empty(&ctx->pinned_groups) ||
5961 !list_empty(&ctx->flexible_groups))
5962 goto again;
5964 mutex_unlock(&ctx->mutex);
5966 put_ctx(ctx);
5970 void perf_event_delayed_put(struct task_struct *task)
5972 int ctxn;
5974 for_each_task_context_nr(ctxn)
5975 WARN_ON_ONCE(task->perf_event_ctxp[ctxn]);
5979 * inherit an event from parent task to child task:
5981 static struct perf_event *
5982 inherit_event(struct perf_event *parent_event,
5983 struct task_struct *parent,
5984 struct perf_event_context *parent_ctx,
5985 struct task_struct *child,
5986 struct perf_event *group_leader,
5987 struct perf_event_context *child_ctx)
5989 struct perf_event *child_event;
5990 unsigned long flags;
5993 * Instead of creating recursive hierarchies of events,
5994 * we link inherited events back to the original parent,
5995 * which is certain to have a filp that we use as the
5996 * reference count:
5998 if (parent_event->parent)
5999 parent_event = parent_event->parent;
6001 child_event = perf_event_alloc(&parent_event->attr,
6002 parent_event->cpu,
6003 child,
6004 group_leader, parent_event,
6005 NULL);
6006 if (IS_ERR(child_event))
6007 return child_event;
6008 get_ctx(child_ctx);
6011 * Make the child state follow the state of the parent event,
6012 * not its attr.disabled bit. We hold the parent's mutex,
6013 * so we won't race with perf_event_{en, dis}able_family.
6015 if (parent_event->state >= PERF_EVENT_STATE_INACTIVE)
6016 child_event->state = PERF_EVENT_STATE_INACTIVE;
6017 else
6018 child_event->state = PERF_EVENT_STATE_OFF;
6020 if (parent_event->attr.freq) {
6021 u64 sample_period = parent_event->hw.sample_period;
6022 struct hw_perf_event *hwc = &child_event->hw;
6024 hwc->sample_period = sample_period;
6025 hwc->last_period = sample_period;
6027 local64_set(&hwc->period_left, sample_period);
6030 child_event->ctx = child_ctx;
6031 child_event->overflow_handler = parent_event->overflow_handler;
6034 * Link it up in the child's context:
6036 raw_spin_lock_irqsave(&child_ctx->lock, flags);
6037 add_event_to_ctx(child_event, child_ctx);
6038 raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
6041 * Get a reference to the parent filp - we will fput it
6042 * when the child event exits. This is safe to do because
6043 * we are in the parent and we know that the filp still
6044 * exists and has a nonzero count:
6046 atomic_long_inc(&parent_event->filp->f_count);
6049 * Link this into the parent event's child list
6051 WARN_ON_ONCE(parent_event->ctx->parent_ctx);
6052 mutex_lock(&parent_event->child_mutex);
6053 list_add_tail(&child_event->child_list, &parent_event->child_list);
6054 mutex_unlock(&parent_event->child_mutex);
6056 return child_event;
6059 static int inherit_group(struct perf_event *parent_event,
6060 struct task_struct *parent,
6061 struct perf_event_context *parent_ctx,
6062 struct task_struct *child,
6063 struct perf_event_context *child_ctx)
6065 struct perf_event *leader;
6066 struct perf_event *sub;
6067 struct perf_event *child_ctr;
6069 leader = inherit_event(parent_event, parent, parent_ctx,
6070 child, NULL, child_ctx);
6071 if (IS_ERR(leader))
6072 return PTR_ERR(leader);
6073 list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
6074 child_ctr = inherit_event(sub, parent, parent_ctx,
6075 child, leader, child_ctx);
6076 if (IS_ERR(child_ctr))
6077 return PTR_ERR(child_ctr);
6079 return 0;
6082 static int
6083 inherit_task_group(struct perf_event *event, struct task_struct *parent,
6084 struct perf_event_context *parent_ctx,
6085 struct task_struct *child, int ctxn,
6086 int *inherited_all)
6088 int ret;
6089 struct perf_event_context *child_ctx;
6091 if (!event->attr.inherit) {
6092 *inherited_all = 0;
6093 return 0;
6096 child_ctx = child->perf_event_ctxp[ctxn];
6097 if (!child_ctx) {
6099 * This is executed from the parent task context, so
6100 * inherit events that have been marked for cloning.
6101 * First allocate and initialize a context for the
6102 * child.
6105 child_ctx = alloc_perf_context(event->pmu, child);
6106 if (!child_ctx)
6107 return -ENOMEM;
6109 child->perf_event_ctxp[ctxn] = child_ctx;
6112 ret = inherit_group(event, parent, parent_ctx,
6113 child, child_ctx);
6115 if (ret)
6116 *inherited_all = 0;
6118 return ret;
6122 * Initialize the perf_event context in task_struct
6124 int perf_event_init_context(struct task_struct *child, int ctxn)
6126 struct perf_event_context *child_ctx, *parent_ctx;
6127 struct perf_event_context *cloned_ctx;
6128 struct perf_event *event;
6129 struct task_struct *parent = current;
6130 int inherited_all = 1;
6131 int ret = 0;
6133 child->perf_event_ctxp[ctxn] = NULL;
6135 mutex_init(&child->perf_event_mutex);
6136 INIT_LIST_HEAD(&child->perf_event_list);
6138 if (likely(!parent->perf_event_ctxp[ctxn]))
6139 return 0;
6142 * If the parent's context is a clone, pin it so it won't get
6143 * swapped under us.
6145 parent_ctx = perf_pin_task_context(parent, ctxn);
6148 * No need to check if parent_ctx != NULL here; since we saw
6149 * it non-NULL earlier, the only reason for it to become NULL
6150 * is if we exit, and since we're currently in the middle of
6151 * a fork we can't be exiting at the same time.
6155 * Lock the parent list. No need to lock the child - not PID
6156 * hashed yet and not running, so nobody can access it.
6158 mutex_lock(&parent_ctx->mutex);
6161 * We don't have to disable NMIs - we are only looking at
6162 * the list, not manipulating it:
6164 list_for_each_entry(event, &parent_ctx->pinned_groups, group_entry) {
6165 ret = inherit_task_group(event, parent, parent_ctx,
6166 child, ctxn, &inherited_all);
6167 if (ret)
6168 break;
6171 list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) {
6172 ret = inherit_task_group(event, parent, parent_ctx,
6173 child, ctxn, &inherited_all);
6174 if (ret)
6175 break;
6178 child_ctx = child->perf_event_ctxp[ctxn];
6180 if (child_ctx && inherited_all) {
6182 * Mark the child context as a clone of the parent
6183 * context, or of whatever the parent is a clone of.
6184 * Note that if the parent is a clone, it could get
6185 * uncloned at any point, but that doesn't matter
6186 * because the list of events and the generation
6187 * count can't have changed since we took the mutex.
6189 cloned_ctx = rcu_dereference(parent_ctx->parent_ctx);
6190 if (cloned_ctx) {
6191 child_ctx->parent_ctx = cloned_ctx;
6192 child_ctx->parent_gen = parent_ctx->parent_gen;
6193 } else {
6194 child_ctx->parent_ctx = parent_ctx;
6195 child_ctx->parent_gen = parent_ctx->generation;
6197 get_ctx(child_ctx->parent_ctx);
6200 mutex_unlock(&parent_ctx->mutex);
6202 perf_unpin_context(parent_ctx);
6204 return ret;
6208 * Initialize the perf_event context in task_struct
6210 int perf_event_init_task(struct task_struct *child)
6212 int ctxn, ret;
6214 for_each_task_context_nr(ctxn) {
6215 ret = perf_event_init_context(child, ctxn);
6216 if (ret)
6217 return ret;
6220 return 0;
6223 static void __init perf_event_init_all_cpus(void)
6225 struct swevent_htable *swhash;
6226 int cpu;
6228 for_each_possible_cpu(cpu) {
6229 swhash = &per_cpu(swevent_htable, cpu);
6230 mutex_init(&swhash->hlist_mutex);
6231 INIT_LIST_HEAD(&per_cpu(rotation_list, cpu));
6235 static void __cpuinit perf_event_init_cpu(int cpu)
6237 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
6239 mutex_lock(&swhash->hlist_mutex);
6240 if (swhash->hlist_refcount > 0) {
6241 struct swevent_hlist *hlist;
6243 hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu));
6244 WARN_ON(!hlist);
6245 rcu_assign_pointer(swhash->swevent_hlist, hlist);
6247 mutex_unlock(&swhash->hlist_mutex);
6250 #ifdef CONFIG_HOTPLUG_CPU
6251 static void perf_pmu_rotate_stop(struct pmu *pmu)
6253 struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
6255 WARN_ON(!irqs_disabled());
6257 list_del_init(&cpuctx->rotation_list);
6260 static void __perf_event_exit_context(void *__info)
6262 struct perf_event_context *ctx = __info;
6263 struct perf_event *event, *tmp;
6265 perf_pmu_rotate_stop(ctx->pmu);
6267 list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry)
6268 __perf_event_remove_from_context(event);
6269 list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, group_entry)
6270 __perf_event_remove_from_context(event);
6273 static void perf_event_exit_cpu_context(int cpu)
6275 struct perf_event_context *ctx;
6276 struct pmu *pmu;
6277 int idx;
6279 idx = srcu_read_lock(&pmus_srcu);
6280 list_for_each_entry_rcu(pmu, &pmus, entry) {
6281 ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx;
6283 mutex_lock(&ctx->mutex);
6284 smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1);
6285 mutex_unlock(&ctx->mutex);
6287 srcu_read_unlock(&pmus_srcu, idx);
6290 static void perf_event_exit_cpu(int cpu)
6292 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
6294 mutex_lock(&swhash->hlist_mutex);
6295 swevent_hlist_release(swhash);
6296 mutex_unlock(&swhash->hlist_mutex);
6298 perf_event_exit_cpu_context(cpu);
6300 #else
6301 static inline void perf_event_exit_cpu(int cpu) { }
6302 #endif
6304 static int __cpuinit
6305 perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
6307 unsigned int cpu = (long)hcpu;
6309 switch (action & ~CPU_TASKS_FROZEN) {
6311 case CPU_UP_PREPARE:
6312 case CPU_DOWN_FAILED:
6313 perf_event_init_cpu(cpu);
6314 break;
6316 case CPU_UP_CANCELED:
6317 case CPU_DOWN_PREPARE:
6318 perf_event_exit_cpu(cpu);
6319 break;
6321 default:
6322 break;
6325 return NOTIFY_OK;
6328 void __init perf_event_init(void)
6330 perf_event_init_all_cpus();
6331 init_srcu_struct(&pmus_srcu);
6332 perf_pmu_register(&perf_swevent);
6333 perf_pmu_register(&perf_cpu_clock);
6334 perf_pmu_register(&perf_task_clock);
6335 perf_tp_register();
6336 perf_cpu_notifier(perf_cpu_notify);