/*
 *    Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
 *    Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar
 *    Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra
 *
 * Data type definitions, declarations, prototypes.
 *
 *    Started by: Thomas Gleixner and Ingo Molnar
 *
 * For licensing details see kernel-base/COPYING
 */
#ifndef _LINUX_PERF_EVENT_H
#define _LINUX_PERF_EVENT_H

#include <linux/types.h>
#include <linux/ioctl.h>
#include <asm/byteorder.h>
/*
 * User-space ABI bits:
 */

enum perf_type_id {
	PERF_TYPE_HARDWARE			= 0,
	PERF_TYPE_SOFTWARE			= 1,
	PERF_TYPE_TRACEPOINT			= 2,
	PERF_TYPE_HW_CACHE			= 3,
	PERF_TYPE_BREAKPOINT			= 5,

	PERF_TYPE_MAX,				/* non-ABI */
};
/*
 * Generalized performance event event_id types, used by the
 * attr.event_id parameter of the sys_perf_event_open()
 * syscall:
 */
enum perf_hw_id {
	/*
	 * Common hardware events, generalized by the kernel:
	 */
	PERF_COUNT_HW_CPU_CYCLES		= 0,
	PERF_COUNT_HW_INSTRUCTIONS		= 1,
	PERF_COUNT_HW_CACHE_REFERENCES		= 2,
	PERF_COUNT_HW_CACHE_MISSES		= 3,
	PERF_COUNT_HW_BRANCH_INSTRUCTIONS	= 4,
	PERF_COUNT_HW_BRANCH_MISSES		= 5,
	PERF_COUNT_HW_BUS_CYCLES		= 6,
	PERF_COUNT_HW_STALLED_CYCLES_FRONTEND	= 7,
	PERF_COUNT_HW_STALLED_CYCLES_BACKEND	= 8,
	PERF_COUNT_HW_REF_CPU_CYCLES		= 9,

	PERF_COUNT_HW_MAX,			/* non-ABI */
};
/*
 * Generalized hardware cache events:
 *
 *       { L1-D, L1-I, LLC, ITLB, DTLB, BPU, NODE } x
 *       { read, write, prefetch } x
 *       { accesses, misses }
 */
enum perf_hw_cache_id {
	PERF_COUNT_HW_CACHE_L1D			= 0,
	PERF_COUNT_HW_CACHE_L1I			= 1,
	PERF_COUNT_HW_CACHE_LL			= 2,
	PERF_COUNT_HW_CACHE_DTLB		= 3,
	PERF_COUNT_HW_CACHE_ITLB		= 4,
	PERF_COUNT_HW_CACHE_BPU			= 5,
	PERF_COUNT_HW_CACHE_NODE		= 6,

	PERF_COUNT_HW_CACHE_MAX,		/* non-ABI */
};
enum perf_hw_cache_op_id {
	PERF_COUNT_HW_CACHE_OP_READ		= 0,
	PERF_COUNT_HW_CACHE_OP_WRITE		= 1,
	PERF_COUNT_HW_CACHE_OP_PREFETCH		= 2,

	PERF_COUNT_HW_CACHE_OP_MAX,		/* non-ABI */
};
enum perf_hw_cache_op_result_id {
	PERF_COUNT_HW_CACHE_RESULT_ACCESS	= 0,
	PERF_COUNT_HW_CACHE_RESULT_MISS		= 1,

	PERF_COUNT_HW_CACHE_RESULT_MAX,		/* non-ABI */
};
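/*
 * For PERF_TYPE_HW_CACHE events the three enums above are combined into
 * attr.config, as documented for perf_event_open(); the sketch below is
 * illustrative only and not part of this header:
 *
 *	attr.config = (perf_hw_cache_id) |
 *		      (perf_hw_cache_op_id << 8) |
 *		      (perf_hw_cache_op_result_id << 16);
 *
 * e.g. L1-D read misses:
 *
 *	attr.config = PERF_COUNT_HW_CACHE_L1D |
 *		      (PERF_COUNT_HW_CACHE_OP_READ << 8) |
 *		      (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);
 */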
/*
 * Special "software" events provided by the kernel, even if the hardware
 * does not support performance events. These events measure various
 * physical and sw events of the kernel (and allow the profiling of them as
 * well):
 */
enum perf_sw_ids {
	PERF_COUNT_SW_CPU_CLOCK			= 0,
	PERF_COUNT_SW_TASK_CLOCK		= 1,
	PERF_COUNT_SW_PAGE_FAULTS		= 2,
	PERF_COUNT_SW_CONTEXT_SWITCHES		= 3,
	PERF_COUNT_SW_CPU_MIGRATIONS		= 4,
	PERF_COUNT_SW_PAGE_FAULTS_MIN		= 5,
	PERF_COUNT_SW_PAGE_FAULTS_MAJ		= 6,
	PERF_COUNT_SW_ALIGNMENT_FAULTS		= 7,
	PERF_COUNT_SW_EMULATION_FAULTS		= 8,

	PERF_COUNT_SW_MAX,			/* non-ABI */
};
/*
 * Bits that can be set in attr.sample_type to request information
 * in the overflow packets.
 */
enum perf_event_sample_format {
	PERF_SAMPLE_IP				= 1U << 0,
	PERF_SAMPLE_TID				= 1U << 1,
	PERF_SAMPLE_TIME			= 1U << 2,
	PERF_SAMPLE_ADDR			= 1U << 3,
	PERF_SAMPLE_READ			= 1U << 4,
	PERF_SAMPLE_CALLCHAIN			= 1U << 5,
	PERF_SAMPLE_ID				= 1U << 6,
	PERF_SAMPLE_CPU				= 1U << 7,
	PERF_SAMPLE_PERIOD			= 1U << 8,
	PERF_SAMPLE_STREAM_ID			= 1U << 9,
	PERF_SAMPLE_RAW				= 1U << 10,
	PERF_SAMPLE_BRANCH_STACK		= 1U << 11,
	PERF_SAMPLE_REGS_USER			= 1U << 12,
	PERF_SAMPLE_STACK_USER			= 1U << 13,

	PERF_SAMPLE_MAX				= 1U << 14,	/* non-ABI */
};
/*
 * Values to program into branch_sample_type when PERF_SAMPLE_BRANCH_STACK
 * is set.
 *
 * If the user does not pass priv level information via branch_sample_type,
 * the kernel uses the event's priv level. Branch and event priv levels do
 * not have to match. Branch priv level is checked for permissions.
 *
 * The branch types can be combined, however BRANCH_ANY covers all types
 * of branches and therefore it supersedes all the other types.
 */
enum perf_branch_sample_type {
	PERF_SAMPLE_BRANCH_USER		= 1U << 0, /* user branches */
	PERF_SAMPLE_BRANCH_KERNEL	= 1U << 1, /* kernel branches */
	PERF_SAMPLE_BRANCH_HV		= 1U << 2, /* hypervisor branches */

	PERF_SAMPLE_BRANCH_ANY		= 1U << 3, /* any branch types */
	PERF_SAMPLE_BRANCH_ANY_CALL	= 1U << 4, /* any call branch */
	PERF_SAMPLE_BRANCH_ANY_RETURN	= 1U << 5, /* any return branch */
	PERF_SAMPLE_BRANCH_IND_CALL	= 1U << 6, /* indirect calls */

	PERF_SAMPLE_BRANCH_MAX		= 1U << 7, /* non-ABI */
};

#define PERF_SAMPLE_BRANCH_PLM_ALL \
	(PERF_SAMPLE_BRANCH_USER|\
	 PERF_SAMPLE_BRANCH_KERNEL|\
	 PERF_SAMPLE_BRANCH_HV)
/*
 * Values to determine ABI of the registers dump.
 */
enum perf_sample_regs_abi {
	PERF_SAMPLE_REGS_ABI_NONE	= 0,
	PERF_SAMPLE_REGS_ABI_32		= 1,
	PERF_SAMPLE_REGS_ABI_64		= 2,
};
/*
 * The format of the data returned by read() on a perf event fd,
 * as specified by attr.read_format:
 *
 * struct read_format {
 *	{ u64		value;
 *	  { u64		time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
 *	  { u64		time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
 *	  { u64		id;           } && PERF_FORMAT_ID
 *	} && !PERF_FORMAT_GROUP
 *
 *	{ u64		nr;
 *	  { u64		time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
 *	  { u64		time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
 *	  { u64		value;
 *	    { u64	id;           } && PERF_FORMAT_ID
 *	  }		cntr[nr];
 *	} && PERF_FORMAT_GROUP
 * };
 */
enum perf_event_read_format {
	PERF_FORMAT_TOTAL_TIME_ENABLED		= 1U << 0,
	PERF_FORMAT_TOTAL_TIME_RUNNING		= 1U << 1,
	PERF_FORMAT_ID				= 1U << 2,
	PERF_FORMAT_GROUP			= 1U << 3,

	PERF_FORMAT_MAX				= 1U << 4,	/* non-ABI */
};
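/*
 * A minimal user-space sketch (not part of this header) of reading and
 * scaling a single, non-group counter opened with
 * attr.read_format = PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING.
 * "fd" is assumed to be the file descriptor returned by the
 * perf_event_open() syscall:
 *
 *	struct { __u64 value, time_enabled, time_running; } rf;
 *
 *	if (read(fd, &rf, sizeof(rf)) == sizeof(rf) && rf.time_running) {
 *		__u64 scaled = rf.value * rf.time_enabled / rf.time_running;
 *		...
 *	}
 *
 * time_enabled and time_running differ when the counter was multiplexed.
 */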
#define PERF_ATTR_SIZE_VER0	64	/* sizeof first published struct */
#define PERF_ATTR_SIZE_VER1	72	/* add: config2 */
#define PERF_ATTR_SIZE_VER2	80	/* add: branch_sample_type */
#define PERF_ATTR_SIZE_VER3	96	/* add: sample_regs_user */
					/* add: sample_stack_user */
/*
 * Hardware event_id to monitor via a performance monitoring event:
 */
struct perf_event_attr {

	/*
	 * Major type: hardware/software/tracepoint/etc.
	 */
	__u32			type;

	/*
	 * Size of the attr structure, for fwd/bwd compat.
	 */
	__u32			size;

	/*
	 * Type specific configuration information.
	 */
	__u64			config;

	__u64			disabled       :  1, /* off by default        */
				inherit	       :  1, /* children inherit it   */
				pinned	       :  1, /* must always be on PMU */
				exclusive      :  1, /* only group on PMU     */
				exclude_user   :  1, /* don't count user      */
				exclude_kernel :  1, /* ditto kernel          */
				exclude_hv     :  1, /* ditto hypervisor      */
				exclude_idle   :  1, /* don't count when idle */
				mmap           :  1, /* include mmap data     */
				comm	       :  1, /* include comm data     */
				freq           :  1, /* use freq, not period  */
				inherit_stat   :  1, /* per task counts       */
				enable_on_exec :  1, /* next exec enables     */
				task           :  1, /* trace fork/exit       */
				watermark      :  1, /* wakeup_watermark      */
				/*
				 * precise_ip:
				 *
				 *  0 - SAMPLE_IP can have arbitrary skid
				 *  1 - SAMPLE_IP must have constant skid
				 *  2 - SAMPLE_IP requested to have 0 skid
				 *  3 - SAMPLE_IP must have 0 skid
				 *
				 *  See also PERF_RECORD_MISC_EXACT_IP
				 */
				precise_ip     :  2, /* skid constraint       */
				mmap_data      :  1, /* non-exec mmap data    */
				sample_id_all  :  1, /* sample_type all events */

				exclude_host   :  1, /* don't count in host   */
				exclude_guest  :  1, /* don't count in guest  */

				exclude_callchain_kernel : 1, /* exclude kernel callchains */
				exclude_callchain_user   : 1, /* exclude user callchains */

				__reserved_1   : 41;

	union {
		__u32		wakeup_events;	  /* wakeup every n events */
		__u32		wakeup_watermark; /* bytes before wakeup   */
	};

	__u64			config1; /* extension of config */
	__u64			config2; /* extension of config1 */

	__u64			branch_sample_type; /* enum perf_branch_sample_type */

	/*
	 * Defines set of user regs to dump on samples.
	 * See asm/perf_regs.h for details.
	 */
	__u64			sample_regs_user;

	/*
	 * Defines size of the user stack to dump on samples.
	 */
	__u32			sample_stack_user;
};
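/*
 * A minimal user-space sketch (not part of this header, error handling
 * omitted) of opening a counter with this structure; perf_event_open()
 * below is just a local wrapper around the raw syscall, and pid 0 / cpu -1
 * means "this task, on any CPU":
 *
 *	static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
 *				   int cpu, int group_fd, unsigned long flags)
 *	{
 *		return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
 *	}
 *
 *	struct perf_event_attr attr = {
 *		.type		= PERF_TYPE_HARDWARE,
 *		.size		= sizeof(attr),
 *		.config		= PERF_COUNT_HW_INSTRUCTIONS,
 *		.disabled	= 1,
 *		.exclude_kernel	= 1,
 *	};
 *	int fd = perf_event_open(&attr, 0, -1, -1, 0);
 */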
/*
 * Ioctls that can be done on a perf event fd:
 */
#define PERF_EVENT_IOC_ENABLE		_IO ('$', 0)
#define PERF_EVENT_IOC_DISABLE		_IO ('$', 1)
#define PERF_EVENT_IOC_REFRESH		_IO ('$', 2)
#define PERF_EVENT_IOC_RESET		_IO ('$', 3)
#define PERF_EVENT_IOC_PERIOD		_IOW('$', 4, __u64)
#define PERF_EVENT_IOC_SET_OUTPUT	_IO ('$', 5)
#define PERF_EVENT_IOC_SET_FILTER	_IOW('$', 6, char *)

enum perf_event_ioc_flags {
	PERF_IOC_FLAG_GROUP		= 1U << 0,
};
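/*
 * Typical user-space usage (a sketch, not part of this header): reset and
 * start a single counter, or apply an operation to a whole event group by
 * passing PERF_IOC_FLAG_GROUP to the group leader's fd:
 *
 *	ioctl(fd, PERF_EVENT_IOC_RESET,  0);
 *	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
 *	...
 *	ioctl(group_leader_fd, PERF_EVENT_IOC_DISABLE, PERF_IOC_FLAG_GROUP);
 */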
/*
 * Structure of the page that can be mapped via mmap
 */
struct perf_event_mmap_page {
	__u32	version;		/* version number of this structure */
	__u32	compat_version;		/* lowest version this is compat with */

	/*
	 * Bits needed to read the hw events in user-space.
	 *
	 *   u32 seq, time_mult, time_shift, idx, width;
	 *   u64 count, enabled, running;
	 *   u64 cyc, time_offset;
	 *   s64 pmc = 0;
	 *
	 *   do {
	 *     seq = pc->lock;
	 *     barrier();
	 *
	 *     enabled = pc->time_enabled;
	 *     running = pc->time_running;
	 *
	 *     if (pc->cap_usr_time && enabled != running) {
	 *       cyc = rdtsc();
	 *       time_offset = pc->time_offset;
	 *       time_mult   = pc->time_mult;
	 *       time_shift  = pc->time_shift;
	 *     }
	 *
	 *     idx = pc->index;
	 *     count = pc->offset;
	 *     if (pc->cap_usr_rdpmc && idx) {
	 *       width = pc->pmc_width;
	 *       pmc = rdpmc(idx - 1);
	 *     }
	 *
	 *     barrier();
	 *   } while (pc->lock != seq);
	 *
	 * NOTE: for obvious reason this only works on self-monitoring
	 *       processes.
	 */
	__u32	lock;			/* seqlock for synchronization */
	__u32	index;			/* hardware event identifier */
	__s64	offset;			/* add to hardware event value */
	__u64	time_enabled;		/* time event active */
	__u64	time_running;		/* time event on cpu */

	__u64	cap_usr_time		: 1,
		cap_usr_rdpmc		: 1,
		cap_____res		: 62;

	/*
	 * If cap_usr_rdpmc this field provides the bit-width of the value
	 * read using the rdpmc() or equivalent instruction. This can be used
	 * to sign extend the result like:
	 *
	 *   pmc <<= 64 - width;
	 *   pmc >>= 64 - width; // signed shift right
	 *   count += pmc;
	 */
	__u16	pmc_width;

	/*
	 * If cap_usr_time the below fields can be used to compute the time
	 * delta since time_enabled (in ns) using rdtsc or similar.
	 *
	 *   quot = (cyc >> time_shift);
	 *   rem = cyc & ((1 << time_shift) - 1);
	 *   delta = time_offset + quot * time_mult +
	 *              ((rem * time_mult) >> time_shift);
	 *
	 * Where time_offset,time_mult,time_shift and cyc are read in the
	 * seqcount loop described above. This delta can then be added to
	 * enabled and possible running (if idx), improving the scaling:
	 *
	 *   enabled += delta;
	 *   if (idx)
	 *     running += delta;
	 *
	 *   quot = count / running;
	 *   rem = count % running;
	 *   count = quot * enabled + (rem * enabled) / running;
	 */
	__u16	time_shift;
	__u32	time_mult;
	__u64	time_offset;

	/*
	 * Hole for extension of the self monitor capabilities
	 */

	__u64	__reserved[120];	/* align to 1k */

	/*
	 * Control data for the mmap() data buffer.
	 *
	 * User-space reading the @data_head value should issue an rmb(), on
	 * SMP capable platforms, after reading this value -- see
	 * perf_event_wakeup().
	 *
	 * When the mapping is PROT_WRITE the @data_tail value should be
	 * written by userspace to reflect the last read data. In this case
	 * the kernel will not over-write unread data.
	 */
	__u64	data_head;		/* head in the data section */
	__u64	data_tail;		/* user-space written tail */
};
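/*
 * A user-space consumer sketch (not part of this header). It assumes "base"
 * is the mmap()ed area and "page_size" the system page size, so the record
 * data starts one page after this control page:
 *
 *	struct perf_event_mmap_page *pc = base;
 *	__u64 head = pc->data_head;
 *	rmb();
 *	... parse struct perf_event_header records between pc->data_tail and
 *	    head in the data area at base + page_size ...
 *	pc->data_tail = head;	// publish how far we have read
 */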
#define PERF_RECORD_MISC_CPUMODE_MASK		(7 << 0)
#define PERF_RECORD_MISC_CPUMODE_UNKNOWN	(0 << 0)
#define PERF_RECORD_MISC_KERNEL			(1 << 0)
#define PERF_RECORD_MISC_USER			(2 << 0)
#define PERF_RECORD_MISC_HYPERVISOR		(3 << 0)
#define PERF_RECORD_MISC_GUEST_KERNEL		(4 << 0)
#define PERF_RECORD_MISC_GUEST_USER		(5 << 0)

/*
 * Indicates that the content of PERF_SAMPLE_IP points to
 * the actual instruction that triggered the event. See also
 * perf_event_attr::precise_ip.
 */
#define PERF_RECORD_MISC_EXACT_IP		(1 << 14)
/*
 * Reserve the last bit to indicate some extended misc field
 */
#define PERF_RECORD_MISC_EXT_RESERVED		(1 << 15)
struct perf_event_header {
	__u32	type;
	__u16	misc;
	__u16	size;
};
enum perf_event_type {

	/*
	 * If perf_event_attr.sample_id_all is set then all event types will
	 * have the sample_type selected fields related to where/when
	 * (identity) an event took place (TID, TIME, ID, CPU, STREAM_ID)
	 * described in PERF_RECORD_SAMPLE below, it will be stashed just after
	 * the perf_event_header and the fields already present for the existing
	 * fields, i.e. at the end of the payload. That way a newer perf.data
	 * file will be supported by older perf tools, with these new optional
	 * fields being ignored.
	 *
	 * The MMAP events record the PROT_EXEC mappings so that we can
	 * correlate userspace IPs to code. They have the following structure:
	 *
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	u32				pid, tid;
	 *	u64				addr;
	 *	u64				len;
	 *	u64				pgoff;
	 *	char				filename[];
	 * };
	 */
	PERF_RECORD_MMAP			= 1,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u64				id;
	 *	u64				lost;
	 * };
	 */
	PERF_RECORD_LOST			= 2,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	u32				pid, tid;
	 *	char				comm[];
	 * };
	 */
	PERF_RECORD_COMM			= 3,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u32				pid, ppid;
	 *	u32				tid, ptid;
	 *	u64				time;
	 * };
	 */
	PERF_RECORD_EXIT			= 4,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u64				time;
	 *	u64				id;
	 *	u64				stream_id;
	 * };
	 */
	PERF_RECORD_THROTTLE			= 5,
	PERF_RECORD_UNTHROTTLE			= 6,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u32				pid, ppid;
	 *	u32				tid, ptid;
	 *	u64				time;
	 * };
	 */
	PERF_RECORD_FORK			= 7,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u32				pid, tid;
	 *
	 *	struct read_format		values;
	 * };
	 */
	PERF_RECORD_READ			= 8,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	{ u64			ip;	  } && PERF_SAMPLE_IP
	 *	{ u32			pid, tid; } && PERF_SAMPLE_TID
	 *	{ u64			time;     } && PERF_SAMPLE_TIME
	 *	{ u64			addr;     } && PERF_SAMPLE_ADDR
	 *	{ u64			id;	  } && PERF_SAMPLE_ID
	 *	{ u64			stream_id;} && PERF_SAMPLE_STREAM_ID
	 *	{ u32			cpu, res; } && PERF_SAMPLE_CPU
	 *	{ u64			period;   } && PERF_SAMPLE_PERIOD
	 *
	 *	{ struct read_format	values;	  } && PERF_SAMPLE_READ
	 *
	 *	{ u64			nr,
	 *	  u64			ips[nr];  } && PERF_SAMPLE_CALLCHAIN
	 *
	 *	#
	 *	# The RAW record below is opaque data wrt the ABI
	 *	#
	 *	# That is, the ABI doesn't make any promises wrt to
	 *	# the stability of its content, it may vary depending
	 *	# on event, hardware, kernel version and phase of
	 *	# the moon.
	 *	#
	 *	# In other words, PERF_SAMPLE_RAW contents are not an ABI.
	 *	#
	 *
	 *	{ u32			size;
	 *	  char			data[size];}&& PERF_SAMPLE_RAW
	 *
	 *	{ u64			nr;
	 *	  { u64 from, to, flags } lbr[nr];} && PERF_SAMPLE_BRANCH_STACK
	 *
	 *	{ u64			abi; # enum perf_sample_regs_abi
	 *	  u64			regs[weight(mask)]; } && PERF_SAMPLE_REGS_USER
	 *
	 *	{ u64			size;
	 *	  char			data[size];
	 *	  u64			dyn_size; } && PERF_SAMPLE_STACK_USER
	 * };
	 */
	PERF_RECORD_SAMPLE			= 9,

	PERF_RECORD_MAX,			/* non-ABI */
};
#define PERF_MAX_STACK_DEPTH		127

enum perf_callchain_context {
	PERF_CONTEXT_HV			= (__u64)-32,
	PERF_CONTEXT_KERNEL		= (__u64)-128,
	PERF_CONTEXT_USER		= (__u64)-512,

	PERF_CONTEXT_GUEST		= (__u64)-2048,
	PERF_CONTEXT_GUEST_KERNEL	= (__u64)-2176,
	PERF_CONTEXT_GUEST_USER		= (__u64)-2560,

	PERF_CONTEXT_MAX		= (__u64)-4095,
};
#define PERF_FLAG_FD_NO_GROUP		(1U << 0)
#define PERF_FLAG_FD_OUTPUT		(1U << 1)
#define PERF_FLAG_PID_CGROUP		(1U << 2) /* pid=cgroup id, per-cpu mode only */
#ifdef __KERNEL__
/*
 * Kernel-internal data types and definitions:
 */

#ifdef CONFIG_PERF_EVENTS
# include <linux/cgroup.h>
# include <asm/perf_event.h>
# include <asm/local64.h>
#endif

struct perf_guest_info_callbacks {
	int				(*is_in_guest)(void);
	int				(*is_user_mode)(void);
	unsigned long			(*get_guest_ip)(void);
};

#ifdef CONFIG_HAVE_HW_BREAKPOINT
#include <asm/hw_breakpoint.h>
#endif

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/pid_namespace.h>
#include <linux/workqueue.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/irq_work.h>
#include <linux/static_key.h>
#include <linux/atomic.h>
#include <linux/sysfs.h>
#include <linux/perf_regs.h>
#include <asm/local.h>

struct perf_callchain_entry {
	__u64				nr;
	__u64				ip[PERF_MAX_STACK_DEPTH];
};
struct perf_raw_record {
	u32				size;
	void				*data;
};

/*
 * single taken branch record layout:
 *
 *      from: source instruction (may not always be a branch insn)
 *        to: branch target
 *   mispred: branch target was mispredicted
 * predicted: branch target was predicted
 *
 * support for mispred, predicted is optional. In case it
 * is not supported mispred = predicted = 0.
 */
struct perf_branch_entry {
	__u64	from;
	__u64	to;
	__u64	mispred:1,  /* target mispredicted */
		predicted:1,/* target predicted */
		reserved:62;
};

/*
 * branch stack layout:
 *  nr: number of taken branches stored in entries[]
 *
 * Note that nr can vary from sample to sample
 * branches (to, from) are stored from most recent
 * to least recent, i.e., entries[0] contains the most
 * recent branch.
 */
struct perf_branch_stack {
	__u64				nr;
	struct perf_branch_entry	entries[0];
};

struct perf_regs_user {
	__u64		abi;
	struct pt_regs	*regs;
};
/*
 * extra PMU register associated with an event
 */
struct hw_perf_event_extra {
	u64		config;	/* register value */
	unsigned int	reg;	/* register address or index */
	int		alloc;	/* extra register already allocated */
	int		idx;	/* index in shared_regs->regs[] */
};
/**
 * struct hw_perf_event - performance event hardware details:
 */
struct hw_perf_event {
#ifdef CONFIG_PERF_EVENTS
	union {
		struct { /* hardware */
			unsigned long	config_base;
			unsigned long	event_base;
			int		event_base_rdpmc;

			struct hw_perf_event_extra extra_reg;
			struct hw_perf_event_extra branch_reg;
		};
		struct { /* software */
			struct hrtimer	hrtimer;
		};
#ifdef CONFIG_HAVE_HW_BREAKPOINT
		struct { /* breakpoint */
			struct arch_hw_breakpoint	info;
			struct list_head		bp_list;
			/*
			 * Crufty hack to avoid the chicken and egg
			 * problem hw_breakpoint has with context
			 * creation and event initialization.
			 */
			struct task_struct		*bp_target;
		};
#endif
	};
	local64_t			prev_count;
	local64_t			period_left;
	u64				freq_count_stamp;
#endif
};

/*
 * hw_perf_event::state flags
 */
#define PERF_HES_STOPPED	0x01 /* the counter is stopped */
#define PERF_HES_UPTODATE	0x02 /* event->count up-to-date */
#define PERF_HES_ARCH		0x04

/*
 * Common implementation detail of pmu::{start,commit,cancel}_txn
 */
#define PERF_EVENT_TXN 0x1
/**
 * struct pmu - generic performance monitoring unit
 */
struct pmu {
	struct list_head		entry;

	const struct attribute_group	**attr_groups;

	int * __percpu			pmu_disable_count;
	struct perf_cpu_context * __percpu pmu_cpu_context;

	/*
	 * Fully disable/enable this PMU, can be used to protect from the PMI
	 * as well as for lazy/batch writing of the MSRs.
	 */
	void (*pmu_enable)		(struct pmu *pmu); /* optional */
	void (*pmu_disable)		(struct pmu *pmu); /* optional */

	/*
	 * Try and initialize the event for this PMU.
	 * Should return -ENOENT when the @event doesn't match this PMU.
	 */
	int (*event_init)		(struct perf_event *event);

#define PERF_EF_START	0x01		/* start the counter when adding    */
#define PERF_EF_RELOAD	0x02		/* reload the counter when starting */
#define PERF_EF_UPDATE	0x04		/* update the counter when stopping */

	/*
	 * Adds/Removes a counter to/from the PMU, can be done inside
	 * a transaction, see the ->*_txn() methods.
	 */
	int  (*add)			(struct perf_event *event, int flags);
	void (*del)			(struct perf_event *event, int flags);

	/*
	 * Starts/Stops a counter present on the PMU. The PMI handler
	 * should stop the counter when perf_event_overflow() returns
	 * !0. ->start() will be used to continue.
	 */
	void (*start)			(struct perf_event *event, int flags);
	void (*stop)			(struct perf_event *event, int flags);

	/*
	 * Updates the counter value of the event.
	 */
	void (*read)			(struct perf_event *event);

	/*
	 * Group events scheduling is treated as a transaction, add
	 * group events as a whole and perform one schedulability test.
	 * If the test fails, roll back the whole group.
	 *
	 * Start the transaction, after this ->add() doesn't need to
	 * do schedulability tests.
	 */
	void (*start_txn)		(struct pmu *pmu); /* optional */
	/*
	 * If ->start_txn() disabled the ->add() schedulability test
	 * then ->commit_txn() is required to perform one. On success
	 * the transaction is closed. On error the transaction is kept
	 * open until ->cancel_txn() is called.
	 */
	int  (*commit_txn)		(struct pmu *pmu); /* optional */
	/*
	 * Will cancel the transaction, assumes ->del() is called
	 * for each successful ->add() during the transaction.
	 */
	void (*cancel_txn)		(struct pmu *pmu); /* optional */

	/*
	 * Will return the value for perf_event_mmap_page::index for this event,
	 * if no implementation is provided it will default to: event->hw.idx + 1.
	 */
	int (*event_idx)		(struct perf_event *event); /* optional */

	/*
	 * flush branch stack on context-switches (needed in cpu-wide mode)
	 */
	void (*flush_branch_stack)	(void);
};
/**
 * enum perf_event_active_state - the states of an event
 */
enum perf_event_active_state {
	PERF_EVENT_STATE_ERROR		= -2,
	PERF_EVENT_STATE_OFF		= -1,
	PERF_EVENT_STATE_INACTIVE	=  0,
	PERF_EVENT_STATE_ACTIVE		=  1,
};

struct perf_sample_data;

typedef void (*perf_overflow_handler_t)(struct perf_event *,
					struct perf_sample_data *,
					struct pt_regs *regs);

enum perf_group_flag {
	PERF_GROUP_SOFTWARE		= 0x1,
};
#define SWEVENT_HLIST_BITS		8
#define SWEVENT_HLIST_SIZE		(1 << SWEVENT_HLIST_BITS)

struct swevent_hlist {
	struct hlist_head		heads[SWEVENT_HLIST_SIZE];
	struct rcu_head			rcu_head;
};

#define PERF_ATTACH_CONTEXT	0x01
#define PERF_ATTACH_GROUP	0x02
#define PERF_ATTACH_TASK	0x04

#ifdef CONFIG_CGROUP_PERF
/*
 * perf_cgroup_info keeps track of time_enabled for a cgroup.
 * This is a per-cpu dynamically allocated data structure.
 */
struct perf_cgroup_info {
	u64				time;
	u64				timestamp;
};

struct perf_cgroup {
	struct cgroup_subsys_state	css;
	struct perf_cgroup_info		*info;	/* timing info, one per cpu */
};
#endif
/**
 * struct perf_event - performance event kernel representation:
 */
struct perf_event {
#ifdef CONFIG_PERF_EVENTS
	struct list_head		group_entry;
	struct list_head		event_entry;
	struct list_head		sibling_list;
	struct hlist_node		hlist_entry;

	struct perf_event		*group_leader;

	enum perf_event_active_state	state;
	unsigned int			attach_state;

	atomic64_t			child_count;

	/*
	 * These are the total time in nanoseconds that the event
	 * has been enabled (i.e. eligible to run, and the task has
	 * been scheduled in, if this is a per-task event)
	 * and running (scheduled onto the CPU), respectively.
	 *
	 * They are computed from tstamp_enabled, tstamp_running and
	 * tstamp_stopped when the event is in INACTIVE or ACTIVE state.
	 */
	u64				total_time_enabled;
	u64				total_time_running;

	/*
	 * These are timestamps used for computing total_time_enabled
	 * and total_time_running when the event is in INACTIVE or
	 * ACTIVE state, measured in nanoseconds from an arbitrary point
	 * in time.
	 *
	 * tstamp_enabled: the notional time when the event was enabled
	 * tstamp_running: the notional time when the event was scheduled on
	 * tstamp_stopped: in INACTIVE state, the notional time when the
	 *	event was scheduled off.
	 */
	u64				tstamp_enabled;
	u64				tstamp_running;
	u64				tstamp_stopped;

	/*
	 * timestamp shadows the actual context timing but it can
	 * be safely used in NMI interrupt context. It reflects the
	 * context time as it was when the event was last scheduled in.
	 *
	 * ctx_time already accounts for ctx->timestamp. Therefore to
	 * compute ctx_time for a sample, simply add perf_clock().
	 */
	u64				shadow_ctx_time;

	struct perf_event_attr		attr;

	struct hw_perf_event		hw;

	struct perf_event_context	*ctx;

	/*
	 * These accumulate total time (in nanoseconds) that children
	 * events have been enabled and running, respectively.
	 */
	atomic64_t			child_total_time_enabled;
	atomic64_t			child_total_time_running;

	/*
	 * Protect attach/detach and child_list:
	 */
	struct mutex			child_mutex;
	struct list_head		child_list;
	struct perf_event		*parent;

	struct list_head		owner_entry;
	struct task_struct		*owner;

	struct mutex			mmap_mutex;

	struct user_struct		*mmap_user;
	struct ring_buffer		*rb;
	struct list_head		rb_entry;

	wait_queue_head_t		waitq;
	struct fasync_struct		*fasync;

	/* delayed work for NMIs and such */
	int				pending_disable;
	struct irq_work			pending;

	atomic_t			event_limit;

	void (*destroy)(struct perf_event *);
	struct rcu_head			rcu_head;

	struct pid_namespace		*ns;

	perf_overflow_handler_t		overflow_handler;
	void				*overflow_handler_context;

#ifdef CONFIG_EVENT_TRACING
	struct ftrace_event_call	*tp_event;
	struct event_filter		*filter;
#ifdef CONFIG_FUNCTION_TRACER
	struct ftrace_ops		ftrace_ops;
#endif
#endif

#ifdef CONFIG_CGROUP_PERF
	struct perf_cgroup		*cgrp; /* cgroup event is attach to */
	int				cgrp_defer_enabled;
#endif

#endif /* CONFIG_PERF_EVENTS */
};
enum perf_event_context_type {
	task_context,
	cpu_context,
};

/**
 * struct perf_event_context - event context structure
 *
 * Used as a container for task events and CPU events as well:
 */
struct perf_event_context {
	enum perf_event_context_type	type;
	/*
	 * Protect the states of the events in the list,
	 * nr_active, and the list:
	 */
	raw_spinlock_t			lock;
	/*
	 * Protect the list of events. Locking either mutex or lock
	 * is sufficient to ensure the list doesn't change; to change
	 * the list you need to lock both the mutex and the spinlock.
	 */
	struct mutex			mutex;

	struct list_head		pinned_groups;
	struct list_head		flexible_groups;
	struct list_head		event_list;

	struct task_struct		*task;

	/*
	 * Context clock, runs when context enabled.
	 */
	u64				time;

	/*
	 * These fields let us detect when two contexts have both
	 * been cloned (inherited) from a common ancestor.
	 */
	struct perf_event_context	*parent_ctx;

	int				nr_cgroups;	 /* cgroup evts */
	int				nr_branch_stack; /* branch_stack evt */
	struct rcu_head			rcu_head;
};

/*
 * Number of contexts where an event can trigger:
 *	task, softirq, hardirq, nmi.
 */
#define PERF_NR_CONTEXTS	4
/**
 * struct perf_event_cpu_context - per cpu event context structure
 */
struct perf_cpu_context {
	struct perf_event_context	ctx;
	struct perf_event_context	*task_ctx;

	struct list_head		rotation_list;
	int				jiffies_interval;
	struct pmu			*active_pmu;
	struct perf_cgroup		*cgrp;
};

struct perf_output_handle {
	struct perf_event		*event;
	struct ring_buffer		*rb;
	unsigned long			wakeup;
};
#ifdef CONFIG_PERF_EVENTS

extern int perf_pmu_register(struct pmu *pmu, char *name, int type);
extern void perf_pmu_unregister(struct pmu *pmu);

extern int perf_num_counters(void);
extern const char *perf_pmu_name(void);
extern void __perf_event_task_sched_in(struct task_struct *prev,
				       struct task_struct *task);
extern void __perf_event_task_sched_out(struct task_struct *prev,
					struct task_struct *next);
extern int perf_event_init_task(struct task_struct *child);
extern void perf_event_exit_task(struct task_struct *child);
extern void perf_event_free_task(struct task_struct *task);
extern void perf_event_delayed_put(struct task_struct *task);
extern void perf_event_print_debug(void);
extern void perf_pmu_disable(struct pmu *pmu);
extern void perf_pmu_enable(struct pmu *pmu);
extern int perf_event_task_disable(void);
extern int perf_event_task_enable(void);
extern int perf_event_refresh(struct perf_event *event, int refresh);
extern void perf_event_update_userpage(struct perf_event *event);
extern int perf_event_release_kernel(struct perf_event *event);
extern struct perf_event *
perf_event_create_kernel_counter(struct perf_event_attr *attr,
				 int cpu,
				 struct task_struct *task,
				 perf_overflow_handler_t callback,
				 void *context);
extern void perf_pmu_migrate_context(struct pmu *pmu,
				     int src_cpu, int dst_cpu);
extern u64 perf_event_read_value(struct perf_event *event,
				 u64 *enabled, u64 *running);
struct perf_sample_data {
	u64				addr;
	u64				period;
	struct perf_callchain_entry	*callchain;
	struct perf_raw_record		*raw;
	struct perf_branch_stack	*br_stack;
	struct perf_regs_user		regs_user;
	u64				stack_user_size;
};

static inline void perf_sample_data_init(struct perf_sample_data *data,
					 u64 addr, u64 period)
{
	/* remaining struct members initialized in perf_prepare_sample() */
	data->addr = addr;
	data->raw  = NULL;
	data->br_stack = NULL;
	data->period = period;
	data->regs_user.abi = PERF_SAMPLE_REGS_ABI_NONE;
	data->regs_user.regs = NULL;
	data->stack_user_size = 0;
}
extern void perf_output_sample(struct perf_output_handle *handle,
			       struct perf_event_header *header,
			       struct perf_sample_data *data,
			       struct perf_event *event);
extern void perf_prepare_sample(struct perf_event_header *header,
				struct perf_sample_data *data,
				struct perf_event *event,
				struct pt_regs *regs);

extern int perf_event_overflow(struct perf_event *event,
			       struct perf_sample_data *data,
			       struct pt_regs *regs);
static inline bool is_sampling_event(struct perf_event *event)
{
	return event->attr.sample_period != 0;
}

/*
 * Return 1 for a software event, 0 for a hardware event
 */
static inline int is_software_event(struct perf_event *event)
{
	return event->pmu->task_ctx_nr == perf_sw_context;
}

extern struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];

extern void __perf_sw_event(u32, u64, struct pt_regs *, u64);
#ifndef perf_arch_fetch_caller_regs
static inline void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { }
#endif

/*
 * Take a snapshot of the regs. Skip ip and frame pointer to
 * the nth caller. We only need a few of the regs:
 * - ip for PERF_SAMPLE_IP
 * - cs for user_mode() tests
 * - bp for callchains
 * - eflags, for future purposes, just in case
 */
static inline void perf_fetch_caller_regs(struct pt_regs *regs)
{
	memset(regs, 0, sizeof(*regs));

	perf_arch_fetch_caller_regs(regs, CALLER_ADDR0);
}

static __always_inline void
perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
{
	struct pt_regs hot_regs;

	if (static_key_false(&perf_swevent_enabled[event_id])) {
		if (!regs) {
			perf_fetch_caller_regs(&hot_regs);
			regs = &hot_regs;
		}
		__perf_sw_event(event_id, nr, regs, addr);
	}
}
extern struct static_key_deferred perf_sched_events;

static inline void perf_event_task_sched_in(struct task_struct *prev,
					    struct task_struct *task)
{
	if (static_key_false(&perf_sched_events.key))
		__perf_event_task_sched_in(prev, task);
}

static inline void perf_event_task_sched_out(struct task_struct *prev,
					     struct task_struct *next)
{
	perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, NULL, 0);

	if (static_key_false(&perf_sched_events.key))
		__perf_event_task_sched_out(prev, next);
}
extern void perf_event_mmap(struct vm_area_struct *vma);
extern struct perf_guest_info_callbacks *perf_guest_cbs;
extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);

extern void perf_event_comm(struct task_struct *tsk);
extern void perf_event_fork(struct task_struct *tsk);

DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);

extern void perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs);
extern void perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs);

static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64 ip)
{
	if (entry->nr < PERF_MAX_STACK_DEPTH)
		entry->ip[entry->nr++] = ip;
}
extern int sysctl_perf_event_paranoid;
extern int sysctl_perf_event_mlock;
extern int sysctl_perf_event_sample_rate;

extern int perf_proc_update_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos);

static inline bool perf_paranoid_tracepoint_raw(void)
{
	return sysctl_perf_event_paranoid > -1;
}

static inline bool perf_paranoid_cpu(void)
{
	return sysctl_perf_event_paranoid > 0;
}

static inline bool perf_paranoid_kernel(void)
{
	return sysctl_perf_event_paranoid > 1;
}
extern void perf_event_init(void);
extern void perf_tp_event(u64 addr, u64 count, void *record,
			  int entry_size, struct pt_regs *regs,
			  struct hlist_head *head, int rctx,
			  struct task_struct *task);
extern void perf_bp_event(struct perf_event *event, void *data);

#ifndef perf_misc_flags
# define perf_misc_flags(regs) \
		(user_mode(regs) ? PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL)
# define perf_instruction_pointer(regs)	instruction_pointer(regs)
#endif
static inline bool has_branch_stack(struct perf_event *event)
{
	return event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK;
}

extern int perf_output_begin(struct perf_output_handle *handle,
			     struct perf_event *event, unsigned int size);
extern void perf_output_end(struct perf_output_handle *handle);
extern unsigned int perf_output_copy(struct perf_output_handle *handle,
				     const void *buf, unsigned int len);
extern unsigned int perf_output_skip(struct perf_output_handle *handle,
				     unsigned int len);
extern int perf_swevent_get_recursion_context(void);
extern void perf_swevent_put_recursion_context(int rctx);
extern void perf_event_enable(struct perf_event *event);
extern void perf_event_disable(struct perf_event *event);
extern void perf_event_task_tick(void);
#else
static inline void
perf_event_task_sched_in(struct task_struct *prev,
			 struct task_struct *task)			{ }
static inline void
perf_event_task_sched_out(struct task_struct *prev,
			  struct task_struct *next)			{ }
static inline int perf_event_init_task(struct task_struct *child)	{ return 0; }
static inline void perf_event_exit_task(struct task_struct *child)	{ }
static inline void perf_event_free_task(struct task_struct *task)	{ }
static inline void perf_event_delayed_put(struct task_struct *task)	{ }
static inline void perf_event_print_debug(void)			{ }
static inline int perf_event_task_disable(void)			{ return -EINVAL; }
static inline int perf_event_task_enable(void)				{ return -EINVAL; }
static inline int perf_event_refresh(struct perf_event *event, int refresh)
{
	return -EINVAL;
}

static inline void
perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)	{ }
static inline void
perf_bp_event(struct perf_event *event, void *data)			{ }

static inline int perf_register_guest_info_callbacks
(struct perf_guest_info_callbacks *callbacks)				{ return 0; }
static inline int perf_unregister_guest_info_callbacks
(struct perf_guest_info_callbacks *callbacks)				{ return 0; }

static inline void perf_event_mmap(struct vm_area_struct *vma)		{ }
static inline void perf_event_comm(struct task_struct *tsk)		{ }
static inline void perf_event_fork(struct task_struct *tsk)		{ }
static inline void perf_event_init(void)				{ }
static inline int perf_swevent_get_recursion_context(void)		{ return -1; }
static inline void perf_swevent_put_recursion_context(int rctx)	{ }
static inline void perf_event_enable(struct perf_event *event)		{ }
static inline void perf_event_disable(struct perf_event *event)	{ }
static inline void perf_event_task_tick(void)				{ }
#endif
#define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x))

/*
 * This has to have a higher priority than migration_notifier in sched.c.
 */
#define perf_cpu_notifier(fn)						\
do {									\
	static struct notifier_block fn##_nb __cpuinitdata =		\
		{ .notifier_call = fn, .priority = CPU_PRI_PERF };	\
	fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE,			\
		(void *)(unsigned long)smp_processor_id());		\
	fn(&fn##_nb, (unsigned long)CPU_STARTING,			\
		(void *)(unsigned long)smp_processor_id());		\
	fn(&fn##_nb, (unsigned long)CPU_ONLINE,				\
		(void *)(unsigned long)smp_processor_id());		\
	register_cpu_notifier(&fn##_nb);				\
} while (0)
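/*
 * Illustrative use only (the callback name below is hypothetical): a PMU
 * driver typically wires up its CPU hotplug callback from its init code as
 *
 *	perf_cpu_notifier(my_pmu_cpu_notify);
 *
 * which immediately replays CPU_UP_PREPARE, CPU_STARTING and CPU_ONLINE for
 * the current CPU and then registers the notifier block for future hotplug
 * events.
 */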
#define PMU_FORMAT_ATTR(_name, _format)					\
static ssize_t								\
_name##_show(struct device *dev,					\
	     struct device_attribute *attr,				\
	     char *page)						\
{									\
	BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);			\
	return sprintf(page, _format "\n");				\
}									\
									\
static struct device_attribute format_attr_##_name = __ATTR_RO(_name)
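/*
 * Illustrative use only: a PMU driver can describe its config layout for
 * sysfs with, e.g.
 *
 *	PMU_FORMAT_ATTR(event, "config:0-7");
 *
 * which defines event_show() and a format_attr_event device attribute that
 * can then be placed in the PMU's "format" attribute group (one of the
 * groups referenced by pmu::attr_groups).
 */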
#endif /* __KERNEL__ */
#endif /* _LINUX_PERF_EVENT_H */