/*
 * This file implements the perfmon-2 subsystem which is used
 * to program the IA-64 Performance Monitoring Unit (PMU).
 *
 * The initial version of perfmon.c was written by
 * Ganesh Venkitachalam, IBM Corp.
 *
 * Then it was modified for perfmon-1.x by Stephane Eranian and
 * David Mosberger, Hewlett Packard Co.
 *
 * Version Perfmon-2.x is a rewrite of perfmon-1.x
 * by Stephane Eranian, Hewlett Packard Co.
 *
 * Copyright (C) 1999-2005 Hewlett Packard Co
 *               Stephane Eranian <eranian@hpl.hp.com>
 *               David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * More information about perfmon available at:
 * 	http://www.hpl.hp.com/research/linux/perfmon
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/smp_lock.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/sysctl.h>
#include <linux/list.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/vfs.h>
#include <linux/pagemap.h>
#include <linux/mount.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/rcupdate.h>
#include <linux/completion.h>

#include <asm/errno.h>
#include <asm/intrinsics.h>
#include <asm/perfmon.h>
#include <asm/processor.h>
#include <asm/signal.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/delay.h>
/*
 * perfmon context state
 */
#define PFM_CTX_UNLOADED	1	/* context is not loaded onto any task */
#define PFM_CTX_LOADED		2	/* context is loaded onto a task */
#define PFM_CTX_MASKED		3	/* context is loaded but monitoring is masked due to overflow */
#define PFM_CTX_ZOMBIE		4	/* owner of the context is closing it */

#define PFM_INVALID_ACTIVATION	(~0UL)

/*
 * depth of message queue
 */
#define PFM_MAX_MSGS		32
#define PFM_CTXQ_EMPTY(g)	((g)->ctx_msgq_head == (g)->ctx_msgq_tail)
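/*
 * Editor's note (not in the original source): the message queue is a
 * classic circular buffer. head == tail means empty, and the producer
 * (pfm_get_new_msg() below) refuses to advance the tail onto the head,
 * so one of the PFM_MAX_MSGS slots is always sacrificed to tell "full"
 * apart from "empty":
 *
 *	empty: head == tail
 *	full : (tail + 1) % PFM_MAX_MSGS == head
 */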
/*
 * type of a PMU register (bitmask).
 * 	bit0   : register implemented
 * 	bit4   : pmc has pmc.pm
 * 	bit5   : pmc controls a counter (has pmc.oi), pmd is used as counter
 * 	bit6-7 : register type
 */
#define PFM_REG_NOTIMPL		0x0 /* not implemented at all */
#define PFM_REG_IMPL		0x1 /* register implemented */
#define PFM_REG_END		0x2 /* end marker */
#define PFM_REG_MONITOR		(0x1<<4|PFM_REG_IMPL) /* a PMC with a pmc.pm field only */
#define PFM_REG_COUNTING	(0x2<<4|PFM_REG_MONITOR) /* a monitor + pmc.oi + PMD used as a counter */
#define PFM_REG_CONTROL		(0x4<<4|PFM_REG_IMPL) /* PMU control register */
#define PFM_REG_CONFIG		(0x8<<4|PFM_REG_IMPL) /* configuration register */
#define PFM_REG_BUFFER		(0xc<<4|PFM_REG_IMPL) /* PMD used as buffer */
#define PMC_IS_LAST(i)	(pmu_conf->pmc_desc[i].type & PFM_REG_END)
#define PMD_IS_LAST(i)	(pmu_conf->pmd_desc[i].type & PFM_REG_END)

#define PMC_OVFL_NOTIFY(ctx, i)	((ctx)->ctx_pmds[i].flags & PFM_REGFL_OVFL_NOTIFY)

/* i assumed unsigned */
#define PMC_IS_IMPL(i)	  ((i) < PMU_MAX_PMCS && (pmu_conf->pmc_desc[i].type & PFM_REG_IMPL))
#define PMD_IS_IMPL(i)	  ((i) < PMU_MAX_PMDS && (pmu_conf->pmd_desc[i].type & PFM_REG_IMPL))

/* XXX: these assume that register i is implemented */
#define PMD_IS_COUNTING(i) ((pmu_conf->pmd_desc[i].type & PFM_REG_COUNTING) == PFM_REG_COUNTING)
#define PMC_IS_COUNTING(i) ((pmu_conf->pmc_desc[i].type & PFM_REG_COUNTING) == PFM_REG_COUNTING)
#define PMC_IS_MONITOR(i)  ((pmu_conf->pmc_desc[i].type & PFM_REG_MONITOR) == PFM_REG_MONITOR)
#define PMC_IS_CONTROL(i)  ((pmu_conf->pmc_desc[i].type & PFM_REG_CONTROL) == PFM_REG_CONTROL)

#define PMC_DFL_VAL(i)     pmu_conf->pmc_desc[i].default_value
#define PMC_RSVD_MASK(i)   pmu_conf->pmc_desc[i].reserved_mask
#define PMD_PMD_DEP(i)	   pmu_conf->pmd_desc[i].dep_pmd[0]
#define PMC_PMD_DEP(i)	   pmu_conf->pmc_desc[i].dep_pmd[0]
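/*
 * Editor's note (not in the original source): the type tests above must
 * compare against the full pattern rather than just AND-ing, because the
 * type values overlap. PFM_REG_COUNTING is (0x2<<4|PFM_REG_MONITOR),
 * i.e. it includes the MONITOR and IMPL bits, so:
 *
 *	(type & PFM_REG_COUNTING) == PFM_REG_COUNTING	// a counting monitor
 *	(type & PFM_REG_MONITOR)  == PFM_REG_MONITOR	// any monitor, counting or not
 */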
#define PFM_NUM_IBRS	  IA64_NUM_DBG_REGS
#define PFM_NUM_DBRS	  IA64_NUM_DBG_REGS

#define CTX_OVFL_NOBLOCK(c)	((c)->ctx_fl_block == 0)
#define CTX_HAS_SMPL(c)		((c)->ctx_fl_is_sampling)
#define PFM_CTX_TASK(h)		(h)->ctx_task

#define PMU_PMC_OI		5 /* position of pmc.oi bit */

/* XXX: does not support more than 64 PMDs */
#define CTX_USED_PMD(ctx, mask) (ctx)->ctx_used_pmds[0] |= (mask)
#define CTX_IS_USED_PMD(ctx, c) (((ctx)->ctx_used_pmds[0] & (1UL << (c))) != 0UL)

#define CTX_USED_MONITOR(ctx, mask) (ctx)->ctx_used_monitors[0] |= (mask)

#define CTX_USED_IBR(ctx,n)	(ctx)->ctx_used_ibrs[(n)>>6] |= 1UL << ((n) % 64)
#define CTX_USED_DBR(ctx,n)	(ctx)->ctx_used_dbrs[(n)>>6] |= 1UL << ((n) % 64)
#define CTX_USES_DBREGS(ctx)	(((pfm_context_t *)(ctx))->ctx_fl_using_dbreg==1)
#define PFM_CODE_RR	0	/* requesting code range restriction */
#define PFM_DATA_RR	1	/* requesting data range restriction */

#define PFM_CPUINFO_CLEAR(v)	pfm_get_cpu_var(pfm_syst_info) &= ~(v)
#define PFM_CPUINFO_SET(v)	pfm_get_cpu_var(pfm_syst_info) |= (v)
#define PFM_CPUINFO_GET()	pfm_get_cpu_var(pfm_syst_info)

#define RDEP(x)	(1UL<<(x))
/*
 * context protection macros
 * in SMP:
 * 	- we need to protect against CPU concurrency (spin_lock)
 * 	- we need to protect against PMU overflow interrupts (local_irq_disable)
 * in UP:
 * 	- we need to protect against PMU overflow interrupts (local_irq_disable)
 *
 * spin_lock_irqsave()/spin_unlock_irqrestore():
 * 	in SMP: local_irq_disable + spin_lock
 * 	in UP : local_irq_disable
 *
 * spin_lock()/spin_unlock():
 * 	in UP : removed automatically
 * 	in SMP: protect against context accesses from other CPU. interrupts
 * 	        are not masked. This is useful for the PMU interrupt handler
 * 	        because we know we will not get PMU concurrency in that code.
 */
#define PROTECT_CTX(c, f) \
	do {  \
		DPRINT(("spinlock_irq_save ctx %p by [%d]\n", c, current->pid)); \
		spin_lock_irqsave(&(c)->ctx_lock, f); \
		DPRINT(("spinlocked ctx %p by [%d]\n", c, current->pid)); \
	} while(0)

#define UNPROTECT_CTX(c, f) \
	do { \
		DPRINT(("spinlock_irq_restore ctx %p by [%d]\n", c, current->pid)); \
		spin_unlock_irqrestore(&(c)->ctx_lock, f); \
	} while(0)

#define PROTECT_CTX_NOPRINT(c, f) \
	do {  \
		spin_lock_irqsave(&(c)->ctx_lock, f); \
	} while(0)

#define UNPROTECT_CTX_NOPRINT(c, f) \
	do { \
		spin_unlock_irqrestore(&(c)->ctx_lock, f); \
	} while(0)

#define PROTECT_CTX_NOIRQ(c) \
	do {  \
		spin_lock(&(c)->ctx_lock); \
	} while(0)

#define UNPROTECT_CTX_NOIRQ(c) \
	do { \
		spin_unlock(&(c)->ctx_lock); \
	} while(0)
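/*
 * Usage sketch (editor's example, not from the original source):
 *
 *	unsigned long flags;
 *
 *	PROTECT_CTX(ctx, flags);	// masks interrupts + takes ctx_lock
 *	... update context state, safe from the PMU overflow handler ...
 *	UNPROTECT_CTX(ctx, flags);
 *
 * The *_NOIRQ variants may only be used where a PMU overflow interrupt
 * cannot race with us, e.g. from within the interrupt handler itself.
 */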
#ifdef CONFIG_SMP

#define GET_ACTIVATION()	pfm_get_cpu_var(pmu_activation_number)
#define INC_ACTIVATION()	pfm_get_cpu_var(pmu_activation_number)++
#define SET_ACTIVATION(c)	(c)->ctx_last_activation = GET_ACTIVATION()

#else /* !CONFIG_SMP */
#define SET_ACTIVATION(t)	do {} while(0)
#define GET_ACTIVATION(t)	do {} while(0)
#define INC_ACTIVATION(t)	do {} while(0)
#endif /* CONFIG_SMP */
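/*
 * Editor's note (not in the original source): the per-CPU activation
 * number is a generation counter used to make PMU state restore lazy.
 * A context records the activation number of the CPU it last ran on
 * (SET_ACTIVATION); on context-switch in, if ctx_last_activation still
 * equals GET_ACTIVATION() on the same CPU, the live PMU registers are
 * known to still hold this context's state and reloading can be skipped.
 */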
#define SET_PMU_OWNER(t, c)	do { pfm_get_cpu_var(pmu_owner) = (t); pfm_get_cpu_var(pmu_ctx) = (c); } while(0)
#define GET_PMU_OWNER()		pfm_get_cpu_var(pmu_owner)
#define GET_PMU_CTX()		pfm_get_cpu_var(pmu_ctx)

#define LOCK_PFS(g)		spin_lock_irqsave(&pfm_sessions.pfs_lock, g)
#define UNLOCK_PFS(g)		spin_unlock_irqrestore(&pfm_sessions.pfs_lock, g)

#define PFM_REG_RETFLAG_SET(flags, val)	do { flags &= ~PFM_REG_RETFL_MASK; flags |= (val); } while(0)

/*
 * cmp0 must be the value of pmc0
 */
#define PMC0_HAS_OVFL(cmp0)  (cmp0 & ~0x1UL)

#define PFMFS_MAGIC 0xa0b4d889
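/*
 * Editor's note (not in the original source): pmc0 bit 0 is the PMU
 * freeze bit, while the higher bits report which counters overflowed.
 * Masking bit 0 off therefore leaves a non-zero value if and only if
 * at least one overflow bit is set, e.g.:
 *
 *	PMC0_HAS_OVFL(0x1)  -> 0        (frozen, no overflow recorded)
 *	PMC0_HAS_OVFL(0x11) -> non-zero (an overflow bit is set)
 */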
/*
 * debugging
 */
#define PFM_DEBUGGING 1
#ifdef PFM_DEBUGGING
#define DPRINT(a) \
	do { \
		if (unlikely(pfm_sysctl.debug >0)) { printk("%s.%d: CPU%d [%d] ", __FUNCTION__, __LINE__, smp_processor_id(), current->pid); printk a; } \
	} while (0)

#define DPRINT_ovfl(a) \
	do { \
		if (unlikely(pfm_sysctl.debug > 0 && pfm_sysctl.debug_ovfl >0)) { printk("%s.%d: CPU%d [%d] ", __FUNCTION__, __LINE__, smp_processor_id(), current->pid); printk a; } \
	} while (0)
#endif
/*
 * 64-bit software counter structure
 *
 * the next_reset_type is applied to the next call to pfm_reset_regs()
 */
typedef struct {
	unsigned long	val;		/* virtual 64bit counter value */
	unsigned long	lval;		/* last reset value */
	unsigned long	long_reset;	/* reset value on sampling overflow */
	unsigned long	short_reset;	/* reset value on overflow */
	unsigned long	reset_pmds[4];	/* which other pmds to reset when this counter overflows */
	unsigned long	smpl_pmds[4];	/* which pmds are accessed when the counter overflows */
	unsigned long	seed;		/* seed for random-number generator */
	unsigned long	mask;		/* mask for random-number generator */
	unsigned int	flags;		/* notify/do not notify */
	unsigned long	eventid;	/* overflow event identifier */
} pfm_counter_t;
/*
 * context flags
 */
typedef struct {
	unsigned int block:1;		/* when 1, task will block on user notifications */
	unsigned int system:1;		/* do system wide monitoring */
	unsigned int using_dbreg:1;	/* using range restrictions (debug registers) */
	unsigned int is_sampling:1;	/* true if using a custom format */
	unsigned int excl_idle:1;	/* exclude idle task in system wide session */
	unsigned int going_zombie:1;	/* context is zombie (MASKED+blocking) */
	unsigned int trap_reason:2;	/* reason for going into pfm_handle_work() */
	unsigned int no_msg:1;		/* no message sent on overflow */
	unsigned int can_restart:1;	/* allowed to issue a PFM_RESTART */
	unsigned int reserved:22;
} pfm_context_flags_t;

#define PFM_TRAP_REASON_NONE	0x0	/* default value */
#define PFM_TRAP_REASON_BLOCK	0x1	/* we need to block on overflow */
#define PFM_TRAP_REASON_RESET	0x2	/* we need to reset PMDs */
/*
 * perfmon context: encapsulates all the state of a monitoring session
 */
typedef struct pfm_context {
	spinlock_t		ctx_lock;		/* context protection */

	pfm_context_flags_t	ctx_flags;		/* bitmask of flags (block reason incl.) */
	unsigned int		ctx_state;		/* state: active/inactive (no bitfield) */

	struct task_struct	*ctx_task;		/* task to which context is attached */

	unsigned long		ctx_ovfl_regs[4];	/* which registers overflowed (notification) */

	struct completion	ctx_restart_done;	/* use for blocking notification mode */

	unsigned long		ctx_used_pmds[4];	/* bitmask of PMD used */
	unsigned long		ctx_all_pmds[4];	/* bitmask of all accessible PMDs */
	unsigned long		ctx_reload_pmds[4];	/* bitmask of force reload PMD on ctxsw in */

	unsigned long		ctx_all_pmcs[4];	/* bitmask of all accessible PMCs */
	unsigned long		ctx_reload_pmcs[4];	/* bitmask of force reload PMC on ctxsw in */
	unsigned long		ctx_used_monitors[4];	/* bitmask of monitor PMC being used */

	unsigned long		ctx_pmcs[IA64_NUM_PMC_REGS];	/* saved copies of PMC values */

	unsigned int		ctx_used_ibrs[1];		/* bitmask of used IBR (speedup ctxsw in) */
	unsigned int		ctx_used_dbrs[1];		/* bitmask of used DBR (speedup ctxsw in) */
	unsigned long		ctx_dbrs[IA64_NUM_DBG_REGS];	/* DBR values (cache) when not loaded */
	unsigned long		ctx_ibrs[IA64_NUM_DBG_REGS];	/* IBR values (cache) when not loaded */

	pfm_counter_t		ctx_pmds[IA64_NUM_PMD_REGS];	/* software state for PMDS */

	u64			ctx_saved_psr_up;	/* only contains psr.up value */

	unsigned long		ctx_last_activation;	/* context last activation number for last_cpu */
	unsigned int		ctx_last_cpu;		/* CPU id of current or last CPU used (SMP only) */
	unsigned int		ctx_cpu;		/* cpu to which perfmon is applied (system wide) */

	int			ctx_fd;			/* file descriptor used by this context */
	pfm_ovfl_arg_t		ctx_ovfl_arg;		/* argument to custom buffer format handler */

	pfm_buffer_fmt_t	*ctx_buf_fmt;		/* buffer format callbacks */
	void			*ctx_smpl_hdr;		/* points to sampling buffer header kernel vaddr */
	unsigned long		ctx_smpl_size;		/* size of sampling buffer */
	void			*ctx_smpl_vaddr;	/* user level virtual address of smpl buffer */

	wait_queue_head_t	ctx_msgq_wait;
	pfm_msg_t		ctx_msgq[PFM_MAX_MSGS];
	int			ctx_msgq_head;
	int			ctx_msgq_tail;
	struct fasync_struct	*ctx_async_queue;

	wait_queue_head_t	ctx_zombieq;		/* termination cleanup wait queue */
} pfm_context_t;
/*
 * magic number used to verify that structure is really
 * a perfmon context
 */
#define PFM_IS_FILE(f)		((f)->f_op == &pfm_file_ops)

#define PFM_GET_CTX(t)		((pfm_context_t *)(t)->thread.pfm_context)

#ifdef CONFIG_SMP
#define SET_LAST_CPU(ctx, v)	(ctx)->ctx_last_cpu = (v)
#define GET_LAST_CPU(ctx)	(ctx)->ctx_last_cpu
#else
#define SET_LAST_CPU(ctx, v)	do {} while(0)
#define GET_LAST_CPU(ctx)	do {} while(0)
#endif

#define ctx_fl_block		ctx_flags.block
#define ctx_fl_system		ctx_flags.system
#define ctx_fl_using_dbreg	ctx_flags.using_dbreg
#define ctx_fl_is_sampling	ctx_flags.is_sampling
#define ctx_fl_excl_idle	ctx_flags.excl_idle
#define ctx_fl_going_zombie	ctx_flags.going_zombie
#define ctx_fl_trap_reason	ctx_flags.trap_reason
#define ctx_fl_no_msg		ctx_flags.no_msg
#define ctx_fl_can_restart	ctx_flags.can_restart

#define PFM_SET_WORK_PENDING(t, v)	do { (t)->thread.pfm_needs_checking = v; } while(0)
#define PFM_GET_WORK_PENDING(t)		(t)->thread.pfm_needs_checking
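/*
 * Editor's example (not in the original source): the ctx_fl_* defines
 * above let the flag bits be accessed as if they were direct members:
 *
 *	ctx->ctx_fl_block	// expands to ctx->ctx_flags.block
 *	ctx->ctx_fl_system	// expands to ctx->ctx_flags.system
 */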
/*
 * global information about all sessions
 * mostly used to synchronize between system wide and per-process
 */
typedef struct {
	spinlock_t		pfs_lock;		   /* lock the structure */

	unsigned int		pfs_task_sessions;	   /* number of per task sessions */
	unsigned int		pfs_sys_sessions;	   /* number of per system wide sessions */
	unsigned int		pfs_sys_use_dbregs;	   /* incremented when a system wide session uses debug regs */
	unsigned int		pfs_ptrace_use_dbregs;	   /* incremented when a process uses debug regs */
	struct task_struct	*pfs_sys_session[NR_CPUS]; /* point to task owning a system-wide session */
} pfm_session_t;
/*
 * information about a PMC or PMD.
 * dep_pmd[]: a bitmask of dependent PMD registers
 * dep_pmc[]: a bitmask of dependent PMC registers
 */
typedef int (*pfm_reg_check_t)(struct task_struct *task, pfm_context_t *ctx, unsigned int cnum, unsigned long *val, struct pt_regs *regs);

typedef struct {
	unsigned int		type;
	int			pm_pos;
	unsigned long		default_value;	/* power-on default value */
	unsigned long		reserved_mask;	/* bitmask of reserved bits */
	pfm_reg_check_t		read_check;
	pfm_reg_check_t		write_check;
	unsigned long		dep_pmd[4];
	unsigned long		dep_pmc[4];
} pfm_reg_desc_t;

/* assume cnum is a valid monitor */
#define PMC_PM(cnum, val)	(((val) >> (pmu_conf->pmc_desc[cnum].pm_pos)) & 0x1)
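/*
 * Editor's example (not in the original source): pm_pos gives the bit
 * position of the pmc.pm (privileged monitor) field for that PMC.
 * Assuming, e.g., pm_pos == 6 for PMC4:
 *
 *	PMC_PM(4, 1UL << 6)  -> 1	(privileged monitor)
 *	PMC_PM(4, 0)         -> 0
 */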
/*
 * This structure is initialized at boot time and contains
 * a description of the PMU main characteristics.
 *
 * If the probe function is defined, detection is based
 * on its return value:
 * 	- 0 means recognized PMU
 * 	- anything else means not supported
 * When the probe function is not defined, then the pmu_family field
 * is used and it must match the host CPU family such that:
 * 	- cpu->family & config->pmu_family != 0
 */
typedef struct {
	unsigned long	ovfl_val;	/* overflow value for counters */

	pfm_reg_desc_t	*pmc_desc;	/* detailed PMC register dependencies descriptions */
	pfm_reg_desc_t	*pmd_desc;	/* detailed PMD register dependencies descriptions */

	unsigned int	num_pmcs;	/* number of PMCS: computed at init time */
	unsigned int	num_pmds;	/* number of PMDS: computed at init time */
	unsigned long	impl_pmcs[4];	/* bitmask of implemented PMCS */
	unsigned long	impl_pmds[4];	/* bitmask of implemented PMDS */

	char		*pmu_name;	/* PMU family name */
	unsigned int	pmu_family;	/* cpuid family pattern used to identify pmu */
	unsigned int	flags;		/* pmu specific flags */
	unsigned int	num_ibrs;	/* number of IBRS: computed at init time */
	unsigned int	num_dbrs;	/* number of DBRS: computed at init time */
	unsigned int	num_counters;	/* PMC/PMD counting pairs : computed at init time */
	int		(*probe)(void);	/* customized probe routine */
	unsigned int	use_rr_dbregs:1; /* set if debug registers used for range restriction */
} pmu_config_t;

#define PFM_PMU_IRQ_RESEND	1	/* PMU needs explicit IRQ resend */
/*
 * debug register related type definitions
 */
typedef struct {
	unsigned long ibr_mask:56;
	unsigned long ibr_plm:4;
	unsigned long ibr_ig:3;
	unsigned long ibr_x:1;
} ibr_mask_reg_t;

typedef struct {
	unsigned long dbr_mask:56;
	unsigned long dbr_plm:4;
	unsigned long dbr_ig:2;
	unsigned long dbr_w:1;
	unsigned long dbr_r:1;
} dbr_mask_reg_t;
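/*
 * Editor's sketch (not in the original source): a data breakpoint
 * register could be assembled from these bitfields, e.g. a write-only
 * breakpoint over a 4KB region at all privilege levels:
 *
 *	dbr_mask_reg_t d = {0};
 *	d.dbr_mask = ~0xfffUL;	// ignore low 12 address bits (truncated to 56-bit field)
 *	d.dbr_plm  = 0xf;	// match all privilege levels
 *	d.dbr_w    = 1;		// trigger on writes
 *	d.dbr_r    = 0;		// not on reads
 */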
/*
 * perfmon command descriptions
 */
typedef struct {
	int		(*cmd_func)(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
	char		*cmd_name;
	int		cmd_flags;
	unsigned int	cmd_narg;
	size_t		cmd_argsize;
	int		(*cmd_getsize)(void *arg, size_t *sz);
} pfm_cmd_desc_t;

#define PFM_CMD_FD		0x01	/* command requires a file descriptor */
#define PFM_CMD_ARG_READ	0x02	/* command must read argument(s) */
#define PFM_CMD_ARG_RW		0x04	/* command must read/write argument(s) */
#define PFM_CMD_STOP		0x08	/* command does not work on zombie context */

#define PFM_CMD_NAME(cmd)	pfm_cmd_tab[(cmd)].cmd_name
#define PFM_CMD_READ_ARG(cmd)	(pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_ARG_READ)
#define PFM_CMD_RW_ARG(cmd)	(pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_ARG_RW)
#define PFM_CMD_USE_FD(cmd)	(pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_FD)
#define PFM_CMD_STOPPED(cmd)	(pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_STOP)

#define PFM_CMD_ARG_MANY	-1 /* cannot be zero */
typedef struct {
	unsigned long pfm_spurious_ovfl_intr_count;	/* keep track of spurious ovfl interrupts */
	unsigned long pfm_replay_ovfl_intr_count;	/* keep track of replayed ovfl interrupts */
	unsigned long pfm_ovfl_intr_count;		/* keep track of ovfl interrupts */
	unsigned long pfm_ovfl_intr_cycles;		/* cycles spent processing ovfl interrupts */
	unsigned long pfm_ovfl_intr_cycles_min;		/* min cycles spent processing ovfl interrupts */
	unsigned long pfm_ovfl_intr_cycles_max;		/* max cycles spent processing ovfl interrupts */
	unsigned long pfm_smpl_handler_calls;
	unsigned long pfm_smpl_handler_cycles;
	char pad[SMP_CACHE_BYTES] ____cacheline_aligned;
} pfm_stats_t;
/*
 * perfmon internal variables
 */
static pfm_stats_t		pfm_stats[NR_CPUS];
static pfm_session_t		pfm_sessions;	/* global sessions information */

static DEFINE_SPINLOCK(pfm_alt_install_check);
static pfm_intr_handler_desc_t	*pfm_alt_intr_handler;

static struct proc_dir_entry	*perfmon_dir;
static pfm_uuid_t		pfm_null_uuid = {0,};

static spinlock_t		pfm_buffer_fmt_lock;
static LIST_HEAD(pfm_buffer_fmt_list);

static pmu_config_t		*pmu_conf;

/* sysctl() controls */
pfm_sysctl_t pfm_sysctl;
EXPORT_SYMBOL(pfm_sysctl);
static ctl_table pfm_ctl_table[]={
	{1, "debug", &pfm_sysctl.debug, sizeof(int), 0666, NULL, &proc_dointvec, NULL,},
	{2, "debug_ovfl", &pfm_sysctl.debug_ovfl, sizeof(int), 0666, NULL, &proc_dointvec, NULL,},
	{3, "fastctxsw", &pfm_sysctl.fastctxsw, sizeof(int), 0600, NULL, &proc_dointvec, NULL,},
	{4, "expert_mode", &pfm_sysctl.expert_mode, sizeof(int), 0600, NULL, &proc_dointvec, NULL,},
	{0,},
};
static ctl_table pfm_sysctl_dir[] = {
	{1, "perfmon", NULL, 0, 0755, pfm_ctl_table, },
	{0,},
};
static ctl_table pfm_sysctl_root[] = {
	{1, "kernel", NULL, 0, 0755, pfm_sysctl_dir, },
	{0,},
};
static struct ctl_table_header *pfm_sysctl_header;

static int pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
#define pfm_get_cpu_var(v)	__ia64_per_cpu_var(v)
#define pfm_get_cpu_data(a,b)	per_cpu(a, b)
static inline void
pfm_put_task(struct task_struct *task)
{
	if (task != current) put_task_struct(task);
}

static inline void
pfm_set_task_notify(struct task_struct *task)
{
	struct thread_info *info;

	info = (struct thread_info *) ((char *) task + IA64_TASK_SIZE);
	set_bit(TIF_NOTIFY_RESUME, &info->flags);
}

static inline void
pfm_clear_task_notify(void)
{
	clear_thread_flag(TIF_NOTIFY_RESUME);
}

static inline void
pfm_reserve_page(unsigned long a)
{
	SetPageReserved(vmalloc_to_page((void *)a));
}

static inline void
pfm_unreserve_page(unsigned long a)
{
	ClearPageReserved(vmalloc_to_page((void *)a));
}
static inline unsigned long
pfm_protect_ctx_ctxsw(pfm_context_t *x)
{
	spin_lock(&(x)->ctx_lock);
	return 0UL;
}

static inline void
pfm_unprotect_ctx_ctxsw(pfm_context_t *x, unsigned long f)
{
	spin_unlock(&(x)->ctx_lock);
}

static inline unsigned int
pfm_do_munmap(struct mm_struct *mm, unsigned long addr, size_t len, int acct)
{
	return do_munmap(mm, addr, len);
}

static inline unsigned long
pfm_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags, unsigned long exec)
{
	return get_unmapped_area(file, addr, len, pgoff, flags);
}
static int
pfmfs_get_sb(struct file_system_type *fs_type, int flags, const char *dev_name, void *data,
	     struct vfsmount *mnt)
{
	return get_sb_pseudo(fs_type, "pfm:", NULL, PFMFS_MAGIC, mnt);
}

static struct file_system_type pfm_fs_type = {
	.name     = "pfmfs",
	.get_sb   = pfmfs_get_sb,
	.kill_sb  = kill_anon_super,
};
DEFINE_PER_CPU(unsigned long, pfm_syst_info);
DEFINE_PER_CPU(struct task_struct *, pmu_owner);
DEFINE_PER_CPU(pfm_context_t *, pmu_ctx);
DEFINE_PER_CPU(unsigned long, pmu_activation_number);
EXPORT_PER_CPU_SYMBOL_GPL(pfm_syst_info);

/* forward declaration */
static struct file_operations pfm_file_ops;
/*
 * forward declarations
 */
static void pfm_lazy_save_regs (struct task_struct *ta);

void dump_pmu_state(const char *);
static int pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);

#include "perfmon_itanium.h"
#include "perfmon_mckinley.h"
#include "perfmon_montecito.h"
#include "perfmon_generic.h"

static pmu_config_t *pmu_confs[]={
	&pmu_conf_mont,
	&pmu_conf_mck,
	&pmu_conf_ita,
	&pmu_conf_gen,	/* must be last */
	NULL
};

static int pfm_end_notify_user(pfm_context_t *ctx);
static inline void
pfm_clear_psr_pp(void)
{
	ia64_rsm(IA64_PSR_PP);
}

static inline void
pfm_set_psr_pp(void)
{
	ia64_ssm(IA64_PSR_PP);
}

static inline void
pfm_clear_psr_up(void)
{
	ia64_rsm(IA64_PSR_UP);
}

static inline void
pfm_set_psr_up(void)
{
	ia64_ssm(IA64_PSR_UP);
}

static inline unsigned long
pfm_get_psr(void)
{
	unsigned long tmp;
	tmp = ia64_getreg(_IA64_REG_PSR);
	return tmp;
}

static inline void
pfm_set_psr_l(unsigned long val)
{
	ia64_setreg(_IA64_REG_PSR_L, val);
}

static inline void
pfm_unfreeze_pmu(void)
{
	ia64_set_pmc(0, 0UL);
	ia64_srlz_d();
}
static inline void
pfm_restore_ibrs(unsigned long *ibrs, unsigned int nibrs)
{
	unsigned int i;

	for (i=0; i < nibrs; i++) {
		ia64_set_ibr(i, ibrs[i]);
		ia64_dv_serialize_instruction();
	}
}

static inline void
pfm_restore_dbrs(unsigned long *dbrs, unsigned int ndbrs)
{
	unsigned int i;

	for (i=0; i < ndbrs; i++) {
		ia64_set_dbr(i, dbrs[i]);
		ia64_dv_serialize_data();
	}
}
/*
 * PMD[i] must be a counter. no check is made
 */
static inline unsigned long
pfm_read_soft_counter(pfm_context_t *ctx, int i)
{
	return ctx->ctx_pmds[i].val + (ia64_get_pmd(i) & pmu_conf->ovfl_val);
}

/*
 * PMD[i] must be a counter. no check is made
 */
static inline void
pfm_write_soft_counter(pfm_context_t *ctx, int i, unsigned long val)
{
	unsigned long ovfl_val = pmu_conf->ovfl_val;

	ctx->ctx_pmds[i].val = val & ~ovfl_val;
	/*
	 * writing to the unimplemented part is ignored, so we do not need
	 * to mask off the top part
	 */
	ia64_set_pmd(i, val & ovfl_val);
}
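/*
 * Editor's sketch (not in the original source): assuming, e.g., 47-bit
 * hardware counters (ovfl_val == (1UL << 47) - 1), the 64-bit virtual
 * counter is split between hardware and software:
 *
 *	pfm_write_soft_counter(ctx, i, v):
 *		ctx->ctx_pmds[i].val = v & ~ovfl_val;	// upper bits, software
 *		PMD[i]               = v & ovfl_val;	// lower bits, hardware
 *
 *	pfm_read_soft_counter(ctx, i):
 *		v = ctx->ctx_pmds[i].val + (PMD[i] & ovfl_val);
 *
 * Hardware overflows therefore only ever wrap the cheap lower part,
 * which the overflow handler folds back into the software value.
 */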
static pfm_msg_t *
pfm_get_new_msg(pfm_context_t *ctx)
{
	int idx, next;

	next = (ctx->ctx_msgq_tail+1) % PFM_MAX_MSGS;

	DPRINT(("ctx_fd=%p head=%d tail=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail));
	if (next == ctx->ctx_msgq_head) return NULL;

	idx = ctx->ctx_msgq_tail;
	ctx->ctx_msgq_tail = next;

	DPRINT(("ctx=%p head=%d tail=%d msg=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail, idx));

	return ctx->ctx_msgq+idx;
}
static pfm_msg_t *
pfm_get_next_msg(pfm_context_t *ctx)
{
	pfm_msg_t *msg;

	DPRINT(("ctx=%p head=%d tail=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail));

	if (PFM_CTXQ_EMPTY(ctx)) return NULL;

	/*
	 * get oldest message
	 */
	msg = ctx->ctx_msgq+ctx->ctx_msgq_head;

	/*
	 * and move forward
	 */
	ctx->ctx_msgq_head = (ctx->ctx_msgq_head+1) % PFM_MAX_MSGS;

	DPRINT(("ctx=%p head=%d tail=%d type=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail, msg->pfm_gen_msg.msg_type));

	return msg;
}

static void
pfm_reset_msgq(pfm_context_t *ctx)
{
	ctx->ctx_msgq_head = ctx->ctx_msgq_tail = 0;
	DPRINT(("ctx=%p msgq reset\n", ctx));
}
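/*
 * Editor's example (not in the original source): producer/consumer use
 * of the queue above, always under PROTECT_CTX():
 *
 *	msg = pfm_get_new_msg(ctx);	// NULL if the ring is full
 *	if (msg) msg->pfm_gen_msg.msg_type = PFM_MSG_OVFL;
 *	...
 *	msg = pfm_get_next_msg(ctx);	// NULL if the ring is empty
 */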
static void *
pfm_rvmalloc(unsigned long size)
{
	void *mem;
	unsigned long addr;

	size = PAGE_ALIGN(size);
	mem  = vmalloc(size);
	if (mem) {
		//printk("perfmon: CPU%d pfm_rvmalloc(%ld)=%p\n", smp_processor_id(), size, mem);
		memset(mem, 0, size);
		addr = (unsigned long)mem;
		while (size > 0) {
			pfm_reserve_page(addr);
			addr += PAGE_SIZE;
			size -= PAGE_SIZE;
		}
	}
	return mem;
}

static void
pfm_rvfree(void *mem, unsigned long size)
{
	unsigned long addr;

	if (mem) {
		DPRINT(("freeing physical buffer @%p size=%lu\n", mem, size));
		addr = (unsigned long) mem;
		while ((long) size > 0) {
			pfm_unreserve_page(addr);
			addr += PAGE_SIZE;
			size -= PAGE_SIZE;
		}
	}
}
static pfm_context_t *
pfm_context_alloc(void)
{
	pfm_context_t *ctx;

	/*
	 * allocate context descriptor
	 * must be able to free with interrupts disabled
	 */
	ctx = kmalloc(sizeof(pfm_context_t), GFP_KERNEL);
	if (ctx) {
		memset(ctx, 0, sizeof(pfm_context_t));
		DPRINT(("alloc ctx @%p\n", ctx));
	}
	return ctx;
}

static void
pfm_context_free(pfm_context_t *ctx)
{
	if (ctx) {
		DPRINT(("free ctx @%p\n", ctx));
		kfree(ctx);
	}
}
static void
pfm_mask_monitoring(struct task_struct *task)
{
	pfm_context_t *ctx = PFM_GET_CTX(task);
	struct thread_struct *th = &task->thread;
	unsigned long mask, val, ovfl_mask;
	int i;

	DPRINT_ovfl(("masking monitoring for [%d]\n", task->pid));

	ovfl_mask = pmu_conf->ovfl_val;
	/*
	 * monitoring can only be masked as a result of a valid
	 * counter overflow. In UP, it means that the PMU still
	 * has an owner. Note that the owner can be different
	 * from the current task. However the PMU state belongs
	 * to the owner.
	 * In SMP, a valid overflow only happens when task is
	 * current. Therefore if we come here, we know that
	 * the PMU state belongs to the current task, therefore
	 * we can access the live registers.
	 *
	 * So in both cases, the live register contains the owner's
	 * state. We can ONLY touch the PMU registers and NOT the PSR.
	 *
	 * As a consequence to this call, the thread->pmds[] array
	 * contains stale information which must be ignored
	 * when context is reloaded AND monitoring is active (see
	 * pfm_restart).
	 */
	mask = ctx->ctx_used_pmds[0];
	for (i = 0; mask; i++, mask>>=1) {
		/* skip non used pmds */
		if ((mask & 0x1) == 0) continue;
		val = ia64_get_pmd(i);

		if (PMD_IS_COUNTING(i)) {
			/*
			 * we rebuild the full 64 bit value of the counter
			 */
			ctx->ctx_pmds[i].val += (val & ovfl_mask);
		} else {
			ctx->ctx_pmds[i].val = val;
		}
		DPRINT_ovfl(("pmd[%d]=0x%lx hw_pmd=0x%lx\n",
			i,
			ctx->ctx_pmds[i].val,
			val & ovfl_mask));
	}
	/*
	 * mask monitoring by setting the privilege level to 0
	 * we cannot use psr.pp/psr.up for this, it is controlled by
	 * the user
	 *
	 * if task is current, modify actual registers, otherwise modify
	 * thread save state, i.e., what will be restored in pfm_load_regs()
	 */
	mask = ctx->ctx_used_monitors[0] >> PMU_FIRST_COUNTER;
	for(i= PMU_FIRST_COUNTER; mask; i++, mask>>=1) {
		if ((mask & 0x1) == 0UL) continue;
		ia64_set_pmc(i, th->pmcs[i] & ~0xfUL);
		th->pmcs[i] &= ~0xfUL;
		DPRINT_ovfl(("pmc[%d]=0x%lx\n", i, th->pmcs[i]));
	}
	/*
	 * make all of this visible
	 */
	ia64_srlz_d();
}
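/*
 * Editor's note (not in the original source): the & ~0xfUL above clears
 * the 4-bit pmc.plm (privilege level mask) field. With plm == 0 the
 * counter matches no privilege level and effectively stops counting:
 *
 *	pmc[i] plm=0x3	-> monitoring at PL0 and PL3
 *	pmc[i] & ~0xfUL	-> plm=0, counter silenced
 *
 * This masks monitoring without touching psr.pp/psr.up, which remain
 * under user control.
 */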
/*
 * must always be done with task == current
 *
 * context must be in MASKED state when calling
 */
static void
pfm_restore_monitoring(struct task_struct *task)
{
	pfm_context_t *ctx = PFM_GET_CTX(task);
	struct thread_struct *th = &task->thread;
	unsigned long mask, ovfl_mask;
	unsigned long psr, val;
	int i, is_system;

	is_system = ctx->ctx_fl_system;
	ovfl_mask = pmu_conf->ovfl_val;

	if (task != current) {
		printk(KERN_ERR "perfmon.%d: invalid task[%d] current[%d]\n", __LINE__, task->pid, current->pid);
		return;
	}
	if (ctx->ctx_state != PFM_CTX_MASKED) {
		printk(KERN_ERR "perfmon.%d: task[%d] current[%d] invalid state=%d\n", __LINE__,
			task->pid, current->pid, ctx->ctx_state);
		return;
	}
	psr = pfm_get_psr();
	/*
	 * monitoring is masked via the PMC.
	 * As we restore their value, we do not want each counter to
	 * restart right away. We stop monitoring using the PSR,
	 * restore the PMC (and PMD) and then re-establish the psr
	 * as it was. Note that there can be no pending overflow at
	 * this point, because monitoring was MASKED.
	 *
	 * system-wide sessions are pinned and self-monitoring
	 */
	if (is_system && (PFM_CPUINFO_GET() & PFM_CPUINFO_DCR_PP)) {
		/* disable dcr pp */
		ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) & ~IA64_DCR_PP);
		pfm_clear_psr_pp();
	} else {
		pfm_clear_psr_up();
	}
	/*
	 * first, we restore the PMD
	 */
	mask = ctx->ctx_used_pmds[0];
	for (i = 0; mask; i++, mask>>=1) {
		/* skip non used pmds */
		if ((mask & 0x1) == 0) continue;

		if (PMD_IS_COUNTING(i)) {
			/*
			 * we split the 64bit value according to
			 * counter width
			 */
			val = ctx->ctx_pmds[i].val & ovfl_mask;
			ctx->ctx_pmds[i].val &= ~ovfl_mask;
		} else {
			val = ctx->ctx_pmds[i].val;
		}
		ia64_set_pmd(i, val);

		DPRINT(("pmd[%d]=0x%lx hw_pmd=0x%lx\n",
			i,
			ctx->ctx_pmds[i].val,
			val));
	}
	/*
	 * restore the PMCs
	 */
	mask = ctx->ctx_used_monitors[0] >> PMU_FIRST_COUNTER;
	for(i= PMU_FIRST_COUNTER; mask; i++, mask>>=1) {
		if ((mask & 0x1) == 0UL) continue;
		th->pmcs[i] = ctx->ctx_pmcs[i];
		ia64_set_pmc(i, th->pmcs[i]);
		DPRINT(("[%d] pmc[%d]=0x%lx\n", task->pid, i, th->pmcs[i]));
	}
	ia64_srlz_d();

	/*
	 * must restore DBR/IBR because could be modified while masked
	 * XXX: need to optimize
	 */
	if (ctx->ctx_fl_using_dbreg) {
		pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
		pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
	}

	/*
	 * now restore PSR
	 */
	if (is_system && (PFM_CPUINFO_GET() & PFM_CPUINFO_DCR_PP)) {
		/* enable dcr pp */
		ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) | IA64_DCR_PP);
		ia64_srlz_i();
	}
	pfm_set_psr_l(psr);
}
static void
pfm_save_pmds(unsigned long *pmds, unsigned long mask)
{
	int i;

	for (i=0; mask; i++, mask>>=1) {
		if (mask & 0x1) pmds[i] = ia64_get_pmd(i);
	}
}
/*
 * reload from thread state (used for ctxsw only)
 */
static void
pfm_restore_pmds(unsigned long *pmds, unsigned long mask)
{
	int i;
	unsigned long val, ovfl_val = pmu_conf->ovfl_val;

	for (i=0; mask; i++, mask>>=1) {
		if ((mask & 0x1) == 0) continue;
		val = PMD_IS_COUNTING(i) ? pmds[i] & ovfl_val : pmds[i];
		ia64_set_pmd(i, val);
	}
	ia64_srlz_d();
}
/*
 * propagate PMD from context to thread-state
 */
static void
pfm_copy_pmds(struct task_struct *task, pfm_context_t *ctx)
{
	struct thread_struct *thread = &task->thread;
	unsigned long ovfl_val = pmu_conf->ovfl_val;
	unsigned long mask = ctx->ctx_all_pmds[0];
	unsigned long val;
	int i;

	DPRINT(("mask=0x%lx\n", mask));

	for (i=0; mask; i++, mask>>=1) {

		val = ctx->ctx_pmds[i].val;

		/*
		 * We break up the 64 bit value into 2 pieces
		 * the lower bits go to the machine state in the
		 * thread (will be reloaded on ctxsw in).
		 * The upper part stays in the soft-counter.
		 */
		if (PMD_IS_COUNTING(i)) {
			ctx->ctx_pmds[i].val = val & ~ovfl_val;
			val &= ovfl_val;
		}
		thread->pmds[i] = val;

		DPRINT(("pmd[%d]=0x%lx soft_val=0x%lx\n",
			i,
			thread->pmds[i],
			ctx->ctx_pmds[i].val));
	}
}
/*
 * propagate PMC from context to thread-state
 */
static void
pfm_copy_pmcs(struct task_struct *task, pfm_context_t *ctx)
{
	struct thread_struct *thread = &task->thread;
	unsigned long mask = ctx->ctx_all_pmcs[0];
	int i;

	DPRINT(("mask=0x%lx\n", mask));

	for (i=0; mask; i++, mask>>=1) {
		/* masking 0 with ovfl_val yields 0 */
		thread->pmcs[i] = ctx->ctx_pmcs[i];
		DPRINT(("pmc[%d]=0x%lx\n", i, thread->pmcs[i]));
	}
}
static inline void
pfm_restore_pmcs(unsigned long *pmcs, unsigned long mask)
{
	int i;

	for (i=0; mask; i++, mask>>=1) {
		if ((mask & 0x1) == 0) continue;
		ia64_set_pmc(i, pmcs[i]);
	}
	ia64_srlz_d();
}

static inline int
pfm_uuid_cmp(pfm_uuid_t a, pfm_uuid_t b)
{
	return memcmp(a, b, sizeof(pfm_uuid_t));
}
static inline int
pfm_buf_fmt_exit(pfm_buffer_fmt_t *fmt, struct task_struct *task, void *buf, struct pt_regs *regs)
{
	int ret = 0;
	if (fmt->fmt_exit) ret = (*fmt->fmt_exit)(task, buf, regs);
	return ret;
}

static inline int
pfm_buf_fmt_getsize(pfm_buffer_fmt_t *fmt, struct task_struct *task, unsigned int flags, int cpu, void *arg, unsigned long *size)
{
	int ret = 0;
	if (fmt->fmt_getsize) ret = (*fmt->fmt_getsize)(task, flags, cpu, arg, size);
	return ret;
}

static inline int
pfm_buf_fmt_validate(pfm_buffer_fmt_t *fmt, struct task_struct *task, unsigned int flags,
		     int cpu, void *arg)
{
	int ret = 0;
	if (fmt->fmt_validate) ret = (*fmt->fmt_validate)(task, flags, cpu, arg);
	return ret;
}

static inline int
pfm_buf_fmt_init(pfm_buffer_fmt_t *fmt, struct task_struct *task, void *buf, unsigned int flags,
		 int cpu, void *arg)
{
	int ret = 0;
	if (fmt->fmt_init) ret = (*fmt->fmt_init)(task, buf, flags, cpu, arg);
	return ret;
}

static inline int
pfm_buf_fmt_restart(pfm_buffer_fmt_t *fmt, struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs)
{
	int ret = 0;
	if (fmt->fmt_restart) ret = (*fmt->fmt_restart)(task, ctrl, buf, regs);
	return ret;
}

static inline int
pfm_buf_fmt_restart_active(pfm_buffer_fmt_t *fmt, struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs)
{
	int ret = 0;
	if (fmt->fmt_restart_active) ret = (*fmt->fmt_restart_active)(task, ctrl, buf, regs);
	return ret;
}
static pfm_buffer_fmt_t *
__pfm_find_buffer_fmt(pfm_uuid_t uuid)
{
	struct list_head * pos;
	pfm_buffer_fmt_t * entry;

	list_for_each(pos, &pfm_buffer_fmt_list) {
		entry = list_entry(pos, pfm_buffer_fmt_t, fmt_list);
		if (pfm_uuid_cmp(uuid, entry->fmt_uuid) == 0)
			return entry;
	}
	return NULL;
}

/*
 * find a buffer format based on its uuid
 */
static pfm_buffer_fmt_t *
pfm_find_buffer_fmt(pfm_uuid_t uuid)
{
	pfm_buffer_fmt_t * fmt;
	spin_lock(&pfm_buffer_fmt_lock);
	fmt = __pfm_find_buffer_fmt(uuid);
	spin_unlock(&pfm_buffer_fmt_lock);
	return fmt;
}
int
pfm_register_buffer_fmt(pfm_buffer_fmt_t *fmt)
{
	int ret = 0;

	/* some sanity checks */
	if (fmt == NULL || fmt->fmt_name == NULL) return -EINVAL;

	/* we need at least a handler */
	if (fmt->fmt_handler == NULL) return -EINVAL;

	/*
	 * XXX: need check validity of fmt_arg_size
	 */

	spin_lock(&pfm_buffer_fmt_lock);

	if (__pfm_find_buffer_fmt(fmt->fmt_uuid)) {
		printk(KERN_ERR "perfmon: duplicate sampling format: %s\n", fmt->fmt_name);
		ret = -EBUSY;
		goto out;
	}
	list_add(&fmt->fmt_list, &pfm_buffer_fmt_list);
	printk(KERN_INFO "perfmon: added sampling format %s\n", fmt->fmt_name);

out:
	spin_unlock(&pfm_buffer_fmt_lock);
	return ret;
}
EXPORT_SYMBOL(pfm_register_buffer_fmt);
int
pfm_unregister_buffer_fmt(pfm_uuid_t uuid)
{
	pfm_buffer_fmt_t *fmt;
	int ret = 0;

	spin_lock(&pfm_buffer_fmt_lock);

	fmt = __pfm_find_buffer_fmt(uuid);
	if (!fmt) {
		printk(KERN_ERR "perfmon: cannot unregister format, not found\n");
		ret = -EINVAL;
		goto out;
	}
	list_del_init(&fmt->fmt_list);
	printk(KERN_INFO "perfmon: removed sampling format: %s\n", fmt->fmt_name);

out:
	spin_unlock(&pfm_buffer_fmt_lock);
	return ret;
}
EXPORT_SYMBOL(pfm_unregister_buffer_fmt);
extern void update_pal_halt_status(int);

static int
pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu)
{
	unsigned long flags;
	/*
	 * validity checks on cpu_mask have been done upstream
	 */
	LOCK_PFS(flags);

	DPRINT(("in sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
		pfm_sessions.pfs_sys_sessions,
		pfm_sessions.pfs_task_sessions,
		pfm_sessions.pfs_sys_use_dbregs,
		is_syswide,
		cpu));

	if (is_syswide) {
		/*
		 * cannot mix system wide and per-task sessions
		 */
		if (pfm_sessions.pfs_task_sessions > 0UL) {
			DPRINT(("system wide not possible, %u conflicting task_sessions\n",
				pfm_sessions.pfs_task_sessions));
			goto abort;
		}

		if (pfm_sessions.pfs_sys_session[cpu]) goto error_conflict;

		DPRINT(("reserving system wide session on CPU%u currently on CPU%u\n", cpu, smp_processor_id()));

		pfm_sessions.pfs_sys_session[cpu] = task;

		pfm_sessions.pfs_sys_sessions++;

	} else {
		if (pfm_sessions.pfs_sys_sessions) goto abort;
		pfm_sessions.pfs_task_sessions++;
	}

	DPRINT(("out sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
		pfm_sessions.pfs_sys_sessions,
		pfm_sessions.pfs_task_sessions,
		pfm_sessions.pfs_sys_use_dbregs,
		is_syswide,
		cpu));

	/*
	 * disable default_idle() to go to PAL_HALT
	 */
	update_pal_halt_status(0);

	UNLOCK_PFS(flags);

	return 0;

error_conflict:
	DPRINT(("system wide not possible, conflicting session [%d] on CPU%d\n",
		pfm_sessions.pfs_sys_session[cpu]->pid,
		cpu));
abort:
	UNLOCK_PFS(flags);

	return -EBUSY;
}
static int
pfm_unreserve_session(pfm_context_t *ctx, int is_syswide, unsigned int cpu)
{
	unsigned long flags;
	/*
	 * validity checks on cpu_mask have been done upstream
	 */
	LOCK_PFS(flags);

	DPRINT(("in sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
		pfm_sessions.pfs_sys_sessions,
		pfm_sessions.pfs_task_sessions,
		pfm_sessions.pfs_sys_use_dbregs,
		is_syswide,
		cpu));

	if (is_syswide) {
		pfm_sessions.pfs_sys_session[cpu] = NULL;
		/*
		 * would not work with perfmon+more than one bit in cpu_mask
		 */
		if (ctx && ctx->ctx_fl_using_dbreg) {
			if (pfm_sessions.pfs_sys_use_dbregs == 0) {
				printk(KERN_ERR "perfmon: invalid release for ctx %p sys_use_dbregs=0\n", ctx);
			} else {
				pfm_sessions.pfs_sys_use_dbregs--;
			}
		}
		pfm_sessions.pfs_sys_sessions--;
	} else {
		pfm_sessions.pfs_task_sessions--;
	}

	DPRINT(("out sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
		pfm_sessions.pfs_sys_sessions,
		pfm_sessions.pfs_task_sessions,
		pfm_sessions.pfs_sys_use_dbregs,
		is_syswide,
		cpu));

	/*
	 * if possible, enable default_idle() to go into PAL_HALT
	 */
	if (pfm_sessions.pfs_task_sessions == 0 && pfm_sessions.pfs_sys_sessions == 0)
		update_pal_halt_status(1);

	UNLOCK_PFS(flags);

	return 0;
}
/*
 * removes virtual mapping of the sampling buffer.
 * IMPORTANT: cannot be called with interrupts disabled, e.g. inside
 * a PROTECT_CTX() section.
 */
static int
pfm_remove_smpl_mapping(struct task_struct *task, void *vaddr, unsigned long size)
{
	int r;

	/* sanity checks */
	if (task->mm == NULL || size == 0UL || vaddr == NULL) {
		printk(KERN_ERR "perfmon: pfm_remove_smpl_mapping [%d] invalid context mm=%p\n", task->pid, task->mm);
		return -EINVAL;
	}

	DPRINT(("smpl_vaddr=%p size=%lu\n", vaddr, size));

	/*
	 * does the actual unmapping
	 */
	down_write(&task->mm->mmap_sem);

	DPRINT(("down_write done smpl_vaddr=%p size=%lu\n", vaddr, size));

	r = pfm_do_munmap(task->mm, (unsigned long)vaddr, size, 0);

	up_write(&task->mm->mmap_sem);
	if (r != 0) {
		printk(KERN_ERR "perfmon: [%d] unable to unmap sampling buffer @%p size=%lu\n", task->pid, vaddr, size);
	}

	DPRINT(("do_unmap(%p, %lu)=%d\n", vaddr, size, r));

	return 0;
}
/*
 * free actual physical storage used by sampling buffer
 */
static int
pfm_free_smpl_buffer(pfm_context_t *ctx)
{
	pfm_buffer_fmt_t *fmt;

	if (ctx->ctx_smpl_hdr == NULL) goto invalid_free;

	/*
	 * we won't use the buffer format anymore
	 */
	fmt = ctx->ctx_buf_fmt;

	DPRINT(("sampling buffer @%p size %lu vaddr=%p\n",
		ctx->ctx_smpl_hdr,
		ctx->ctx_smpl_size,
		ctx->ctx_smpl_vaddr));

	pfm_buf_fmt_exit(fmt, current, NULL, NULL);

	/*
	 * free the buffer
	 */
	pfm_rvfree(ctx->ctx_smpl_hdr, ctx->ctx_smpl_size);

	ctx->ctx_smpl_hdr  = NULL;
	ctx->ctx_smpl_size = 0UL;

	return 0;

invalid_free:
	printk(KERN_ERR "perfmon: pfm_free_smpl_buffer [%d] no buffer\n", current->pid);
	return -EINVAL;
}

static inline void
pfm_exit_smpl_buffer(pfm_buffer_fmt_t *fmt)
{
	if (fmt == NULL) return;

	pfm_buf_fmt_exit(fmt, current, NULL, NULL);
}
/*
 * pfmfs should _never_ be mounted by userland - too much of a security hassle,
 * no real gain from having the whole whorehouse mounted. So we don't need
 * any operations on the root directory. However, we need a non-trivial
 * d_name - pfm: will go nicely and kill the special-casing in procfs.
 */
static struct vfsmount *pfmfs_mnt;

static int __init
init_pfm_fs(void)
{
	int err = register_filesystem(&pfm_fs_type);
	if (!err) {
		pfmfs_mnt = kern_mount(&pfm_fs_type);
		err = PTR_ERR(pfmfs_mnt);
		if (IS_ERR(pfmfs_mnt))
			unregister_filesystem(&pfm_fs_type);
		else
			err = 0;
	}
	return err;
}

static void __exit
exit_pfm_fs(void)
{
	unregister_filesystem(&pfm_fs_type);
	mntput(pfmfs_mnt);
}
static ssize_t
pfm_read(struct file *filp, char __user *buf, size_t size, loff_t *ppos)
{
	pfm_context_t *ctx;
	pfm_msg_t *msg;
	ssize_t ret;
	unsigned long flags;
	DECLARE_WAITQUEUE(wait, current);
	if (PFM_IS_FILE(filp) == 0) {
		printk(KERN_ERR "perfmon: pfm_read: bad magic [%d]\n", current->pid);
		return -EINVAL;
	}

	ctx = (pfm_context_t *)filp->private_data;
	if (ctx == NULL) {
		printk(KERN_ERR "perfmon: pfm_read: NULL ctx [%d]\n", current->pid);
		return -EINVAL;
	}

	/*
	 * check even when there is no message
	 */
	if (size < sizeof(pfm_msg_t)) {
		DPRINT(("message is too small ctx=%p (>=%ld)\n", ctx, sizeof(pfm_msg_t)));
		return -EINVAL;
	}

	PROTECT_CTX(ctx, flags);

	/*
	 * put ourselves on the wait queue
	 */
	add_wait_queue(&ctx->ctx_msgq_wait, &wait);

	for(;;) {

		set_current_state(TASK_INTERRUPTIBLE);

		DPRINT(("head=%d tail=%d\n", ctx->ctx_msgq_head, ctx->ctx_msgq_tail));

		ret = 0;
		if(PFM_CTXQ_EMPTY(ctx) == 0) break;

		UNPROTECT_CTX(ctx, flags);

		/*
		 * check non-blocking read
		 */
		ret = -EAGAIN;
		if(filp->f_flags & O_NONBLOCK) break;

		/*
		 * check pending signals
		 */
		if(signal_pending(current)) {
			ret = -EINTR;
			break;
		}
		/*
		 * no message, so wait
		 */
		schedule();

		PROTECT_CTX(ctx, flags);
	}
	DPRINT(("[%d] back to running ret=%ld\n", current->pid, ret));
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&ctx->ctx_msgq_wait, &wait);

	if (ret < 0) goto abort;

	ret = -EINVAL;
	msg = pfm_get_next_msg(ctx);
	if (msg == NULL) {
		printk(KERN_ERR "perfmon: pfm_read no msg for ctx=%p [%d]\n", ctx, current->pid);
		goto abort_locked;
	}

	DPRINT(("fd=%d type=%d\n", msg->pfm_gen_msg.msg_ctx_fd, msg->pfm_gen_msg.msg_type));

	ret = -EFAULT;
	if(copy_to_user(buf, msg, sizeof(pfm_msg_t)) == 0) ret = sizeof(pfm_msg_t);

abort_locked:
	UNPROTECT_CTX(ctx, flags);
abort:
	return ret;
}
static ssize_t
pfm_write(struct file *file, const char __user *ubuf,
	  size_t size, loff_t *ppos)
{
	DPRINT(("pfm_write called\n"));
	return -EINVAL;
}
static unsigned int
pfm_poll(struct file *filp, poll_table * wait)
{
	pfm_context_t *ctx;
	unsigned long flags;
	unsigned int mask = 0;

	if (PFM_IS_FILE(filp) == 0) {
		printk(KERN_ERR "perfmon: pfm_poll: bad magic [%d]\n", current->pid);
		return 0;
	}

	ctx = (pfm_context_t *)filp->private_data;
	if (ctx == NULL) {
		printk(KERN_ERR "perfmon: pfm_poll: NULL ctx [%d]\n", current->pid);
		return 0;
	}

	DPRINT(("pfm_poll ctx_fd=%d before poll_wait\n", ctx->ctx_fd));

	poll_wait(filp, &ctx->ctx_msgq_wait, wait);

	PROTECT_CTX(ctx, flags);

	if (PFM_CTXQ_EMPTY(ctx) == 0)
		mask = POLLIN | POLLRDNORM;

	UNPROTECT_CTX(ctx, flags);

	DPRINT(("pfm_poll ctx_fd=%d mask=0x%x\n", ctx->ctx_fd, mask));

	return mask;
}
static int
pfm_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
{
	DPRINT(("pfm_ioctl called\n"));
	return -EINVAL;
}
/*
 * interrupt cannot be masked when coming here
 */
static inline int
pfm_do_fasync(int fd, struct file *filp, pfm_context_t *ctx, int on)
{
	int ret;

	ret = fasync_helper (fd, filp, on, &ctx->ctx_async_queue);

	DPRINT(("pfm_fasync called by [%d] on ctx_fd=%d on=%d async_queue=%p ret=%d\n",
		current->pid,
		fd,
		on,
		ctx->ctx_async_queue, ret));

	return ret;
}
static int
pfm_fasync(int fd, struct file *filp, int on)
{
	pfm_context_t *ctx;
	int ret;

	if (PFM_IS_FILE(filp) == 0) {
		printk(KERN_ERR "perfmon: pfm_fasync bad magic [%d]\n", current->pid);
		return -EBADF;
	}

	ctx = (pfm_context_t *)filp->private_data;
	if (ctx == NULL) {
		printk(KERN_ERR "perfmon: pfm_fasync NULL ctx [%d]\n", current->pid);
		return -EBADF;
	}
	/*
	 * we cannot mask interrupts during this call because this may
	 * go to sleep if memory is not readily available.
	 *
	 * We are protected from the context disappearing by the get_fd()/put_fd()
	 * done in caller. Serialization of this function is ensured by caller.
	 */
	ret = pfm_do_fasync(fd, filp, ctx, on);

	DPRINT(("pfm_fasync called on ctx_fd=%d on=%d async_queue=%p ret=%d\n",
		fd,
		on,
		ctx->ctx_async_queue, ret));

	return ret;
}
#ifdef CONFIG_SMP
/*
 * this function is exclusively called from pfm_close().
 * The context is not protected at that time, nor are interrupts
 * on the remote CPU. That's necessary to avoid deadlocks.
 */
static void
pfm_syswide_force_stop(void *info)
{
	pfm_context_t *ctx = (pfm_context_t *)info;
	struct pt_regs *regs = task_pt_regs(current);
	struct task_struct *owner;
	unsigned long flags;
	int ret;

	if (ctx->ctx_cpu != smp_processor_id()) {
		printk(KERN_ERR "perfmon: pfm_syswide_force_stop for CPU%d but on CPU%d\n",
			ctx->ctx_cpu,
			smp_processor_id());
		return;
	}
	owner = GET_PMU_OWNER();
	if (owner != ctx->ctx_task) {
		printk(KERN_ERR "perfmon: pfm_syswide_force_stop CPU%d unexpected owner [%d] instead of [%d]\n",
			smp_processor_id(),
			owner->pid, ctx->ctx_task->pid);
		return;
	}
	if (GET_PMU_CTX() != ctx) {
		printk(KERN_ERR "perfmon: pfm_syswide_force_stop CPU%d unexpected ctx %p instead of %p\n",
			smp_processor_id(),
			GET_PMU_CTX(), ctx);
		return;
	}

	DPRINT(("on CPU%d forcing system wide stop for [%d]\n", smp_processor_id(), ctx->ctx_task->pid));
	/*
	 * the context is already protected in pfm_close(), we simply
	 * need to mask interrupts to avoid a PMU interrupt race on
	 * this CPU
	 */
	local_irq_save(flags);

	ret = pfm_context_unload(ctx, NULL, 0, regs);
	if (ret) {
		DPRINT(("context_unload returned %d\n", ret));
	}

	/*
	 * unmask interrupts, PMU interrupts are now spurious here
	 */
	local_irq_restore(flags);
}

static void
pfm_syswide_cleanup_other_cpu(pfm_context_t *ctx)
{
	int ret;

	DPRINT(("calling CPU%d for cleanup\n", ctx->ctx_cpu));
	ret = smp_call_function_single(ctx->ctx_cpu, pfm_syswide_force_stop, ctx, 0, 1);
	DPRINT(("called CPU%d for cleanup ret=%d\n", ctx->ctx_cpu, ret));
}
#endif /* CONFIG_SMP */
/*
 * called for each close(). Partially free resources.
 * When caller is self-monitoring, the context is unloaded.
 */
static int
pfm_flush(struct file *filp, fl_owner_t id)
{
	pfm_context_t *ctx;
	struct task_struct *task;
	struct pt_regs *regs;
	unsigned long flags;
	unsigned long smpl_buf_size = 0UL;
	void *smpl_buf_vaddr = NULL;
	int state, is_system;

	if (PFM_IS_FILE(filp) == 0) {
		DPRINT(("bad magic\n"));
		return -EBADF;
	}

	ctx = (pfm_context_t *)filp->private_data;
	if (ctx == NULL) {
		printk(KERN_ERR "perfmon: pfm_flush: NULL ctx [%d]\n", current->pid);
		return -EBADF;
	}

	/*
	 * remove our file from the async queue, if we use this mode.
	 * This can be done without the context being protected. We come
	 * here when the context has become unreachable by other tasks.
	 *
	 * We may still have active monitoring at this point and we may
	 * end up in pfm_overflow_handler(). However, fasync_helper()
	 * operates with interrupts disabled and it cleans up the
	 * queue. If the PMU handler is called prior to entering
	 * fasync_helper() then it will send a signal. If it is
	 * invoked after, it will find an empty queue and no
	 * signal will be sent. In both cases, we are safe
	 */
	if (filp->f_flags & FASYNC) {
		DPRINT(("cleaning up async_queue=%p\n", ctx->ctx_async_queue));
		pfm_do_fasync (-1, filp, ctx, 0);
	}

	PROTECT_CTX(ctx, flags);

	state     = ctx->ctx_state;
	is_system = ctx->ctx_fl_system;

	task = PFM_CTX_TASK(ctx);
	regs = task_pt_regs(task);

	DPRINT(("ctx_state=%d is_current=%d\n",
		state,
		task == current ? 1 : 0));

	/*
	 * if state == UNLOADED, then task is NULL
	 */

	/*
	 * we must stop and unload because we are losing access to the context.
	 */
	if (task == current) {
#ifdef CONFIG_SMP
		/*
		 * the task IS the owner but it migrated to another CPU: that's bad
		 * but we must handle this cleanly. Unfortunately, the kernel does
		 * not provide a mechanism to block migration (while the context is loaded).
		 *
		 * We need to release the resource on the ORIGINAL cpu.
		 */
		if (is_system && ctx->ctx_cpu != smp_processor_id()) {

			DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
			/*
			 * keep context protected but unmask interrupt for IPI
			 */
			local_irq_restore(flags);

			pfm_syswide_cleanup_other_cpu(ctx);

			/*
			 * restore interrupt masking
			 */
			local_irq_save(flags);

			/*
			 * context is unloaded at this point
			 */
		} else
#endif /* CONFIG_SMP */
		{

			DPRINT(("forcing unload\n"));
			/*
			 * stop and unload, returning with state UNLOADED
			 * and session unreserved.
			 */
			pfm_context_unload(ctx, NULL, 0, regs);

			DPRINT(("ctx_state=%d\n", ctx->ctx_state));
		}
	}

	/*
	 * remove virtual mapping, if any, for the calling task.
	 * cannot reset ctx field until last user is calling close().
	 *
	 * ctx_smpl_vaddr must never be cleared because it is needed
	 * by every task with access to the context
	 *
	 * When called from do_exit(), the mm context is gone already, therefore
	 * mm is NULL, i.e., the VMA is already gone and we do not have to
	 * do anything here
	 */
	if (ctx->ctx_smpl_vaddr && current->mm) {
		smpl_buf_vaddr = ctx->ctx_smpl_vaddr;
		smpl_buf_size  = ctx->ctx_smpl_size;
	}

	UNPROTECT_CTX(ctx, flags);

	/*
	 * if there was a mapping, then we systematically remove it
	 * at this point. Cannot be done inside critical section
	 * because some VM function reenables interrupts.
	 */
	if (smpl_buf_vaddr) pfm_remove_smpl_mapping(current, smpl_buf_vaddr, smpl_buf_size);

	return 0;
}
/*
 * called either on explicit close() or from exit_files().
 * Only the LAST user of the file gets to this point, i.e., it is
 * called only ONCE.
 *
 * IMPORTANT: we get called ONLY when the refcnt on the file gets to zero
 * (fput()), i.e., last task to access the file. Nobody else can access the
 * file at this point.
 *
 * When called from exit_files(), the VMA has been freed because exit_mm()
 * is executed before exit_files().
 *
 * When called from exit_files(), the current task is not yet ZOMBIE but we
 * flush the PMU state to the context.
 */
static int
pfm_close(struct inode *inode, struct file *filp)
{
	pfm_context_t *ctx;
	struct task_struct *task;
	struct pt_regs *regs;
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;
	unsigned long smpl_buf_size = 0UL;
	void *smpl_buf_addr = NULL;
	int free_possible = 1;
	int state, is_system;

	DPRINT(("pfm_close called private=%p\n", filp->private_data));

	if (PFM_IS_FILE(filp) == 0) {
		DPRINT(("bad magic\n"));
		return -EBADF;
	}

	ctx = (pfm_context_t *)filp->private_data;
	if (ctx == NULL) {
		printk(KERN_ERR "perfmon: pfm_close: NULL ctx [%d]\n", current->pid);
		return -EBADF;
	}

	PROTECT_CTX(ctx, flags);

	state     = ctx->ctx_state;
	is_system = ctx->ctx_fl_system;

	task = PFM_CTX_TASK(ctx);
	regs = task_pt_regs(task);

	DPRINT(("ctx_state=%d is_current=%d\n",
		state,
		task == current ? 1 : 0));

	/*
	 * if task == current, then pfm_flush() unloaded the context
	 */
	if (state == PFM_CTX_UNLOADED) goto doit;

	/*
	 * context is loaded/masked and task != current, we need to
	 * either force an unload or go zombie
	 */

	/*
	 * The task is currently blocked or will block after an overflow.
	 * we must force it to wakeup to get out of the
	 * MASKED state and transition to the unloaded state by itself.
	 *
	 * This situation is only possible for per-task mode
	 */
	if (state == PFM_CTX_MASKED && CTX_OVFL_NOBLOCK(ctx) == 0) {

		/*
		 * set a "partial" zombie state to be checked
		 * upon return from down() in pfm_handle_work().
		 *
		 * We cannot use the ZOMBIE state, because it is checked
		 * by pfm_load_regs() which is called upon wakeup from down().
		 * In such case, it would free the context and then we would
		 * return to pfm_handle_work() which would access the
		 * stale context. Instead, we set a flag invisible to pfm_load_regs()
		 * but visible to pfm_handle_work().
		 *
		 * For some window of time, we have a zombie context with
		 * ctx_state = MASKED and not ZOMBIE
		 */
		ctx->ctx_fl_going_zombie = 1;

		/*
		 * force task to wake up from MASKED state
		 */
		complete(&ctx->ctx_restart_done);

		DPRINT(("waking up ctx_state=%d\n", state));

		/*
		 * put ourself to sleep waiting for the other
		 * task to report completion
		 *
		 * the context is protected by mutex, therefore there
		 * is no risk of being notified of completion before
		 * being actually on the waitq.
		 */
		set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&ctx->ctx_zombieq, &wait);

		UNPROTECT_CTX(ctx, flags);

		/*
		 * XXX: check for signals :
		 * 	- ok for explicit close
		 * 	- not ok when coming from exit_files()
		 */
		schedule();

		PROTECT_CTX(ctx, flags);

		remove_wait_queue(&ctx->ctx_zombieq, &wait);
		set_current_state(TASK_RUNNING);

		/*
		 * context is unloaded at this point
		 */
		DPRINT(("after zombie wakeup ctx_state=%d\n", state));
	}
	else if (task != current) {
#ifdef CONFIG_SMP
		/*
		 * switch context to zombie state
		 */
		ctx->ctx_state = PFM_CTX_ZOMBIE;

		DPRINT(("zombie ctx for [%d]\n", task->pid));
		/*
		 * cannot free the context on the spot. deferred until
		 * the task notices the ZOMBIE state
		 */
		free_possible = 0;
#else
		pfm_context_unload(ctx, NULL, 0, regs);
#endif
	}

doit:
	/* reload state, may have changed during opening of critical section */
	state = ctx->ctx_state;

	/*
	 * the context is still attached to a task (possibly current)
	 * we cannot destroy it right now
	 */

	/*
	 * we must free the sampling buffer right here because
	 * we cannot rely on it being cleaned up later by the
	 * monitored task. It is not possible to free vmalloc'ed
	 * memory in pfm_load_regs(). Instead, we remove the buffer
	 * now. should there be subsequent PMU overflows originally
	 * meant for sampling, they will be converted to spurious
	 * and that's fine because the monitoring tool is gone anyway.
	 */
	if (ctx->ctx_smpl_hdr) {
		smpl_buf_addr = ctx->ctx_smpl_hdr;
		smpl_buf_size = ctx->ctx_smpl_size;
		/* no more sampling */
		ctx->ctx_smpl_hdr = NULL;
		ctx->ctx_fl_is_sampling = 0;
	}

	DPRINT(("ctx_state=%d free_possible=%d addr=%p size=%lu\n",
		state,
		free_possible,
		smpl_buf_addr,
		smpl_buf_size));

	if (smpl_buf_addr) pfm_exit_smpl_buffer(ctx->ctx_buf_fmt);

	/*
	 * if state == UNLOADED, the session has already been unreserved.
	 */
	if (state == PFM_CTX_ZOMBIE) {
		pfm_unreserve_session(ctx, ctx->ctx_fl_system, ctx->ctx_cpu);
	}

	/*
	 * disconnect file descriptor from context must be done
	 * before we unlock.
	 */
	filp->private_data = NULL;

	/*
	 * if we free on the spot, the context is now completely unreachable
	 * from the callers side. The monitored task side is also cut, so we
	 * can freely cut.
	 *
	 * If we have a deferred free, only the caller side is disconnected.
	 */
	UNPROTECT_CTX(ctx, flags);

	/*
	 * All memory free operations (especially for vmalloc'ed memory)
	 * MUST be done with interrupts ENABLED.
	 */
	if (smpl_buf_addr) pfm_rvfree(smpl_buf_addr, smpl_buf_size);

	/*
	 * return the memory used by the context
	 */
	if (free_possible) pfm_context_free(ctx);

	return 0;
}
static int
pfm_no_open(struct inode *irrelevant, struct file *dontcare)
{
	DPRINT(("pfm_no_open called\n"));
	return -ENXIO;
}

static struct file_operations pfm_file_ops = {
	.llseek   = no_llseek,
	.read     = pfm_read,
	.write    = pfm_write,
	.poll     = pfm_poll,
	.ioctl    = pfm_ioctl,
	.open     = pfm_no_open,	/* special open code to disallow open via /proc */
	.fasync   = pfm_fasync,
	.release  = pfm_close,
	.flush    = pfm_flush
};

static int
pfmfs_delete_dentry(struct dentry *dentry)
{
	return 1;
}

static struct dentry_operations pfmfs_dentry_operations = {
	.d_delete = pfmfs_delete_dentry,
};
static int
pfm_alloc_fd(struct file **cfile)
{
	int fd, ret = 0;
	struct file *file = NULL;
	struct inode * inode;
	char name[32];
	struct qstr this;

	fd = get_unused_fd();
	if (fd < 0) return -ENFILE;

	ret = -ENFILE;

	file = get_empty_filp();
	if (!file) goto out;

	/*
	 * allocate a new inode
	 */
	inode = new_inode(pfmfs_mnt->mnt_sb);
	if (!inode) goto out;

	DPRINT(("new inode ino=%ld @%p\n", inode->i_ino, inode));

	inode->i_mode = S_IFCHR|S_IRUGO;
	inode->i_uid  = current->fsuid;
	inode->i_gid  = current->fsgid;

	sprintf(name, "[%lu]", inode->i_ino);
	this.name = name;
	this.len  = strlen(name);
	this.hash = inode->i_ino;

	ret = -ENOMEM;

	/*
	 * allocate a new dcache entry
	 */
	file->f_dentry = d_alloc(pfmfs_mnt->mnt_sb->s_root, &this);
	if (!file->f_dentry) goto out;

	file->f_dentry->d_op = &pfmfs_dentry_operations;

	d_add(file->f_dentry, inode);
	file->f_vfsmnt = mntget(pfmfs_mnt);
	file->f_mapping = inode->i_mapping;

	file->f_op    = &pfm_file_ops;
	file->f_mode  = FMODE_READ;
	file->f_flags = O_RDONLY;

	/*
	 * may have to delay until context is attached?
	 */
	fd_install(fd, file);

	/*
	 * the file structure we will use
	 */
	*cfile = file;

	return fd;
out:
	if (file) put_filp(file);
	put_unused_fd(fd);
	return ret;
}
static void
pfm_free_fd(int fd, struct file *file)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;

	/*
	 * there is no fd_uninstall(), so we do it here
	 */
	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	rcu_assign_pointer(fdt->fd[fd], NULL);
	spin_unlock(&files->file_lock);

	if (file)
		put_filp(file);
	put_unused_fd(fd);
}
static int
pfm_remap_buffer(struct vm_area_struct *vma, unsigned long buf, unsigned long addr, unsigned long size)
{
	DPRINT(("CPU%d buf=0x%lx addr=0x%lx size=%ld\n", smp_processor_id(), buf, addr, size));

	while (size > 0) {
		unsigned long pfn = ia64_tpa(buf) >> PAGE_SHIFT;

		if (remap_pfn_range(vma, addr, pfn, PAGE_SIZE, PAGE_READONLY))
			return -ENOMEM;

		addr += PAGE_SIZE;
		buf  += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
	return 0;
}
/*
 * allocate a sampling buffer and remaps it into the user address space of the task
 */
static int
pfm_smpl_buffer_alloc(struct task_struct *task, pfm_context_t *ctx, unsigned long rsize, void **user_vaddr)
{
	struct mm_struct *mm = task->mm;
	struct vm_area_struct *vma = NULL;
	unsigned long size;
	void *smpl_buf;

	/*
	 * the fixed header + requested size and align to page boundary
	 */
	size = PAGE_ALIGN(rsize);

	DPRINT(("sampling buffer rsize=%lu size=%lu bytes\n", rsize, size));

	/*
	 * check requested size to avoid Denial-of-service attacks
	 * XXX: may have to refine this test
	 * Check against address space limit.
	 *
	 * if ((mm->total_vm << PAGE_SHIFT) + len> task->rlim[RLIMIT_AS].rlim_cur)
	 * 	return -ENOMEM;
	 */
	if (size > task->signal->rlim[RLIMIT_MEMLOCK].rlim_cur)
		return -ENOMEM;

	/*
	 * We do the easy to undo allocations first.
	 *
	 * pfm_rvmalloc(), clears the buffer, so there is no leak
	 */
	smpl_buf = pfm_rvmalloc(size);
	if (smpl_buf == NULL) {
		DPRINT(("Can't allocate sampling buffer\n"));
		return -ENOMEM;
	}

	DPRINT(("smpl_buf @%p\n", smpl_buf));

	/* allocate vma */
	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
	if (!vma) {
		DPRINT(("Cannot allocate vma\n"));
		goto error_kmem;
	}
	memset(vma, 0, sizeof(*vma));

	/*
	 * partially initialize the vma for the sampling buffer
	 */
	vma->vm_mm	  = mm;
	vma->vm_flags	  = VM_READ| VM_MAYREAD |VM_RESERVED;
	vma->vm_page_prot = PAGE_READONLY; /* XXX may need to change */

	/*
	 * Now we have everything we need and we can initialize
	 * and connect all the data structures
	 */
	ctx->ctx_smpl_hdr  = smpl_buf;
	ctx->ctx_smpl_size = size; /* aligned size */

	/*
	 * Let's do the difficult operations next.
	 *
	 * now we atomically find some area in the address space and
	 * remap the buffer in it.
	 */
	down_write(&task->mm->mmap_sem);

	/* find some free area in address space, must have mmap sem held */
	vma->vm_start = pfm_get_unmapped_area(NULL, 0, size, 0, MAP_PRIVATE|MAP_ANONYMOUS, 0);
	if (vma->vm_start == 0UL) {
		DPRINT(("Cannot find unmapped area for size %ld\n", size));
		up_write(&task->mm->mmap_sem);
		goto error;
	}
	vma->vm_end   = vma->vm_start + size;
	vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT;

	DPRINT(("aligned size=%ld, hdr=%p mapped @0x%lx\n", size, ctx->ctx_smpl_hdr, vma->vm_start));

	/* can only be applied to current task, need to have the mm semaphore held when called */
	if (pfm_remap_buffer(vma, (unsigned long)smpl_buf, vma->vm_start, size)) {
		DPRINT(("Can't remap buffer\n"));
		up_write(&task->mm->mmap_sem);
		goto error;
	}

	/*
	 * now insert the vma in the vm list for the process, must be
	 * done with mmap lock held
	 */
	insert_vm_struct(mm, vma);

	mm->total_vm += size >> PAGE_SHIFT;
	vm_stat_account(vma->vm_mm, vma->vm_flags, vma->vm_file,
							vma_pages(vma));
	up_write(&task->mm->mmap_sem);

	/*
	 * keep track of user level virtual address
	 */
	ctx->ctx_smpl_vaddr = (void *)vma->vm_start;
	*(unsigned long *)user_vaddr = vma->vm_start;

	return 0;

error:
	kmem_cache_free(vm_area_cachep, vma);
error_kmem:
	pfm_rvfree(smpl_buf, size);

	return -ENOMEM;
}
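
/*
 * Worked example (editorial note, not part of the original source): with the
 * common 16KB IA-64 page size, a request of rsize=100000 bytes is rounded by
 * PAGE_ALIGN() to size=114688 (7 pages). The RLIMIT_MEMLOCK test above then
 * applies to the aligned size, so a caller whose locked-memory limit were,
 * say, 64KB would be refused before anything is allocated:
 *
 *	size = PAGE_ALIGN(100000);	// 114688 when PAGE_SIZE == 16384
 *	if (size > rlim_cur)		// 114688 > 65536 --> -ENOMEM
 *		return -ENOMEM;
 */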
/*
 * XXX: do something better here
 */
static int
pfm_bad_permissions(struct task_struct *task)
{
	/* inspired by ptrace_attach() */
	DPRINT(("cur: uid=%d gid=%d task: euid=%d suid=%d uid=%d egid=%d sgid=%d\n",
		current->uid,
		current->gid,
		task->euid,
		task->suid,
		task->uid,
		task->egid,
		task->sgid));

	return ((current->uid != task->euid)
	    || (current->uid != task->suid)
	    || (current->uid != task->uid)
	    || (current->gid != task->egid)
	    || (current->gid != task->sgid)
	    || (current->gid != task->gid)) && !capable(CAP_SYS_PTRACE);
}
static int
pfarg_is_sane(struct task_struct *task, pfarg_context_t *pfx)
{
	int ctx_flags;

	ctx_flags = pfx->ctx_flags;

	if (ctx_flags & PFM_FL_SYSTEM_WIDE) {
		/*
		 * cannot block in this mode
		 */
		if (ctx_flags & PFM_FL_NOTIFY_BLOCK) {
			DPRINT(("cannot use blocking mode when in system wide monitoring\n"));
			return -EINVAL;
		}
	}
	/* probably more to add here */

	return 0;
}
static int
pfm_setup_buffer_fmt(struct task_struct *task, pfm_context_t *ctx, unsigned int ctx_flags,
		     unsigned int cpu, pfarg_context_t *arg)
{
	pfm_buffer_fmt_t *fmt = NULL;
	unsigned long size = 0UL;
	void *uaddr = NULL;
	void *fmt_arg = NULL;
	int ret = 0;
#define PFM_CTXARG_BUF_ARG(a)	(pfm_buffer_fmt_t *)(a+1)

	/* invoke and lock buffer format, if found */
	fmt = pfm_find_buffer_fmt(arg->ctx_smpl_buf_id);
	if (fmt == NULL) {
		DPRINT(("[%d] cannot find buffer format\n", task->pid));
		return -EINVAL;
	}

	/*
	 * buffer argument MUST be contiguous to pfarg_context_t
	 */
	if (fmt->fmt_arg_size) fmt_arg = PFM_CTXARG_BUF_ARG(arg);

	ret = pfm_buf_fmt_validate(fmt, task, ctx_flags, cpu, fmt_arg);

	DPRINT(("[%d] after validate(0x%x,%d,%p)=%d\n", task->pid, ctx_flags, cpu, fmt_arg, ret));

	if (ret) goto error;

	/* link buffer format and context */
	ctx->ctx_buf_fmt = fmt;

	/*
	 * check if buffer format wants to use perfmon buffer allocation/mapping service
	 */
	ret = pfm_buf_fmt_getsize(fmt, task, ctx_flags, cpu, fmt_arg, &size);
	if (ret) goto error;

	if (size) {
		/*
		 * buffer is always remapped into the caller's address space
		 */
		ret = pfm_smpl_buffer_alloc(current, ctx, size, &uaddr);
		if (ret) goto error;

		/* keep track of user address of buffer */
		arg->ctx_smpl_vaddr = uaddr;
	}
	ret = pfm_buf_fmt_init(fmt, task, ctx->ctx_smpl_hdr, ctx_flags, cpu, fmt_arg);

error:
	return ret;
}
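
/*
 * Editorial sketch (not part of the original source) of the argument layout
 * assumed by PFM_CTXARG_BUF_ARG() above. The format-specific argument MUST be
 * contiguous to pfarg_context_t, so a caller builds one flat buffer:
 *
 *	struct {
 *		pfarg_context_t ctx_arg;	// generic part, parsed here
 *		my_smpl_arg_t   fmt_arg;	// format-specific part, handed
 *	} req;					// to the format callbacks
 *
 * my_smpl_arg_t is a placeholder name: each sampling format defines its own
 * argument type, and PFM_CTXARG_BUF_ARG(a) simply evaluates to (a + 1).
 */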
static void
pfm_reset_pmu_state(pfm_context_t *ctx)
{
	int i;

	/*
	 * install reset values for PMC.
	 */
	for (i=1; PMC_IS_LAST(i) == 0; i++) {
		if (PMC_IS_IMPL(i) == 0) continue;
		ctx->ctx_pmcs[i] = PMC_DFL_VAL(i);
		DPRINT(("pmc[%d]=0x%lx\n", i, ctx->ctx_pmcs[i]));
	}
	/*
	 * PMD registers are set to 0UL when the context is memset()
	 */

	/*
	 * On context switched restore, we must restore ALL pmc and ALL pmd even
	 * when they are not actively used by the task. In UP, the incoming process
	 * may otherwise pick up left over PMC, PMD state from the previous process.
	 * As opposed to PMD, stale PMC can cause harm to the incoming
	 * process because they may change what is being measured.
	 * Therefore, we must systematically reinstall the entire
	 * PMC state. In SMP, the same thing is possible on the
	 * same CPU but also between 2 CPUs.
	 *
	 * The problem with PMD is information leaking especially
	 * to user level when psr.sp=0
	 *
	 * There is unfortunately no easy way to avoid this problem
	 * on either UP or SMP. This definitively slows down the
	 * pfm_load_regs() function.
	 */

	/*
	 * bitmask of all PMCs accessible to this context
	 *
	 * PMC0 is treated differently.
	 */
	ctx->ctx_all_pmcs[0] = pmu_conf->impl_pmcs[0] & ~0x1;

	/*
	 * bitmask of all PMDs that are accessible to this context
	 */
	ctx->ctx_all_pmds[0] = pmu_conf->impl_pmds[0];

	DPRINT(("<%d> all_pmcs=0x%lx all_pmds=0x%lx\n", ctx->ctx_fd, ctx->ctx_all_pmcs[0],ctx->ctx_all_pmds[0]));

	/*
	 * useful in case of re-enable after disable
	 */
	ctx->ctx_used_ibrs[0] = 0UL;
	ctx->ctx_used_dbrs[0] = 0UL;
}
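
/*
 * Illustrative sketch (not in the original source): on a hypothetical PMU
 * where impl_pmcs[0] = 0x1f (PMC0-PMC4 implemented), the mask computed above
 * would be
 *
 *	ctx_all_pmcs[0] = 0x1f & ~0x1 = 0x1e
 *
 * i.e. PMC1-PMC4 become accessible to the context while PMC0, the overflow
 * status register, stays under exclusive control of the PMU interrupt
 * handler. The value 0x1f is made up; the real mask comes from pmu_conf.
 */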
static int
pfm_ctx_getsize(void *arg, size_t *sz)
{
	pfarg_context_t *req = (pfarg_context_t *)arg;
	pfm_buffer_fmt_t *fmt;

	*sz = 0;

	if (!pfm_uuid_cmp(req->ctx_smpl_buf_id, pfm_null_uuid)) return 0;

	fmt = pfm_find_buffer_fmt(req->ctx_smpl_buf_id);
	if (fmt == NULL) {
		DPRINT(("cannot find buffer format\n"));
		return -EINVAL;
	}
	/* get just enough to copy in user parameters */
	*sz = fmt->fmt_arg_size;
	DPRINT(("arg_size=%lu\n", *sz));

	return 0;
}
/*
 * cannot attach if :
 * 	- kernel task
 * 	- task not owned by caller
 * 	- task incompatible with context mode
 */
static int
pfm_task_incompatible(pfm_context_t *ctx, struct task_struct *task)
{
	/*
	 * no kernel task or task not owned by caller
	 */
	if (task->mm == NULL) {
		DPRINT(("task [%d] has no memory context (kernel thread)\n", task->pid));
		return -EPERM;
	}
	if (pfm_bad_permissions(task)) {
		DPRINT(("no permission to attach to [%d]\n", task->pid));
		return -EPERM;
	}
	/*
	 * cannot block in self-monitoring mode
	 */
	if (CTX_OVFL_NOBLOCK(ctx) == 0 && task == current) {
		DPRINT(("cannot load a blocking context on self for [%d]\n", task->pid));
		return -EINVAL;
	}

	if (task->exit_state == EXIT_ZOMBIE) {
		DPRINT(("cannot attach to zombie task [%d]\n", task->pid));
		return -EBUSY;
	}

	/*
	 * always ok for self
	 */
	if (task == current) return 0;

	if ((task->state != TASK_STOPPED) && (task->state != TASK_TRACED)) {
		DPRINT(("cannot attach to non-stopped task [%d] state=%ld\n", task->pid, task->state));
		return -EBUSY;
	}
	/*
	 * make sure the task is off any CPU
	 */
	wait_task_inactive(task);

	/* more to come... */

	return 0;
}
static int
pfm_get_task(pfm_context_t *ctx, pid_t pid, struct task_struct **task)
{
	struct task_struct *p = current;
	int ret;

	/* XXX: need to add more checks here */
	if (pid < 2) return -EPERM;

	if (pid != current->pid) {

		read_lock(&tasklist_lock);

		p = find_task_by_pid(pid);

		/* make sure task cannot go away while we operate on it */
		if (p) get_task_struct(p);

		read_unlock(&tasklist_lock);

		if (p == NULL) return -ESRCH;
	}

	ret = pfm_task_incompatible(ctx, p);
	if (ret == 0) {
		*task = p;
	} else if (p != current) {
		pfm_put_task(p);
	}
	return ret;
}
static int
pfm_context_create(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	pfarg_context_t *req = (pfarg_context_t *)arg;
	struct file *filp;
	int ctx_flags;
	int ret;

	/* let's check the arguments first */
	ret = pfarg_is_sane(current, req);
	if (ret < 0) return ret;

	ctx_flags = req->ctx_flags;

	ret = -ENOMEM;

	ctx = pfm_context_alloc();
	if (!ctx) goto error;

	ret = pfm_alloc_fd(&filp);
	if (ret < 0) goto error_file;

	req->ctx_fd = ctx->ctx_fd = ret;

	/*
	 * attach context to file
	 */
	filp->private_data = ctx;

	/*
	 * does the user want to sample?
	 */
	if (pfm_uuid_cmp(req->ctx_smpl_buf_id, pfm_null_uuid)) {
		ret = pfm_setup_buffer_fmt(current, ctx, ctx_flags, 0, req);
		if (ret) goto buffer_error;
	}

	/*
	 * init context protection lock
	 */
	spin_lock_init(&ctx->ctx_lock);

	/*
	 * context is unloaded
	 */
	ctx->ctx_state = PFM_CTX_UNLOADED;

	/*
	 * initialization of context's flags
	 */
	ctx->ctx_fl_block       = (ctx_flags & PFM_FL_NOTIFY_BLOCK) ? 1 : 0;
	ctx->ctx_fl_system      = (ctx_flags & PFM_FL_SYSTEM_WIDE) ? 1: 0;
	ctx->ctx_fl_is_sampling = ctx->ctx_buf_fmt ? 1 : 0; /* assume record() is defined */
	ctx->ctx_fl_no_msg      = (ctx_flags & PFM_FL_OVFL_NO_MSG) ? 1: 0;
	/*
	 * will move to set properties
	 * ctx->ctx_fl_excl_idle = (ctx_flags & PFM_FL_EXCL_IDLE) ? 1: 0;
	 */

	/*
	 * init restart semaphore to locked
	 */
	init_completion(&ctx->ctx_restart_done);

	/*
	 * activation is used in SMP only
	 */
	ctx->ctx_last_activation = PFM_INVALID_ACTIVATION;
	SET_LAST_CPU(ctx, -1);

	/*
	 * initialize notification message queue
	 */
	ctx->ctx_msgq_head = ctx->ctx_msgq_tail = 0;
	init_waitqueue_head(&ctx->ctx_msgq_wait);
	init_waitqueue_head(&ctx->ctx_zombieq);

	DPRINT(("ctx=%p flags=0x%x system=%d notify_block=%d excl_idle=%d no_msg=%d ctx_fd=%d \n",
		ctx,
		ctx_flags,
		ctx->ctx_fl_system,
		ctx->ctx_fl_block,
		ctx->ctx_fl_excl_idle,
		ctx->ctx_fl_no_msg,
		ctx->ctx_fd));

	/*
	 * initialize soft PMU state
	 */
	pfm_reset_pmu_state(ctx);

	return 0;

buffer_error:
	pfm_free_fd(ctx->ctx_fd, filp);

	if (ctx->ctx_buf_fmt) {
		pfm_buf_fmt_exit(ctx->ctx_buf_fmt, current, NULL, regs);
	}
error_file:
	pfm_context_free(ctx);

error:
	return ret;
}
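
/*
 * Hedged user-level sketch (not part of the original source): how a
 * monitoring tool would typically reach pfm_context_create(). The exact
 * wrapper varies between libpfm versions; this only illustrates the
 * command/argument pairing with the dispatch table defined further below.
 *
 *	pfarg_context_t ctx_arg;
 *
 *	memset(&ctx_arg, 0, sizeof(ctx_arg));
 *	// no sampling: leave ctx_smpl_buf_id as the null UUID
 *	if (perfmonctl(0, PFM_CREATE_CONTEXT, &ctx_arg, 1) == -1)
 *		err(1, "PFM_CREATE_CONTEXT");
 *	fd = ctx_arg.ctx_fd;	// descriptor returned by pfm_alloc_fd()
 */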
static inline unsigned long
pfm_new_counter_value (pfm_counter_t *reg, int is_long_reset)
{
	unsigned long val = is_long_reset ? reg->long_reset : reg->short_reset;
	unsigned long new_seed, old_seed = reg->seed, mask = reg->mask;
	extern unsigned long carta_random32 (unsigned long seed);

	if (reg->flags & PFM_REGFL_RANDOM) {
		new_seed = carta_random32(old_seed);
		val -= (old_seed & mask);	/* counter values are negative numbers! */
		if ((mask >> 32) != 0)
			/* construct a full 64-bit random value: */
			new_seed |= carta_random32(old_seed >> 32) << 32;
		reg->seed = new_seed;
	}
	reg->lval = val;
	return val;
}
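
/*
 * Worked example (editorial, not in the original source): counters are
 * programmed as negative numbers, so to overflow after ~1000 events a tool
 * loads val = -1000. With PFM_REGFL_RANDOM set and mask = 0xff, the code
 * above subtracts (seed & 0xff) from that reset value:
 *
 *	val  = (unsigned long)-1000;	// short or long reset value
 *	val -= (old_seed & 0xff);	// next period: 1000..1255 events
 *
 * which randomizes the sampling period and avoids locking onto periodic
 * behavior in the monitored code.
 */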
static void
pfm_reset_regs_masked(pfm_context_t *ctx, unsigned long *ovfl_regs, int is_long_reset)
{
	unsigned long mask = ovfl_regs[0];
	unsigned long reset_others = 0UL;
	unsigned long val;
	int i;

	/*
	 * now restore reset value on sampling overflowed counters
	 */
	mask >>= PMU_FIRST_COUNTER;
	for(i = PMU_FIRST_COUNTER; mask; i++, mask >>= 1) {

		if ((mask & 0x1UL) == 0UL) continue;

		ctx->ctx_pmds[i].val = val = pfm_new_counter_value(ctx->ctx_pmds + i, is_long_reset);
		reset_others        |= ctx->ctx_pmds[i].reset_pmds[0];

		DPRINT_ovfl((" %s reset ctx_pmds[%d]=%lx\n", is_long_reset ? "long" : "short", i, val));
	}

	/*
	 * Now take care of resetting the other registers
	 */
	for(i = 0; reset_others; i++, reset_others >>= 1) {

		if ((reset_others & 0x1) == 0) continue;

		ctx->ctx_pmds[i].val = val = pfm_new_counter_value(ctx->ctx_pmds + i, is_long_reset);

		DPRINT_ovfl(("%s reset_others pmd[%d]=%lx\n",
			  is_long_reset ? "long" : "short", i, val));
	}
}
static void
pfm_reset_regs(pfm_context_t *ctx, unsigned long *ovfl_regs, int is_long_reset)
{
	unsigned long mask = ovfl_regs[0];
	unsigned long reset_others = 0UL;
	unsigned long val;
	int i;

	DPRINT_ovfl(("ovfl_regs=0x%lx is_long_reset=%d\n", ovfl_regs[0], is_long_reset));

	if (ctx->ctx_state == PFM_CTX_MASKED) {
		pfm_reset_regs_masked(ctx, ovfl_regs, is_long_reset);
		return;
	}

	/*
	 * now restore reset value on sampling overflowed counters
	 */
	mask >>= PMU_FIRST_COUNTER;
	for(i = PMU_FIRST_COUNTER; mask; i++, mask >>= 1) {

		if ((mask & 0x1UL) == 0UL) continue;

		val           = pfm_new_counter_value(ctx->ctx_pmds + i, is_long_reset);
		reset_others |= ctx->ctx_pmds[i].reset_pmds[0];

		DPRINT_ovfl((" %s reset ctx_pmds[%d]=%lx\n", is_long_reset ? "long" : "short", i, val));

		pfm_write_soft_counter(ctx, i, val);
	}

	/*
	 * Now take care of resetting the other registers
	 */
	for(i = 0; reset_others; i++, reset_others >>= 1) {

		if ((reset_others & 0x1) == 0) continue;

		val = pfm_new_counter_value(ctx->ctx_pmds + i, is_long_reset);

		if (PMD_IS_COUNTING(i)) {
			pfm_write_soft_counter(ctx, i, val);
		} else {
			ia64_set_pmd(i, val);
		}
		DPRINT_ovfl(("%s reset_others pmd[%d]=%lx\n",
			  is_long_reset ? "long" : "short", i, val));
	}
	ia64_srlz_d();
}
static int
pfm_write_pmcs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	struct thread_struct *thread = NULL;
	struct task_struct *task;
	pfarg_reg_t *req = (pfarg_reg_t *)arg;
	unsigned long value, pmc_pm;
	unsigned long smpl_pmds, reset_pmds, impl_pmds;
	unsigned int cnum, reg_flags, flags, pmc_type;
	int i, can_access_pmu = 0, is_loaded, is_system, expert_mode;
	int is_monitor, is_counting, state;
	int ret = -EINVAL;
	pfm_reg_check_t	wr_func;
#define PFM_CHECK_PMC_PM(x, y, z) ((x)->ctx_fl_system ^ PMC_PM(y, z))

	state     = ctx->ctx_state;
	is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
	is_system = ctx->ctx_fl_system;
	task      = ctx->ctx_task;
	impl_pmds = pmu_conf->impl_pmds[0];

	if (state == PFM_CTX_ZOMBIE) return -EINVAL;

	if (is_loaded) {
		thread = &task->thread;
		/*
		 * In system wide and when the context is loaded, access can only happen
		 * when the caller is running on the CPU being monitored by the session.
		 * It does not have to be the owner (ctx_task) of the context per se.
		 */
		if (is_system && ctx->ctx_cpu != smp_processor_id()) {
			DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
			return -EBUSY;
		}
		can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;
	}
	expert_mode = pfm_sysctl.expert_mode;

	for (i = 0; i < count; i++, req++) {

		cnum       = req->reg_num;
		reg_flags  = req->reg_flags;
		value      = req->reg_value;
		smpl_pmds  = req->reg_smpl_pmds[0];
		reset_pmds = req->reg_reset_pmds[0];
		flags      = 0;

		if (cnum >= PMU_MAX_PMCS) {
			DPRINT(("pmc%u is invalid\n", cnum));
			goto error;
		}

		pmc_type    = pmu_conf->pmc_desc[cnum].type;
		pmc_pm      = (value >> pmu_conf->pmc_desc[cnum].pm_pos) & 0x1;
		is_counting = (pmc_type & PFM_REG_COUNTING) == PFM_REG_COUNTING ? 1 : 0;
		is_monitor  = (pmc_type & PFM_REG_MONITOR) == PFM_REG_MONITOR ? 1 : 0;

		/*
		 * we reject all non implemented PMC as well
		 * as attempts to modify PMC[0-3] which are used
		 * as status registers by the PMU
		 */
		if ((pmc_type & PFM_REG_IMPL) == 0 || (pmc_type & PFM_REG_CONTROL) == PFM_REG_CONTROL) {
			DPRINT(("pmc%u is unimplemented or no-access pmc_type=%x\n", cnum, pmc_type));
			goto error;
		}
		wr_func = pmu_conf->pmc_desc[cnum].write_check;
		/*
		 * If the PMC is a monitor, then if the value is not the default:
		 * 	- system-wide session: PMCx.pm=1 (privileged monitor)
		 * 	- per-task           : PMCx.pm=0 (user monitor)
		 */
		if (is_monitor && value != PMC_DFL_VAL(cnum) && is_system ^ pmc_pm) {
			DPRINT(("pmc%u pmc_pm=%lu is_system=%d\n",
				cnum,
				pmc_pm,
				is_system));
			goto error;
		}

		if (is_counting) {
			/*
			 * enforce generation of overflow interrupt. Necessary on all
			 * CPUs.
			 */
			value |= 1 << PMU_PMC_OI;

			if (reg_flags & PFM_REGFL_OVFL_NOTIFY) {
				flags |= PFM_REGFL_OVFL_NOTIFY;
			}

			if (reg_flags & PFM_REGFL_RANDOM) flags |= PFM_REGFL_RANDOM;

			/* verify validity of smpl_pmds */
			if ((smpl_pmds & impl_pmds) != smpl_pmds) {
				DPRINT(("invalid smpl_pmds 0x%lx for pmc%u\n", smpl_pmds, cnum));
				goto error;
			}

			/* verify validity of reset_pmds */
			if ((reset_pmds & impl_pmds) != reset_pmds) {
				DPRINT(("invalid reset_pmds 0x%lx for pmc%u\n", reset_pmds, cnum));
				goto error;
			}
		} else {
			if (reg_flags & (PFM_REGFL_OVFL_NOTIFY|PFM_REGFL_RANDOM)) {
				DPRINT(("cannot set ovfl_notify or random on pmc%u\n", cnum));
				goto error;
			}
			/* eventid on non-counting monitors are ignored */
		}

		/*
		 * execute write checker, if any
		 */
		if (likely(expert_mode == 0 && wr_func)) {
			ret = (*wr_func)(task, ctx, cnum, &value, regs);
			if (ret) goto error;
			ret = -EINVAL;
		}

		/*
		 * no error on this register
		 */
		PFM_REG_RETFLAG_SET(req->reg_flags, 0);

		/*
		 * Now we commit the changes to the software state
		 */

		/*
		 * update overflow information
		 */
		if (is_counting) {
			/*
			 * full flag update each time a register is programmed
			 */
			ctx->ctx_pmds[cnum].flags = flags;

			ctx->ctx_pmds[cnum].reset_pmds[0] = reset_pmds;
			ctx->ctx_pmds[cnum].smpl_pmds[0]  = smpl_pmds;
			ctx->ctx_pmds[cnum].eventid       = req->reg_smpl_eventid;

			/*
			 * Mark all PMDS to be accessed as used.
			 *
			 * We do not keep track of PMC because we have to
			 * systematically restore ALL of them.
			 *
			 * We do not update the used_monitors mask, because
			 * if we have not programmed them, then they will be in
			 * a quiescent state, therefore we will not need to
			 * mask/restore them when the context is MASKED.
			 */
			CTX_USED_PMD(ctx, reset_pmds);
			CTX_USED_PMD(ctx, smpl_pmds);
			/*
			 * make sure we do not try to reset on
			 * restart because we have established new values
			 */
			if (state == PFM_CTX_MASKED) ctx->ctx_ovfl_regs[0] &= ~1UL << cnum;
		}
		/*
		 * Needed in case the user does not initialize the equivalent
		 * PMD. Clearing is done indirectly via pfm_reset_pmu_state() so there is no
		 * possible leak here.
		 */
		CTX_USED_PMD(ctx, pmu_conf->pmc_desc[cnum].dep_pmd[0]);

		/*
		 * keep track of the monitor PMC that we are using.
		 * we save the value of the pmc in ctx_pmcs[] and if
		 * the monitoring is not stopped for the context we also
		 * place it in the saved state area so that it will be
		 * picked up later by the context switch code.
		 *
		 * The value in ctx_pmcs[] can only be changed in pfm_write_pmcs().
		 *
		 * The value in thread->pmcs[] may be modified on overflow, i.e., when
		 * monitoring needs to be stopped.
		 */
		if (is_monitor) CTX_USED_MONITOR(ctx, 1UL << cnum);

		/*
		 * update context state
		 */
		ctx->ctx_pmcs[cnum] = value;

		if (is_loaded) {
			/*
			 * write thread state
			 */
			if (is_system == 0) thread->pmcs[cnum] = value;

			/*
			 * write hardware register if we can
			 */
			if (can_access_pmu) {
				ia64_set_pmc(cnum, value);
			}
#ifdef CONFIG_SMP
			else {
				/*
				 * per-task SMP only here
				 *
				 * we are guaranteed that the task is not running on the other CPU,
				 * we indicate that this PMD will need to be reloaded if the task
				 * is rescheduled on the CPU it ran last on.
				 */
				ctx->ctx_reload_pmcs[0] |= 1UL << cnum;
			}
#endif
		}

		DPRINT(("pmc[%u]=0x%lx ld=%d apmu=%d flags=0x%x all_pmcs=0x%lx used_pmds=0x%lx eventid=%ld smpl_pmds=0x%lx reset_pmds=0x%lx reloads_pmcs=0x%lx used_monitors=0x%lx ovfl_regs=0x%lx\n",
			  cnum,
			  value,
			  is_loaded,
			  can_access_pmu,
			  flags,
			  ctx->ctx_all_pmcs[0],
			  ctx->ctx_used_pmds[0],
			  ctx->ctx_pmds[cnum].eventid,
			  smpl_pmds,
			  reset_pmds,
			  ctx->ctx_reload_pmcs[0],
			  ctx->ctx_used_monitors[0],
			  ctx->ctx_ovfl_regs[0]));
	}

	/*
	 * make sure the changes are visible
	 */
	if (can_access_pmu) ia64_srlz_d();

	return 0;
error:
	PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
	return ret;
}
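
/*
 * Hedged usage sketch (not part of the original source): programming one
 * counting monitor through this entry point. Register numbers and event
 * encodings are PMU-model specific; pmc4/pmd4 is only the conventional first
 * generic counter pair on Itanium, and event_encoding is a placeholder.
 *
 *	pfarg_reg_t pc;
 *
 *	memset(&pc, 0, sizeof(pc));
 *	pc.reg_num   = 4;			// pmc4 controls pmd4
 *	pc.reg_value = event_encoding;		// model-specific event select
 *	pc.reg_flags = PFM_REGFL_OVFL_NOTIFY;	// queue a message on overflow
 *	pc.reg_reset_pmds[0] = 1UL << 4;	// reset pmd4 after overflow
 *	perfmonctl(fd, PFM_WRITE_PMCS, &pc, 1);
 *
 * Note that the caller does not need to set pmc.oi: as the code above shows,
 * the overflow-interrupt bit is forced for counting monitors.
 */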
static int
pfm_write_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	struct thread_struct *thread = NULL;
	struct task_struct *task;
	pfarg_reg_t *req = (pfarg_reg_t *)arg;
	unsigned long value, hw_value, ovfl_mask;
	unsigned int cnum;
	int i, can_access_pmu = 0, state;
	int is_counting, is_loaded, is_system, expert_mode;
	int ret = -EINVAL;
	pfm_reg_check_t wr_func;

	state     = ctx->ctx_state;
	is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
	is_system = ctx->ctx_fl_system;
	ovfl_mask = pmu_conf->ovfl_val;
	task      = ctx->ctx_task;

	if (unlikely(state == PFM_CTX_ZOMBIE)) return -EINVAL;

	/*
	 * on both UP and SMP, we can only write to the PMC when the task is
	 * the owner of the local PMU.
	 */
	if (likely(is_loaded)) {
		thread = &task->thread;
		/*
		 * In system wide and when the context is loaded, access can only happen
		 * when the caller is running on the CPU being monitored by the session.
		 * It does not have to be the owner (ctx_task) of the context per se.
		 */
		if (unlikely(is_system && ctx->ctx_cpu != smp_processor_id())) {
			DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
			return -EBUSY;
		}
		can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;
	}
	expert_mode = pfm_sysctl.expert_mode;

	for (i = 0; i < count; i++, req++) {

		cnum  = req->reg_num;
		value = req->reg_value;

		if (!PMD_IS_IMPL(cnum)) {
			DPRINT(("pmd[%u] is unimplemented or invalid\n", cnum));
			goto abort_mission;
		}
		is_counting = PMD_IS_COUNTING(cnum);
		wr_func     = pmu_conf->pmd_desc[cnum].write_check;

		/*
		 * execute write checker, if any
		 */
		if (unlikely(expert_mode == 0 && wr_func)) {
			unsigned long v = value;

			ret = (*wr_func)(task, ctx, cnum, &v, regs);
			if (ret) goto abort_mission;

			value = v;
			ret   = -EINVAL;
		}

		/*
		 * no error on this register
		 */
		PFM_REG_RETFLAG_SET(req->reg_flags, 0);

		/*
		 * now commit changes to software state
		 */
		hw_value = value;

		/*
		 * update virtualized (64bits) counter
		 */
		if (is_counting) {
			/*
			 * write context state
			 */
			ctx->ctx_pmds[cnum].lval = value;

			/*
			 * when context is loaded we use the split value
			 */
			if (is_loaded) {
				hw_value = value &  ovfl_mask;
				value    = value & ~ovfl_mask;
			}
		}
		/*
		 * update reset values (not just for counters)
		 */
		ctx->ctx_pmds[cnum].long_reset  = req->reg_long_reset;
		ctx->ctx_pmds[cnum].short_reset = req->reg_short_reset;

		/*
		 * update randomization parameters (not just for counters)
		 */
		ctx->ctx_pmds[cnum].seed = req->reg_random_seed;
		ctx->ctx_pmds[cnum].mask = req->reg_random_mask;

		/*
		 * update context value
		 */
		ctx->ctx_pmds[cnum].val = value;

		/*
		 * Keep track of what we use
		 *
		 * We do not keep track of PMC because we have to
		 * systematically restore ALL of them.
		 */
		CTX_USED_PMD(ctx, PMD_PMD_DEP(cnum));

		/*
		 * mark this PMD register used as well
		 */
		CTX_USED_PMD(ctx, RDEP(cnum));

		/*
		 * make sure we do not try to reset on
		 * restart because we have established new values
		 */
		if (is_counting && state == PFM_CTX_MASKED) {
			ctx->ctx_ovfl_regs[0] &= ~1UL << cnum;
		}

		if (is_loaded) {
			/*
			 * write thread state
			 */
			if (is_system == 0) thread->pmds[cnum] = hw_value;

			/*
			 * write hardware register if we can
			 */
			if (can_access_pmu) {
				ia64_set_pmd(cnum, hw_value);
			} else {
#ifdef CONFIG_SMP
				/*
				 * we are guaranteed that the task is not running on the other CPU,
				 * we indicate that this PMD will need to be reloaded if the task
				 * is rescheduled on the CPU it ran last on.
				 */
				ctx->ctx_reload_pmds[0] |= 1UL << cnum;
#endif
			}
		}

		DPRINT(("pmd[%u]=0x%lx ld=%d apmu=%d, hw_value=0x%lx ctx_pmd=0x%lx short_reset=0x%lx "
			  "long_reset=0x%lx notify=%c seed=0x%lx mask=0x%lx used_pmds=0x%lx reset_pmds=0x%lx reload_pmds=0x%lx all_pmds=0x%lx ovfl_regs=0x%lx\n",
			cnum,
			value,
			is_loaded,
			can_access_pmu,
			hw_value,
			ctx->ctx_pmds[cnum].val,
			ctx->ctx_pmds[cnum].short_reset,
			ctx->ctx_pmds[cnum].long_reset,
			PMC_OVFL_NOTIFY(ctx, cnum) ? 'Y':'N',
			ctx->ctx_pmds[cnum].seed,
			ctx->ctx_pmds[cnum].mask,
			ctx->ctx_used_pmds[0],
			ctx->ctx_pmds[cnum].reset_pmds[0],
			ctx->ctx_reload_pmds[0],
			ctx->ctx_all_pmds[0],
			ctx->ctx_ovfl_regs[0]));
	}

	/*
	 * make changes visible
	 */
	if (can_access_pmu) ia64_srlz_d();

	return 0;

abort_mission:
	/*
	 * for now, we have only one possibility for error
	 */
	PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
	return ret;
}
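
/*
 * Worked example (editorial, not in the original source) of the split
 * performed above for a loaded counting PMD. Assuming a 47-bit hardware
 * counter, ovfl_mask = (1UL << 47) - 1, and a 64-bit virtual value v:
 *
 *	hw_value = v &  ovfl_mask;	// low 47 bits -> hardware PMD
 *	value    = v & ~ovfl_mask;	// high bits   -> ctx_pmds[].val
 *
 * pfm_read_pmds() below reverses this: it adds the live hardware bits back
 * to the software part, presenting a full 64-bit counter to user level.
 */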
/*
 * Because of PROTECT_CTX(), interrupts are masked while we are in this function.
 * Therefore we know we do not have to worry about the PMU overflow interrupt. If an
 * interrupt is delivered during the call, it will be kept pending until we leave, making
 * it appear as if it had been generated at the UNPROTECT_CTX(). At least we are
 * guaranteed to return consistent data to the user, it may simply be old. It is not
 * trivial to treat the overflow while inside the call because you may end up in
 * some module sampling buffer code causing deadlocks.
 */
static int
pfm_read_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	struct thread_struct *thread = NULL;
	struct task_struct *task;
	unsigned long val = 0UL, lval, ovfl_mask, sval;
	pfarg_reg_t *req = (pfarg_reg_t *)arg;
	unsigned int cnum, reg_flags = 0;
	int i, can_access_pmu = 0, state;
	int is_loaded, is_system, is_counting, expert_mode;
	int ret = -EINVAL;
	pfm_reg_check_t rd_func;

	/*
	 * access is possible when loaded only for
	 * self-monitoring tasks or in UP mode
	 */

	state     = ctx->ctx_state;
	is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
	is_system = ctx->ctx_fl_system;
	ovfl_mask = pmu_conf->ovfl_val;
	task      = ctx->ctx_task;

	if (state == PFM_CTX_ZOMBIE) return -EINVAL;

	if (likely(is_loaded)) {
		thread = &task->thread;
		/*
		 * In system wide and when the context is loaded, access can only happen
		 * when the caller is running on the CPU being monitored by the session.
		 * It does not have to be the owner (ctx_task) of the context per se.
		 */
		if (unlikely(is_system && ctx->ctx_cpu != smp_processor_id())) {
			DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
			return -EBUSY;
		}
		/*
		 * this can be true when not self-monitoring only in UP
		 */
		can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;

		if (can_access_pmu) ia64_srlz_d();
	}
	expert_mode = pfm_sysctl.expert_mode;

	DPRINT(("ld=%d apmu=%d ctx_state=%d\n",
		is_loaded,
		can_access_pmu,
		state));

	/*
	 * on both UP and SMP, we can only read the PMD from the hardware register when
	 * the task is the owner of the local PMU.
	 */
	for (i = 0; i < count; i++, req++) {

		cnum      = req->reg_num;
		reg_flags = req->reg_flags;

		if (unlikely(!PMD_IS_IMPL(cnum))) goto error;
		/*
		 * we can only read the registers that we use. That includes
		 * the ones we explicitly initialize AND the ones we want included
		 * in the sampling buffer (smpl_regs).
		 *
		 * Having this restriction allows optimization in the ctxsw routine
		 * without compromising security (leaks)
		 */
		if (unlikely(!CTX_IS_USED_PMD(ctx, cnum))) goto error;

		sval        = ctx->ctx_pmds[cnum].val;
		lval        = ctx->ctx_pmds[cnum].lval;
		is_counting = PMD_IS_COUNTING(cnum);

		/*
		 * If the task is not the current one, then we check if the
		 * PMU state is still in the local live register due to lazy ctxsw.
		 * If true, then we read directly from the registers.
		 */
		if (can_access_pmu){
			val = ia64_get_pmd(cnum);
		} else {
			/*
			 * context has been saved
			 * if context is zombie, then task does not exist anymore.
			 * In this case, we use the full value saved in the context (pfm_flush_regs()).
			 */
			val = is_loaded ? thread->pmds[cnum] : 0UL;
		}
		rd_func = pmu_conf->pmd_desc[cnum].read_check;

		if (is_counting) {
			/*
			 * XXX: need to check for overflow when loaded
			 */
			val &= ovfl_mask;
			val += sval;
		}

		/*
		 * execute read checker, if any
		 */
		if (unlikely(expert_mode == 0 && rd_func)) {
			unsigned long v = val;
			ret = (*rd_func)(ctx->ctx_task, ctx, cnum, &v, regs);
			if (ret) goto error;
			val = v;
			ret = -EINVAL;
		}

		PFM_REG_RETFLAG_SET(reg_flags, 0);

		DPRINT(("pmd[%u]=0x%lx\n", cnum, val));

		/*
		 * update register return value, abort all if problem during copy.
		 * we only modify the reg_flags field. no check mode is fine because
		 * access has been verified upfront in sys_perfmonctl().
		 */
		req->reg_value          = val;
		req->reg_flags          = reg_flags;
		req->reg_last_reset_val = lval;
	}

	return 0;

error:
	PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
	return ret;
}
int
pfm_mod_write_pmcs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
{
	pfm_context_t *ctx;

	if (req == NULL) return -EINVAL;

	ctx = GET_PMU_CTX();

	if (ctx == NULL) return -EINVAL;

	/*
	 * for now limit to current task, which is enough when calling
	 * from overflow handler
	 */
	if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;

	return pfm_write_pmcs(ctx, req, nreq, regs);
}
EXPORT_SYMBOL(pfm_mod_write_pmcs);
int
pfm_mod_read_pmds(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
{
	pfm_context_t *ctx;

	if (req == NULL) return -EINVAL;

	ctx = GET_PMU_CTX();

	if (ctx == NULL) return -EINVAL;

	/*
	 * for now limit to current task, which is enough when calling
	 * from overflow handler
	 */
	if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;

	return pfm_read_pmds(ctx, req, nreq, regs);
}
EXPORT_SYMBOL(pfm_mod_read_pmds);
/*
 * Only call this function when a process is trying to
 * write the debug registers (reading is always allowed)
 */
int
pfm_use_debug_registers(struct task_struct *task)
{
	pfm_context_t *ctx = task->thread.pfm_context;
	unsigned long flags;
	int ret = 0;

	if (pmu_conf->use_rr_dbregs == 0) return 0;

	DPRINT(("called for [%d]\n", task->pid));

	/*
	 * do it only once
	 */
	if (task->thread.flags & IA64_THREAD_DBG_VALID) return 0;

	/*
	 * Even on SMP, we do not need to use an atomic here because
	 * the only way in is via ptrace() and this is possible only when the
	 * process is stopped. Even in the case where the ctxsw out is not totally
	 * completed by the time we come here, there is no way the 'stopped' process
	 * could be in the middle of fiddling with the pfm_write_ibr_dbr() routine.
	 * So this is always safe.
	 */
	if (ctx && ctx->ctx_fl_using_dbreg == 1) return -1;

	LOCK_PFS(flags);

	/*
	 * We cannot allow setting breakpoints when system wide monitoring
	 * sessions are using the debug registers.
	 */
	if (pfm_sessions.pfs_sys_use_dbregs > 0)
		ret = -1;
	else
		pfm_sessions.pfs_ptrace_use_dbregs++;

	DPRINT(("ptrace_use_dbregs=%u sys_use_dbregs=%u by [%d] ret = %d\n",
		  pfm_sessions.pfs_ptrace_use_dbregs,
		  pfm_sessions.pfs_sys_use_dbregs,
		  task->pid, ret));

	UNLOCK_PFS(flags);

	return ret;
}
/*
 * This function is called for every task that exits with the
 * IA64_THREAD_DBG_VALID set. This indicates a task which was
 * able to use the debug registers for debugging purposes via
 * ptrace(). Therefore we know it was not using them for
 * performance monitoring, so we only decrement the number
 * of "ptraced" debug register users to keep the count up to date
 */
int
pfm_release_debug_registers(struct task_struct *task)
{
	unsigned long flags;
	int ret;

	if (pmu_conf->use_rr_dbregs == 0) return 0;

	LOCK_PFS(flags);
	if (pfm_sessions.pfs_ptrace_use_dbregs == 0) {
		printk(KERN_ERR "perfmon: invalid release for [%d] ptrace_use_dbregs=0\n", task->pid);
		ret = -1;
	} else {
		pfm_sessions.pfs_ptrace_use_dbregs--;
		ret = 0;
	}
	UNLOCK_PFS(flags);

	return ret;
}
static int
pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	struct task_struct *task;
	pfm_buffer_fmt_t *fmt;
	pfm_ovfl_ctrl_t rst_ctrl;
	int state, is_system;
	int ret = 0;

	state     = ctx->ctx_state;
	fmt       = ctx->ctx_buf_fmt;
	is_system = ctx->ctx_fl_system;
	task      = PFM_CTX_TASK(ctx);

	switch(state) {
		case PFM_CTX_MASKED:
			break;
		case PFM_CTX_LOADED:
			if (CTX_HAS_SMPL(ctx) && fmt->fmt_restart_active) break;
			/* fall through */
		case PFM_CTX_UNLOADED:
		case PFM_CTX_ZOMBIE:
			DPRINT(("invalid state=%d\n", state));
			return -EBUSY;
		default:
			DPRINT(("state=%d, cannot operate (no active_restart handler)\n", state));
			return -EINVAL;
	}

	/*
	 * In system wide and when the context is loaded, access can only happen
	 * when the caller is running on the CPU being monitored by the session.
	 * It does not have to be the owner (ctx_task) of the context per se.
	 */
	if (is_system && ctx->ctx_cpu != smp_processor_id()) {
		DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
		return -EBUSY;
	}

	/* sanity check */
	if (unlikely(task == NULL)) {
		printk(KERN_ERR "perfmon: [%d] pfm_restart no task\n", current->pid);
		return -EINVAL;
	}

	if (task == current || is_system) {

		fmt = ctx->ctx_buf_fmt;

		DPRINT(("restarting self %d ovfl=0x%lx\n",
			task->pid,
			ctx->ctx_ovfl_regs[0]));

		if (CTX_HAS_SMPL(ctx)) {

			prefetch(ctx->ctx_smpl_hdr);

			rst_ctrl.bits.mask_monitoring = 0;
			rst_ctrl.bits.reset_ovfl_pmds = 0;

			if (state == PFM_CTX_LOADED)
				ret = pfm_buf_fmt_restart_active(fmt, task, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
			else
				ret = pfm_buf_fmt_restart(fmt, task, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
		} else {
			rst_ctrl.bits.mask_monitoring = 0;
			rst_ctrl.bits.reset_ovfl_pmds = 1;
		}

		if (ret == 0) {
			if (rst_ctrl.bits.reset_ovfl_pmds)
				pfm_reset_regs(ctx, ctx->ctx_ovfl_regs, PFM_PMD_LONG_RESET);

			if (rst_ctrl.bits.mask_monitoring == 0) {
				DPRINT(("resuming monitoring for [%d]\n", task->pid));

				if (state == PFM_CTX_MASKED) pfm_restore_monitoring(task);
			} else {
				DPRINT(("keeping monitoring stopped for [%d]\n", task->pid));

				// cannot use pfm_stop_monitoring(task, regs);
			}
		}
		/*
		 * clear overflowed PMD mask to remove any stale information
		 */
		ctx->ctx_ovfl_regs[0] = 0UL;

		/*
		 * back to LOADED state
		 */
		ctx->ctx_state = PFM_CTX_LOADED;

		/*
		 * XXX: not really useful for self monitoring
		 */
		ctx->ctx_fl_can_restart = 0;

		return 0;
	}

	/*
	 * restart another task
	 */

	/*
	 * When PFM_CTX_MASKED, we cannot issue a restart before the previous
	 * one is seen by the task.
	 */
	if (state == PFM_CTX_MASKED) {
		if (ctx->ctx_fl_can_restart == 0) return -EINVAL;
		/*
		 * will prevent subsequent restart before this one is
		 * seen by other task
		 */
		ctx->ctx_fl_can_restart = 0;
	}

	/*
	 * if blocking, then post the semaphore if PFM_CTX_MASKED, i.e.
	 * the task is blocked or on its way to block. That's the normal
	 * restart path. If the monitoring is not masked, then the task
	 * can be actively monitoring and we cannot directly intervene.
	 * Therefore we use the trap mechanism to catch the task and
	 * force it to reset the buffer/reset PMDs.
	 *
	 * if non-blocking, then we ensure that the task will go into
	 * pfm_handle_work() before returning to user mode.
	 *
	 * We cannot explicitly reset another task, it MUST always
	 * be done by the task itself. This works for system wide because
	 * the tool that is controlling the session is logically doing
	 * "self-monitoring".
	 */
	if (CTX_OVFL_NOBLOCK(ctx) == 0 && state == PFM_CTX_MASKED) {
		DPRINT(("unblocking [%d] \n", task->pid));
		complete(&ctx->ctx_restart_done);
	} else {
		DPRINT(("[%d] armed exit trap\n", task->pid));

		ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_RESET;

		PFM_SET_WORK_PENDING(task, 1);

		pfm_set_task_notify(task);

		/*
		 * XXX: send reschedule if task runs on another CPU
		 */
	}
	return 0;
}
static int
pfm_debug(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	unsigned int m = *(unsigned int *)arg;

	pfm_sysctl.debug = m == 0 ? 0 : 1;

	printk(KERN_INFO "perfmon debugging %s (timing reset)\n", pfm_sysctl.debug ? "on" : "off");

	if (m == 0) {
		memset(pfm_stats, 0, sizeof(pfm_stats));
		for(m=0; m < NR_CPUS; m++) pfm_stats[m].pfm_ovfl_intr_cycles_min = ~0UL;
	}
	return 0;
}
/*
 * arg can be NULL and count can be zero for this function
 */
static int
pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	struct thread_struct *thread = NULL;
	struct task_struct *task;
	pfarg_dbreg_t *req = (pfarg_dbreg_t *)arg;
	unsigned long flags;
	dbreg_t dbreg;
	unsigned int rnum;
	int first_time;
	int ret = 0, state;
	int i, can_access_pmu = 0;
	int is_system, is_loaded;

	if (pmu_conf->use_rr_dbregs == 0) return -EINVAL;

	state     = ctx->ctx_state;
	is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
	is_system = ctx->ctx_fl_system;
	task      = ctx->ctx_task;

	if (state == PFM_CTX_ZOMBIE) return -EINVAL;

	/*
	 * on both UP and SMP, we can only write to the PMC when the task is
	 * the owner of the local PMU.
	 */
	if (is_loaded) {
		thread = &task->thread;
		/*
		 * In system wide and when the context is loaded, access can only happen
		 * when the caller is running on the CPU being monitored by the session.
		 * It does not have to be the owner (ctx_task) of the context per se.
		 */
		if (unlikely(is_system && ctx->ctx_cpu != smp_processor_id())) {
			DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
			return -EBUSY;
		}
		can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;
	}

	/*
	 * we do not need to check for ipsr.db because we do clear ibr.x, dbr.r, and dbr.w
	 * ensuring that no real breakpoint can be installed via this call.
	 *
	 * IMPORTANT: regs can be NULL in this function
	 */

	first_time = ctx->ctx_fl_using_dbreg == 0;

	/*
	 * don't bother if we are loaded and task is being debugged
	 */
	if (is_loaded && (thread->flags & IA64_THREAD_DBG_VALID) != 0) {
		DPRINT(("debug registers already in use for [%d]\n", task->pid));
		return -EBUSY;
	}

	/*
	 * check for debug registers in system wide mode
	 *
	 * Even though a check is done in pfm_context_load(),
	 * we must repeat it here, in case the registers are
	 * written after the context is loaded
	 */
	if (is_loaded) {
		LOCK_PFS(flags);

		if (first_time && is_system) {
			if (pfm_sessions.pfs_ptrace_use_dbregs)
				ret = -EBUSY;
			else
				pfm_sessions.pfs_sys_use_dbregs++;
		}
		UNLOCK_PFS(flags);
	}

	if (ret != 0) return ret;

	/*
	 * mark ourself as user of the debug registers for
	 * perfmon purposes.
	 */
	ctx->ctx_fl_using_dbreg = 1;

	/*
	 * clear hardware registers to make sure we don't
	 * pick up stale state.
	 *
	 * for a system wide session, we do not use
	 * thread.dbr, thread.ibr because this process
	 * never leaves the current CPU and the state
	 * is shared by all processes running on it
	 */
	if (first_time && can_access_pmu) {
		DPRINT(("[%d] clearing ibrs, dbrs\n", task->pid));
		for (i=0; i < pmu_conf->num_ibrs; i++) {
			ia64_set_ibr(i, 0UL);
			ia64_dv_serialize_instruction();
		}
		ia64_srlz_i();
		for (i=0; i < pmu_conf->num_dbrs; i++) {
			ia64_set_dbr(i, 0UL);
			ia64_dv_serialize_data();
		}
		ia64_srlz_d();
	}

	ret = -EINVAL;

	/*
	 * Now install the values into the registers
	 */
	for (i = 0; i < count; i++, req++) {

		rnum      = req->dbreg_num;
		dbreg.val = req->dbreg_value;

		if ((mode == PFM_CODE_RR && rnum >= PFM_NUM_IBRS) || ((mode == PFM_DATA_RR) && rnum >= PFM_NUM_DBRS)) {
			DPRINT(("invalid register %u val=0x%lx mode=%d i=%d count=%d\n",
				  rnum, dbreg.val, mode, i, count));

			goto abort_mission;
		}

		/*
		 * make sure we do not install enabled breakpoints
		 */
		if (rnum & 0x1) {
			if (mode == PFM_CODE_RR)
				dbreg.ibr.ibr_x = 0;
			else
				dbreg.dbr.dbr_r = dbreg.dbr.dbr_w = 0;
		}

		PFM_REG_RETFLAG_SET(req->dbreg_flags, 0);

		/*
		 * Debug registers, just like PMC, can only be modified
		 * by a kernel call. Moreover, perfmon() access to those
		 * registers are centralized in this routine. The hardware
		 * does not modify the value of these registers, therefore,
		 * if we save them as they are written, we can avoid having
		 * to save them on context switch out. This is made possible
		 * by the fact that when perfmon uses debug registers, ptrace()
		 * won't be able to modify them concurrently.
		 */
		if (mode == PFM_CODE_RR) {
			CTX_USED_IBR(ctx, rnum);

			if (can_access_pmu) {
				ia64_set_ibr(rnum, dbreg.val);
				ia64_dv_serialize_instruction();
			}

			ctx->ctx_ibrs[rnum] = dbreg.val;

			DPRINT(("write ibr%u=0x%lx used_ibrs=0x%x ld=%d apmu=%d\n",
				rnum, dbreg.val, ctx->ctx_used_ibrs[0], is_loaded, can_access_pmu));
		} else {
			CTX_USED_DBR(ctx, rnum);

			if (can_access_pmu) {
				ia64_set_dbr(rnum, dbreg.val);
				ia64_dv_serialize_data();
			}
			ctx->ctx_dbrs[rnum] = dbreg.val;

			DPRINT(("write dbr%u=0x%lx used_dbrs=0x%x ld=%d apmu=%d\n",
				rnum, dbreg.val, ctx->ctx_used_dbrs[0], is_loaded, can_access_pmu));
		}
	}

	return 0;

abort_mission:
	/*
	 * in case it was our first attempt, we undo the global modifications
	 */
	if (first_time) {
		LOCK_PFS(flags);
		if (ctx->ctx_fl_system) {
			pfm_sessions.pfs_sys_use_dbregs--;
		}
		UNLOCK_PFS(flags);
		ctx->ctx_fl_using_dbreg = 0;
	}
	/*
	 * install error return flag
	 */
	PFM_REG_RETFLAG_SET(req->dbreg_flags, PFM_REG_RETFL_EINVAL);

	return ret;
}
static int
pfm_write_ibrs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	return pfm_write_ibr_dbr(PFM_CODE_RR, ctx, arg, count, regs);
}

static int
pfm_write_dbrs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	return pfm_write_ibr_dbr(PFM_DATA_RR, ctx, arg, count, regs);
}
int
pfm_mod_write_ibrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
{
	pfm_context_t *ctx;

	if (req == NULL) return -EINVAL;

	ctx = GET_PMU_CTX();

	if (ctx == NULL) return -EINVAL;

	/*
	 * for now limit to current task, which is enough when calling
	 * from overflow handler
	 */
	if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;

	return pfm_write_ibrs(ctx, req, nreq, regs);
}
EXPORT_SYMBOL(pfm_mod_write_ibrs);
int
pfm_mod_write_dbrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
{
	pfm_context_t *ctx;

	if (req == NULL) return -EINVAL;

	ctx = GET_PMU_CTX();

	if (ctx == NULL) return -EINVAL;

	/*
	 * for now limit to current task, which is enough when calling
	 * from overflow handler
	 */
	if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;

	return pfm_write_dbrs(ctx, req, nreq, regs);
}
EXPORT_SYMBOL(pfm_mod_write_dbrs);
static int
pfm_get_features(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	pfarg_features_t *req = (pfarg_features_t *)arg;

	req->ft_version = PFM_VERSION;
	return 0;
}
static int
pfm_stop(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	struct pt_regs *tregs;
	struct task_struct *task = PFM_CTX_TASK(ctx);
	int state, is_system;

	state     = ctx->ctx_state;
	is_system = ctx->ctx_fl_system;

	/*
	 * context must be attached to issue the stop command (includes LOADED,MASKED,ZOMBIE)
	 */
	if (state == PFM_CTX_UNLOADED) return -EINVAL;

	/*
	 * In system wide and when the context is loaded, access can only happen
	 * when the caller is running on the CPU being monitored by the session.
	 * It does not have to be the owner (ctx_task) of the context per se.
	 */
	if (is_system && ctx->ctx_cpu != smp_processor_id()) {
		DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
		return -EBUSY;
	}
	DPRINT(("task [%d] ctx_state=%d is_system=%d\n",
		PFM_CTX_TASK(ctx)->pid,
		state,
		is_system));
	/*
	 * in system mode, we need to update the PMU directly
	 * and the user level state of the caller, which may not
	 * necessarily be the creator of the context.
	 */
	if (is_system) {
		/*
		 * Update local PMU first
		 *
		 * disable dcr pp
		 */
		ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) & ~IA64_DCR_PP);
		ia64_srlz_i();

		/*
		 * update local cpuinfo
		 */
		PFM_CPUINFO_CLEAR(PFM_CPUINFO_DCR_PP);

		/*
		 * stop monitoring, does srlz.i
		 */
		pfm_clear_psr_pp();

		/*
		 * stop monitoring in the caller
		 */
		ia64_psr(regs)->pp = 0;

		return 0;
	}

	/*
	 * per-task mode
	 */

	if (task == current) {
		/* stop monitoring at kernel level */
		pfm_clear_psr_up();

		/*
		 * stop monitoring at the user level
		 */
		ia64_psr(regs)->up = 0;
	} else {
		tregs = task_pt_regs(task);

		/*
		 * stop monitoring at the user level
		 */
		ia64_psr(tregs)->up = 0;

		/*
		 * monitoring disabled in kernel at next reschedule
		 */
		ctx->ctx_saved_psr_up = 0;
		DPRINT(("task=[%d]\n", task->pid));
	}
	return 0;
}
static int
pfm_start(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	struct pt_regs *tregs;
	int state, is_system;

	state     = ctx->ctx_state;
	is_system = ctx->ctx_fl_system;

	if (state != PFM_CTX_LOADED) return -EINVAL;

	/*
	 * In system wide and when the context is loaded, access can only happen
	 * when the caller is running on the CPU being monitored by the session.
	 * It does not have to be the owner (ctx_task) of the context per se.
	 */
	if (is_system && ctx->ctx_cpu != smp_processor_id()) {
		DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
		return -EBUSY;
	}

	/*
	 * in system mode, we need to update the PMU directly
	 * and the user level state of the caller, which may not
	 * necessarily be the creator of the context.
	 */
	if (is_system) {

		/*
		 * set user level psr.pp for the caller
		 */
		ia64_psr(regs)->pp = 1;

		/*
		 * now update the local PMU and cpuinfo
		 */
		PFM_CPUINFO_SET(PFM_CPUINFO_DCR_PP);

		/*
		 * start monitoring at kernel level
		 */
		pfm_set_psr_pp();

		/* enable dcr pp */
		ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) | IA64_DCR_PP);
		ia64_srlz_i();

		return 0;
	}

	/*
	 * per-task mode
	 */

	if (ctx->ctx_task == current) {

		/* start monitoring at kernel level */
		pfm_set_psr_up();

		/*
		 * activate monitoring at user level
		 */
		ia64_psr(regs)->up = 1;

	} else {
		tregs = task_pt_regs(ctx->ctx_task);

		/*
		 * start monitoring at the kernel level the next
		 * time the task is scheduled
		 */
		ctx->ctx_saved_psr_up = IA64_PSR_UP;

		/*
		 * activate monitoring at user level
		 */
		ia64_psr(tregs)->up = 1;
	}
	return 0;
}
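
/*
 * Editorial note (not in the original source) on the two enable bits driven
 * by pfm_stop()/pfm_start() above:
 *
 *	psr.up - per-task ("user") monitoring enable, toggled via
 *	         pfm_set_psr_up()/pfm_clear_psr_up() and the saved tregs
 *	psr.pp - "privileged" system-wide enable, paired with dcr.pp so that
 *	         anything dispatched on the monitored CPU inherits it
 *
 * This is why the system-wide paths touch _IA64_REG_CR_DCR while the
 * per-task paths only manipulate psr.up in the task's register frame.
 */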
static int
pfm_get_pmc_reset(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	pfarg_reg_t *req = (pfarg_reg_t *)arg;
	unsigned int cnum;
	int i;
	int ret = -EINVAL;

	for (i = 0; i < count; i++, req++) {

		cnum = req->reg_num;

		if (!PMC_IS_IMPL(cnum)) goto abort_mission;

		req->reg_value = PMC_DFL_VAL(cnum);

		PFM_REG_RETFLAG_SET(req->reg_flags, 0);

		DPRINT(("pmc_reset_val pmc[%u]=0x%lx\n", cnum, req->reg_value));
	}
	return 0;

abort_mission:
	PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
	return ret;
}
static int
pfm_check_task_exist(pfm_context_t *ctx)
{
	struct task_struct *g, *t;
	int ret = -ESRCH;

	read_lock(&tasklist_lock);

	do_each_thread (g, t) {
		if (t->thread.pfm_context == ctx) {
			ret = 0;
			break;
		}
	} while_each_thread (g, t);

	read_unlock(&tasklist_lock);

	DPRINT(("pfm_check_task_exist: ret=%d ctx=%p\n", ret, ctx));

	return ret;
}
static int
pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	struct task_struct *task;
	struct thread_struct *thread;
	struct pfm_context_t *old;
	unsigned long flags;
#ifndef CONFIG_SMP
	struct task_struct *owner_task = NULL;
#endif
	pfarg_load_t *req = (pfarg_load_t *)arg;
	unsigned long *pmcs_source, *pmds_source;
	int the_cpu;
	int ret = 0;
	int state, is_system, set_dbregs = 0;

	state     = ctx->ctx_state;
	is_system = ctx->ctx_fl_system;
	/*
	 * can only load from unloaded or terminated state
	 */
	if (state != PFM_CTX_UNLOADED) {
		DPRINT(("cannot load to [%d], invalid ctx_state=%d\n",
			req->load_pid,
			ctx->ctx_state));
		return -EBUSY;
	}

	DPRINT(("load_pid [%d] using_dbreg=%d\n", req->load_pid, ctx->ctx_fl_using_dbreg));

	if (CTX_OVFL_NOBLOCK(ctx) == 0 && req->load_pid == current->pid) {
		DPRINT(("cannot use blocking mode on self\n"));
		return -EINVAL;
	}

	ret = pfm_get_task(ctx, req->load_pid, &task);
	if (ret) {
		DPRINT(("load_pid [%d] get_task=%d\n", req->load_pid, ret));
		return ret;
	}

	ret = -EINVAL;

	/*
	 * system wide is self monitoring only
	 */
	if (is_system && task != current) {
		DPRINT(("system wide is self monitoring only load_pid=%d\n",
			req->load_pid));
		goto error;
	}

	thread = &task->thread;

	ret = 0;
	/*
	 * cannot load a context which is using range restrictions,
	 * into a task that is being debugged.
	 */
	if (ctx->ctx_fl_using_dbreg) {
		if (thread->flags & IA64_THREAD_DBG_VALID) {
			ret = -EBUSY;
			DPRINT(("load_pid [%d] task is debugged, cannot load range restrictions\n", req->load_pid));
			goto error;
		}
		LOCK_PFS(flags);

		if (is_system) {
			if (pfm_sessions.pfs_ptrace_use_dbregs) {
				DPRINT(("cannot load [%d] dbregs in use\n", task->pid));
				ret = -EBUSY;
			} else {
				pfm_sessions.pfs_sys_use_dbregs++;
				DPRINT(("load [%d] increased sys_use_dbreg=%u\n", task->pid, pfm_sessions.pfs_sys_use_dbregs));
				set_dbregs = 1;
			}
		}

		UNLOCK_PFS(flags);

		if (ret) goto error;
	}

	/*
	 * SMP system-wide monitoring implies self-monitoring.
	 *
	 * The programming model expects the task to
	 * be pinned on a CPU throughout the session.
	 * Here we take note of the current CPU at the
	 * time the context is loaded. No call from
	 * another CPU will be allowed.
	 *
	 * The pinning via sched_setaffinity()
	 * must be done by the calling task prior
	 * to this call.
	 *
	 * systemwide: keep track of CPU this session is supposed to run on
	 */
	the_cpu = ctx->ctx_cpu = smp_processor_id();

	/*
	 * now reserve the session
	 */
	ret = pfm_reserve_session(current, is_system, the_cpu);
	if (ret) goto error;

	/*
	 * task is necessarily stopped at this point.
	 *
	 * If the previous context was zombie, then it got removed in
	 * pfm_save_regs(). Therefore we should not see it here.
	 * If we see a context, then this is an active context
	 *
	 * XXX: needs to be atomic
	 */
	DPRINT(("before cmpxchg() old_ctx=%p new_ctx=%p\n",
		thread->pfm_context, ctx));

	ret = -EBUSY;
	old = ia64_cmpxchg(acq, &thread->pfm_context, NULL, ctx, sizeof(pfm_context_t *));
	if (old != NULL) {
		DPRINT(("load_pid [%d] already has a context\n", req->load_pid));
		goto error_unres;
	}

	pfm_reset_msgq(ctx);

	ctx->ctx_state = PFM_CTX_LOADED;

	/*
	 * link context to task
	 */
	ctx->ctx_task = task;

	if (is_system) {
		/*
		 * we load as stopped
		 */
		PFM_CPUINFO_SET(PFM_CPUINFO_SYST_WIDE);
		PFM_CPUINFO_CLEAR(PFM_CPUINFO_DCR_PP);

		if (ctx->ctx_fl_excl_idle) PFM_CPUINFO_SET(PFM_CPUINFO_EXCL_IDLE);
	} else {
		thread->flags |= IA64_THREAD_PM_VALID;
	}

	/*
	 * propagate into thread-state
	 */
	pfm_copy_pmds(task, ctx);
	pfm_copy_pmcs(task, ctx);

	pmcs_source = thread->pmcs;
	pmds_source = thread->pmds;

	/*
	 * always the case for system-wide
	 */
	if (task == current) {

		if (is_system == 0) {

			/* allow user level control */
			ia64_psr(regs)->sp = 0;
			DPRINT(("clearing psr.sp for [%d]\n", task->pid));

			SET_LAST_CPU(ctx, smp_processor_id());
			INC_ACTIVATION();
			SET_ACTIVATION(ctx);
#ifndef CONFIG_SMP
			/*
			 * push the other task out, if any
			 */
			owner_task = GET_PMU_OWNER();
			if (owner_task) pfm_lazy_save_regs(owner_task);
#endif
		}
		/*
		 * load all PMD from ctx to PMU (as opposed to thread state)
		 * restore all PMC from ctx to PMU
		 */
		pfm_restore_pmds(pmds_source, ctx->ctx_all_pmds[0]);
		pfm_restore_pmcs(pmcs_source, ctx->ctx_all_pmcs[0]);

		ctx->ctx_reload_pmcs[0] = 0UL;
		ctx->ctx_reload_pmds[0] = 0UL;

		/*
		 * guaranteed safe by earlier check against DBG_VALID
		 */
		if (ctx->ctx_fl_using_dbreg) {
			pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
			pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
		}
		/*
		 * set new ownership
		 */
		SET_PMU_OWNER(task, ctx);

		DPRINT(("context loaded on PMU for [%d]\n", task->pid));
	} else {
		/*
		 * when not current, task MUST be stopped, so this is safe
		 */
		regs = task_pt_regs(task);

		/* force a full reload */
		ctx->ctx_last_activation = PFM_INVALID_ACTIVATION;
		SET_LAST_CPU(ctx, -1);

		/* initial saved psr (stopped) */
		ctx->ctx_saved_psr_up = 0UL;
		ia64_psr(regs)->up = ia64_psr(regs)->pp = 0;
	}

	ret = 0;

error_unres:
	if (ret) pfm_unreserve_session(ctx, ctx->ctx_fl_system, the_cpu);
error:
	/*
	 * we must undo the dbregs setting (for system-wide)
	 */
	if (ret && set_dbregs) {
		LOCK_PFS(flags);
		pfm_sessions.pfs_sys_use_dbregs--;
		UNLOCK_PFS(flags);
	}
	/*
	 * release task, there is now a link with the context
	 */
	if (is_system == 0 && task != current) {
		pfm_put_task(task);

		if (ret == 0) {
			ret = pfm_check_task_exist(ctx);
			if (ret) {
				ctx->ctx_state = PFM_CTX_UNLOADED;
				ctx->ctx_task  = NULL;
			}
		}
	}
	return ret;
}
/*
 * in this function, we do not need to increase the use count
 * for the task via get_task_struct(), because we hold the
 * context lock. If the task were to disappear while having
 * a context attached, it would go through pfm_exit_thread()
 * which also grabs the context lock and would therefore be blocked
 * until we are here.
 */
static void pfm_flush_pmds(struct task_struct *, pfm_context_t *ctx);
static int
pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	struct task_struct *task = PFM_CTX_TASK(ctx);
	struct pt_regs *tregs;
	int prev_state, is_system;
	int ret;

	DPRINT(("ctx_state=%d task [%d]\n", ctx->ctx_state, task ? task->pid : -1));

	prev_state = ctx->ctx_state;
	is_system  = ctx->ctx_fl_system;

	/*
	 * unload only when necessary
	 */
	if (prev_state == PFM_CTX_UNLOADED) {
		DPRINT(("ctx_state=%d, nothing to do\n", prev_state));
		return 0;
	}

	/*
	 * clear psr and dcr bits
	 */
	ret = pfm_stop(ctx, NULL, 0, regs);
	if (ret) return ret;

	ctx->ctx_state = PFM_CTX_UNLOADED;

	/*
	 * in system mode, we need to update the PMU directly
	 * and the user level state of the caller, which may not
	 * necessarily be the creator of the context.
	 */
	if (is_system) {

		/*
		 * Update cpuinfo
		 *
		 * local PMU is taken care of in pfm_stop()
		 */
		PFM_CPUINFO_CLEAR(PFM_CPUINFO_SYST_WIDE);
		PFM_CPUINFO_CLEAR(PFM_CPUINFO_EXCL_IDLE);

		/*
		 * save PMDs in context
		 * release ownership
		 */
		pfm_flush_pmds(current, ctx);

		/*
		 * at this point we are done with the PMU
		 * so we can unreserve the resource.
		 */
		if (prev_state != PFM_CTX_ZOMBIE)
			pfm_unreserve_session(ctx, 1 , ctx->ctx_cpu);

		/*
		 * disconnect context from task
		 */
		task->thread.pfm_context = NULL;
		/*
		 * disconnect task from context
		 */
		ctx->ctx_task = NULL;

		/*
		 * There is nothing more to cleanup here.
		 */
		return 0;
	}

	/*
	 * per-task mode
	 */
	tregs = task == current ? regs : task_pt_regs(task);

	if (task == current) {
		/*
		 * cancel user level control
		 */
		ia64_psr(regs)->sp = 1;

		DPRINT(("setting psr.sp for [%d]\n", task->pid));
	}
	/*
	 * save PMDs to context
	 * release ownership
	 */
	pfm_flush_pmds(task, ctx);

	/*
	 * at this point we are done with the PMU
	 * so we can unreserve the resource.
	 *
	 * when state was ZOMBIE, we have already unreserved.
	 */
	if (prev_state != PFM_CTX_ZOMBIE)
		pfm_unreserve_session(ctx, 0 , ctx->ctx_cpu);

	/*
	 * reset activation counter and psr
	 */
	ctx->ctx_last_activation = PFM_INVALID_ACTIVATION;
	SET_LAST_CPU(ctx, -1);

	/*
	 * PMU state will not be restored
	 */
	task->thread.flags &= ~IA64_THREAD_PM_VALID;

	/*
	 * break links between context and task
	 */
	task->thread.pfm_context = NULL;
	ctx->ctx_task            = NULL;

	PFM_SET_WORK_PENDING(task, 0);

	ctx->ctx_fl_trap_reason  = PFM_TRAP_REASON_NONE;
	ctx->ctx_fl_can_restart  = 0;
	ctx->ctx_fl_going_zombie = 0;

	DPRINT(("disconnected [%d] from context\n", task->pid));

	return 0;
}
/*
 * called only from exit_thread(): task == current
 * we come here only if current has a context attached (loaded or masked)
 */
void
pfm_exit_thread(struct task_struct *task)
{
	pfm_context_t *ctx;
	unsigned long flags;
	struct pt_regs *regs = task_pt_regs(task);
	int ret, state;
	int free_ok = 0;

	ctx = PFM_GET_CTX(task);

	PROTECT_CTX(ctx, flags);

	DPRINT(("state=%d task [%d]\n", ctx->ctx_state, task->pid));

	state = ctx->ctx_state;
	switch(state) {
		case PFM_CTX_UNLOADED:
			/*
			 * only comes to this function if pfm_context is not NULL, i.e., cannot
			 * be in unloaded state
			 */
			printk(KERN_ERR "perfmon: pfm_exit_thread [%d] ctx unloaded\n", task->pid);
			break;
		case PFM_CTX_LOADED:
		case PFM_CTX_MASKED:
			ret = pfm_context_unload(ctx, NULL, 0, regs);
			if (ret) {
				printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task->pid, state, ret);
			}
			DPRINT(("ctx unloaded for current state was %d\n", state));

			pfm_end_notify_user(ctx);
			break;
		case PFM_CTX_ZOMBIE:
			ret = pfm_context_unload(ctx, NULL, 0, regs);
			if (ret) {
				printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task->pid, state, ret);
			}
			free_ok = 1;
			break;
		default:
			printk(KERN_ERR "perfmon: pfm_exit_thread [%d] unexpected state=%d\n", task->pid, state);
			break;
	}
	UNPROTECT_CTX(ctx, flags);

	{ u64 psr = pfm_get_psr();
	  BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP));
	  BUG_ON(GET_PMU_OWNER());
	  BUG_ON(ia64_psr(regs)->up);
	  BUG_ON(ia64_psr(regs)->pp);
	}

	/*
	 * All memory free operations (especially for vmalloc'ed memory)
	 * MUST be done with interrupts ENABLED.
	 */
	if (free_ok) pfm_context_free(ctx);
}
/*
 * functions MUST be listed in the increasing order of their index (see perfmon.h)
 */
#define PFM_CMD(name, flags, arg_count, arg_type, getsz) { name, #name, flags, arg_count, sizeof(arg_type), getsz }
#define PFM_CMD_S(name, flags) { name, #name, flags, 0, 0, NULL }
#define PFM_CMD_PCLRWS	(PFM_CMD_FD|PFM_CMD_ARG_RW|PFM_CMD_STOP)
#define PFM_CMD_PCLRW	(PFM_CMD_FD|PFM_CMD_ARG_RW)
#define PFM_CMD_NONE	{ NULL, "no-cmd", 0, 0, 0, NULL}

static pfm_cmd_desc_t pfm_cmd_tab[]={
/* 0  */PFM_CMD_NONE,
/* 1  */PFM_CMD(pfm_write_pmcs, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
/* 2  */PFM_CMD(pfm_write_pmds, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
/* 3  */PFM_CMD(pfm_read_pmds, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
/* 4  */PFM_CMD_S(pfm_stop, PFM_CMD_PCLRWS),
/* 5  */PFM_CMD_S(pfm_start, PFM_CMD_PCLRWS),
/* 6  */PFM_CMD_NONE,
/* 7  */PFM_CMD_NONE,
/* 8  */PFM_CMD(pfm_context_create, PFM_CMD_ARG_RW, 1, pfarg_context_t, pfm_ctx_getsize),
/* 9  */PFM_CMD_NONE,
/* 10 */PFM_CMD_S(pfm_restart, PFM_CMD_PCLRW),
/* 11 */PFM_CMD_NONE,
/* 12 */PFM_CMD(pfm_get_features, PFM_CMD_ARG_RW, 1, pfarg_features_t, NULL),
/* 13 */PFM_CMD(pfm_debug, 0, 1, unsigned int, NULL),
/* 14 */PFM_CMD_NONE,
/* 15 */PFM_CMD(pfm_get_pmc_reset, PFM_CMD_ARG_RW, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
/* 16 */PFM_CMD(pfm_context_load, PFM_CMD_PCLRWS, 1, pfarg_load_t, NULL),
/* 17 */PFM_CMD_S(pfm_context_unload, PFM_CMD_PCLRWS),
/* 18 */PFM_CMD_NONE,
/* 19 */PFM_CMD_NONE,
/* 20 */PFM_CMD_NONE,
/* 21 */PFM_CMD_NONE,
/* 22 */PFM_CMD_NONE,
/* 23 */PFM_CMD_NONE,
/* 24 */PFM_CMD_NONE,
/* 25 */PFM_CMD_NONE,
/* 26 */PFM_CMD_NONE,
/* 27 */PFM_CMD_NONE,
/* 28 */PFM_CMD_NONE,
/* 29 */PFM_CMD_NONE,
/* 30 */PFM_CMD_NONE,
/* 31 */PFM_CMD_NONE,
/* 32 */PFM_CMD(pfm_write_ibrs, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_dbreg_t, NULL),
/* 33 */PFM_CMD(pfm_write_dbrs, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_dbreg_t, NULL)
};
#define PFM_CMD_COUNT	(sizeof(pfm_cmd_tab)/sizeof(pfm_cmd_desc_t))
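
/*
 * Hedged illustration (not part of the original source) of how the table
 * above is consumed. sys_perfmonctl(), defined below, uses the command index
 * to look up the handler and per-request argument size, so a PFM_WRITE_PMDS
 * call with three requests is, schematically:
 *
 *	pfarg_reg_t pd[3];			// count = 3, MANY-arg command
 *	// ... fill in pd[0..2] ...
 *	perfmonctl(fd, PFM_WRITE_PMDS, pd, 3);
 *	// kernel side: func = pfm_cmd_tab[PFM_WRITE_PMDS].cmd_func,
 *	//              sz   = sizeof(pfarg_reg_t) * 3 copied in, then
 *	//              (*func)(ctx, args_k, 3, regs)
 *
 * PFM_CMD_ARG_MANY entries accept any positive count; fixed-narg entries
 * must match exactly, as checked in sys_perfmonctl().
 */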
static int
pfm_check_task_state(pfm_context_t *ctx, int cmd, unsigned long flags)
{
	struct task_struct *task;
	int state, old_state;

recheck:
	state = ctx->ctx_state;
	task  = ctx->ctx_task;

	if (task == NULL) {
		DPRINT(("context %d no task, state=%d\n", ctx->ctx_fd, state));
		return 0;
	}

	DPRINT(("context %d state=%d [%d] task_state=%ld must_stop=%d\n",
		ctx->ctx_fd,
		state,
		task->pid,
		task->state, PFM_CMD_STOPPED(cmd)));

	/*
	 * self-monitoring always ok.
	 *
	 * for system-wide the caller can either be the creator of the
	 * context (i.e., the one to which the context is attached) OR
	 * a task running on the same CPU as the session.
	 */
	if (task == current || ctx->ctx_fl_system) return 0;

	/*
	 * we are monitoring another thread
	 */
	switch(state) {
		case PFM_CTX_UNLOADED:
			/*
			 * if context is UNLOADED we are safe to go
			 */
			return 0;
		case PFM_CTX_ZOMBIE:
			/*
			 * no command can operate on a zombie context
			 */
			DPRINT(("cmd %d state zombie cannot operate on context\n", cmd));
			return -EINVAL;
		case PFM_CTX_MASKED:
			/*
			 * PMU state has been saved to software even though
			 * the thread may still be running.
			 */
			if (cmd != PFM_UNLOAD_CONTEXT) return 0;
	}
	/*
	 * context is LOADED or MASKED. Some commands may need to have
	 * the task stopped.
	 *
	 * We could lift this restriction for UP but it would mean that
	 * the user has no guarantee the task would not run between
	 * two successive calls to perfmonctl(). That's probably OK.
	 * If this user wants to ensure the task does not run, then
	 * the task must be stopped.
	 */
	if (PFM_CMD_STOPPED(cmd)) {
		if ((task->state != TASK_STOPPED) && (task->state != TASK_TRACED)) {
			DPRINT(("[%d] task not in stopped state\n", task->pid));
			return -EBUSY;
		}
		/*
		 * task is now stopped, wait for ctxsw out
		 *
		 * This is an interesting point in the code.
		 * We need to unprotect the context because
		 * the pfm_save_regs() routine needs to grab
		 * the same lock. There is danger in doing
		 * this because it leaves a window open for
		 * another task to get access to the context
		 * and possibly change its state. The one thing
		 * that is not possible is for the context to disappear
		 * because we are protected by the VFS layer, i.e.,
		 * get_fd()/put_fd().
		 */
		old_state = state;

		UNPROTECT_CTX(ctx, flags);

		wait_task_inactive(task);

		PROTECT_CTX(ctx, flags);

		/*
		 * we must recheck to verify if state has changed
		 */
		if (ctx->ctx_state != old_state) {
			DPRINT(("old_state=%d new_state=%d\n", old_state, ctx->ctx_state));
			goto recheck;
		}
	}
	return 0;
}
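
/*
 * Usage note: because of the TASK_STOPPED/TASK_TRACED test above, a tool
 * monitoring *another* thread must stop it first (e.g. via
 * ptrace(PTRACE_ATTACH, ...) or SIGSTOP + waitpid()) before issuing any
 * command carrying PFM_CMD_STOP, such as PFM_WRITE_PMCS on its context.
 */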
/*
 * system-call entry point (must return long)
 */
asmlinkage long
sys_perfmonctl (int fd, int cmd, void __user *arg, int count)
{
	struct file *file = NULL;
	pfm_context_t *ctx = NULL;
	unsigned long flags = 0UL;
	void *args_k = NULL;
	long ret; /* will expand int return types */
	size_t base_sz, sz, xtra_sz = 0;
	int narg, completed_args = 0, call_made = 0, cmd_flags;
	int (*func)(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
	int (*getsize)(void *arg, size_t *sz);
#define PFM_MAX_ARGSIZE	4096

	/*
	 * reject any call if perfmon was disabled at initialization
	 */
	if (unlikely(pmu_conf == NULL)) return -ENOSYS;

	if (unlikely(cmd < 0 || cmd >= PFM_CMD_COUNT)) {
		DPRINT(("invalid cmd=%d\n", cmd));
		return -EINVAL;
	}

	func      = pfm_cmd_tab[cmd].cmd_func;
	narg      = pfm_cmd_tab[cmd].cmd_narg;
	base_sz   = pfm_cmd_tab[cmd].cmd_argsize;
	getsize   = pfm_cmd_tab[cmd].cmd_getsize;
	cmd_flags = pfm_cmd_tab[cmd].cmd_flags;

	if (unlikely(func == NULL)) {
		DPRINT(("invalid cmd=%d\n", cmd));
		return -EINVAL;
	}

	DPRINT(("cmd=%s idx=%d narg=0x%x argsz=%lu count=%d\n",
		PFM_CMD_NAME(cmd),
		cmd,
		narg,
		base_sz,
		count));

	/*
	 * check if number of arguments matches what the command expects
	 */
	if (unlikely((narg == PFM_CMD_ARG_MANY && count <= 0) || (narg > 0 && narg != count)))
		return -EINVAL;

restart_args:
	sz = xtra_sz + base_sz*count;
	/*
	 * limit abuse to min page size
	 */
	if (unlikely(sz > PFM_MAX_ARGSIZE)) {
		printk(KERN_ERR "perfmon: [%d] argument too big %lu\n", current->pid, sz);
		return -E2BIG;
	}

	/*
	 * allocate default-sized argument buffer
	 */
	if (likely(count && args_k == NULL)) {
		args_k = kmalloc(PFM_MAX_ARGSIZE, GFP_KERNEL);
		if (args_k == NULL) return -ENOMEM;
	}

	ret = -EFAULT;

	/*
	 * copy arguments
	 *
	 * assume sz = 0 for command without parameters
	 */
	if (sz && copy_from_user(args_k, arg, sz)) {
		DPRINT(("cannot copy_from_user %lu bytes @%p\n", sz, arg));
		goto error_args;
	}

	/*
	 * check if command supports extra parameters
	 */
	if (completed_args == 0 && getsize) {
		/*
		 * get extra parameters size (based on main argument)
		 */
		ret = (*getsize)(args_k, &xtra_sz);
		if (ret) goto error_args;

		completed_args = 1;

		DPRINT(("restart_args sz=%lu xtra_sz=%lu\n", sz, xtra_sz));

		/* retry if necessary */
		if (likely(xtra_sz)) goto restart_args;
	}

	if (unlikely((cmd_flags & PFM_CMD_FD) == 0)) goto skip_fd;

	ret = -EBADF;

	file = fget(fd);
	if (unlikely(file == NULL)) {
		DPRINT(("invalid fd %d\n", fd));
		goto error_args;
	}
	if (unlikely(PFM_IS_FILE(file) == 0)) {
		DPRINT(("fd %d not related to perfmon\n", fd));
		goto error_args;
	}

	ctx = (pfm_context_t *)file->private_data;
	if (unlikely(ctx == NULL)) {
		DPRINT(("no context for fd %d\n", fd));
		goto error_args;
	}
	prefetch(&ctx->ctx_state);

	PROTECT_CTX(ctx, flags);

	/*
	 * check task is stopped
	 */
	ret = pfm_check_task_state(ctx, cmd, flags);
	if (unlikely(ret)) goto abort_locked;

skip_fd:
	ret = (*func)(ctx, args_k, count, task_pt_regs(current));

	call_made = 1;

abort_locked:
	if (likely(ctx)) {
		DPRINT(("context unlocked\n"));
		UNPROTECT_CTX(ctx, flags);
	}

	/* copy argument back to user, if needed */
	if (call_made && PFM_CMD_RW_ARG(cmd) && copy_to_user(arg, args_k, base_sz*count)) ret = -EFAULT;

error_args:
	if (file)
		fput(file);

	kfree(args_k);

	DPRINT(("cmd=%s ret=%ld\n", PFM_CMD_NAME(cmd), ret));

	return ret;
}
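
/*
 * Note on the restart_args logic above: commands that provide a
 * cmd_getsize callback (only PFM_CREATE_CONTEXT in the current table)
 * carry a variable-size trailer behind the fixed argument. The first
 * copy_from_user() brings in base_sz*count bytes, getsize() derives
 * xtra_sz from them, and the copy is then redone exactly once for
 * base_sz*count + xtra_sz bytes, still capped by PFM_MAX_ARGSIZE.
 */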
static void
pfm_resume_after_ovfl(pfm_context_t *ctx, unsigned long ovfl_regs, struct pt_regs *regs)
{
	pfm_buffer_fmt_t *fmt = ctx->ctx_buf_fmt;
	pfm_ovfl_ctrl_t rst_ctrl;
	int state;
	int ret = 0;

	state = ctx->ctx_state;
	/*
	 * Unlock sampling buffer and reset index atomically
	 * XXX: not really needed when blocking
	 */
	if (CTX_HAS_SMPL(ctx)) {

		rst_ctrl.bits.mask_monitoring = 0;
		rst_ctrl.bits.reset_ovfl_pmds = 0;

		if (state == PFM_CTX_LOADED)
			ret = pfm_buf_fmt_restart_active(fmt, current, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
		else
			ret = pfm_buf_fmt_restart(fmt, current, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
	} else {
		rst_ctrl.bits.mask_monitoring = 0;
		rst_ctrl.bits.reset_ovfl_pmds = 1;
	}

	if (ret == 0) {
		if (rst_ctrl.bits.reset_ovfl_pmds) {
			pfm_reset_regs(ctx, &ovfl_regs, PFM_PMD_LONG_RESET);
		}
		if (rst_ctrl.bits.mask_monitoring == 0) {
			DPRINT(("resuming monitoring\n"));
			if (ctx->ctx_state == PFM_CTX_MASKED) pfm_restore_monitoring(current);
		} else {
			DPRINT(("stopping monitoring\n"));
			//pfm_stop_monitoring(current, regs);
		}
		ctx->ctx_state = PFM_CTX_LOADED;
	}
}
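
/*
 * Note: this resume path applies the PFM_PMD_LONG_RESET values, whereas
 * pfm_overflow_handler() uses PFM_PMD_SHORT_RESET between samples; the
 * long values are intended for the infrequent, user-visible restarts.
 */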
/*
 * context MUST BE LOCKED when calling
 * can only be called for current
 */
static void
pfm_context_force_terminate(pfm_context_t *ctx, struct pt_regs *regs)
{
	int ret;

	DPRINT(("entering for [%d]\n", current->pid));

	ret = pfm_context_unload(ctx, NULL, 0, regs);
	if (ret) {
		printk(KERN_ERR "pfm_context_force_terminate: [%d] unload failed with %d\n", current->pid, ret);
	}

	/*
	 * and wakeup controlling task, indicating we are now disconnected
	 */
	wake_up_interruptible(&ctx->ctx_zombieq);

	/*
	 * given that context is still locked, the controlling
	 * task will only get access when we return from
	 * pfm_handle_work().
	 */
}
static int pfm_ovfl_notify_user(pfm_context_t *ctx, unsigned long ovfl_pmds);

 /*
  * pfm_handle_work() can be called with interrupts enabled
  * (TIF_NEED_RESCHED) or disabled. The down_interruptible
  * call may sleep, therefore we must re-enable interrupts
  * to avoid deadlocks. It is safe to do so because this function
  * is called ONLY when returning to user level (pUStk=1), in which case
  * there is no risk of kernel stack overflow due to deep
  * interrupt nesting.
  */
void
pfm_handle_work(void)
{
	pfm_context_t *ctx;
	struct pt_regs *regs;
	unsigned long flags, dummy_flags;
	unsigned long ovfl_regs;
	unsigned int reason;
	int ret;

	ctx = PFM_GET_CTX(current);
	if (ctx == NULL) {
		printk(KERN_ERR "perfmon: [%d] has no PFM context\n", current->pid);
		return;
	}

	PROTECT_CTX(ctx, flags);

	PFM_SET_WORK_PENDING(current, 0);

	pfm_clear_task_notify();

	regs = task_pt_regs(current);

	/*
	 * extract reason for being here and clear
	 */
	reason = ctx->ctx_fl_trap_reason;
	ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_NONE;
	ovfl_regs = ctx->ctx_ovfl_regs[0];

	DPRINT(("reason=%d state=%d\n", reason, ctx->ctx_state));

	/*
	 * must be done before we check for simple-reset mode
	 */
	if (ctx->ctx_fl_going_zombie || ctx->ctx_state == PFM_CTX_ZOMBIE) goto do_zombie;

	//if (CTX_OVFL_NOBLOCK(ctx)) goto skip_blocking;
	if (reason == PFM_TRAP_REASON_RESET) goto skip_blocking;

	/*
	 * restore interrupt mask to what it was on entry.
	 * Could be enabled/disabled.
	 */
	UNPROTECT_CTX(ctx, flags);

	/*
	 * force interrupt enable because of down_interruptible()
	 */
	local_irq_enable();

	DPRINT(("before block sleeping\n"));

	/*
	 * may go through without blocking on SMP systems
	 * if restart has been received already by the time we call down()
	 */
	ret = wait_for_completion_interruptible(&ctx->ctx_restart_done);

	DPRINT(("after block sleeping ret=%d\n", ret));

	/*
	 * lock context and mask interrupts again
	 * We save flags into a dummy because we may have
	 * altered interrupts mask compared to entry in this
	 * function.
	 */
	PROTECT_CTX(ctx, dummy_flags);

	/*
	 * we need to read the ovfl_regs only after wake-up
	 * because we may have had pfm_write_pmds() in between
	 * and that can change PMD values and therefore
	 * ovfl_regs is reset for these new PMD values.
	 */
	ovfl_regs = ctx->ctx_ovfl_regs[0];

	if (ctx->ctx_fl_going_zombie) {
do_zombie:
		DPRINT(("context is zombie, bailing out\n"));
		pfm_context_force_terminate(ctx, regs);
		goto nothing_to_do;
	}
	/*
	 * in case of interruption of down() we don't restart anything
	 */
	if (ret < 0) goto nothing_to_do;

skip_blocking:
	pfm_resume_after_ovfl(ctx, ovfl_regs, regs);
	ctx->ctx_ovfl_regs[0] = 0UL;

nothing_to_do:
	/*
	 * restore flags as they were upon entry
	 */
	UNPROTECT_CTX(ctx, flags);
}
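
/*
 * Round trip of a blocking overflow notification, as orchestrated above
 * (sketch):
 *
 *	monitored task                     controlling tool
 *	--------------                     ----------------
 *	PMU overflow interrupt
 *	pfm_overflow_handler()
 *	pfm_handle_work() blocks in  ----> read(fd) returns a PFM_MSG_OVFL
 *	wait_for_completion_...()    <---- perfmonctl(fd, PFM_RESTART, NULL, 0)
 *	pfm_resume_after_ovfl()
 */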
static int
pfm_notify_user(pfm_context_t *ctx, pfm_msg_t *msg)
{
	if (ctx->ctx_state == PFM_CTX_ZOMBIE) {
		DPRINT(("ignoring overflow notification, owner is zombie\n"));
		return 0;
	}

	DPRINT(("waking up somebody\n"));

	if (msg) wake_up_interruptible(&ctx->ctx_msgq_wait);

	/*
	 * safe, we are not in intr handler, nor in ctxsw when
	 * we come here
	 */
	kill_fasync (&ctx->ctx_async_queue, SIGIO, POLL_IN);

	return 0;
}
static int
pfm_ovfl_notify_user(pfm_context_t *ctx, unsigned long ovfl_pmds)
{
	pfm_msg_t *msg = NULL;

	if (ctx->ctx_fl_no_msg == 0) {
		msg = pfm_get_new_msg(ctx);
		if (msg == NULL) {
			printk(KERN_ERR "perfmon: pfm_ovfl_notify_user no more notification msgs\n");
			return -1;
		}

		msg->pfm_ovfl_msg.msg_type         = PFM_MSG_OVFL;
		msg->pfm_ovfl_msg.msg_ctx_fd       = ctx->ctx_fd;
		msg->pfm_ovfl_msg.msg_active_set   = 0;
		msg->pfm_ovfl_msg.msg_ovfl_pmds[0] = ovfl_pmds;
		msg->pfm_ovfl_msg.msg_ovfl_pmds[1] = 0UL;
		msg->pfm_ovfl_msg.msg_ovfl_pmds[2] = 0UL;
		msg->pfm_ovfl_msg.msg_ovfl_pmds[3] = 0UL;
		msg->pfm_ovfl_msg.msg_tstamp       = 0UL;
	}

	DPRINT(("ovfl msg: msg=%p no_msg=%d fd=%d ovfl_pmds=0x%lx\n",
		msg,
		ctx->ctx_fl_no_msg,
		ctx->ctx_fd,
		ovfl_pmds));

	return pfm_notify_user(ctx, msg);
}
static int
pfm_end_notify_user(pfm_context_t *ctx)
{
	pfm_msg_t *msg;

	msg = pfm_get_new_msg(ctx);
	if (msg == NULL) {
		printk(KERN_ERR "perfmon: pfm_end_notify_user no more notification msgs\n");
		return -1;
	}
	/* no leak */
	memset(msg, 0, sizeof(*msg));

	msg->pfm_end_msg.msg_type    = PFM_MSG_END;
	msg->pfm_end_msg.msg_ctx_fd  = ctx->ctx_fd;
	msg->pfm_ovfl_msg.msg_tstamp = 0UL;

	DPRINT(("end msg: msg=%p no_msg=%d ctx_fd=%d\n",
		msg,
		ctx->ctx_fl_no_msg,
		ctx->ctx_fd));

	return pfm_notify_user(ctx, msg);
}
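
/*
 * Consumer side sketch (user level, not compiled here; see the pfm_msg_t
 * union in <asm/perfmon.h> for the exact field names): the messages
 * queued above are drained by reading the context file descriptor,
 * typically after poll()/select() signals readability:
 *
 *	pfm_msg_t msg;
 *	if (read(fd, &msg, sizeof(msg)) == sizeof(msg)) {
 *		switch (msg.pfm_gen_msg.msg_type) {
 *		case PFM_MSG_OVFL: ... counters overflowed ...
 *		case PFM_MSG_END:  ... monitored task is gone ...
 *		}
 *	}
 */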
/*
 * main overflow processing routine.
 * it can be called from the interrupt path or explicitly during the context switch code
 */
static void
pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, struct pt_regs *regs)
{
	pfm_ovfl_arg_t *ovfl_arg;
	unsigned long mask;
	unsigned long old_val, ovfl_val, new_val;
	unsigned long ovfl_notify = 0UL, ovfl_pmds = 0UL, smpl_pmds = 0UL, reset_pmds;
	unsigned long tstamp;
	pfm_ovfl_ctrl_t ovfl_ctrl;
	unsigned int i, has_smpl;
	int must_notify = 0;

	if (unlikely(ctx->ctx_state == PFM_CTX_ZOMBIE)) goto stop_monitoring;

	/*
	 * sanity test. Should never happen
	 */
	if (unlikely((pmc0 & 0x1) == 0)) goto sanity_check;

	tstamp   = ia64_get_itc();
	mask     = pmc0 >> PMU_FIRST_COUNTER;
	ovfl_val = pmu_conf->ovfl_val;
	has_smpl = CTX_HAS_SMPL(ctx);

	DPRINT_ovfl(("pmc0=0x%lx pid=%d iip=0x%lx, %s "
		     "used_pmds=0x%lx\n",
		     pmc0,
		     task ? task->pid: -1,
		     (regs ? regs->cr_iip : 0),
		     CTX_OVFL_NOBLOCK(ctx) ? "nonblocking" : "blocking",
		     ctx->ctx_used_pmds[0]));

	/*
	 * first we update the virtual counters
	 * assume there was a prior ia64_srlz_d() issued
	 */
	for (i = PMU_FIRST_COUNTER; mask ; i++, mask >>= 1) {

		/* skip pmd which did not overflow */
		if ((mask & 0x1) == 0) continue;

		/*
		 * Note that the pmd is not necessarily 0 at this point as qualified events
		 * may have happened before the PMU was frozen. The residual count is not
		 * taken into consideration here but will be with any read of the pmd via
		 * pfm_read_pmds().
		 */
		old_val              = new_val = ctx->ctx_pmds[i].val;
		new_val             += 1 + ovfl_val;
		ctx->ctx_pmds[i].val = new_val;

		/*
		 * check for overflow condition
		 */
		if (likely(old_val > new_val)) {
			ovfl_pmds |= 1UL << i;
			if (PMC_OVFL_NOTIFY(ctx, i)) ovfl_notify |= 1UL << i;
		}

		DPRINT_ovfl(("ctx_pmd[%d].val=0x%lx old_val=0x%lx pmd=0x%lx ovfl_pmds=0x%lx ovfl_notify=0x%lx\n",
			i,
			new_val,
			old_val,
			ia64_get_pmd(i) & ovfl_val,
			ovfl_pmds,
			ovfl_notify));
	}

	/*
	 * there was no 64-bit overflow, nothing else to do
	 */
	if (ovfl_pmds == 0UL) return;

	/*
	 * reset all control bits
	 */
	ovfl_ctrl.val = 0;
	reset_pmds    = 0UL;

	/*
	 * if a sampling format module exists, then we "cache" the overflow by
	 * calling the module's handler() routine.
	 */
	if (has_smpl) {
		unsigned long start_cycles, end_cycles;
		unsigned long pmd_mask;
		int j, k, ret = 0;
		int this_cpu = smp_processor_id();

		pmd_mask = ovfl_pmds >> PMU_FIRST_COUNTER;
		ovfl_arg = &ctx->ctx_ovfl_arg;

		prefetch(ctx->ctx_smpl_hdr);

		for(i=PMU_FIRST_COUNTER; pmd_mask && ret == 0; i++, pmd_mask >>=1) {

			mask = 1UL << i;

			if ((pmd_mask & 0x1) == 0) continue;

			ovfl_arg->ovfl_pmd      = (unsigned char )i;
			ovfl_arg->ovfl_notify   = ovfl_notify & mask ? 1 : 0;
			ovfl_arg->active_set    = 0;
			ovfl_arg->ovfl_ctrl.val = 0; /* module must fill in all fields */
			ovfl_arg->smpl_pmds[0]  = smpl_pmds = ctx->ctx_pmds[i].smpl_pmds[0];

			ovfl_arg->pmd_value      = ctx->ctx_pmds[i].val;
			ovfl_arg->pmd_last_reset = ctx->ctx_pmds[i].lval;
			ovfl_arg->pmd_eventid    = ctx->ctx_pmds[i].eventid;

			/*
			 * copy values of pmds of interest. Sampling format may copy them
			 * into sampling buffer.
			 */
			if (smpl_pmds) {
				for(j=0, k=0; smpl_pmds; j++, smpl_pmds >>=1) {
					if ((smpl_pmds & 0x1) == 0) continue;
					ovfl_arg->smpl_pmds_values[k++] = PMD_IS_COUNTING(j) ? pfm_read_soft_counter(ctx, j) : ia64_get_pmd(j);
					DPRINT_ovfl(("smpl_pmd[%d]=pmd%u=0x%lx\n", k-1, j, ovfl_arg->smpl_pmds_values[k-1]));
				}
			}

			pfm_stats[this_cpu].pfm_smpl_handler_calls++;

			start_cycles = ia64_get_itc();

			/*
			 * call custom buffer format record (handler) routine
			 */
			ret = (*ctx->ctx_buf_fmt->fmt_handler)(task, ctx->ctx_smpl_hdr, ovfl_arg, regs, tstamp);

			end_cycles = ia64_get_itc();

			/*
			 * For those controls, we take the union because they have
			 * an all or nothing behavior.
			 */
			ovfl_ctrl.bits.notify_user     |= ovfl_arg->ovfl_ctrl.bits.notify_user;
			ovfl_ctrl.bits.block_task      |= ovfl_arg->ovfl_ctrl.bits.block_task;
			ovfl_ctrl.bits.mask_monitoring |= ovfl_arg->ovfl_ctrl.bits.mask_monitoring;
			/*
			 * build the bitmask of pmds to reset now
			 */
			if (ovfl_arg->ovfl_ctrl.bits.reset_ovfl_pmds) reset_pmds |= mask;

			pfm_stats[this_cpu].pfm_smpl_handler_cycles += end_cycles - start_cycles;
		}
		/*
		 * when the module cannot handle the rest of the overflows, we abort right here
		 */
		if (ret && pmd_mask) {
			DPRINT(("handler aborts leftover ovfl_pmds=0x%lx\n",
				pmd_mask<<PMU_FIRST_COUNTER));
		}
		/*
		 * remove the pmds we reset now from the set of pmds to reset in pfm_restart()
		 */
		ovfl_pmds &= ~reset_pmds;
	} else {
		/*
		 * when no sampling module is used, then the default
		 * is to notify on overflow if requested by user
		 */
		ovfl_ctrl.bits.notify_user     = ovfl_notify ? 1 : 0;
		ovfl_ctrl.bits.block_task      = ovfl_notify ? 1 : 0;
		ovfl_ctrl.bits.mask_monitoring = ovfl_notify ? 1 : 0; /* XXX: change for saturation */
		ovfl_ctrl.bits.reset_ovfl_pmds = ovfl_notify ? 0 : 1;
		/*
		 * if needed, we reset all overflowed pmds
		 */
		if (ovfl_notify == 0) reset_pmds = ovfl_pmds;
	}

	DPRINT_ovfl(("ovfl_pmds=0x%lx reset_pmds=0x%lx\n", ovfl_pmds, reset_pmds));

	/*
	 * reset the requested PMD registers using the short reset values
	 */
	if (reset_pmds) {
		unsigned long bm = reset_pmds;
		pfm_reset_regs(ctx, &bm, PFM_PMD_SHORT_RESET);
	}

	if (ovfl_notify && ovfl_ctrl.bits.notify_user) {
		/*
		 * keep track of what to reset when unblocking
		 */
		ctx->ctx_ovfl_regs[0] = ovfl_pmds;

		/*
		 * check for blocking context
		 */
		if (CTX_OVFL_NOBLOCK(ctx) == 0 && ovfl_ctrl.bits.block_task) {

			ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_BLOCK;

			/*
			 * set the perfmon specific checking pending work for the task
			 */
			PFM_SET_WORK_PENDING(task, 1);

			/*
			 * when coming from ctxsw, current still points to the
			 * previous task, therefore we must work with task and not current.
			 */
			pfm_set_task_notify(task);
		}
		/*
		 * defer until state is changed (shorten spin window). the context is locked
		 * anyway, so the signal receiver would come spin for nothing.
		 */
		must_notify = 1;
	}

	DPRINT_ovfl(("owner [%d] pending=%ld reason=%u ovfl_pmds=0x%lx ovfl_notify=0x%lx masked=%d\n",
			GET_PMU_OWNER() ? GET_PMU_OWNER()->pid : -1,
			PFM_GET_WORK_PENDING(task),
			ctx->ctx_fl_trap_reason,
			ovfl_pmds,
			ovfl_notify,
			ovfl_ctrl.bits.mask_monitoring ? 1 : 0));
	/*
	 * in case monitoring must be stopped, we toggle the psr bits
	 */
	if (ovfl_ctrl.bits.mask_monitoring) {
		pfm_mask_monitoring(task);
		ctx->ctx_state = PFM_CTX_MASKED;
		ctx->ctx_fl_can_restart = 1;
	}

	/*
	 * send notification now
	 */
	if (must_notify) pfm_ovfl_notify_user(ctx, ovfl_notify);

	return;

sanity_check:
	printk(KERN_ERR "perfmon: CPU%d overflow handler [%d] pmc0=0x%lx\n",
			smp_processor_id(),
			task ? task->pid : -1,
			pmc0);
	return;

stop_monitoring:
	/*
	 * in SMP, zombie context is never restored but reclaimed in pfm_load_regs().
	 * Moreover, zombies are also reclaimed in pfm_save_regs(). Therefore we can
	 * come here as zombie only if the task is the current task. In which case, we
	 * can access the PMU hardware directly.
	 *
	 * Note that zombies do have PM_VALID set. So here we do the minimal.
	 *
	 * In case the context was zombified it could not be reclaimed at the time
	 * the monitoring program exited. At this point, the PMU reservation has been
	 * returned, the sampling buffer has been freed. We must convert this call
	 * into a spurious interrupt. However, we must also avoid infinite overflows
	 * by stopping monitoring for this task. We can only come here for a per-task
	 * context. All we need to do is to stop monitoring using the psr bits which
	 * are always task private. By re-enabling secure monitoring, we ensure that
	 * the monitored task will not be able to re-activate monitoring.
	 * The task will eventually be context switched out, at which point the context
	 * will be reclaimed (that includes releasing ownership of the PMU).
	 *
	 * So there might be a window of time where the number of per-task sessions is zero
	 * yet one PMU might have an owner and get at most one overflow interrupt for a zombie
	 * context. This is safe because if a per-task session comes in, it will push this one
	 * out and by virtue of pfm_save_regs(), this one will disappear. If a system-wide
	 * session is forced on that CPU, given that we use task pinning, pfm_save_regs() will
	 * also push our zombie context out.
	 *
	 * Overall pretty hairy stuff....
	 */
	DPRINT(("ctx is zombie for [%d], converted to spurious\n", task ? task->pid: -1));
	pfm_clear_psr_up();
	ia64_psr(regs)->up = 0;
	ia64_psr(regs)->sp = 1;
	return;
}
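
/*
 * Worked example of the counter virtualization done above: with 47-bit
 * hardware counters, ovfl_val = (1UL << 47) - 1, so each hardware wrap
 * adds 1 + ovfl_val = 2^47 to the 64-bit software value. The unsigned
 * comparison old_val > new_val then fires exactly when the *software*
 * value wraps around 2^64, which is the 64-bit overflow being emulated.
 */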
static int
pfm_do_interrupt_handler(int irq, void *arg, struct pt_regs *regs)
{
	struct task_struct *task;
	pfm_context_t *ctx;
	unsigned long flags;
	u64 pmc0;
	int this_cpu = smp_processor_id();
	int retval = 0;

	pfm_stats[this_cpu].pfm_ovfl_intr_count++;

	/*
	 * srlz.d done before arriving here
	 */
	pmc0 = ia64_get_pmc(0);

	task = GET_PMU_OWNER();
	ctx  = GET_PMU_CTX();

	/*
	 * if we have some pending bits set
	 * assumes : if any PMC0.bit[63-1] is set, then PMC0.fr = 1
	 */
	if (PMC0_HAS_OVFL(pmc0) && task) {
		/*
		 * we assume that pmc0.fr is always set here
		 */

		/* sanity check */
		if (!ctx) goto report_spurious1;

		if (ctx->ctx_fl_system == 0 && (task->thread.flags & IA64_THREAD_PM_VALID) == 0)
			goto report_spurious2;

		PROTECT_CTX_NOPRINT(ctx, flags);

		pfm_overflow_handler(task, ctx, pmc0, regs);

		UNPROTECT_CTX_NOPRINT(ctx, flags);

	} else {
		pfm_stats[this_cpu].pfm_spurious_ovfl_intr_count++;
		retval = -1;
	}
	/*
	 * keep it unfrozen at all times
	 */
	pfm_unfreeze_pmu();

	return retval;

report_spurious1:
	printk(KERN_INFO "perfmon: spurious overflow interrupt on CPU%d: process %d has no PFM context\n",
		this_cpu, task->pid);
	pfm_unfreeze_pmu();
	return -1;
report_spurious2:
	printk(KERN_INFO "perfmon: spurious overflow interrupt on CPU%d: process %d, invalid flag\n",
		this_cpu,
		task->pid);
	pfm_unfreeze_pmu();
	return -1;
}
static irqreturn_t
pfm_interrupt_handler(int irq, void *arg, struct pt_regs *regs)
{
	unsigned long start_cycles, total_cycles;
	unsigned long min, max;
	int this_cpu;
	int ret;

	this_cpu = get_cpu();
	if (likely(!pfm_alt_intr_handler)) {
		min = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min;
		max = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max;

		start_cycles = ia64_get_itc();

		ret = pfm_do_interrupt_handler(irq, arg, regs);

		total_cycles = ia64_get_itc();

		/*
		 * don't measure spurious interrupts
		 */
		if (likely(ret == 0)) {
			total_cycles -= start_cycles;

			if (total_cycles < min) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min = total_cycles;
			if (total_cycles > max) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max = total_cycles;

			pfm_stats[this_cpu].pfm_ovfl_intr_cycles += total_cycles;
		}
	}
	else {
		(*pfm_alt_intr_handler->handler)(irq, arg, regs);
	}

	put_cpu_no_resched();
	return IRQ_HANDLED;
}
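
/*
 * Note: when an alternate handler is installed (see
 * pfm_install_alt_pmu_interrupt() below), the branch above bypasses the
 * min/max/total cycle accounting, so the /proc/perfmon statistics only
 * cover interrupts handled by the native perfmon path.
 */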
/*
 * /proc/perfmon interface, for debug only
 */

#define PFM_PROC_SHOW_HEADER	((void *)NR_CPUS+1)

static void *
pfm_proc_start(struct seq_file *m, loff_t *pos)
{
	if (*pos == 0) {
		return PFM_PROC_SHOW_HEADER;
	}

	while (*pos <= NR_CPUS) {
		if (cpu_online(*pos - 1)) {
			return (void *)*pos;
		}
		++*pos;
	}
	return NULL;
}

static void *
pfm_proc_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return pfm_proc_start(m, pos);
}

static void
pfm_proc_stop(struct seq_file *m, void *v)
{
}

static void
pfm_proc_show_header(struct seq_file *m)
{
	struct list_head * pos;
	pfm_buffer_fmt_t * entry;
	unsigned long flags;

	seq_printf(m,
		"perfmon version           : %u.%u\n"
		"model                     : %s\n"
		"fastctxsw                 : %s\n"
		"expert mode               : %s\n"
		"ovfl_mask                 : 0x%lx\n"
		"PMU flags                 : 0x%x\n",
		PFM_VERSION_MAJ, PFM_VERSION_MIN,
		pmu_conf->pmu_name,
		pfm_sysctl.fastctxsw > 0 ? "Yes": "No",
		pfm_sysctl.expert_mode > 0 ? "Yes": "No",
		pmu_conf->ovfl_val,
		pmu_conf->flags);

	LOCK_PFS(flags);

	seq_printf(m,
		"proc_sessions             : %u\n"
		"sys_sessions              : %u\n"
		"sys_use_dbregs            : %u\n"
		"ptrace_use_dbregs         : %u\n",
		pfm_sessions.pfs_task_sessions,
		pfm_sessions.pfs_sys_sessions,
		pfm_sessions.pfs_sys_use_dbregs,
		pfm_sessions.pfs_ptrace_use_dbregs);

	UNLOCK_PFS(flags);

	spin_lock(&pfm_buffer_fmt_lock);

	list_for_each(pos, &pfm_buffer_fmt_list) {
		entry = list_entry(pos, pfm_buffer_fmt_t, fmt_list);
		seq_printf(m, "format                    : %02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x %s\n",
			entry->fmt_uuid[0],
			entry->fmt_uuid[1],
			entry->fmt_uuid[2],
			entry->fmt_uuid[3],
			entry->fmt_uuid[4],
			entry->fmt_uuid[5],
			entry->fmt_uuid[6],
			entry->fmt_uuid[7],
			entry->fmt_uuid[8],
			entry->fmt_uuid[9],
			entry->fmt_uuid[10],
			entry->fmt_uuid[11],
			entry->fmt_uuid[12],
			entry->fmt_uuid[13],
			entry->fmt_uuid[14],
			entry->fmt_uuid[15],
			entry->fmt_name);
	}
	spin_unlock(&pfm_buffer_fmt_lock);
}

static int
pfm_proc_show(struct seq_file *m, void *v)
{
	unsigned long psr;
	unsigned int i;
	int cpu;

	if (v == PFM_PROC_SHOW_HEADER) {
		pfm_proc_show_header(m);
		return 0;
	}

	/* show info for CPU (v - 1) */

	cpu = (long)v - 1;
	seq_printf(m,
		"CPU%-2d overflow intrs      : %lu\n"
		"CPU%-2d overflow cycles     : %lu\n"
		"CPU%-2d overflow min        : %lu\n"
		"CPU%-2d overflow max        : %lu\n"
		"CPU%-2d smpl handler calls  : %lu\n"
		"CPU%-2d smpl handler cycles : %lu\n"
		"CPU%-2d spurious intrs      : %lu\n"
		"CPU%-2d replay   intrs      : %lu\n"
		"CPU%-2d syst_wide           : %d\n"
		"CPU%-2d dcr_pp              : %d\n"
		"CPU%-2d exclude idle        : %d\n"
		"CPU%-2d owner               : %d\n"
		"CPU%-2d context             : %p\n"
		"CPU%-2d activations         : %lu\n",
		cpu, pfm_stats[cpu].pfm_ovfl_intr_count,
		cpu, pfm_stats[cpu].pfm_ovfl_intr_cycles,
		cpu, pfm_stats[cpu].pfm_ovfl_intr_cycles_min,
		cpu, pfm_stats[cpu].pfm_ovfl_intr_cycles_max,
		cpu, pfm_stats[cpu].pfm_smpl_handler_calls,
		cpu, pfm_stats[cpu].pfm_smpl_handler_cycles,
		cpu, pfm_stats[cpu].pfm_spurious_ovfl_intr_count,
		cpu, pfm_stats[cpu].pfm_replay_ovfl_intr_count,
		cpu, pfm_get_cpu_data(pfm_syst_info, cpu) & PFM_CPUINFO_SYST_WIDE ? 1 : 0,
		cpu, pfm_get_cpu_data(pfm_syst_info, cpu) & PFM_CPUINFO_DCR_PP ? 1 : 0,
		cpu, pfm_get_cpu_data(pfm_syst_info, cpu) & PFM_CPUINFO_EXCL_IDLE ? 1 : 0,
		cpu, pfm_get_cpu_data(pmu_owner, cpu) ? pfm_get_cpu_data(pmu_owner, cpu)->pid : -1,
		cpu, pfm_get_cpu_data(pmu_ctx, cpu),
		cpu, pfm_get_cpu_data(pmu_activation_number, cpu));

	if (num_online_cpus() == 1 && pfm_sysctl.debug > 0) {

		psr = pfm_get_psr();

		ia64_srlz_d();

		seq_printf(m,
			"CPU%-2d psr                 : 0x%lx\n"
			"CPU%-2d pmc0                : 0x%lx\n",
			cpu, psr,
			cpu, ia64_get_pmc(0));

		for (i=0; PMC_IS_LAST(i) == 0; i++) {
			if (PMC_IS_COUNTING(i) == 0) continue;
			seq_printf(m,
				"CPU%-2d pmc%u                : 0x%lx\n"
				"CPU%-2d pmd%u                : 0x%lx\n",
				cpu, i, ia64_get_pmc(i),
				cpu, i, ia64_get_pmd(i));
		}
	}
	return 0;
}

struct seq_operations pfm_seq_ops = {
	.start = pfm_proc_start,
	.next  = pfm_proc_next,
	.stop  = pfm_proc_stop,
	.show  = pfm_proc_show
};

static int
pfm_proc_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &pfm_seq_ops);
}
/*
 * we come here as soon as local_cpu_data->pfm_syst_wide is set. this happens
 * during pfm_enable() hence before pfm_start(). We cannot assume monitoring
 * is active or inactive based on mode. We must rely on the value in
 * local_cpu_data->pfm_syst_info
 */
void
pfm_syst_wide_update_task(struct task_struct *task, unsigned long info, int is_ctxswin)
{
	struct pt_regs *regs;
	unsigned long dcr;
	unsigned long dcr_pp;

	dcr_pp = info & PFM_CPUINFO_DCR_PP ? 1 : 0;

	/*
	 * pid 0 is guaranteed to be the idle task. There is one such task with pid 0
	 * on every CPU, so we can rely on the pid to identify the idle task.
	 */
	if ((info & PFM_CPUINFO_EXCL_IDLE) == 0 || task->pid) {
		regs = task_pt_regs(task);
		ia64_psr(regs)->pp = is_ctxswin ? dcr_pp : 0;
		return;
	}
	/*
	 * if monitoring has started
	 */
	if (dcr_pp) {
		dcr = ia64_getreg(_IA64_REG_CR_DCR);
		/*
		 * context switching in?
		 */
		if (is_ctxswin) {
			/* mask monitoring for the idle task */
			ia64_setreg(_IA64_REG_CR_DCR, dcr & ~IA64_DCR_PP);
			pfm_clear_psr_pp();
			ia64_srlz_i();
			return;
		}
		/*
		 * context switching out
		 * restore monitoring for next task
		 *
		 * Due to inlining this odd if-then-else construction generates
		 * better code.
		 */
		ia64_setreg(_IA64_REG_CR_DCR, dcr |IA64_DCR_PP);
		pfm_set_psr_pp();
		ia64_srlz_i();
	}
}
#ifdef CONFIG_SMP

static void
pfm_force_cleanup(pfm_context_t *ctx, struct pt_regs *regs)
{
	struct task_struct *task = ctx->ctx_task;

	ia64_psr(regs)->up = 0;
	ia64_psr(regs)->sp = 1;

	if (GET_PMU_OWNER() == task) {
		DPRINT(("cleared ownership for [%d]\n", ctx->ctx_task->pid));
		SET_PMU_OWNER(NULL, NULL);
	}

	/*
	 * disconnect the task from the context and vice-versa
	 */
	PFM_SET_WORK_PENDING(task, 0);

	task->thread.pfm_context  = NULL;
	task->thread.flags       &= ~IA64_THREAD_PM_VALID;

	DPRINT(("force cleanup for [%d]\n", task->pid));
}
/*
 * in 2.6, interrupts are masked when we come here and the runqueue lock is held
 */
void
pfm_save_regs(struct task_struct *task)
{
	pfm_context_t *ctx;
	struct thread_struct *t;
	unsigned long flags;
	u64 psr;

	ctx = PFM_GET_CTX(task);
	if (ctx == NULL) return;
	t = &task->thread;

	/*
	 * we always come here with interrupts ALREADY disabled by
	 * the scheduler. So we simply need to protect against concurrent
	 * access, not CPU concurrency.
	 */
	flags = pfm_protect_ctx_ctxsw(ctx);

	if (ctx->ctx_state == PFM_CTX_ZOMBIE) {
		struct pt_regs *regs = task_pt_regs(task);

		pfm_clear_psr_up();

		pfm_force_cleanup(ctx, regs);

		BUG_ON(ctx->ctx_smpl_hdr);

		pfm_unprotect_ctx_ctxsw(ctx, flags);

		pfm_context_free(ctx);

		return;
	}

	/*
	 * save current PSR: needed because we modify it
	 */
	psr = pfm_get_psr();

	BUG_ON(psr & (IA64_PSR_I));

	/*
	 * stop monitoring:
	 * This is the last instruction which may generate an overflow
	 *
	 * We do not need to set psr.sp because, it is irrelevant in kernel.
	 * It will be restored from ipsr when going back to user level
	 */
	pfm_clear_psr_up();

	/*
	 * keep a copy of psr.up (for reload)
	 */
	ctx->ctx_saved_psr_up = psr & IA64_PSR_UP;

	/*
	 * release ownership of this PMU.
	 * PM interrupts are masked, so nothing
	 * can happen.
	 */
	SET_PMU_OWNER(NULL, NULL);

	/*
	 * we systematically save the PMD as we have no
	 * guarantee we will be scheduled at that same
	 * CPU again.
	 */
	pfm_save_pmds(t->pmds, ctx->ctx_used_pmds[0]);

	/*
	 * save pmc0 ia64_srlz_d() done in pfm_save_pmds()
	 * we will need it on the restore path to check
	 * for pending overflow.
	 */
	t->pmcs[0] = ia64_get_pmc(0);

	/*
	 * unfreeze PMU if had pending overflows
	 */
	if (t->pmcs[0] & ~0x1UL) pfm_unfreeze_pmu();

	/*
	 * finally, allow context access.
	 * interrupts will still be masked after this call.
	 */
	pfm_unprotect_ctx_ctxsw(ctx, flags);
}

#else /* !CONFIG_SMP */
void
pfm_save_regs(struct task_struct *task)
{
	pfm_context_t *ctx;
	u64 psr;

	ctx = PFM_GET_CTX(task);
	if (ctx == NULL) return;

	/*
	 * save current PSR: needed because we modify it
	 */
	psr = pfm_get_psr();

	BUG_ON(psr & (IA64_PSR_I));

	/*
	 * stop monitoring:
	 * This is the last instruction which may generate an overflow
	 *
	 * We do not need to set psr.sp because, it is irrelevant in kernel.
	 * It will be restored from ipsr when going back to user level
	 */
	pfm_clear_psr_up();

	/*
	 * keep a copy of psr.up (for reload)
	 */
	ctx->ctx_saved_psr_up = psr & IA64_PSR_UP;
}

static void
pfm_lazy_save_regs (struct task_struct *task)
{
	pfm_context_t *ctx;
	struct thread_struct *t;
	unsigned long flags;

	{ u64 psr = pfm_get_psr();
	  BUG_ON(psr & IA64_PSR_UP);
	}

	ctx = PFM_GET_CTX(task);
	t   = &task->thread;

	/*
	 * we need to mask PMU overflow here to
	 * make sure that we maintain pmc0 until
	 * we save it. overflow interrupts are
	 * treated as spurious if there is no
	 * owner.
	 *
	 * XXX: I don't think this is necessary
	 */
	PROTECT_CTX(ctx,flags);

	/*
	 * release ownership of this PMU.
	 * must be done before we save the registers.
	 *
	 * after this call any PMU interrupt is treated
	 * as spurious.
	 */
	SET_PMU_OWNER(NULL, NULL);

	/*
	 * save all the pmds we use
	 */
	pfm_save_pmds(t->pmds, ctx->ctx_used_pmds[0]);

	/*
	 * save pmc0 ia64_srlz_d() done in pfm_save_pmds()
	 * it is needed to check for pending overflow
	 * on the restore path
	 */
	t->pmcs[0] = ia64_get_pmc(0);

	/*
	 * unfreeze PMU if had pending overflows
	 */
	if (t->pmcs[0] & ~0x1UL) pfm_unfreeze_pmu();

	/*
	 * now we can unmask PMU interrupts, they will
	 * be treated as purely spurious and we will not
	 * lose any information
	 */
	UNPROTECT_CTX(ctx,flags);
}
#endif /* CONFIG_SMP */
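
/*
 * Design note: on UP kernels pfm_save_regs() only clears psr.up and
 * leaves the PMU registers live; they are pushed out lazily by
 * pfm_lazy_save_regs() when another context actually needs the PMU (see
 * the UP version of pfm_load_regs() below). On SMP the state must be
 * saved eagerly at every switch since the task may resume on another CPU.
 */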
#ifdef CONFIG_SMP
/*
 * in 2.6, interrupts are masked when we come here and the runqueue lock is held
 */
void
pfm_load_regs (struct task_struct *task)
{
	pfm_context_t *ctx;
	struct thread_struct *t;
	unsigned long pmc_mask = 0UL, pmd_mask = 0UL;
	unsigned long flags;
	u64 psr, psr_up;
	int need_irq_resend;

	ctx = PFM_GET_CTX(task);
	if (unlikely(ctx == NULL)) return;

	BUG_ON(GET_PMU_OWNER());

	t = &task->thread;
	/*
	 * possible on unload
	 */
	if (unlikely((t->flags & IA64_THREAD_PM_VALID) == 0)) return;

	/*
	 * we always come here with interrupts ALREADY disabled by
	 * the scheduler. So we simply need to protect against concurrent
	 * access, not CPU concurrency.
	 */
	flags = pfm_protect_ctx_ctxsw(ctx);
	psr   = pfm_get_psr();

	need_irq_resend = pmu_conf->flags & PFM_PMU_IRQ_RESEND;

	BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP));
	BUG_ON(psr & IA64_PSR_I);

	if (unlikely(ctx->ctx_state == PFM_CTX_ZOMBIE)) {
		struct pt_regs *regs = task_pt_regs(task);

		BUG_ON(ctx->ctx_smpl_hdr);

		pfm_force_cleanup(ctx, regs);

		pfm_unprotect_ctx_ctxsw(ctx, flags);

		/*
		 * this one (kmalloc'ed) is fine with interrupts disabled
		 */
		pfm_context_free(ctx);

		return;
	}

	/*
	 * we restore ALL the debug registers to avoid picking up
	 * stale state.
	 */
	if (ctx->ctx_fl_using_dbreg) {
		pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
		pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
	}
	/*
	 * retrieve saved psr.up
	 */
	psr_up = ctx->ctx_saved_psr_up;

	/*
	 * if we were the last user of the PMU on that CPU,
	 * then nothing to do except restore psr
	 */
	if (GET_LAST_CPU(ctx) == smp_processor_id() && ctx->ctx_last_activation == GET_ACTIVATION()) {
		/*
		 * retrieve partial reload masks (due to user modifications)
		 */
		pmc_mask = ctx->ctx_reload_pmcs[0];
		pmd_mask = ctx->ctx_reload_pmds[0];

	} else {
		/*
		 * To avoid leaking information to the user level when psr.sp=0,
		 * we must reload ALL implemented pmds (even the ones we don't use).
		 * In the kernel we only allow PFM_READ_PMDS on registers which
		 * we initialized or requested (sampling) so there is no risk there.
		 */
		pmd_mask = pfm_sysctl.fastctxsw ? ctx->ctx_used_pmds[0] : ctx->ctx_all_pmds[0];

		/*
		 * ALL accessible PMCs are systematically reloaded, unused registers
		 * get their default (from pfm_reset_pmu_state()) values to avoid picking
		 * up stale configuration.
		 *
		 * PMC0 is never in the mask. It is always restored separately.
		 */
		pmc_mask = ctx->ctx_all_pmcs[0];
	}
	/*
	 * when context is MASKED, we will restore PMC with plm=0
	 * and PMD with stale information, but that's ok, nothing
	 * will be captured.
	 *
	 * XXX: optimize here
	 */
	if (pmd_mask) pfm_restore_pmds(t->pmds, pmd_mask);
	if (pmc_mask) pfm_restore_pmcs(t->pmcs, pmc_mask);

	/*
	 * check for pending overflow at the time the state
	 * was saved.
	 */
	if (unlikely(PMC0_HAS_OVFL(t->pmcs[0]))) {
		/*
		 * reload pmc0 with the overflow information
		 * On McKinley PMU, this will trigger a PMU interrupt
		 */
		ia64_set_pmc(0, t->pmcs[0]);
		ia64_srlz_d();
		t->pmcs[0] = 0UL;

		/*
		 * will replay the PMU interrupt
		 */
		if (need_irq_resend) ia64_resend_irq(IA64_PERFMON_VECTOR);

		pfm_stats[smp_processor_id()].pfm_replay_ovfl_intr_count++;
	}

	/*
	 * we just did a reload, so we reset the partial reload fields
	 */
	ctx->ctx_reload_pmcs[0] = 0UL;
	ctx->ctx_reload_pmds[0] = 0UL;

	SET_LAST_CPU(ctx, smp_processor_id());

	/*
	 * dump activation value for this PMU
	 */
	INC_ACTIVATION();
	/*
	 * record current activation for this context
	 */
	SET_ACTIVATION(ctx);

	/*
	 * establish new ownership.
	 */
	SET_PMU_OWNER(task, ctx);

	/*
	 * restore the psr.up bit. measurement
	 * is active again.
	 * no PMU interrupt can happen at this point
	 * because we still have interrupts disabled.
	 */
	if (likely(psr_up)) pfm_set_psr_up();

	/*
	 * allow concurrent access to context
	 */
	pfm_unprotect_ctx_ctxsw(ctx, flags);
}
#else /* !CONFIG_SMP */
/*
 * reload PMU state for UP kernels
 * in 2.5 we come here with interrupts disabled
 */
void
pfm_load_regs (struct task_struct *task)
{
	struct thread_struct *t;
	pfm_context_t *ctx;
	struct task_struct *owner;
	unsigned long pmd_mask, pmc_mask;
	u64 psr, psr_up;
	int need_irq_resend;

	owner = GET_PMU_OWNER();
	ctx   = PFM_GET_CTX(task);
	t     = &task->thread;
	psr   = pfm_get_psr();

	BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP));
	BUG_ON(psr & IA64_PSR_I);

	/*
	 * we restore ALL the debug registers to avoid picking up
	 * stale state.
	 *
	 * This must be done even when the task is still the owner
	 * as the registers may have been modified via ptrace()
	 * (not perfmon) by the previous task.
	 */
	if (ctx->ctx_fl_using_dbreg) {
		pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
		pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
	}

	/*
	 * retrieve saved psr.up
	 */
	psr_up = ctx->ctx_saved_psr_up;
	need_irq_resend = pmu_conf->flags & PFM_PMU_IRQ_RESEND;

	/*
	 * short path, our state is still there, just
	 * need to restore psr and we go
	 *
	 * we do not touch either PMC nor PMD. the psr is not touched
	 * by the overflow_handler. So we are safe w.r.t. to interrupt
	 * concurrency even without interrupt masking.
	 */
	if (likely(owner == task)) {
		if (likely(psr_up)) pfm_set_psr_up();
		return;
	}

	/*
	 * someone else is still using the PMU, first push it out and
	 * then we'll be able to install our stuff !
	 *
	 * Upon return, there will be no owner for the current PMU
	 */
	if (owner) pfm_lazy_save_regs(owner);

	/*
	 * To avoid leaking information to the user level when psr.sp=0,
	 * we must reload ALL implemented pmds (even the ones we don't use).
	 * In the kernel we only allow PFM_READ_PMDS on registers which
	 * we initialized or requested (sampling) so there is no risk there.
	 */
	pmd_mask = pfm_sysctl.fastctxsw ? ctx->ctx_used_pmds[0] : ctx->ctx_all_pmds[0];

	/*
	 * ALL accessible PMCs are systematically reloaded, unused registers
	 * get their default (from pfm_reset_pmu_state()) values to avoid picking
	 * up stale configuration.
	 *
	 * PMC0 is never in the mask. It is always restored separately
	 */
	pmc_mask = ctx->ctx_all_pmcs[0];

	pfm_restore_pmds(t->pmds, pmd_mask);
	pfm_restore_pmcs(t->pmcs, pmc_mask);

	/*
	 * check for pending overflow at the time the state
	 * was saved.
	 */
	if (unlikely(PMC0_HAS_OVFL(t->pmcs[0]))) {
		/*
		 * reload pmc0 with the overflow information
		 * On McKinley PMU, this will trigger a PMU interrupt
		 */
		ia64_set_pmc(0, t->pmcs[0]);
		ia64_srlz_d();

		t->pmcs[0] = 0UL;

		/*
		 * will replay the PMU interrupt
		 */
		if (need_irq_resend) ia64_resend_irq(IA64_PERFMON_VECTOR);

		pfm_stats[smp_processor_id()].pfm_replay_ovfl_intr_count++;
	}

	/*
	 * establish new ownership.
	 */
	SET_PMU_OWNER(task, ctx);

	/*
	 * restore the psr.up bit. measurement
	 * is active again.
	 * no PMU interrupt can happen at this point
	 * because we still have interrupts disabled.
	 */
	if (likely(psr_up)) pfm_set_psr_up();
}
#endif /* CONFIG_SMP */
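
/*
 * Design note: the SMP reload path keys off the (last CPU, activation
 * number) pair to detect that the PMU still holds this context's state,
 * while the UP path above can simply compare against the current PMU
 * owner; both fall back to a full PMC/PMD reload when the test fails.
 */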
/*
 * this function assumes monitoring is stopped
 */
static void
pfm_flush_pmds(struct task_struct *task, pfm_context_t *ctx)
{
	u64 pmc0;
	unsigned long mask2, val, pmd_val, ovfl_val;
	int i, can_access_pmu = 0;
	int is_self;

	/*
	 * is the caller the task being monitored (or which initiated the
	 * session for system wide measurements)
	 */
	is_self = ctx->ctx_task == task ? 1 : 0;

	/*
	 * we can access the PMU if task is the owner of the PMU state on the current CPU
	 * or if we are running on the CPU bound to the context in system-wide mode
	 * (that is not necessarily the task the context is attached to in this mode).
	 * In system-wide we always have can_access_pmu true because a task running on an
	 * invalid processor is flagged earlier in the call stack (see pfm_stop).
	 */
	can_access_pmu = (GET_PMU_OWNER() == task) || (ctx->ctx_fl_system && ctx->ctx_cpu == smp_processor_id());
	if (can_access_pmu) {
		/*
		 * Mark the PMU as not owned
		 * This will cause the interrupt handler to do nothing in case an overflow
		 * interrupt was in-flight
		 * This also guarantees that pmc0 will contain the final state
		 * It virtually gives us full control on overflow processing from that point
		 * on.
		 */
		SET_PMU_OWNER(NULL, NULL);
		DPRINT(("releasing ownership\n"));

		/*
		 * read current overflow status:
		 *
		 * we are guaranteed to read the final stable state
		 */
		ia64_srlz_d();
		pmc0 = ia64_get_pmc(0); /* slow */

		/*
		 * reset freeze bit, overflow status information destroyed
		 */
		pfm_unfreeze_pmu();
	} else {
		pmc0 = task->thread.pmcs[0];
		/*
		 * clear whatever overflow status bits there were
		 */
		task->thread.pmcs[0] = 0;
	}
	ovfl_val = pmu_conf->ovfl_val;
	/*
	 * we save all the used pmds
	 * we take care of overflows for counting PMDs
	 *
	 * XXX: sampling situation is not taken into account here
	 */
	mask2 = ctx->ctx_used_pmds[0];

	DPRINT(("is_self=%d ovfl_val=0x%lx mask2=0x%lx\n", is_self, ovfl_val, mask2));

	for (i = 0; mask2; i++, mask2>>=1) {

		/* skip non used pmds */
		if ((mask2 & 0x1) == 0) continue;

		/*
		 * can access PMU always true in system wide mode
		 */
		val = pmd_val = can_access_pmu ? ia64_get_pmd(i) : task->thread.pmds[i];

		if (PMD_IS_COUNTING(i)) {
			DPRINT(("[%d] pmd[%d] ctx_pmd=0x%lx hw_pmd=0x%lx\n",
				task->pid,
				i,
				ctx->ctx_pmds[i].val,
				val & ovfl_val));

			/*
			 * we rebuild the full 64 bit value of the counter
			 */
			val = ctx->ctx_pmds[i].val + (val & ovfl_val);

			/*
			 * now everything is in ctx_pmds[] and we need
			 * to clear the saved context from save_regs() such that
			 * pfm_read_pmds() gets the correct value
			 */
			pmd_val = 0UL;

			/*
			 * take care of overflow inline
			 */
			if (pmc0 & (1UL << i)) {
				val += 1 + ovfl_val;
				DPRINT(("[%d] pmd[%d] overflowed\n", task->pid, i));
			}
		}

		DPRINT(("[%d] ctx_pmd[%d]=0x%lx pmd_val=0x%lx\n", task->pid, i, val, pmd_val));

		if (is_self) task->thread.pmds[i] = pmd_val;

		ctx->ctx_pmds[i].val = val;
	}
}
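
/*
 * Worked example for the rebuild above, assuming 47-bit counters
 * (ovfl_val = 2^47 - 1): if ctx_pmds[i].val holds 3*2^47 from previous
 * wraps and the hardware pmd reads 0x1234, the flushed 64-bit value is
 * 3*2^47 + 0x1234, plus one more 2^47 when pmc0 still flags an
 * unprocessed overflow on that counter.
 */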
static struct irqaction perfmon_irqaction = {
	.handler = pfm_interrupt_handler,
	.flags   = IRQF_DISABLED,
	.name    = "perfmon"
};

static void
pfm_alt_save_pmu_state(void *data)
{
	struct pt_regs *regs;

	regs = task_pt_regs(current);

	DPRINT(("called\n"));

	/*
	 * should not be necessary but
	 * let's take no risk
	 */
	pfm_clear_psr_up();
	pfm_clear_psr_pp();
	ia64_psr(regs)->pp = 0;

	/*
	 * This call is required
	 * May cause a spurious interrupt on some processors
	 */
	pfm_freeze_pmu();

	ia64_srlz_d();
}

static void
pfm_alt_restore_pmu_state(void *data)
{
	struct pt_regs *regs;

	regs = task_pt_regs(current);

	DPRINT(("called\n"));

	/*
	 * put PMU back in state expected
	 * by perfmon ctxsw
	 */
	pfm_clear_psr_up();
	pfm_clear_psr_pp();
	ia64_psr(regs)->pp = 0;

	/*
	 * perfmon runs with PMU unfrozen at all times
	 */
	pfm_unfreeze_pmu();

	ia64_srlz_d();
}
static int
pfm_install_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl)
{
	int ret;
	int i;
	int reserve_cpu;

	/* some sanity checks */
	if (hdl == NULL || hdl->handler == NULL) return -EINVAL;

	/* do the easy test first */
	if (pfm_alt_intr_handler) return -EBUSY;

	/* one at a time in the install or remove, just fail the others */
	if (!spin_trylock(&pfm_alt_install_check)) {
		return -EBUSY;
	}

	/* reserve our session */
	for_each_online_cpu(reserve_cpu) {
		ret = pfm_reserve_session(NULL, 1, reserve_cpu);
		if (ret) goto cleanup_reserve;
	}

	/* save the current system wide pmu states */
	ret = on_each_cpu(pfm_alt_save_pmu_state, NULL, 0, 1);
	if (ret) {
		DPRINT(("on_each_cpu() failed: %d\n", ret));
		goto cleanup_reserve;
	}

	/* officially change to the alternate interrupt handler */
	pfm_alt_intr_handler = hdl;

	spin_unlock(&pfm_alt_install_check);

	return 0;

cleanup_reserve:
	for_each_online_cpu(i) {
		/* don't unreserve more than we reserved */
		if (i >= reserve_cpu) break;

		pfm_unreserve_session(NULL, 1, i);
	}

	spin_unlock(&pfm_alt_install_check);

	return ret;
}
EXPORT_SYMBOL_GPL(pfm_install_alt_pmu_interrupt);
static int
pfm_remove_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl)
{
	int i;
	int ret;

	if (hdl == NULL) return -EINVAL;

	/* cannot remove someone else's handler! */
	if (pfm_alt_intr_handler != hdl) return -EINVAL;

	/* one at a time in the install or remove, just fail the others */
	if (!spin_trylock(&pfm_alt_install_check)) {
		return -EBUSY;
	}

	pfm_alt_intr_handler = NULL;

	ret = on_each_cpu(pfm_alt_restore_pmu_state, NULL, 0, 1);
	if (ret) {
		DPRINT(("on_each_cpu() failed: %d\n", ret));
	}

	for_each_online_cpu(i) {
		pfm_unreserve_session(NULL, 1, i);
	}

	spin_unlock(&pfm_alt_install_check);

	return 0;
}
EXPORT_SYMBOL_GPL(pfm_remove_alt_pmu_interrupt);
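
/*
 * Usage sketch for the pair above (kernel code, hypothetical caller):
 *
 *	static pfm_intr_handler_desc_t desc = { .handler = my_pmu_intr };
 *
 *	if (pfm_install_alt_pmu_interrupt(&desc) == 0) {
 *		... drive the PMU directly ...
 *		pfm_remove_alt_pmu_interrupt(&desc);
 *	}
 *
 * Installing reserves a system-wide session on every online CPU, so
 * regular perfmon contexts cannot coexist with an alternate handler.
 */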
/*
 * perfmon initialization routine, called from the initcall() table
 */
static int init_pfm_fs(void);

static int __init
pfm_probe_pmu(void)
{
	pmu_config_t **p;
	int family;

	family = local_cpu_data->family;
	p      = pmu_confs;

	while(*p) {
		if ((*p)->probe) {
			if ((*p)->probe() == 0) goto found;
		} else if ((*p)->pmu_family == family || (*p)->pmu_family == 0xff) {
			goto found;
		}
		p++;
	}
	return -1;
found:
	pmu_conf = *p;
	return 0;
}

static struct file_operations pfm_proc_fops = {
	.open    = pfm_proc_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};

int __init
pfm_init(void)
{
	unsigned int n, n_counters, i;

	printk("perfmon: version %u.%u IRQ %u\n",
		PFM_VERSION_MAJ,
		PFM_VERSION_MIN,
		IA64_PERFMON_VECTOR);

	if (pfm_probe_pmu()) {
		printk(KERN_INFO "perfmon: disabled, there is no support for processor family %d\n",
				local_cpu_data->family);
		return -ENODEV;
	}

	/*
	 * compute the number of implemented PMD/PMC from the
	 * description tables
	 */
	n = 0;
	for (i=0; PMC_IS_LAST(i) == 0; i++) {
		if (PMC_IS_IMPL(i) == 0) continue;
		pmu_conf->impl_pmcs[i>>6] |= 1UL << (i&63);
		n++;
	}
	pmu_conf->num_pmcs = n;

	n = 0; n_counters = 0;
	for (i=0; PMD_IS_LAST(i) == 0; i++) {
		if (PMD_IS_IMPL(i) == 0) continue;
		pmu_conf->impl_pmds[i>>6] |= 1UL << (i&63);
		n++;
		if (PMD_IS_COUNTING(i)) n_counters++;
	}
	pmu_conf->num_pmds     = n;
	pmu_conf->num_counters = n_counters;

	/*
	 * sanity checks on the number of debug registers
	 */
	if (pmu_conf->use_rr_dbregs) {
		if (pmu_conf->num_ibrs > IA64_NUM_DBG_REGS) {
			printk(KERN_INFO "perfmon: unsupported number of code debug registers (%u)\n", pmu_conf->num_ibrs);
			pmu_conf = NULL;
			return -1;
		}
		if (pmu_conf->num_dbrs > IA64_NUM_DBG_REGS) {
			printk(KERN_INFO "perfmon: unsupported number of data debug registers (%u)\n", pmu_conf->num_dbrs);
			pmu_conf = NULL;
			return -1;
		}
	}

	printk("perfmon: %s PMU detected, %u PMCs, %u PMDs, %u counters (%lu bits)\n",
	       pmu_conf->pmu_name,
	       pmu_conf->num_pmcs,
	       pmu_conf->num_pmds,
	       pmu_conf->num_counters,
	       ffz(pmu_conf->ovfl_val));

	/* sanity check */
	if (pmu_conf->num_pmds >= IA64_NUM_PMD_REGS || pmu_conf->num_pmcs >= IA64_NUM_PMC_REGS) {
		printk(KERN_ERR "perfmon: not enough pmc/pmd, perfmon disabled\n");
		pmu_conf = NULL;
		return -1;
	}

	/*
	 * create /proc/perfmon (mostly for debugging purposes)
	 */
	perfmon_dir = create_proc_entry("perfmon", S_IRUGO, NULL);
	if (perfmon_dir == NULL) {
		printk(KERN_ERR "perfmon: cannot create /proc entry, perfmon disabled\n");
		pmu_conf = NULL;
		return -1;
	}
	/*
	 * install customized file operations for /proc/perfmon entry
	 */
	perfmon_dir->proc_fops = &pfm_proc_fops;

	/*
	 * create /proc/sys/kernel/perfmon (for debugging purposes)
	 */
	pfm_sysctl_header = register_sysctl_table(pfm_sysctl_root, 0);

	/*
	 * initialize all our spinlocks
	 */
	spin_lock_init(&pfm_sessions.pfs_lock);
	spin_lock_init(&pfm_buffer_fmt_lock);

	init_pfm_fs();

	for(i=0; i < NR_CPUS; i++) pfm_stats[i].pfm_ovfl_intr_cycles_min = ~0UL;

	return 0;
}

__initcall(pfm_init);
/*
 * this function is called before pfm_init()
 */
void
pfm_init_percpu (void)
{
	static int first_time=1;
	/*
	 * make sure no measurement is active
	 * (may inherit programmed PMCs from EFI).
	 */
	pfm_clear_psr_pp();
	pfm_clear_psr_up();

	/*
	 * we run with the PMU not frozen at all times
	 */
	pfm_unfreeze_pmu();

	if (first_time) {
		register_percpu_irq(IA64_PERFMON_VECTOR, &perfmon_irqaction);
		first_time=0;
	}

	ia64_setreg(_IA64_REG_CR_PMV, IA64_PERFMON_VECTOR);
	ia64_srlz_d();
}
/*
 * used for debug purposes only
 */
void
dump_pmu_state(const char *from)
{
	struct task_struct *task;
	struct thread_struct *t;
	struct pt_regs *regs;
	pfm_context_t *ctx;
	unsigned long psr, dcr, info, flags;
	int i, this_cpu;

	local_irq_save(flags);

	this_cpu = smp_processor_id();
	regs     = task_pt_regs(current);
	info     = PFM_CPUINFO_GET();
	dcr      = ia64_getreg(_IA64_REG_CR_DCR);

	if (info == 0 && ia64_psr(regs)->pp == 0 && (dcr & IA64_DCR_PP) == 0) {
		local_irq_restore(flags);
		return;
	}

	printk("CPU%d from %s() current [%d] iip=0x%lx %s\n",
		this_cpu,
		from,
		current->pid,
		regs->cr_iip,
		current->comm);

	task = GET_PMU_OWNER();
	ctx  = GET_PMU_CTX();

	printk("->CPU%d owner [%d] ctx=%p\n", this_cpu, task ? task->pid : -1, ctx);

	psr = pfm_get_psr();

	printk("->CPU%d pmc0=0x%lx psr.pp=%d psr.up=%d dcr.pp=%d syst_info=0x%lx user_psr.up=%d user_psr.pp=%d\n",
		this_cpu,
		ia64_get_pmc(0),
		psr & IA64_PSR_PP ? 1 : 0,
		psr & IA64_PSR_UP ? 1 : 0,
		dcr & IA64_DCR_PP ? 1 : 0,
		info,
		ia64_psr(regs)->up,
		ia64_psr(regs)->pp);

	ia64_psr(regs)->up = 0;
	ia64_psr(regs)->pp = 0;

	t = &current->thread;

	for (i=1; PMC_IS_LAST(i) == 0; i++) {
		if (PMC_IS_IMPL(i) == 0) continue;
		printk("->CPU%d pmc[%d]=0x%lx thread_pmc[%d]=0x%lx\n", this_cpu, i, ia64_get_pmc(i), i, t->pmcs[i]);
	}

	for (i=1; PMD_IS_LAST(i) == 0; i++) {
		if (PMD_IS_IMPL(i) == 0) continue;
		printk("->CPU%d pmd[%d]=0x%lx thread_pmd[%d]=0x%lx\n", this_cpu, i, ia64_get_pmd(i), i, t->pmds[i]);
	}

	if (ctx) {
		printk("->CPU%d ctx_state=%d vaddr=%p addr=%p fd=%d ctx_task=[%d] saved_psr_up=0x%lx\n",
				this_cpu,
				ctx->ctx_state,
				ctx->ctx_smpl_vaddr,
				ctx->ctx_smpl_hdr,
				ctx->ctx_fd,
				ctx->ctx_task ? ctx->ctx_task->pid : -1,
				ctx->ctx_saved_psr_up);
	}
	local_irq_restore(flags);
}
/*
 * called from process.c:copy_thread(). task is new child.
 */
void
pfm_inherit(struct task_struct *task, struct pt_regs *regs)
{
	struct thread_struct *thread;

	DPRINT(("perfmon: pfm_inherit clearing state for [%d]\n", task->pid));

	thread = &task->thread;

	/*
	 * cut links inherited from parent (current)
	 */
	thread->pfm_context = NULL;

	PFM_SET_WORK_PENDING(task, 0);

	/*
	 * the psr bits are already set properly in copy_threads()
	 */
}
#else  /* !CONFIG_PERFMON */
asmlinkage long
sys_perfmonctl (int fd, int cmd, void *arg, int count)
{
	return -ENOSYS;
}
#endif /* CONFIG_PERFMON */