/*
 * This file implements the perfmon-2 subsystem which is used
 * to program the IA-64 Performance Monitoring Unit (PMU).
 *
 * The initial version of perfmon.c was written by
 * Ganesh Venkitachalam, IBM Corp.
 *
 * Then it was modified for perfmon-1.x by Stephane Eranian and
 * David Mosberger, Hewlett Packard Co.
 *
 * Version Perfmon-2.x is a rewrite of perfmon-1.x
 * by Stephane Eranian, Hewlett Packard Co.
 *
 * Copyright (C) 1999-2005 Hewlett Packard Co
 *               Stephane Eranian <eranian@hpl.hp.com>
 *               David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * More information about perfmon available at:
 *     http://www.hpl.hp.com/research/linux/perfmon
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/smp_lock.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/sysctl.h>
#include <linux/list.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/vfs.h>
#include <linux/pagemap.h>
#include <linux/mount.h>
#include <linux/bitops.h>

#include <asm/errno.h>
#include <asm/intrinsics.h>
#include <asm/perfmon.h>
#include <asm/processor.h>
#include <asm/signal.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/delay.h>
/*
 * perfmon context state
 */
#define PFM_CTX_UNLOADED    1 /* context is not loaded onto any task */
#define PFM_CTX_LOADED      2 /* context is loaded onto a task */
#define PFM_CTX_MASKED      3 /* context is loaded but monitoring is masked due to overflow */
#define PFM_CTX_ZOMBIE      4 /* owner of the context is closing it */

#define PFM_INVALID_ACTIVATION    (~0UL)
/*
 * depth of message queue
 */
#define PFM_MAX_MSGS        32
#define PFM_CTXQ_EMPTY(g)   ((g)->ctx_msgq_head == (g)->ctx_msgq_tail)
/*
 * type of a PMU register (bitmask).
 * bitmask structure:
 *     bit0   : register implemented
 *     bit1   : end marker
 *     bit4   : pmc has pmc.pm
 *     bit5   : pmc controls a counter (has pmc.oi), pmd is used as counter
 *     bit6-7 : register type
 */
#define PFM_REG_NOTIMPL     0x0                      /* not implemented at all */
#define PFM_REG_IMPL        0x1                      /* register implemented */
#define PFM_REG_END         0x2                      /* end marker */
#define PFM_REG_MONITOR     (0x1<<4|PFM_REG_IMPL)    /* a PMC with a pmc.pm field only */
#define PFM_REG_COUNTING    (0x2<<4|PFM_REG_MONITOR) /* a monitor + pmc.oi + PMD used as a counter */
#define PFM_REG_CONTROL     (0x4<<4|PFM_REG_IMPL)    /* PMU control register */
#define PFM_REG_CONFIG      (0x8<<4|PFM_REG_IMPL)    /* configuration register */
#define PFM_REG_BUFFER      (0xc<<4|PFM_REG_IMPL)    /* PMD used as buffer */
#define PMC_IS_LAST(i)    (pmu_conf->pmc_desc[i].type & PFM_REG_END)
#define PMD_IS_LAST(i)    (pmu_conf->pmd_desc[i].type & PFM_REG_END)

#define PMC_OVFL_NOTIFY(ctx, i)    ((ctx)->ctx_pmds[i].flags & PFM_REGFL_OVFL_NOTIFY)
/* i assumed unsigned */
#define PMC_IS_IMPL(i)    (i< PMU_MAX_PMCS && (pmu_conf->pmc_desc[i].type & PFM_REG_IMPL))
#define PMD_IS_IMPL(i)    (i< PMU_MAX_PMDS && (pmu_conf->pmd_desc[i].type & PFM_REG_IMPL))
/* XXX: these assume that register i is implemented */
#define PMD_IS_COUNTING(i) ((pmu_conf->pmd_desc[i].type & PFM_REG_COUNTING) == PFM_REG_COUNTING)
#define PMC_IS_COUNTING(i) ((pmu_conf->pmc_desc[i].type & PFM_REG_COUNTING) == PFM_REG_COUNTING)
#define PMC_IS_MONITOR(i)  ((pmu_conf->pmc_desc[i].type & PFM_REG_MONITOR)  == PFM_REG_MONITOR)
#define PMC_IS_CONTROL(i)  ((pmu_conf->pmc_desc[i].type & PFM_REG_CONTROL)  == PFM_REG_CONTROL)
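/*
 * Worked example of the subset tests above (derived from the bitmask
 * definitions, not additional code): PFM_REG_COUNTING expands to
 * (0x2<<4 | 0x1<<4 | 0x1) = 0x31, so
 * (type & PFM_REG_COUNTING) == PFM_REG_COUNTING only holds when bit0
 * (implemented), bit4 (has pmc.pm) and bit5 (controls a counter) are all
 * set. A plain monitor of type 0x11 passes PMC_IS_MONITOR() but fails
 * PMC_IS_COUNTING().
 */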
#define PMC_DFL_VAL(i)     pmu_conf->pmc_desc[i].default_value
#define PMC_RSVD_MASK(i)   pmu_conf->pmc_desc[i].reserved_mask
#define PMD_PMD_DEP(i)     pmu_conf->pmd_desc[i].dep_pmd[0]
#define PMC_PMD_DEP(i)     pmu_conf->pmc_desc[i].dep_pmd[0]

#define PFM_NUM_IBRS       IA64_NUM_DBG_REGS
#define PFM_NUM_DBRS       IA64_NUM_DBG_REGS
#define CTX_OVFL_NOBLOCK(c)    ((c)->ctx_fl_block == 0)
#define CTX_HAS_SMPL(c)        ((c)->ctx_fl_is_sampling)
#define PFM_CTX_TASK(h)        (h)->ctx_task

#define PMU_PMC_OI             5 /* position of pmc.oi bit */
/* XXX: does not support more than 64 PMDs */
#define CTX_USED_PMD(ctx, mask)    (ctx)->ctx_used_pmds[0] |= (mask)
#define CTX_IS_USED_PMD(ctx, c)    (((ctx)->ctx_used_pmds[0] & (1UL << (c))) != 0UL)

#define CTX_USED_MONITOR(ctx, mask) (ctx)->ctx_used_monitors[0] |= (mask)
#define CTX_USED_IBR(ctx,n)    (ctx)->ctx_used_ibrs[(n)>>6] |= 1UL<< ((n) % 64)
#define CTX_USED_DBR(ctx,n)    (ctx)->ctx_used_dbrs[(n)>>6] |= 1UL<< ((n) % 64)
#define CTX_USES_DBREGS(ctx)   (((pfm_context_t *)(ctx))->ctx_fl_using_dbreg==1)
#define PFM_CODE_RR    0    /* requesting code range restriction */
#define PFM_DATA_RR    1    /* requesting data range restriction */
#define PFM_CPUINFO_CLEAR(v)    pfm_get_cpu_var(pfm_syst_info) &= ~(v)
#define PFM_CPUINFO_SET(v)      pfm_get_cpu_var(pfm_syst_info) |= (v)
#define PFM_CPUINFO_GET()       pfm_get_cpu_var(pfm_syst_info)

#define RDEP(x)    (1UL<<(x))
/*
 * context protection macros
 * in SMP:
 *     - we need to protect against CPU concurrency (spin_lock)
 *     - we need to protect against PMU overflow interrupts (local_irq_disable)
 * in UP:
 *     - we need to protect against PMU overflow interrupts (local_irq_disable)
 *
 * spin_lock_irqsave()/spin_unlock_irqrestore():
 *     in SMP: local_irq_disable + spin_lock
 *     in UP : local_irq_disable
 *
 * spin_lock()/spin_unlock():
 *     in UP : removed automatically
 *     in SMP: protect against context accesses from other CPU. interrupts
 *             are not masked. This is useful for the PMU interrupt handler
 *             because we know we will not get PMU concurrency in that code.
 */
#define PROTECT_CTX(c, f) \
	do { \
		DPRINT(("spinlock_irq_save ctx %p by [%d]\n", c, current->pid)); \
		spin_lock_irqsave(&(c)->ctx_lock, f); \
		DPRINT(("spinlocked ctx %p by [%d]\n", c, current->pid)); \
	} while(0)

#define UNPROTECT_CTX(c, f) \
	do { \
		DPRINT(("spinlock_irq_restore ctx %p by [%d]\n", c, current->pid)); \
		spin_unlock_irqrestore(&(c)->ctx_lock, f); \
	} while(0)

#define PROTECT_CTX_NOPRINT(c, f) \
	do { \
		spin_lock_irqsave(&(c)->ctx_lock, f); \
	} while(0)

#define UNPROTECT_CTX_NOPRINT(c, f) \
	do { \
		spin_unlock_irqrestore(&(c)->ctx_lock, f); \
	} while(0)

#define PROTECT_CTX_NOIRQ(c) \
	do { \
		spin_lock(&(c)->ctx_lock); \
	} while(0)

#define UNPROTECT_CTX_NOIRQ(c) \
	do { \
		spin_unlock(&(c)->ctx_lock); \
	} while(0)
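/*
 * Usage sketch (illustrative only, not additional code): command-level
 * paths bracket every context access with the IRQ-masking pair, while
 * code that already runs with interrupts off, such as the PMU interrupt
 * handler, can take the cheaper NOIRQ variant:
 *
 *	unsigned long flags;
 *	PROTECT_CTX(ctx, flags);
 *	(read or modify ctx state here)
 *	UNPROTECT_CTX(ctx, flags);
 */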
#ifdef CONFIG_SMP

#define GET_ACTIVATION()    pfm_get_cpu_var(pmu_activation_number)
#define INC_ACTIVATION()    pfm_get_cpu_var(pmu_activation_number)++
#define SET_ACTIVATION(c)   (c)->ctx_last_activation = GET_ACTIVATION()

#else /* !CONFIG_SMP */
#define SET_ACTIVATION(t)   do {} while(0)
#define GET_ACTIVATION(t)   do {} while(0)
#define INC_ACTIVATION(t)   do {} while(0)
#endif /* CONFIG_SMP */
#define SET_PMU_OWNER(t, c)    do { pfm_get_cpu_var(pmu_owner) = (t); pfm_get_cpu_var(pmu_ctx) = (c); } while(0)
#define GET_PMU_OWNER()        pfm_get_cpu_var(pmu_owner)
#define GET_PMU_CTX()          pfm_get_cpu_var(pmu_ctx)

#define LOCK_PFS(g)            spin_lock_irqsave(&pfm_sessions.pfs_lock, g)
#define UNLOCK_PFS(g)          spin_unlock_irqrestore(&pfm_sessions.pfs_lock, g)

#define PFM_REG_RETFLAG_SET(flags, val)    do { flags &= ~PFM_REG_RETFL_MASK; flags |= (val); } while(0)
/*
 * cmp0 must be the value of pmc0
 */
#define PMC0_HAS_OVFL(cmp0)    (cmp0 & ~0x1UL)
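/*
 * Example (IA-64 PMC0 layout): bit 0 is the freeze bit and the bits
 * above it are the per-counter overflow flags (bit 4 = PMD4 overflowed,
 * etc.), so masking off bit 0 and testing the remainder detects a
 * genuine overflow: PMC0_HAS_OVFL(0x11) is non-zero (frozen + PMD4
 * overflow) while PMC0_HAS_OVFL(0x1) is zero (frozen, no overflow,
 * i.e. spurious).
 */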
#define PFMFS_MAGIC 0xa0b4d889

/*
 * debugging
 */
#define PFM_DEBUGGING 1
#ifdef PFM_DEBUGGING
#define DPRINT(a) \
	do { \
		if (unlikely(pfm_sysctl.debug >0)) { printk("%s.%d: CPU%d [%d] ", __FUNCTION__, __LINE__, smp_processor_id(), current->pid); printk a; } \
	} while (0)

#define DPRINT_ovfl(a) \
	do { \
		if (unlikely(pfm_sysctl.debug > 0 && pfm_sysctl.debug_ovfl >0)) { printk("%s.%d: CPU%d [%d] ", __FUNCTION__, __LINE__, smp_processor_id(), current->pid); printk a; } \
	} while (0)
#endif
/*
 * 64-bit software counter structure
 *
 * the next_reset_type is applied to the next call to pfm_reset_regs()
 */
typedef struct {
	unsigned long	val;		/* virtual 64bit counter value */
	unsigned long	lval;		/* last reset value */
	unsigned long	long_reset;	/* reset value on sampling overflow */
	unsigned long	short_reset;	/* reset value on overflow */
	unsigned long	reset_pmds[4];	/* which other pmds to reset when this counter overflows */
	unsigned long	smpl_pmds[4];	/* which pmds are accessed when counter overflows */
	unsigned long	seed;		/* seed for random-number generator */
	unsigned long	mask;		/* mask for random-number generator */
	unsigned int	flags;		/* notify/do not notify */
	unsigned long	eventid;	/* overflow event identifier */
} pfm_counter_t;
/*
 * context flags
 */
typedef struct {
	unsigned int block:1;		/* when 1, task will be blocked on user notifications */
	unsigned int system:1;		/* do system wide monitoring */
	unsigned int using_dbreg:1;	/* using range restrictions (debug registers) */
	unsigned int is_sampling:1;	/* true if using a custom format */
	unsigned int excl_idle:1;	/* exclude idle task in system wide session */
	unsigned int going_zombie:1;	/* context is zombie (MASKED+blocking) */
	unsigned int trap_reason:2;	/* reason for going into pfm_handle_work() */
	unsigned int no_msg:1;		/* no message sent on overflow */
	unsigned int can_restart:1;	/* allowed to issue a PFM_RESTART */
	unsigned int reserved:22;
} pfm_context_flags_t;
#define PFM_TRAP_REASON_NONE     0x0    /* default value */
#define PFM_TRAP_REASON_BLOCK    0x1    /* we need to block on overflow */
#define PFM_TRAP_REASON_RESET    0x2    /* we need to reset PMDs */
/*
 * perfmon context: encapsulates all the state of a monitoring session
 */
typedef struct pfm_context {
	spinlock_t		ctx_lock;		/* context protection */

	pfm_context_flags_t	ctx_flags;		/* bitmask of flags (block reason incl.) */
	unsigned int		ctx_state;		/* state: active/inactive (no bitfield) */

	struct task_struct	*ctx_task;		/* task to which context is attached */

	unsigned long		ctx_ovfl_regs[4];	/* which registers overflowed (notification) */

	struct semaphore	ctx_restart_sem;	/* use for blocking notification mode */

	unsigned long		ctx_used_pmds[4];	/* bitmask of PMD used */
	unsigned long		ctx_all_pmds[4];	/* bitmask of all accessible PMDs */
	unsigned long		ctx_reload_pmds[4];	/* bitmask of force reload PMD on ctxsw in */

	unsigned long		ctx_all_pmcs[4];	/* bitmask of all accessible PMCs */
	unsigned long		ctx_reload_pmcs[4];	/* bitmask of force reload PMC on ctxsw in */
	unsigned long		ctx_used_monitors[4];	/* bitmask of monitor PMC being used */

	unsigned long		ctx_pmcs[IA64_NUM_PMC_REGS];	/* saved copies of PMC values */

	unsigned int		ctx_used_ibrs[1];	/* bitmask of used IBR (speedup ctxsw in) */
	unsigned int		ctx_used_dbrs[1];	/* bitmask of used DBR (speedup ctxsw in) */
	unsigned long		ctx_dbrs[IA64_NUM_DBG_REGS];	/* DBR values (cache) when not loaded */
	unsigned long		ctx_ibrs[IA64_NUM_DBG_REGS];	/* IBR values (cache) when not loaded */

	pfm_counter_t		ctx_pmds[IA64_NUM_PMD_REGS];	/* software state for PMDS */

	u64			ctx_saved_psr_up;	/* only contains psr.up value */

	unsigned long		ctx_last_activation;	/* context last activation number for last_cpu */
	unsigned int		ctx_last_cpu;		/* CPU id of current or last CPU used (SMP only) */
	unsigned int		ctx_cpu;		/* cpu to which perfmon is applied (system wide) */

	int			ctx_fd;			/* file descriptor used by this context */
	pfm_ovfl_arg_t		ctx_ovfl_arg;		/* argument to custom buffer format handler */

	pfm_buffer_fmt_t	*ctx_buf_fmt;		/* buffer format callbacks */
	void			*ctx_smpl_hdr;		/* points to sampling buffer header kernel vaddr */
	unsigned long		ctx_smpl_size;		/* size of sampling buffer */
	void			*ctx_smpl_vaddr;	/* user level virtual address of smpl buffer */

	wait_queue_head_t	ctx_msgq_wait;
	pfm_msg_t		ctx_msgq[PFM_MAX_MSGS];
	int			ctx_msgq_head;
	int			ctx_msgq_tail;
	struct fasync_struct	*ctx_async_queue;

	wait_queue_head_t	ctx_zombieq;		/* termination cleanup wait queue */
} pfm_context_t;
/*
 * magic number used to verify that structure is really
 * a perfmon context
 */
#define PFM_IS_FILE(f)    ((f)->f_op == &pfm_file_ops)

#define PFM_GET_CTX(t)    ((pfm_context_t *)(t)->thread.pfm_context)
#ifdef CONFIG_SMP
#define SET_LAST_CPU(ctx, v)    (ctx)->ctx_last_cpu = (v)
#define GET_LAST_CPU(ctx)       (ctx)->ctx_last_cpu
#else
#define SET_LAST_CPU(ctx, v)    do {} while(0)
#define GET_LAST_CPU(ctx)       do {} while(0)
#endif
#define ctx_fl_block          ctx_flags.block
#define ctx_fl_system         ctx_flags.system
#define ctx_fl_using_dbreg    ctx_flags.using_dbreg
#define ctx_fl_is_sampling    ctx_flags.is_sampling
#define ctx_fl_excl_idle      ctx_flags.excl_idle
#define ctx_fl_going_zombie   ctx_flags.going_zombie
#define ctx_fl_trap_reason    ctx_flags.trap_reason
#define ctx_fl_no_msg         ctx_flags.no_msg
#define ctx_fl_can_restart    ctx_flags.can_restart

#define PFM_SET_WORK_PENDING(t, v)    do { (t)->thread.pfm_needs_checking = v; } while(0);
#define PFM_GET_WORK_PENDING(t)       (t)->thread.pfm_needs_checking
/*
 * global information about all sessions
 * mostly used to synchronize between system wide and per-process
 */
typedef struct {
	spinlock_t		pfs_lock;		   /* lock the structure */

	unsigned int		pfs_task_sessions;	   /* number of per task sessions */
	unsigned int		pfs_sys_sessions;	   /* number of per system wide sessions */
	unsigned int		pfs_sys_use_dbregs;	   /* incremented when a system wide session uses debug regs */
	unsigned int		pfs_ptrace_use_dbregs;	   /* incremented when a process uses debug regs */
	struct task_struct	*pfs_sys_session[NR_CPUS]; /* point to task owning a system-wide session */
} pfm_session_t;
/*
 * information about a PMC or PMD.
 * dep_pmd[]: a bitmask of dependent PMD registers
 * dep_pmc[]: a bitmask of dependent PMC registers
 */
typedef int (*pfm_reg_check_t)(struct task_struct *task, pfm_context_t *ctx, unsigned int cnum, unsigned long *val, struct pt_regs *regs);

typedef struct {
	unsigned int		type;
	int			pm_pos;
	unsigned long		default_value;	/* power-on default value */
	unsigned long		reserved_mask;	/* bitmask of reserved bits */
	pfm_reg_check_t		read_check;
	pfm_reg_check_t		write_check;
	unsigned long		dep_pmd[4];
	unsigned long		dep_pmc[4];
} pfm_reg_desc_t;
/* assume cnum is a valid monitor */
#define PMC_PM(cnum, val)    (((val) >> (pmu_conf->pmc_desc[cnum].pm_pos)) & 0x1)
/*
 * This structure is initialized at boot time and contains
 * a description of the PMU main characteristics.
 *
 * If the probe function is defined, detection is based
 * on its return value:
 *     - 0 means recognized PMU
 *     - anything else means not supported
 * When the probe function is not defined, then the pmu_family field
 * is used and it must match the host CPU family such that:
 *     - cpu->family & config->pmu_family != 0
 */
typedef struct {
	unsigned long	ovfl_val;	/* overflow value for counters */

	pfm_reg_desc_t	*pmc_desc;	/* detailed PMC register dependencies descriptions */
	pfm_reg_desc_t	*pmd_desc;	/* detailed PMD register dependencies descriptions */

	unsigned int	num_pmcs;	/* number of PMCS: computed at init time */
	unsigned int	num_pmds;	/* number of PMDS: computed at init time */
	unsigned long	impl_pmcs[4];	/* bitmask of implemented PMCS */
	unsigned long	impl_pmds[4];	/* bitmask of implemented PMDS */

	char		*pmu_name;	/* PMU family name */
	unsigned int	pmu_family;	/* cpuid family pattern used to identify pmu */
	unsigned int	flags;		/* pmu specific flags */
	unsigned int	num_ibrs;	/* number of IBRS: computed at init time */
	unsigned int	num_dbrs;	/* number of DBRS: computed at init time */
	unsigned int	num_counters;	/* PMC/PMD counting pairs : computed at init time */
	int		(*probe)(void);	/* customized probe routine */
	unsigned int	use_rr_dbregs:1; /* set if debug registers used for range restriction */
} pmu_config_t;
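/*
 * Illustration of the detection rule above (an assumption about the
 * generic table, see perfmon_generic.h): an entry may leave .probe NULL
 * and rely on the family match, e.g. a pmu_family of 0xff matches any
 * CPU family since (cpu->family & 0xff) != 0, which is why the catch-all
 * generic entry must come last in pmu_confs[].
 */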
#define PFM_PMU_IRQ_RESEND    1    /* PMU needs explicit IRQ resend */
/*
 * debug register related type definitions
 */
typedef struct {
	unsigned long ibr_mask:56;
	unsigned long ibr_plm:4;
	unsigned long ibr_ig:3;
	unsigned long ibr_x:1;
} ibr_mask_reg_t;

typedef struct {
	unsigned long dbr_mask:56;
	unsigned long dbr_plm:4;
	unsigned long dbr_ig:2;
	unsigned long dbr_w:1;
	unsigned long dbr_r:1;
} dbr_mask_reg_t;
/*
 * perfmon command descriptions
 */
typedef struct {
	int		(*cmd_func)(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
	char		*cmd_name;
	int		cmd_flags;
	unsigned int	cmd_narg;
	size_t		cmd_argsize;
	int		(*cmd_getsize)(void *arg, size_t *sz);
} pfm_cmd_desc_t;
#define PFM_CMD_FD          0x01    /* command requires a file descriptor */
#define PFM_CMD_ARG_READ    0x02    /* command must read argument(s) */
#define PFM_CMD_ARG_RW      0x04    /* command must read/write argument(s) */
#define PFM_CMD_STOP        0x08    /* command does not work on zombie context */

#define PFM_CMD_NAME(cmd)        pfm_cmd_tab[(cmd)].cmd_name
#define PFM_CMD_READ_ARG(cmd)    (pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_ARG_READ)
#define PFM_CMD_RW_ARG(cmd)      (pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_ARG_RW)
#define PFM_CMD_USE_FD(cmd)      (pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_FD)
#define PFM_CMD_STOPPED(cmd)     (pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_STOP)

#define PFM_CMD_ARG_MANY    -1    /* cannot be zero */
typedef struct {
	unsigned long pfm_spurious_ovfl_intr_count;	/* keep track of spurious ovfl interrupts */
	unsigned long pfm_replay_ovfl_intr_count;	/* keep track of replayed ovfl interrupts */
	unsigned long pfm_ovfl_intr_count;		/* keep track of ovfl interrupts */
	unsigned long pfm_ovfl_intr_cycles;		/* cycles spent processing ovfl interrupts */
	unsigned long pfm_ovfl_intr_cycles_min;		/* min cycles spent processing ovfl interrupts */
	unsigned long pfm_ovfl_intr_cycles_max;		/* max cycles spent processing ovfl interrupts */
	unsigned long pfm_smpl_handler_calls;
	unsigned long pfm_smpl_handler_cycles;
	char pad[SMP_CACHE_BYTES] ____cacheline_aligned;
} pfm_stats_t;
/*
 * perfmon internal variables
 */
static pfm_stats_t	pfm_stats[NR_CPUS];
static pfm_session_t	pfm_sessions;	/* global sessions information */

static spinlock_t pfm_alt_install_check = SPIN_LOCK_UNLOCKED;
static pfm_intr_handler_desc_t *pfm_alt_intr_handler;

static struct proc_dir_entry	*perfmon_dir;
static pfm_uuid_t		pfm_null_uuid = {0,};

static spinlock_t		pfm_buffer_fmt_lock;
static LIST_HEAD(pfm_buffer_fmt_list);

static pmu_config_t		*pmu_conf;

/* sysctl() controls */
pfm_sysctl_t pfm_sysctl;
EXPORT_SYMBOL(pfm_sysctl);
static ctl_table pfm_ctl_table[]={
	{1, "debug", &pfm_sysctl.debug, sizeof(int), 0666, NULL, &proc_dointvec, NULL,},
	{2, "debug_ovfl", &pfm_sysctl.debug_ovfl, sizeof(int), 0666, NULL, &proc_dointvec, NULL,},
	{3, "fastctxsw", &pfm_sysctl.fastctxsw, sizeof(int), 0600, NULL, &proc_dointvec, NULL,},
	{4, "expert_mode", &pfm_sysctl.expert_mode, sizeof(int), 0600, NULL, &proc_dointvec, NULL,},
	{0,},
};
static ctl_table pfm_sysctl_dir[] = {
	{1, "perfmon", NULL, 0, 0755, pfm_ctl_table, },
	{0,},
};
static ctl_table pfm_sysctl_root[] = {
	{1, "kernel", NULL, 0, 0755, pfm_sysctl_dir, },
	{0,},
};
static struct ctl_table_header *pfm_sysctl_header;
static int pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
static int pfm_flush(struct file *filp);

#define pfm_get_cpu_var(v)        __ia64_per_cpu_var(v)
#define pfm_get_cpu_data(a,b)     per_cpu(a, b)
static inline void
pfm_put_task(struct task_struct *task)
{
	if (task != current) put_task_struct(task);
}
static inline void
pfm_set_task_notify(struct task_struct *task)
{
	struct thread_info *info;

	info = (struct thread_info *) ((char *) task + IA64_TASK_SIZE);
	set_bit(TIF_NOTIFY_RESUME, &info->flags);
}
static inline void
pfm_clear_task_notify(void)
{
	clear_thread_flag(TIF_NOTIFY_RESUME);
}
static inline void
pfm_reserve_page(unsigned long a)
{
	SetPageReserved(vmalloc_to_page((void *)a));
}

static inline void
pfm_unreserve_page(unsigned long a)
{
	ClearPageReserved(vmalloc_to_page((void*)a));
}
static inline unsigned long
pfm_protect_ctx_ctxsw(pfm_context_t *x)
{
	spin_lock(&(x)->ctx_lock);
	return 0UL;
}

static inline unsigned long
pfm_unprotect_ctx_ctxsw(pfm_context_t *x, unsigned long f)
{
	spin_unlock(&(x)->ctx_lock);
}
static inline unsigned int
pfm_do_munmap(struct mm_struct *mm, unsigned long addr, size_t len, int acct)
{
	return do_munmap(mm, addr, len);
}
static inline unsigned long
pfm_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags, unsigned long exec)
{
	return get_unmapped_area(file, addr, len, pgoff, flags);
}
static struct super_block *
pfmfs_get_sb(struct file_system_type *fs_type, int flags, const char *dev_name, void *data)
{
	return get_sb_pseudo(fs_type, "pfm:", NULL, PFMFS_MAGIC);
}
static struct file_system_type pfm_fs_type = {
	.name    = "pfmfs",
	.get_sb  = pfmfs_get_sb,
	.kill_sb = kill_anon_super,
};
DEFINE_PER_CPU(unsigned long, pfm_syst_info);
DEFINE_PER_CPU(struct task_struct *, pmu_owner);
DEFINE_PER_CPU(pfm_context_t *, pmu_ctx);
DEFINE_PER_CPU(unsigned long, pmu_activation_number);
EXPORT_PER_CPU_SYMBOL_GPL(pfm_syst_info);

/* forward declaration */
static struct file_operations pfm_file_ops;
/*
 * forward declarations
 */
static void pfm_lazy_save_regs (struct task_struct *ta);

void dump_pmu_state(const char *);
static int pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
#include "perfmon_itanium.h"
#include "perfmon_mckinley.h"
#include "perfmon_generic.h"

static pmu_config_t *pmu_confs[]={
	&pmu_conf_mck,
	&pmu_conf_ita,
	&pmu_conf_gen, /* must be last */
	NULL
};
static int pfm_end_notify_user(pfm_context_t *ctx);
static inline void
pfm_clear_psr_pp(void)
{
	ia64_rsm(IA64_PSR_PP);
	ia64_srlz_i();
}

static inline void
pfm_set_psr_pp(void)
{
	ia64_ssm(IA64_PSR_PP);
	ia64_srlz_i();
}

static inline void
pfm_clear_psr_up(void)
{
	ia64_rsm(IA64_PSR_UP);
	ia64_srlz_i();
}

static inline void
pfm_set_psr_up(void)
{
	ia64_ssm(IA64_PSR_UP);
	ia64_srlz_i();
}
static inline unsigned long
pfm_get_psr(void)
{
	unsigned long tmp;
	tmp = ia64_getreg(_IA64_REG_PSR);
	ia64_srlz_i();
	return tmp;
}

static inline void
pfm_set_psr_l(unsigned long val)
{
	ia64_setreg(_IA64_REG_PSR_L, val);
	ia64_srlz_i();
}
static inline void
pfm_unfreeze_pmu(void)
{
	ia64_set_pmc(0,0);
	ia64_srlz_d();
}
static inline void
pfm_restore_ibrs(unsigned long *ibrs, unsigned int nibrs)
{
	int i;

	for (i=0; i < nibrs; i++) {
		ia64_set_ibr(i, ibrs[i]);
		ia64_dv_serialize_instruction();
	}
	ia64_srlz_i();
}
static inline void
pfm_restore_dbrs(unsigned long *dbrs, unsigned int ndbrs)
{
	int i;

	for (i=0; i < ndbrs; i++) {
		ia64_set_dbr(i, dbrs[i]);
		ia64_dv_serialize_data();
	}
	ia64_srlz_d();
}
/*
 * PMD[i] must be a counter. no check is made
 */
static inline unsigned long
pfm_read_soft_counter(pfm_context_t *ctx, int i)
{
	return ctx->ctx_pmds[i].val + (ia64_get_pmd(i) & pmu_conf->ovfl_val);
}
/*
 * PMD[i] must be a counter. no check is made
 */
static inline void
pfm_write_soft_counter(pfm_context_t *ctx, int i, unsigned long val)
{
	unsigned long ovfl_val = pmu_conf->ovfl_val;

	ctx->ctx_pmds[i].val = val & ~ovfl_val;
	/*
	 * writing to unimplemented part is ignored, so we do not need to
	 * mask off top part
	 */
	ia64_set_pmd(i, val & ovfl_val);
}
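/*
 * Worked example of the split, assuming 47-bit hardware counters
 * (ovfl_val = (1UL<<47)-1; the actual width is PMU-specific and comes
 * from pmu_conf): writing val = 0x0000900000000123 stores
 * ctx_pmds[i].val = 0x0000900000000000 and programs the hardware PMD
 * with 0x123. pfm_read_soft_counter() later adds the live low bits back,
 * rebuilding the full 64-bit virtual counter value.
 */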
static pfm_msg_t *
pfm_get_new_msg(pfm_context_t *ctx)
{
	int idx, next;

	next = (ctx->ctx_msgq_tail+1) % PFM_MAX_MSGS;

	DPRINT(("ctx_fd=%p head=%d tail=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail));
	if (next == ctx->ctx_msgq_head) return NULL;

	idx = ctx->ctx_msgq_tail;
	ctx->ctx_msgq_tail = next;

	DPRINT(("ctx=%p head=%d tail=%d msg=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail, idx));

	return ctx->ctx_msgq+idx;
}
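/*
 * Note on capacity: because a full queue is detected by next ==
 * ctx_msgq_head before the tail slot is consumed, the ring holds at most
 * PFM_MAX_MSGS-1 pending messages; one slot always stays free to
 * distinguish full from empty.
 */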
static pfm_msg_t *
pfm_get_next_msg(pfm_context_t *ctx)
{
	pfm_msg_t *msg;

	DPRINT(("ctx=%p head=%d tail=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail));

	if (PFM_CTXQ_EMPTY(ctx)) return NULL;

	/*
	 * get oldest message
	 */
	msg = ctx->ctx_msgq+ctx->ctx_msgq_head;

	/*
	 * and move forward
	 */
	ctx->ctx_msgq_head = (ctx->ctx_msgq_head+1) % PFM_MAX_MSGS;

	DPRINT(("ctx=%p head=%d tail=%d type=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail, msg->pfm_gen_msg.msg_type));

	return msg;
}
static void
pfm_reset_msgq(pfm_context_t *ctx)
{
	ctx->ctx_msgq_head = ctx->ctx_msgq_tail = 0;
	DPRINT(("ctx=%p msgq reset\n", ctx));
}
static void *
pfm_rvmalloc(unsigned long size)
{
	void *mem;
	unsigned long addr;

	size = PAGE_ALIGN(size);
	mem  = vmalloc(size);
	if (mem) {
		//printk("perfmon: CPU%d pfm_rvmalloc(%ld)=%p\n", smp_processor_id(), size, mem);
		memset(mem, 0, size);
		addr = (unsigned long)mem;
		while (size > 0) {
			pfm_reserve_page(addr);
			addr += PAGE_SIZE;
			size -= PAGE_SIZE;
		}
	}
	return mem;
}
static void
pfm_rvfree(void *mem, unsigned long size)
{
	unsigned long addr;

	if (mem) {
		DPRINT(("freeing physical buffer @%p size=%lu\n", mem, size));
		addr = (unsigned long) mem;
		while ((long) size > 0) {
			pfm_unreserve_page(addr);
			addr += PAGE_SIZE;
			size -= PAGE_SIZE;
		}
		vfree(mem);
	}
	return;
}
static pfm_context_t *
pfm_context_alloc(void)
{
	pfm_context_t *ctx;

	/*
	 * allocate context descriptor
	 * must be able to free with interrupts disabled
	 */
	ctx = kmalloc(sizeof(pfm_context_t), GFP_KERNEL);
	if (ctx) {
		memset(ctx, 0, sizeof(pfm_context_t));
		DPRINT(("alloc ctx @%p\n", ctx));
	}
	return ctx;
}
static void
pfm_context_free(pfm_context_t *ctx)
{
	if (ctx) {
		DPRINT(("free ctx @%p\n", ctx));
		kfree(ctx);
	}
}
static void
pfm_mask_monitoring(struct task_struct *task)
{
	pfm_context_t *ctx = PFM_GET_CTX(task);
	struct thread_struct *th = &task->thread;
	unsigned long mask, val, ovfl_mask;
	int i;

	DPRINT_ovfl(("masking monitoring for [%d]\n", task->pid));

	ovfl_mask = pmu_conf->ovfl_val;
	/*
	 * monitoring can only be masked as a result of a valid
	 * counter overflow. In UP, it means that the PMU still
	 * has an owner. Note that the owner can be different
	 * from the current task. However the PMU state belongs
	 * to the owner.
	 * In SMP, a valid overflow only happens when task is
	 * current. Therefore if we come here, we know that
	 * the PMU state belongs to the current task, therefore
	 * we can access the live registers.
	 *
	 * So in both cases, the live register contains the owner's
	 * state. We can ONLY touch the PMU registers and NOT the PSR.
	 *
	 * As a consequence of this call, the thread->pmds[] array
	 * contains stale information which must be ignored
	 * when context is reloaded AND monitoring is active (see
	 * pfm_restart).
	 */
	mask = ctx->ctx_used_pmds[0];
	for (i = 0; mask; i++, mask>>=1) {
		/* skip non used pmds */
		if ((mask & 0x1) == 0) continue;
		val = ia64_get_pmd(i);

		if (PMD_IS_COUNTING(i)) {
			/*
			 * we rebuild the full 64 bit value of the counter
			 */
			ctx->ctx_pmds[i].val += (val & ovfl_mask);
		} else {
			ctx->ctx_pmds[i].val = val;
		}
		DPRINT_ovfl(("pmd[%d]=0x%lx hw_pmd=0x%lx\n",
			i,
			ctx->ctx_pmds[i].val,
			val & ovfl_mask));
	}
	/*
	 * mask monitoring by setting the privilege level to 0
	 * we cannot use psr.pp/psr.up for this, it is controlled by
	 * the user
	 *
	 * if task is current, modify actual registers, otherwise modify
	 * thread save state, i.e., what will be restored in pfm_load_regs()
	 */
	mask = ctx->ctx_used_monitors[0] >> PMU_FIRST_COUNTER;
	for(i = PMU_FIRST_COUNTER; mask; i++, mask>>=1) {
		if ((mask & 0x1) == 0UL) continue;
		ia64_set_pmc(i, th->pmcs[i] & ~0xfUL);
		th->pmcs[i] &= ~0xfUL;
		DPRINT_ovfl(("pmc[%d]=0x%lx\n", i, th->pmcs[i]));
	}
	/*
	 * make all of this visible
	 */
	ia64_srlz_d();
}
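/*
 * Example of the masking trick used above: the privilege level mask
 * pmc.plm occupies bits 0-3 of each monitor PMC, so ANDing with ~0xfUL
 * turns e.g. a value ending in 0xf (count at all privilege levels) into
 * one ending in 0x0 (match no level at all). The counter stops without
 * touching psr.pp/psr.up, which belong to the user.
 */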
/*
 * must always be done with task == current
 *
 * context must be in MASKED state when calling
 */
static void
pfm_restore_monitoring(struct task_struct *task)
{
	pfm_context_t *ctx = PFM_GET_CTX(task);
	struct thread_struct *th = &task->thread;
	unsigned long mask, ovfl_mask;
	unsigned long psr, val;
	int i, is_system;

	is_system = ctx->ctx_fl_system;
	ovfl_mask = pmu_conf->ovfl_val;

	if (task != current) {
		printk(KERN_ERR "perfmon.%d: invalid task[%d] current[%d]\n", __LINE__, task->pid, current->pid);
		return;
	}
	if (ctx->ctx_state != PFM_CTX_MASKED) {
		printk(KERN_ERR "perfmon.%d: task[%d] current[%d] invalid state=%d\n", __LINE__,
			task->pid, current->pid, ctx->ctx_state);
		return;
	}
	psr = pfm_get_psr();
	/*
	 * monitoring is masked via the PMC.
	 * As we restore their value, we do not want each counter to
	 * restart right away. We stop monitoring using the PSR,
	 * restore the PMC (and PMD) and then re-establish the psr
	 * as it was. Note that there can be no pending overflow at
	 * this point, because monitoring was MASKED.
	 *
	 * system-wide sessions are pinned and self-monitoring
	 */
	if (is_system && (PFM_CPUINFO_GET() & PFM_CPUINFO_DCR_PP)) {
		/* disable dcr pp */
		ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) & ~IA64_DCR_PP);
		pfm_clear_psr_pp();
	} else {
		pfm_clear_psr_up();
	}
	/*
	 * first, we restore the PMD
	 */
	mask = ctx->ctx_used_pmds[0];
	for (i = 0; mask; i++, mask>>=1) {
		/* skip non used pmds */
		if ((mask & 0x1) == 0) continue;

		if (PMD_IS_COUNTING(i)) {
			/*
			 * we split the 64bit value according to
			 * counter width
			 */
			val = ctx->ctx_pmds[i].val & ovfl_mask;
			ctx->ctx_pmds[i].val &= ~ovfl_mask;
		} else {
			val = ctx->ctx_pmds[i].val;
		}
		ia64_set_pmd(i, val);

		DPRINT(("pmd[%d]=0x%lx hw_pmd=0x%lx\n",
			i,
			ctx->ctx_pmds[i].val,
			val));
	}
	/*
	 * restore the PMCs
	 */
	mask = ctx->ctx_used_monitors[0] >> PMU_FIRST_COUNTER;
	for(i = PMU_FIRST_COUNTER; mask; i++, mask>>=1) {
		if ((mask & 0x1) == 0UL) continue;
		th->pmcs[i] = ctx->ctx_pmcs[i];
		ia64_set_pmc(i, th->pmcs[i]);
		DPRINT(("[%d] pmc[%d]=0x%lx\n", task->pid, i, th->pmcs[i]));
	}
	ia64_srlz_d();

	/*
	 * must restore DBR/IBR because could be modified while masked
	 * XXX: need to optimize
	 */
	if (ctx->ctx_fl_using_dbreg) {
		pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
		pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
	}

	/*
	 * now restore PSR
	 */
	if (is_system && (PFM_CPUINFO_GET() & PFM_CPUINFO_DCR_PP)) {
		/* enable dcr pp */
		ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) | IA64_DCR_PP);
		ia64_srlz_i();
	}
	pfm_set_psr_l(psr);
}
static inline void
pfm_save_pmds(unsigned long *pmds, unsigned long mask)
{
	int i;

	ia64_srlz_d();

	for (i=0; mask; i++, mask>>=1) {
		if (mask & 0x1) pmds[i] = ia64_get_pmd(i);
	}
}
/*
 * reload from thread state (used for ctxsw only)
 */
static inline void
pfm_restore_pmds(unsigned long *pmds, unsigned long mask)
{
	int i;
	unsigned long val, ovfl_val = pmu_conf->ovfl_val;

	for (i=0; mask; i++, mask>>=1) {
		if ((mask & 0x1) == 0) continue;
		val = PMD_IS_COUNTING(i) ? pmds[i] & ovfl_val : pmds[i];
		ia64_set_pmd(i, val);
	}
	ia64_srlz_d();
}
/*
 * propagate PMD from context to thread-state
 */
static inline void
pfm_copy_pmds(struct task_struct *task, pfm_context_t *ctx)
{
	struct thread_struct *thread = &task->thread;
	unsigned long ovfl_val = pmu_conf->ovfl_val;
	unsigned long mask = ctx->ctx_all_pmds[0];
	unsigned long val;
	int i;

	DPRINT(("mask=0x%lx\n", mask));

	for (i=0; mask; i++, mask>>=1) {

		val = ctx->ctx_pmds[i].val;

		/*
		 * We break up the 64 bit value into 2 pieces
		 * the lower bits go to the machine state in the
		 * thread (will be reloaded on ctxsw in).
		 * The upper part stays in the soft-counter.
		 */
		if (PMD_IS_COUNTING(i)) {
			ctx->ctx_pmds[i].val = val & ~ovfl_val;
			val &= ovfl_val;
		}
		thread->pmds[i] = val;

		DPRINT(("pmd[%d]=0x%lx soft_val=0x%lx\n",
			i,
			thread->pmds[i],
			ctx->ctx_pmds[i].val));
	}
}
/*
 * propagate PMC from context to thread-state
 */
static inline void
pfm_copy_pmcs(struct task_struct *task, pfm_context_t *ctx)
{
	struct thread_struct *thread = &task->thread;
	unsigned long mask = ctx->ctx_all_pmcs[0];
	int i;

	DPRINT(("mask=0x%lx\n", mask));

	for (i=0; mask; i++, mask>>=1) {
		/* masking 0 with ovfl_val yields 0 */
		thread->pmcs[i] = ctx->ctx_pmcs[i];
		DPRINT(("pmc[%d]=0x%lx\n", i, thread->pmcs[i]));
	}
}
static inline void
pfm_restore_pmcs(unsigned long *pmcs, unsigned long mask)
{
	int i;

	for (i=0; mask; i++, mask>>=1) {
		if ((mask & 0x1) == 0) continue;
		ia64_set_pmc(i, pmcs[i]);
	}
	ia64_srlz_d();
}
static inline int
pfm_uuid_cmp(pfm_uuid_t a, pfm_uuid_t b)
{
	return memcmp(a, b, sizeof(pfm_uuid_t));
}
static inline int
pfm_buf_fmt_exit(pfm_buffer_fmt_t *fmt, struct task_struct *task, void *buf, struct pt_regs *regs)
{
	int ret = 0;
	if (fmt->fmt_exit) ret = (*fmt->fmt_exit)(task, buf, regs);
	return ret;
}
static inline int
pfm_buf_fmt_getsize(pfm_buffer_fmt_t *fmt, struct task_struct *task, unsigned int flags, int cpu, void *arg, unsigned long *size)
{
	int ret = 0;
	if (fmt->fmt_getsize) ret = (*fmt->fmt_getsize)(task, flags, cpu, arg, size);
	return ret;
}
static inline int
pfm_buf_fmt_validate(pfm_buffer_fmt_t *fmt, struct task_struct *task, unsigned int flags,
		     int cpu, void *arg)
{
	int ret = 0;
	if (fmt->fmt_validate) ret = (*fmt->fmt_validate)(task, flags, cpu, arg);
	return ret;
}
static inline int
pfm_buf_fmt_init(pfm_buffer_fmt_t *fmt, struct task_struct *task, void *buf, unsigned int flags,
		 int cpu, void *arg)
{
	int ret = 0;
	if (fmt->fmt_init) ret = (*fmt->fmt_init)(task, buf, flags, cpu, arg);
	return ret;
}
static inline int
pfm_buf_fmt_restart(pfm_buffer_fmt_t *fmt, struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs)
{
	int ret = 0;
	if (fmt->fmt_restart) ret = (*fmt->fmt_restart)(task, ctrl, buf, regs);
	return ret;
}
static inline int
pfm_buf_fmt_restart_active(pfm_buffer_fmt_t *fmt, struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs)
{
	int ret = 0;
	if (fmt->fmt_restart_active) ret = (*fmt->fmt_restart_active)(task, ctrl, buf, regs);
	return ret;
}
static pfm_buffer_fmt_t *
__pfm_find_buffer_fmt(pfm_uuid_t uuid)
{
	struct list_head * pos;
	pfm_buffer_fmt_t * entry;

	list_for_each(pos, &pfm_buffer_fmt_list) {
		entry = list_entry(pos, pfm_buffer_fmt_t, fmt_list);
		if (pfm_uuid_cmp(uuid, entry->fmt_uuid) == 0)
			return entry;
	}
	return NULL;
}
/*
 * find a buffer format based on its uuid
 */
static pfm_buffer_fmt_t *
pfm_find_buffer_fmt(pfm_uuid_t uuid)
{
	pfm_buffer_fmt_t * fmt;
	spin_lock(&pfm_buffer_fmt_lock);
	fmt = __pfm_find_buffer_fmt(uuid);
	spin_unlock(&pfm_buffer_fmt_lock);
	return fmt;
}
int
pfm_register_buffer_fmt(pfm_buffer_fmt_t *fmt)
{
	int ret = 0;

	/* some sanity checks */
	if (fmt == NULL || fmt->fmt_name == NULL) return -EINVAL;

	/* we need at least a handler */
	if (fmt->fmt_handler == NULL) return -EINVAL;

	/*
	 * XXX: need check validity of fmt_arg_size
	 */

	spin_lock(&pfm_buffer_fmt_lock);

	if (__pfm_find_buffer_fmt(fmt->fmt_uuid)) {
		printk(KERN_ERR "perfmon: duplicate sampling format: %s\n", fmt->fmt_name);
		ret = -EBUSY;
		goto out;
	}
	list_add(&fmt->fmt_list, &pfm_buffer_fmt_list);
	printk(KERN_INFO "perfmon: added sampling format %s\n", fmt->fmt_name);

out:
	spin_unlock(&pfm_buffer_fmt_lock);
	return ret;
}
EXPORT_SYMBOL(pfm_register_buffer_fmt);
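/*
 * A minimal sketch of a client of this interface (names and UUID bytes
 * are hypothetical, not part of this file); sampling format modules
 * register themselves this way at load time:
 *
 *	static pfm_buffer_fmt_t my_fmt = {
 *		.fmt_name    = "example-sampling-format",
 *		.fmt_uuid    = { 0x01, 0x02, 0x03, 0x04 },
 *		.fmt_handler = my_ovfl_handler,   // mandatory, see check above
 *	};
 *
 *	ret = pfm_register_buffer_fmt(&my_fmt);
 *
 * Registration fails without a name/handler, or when another format has
 * already claimed the same UUID.
 */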
int
pfm_unregister_buffer_fmt(pfm_uuid_t uuid)
{
	pfm_buffer_fmt_t *fmt;
	int ret = 0;

	spin_lock(&pfm_buffer_fmt_lock);

	fmt = __pfm_find_buffer_fmt(uuid);
	if (!fmt) {
		printk(KERN_ERR "perfmon: cannot unregister format, not found\n");
		ret = -EINVAL;
		goto out;
	}
	list_del_init(&fmt->fmt_list);
	printk(KERN_INFO "perfmon: removed sampling format: %s\n", fmt->fmt_name);

out:
	spin_unlock(&pfm_buffer_fmt_lock);
	return ret;
}
EXPORT_SYMBOL(pfm_unregister_buffer_fmt);
extern void update_pal_halt_status(int);

static int
pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu)
{
	unsigned long flags;
	/*
	 * validity checks on cpu_mask have been done upstream
	 */
	LOCK_PFS(flags);

	DPRINT(("in sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
		pfm_sessions.pfs_sys_sessions,
		pfm_sessions.pfs_task_sessions,
		pfm_sessions.pfs_sys_use_dbregs,
		is_syswide,
		cpu));

	if (is_syswide) {
		/*
		 * cannot mix system wide and per-task sessions
		 */
		if (pfm_sessions.pfs_task_sessions > 0UL) {
			DPRINT(("system wide not possible, %u conflicting task_sessions\n",
				pfm_sessions.pfs_task_sessions));
			goto abort;
		}

		if (pfm_sessions.pfs_sys_session[cpu]) goto error_conflict;

		DPRINT(("reserving system wide session on CPU%u currently on CPU%u\n", cpu, smp_processor_id()));

		pfm_sessions.pfs_sys_session[cpu] = task;

		pfm_sessions.pfs_sys_sessions++ ;

	} else {
		if (pfm_sessions.pfs_sys_sessions) goto abort;
		pfm_sessions.pfs_task_sessions++;
	}

	DPRINT(("out sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
		pfm_sessions.pfs_sys_sessions,
		pfm_sessions.pfs_task_sessions,
		pfm_sessions.pfs_sys_use_dbregs,
		is_syswide,
		cpu));

	/*
	 * disable default_idle() to go to PAL_HALT
	 */
	update_pal_halt_status(0);

	UNLOCK_PFS(flags);

	return 0;

error_conflict:
	DPRINT(("system wide not possible, conflicting session [%d] on CPU%d\n",
		pfm_sessions.pfs_sys_session[cpu]->pid,
		cpu));
abort:
	UNLOCK_PFS(flags);

	return -EBUSY;
}
static int
pfm_unreserve_session(pfm_context_t *ctx, int is_syswide, unsigned int cpu)
{
	unsigned long flags;
	/*
	 * validity checks on cpu_mask have been done upstream
	 */
	LOCK_PFS(flags);

	DPRINT(("in sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
		pfm_sessions.pfs_sys_sessions,
		pfm_sessions.pfs_task_sessions,
		pfm_sessions.pfs_sys_use_dbregs,
		is_syswide,
		cpu));

	if (is_syswide) {
		pfm_sessions.pfs_sys_session[cpu] = NULL;
		/*
		 * would not work with perfmon+more than one bit in cpu_mask
		 */
		if (ctx && ctx->ctx_fl_using_dbreg) {
			if (pfm_sessions.pfs_sys_use_dbregs == 0) {
				printk(KERN_ERR "perfmon: invalid release for ctx %p sys_use_dbregs=0\n", ctx);
			} else {
				pfm_sessions.pfs_sys_use_dbregs--;
			}
		}
		pfm_sessions.pfs_sys_sessions--;
	} else {
		pfm_sessions.pfs_task_sessions--;
	}
	DPRINT(("out sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
		pfm_sessions.pfs_sys_sessions,
		pfm_sessions.pfs_task_sessions,
		pfm_sessions.pfs_sys_use_dbregs,
		is_syswide,
		cpu));

	/*
	 * if possible, enable default_idle() to go into PAL_HALT
	 */
	if (pfm_sessions.pfs_task_sessions == 0 && pfm_sessions.pfs_sys_sessions == 0)
		update_pal_halt_status(1);

	UNLOCK_PFS(flags);

	return 0;
}
/*
 * removes virtual mapping of the sampling buffer.
 * IMPORTANT: cannot be called with interrupts disabled, e.g. inside
 * a PROTECT_CTX() section.
 */
static int
pfm_remove_smpl_mapping(struct task_struct *task, void *vaddr, unsigned long size)
{
	int r;

	/* sanity checks */
	if (task->mm == NULL || size == 0UL || vaddr == NULL) {
		printk(KERN_ERR "perfmon: pfm_remove_smpl_mapping [%d] invalid context mm=%p\n", task->pid, task->mm);
		return -EINVAL;
	}

	DPRINT(("smpl_vaddr=%p size=%lu\n", vaddr, size));

	/*
	 * does the actual unmapping
	 */
	down_write(&task->mm->mmap_sem);

	DPRINT(("down_write done smpl_vaddr=%p size=%lu\n", vaddr, size));

	r = pfm_do_munmap(task->mm, (unsigned long)vaddr, size, 0);

	up_write(&task->mm->mmap_sem);
	if (r != 0) {
		printk(KERN_ERR "perfmon: [%d] unable to unmap sampling buffer @%p size=%lu\n", task->pid, vaddr, size);
	}

	DPRINT(("do_unmap(%p, %lu)=%d\n", vaddr, size, r));

	return 0;
}
/*
 * free actual physical storage used by sampling buffer
 */
static int
pfm_free_smpl_buffer(pfm_context_t *ctx)
{
	pfm_buffer_fmt_t *fmt;

	if (ctx->ctx_smpl_hdr == NULL) goto invalid_free;

	/*
	 * we won't use the buffer format anymore
	 */
	fmt = ctx->ctx_buf_fmt;

	DPRINT(("sampling buffer @%p size %lu vaddr=%p\n",
		ctx->ctx_smpl_hdr,
		ctx->ctx_smpl_size,
		ctx->ctx_smpl_vaddr));

	pfm_buf_fmt_exit(fmt, current, NULL, NULL);

	/*
	 * free the buffer
	 */
	pfm_rvfree(ctx->ctx_smpl_hdr, ctx->ctx_smpl_size);

	ctx->ctx_smpl_hdr  = NULL;
	ctx->ctx_smpl_size = 0UL;

	return 0;

invalid_free:
	printk(KERN_ERR "perfmon: pfm_free_smpl_buffer [%d] no buffer\n", current->pid);
	return -EINVAL;
}
static inline void
pfm_exit_smpl_buffer(pfm_buffer_fmt_t *fmt)
{
	if (fmt == NULL) return;

	pfm_buf_fmt_exit(fmt, current, NULL, NULL);
}
/*
 * pfmfs should _never_ be mounted by userland - too much of security hassle,
 * no real gain from having the whole whorehouse mounted. So we don't need
 * any operations on the root directory. However, we need a non-trivial
 * d_name - pfm: will go nicely and kill the special-casing in procfs.
 */
static struct vfsmount *pfmfs_mnt;

static int __init
init_pfm_fs(void)
{
	int err = register_filesystem(&pfm_fs_type);
	if (!err) {
		pfmfs_mnt = kern_mount(&pfm_fs_type);
		err = PTR_ERR(pfmfs_mnt);
		if (IS_ERR(pfmfs_mnt))
			unregister_filesystem(&pfm_fs_type);
		else
			err = 0;
	}
	return err;
}

static void __exit
exit_pfm_fs(void)
{
	unregister_filesystem(&pfm_fs_type);
	mntput(pfmfs_mnt);
}
static ssize_t
pfm_read(struct file *filp, char __user *buf, size_t size, loff_t *ppos)
{
	pfm_context_t *ctx;
	pfm_msg_t *msg;
	ssize_t ret;
	unsigned long flags;
	DECLARE_WAITQUEUE(wait, current);
	if (PFM_IS_FILE(filp) == 0) {
		printk(KERN_ERR "perfmon: pfm_read: bad magic [%d]\n", current->pid);
		return -EINVAL;
	}

	ctx = (pfm_context_t *)filp->private_data;
	if (ctx == NULL) {
		printk(KERN_ERR "perfmon: pfm_read: NULL ctx [%d]\n", current->pid);
		return -EINVAL;
	}

	/*
	 * check even when there is no message
	 */
	if (size < sizeof(pfm_msg_t)) {
		DPRINT(("message is too small ctx=%p (>=%ld)\n", ctx, sizeof(pfm_msg_t)));
		return -EINVAL;
	}

	PROTECT_CTX(ctx, flags);

	/*
	 * put ourselves on the wait queue
	 */
	add_wait_queue(&ctx->ctx_msgq_wait, &wait);

	for(;;) {
		set_current_state(TASK_INTERRUPTIBLE);

		DPRINT(("head=%d tail=%d\n", ctx->ctx_msgq_head, ctx->ctx_msgq_tail));

		ret = 0;
		if(PFM_CTXQ_EMPTY(ctx) == 0) break;

		UNPROTECT_CTX(ctx, flags);

		/*
		 * check non-blocking read
		 */
		ret = -EAGAIN;
		if(filp->f_flags & O_NONBLOCK) break;

		/*
		 * check pending signals
		 */
		if(signal_pending(current)) {
			ret = -EINTR;
			break;
		}
		/*
		 * no message, so wait
		 */
		schedule();

		PROTECT_CTX(ctx, flags);
	}
	DPRINT(("[%d] back to running ret=%ld\n", current->pid, ret));
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&ctx->ctx_msgq_wait, &wait);

	if (ret < 0) goto abort;

	ret = -EINVAL;
	msg = pfm_get_next_msg(ctx);
	if (msg == NULL) {
		printk(KERN_ERR "perfmon: pfm_read no msg for ctx=%p [%d]\n", ctx, current->pid);
		goto abort_locked;
	}

	DPRINT(("fd=%d type=%d\n", msg->pfm_gen_msg.msg_ctx_fd, msg->pfm_gen_msg.msg_type));

	ret = -EFAULT;
	if(copy_to_user(buf, msg, sizeof(pfm_msg_t)) == 0) ret = sizeof(pfm_msg_t);

abort_locked:
	UNPROTECT_CTX(ctx, flags);
abort:
	return ret;
}
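/*
 * The loop in pfm_read() is the classic wait-queue pattern: the task is
 * marked TASK_INTERRUPTIBLE *before* the message queue is re-checked, so
 * a wakeup arriving between the check and schedule() is not lost: it
 * puts the task back to TASK_RUNNING and schedule() returns right away.
 */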
static ssize_t
pfm_write(struct file *file, const char __user *ubuf,
	  size_t size, loff_t *ppos)
{
	DPRINT(("pfm_write called\n"));
	return -EINVAL;
}
static unsigned int
pfm_poll(struct file *filp, poll_table * wait)
{
	pfm_context_t *ctx;
	unsigned long flags;
	unsigned int mask = 0;

	if (PFM_IS_FILE(filp) == 0) {
		printk(KERN_ERR "perfmon: pfm_poll: bad magic [%d]\n", current->pid);
		return 0;
	}

	ctx = (pfm_context_t *)filp->private_data;
	if (ctx == NULL) {
		printk(KERN_ERR "perfmon: pfm_poll: NULL ctx [%d]\n", current->pid);
		return 0;
	}

	DPRINT(("pfm_poll ctx_fd=%d before poll_wait\n", ctx->ctx_fd));

	poll_wait(filp, &ctx->ctx_msgq_wait, wait);

	PROTECT_CTX(ctx, flags);

	if (PFM_CTXQ_EMPTY(ctx) == 0)
		mask = POLLIN | POLLRDNORM;

	UNPROTECT_CTX(ctx, flags);

	DPRINT(("pfm_poll ctx_fd=%d mask=0x%x\n", ctx->ctx_fd, mask));

	return mask;
}
static int
pfm_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
{
	DPRINT(("pfm_ioctl called\n"));
	return -EINVAL;
}
/*
 * interrupt cannot be masked when coming here
 */
static inline int
pfm_do_fasync(int fd, struct file *filp, pfm_context_t *ctx, int on)
{
	int ret;

	ret = fasync_helper (fd, filp, on, &ctx->ctx_async_queue);

	DPRINT(("pfm_fasync called by [%d] on ctx_fd=%d on=%d async_queue=%p ret=%d\n",
		current->pid,
		fd,
		on,
		ctx->ctx_async_queue, ret));

	return ret;
}
static int
pfm_fasync(int fd, struct file *filp, int on)
{
	pfm_context_t *ctx;
	int ret;

	if (PFM_IS_FILE(filp) == 0) {
		printk(KERN_ERR "perfmon: pfm_fasync bad magic [%d]\n", current->pid);
		return -EBADF;
	}

	ctx = (pfm_context_t *)filp->private_data;
	if (ctx == NULL) {
		printk(KERN_ERR "perfmon: pfm_fasync NULL ctx [%d]\n", current->pid);
		return -EBADF;
	}
	/*
	 * we cannot mask interrupts during this call because this may
	 * go to sleep if memory is not readily available.
	 *
	 * We are protected from the context disappearing by the get_fd()/put_fd()
	 * done in caller. Serialization of this function is ensured by caller.
	 */
	ret = pfm_do_fasync(fd, filp, ctx, on);

	DPRINT(("pfm_fasync called on ctx_fd=%d on=%d async_queue=%p ret=%d\n",
		fd,
		on,
		ctx->ctx_async_queue, ret));

	return ret;
}
#ifdef CONFIG_SMP
/*
 * this function is exclusively called from pfm_close().
 * The context is not protected at that time, nor are interrupts
 * on the remote CPU. That's necessary to avoid deadlocks.
 */
static void
pfm_syswide_force_stop(void *info)
{
	pfm_context_t *ctx = (pfm_context_t *)info;
	struct pt_regs *regs = ia64_task_regs(current);
	struct task_struct *owner;
	unsigned long flags;
	int ret;

	if (ctx->ctx_cpu != smp_processor_id()) {
		printk(KERN_ERR "perfmon: pfm_syswide_force_stop for CPU%d but on CPU%d\n",
			ctx->ctx_cpu,
			smp_processor_id());
		return;
	}
	owner = GET_PMU_OWNER();
	if (owner != ctx->ctx_task) {
		printk(KERN_ERR "perfmon: pfm_syswide_force_stop CPU%d unexpected owner [%d] instead of [%d]\n",
			smp_processor_id(),
			owner->pid, ctx->ctx_task->pid);
		return;
	}
	if (GET_PMU_CTX() != ctx) {
		printk(KERN_ERR "perfmon: pfm_syswide_force_stop CPU%d unexpected ctx %p instead of %p\n",
			smp_processor_id(),
			GET_PMU_CTX(), ctx);
		return;
	}

	DPRINT(("on CPU%d forcing system wide stop for [%d]\n", smp_processor_id(), ctx->ctx_task->pid));
	/*
	 * the context is already protected in pfm_close(), we simply
	 * need to mask interrupts to avoid a PMU interrupt race on
	 * this CPU
	 */
	local_irq_save(flags);

	ret = pfm_context_unload(ctx, NULL, 0, regs);
	if (ret) {
		DPRINT(("context_unload returned %d\n", ret));
	}

	/*
	 * unmask interrupts, PMU interrupts are now spurious here
	 */
	local_irq_restore(flags);
}
static void
pfm_syswide_cleanup_other_cpu(pfm_context_t *ctx)
{
	int ret;

	DPRINT(("calling CPU%d for cleanup\n", ctx->ctx_cpu));
	ret = smp_call_function_single(ctx->ctx_cpu, pfm_syswide_force_stop, ctx, 0, 1);
	DPRINT(("called CPU%d for cleanup ret=%d\n", ctx->ctx_cpu, ret));
}
#endif /* CONFIG_SMP */
/*
 * called for each close(). Partially free resources.
 * When caller is self-monitoring, the context is unloaded.
 */
static int
pfm_flush(struct file *filp)
{
	pfm_context_t *ctx;
	struct task_struct *task;
	struct pt_regs *regs;
	unsigned long flags;
	unsigned long smpl_buf_size = 0UL;
	void *smpl_buf_vaddr = NULL;
	int state, is_system;

	if (PFM_IS_FILE(filp) == 0) {
		DPRINT(("bad magic for\n"));
		return -EBADF;
	}

	ctx = (pfm_context_t *)filp->private_data;
	if (ctx == NULL) {
		printk(KERN_ERR "perfmon: pfm_flush: NULL ctx [%d]\n", current->pid);
		return -EBADF;
	}

	/*
	 * remove our file from the async queue, if we use this mode.
	 * This can be done without the context being protected. We come
	 * here when the context has become unreachable by other tasks.
	 *
	 * We may still have active monitoring at this point and we may
	 * end up in pfm_overflow_handler(). However, fasync_helper()
	 * operates with interrupts disabled and it cleans up the
	 * queue. If the PMU handler is called prior to entering
	 * fasync_helper() then it will send a signal. If it is
	 * invoked after, it will find an empty queue and no
	 * signal will be sent. In both case, we are safe
	 */
	if (filp->f_flags & FASYNC) {
		DPRINT(("cleaning up async_queue=%p\n", ctx->ctx_async_queue));
		pfm_do_fasync (-1, filp, ctx, 0);
	}

	PROTECT_CTX(ctx, flags);

	state     = ctx->ctx_state;
	is_system = ctx->ctx_fl_system;

	task = PFM_CTX_TASK(ctx);
	regs = ia64_task_regs(task);

	DPRINT(("ctx_state=%d is_current=%d\n",
		state,
		task == current ? 1 : 0));

	/*
	 * if state == UNLOADED, then task is NULL
	 */

	/*
	 * we must stop and unload because we are losing access to the context.
	 */
	if (task == current) {
#ifdef CONFIG_SMP
		/*
		 * the task IS the owner but it migrated to another CPU: that's bad
		 * but we must handle this cleanly. Unfortunately, the kernel does
		 * not provide a mechanism to block migration (while the context is loaded).
		 *
		 * We need to release the resource on the ORIGINAL cpu.
		 */
		if (is_system && ctx->ctx_cpu != smp_processor_id()) {

			DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
			/*
			 * keep context protected but unmask interrupt for IPI
			 */
			local_irq_restore(flags);

			pfm_syswide_cleanup_other_cpu(ctx);

			/*
			 * restore interrupt masking
			 */
			local_irq_save(flags);

			/*
			 * context is unloaded at this point
			 */
		} else
#endif /* CONFIG_SMP */
		{

			DPRINT(("forcing unload\n"));
			/*
			 * stop and unload, returning with state UNLOADED
			 * and session unreserved.
			 */
			pfm_context_unload(ctx, NULL, 0, regs);

			DPRINT(("ctx_state=%d\n", ctx->ctx_state));
		}
	}

	/*
	 * remove virtual mapping, if any, for the calling task.
	 * cannot reset ctx field until last user is calling close().
	 *
	 * ctx_smpl_vaddr must never be cleared because it is needed
	 * by every task with access to the context
	 *
	 * When called from do_exit(), the mm context is gone already, therefore
	 * mm is NULL, i.e., the VMA is already gone and we do not have to
	 * do anything here
	 */
	if (ctx->ctx_smpl_vaddr && current->mm) {
		smpl_buf_vaddr = ctx->ctx_smpl_vaddr;
		smpl_buf_size  = ctx->ctx_smpl_size;
	}

	UNPROTECT_CTX(ctx, flags);

	/*
	 * if there was a mapping, then we systematically remove it
	 * at this point. Cannot be done inside critical section
	 * because some VM function reenables interrupts.
	 */
	if (smpl_buf_vaddr) pfm_remove_smpl_mapping(current, smpl_buf_vaddr, smpl_buf_size);

	return 0;
}
/*
 * called either on explicit close() or from exit_files().
 * Only the LAST user of the file gets to this point, i.e., it is
 * called only ONCE.
 *
 * IMPORTANT: we get called ONLY when the refcnt on the file gets to zero
 * (fput()), i.e., last task to access the file. Nobody else can access the
 * file at this point.
 *
 * When called from exit_files(), the VMA has been freed because exit_mm()
 * is executed before exit_files().
 *
 * When called from exit_files(), the current task is not yet ZOMBIE but we
 * flush the PMU state to the context.
 */
static int
pfm_close(struct inode *inode, struct file *filp)
{
	pfm_context_t *ctx;
	struct task_struct *task;
	struct pt_regs *regs;
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;
	unsigned long smpl_buf_size = 0UL;
	void *smpl_buf_addr = NULL;
	int free_possible = 1;
	int state, is_system;

	DPRINT(("pfm_close called private=%p\n", filp->private_data));

	if (PFM_IS_FILE(filp) == 0) {
		DPRINT(("bad magic\n"));
		return -EBADF;
	}

	ctx = (pfm_context_t *)filp->private_data;
	if (ctx == NULL) {
		printk(KERN_ERR "perfmon: pfm_close: NULL ctx [%d]\n", current->pid);
		return -EBADF;
	}

	PROTECT_CTX(ctx, flags);

	state     = ctx->ctx_state;
	is_system = ctx->ctx_fl_system;

	task = PFM_CTX_TASK(ctx);
	regs = ia64_task_regs(task);

	DPRINT(("ctx_state=%d is_current=%d\n",
		state,
		task == current ? 1 : 0));

	/*
	 * if task == current, then pfm_flush() unloaded the context
	 */
	if (state == PFM_CTX_UNLOADED) goto doit;

	/*
	 * context is loaded/masked and task != current, we need to
	 * either force an unload or go zombie
	 */

	/*
	 * The task is currently blocked or will block after an overflow.
	 * we must force it to wakeup to get out of the
	 * MASKED state and transition to the unloaded state by itself.
	 *
	 * This situation is only possible for per-task mode
	 */
	if (state == PFM_CTX_MASKED && CTX_OVFL_NOBLOCK(ctx) == 0) {

		/*
		 * set a "partial" zombie state to be checked
		 * upon return from down() in pfm_handle_work().
		 *
		 * We cannot use the ZOMBIE state, because it is checked
		 * by pfm_load_regs() which is called upon wakeup from down().
		 * In such case, it would free the context and then we would
		 * return to pfm_handle_work() which would access the
		 * stale context. Instead, we set a flag invisible to pfm_load_regs()
		 * but visible to pfm_handle_work().
		 *
		 * For some window of time, we have a zombie context with
		 * ctx_state = MASKED and not ZOMBIE
		 */
		ctx->ctx_fl_going_zombie = 1;

		/*
		 * force task to wake up from MASKED state
		 */
		up(&ctx->ctx_restart_sem);

		DPRINT(("waking up ctx_state=%d\n", state));

		/*
		 * put ourself to sleep waiting for the other
		 * task to report completion
		 *
		 * the context is protected by mutex, therefore there
		 * is no risk of being notified of completion before
		 * being actually on the waitq.
		 */
		set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&ctx->ctx_zombieq, &wait);

		UNPROTECT_CTX(ctx, flags);

		/*
		 * XXX: check for signals :
		 *	- ok for explicit close
		 *	- not ok when coming from exit_files()
		 */
		schedule();

		PROTECT_CTX(ctx, flags);

		remove_wait_queue(&ctx->ctx_zombieq, &wait);
		set_current_state(TASK_RUNNING);

		/*
		 * context is unloaded at this point
		 */
		DPRINT(("after zombie wakeup ctx_state=%d for\n", state));
	}
	else if (task != current) {
#ifdef CONFIG_SMP
		/*
		 * switch context to zombie state
		 */
		ctx->ctx_state = PFM_CTX_ZOMBIE;

		DPRINT(("zombie ctx for [%d]\n", task->pid));
		/*
		 * cannot free the context on the spot. deferred until
		 * the task notices the ZOMBIE state
		 */
		free_possible = 0;
#else
		pfm_context_unload(ctx, NULL, 0, regs);
#endif
	}

doit:
	/* reload state, may have changed during opening of critical section */
	state = ctx->ctx_state;

	/*
	 * the context is still attached to a task (possibly current)
	 * we cannot destroy it right now
	 */

	/*
	 * we must free the sampling buffer right here because
	 * we cannot rely on it being cleaned up later by the
	 * monitored task. It is not possible to free vmalloc'ed
	 * memory in pfm_load_regs(). Instead, we remove the buffer
	 * now. should there be subsequent PMU overflows originally
	 * meant for sampling, they will be converted to spurious
	 * and that's fine because the monitoring tool is gone anyway.
	 */
	if (ctx->ctx_smpl_hdr) {
		smpl_buf_addr = ctx->ctx_smpl_hdr;
		smpl_buf_size = ctx->ctx_smpl_size;
		/* no more sampling */
		ctx->ctx_smpl_hdr = NULL;
		ctx->ctx_fl_is_sampling = 0;
	}

	DPRINT(("ctx_state=%d free_possible=%d addr=%p size=%lu\n",
		state,
		free_possible,
		smpl_buf_addr,
		smpl_buf_size));

	if (smpl_buf_addr) pfm_exit_smpl_buffer(ctx->ctx_buf_fmt);

	/*
	 * if state == UNLOADED, the session has already been unreserved.
	 */
	if (state == PFM_CTX_ZOMBIE) {
		pfm_unreserve_session(ctx, ctx->ctx_fl_system, ctx->ctx_cpu);
	}

	/*
	 * disconnect file descriptor from context must be done
	 * before we unlock.
	 */
	filp->private_data = NULL;

	/*
	 * if we free on the spot, the context is now completely unreachable
	 * from the callers side. The monitored task side is also cut, so we
	 * can freely cut.
	 *
	 * If we have a deferred free, only the caller side is disconnected.
	 */
	UNPROTECT_CTX(ctx, flags);

	/*
	 * All memory free operations (especially for vmalloc'ed memory)
	 * MUST be done with interrupts ENABLED.
	 */
	if (smpl_buf_addr) pfm_rvfree(smpl_buf_addr, smpl_buf_size);

	/*
	 * return the memory used by the context
	 */
	if (free_possible) pfm_context_free(ctx);

	return 0;
}
static int
pfm_no_open(struct inode *irrelevant, struct file *dontcare)
{
	DPRINT(("pfm_no_open called\n"));
	return -ENXIO;
}
static struct file_operations pfm_file_ops = {
	.llseek  = no_llseek,
	.read    = pfm_read,
	.write   = pfm_write,
	.poll    = pfm_poll,
	.ioctl   = pfm_ioctl,
	.open    = pfm_no_open,	/* special open code to disallow open via /proc */
	.fasync  = pfm_fasync,
	.release = pfm_close,
	.flush   = pfm_flush
};
static int
pfmfs_delete_dentry(struct dentry *dentry)
{
	return 1;
}

static struct dentry_operations pfmfs_dentry_operations = {
	.d_delete = pfmfs_delete_dentry,
};
static int
pfm_alloc_fd(struct file **cfile)
{
	int fd, ret = 0;
	struct file *file = NULL;
	struct inode * inode;
	char name[32];
	struct qstr this;

	fd = get_unused_fd();
	if (fd < 0) return -ENFILE;

	ret = -ENFILE;

	file = get_empty_filp();
	if (!file) goto out;

	/*
	 * allocate a new inode
	 */
	inode = new_inode(pfmfs_mnt->mnt_sb);
	if (!inode) goto out;

	DPRINT(("new inode ino=%ld @%p\n", inode->i_ino, inode));

	inode->i_mode = S_IFCHR|S_IRUGO;
	inode->i_uid  = current->fsuid;
	inode->i_gid  = current->fsgid;

	sprintf(name, "[%lu]", inode->i_ino);
	this.name = name;
	this.len  = strlen(name);
	this.hash = inode->i_ino;

	ret = -ENOMEM;

	/*
	 * allocate a new dcache entry
	 */
	file->f_dentry = d_alloc(pfmfs_mnt->mnt_sb->s_root, &this);
	if (!file->f_dentry) goto out;

	file->f_dentry->d_op = &pfmfs_dentry_operations;

	d_add(file->f_dentry, inode);
	file->f_vfsmnt = mntget(pfmfs_mnt);
	file->f_mapping = inode->i_mapping;

	file->f_op    = &pfm_file_ops;
	file->f_mode  = FMODE_READ;
	file->f_flags = O_RDONLY;
	file->f_pos   = 0;

	/*
	 * may have to delay until context is attached?
	 */
	fd_install(fd, file);

	/*
	 * the file structure we will use
	 */
	*cfile = file;

	return fd;
out:
	if (file) put_filp(file);
	put_unused_fd(fd);
	return ret;
}
static void
pfm_free_fd(int fd, struct file *file)
{
	struct files_struct *files = current->files;

	/*
	 * there is no fd_uninstall(), so we do it here
	 */
	spin_lock(&files->file_lock);
	files->fd[fd] = NULL;
	spin_unlock(&files->file_lock);

	if (file) put_filp(file);
	put_unused_fd(fd);
}
static int
pfm_remap_buffer(struct vm_area_struct *vma, unsigned long buf, unsigned long addr, unsigned long size)
{
	DPRINT(("CPU%d buf=0x%lx addr=0x%lx size=%ld\n", smp_processor_id(), buf, addr, size));

	while (size > 0) {
		unsigned long pfn = ia64_tpa(buf) >> PAGE_SHIFT;

		if (remap_pfn_range(vma, addr, pfn, PAGE_SIZE, PAGE_READONLY))
			return -ENOMEM;

		addr += PAGE_SIZE;
		buf  += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
	return 0;
}
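/*
 * The remap above is done one page at a time because the buffer comes
 * from pfm_rvmalloc(): vmalloc'ed memory is virtually contiguous but
 * physically scattered, so each page's physical address must be looked
 * up (ia64_tpa) and mapped individually.
 */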
/*
 * allocate a sampling buffer and remap it into the user address space of the task
 */
static int
pfm_smpl_buffer_alloc(struct task_struct *task, pfm_context_t *ctx, unsigned long rsize, void **user_vaddr)
{
	struct mm_struct *mm = task->mm;
	struct vm_area_struct *vma = NULL;
	unsigned long size;
	void *smpl_buf;

	/*
	 * the fixed header + requested size and align to page boundary
	 */
	size = PAGE_ALIGN(rsize);

	DPRINT(("sampling buffer rsize=%lu size=%lu bytes\n", rsize, size));

	/*
	 * check requested size to avoid Denial-of-service attacks
	 * XXX: may have to refine this test
	 * Check against address space limit.
	 *
	 * if ((mm->total_vm << PAGE_SHIFT) + len > task->rlim[RLIMIT_AS].rlim_cur)
	 *	return -ENOMEM;
	 */
	if (size > task->signal->rlim[RLIMIT_MEMLOCK].rlim_cur)
		return -ENOMEM;

	/*
	 * We do the easy to undo allocations first.
	 *
	 * pfm_rvmalloc(), clears the buffer, so there is no leak
	 */
	smpl_buf = pfm_rvmalloc(size);
	if (smpl_buf == NULL) {
		DPRINT(("Can't allocate sampling buffer\n"));
		return -ENOMEM;
	}

	DPRINT(("smpl_buf @%p\n", smpl_buf));

	/* allocate vma */
	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
	if (!vma) {
		DPRINT(("Cannot allocate vma\n"));
		goto error_kmem;
	}
	memset(vma, 0, sizeof(*vma));

	/*
	 * partially initialize the vma for the sampling buffer
	 */
	vma->vm_mm	  = mm;
	vma->vm_flags	  = VM_READ| VM_MAYREAD |VM_RESERVED;
	vma->vm_page_prot = PAGE_READONLY; /* XXX may need to change */

	/*
	 * Now we have everything we need and we can initialize
	 * and connect all the data structures
	 */
	ctx->ctx_smpl_hdr  = smpl_buf;
	ctx->ctx_smpl_size = size; /* aligned size */

	/*
	 * Let's do the difficult operations next.
	 *
	 * now we atomically find some area in the address space and
	 * remap the buffer in it.
	 */
	down_write(&task->mm->mmap_sem);

	/* find some free area in address space, must have mmap sem held */
	vma->vm_start = pfm_get_unmapped_area(NULL, 0, size, 0, MAP_PRIVATE|MAP_ANONYMOUS, 0);
	if (vma->vm_start == 0UL) {
		DPRINT(("Cannot find unmapped area for size %ld\n", size));
		up_write(&task->mm->mmap_sem);
		goto error;
	}
	vma->vm_end   = vma->vm_start + size;
	vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT;

	DPRINT(("aligned size=%ld, hdr=%p mapped @0x%lx\n", size, ctx->ctx_smpl_hdr, vma->vm_start));

	/* can only be applied to current task, need to have the mm semaphore held when called */
	if (pfm_remap_buffer(vma, (unsigned long)smpl_buf, vma->vm_start, size)) {
		DPRINT(("Can't remap buffer\n"));
		up_write(&task->mm->mmap_sem);
		goto error;
	}

	/*
	 * now insert the vma in the vm list for the process, must be
	 * done with mmap lock held
	 */
	insert_vm_struct(mm, vma);

	mm->total_vm += size >> PAGE_SHIFT;
	vm_stat_account(vma);
	up_write(&task->mm->mmap_sem);

	/*
	 * keep track of user level virtual address
	 */
	ctx->ctx_smpl_vaddr = (void *)vma->vm_start;
	*(unsigned long *)user_vaddr = vma->vm_start;

	return 0;

error:
	kmem_cache_free(vm_area_cachep, vma);
error_kmem:
	pfm_rvfree(smpl_buf, size);

	return -ENOMEM;
}
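/*
 * Example of the resulting double mapping (numbers are illustrative,
 * assuming 16KB pages): pfm_smpl_buffer_alloc(current, ctx, 8000, &uaddr)
 * page-aligns the request to 16384 bytes, keeps the kernel view in
 * ctx->ctx_smpl_hdr (vmalloc space) and maps the same physical pages
 * read-only at uaddr == ctx->ctx_smpl_vaddr in the task. Samples written
 * through ctx_smpl_hdr are therefore visible at user level without any
 * copy_to_user().
 */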
/*
 * XXX: do something better here
 */
static int
pfm_bad_permissions(struct task_struct *task)
{
	/* inspired by ptrace_attach() */
	DPRINT(("cur: uid=%d gid=%d task: euid=%d suid=%d uid=%d egid=%d sgid=%d\n",
		current->uid,
		current->gid,
		task->euid,
		task->suid,
		task->uid,
		task->egid,
		task->sgid));

	return ((current->uid != task->euid)
	    || (current->uid != task->suid)
	    || (current->uid != task->uid)
	    || (current->gid != task->egid)
	    || (current->gid != task->sgid)
	    || (current->gid != task->gid)) && !capable(CAP_SYS_PTRACE);
}
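/*
 * In other words (same policy as ptrace_attach()): an unprivileged
 * caller may only monitor tasks whose uid/euid/suid all match its own
 * uid and whose gid/egid/sgid all match its own gid; CAP_SYS_PTRACE
 * overrides the check entirely.
 */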
static int
pfarg_is_sane(struct task_struct *task, pfarg_context_t *pfx)
{
	int ctx_flags;

	ctx_flags = pfx->ctx_flags;

	if (ctx_flags & PFM_FL_SYSTEM_WIDE) {
		/*
		 * cannot block in this mode
		 */
		if (ctx_flags & PFM_FL_NOTIFY_BLOCK) {
			DPRINT(("cannot use blocking mode when in system wide monitoring\n"));
			return -EINVAL;
		}
	}
	/* probably more to add here */

	return 0;
}
static int
pfm_setup_buffer_fmt(struct task_struct *task, pfm_context_t *ctx, unsigned int ctx_flags,
		     unsigned int cpu, pfarg_context_t *arg)
{
	pfm_buffer_fmt_t *fmt = NULL;
	unsigned long size = 0UL;
	void *uaddr = NULL;
	void *fmt_arg = NULL;
	int ret = 0;
#define PFM_CTXARG_BUF_ARG(a)	(pfm_buffer_fmt_t *)(a+1)

	/* invoke and lock buffer format, if found */
	fmt = pfm_find_buffer_fmt(arg->ctx_smpl_buf_id);
	if (fmt == NULL) {
		DPRINT(("[%d] cannot find buffer format\n", task->pid));
		return -EINVAL;
	}

	/*
	 * buffer argument MUST be contiguous to pfarg_context_t
	 */
	if (fmt->fmt_arg_size) fmt_arg = PFM_CTXARG_BUF_ARG(arg);

	ret = pfm_buf_fmt_validate(fmt, task, ctx_flags, cpu, fmt_arg);

	DPRINT(("[%d] after validate(0x%x,%d,%p)=%d\n", task->pid, ctx_flags, cpu, fmt_arg, ret));

	if (ret) goto error;

	/* link buffer format and context */
	ctx->ctx_buf_fmt = fmt;

	/*
	 * check if buffer format wants to use perfmon buffer allocation/mapping service
	 */
	ret = pfm_buf_fmt_getsize(fmt, task, ctx_flags, cpu, fmt_arg, &size);
	if (ret) goto error;

	if (size) {
		/*
		 * buffer is always remapped into the caller's address space
		 */
		ret = pfm_smpl_buffer_alloc(current, ctx, size, &uaddr);
		if (ret) goto error;

		/* keep track of user address of buffer */
		arg->ctx_smpl_vaddr = uaddr;
	}
	ret = pfm_buf_fmt_init(fmt, task, ctx->ctx_smpl_hdr, ctx_flags, cpu, fmt_arg);

error:
	return ret;
}
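/*
 * Layout assumed by PFM_CTXARG_BUF_ARG() above (sketch): the
 * format-specific argument must sit right behind the pfarg_context_t
 * in the buffer copied in by sys_perfmonctl():
 *
 *	+-------------------------+  <- arg
 *	| pfarg_context_t         |
 *	+-------------------------+  <- PFM_CTXARG_BUF_ARG(arg), i.e. arg+1
 *	| fmt->fmt_arg_size bytes |
 *	+-------------------------+
 *
 * pfm_ctx_getsize() reports fmt_arg_size so that both pieces can be
 * fetched together in a single user copy.
 */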
static void
pfm_reset_pmu_state(pfm_context_t *ctx)
{
	int i;

	/*
	 * install reset values for PMC.
	 */
	for (i=1; PMC_IS_LAST(i) == 0; i++) {
		if (PMC_IS_IMPL(i) == 0) continue;
		ctx->ctx_pmcs[i] = PMC_DFL_VAL(i);
		DPRINT(("pmc[%d]=0x%lx\n", i, ctx->ctx_pmcs[i]));
	}
	/*
	 * PMD registers are set to 0UL when the context is memset()
	 */

	/*
	 * On context switched restore, we must restore ALL pmc and ALL pmd even
	 * when they are not actively used by the task. In UP, the incoming process
	 * may otherwise pick up left over PMC, PMD state from the previous process.
	 * As opposed to PMD, stale PMC can cause harm to the incoming
	 * process because they may change what is being measured.
	 * Therefore, we must systematically reinstall the entire
	 * PMC state. In SMP, the same thing is possible on the
	 * same CPU but also between 2 CPUs.
	 *
	 * The problem with PMD is information leaking especially
	 * to user level when psr.sp=0
	 *
	 * There is unfortunately no easy way to avoid this problem
	 * on either UP or SMP. This definitely slows down the
	 * pfm_load_regs() function.
	 */

	/*
	 * bitmask of all PMCs accessible to this context
	 *
	 * PMC0 is treated differently.
	 */
	ctx->ctx_all_pmcs[0] = pmu_conf->impl_pmcs[0] & ~0x1;

	/*
	 * bitmask of all PMDs that are accessible to this context
	 */
	ctx->ctx_all_pmds[0] = pmu_conf->impl_pmds[0];

	DPRINT(("<%d> all_pmcs=0x%lx all_pmds=0x%lx\n", ctx->ctx_fd, ctx->ctx_all_pmcs[0],ctx->ctx_all_pmds[0]));

	/*
	 * useful in case of re-enable after disable
	 */
	ctx->ctx_used_ibrs[0] = 0UL;
	ctx->ctx_used_dbrs[0] = 0UL;
}
static int
pfm_ctx_getsize(void *arg, size_t *sz)
{
	pfarg_context_t *req = (pfarg_context_t *)arg;
	pfm_buffer_fmt_t *fmt;

	*sz = 0;

	if (!pfm_uuid_cmp(req->ctx_smpl_buf_id, pfm_null_uuid)) return 0;

	fmt = pfm_find_buffer_fmt(req->ctx_smpl_buf_id);
	if (fmt == NULL) {
		DPRINT(("cannot find buffer format\n"));
		return -EINVAL;
	}
	/* get just enough to copy in user parameters */
	*sz = fmt->fmt_arg_size;
	DPRINT(("arg_size=%lu\n", *sz));

	return 0;
}
/*
 * cannot attach if :
 * 	- kernel task
 * 	- task not owned by caller
 * 	- task incompatible with context mode
 */
static int
pfm_task_incompatible(pfm_context_t *ctx, struct task_struct *task)
{
	/*
	 * no kernel task or task not owned by caller
	 */
	if (task->mm == NULL) {
		DPRINT(("task [%d] has no memory context (kernel thread)\n", task->pid));
		return -EPERM;
	}
	if (pfm_bad_permissions(task)) {
		DPRINT(("no permission to attach to [%d]\n", task->pid));
		return -EPERM;
	}
	/*
	 * cannot block in self-monitoring mode
	 */
	if (CTX_OVFL_NOBLOCK(ctx) == 0 && task == current) {
		DPRINT(("cannot load a blocking context on self for [%d]\n", task->pid));
		return -EINVAL;
	}

	if (task->exit_state == EXIT_ZOMBIE) {
		DPRINT(("cannot attach to zombie task [%d]\n", task->pid));
		return -EBUSY;
	}

	/*
	 * always ok for self
	 */
	if (task == current) return 0;

	if ((task->state != TASK_STOPPED) && (task->state != TASK_TRACED)) {
		DPRINT(("cannot attach to non-stopped task [%d] state=%ld\n", task->pid, task->state));
		return -EBUSY;
	}
	/*
	 * make sure the task is off any CPU
	 */
	wait_task_inactive(task);

	/* more to come... */

	return 0;
}
static int
pfm_get_task(pfm_context_t *ctx, pid_t pid, struct task_struct **task)
{
	struct task_struct *p = current;
	int ret;

	/* XXX: need to add more checks here */
	if (pid < 2) return -EPERM;

	if (pid != current->pid) {

		read_lock(&tasklist_lock);

		p = find_task_by_pid(pid);

		/* make sure task cannot go away while we operate on it */
		if (p) get_task_struct(p);

		read_unlock(&tasklist_lock);

		if (p == NULL) return -ESRCH;
	}

	ret = pfm_task_incompatible(ctx, p);
	if (ret == 0) {
		*task = p;
	} else if (p != current) {
		pfm_put_task(p);
	}
	return ret;
}
static int
pfm_context_create(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	pfarg_context_t *req = (pfarg_context_t *)arg;
	struct file *filp;
	int ctx_flags;
	int ret;

	/* let's check the arguments first */
	ret = pfarg_is_sane(current, req);
	if (ret < 0) return ret;

	ctx_flags = req->ctx_flags;

	ret = -ENOMEM;

	ctx = pfm_context_alloc();
	if (!ctx) goto error;

	ret = pfm_alloc_fd(&filp);
	if (ret < 0) goto error_file;

	req->ctx_fd = ctx->ctx_fd = ret;

	/*
	 * attach context to file
	 */
	filp->private_data = ctx;

	/*
	 * does the user want to sample?
	 */
	if (pfm_uuid_cmp(req->ctx_smpl_buf_id, pfm_null_uuid)) {
		ret = pfm_setup_buffer_fmt(current, ctx, ctx_flags, 0, req);
		if (ret) goto buffer_error;
	}

	/*
	 * init context protection lock
	 */
	spin_lock_init(&ctx->ctx_lock);

	/*
	 * context is unloaded
	 */
	ctx->ctx_state = PFM_CTX_UNLOADED;

	/*
	 * initialization of context's flags
	 */
	ctx->ctx_fl_block       = (ctx_flags & PFM_FL_NOTIFY_BLOCK) ? 1 : 0;
	ctx->ctx_fl_system      = (ctx_flags & PFM_FL_SYSTEM_WIDE) ? 1: 0;
	ctx->ctx_fl_is_sampling = ctx->ctx_buf_fmt ? 1 : 0; /* assume record() is defined */
	ctx->ctx_fl_no_msg      = (ctx_flags & PFM_FL_OVFL_NO_MSG) ? 1: 0;
	/*
	 * will move to set properties
	 * ctx->ctx_fl_excl_idle   = (ctx_flags & PFM_FL_EXCL_IDLE) ? 1: 0;
	 */

	/*
	 * init restart semaphore to locked
	 */
	sema_init(&ctx->ctx_restart_sem, 0);

	/*
	 * activation is used in SMP only
	 */
	ctx->ctx_last_activation = PFM_INVALID_ACTIVATION;
	SET_LAST_CPU(ctx, -1);

	/*
	 * initialize notification message queue
	 */
	ctx->ctx_msgq_head = ctx->ctx_msgq_tail = 0;
	init_waitqueue_head(&ctx->ctx_msgq_wait);
	init_waitqueue_head(&ctx->ctx_zombieq);

	DPRINT(("ctx=%p flags=0x%x system=%d notify_block=%d excl_idle=%d no_msg=%d ctx_fd=%d \n",
		ctx,
		ctx_flags,
		ctx->ctx_fl_system,
		ctx->ctx_fl_block,
		ctx->ctx_fl_excl_idle,
		ctx->ctx_fl_no_msg,
		ctx->ctx_fd));

	/*
	 * initialize soft PMU state
	 */
	pfm_reset_pmu_state(ctx);

	return 0;

buffer_error:
	pfm_free_fd(ctx->ctx_fd, filp);

	if (ctx->ctx_buf_fmt) {
		pfm_buf_fmt_exit(ctx->ctx_buf_fmt, current, NULL, regs);
	}
error_file:
	pfm_context_free(ctx);

error:
	return ret;
}
static inline unsigned long
pfm_new_counter_value (pfm_counter_t *reg, int is_long_reset)
{
	unsigned long val = is_long_reset ? reg->long_reset : reg->short_reset;
	unsigned long new_seed, old_seed = reg->seed, mask = reg->mask;
	extern unsigned long carta_random32 (unsigned long seed);

	if (reg->flags & PFM_REGFL_RANDOM) {
		new_seed = carta_random32(old_seed);
		val -= (old_seed & mask);	/* counter values are negative numbers! */
		if ((mask >> 32) != 0)
			/* construct a full 64-bit random value: */
			new_seed |= carta_random32(old_seed >> 32) << 32;
		reg->seed = new_seed;
	}
	reg->lval = val;
	return val;
}
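/*
 * Worked example of the reset arithmetic (illustrative values): to take
 * an overflow interrupt after N events, a counting PMD is programmed
 * with -N (modulo the counter width), so a short_reset of -1000
 * overflows after 1000 increments. With PFM_REGFL_RANDOM, the
 * "val -= (old_seed & mask)" step above pushes the value further away
 * from overflow, spreading the effective period over [N, N + mask].
 */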
static void
pfm_reset_regs_masked(pfm_context_t *ctx, unsigned long *ovfl_regs, int is_long_reset)
{
	unsigned long mask = ovfl_regs[0];
	unsigned long reset_others = 0UL;
	unsigned long val;
	int i;

	/*
	 * now restore reset value on sampling overflowed counters
	 */
	mask >>= PMU_FIRST_COUNTER;
	for(i = PMU_FIRST_COUNTER; mask; i++, mask >>= 1) {

		if ((mask & 0x1UL) == 0UL) continue;

		ctx->ctx_pmds[i].val = val = pfm_new_counter_value(ctx->ctx_pmds + i, is_long_reset);
		reset_others        |= ctx->ctx_pmds[i].reset_pmds[0];

		DPRINT_ovfl((" %s reset ctx_pmds[%d]=%lx\n", is_long_reset ? "long" : "short", i, val));
	}

	/*
	 * Now take care of resetting the other registers
	 */
	for(i = 0; reset_others; i++, reset_others >>= 1) {

		if ((reset_others & 0x1) == 0) continue;

		ctx->ctx_pmds[i].val = val = pfm_new_counter_value(ctx->ctx_pmds + i, is_long_reset);

		DPRINT_ovfl(("%s reset_others pmd[%d]=%lx\n",
			  is_long_reset ? "long" : "short", i, val));
	}
}
static void
pfm_reset_regs(pfm_context_t *ctx, unsigned long *ovfl_regs, int is_long_reset)
{
	unsigned long mask = ovfl_regs[0];
	unsigned long reset_others = 0UL;
	unsigned long val;
	int i;

	DPRINT_ovfl(("ovfl_regs=0x%lx is_long_reset=%d\n", ovfl_regs[0], is_long_reset));

	if (ctx->ctx_state == PFM_CTX_MASKED) {
		pfm_reset_regs_masked(ctx, ovfl_regs, is_long_reset);
		return;
	}

	/*
	 * now restore reset value on sampling overflowed counters
	 */
	mask >>= PMU_FIRST_COUNTER;
	for(i = PMU_FIRST_COUNTER; mask; i++, mask >>= 1) {

		if ((mask & 0x1UL) == 0UL) continue;

		val           = pfm_new_counter_value(ctx->ctx_pmds + i, is_long_reset);
		reset_others |= ctx->ctx_pmds[i].reset_pmds[0];

		DPRINT_ovfl((" %s reset ctx_pmds[%d]=%lx\n", is_long_reset ? "long" : "short", i, val));

		pfm_write_soft_counter(ctx, i, val);
	}

	/*
	 * Now take care of resetting the other registers
	 */
	for(i = 0; reset_others; i++, reset_others >>= 1) {

		if ((reset_others & 0x1) == 0) continue;

		val = pfm_new_counter_value(ctx->ctx_pmds + i, is_long_reset);

		if (PMD_IS_COUNTING(i)) {
			pfm_write_soft_counter(ctx, i, val);
		} else {
			ia64_set_pmd(i, val);
		}
		DPRINT_ovfl(("%s reset_others pmd[%d]=%lx\n",
			  is_long_reset ? "long" : "short", i, val));
	}
	ia64_srlz_d();
}
static int
pfm_write_pmcs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	struct thread_struct *thread = NULL;
	struct task_struct *task;
	pfarg_reg_t *req = (pfarg_reg_t *)arg;
	unsigned long value, pmc_pm;
	unsigned long smpl_pmds, reset_pmds, impl_pmds;
	unsigned int cnum, reg_flags, flags, pmc_type;
	int i, can_access_pmu = 0, is_loaded, is_system, expert_mode;
	int is_monitor, is_counting, state;
	int ret = -EINVAL;
	pfm_reg_check_t wr_func;
#define PFM_CHECK_PMC_PM(x, y, z) ((x)->ctx_fl_system ^ PMC_PM(y, z))

	state     = ctx->ctx_state;
	is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
	is_system = ctx->ctx_fl_system;
	task      = ctx->ctx_task;
	impl_pmds = pmu_conf->impl_pmds[0];

	if (state == PFM_CTX_ZOMBIE) return -EINVAL;

	if (is_loaded) {
		thread = &task->thread;
		/*
		 * In system wide and when the context is loaded, access can only happen
		 * when the caller is running on the CPU being monitored by the session.
		 * It does not have to be the owner (ctx_task) of the context per se.
		 */
		if (is_system && ctx->ctx_cpu != smp_processor_id()) {
			DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
			return -EBUSY;
		}
		can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;
	}
	expert_mode = pfm_sysctl.expert_mode;

	for (i = 0; i < count; i++, req++) {

		cnum       = req->reg_num;
		reg_flags  = req->reg_flags;
		value      = req->reg_value;
		smpl_pmds  = req->reg_smpl_pmds[0];
		reset_pmds = req->reg_reset_pmds[0];
		flags      = 0;

		if (cnum >= PMU_MAX_PMCS) {
			DPRINT(("pmc%u is invalid\n", cnum));
			goto error;
		}

		pmc_type    = pmu_conf->pmc_desc[cnum].type;
		pmc_pm      = (value >> pmu_conf->pmc_desc[cnum].pm_pos) & 0x1;
		is_counting = (pmc_type & PFM_REG_COUNTING) == PFM_REG_COUNTING ? 1 : 0;
		is_monitor  = (pmc_type & PFM_REG_MONITOR) == PFM_REG_MONITOR ? 1 : 0;

		/*
		 * we reject all non implemented PMC as well
		 * as attempts to modify PMC[0-3] which are used
		 * as status registers by the PMU
		 */
		if ((pmc_type & PFM_REG_IMPL) == 0 || (pmc_type & PFM_REG_CONTROL) == PFM_REG_CONTROL) {
			DPRINT(("pmc%u is unimplemented or no-access pmc_type=%x\n", cnum, pmc_type));
			goto error;
		}
		wr_func = pmu_conf->pmc_desc[cnum].write_check;
		/*
		 * If the PMC is a monitor, then if the value is not the default:
		 * 	- system-wide session: PMCx.pm=1 (privileged monitor)
		 * 	- per-task           : PMCx.pm=0 (user monitor)
		 */
		if (is_monitor && value != PMC_DFL_VAL(cnum) && is_system ^ pmc_pm) {
			DPRINT(("pmc%u pmc_pm=%lu is_system=%d\n",
				cnum,
				pmc_pm,
				is_system));
			goto error;
		}

		if (is_counting) {
			/*
			 * enforce generation of overflow interrupt. Necessary on all
			 * CPUs.
			 */
			value |= 1 << PMU_PMC_OI;

			if (reg_flags & PFM_REGFL_OVFL_NOTIFY) {
				flags |= PFM_REGFL_OVFL_NOTIFY;
			}

			if (reg_flags & PFM_REGFL_RANDOM) flags |= PFM_REGFL_RANDOM;

			/* verify validity of smpl_pmds */
			if ((smpl_pmds & impl_pmds) != smpl_pmds) {
				DPRINT(("invalid smpl_pmds 0x%lx for pmc%u\n", smpl_pmds, cnum));
				goto error;
			}

			/* verify validity of reset_pmds */
			if ((reset_pmds & impl_pmds) != reset_pmds) {
				DPRINT(("invalid reset_pmds 0x%lx for pmc%u\n", reset_pmds, cnum));
				goto error;
			}
		} else {
			if (reg_flags & (PFM_REGFL_OVFL_NOTIFY|PFM_REGFL_RANDOM)) {
				DPRINT(("cannot set ovfl_notify or random on pmc%u\n", cnum));
				goto error;
			}
			/* eventid on non-counting monitors are ignored */
		}

		/*
		 * execute write checker, if any
		 */
		if (likely(expert_mode == 0 && wr_func)) {
			ret = (*wr_func)(task, ctx, cnum, &value, regs);
			if (ret) goto error;
			ret = -EINVAL;
		}

		/*
		 * no error on this register
		 */
		PFM_REG_RETFLAG_SET(req->reg_flags, 0);

		/*
		 * Now we commit the changes to the software state
		 */

		/*
		 * update overflow information
		 */
		if (is_counting) {
			/*
			 * full flag update each time a register is programmed
			 */
			ctx->ctx_pmds[cnum].flags = flags;

			ctx->ctx_pmds[cnum].reset_pmds[0] = reset_pmds;
			ctx->ctx_pmds[cnum].smpl_pmds[0]  = smpl_pmds;
			ctx->ctx_pmds[cnum].eventid       = req->reg_smpl_eventid;

			/*
			 * Mark all PMDS to be accessed as used.
			 *
			 * We do not keep track of PMC because we have to
			 * systematically restore ALL of them.
			 *
			 * We do not update the used_monitors mask, because
			 * if we have not programmed them, then they will be in
			 * a quiescent state, therefore we will not need to
			 * mask/restore them when the context is MASKED.
			 */
			CTX_USED_PMD(ctx, reset_pmds);
			CTX_USED_PMD(ctx, smpl_pmds);
			/*
			 * make sure we do not try to reset on
			 * restart because we have established new values
			 */
			if (state == PFM_CTX_MASKED) ctx->ctx_ovfl_regs[0] &= ~1UL << cnum;
		}
		/*
		 * Needed in case the user does not initialize the equivalent
		 * PMD. Clearing is done indirectly via pfm_reset_pmu_state() so there is no
		 * possible leak here.
		 */
		CTX_USED_PMD(ctx, pmu_conf->pmc_desc[cnum].dep_pmd[0]);

		/*
		 * keep track of the monitor PMC that we are using.
		 * we save the value of the pmc in ctx_pmcs[] and if
		 * the monitoring is not stopped for the context we also
		 * place it in the saved state area so that it will be
		 * picked up later by the context switch code.
		 *
		 * The value in ctx_pmcs[] can only be changed in pfm_write_pmcs().
		 *
		 * The value in thread->pmcs[] may be modified on overflow, i.e., when
		 * monitoring needs to be stopped.
		 */
		if (is_monitor) CTX_USED_MONITOR(ctx, 1UL << cnum);

		/*
		 * update context state
		 */
		ctx->ctx_pmcs[cnum] = value;

		if (is_loaded) {
			/*
			 * write thread state
			 */
			if (is_system == 0) thread->pmcs[cnum] = value;

			/*
			 * write hardware register if we can
			 */
			if (can_access_pmu) {
				ia64_set_pmc(cnum, value);
			}
#ifdef CONFIG_SMP
			else {
				/*
				 * per-task SMP only here
				 *
				 * we are guaranteed that the task is not running on the other CPU,
				 * we indicate that this PMD will need to be reloaded if the task
				 * is rescheduled on the CPU it ran last on.
				 */
				ctx->ctx_reload_pmcs[0] |= 1UL << cnum;
			}
#endif
		}

		DPRINT(("pmc[%u]=0x%lx ld=%d apmu=%d flags=0x%x all_pmcs=0x%lx used_pmds=0x%lx eventid=%ld smpl_pmds=0x%lx reset_pmds=0x%lx reloads_pmcs=0x%lx used_monitors=0x%lx ovfl_regs=0x%lx\n",
			  cnum,
			  value,
			  is_loaded,
			  can_access_pmu,
			  flags,
			  ctx->ctx_all_pmcs[0],
			  ctx->ctx_used_pmds[0],
			  ctx->ctx_pmds[cnum].eventid,
			  smpl_pmds,
			  reset_pmds,
			  ctx->ctx_reload_pmcs[0],
			  ctx->ctx_used_monitors[0],
			  ctx->ctx_ovfl_regs[0]));
	}

	/*
	 * make sure the changes are visible
	 */
	if (can_access_pmu) ia64_srlz_d();

	return 0;
error:
	PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
	return ret;
}
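/*
 * User-level sketch of programming one counting monitor through this
 * entry point (illustrative; the event encoding and PMC/PMD pairing
 * are CPU-model specific):
 *
 *	pfarg_reg_t pc;
 *	memset(&pc, 0, sizeof(pc));
 *	pc.reg_num          = 4;		// e.g. PMC4 paired with PMD4
 *	pc.reg_value        = event_sel;	// pmc.oi is forced on above
 *	pc.reg_flags        = PFM_REGFL_OVFL_NOTIFY;
 *	pc.reg_smpl_pmds[0] = 1UL << 5;		// capture PMD5 in samples
 *	perfmonctl(ctx_fd, PFM_WRITE_PMCS, &pc, 1);
 *
 * The loop above rejects unimplemented PMCs, pmc.pm values inconsistent
 * with the session type, and smpl/reset masks naming unimplemented PMDs.
 */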
static int
pfm_write_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	struct thread_struct *thread = NULL;
	struct task_struct *task;
	pfarg_reg_t *req = (pfarg_reg_t *)arg;
	unsigned long value, hw_value, ovfl_mask;
	unsigned int cnum;
	int i, can_access_pmu = 0, state;
	int is_counting, is_loaded, is_system, expert_mode;
	int ret = -EINVAL;
	pfm_reg_check_t wr_func;

	state     = ctx->ctx_state;
	is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
	is_system = ctx->ctx_fl_system;
	ovfl_mask = pmu_conf->ovfl_val;
	task      = ctx->ctx_task;

	if (unlikely(state == PFM_CTX_ZOMBIE)) return -EINVAL;

	/*
	 * on both UP and SMP, we can only write to the PMC when the task is
	 * the owner of the local PMU.
	 */
	if (likely(is_loaded)) {
		thread = &task->thread;
		/*
		 * In system wide and when the context is loaded, access can only happen
		 * when the caller is running on the CPU being monitored by the session.
		 * It does not have to be the owner (ctx_task) of the context per se.
		 */
		if (unlikely(is_system && ctx->ctx_cpu != smp_processor_id())) {
			DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
			return -EBUSY;
		}
		can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;
	}
	expert_mode = pfm_sysctl.expert_mode;

	for (i = 0; i < count; i++, req++) {

		cnum  = req->reg_num;
		value = req->reg_value;

		if (!PMD_IS_IMPL(cnum)) {
			DPRINT(("pmd[%u] is unimplemented or invalid\n", cnum));
			goto abort_mission;
		}
		is_counting = PMD_IS_COUNTING(cnum);
		wr_func     = pmu_conf->pmd_desc[cnum].write_check;

		/*
		 * execute write checker, if any
		 */
		if (unlikely(expert_mode == 0 && wr_func)) {
			unsigned long v = value;

			ret = (*wr_func)(task, ctx, cnum, &v, regs);
			if (ret) goto abort_mission;

			value = v;
			ret   = -EINVAL;
		}

		/*
		 * no error on this register
		 */
		PFM_REG_RETFLAG_SET(req->reg_flags, 0);

		/*
		 * now commit changes to software state
		 */
		hw_value = value;

		/*
		 * update virtualized (64bits) counter
		 */
		if (is_counting) {
			/*
			 * write context state
			 */
			ctx->ctx_pmds[cnum].lval = value;

			/*
			 * when the context is loaded we use the split value
			 */
			if (is_loaded) {
				hw_value = value &  ovfl_mask;
				value    = value & ~ovfl_mask;
			}
		}
		/*
		 * update reset values (not just for counters)
		 */
		ctx->ctx_pmds[cnum].long_reset  = req->reg_long_reset;
		ctx->ctx_pmds[cnum].short_reset = req->reg_short_reset;

		/*
		 * update randomization parameters (not just for counters)
		 */
		ctx->ctx_pmds[cnum].seed = req->reg_random_seed;
		ctx->ctx_pmds[cnum].mask = req->reg_random_mask;

		/*
		 * update context value
		 */
		ctx->ctx_pmds[cnum].val = value;

		/*
		 * Keep track of what we use
		 *
		 * We do not keep track of PMC because we have to
		 * systematically restore ALL of them.
		 */
		CTX_USED_PMD(ctx, PMD_PMD_DEP(cnum));

		/*
		 * mark this PMD register used as well
		 */
		CTX_USED_PMD(ctx, RDEP(cnum));

		/*
		 * make sure we do not try to reset on
		 * restart because we have established new values
		 */
		if (is_counting && state == PFM_CTX_MASKED) {
			ctx->ctx_ovfl_regs[0] &= ~1UL << cnum;
		}

		if (is_loaded) {
			/*
			 * write thread state
			 */
			if (is_system == 0) thread->pmds[cnum] = hw_value;

			/*
			 * write hardware register if we can
			 */
			if (can_access_pmu) {
				ia64_set_pmd(cnum, hw_value);
			} else {
#ifdef CONFIG_SMP
				/*
				 * we are guaranteed that the task is not running on the other CPU,
				 * we indicate that this PMD will need to be reloaded if the task
				 * is rescheduled on the CPU it ran last on.
				 */
				ctx->ctx_reload_pmds[0] |= 1UL << cnum;
#endif
			}
		}

		DPRINT(("pmd[%u]=0x%lx ld=%d apmu=%d, hw_value=0x%lx ctx_pmd=0x%lx short_reset=0x%lx "
			  "long_reset=0x%lx notify=%c seed=0x%lx mask=0x%lx used_pmds=0x%lx reset_pmds=0x%lx reload_pmds=0x%lx all_pmds=0x%lx ovfl_regs=0x%lx\n",
			cnum,
			value,
			is_loaded,
			can_access_pmu,
			hw_value,
			ctx->ctx_pmds[cnum].val,
			ctx->ctx_pmds[cnum].short_reset,
			ctx->ctx_pmds[cnum].long_reset,
			PMC_OVFL_NOTIFY(ctx, cnum) ? 'Y':'N',
			ctx->ctx_pmds[cnum].seed,
			ctx->ctx_pmds[cnum].mask,
			ctx->ctx_used_pmds[0],
			ctx->ctx_pmds[cnum].reset_pmds[0],
			ctx->ctx_reload_pmds[0],
			ctx->ctx_all_pmds[0],
			ctx->ctx_ovfl_regs[0]));
	}

	/*
	 * make changes visible
	 */
	if (can_access_pmu) ia64_srlz_d();

	return 0;

abort_mission:
	/*
	 * for now, we have only one possibility for error
	 */
	PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
	return ret;
}
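/*
 * Worked example of the split-value logic above, assuming a PMU with
 * 47-bit counters (ovfl_mask = (1UL << 47) - 1): for a 64-bit user
 * value v,
 *
 *	hw_value = v &  ovfl_mask;	// low 47 bits go to the PMD
 *	value    = v & ~ovfl_mask;	// upper bits stay in ctx_pmds[].val
 *
 * The hardware counts in hw_value; the software part is added back on
 * overflow and in pfm_read_pmds(), which is how perfmon exposes 64-bit
 * counters on top of narrower hardware PMDs.
 */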
/*
 * By way of PROTECT_CTX(), interrupts are masked while we are in this function.
 * Therefore we know we do not have to worry about the PMU overflow interrupt. If an
 * interrupt is delivered during the call, it will be kept pending until we leave, making
 * it appear as if it had been generated at the UNPROTECT_CTX(). At least we are
 * guaranteed to return consistent data to the user, it may simply be old. It is not
 * trivial to treat the overflow while inside the call because you may end up in
 * some module sampling buffer code causing deadlocks.
 */
static int
pfm_read_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	struct thread_struct *thread = NULL;
	struct task_struct *task;
	unsigned long val = 0UL, lval, ovfl_mask, sval;
	pfarg_reg_t *req = (pfarg_reg_t *)arg;
	unsigned int cnum, reg_flags = 0;
	int i, can_access_pmu = 0, state;
	int is_loaded, is_system, is_counting, expert_mode;
	int ret = -EINVAL;
	pfm_reg_check_t rd_func;

	/*
	 * access is possible when loaded only for
	 * self-monitoring tasks or in UP mode
	 */

	state     = ctx->ctx_state;
	is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
	is_system = ctx->ctx_fl_system;
	ovfl_mask = pmu_conf->ovfl_val;
	task      = ctx->ctx_task;

	if (state == PFM_CTX_ZOMBIE) return -EINVAL;

	if (likely(is_loaded)) {
		thread = &task->thread;
		/*
		 * In system wide and when the context is loaded, access can only happen
		 * when the caller is running on the CPU being monitored by the session.
		 * It does not have to be the owner (ctx_task) of the context per se.
		 */
		if (unlikely(is_system && ctx->ctx_cpu != smp_processor_id())) {
			DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
			return -EBUSY;
		}
		/*
		 * this can be true when not self-monitoring only in UP
		 */
		can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;

		if (can_access_pmu) ia64_srlz_d();
	}
	expert_mode = pfm_sysctl.expert_mode;

	DPRINT(("ld=%d apmu=%d ctx_state=%d\n",
		is_loaded,
		can_access_pmu,
		state));

	/*
	 * on both UP and SMP, we can only read the PMD from the hardware register when
	 * the task is the owner of the local PMU.
	 */

	for (i = 0; i < count; i++, req++) {

		cnum      = req->reg_num;
		reg_flags = req->reg_flags;

		if (unlikely(!PMD_IS_IMPL(cnum))) goto error;
		/*
		 * we can only read the register that we use. That includes
		 * the one we explicitly initialize AND the one we want included
		 * in the sampling buffer (smpl_regs).
		 *
		 * Having this restriction allows optimization in the ctxsw routine
		 * without compromising security (leaks)
		 */
		if (unlikely(!CTX_IS_USED_PMD(ctx, cnum))) goto error;

		sval        = ctx->ctx_pmds[cnum].val;
		lval        = ctx->ctx_pmds[cnum].lval;
		is_counting = PMD_IS_COUNTING(cnum);

		/*
		 * If the task is not the current one, then we check if the
		 * PMU state is still in the local live register due to lazy ctxsw.
		 * If true, then we read directly from the registers.
		 */
		if (can_access_pmu){
			val = ia64_get_pmd(cnum);
		} else {
			/*
			 * context has been saved
			 * if context is zombie, then task does not exist anymore.
			 * In this case, we use the full value saved in the context (pfm_flush_regs()).
			 */
			val = is_loaded ? thread->pmds[cnum] : 0UL;
		}
		rd_func = pmu_conf->pmd_desc[cnum].read_check;

		if (is_counting) {
			/*
			 * XXX: need to check for overflow when loaded
			 */
			val &= ovfl_mask;
			val += sval;
		}

		/*
		 * execute read checker, if any
		 */
		if (unlikely(expert_mode == 0 && rd_func)) {
			unsigned long v = val;
			ret = (*rd_func)(ctx->ctx_task, ctx, cnum, &v, regs);
			if (ret) goto error;
			val = v;
			ret = -EINVAL;
		}

		PFM_REG_RETFLAG_SET(reg_flags, 0);

		DPRINT(("pmd[%u]=0x%lx\n", cnum, val));

		/*
		 * update register return value, abort all if problem during copy.
		 * we only modify the reg_flags field. no check mode is fine because
		 * access has been verified upfront in sys_perfmonctl().
		 */
		req->reg_value          = val;
		req->reg_flags          = reg_flags;
		req->reg_last_reset_val = lval;
	}

	return 0;

error:
	PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
	return ret;
}
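/*
 * Example of the reconstruction performed above for a counting PMD:
 * with ovfl_mask = (1UL << 47) - 1, a live hardware value of 0x1234 and
 * a software part sval = 0x800000000000 yield the virtualized 64-bit
 * count 0x800000001234 (val = (hw & ovfl_mask) + sval).
 */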
long
pfm_mod_write_pmcs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
{
	pfm_context_t *ctx;

	if (req == NULL) return -EINVAL;

	ctx = GET_PMU_CTX();

	if (ctx == NULL) return -EINVAL;

	/*
	 * for now limit to current task, which is enough when calling
	 * from overflow handler
	 */
	if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;

	return pfm_write_pmcs(ctx, req, nreq, regs);
}
EXPORT_SYMBOL(pfm_mod_write_pmcs);
long
pfm_mod_read_pmds(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
{
	pfm_context_t *ctx;

	if (req == NULL) return -EINVAL;

	ctx = GET_PMU_CTX();

	if (ctx == NULL) return -EINVAL;

	/*
	 * for now limit to current task, which is enough when calling
	 * from overflow handler
	 */
	if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;

	return pfm_read_pmds(ctx, req, nreq, regs);
}
EXPORT_SYMBOL(pfm_mod_read_pmds);
/*
 * Only call this function when a process is trying to
 * write the debug registers (reading is always allowed)
 */
int
pfm_use_debug_registers(struct task_struct *task)
{
	pfm_context_t *ctx = task->thread.pfm_context;
	unsigned long flags;
	int ret = 0;

	if (pmu_conf->use_rr_dbregs == 0) return 0;

	DPRINT(("called for [%d]\n", task->pid));

	/*
	 * do it only once
	 */
	if (task->thread.flags & IA64_THREAD_DBG_VALID) return 0;

	/*
	 * Even on SMP, we do not need to use an atomic here because
	 * the only way in is via ptrace() and this is possible only when the
	 * process is stopped. Even in the case where the ctxsw out is not totally
	 * completed by the time we come here, there is no way the 'stopped' process
	 * could be in the middle of fiddling with the pfm_write_ibr_dbr() routine.
	 * So this is always safe.
	 */
	if (ctx && ctx->ctx_fl_using_dbreg == 1) return -1;

	LOCK_PFS(flags);

	/*
	 * We cannot allow setting breakpoints when system wide monitoring
	 * sessions are using the debug registers.
	 */
	if (pfm_sessions.pfs_sys_use_dbregs > 0)
		ret = -1;
	else
		pfm_sessions.pfs_ptrace_use_dbregs++;

	DPRINT(("ptrace_use_dbregs=%u sys_use_dbregs=%u by [%d] ret = %d\n",
		  pfm_sessions.pfs_ptrace_use_dbregs,
		  pfm_sessions.pfs_sys_use_dbregs,
		  task->pid, ret));

	UNLOCK_PFS(flags);

	return ret;
}
/*
 * This function is called for every task that exits with the
 * IA64_THREAD_DBG_VALID set. This indicates a task which was
 * able to use the debug registers for debugging purposes via
 * ptrace(). Therefore we know it was not using them for
 * performance monitoring, so we only decrement the number
 * of "ptraced" debug register users to keep the count up to date
 */
int
pfm_release_debug_registers(struct task_struct *task)
{
	unsigned long flags;
	int ret;

	if (pmu_conf->use_rr_dbregs == 0) return 0;

	LOCK_PFS(flags);
	if (pfm_sessions.pfs_ptrace_use_dbregs == 0) {
		printk(KERN_ERR "perfmon: invalid release for [%d] ptrace_use_dbregs=0\n", task->pid);
		ret = -1;
	} else {
		pfm_sessions.pfs_ptrace_use_dbregs--;
		ret = 0;
	}
	UNLOCK_PFS(flags);

	return ret;
}
static int
pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	struct task_struct *task;
	pfm_buffer_fmt_t *fmt;
	pfm_ovfl_ctrl_t rst_ctrl;
	int state, is_system;
	int ret = 0;

	state     = ctx->ctx_state;
	fmt       = ctx->ctx_buf_fmt;
	is_system = ctx->ctx_fl_system;
	task      = PFM_CTX_TASK(ctx);

	switch(state) {
		case PFM_CTX_MASKED:
			break;
		case PFM_CTX_LOADED:
			if (CTX_HAS_SMPL(ctx) && fmt->fmt_restart_active) break;
			/* fall through */
		case PFM_CTX_UNLOADED:
		case PFM_CTX_ZOMBIE:
			DPRINT(("invalid state=%d\n", state));
			return -EBUSY;
		default:
			DPRINT(("state=%d, cannot operate (no active_restart handler)\n", state));
			return -EINVAL;
	}

	/*
	 * In system wide and when the context is loaded, access can only happen
	 * when the caller is running on the CPU being monitored by the session.
	 * It does not have to be the owner (ctx_task) of the context per se.
	 */
	if (is_system && ctx->ctx_cpu != smp_processor_id()) {
		DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
		return -EBUSY;
	}

	/* sanity check */
	if (unlikely(task == NULL)) {
		printk(KERN_ERR "perfmon: [%d] pfm_restart no task\n", current->pid);
		return -EINVAL;
	}

	if (task == current || is_system) {

		fmt = ctx->ctx_buf_fmt;

		DPRINT(("restarting self %d ovfl=0x%lx\n",
			task->pid,
			ctx->ctx_ovfl_regs[0]));

		if (CTX_HAS_SMPL(ctx)) {

			prefetch(ctx->ctx_smpl_hdr);

			rst_ctrl.bits.mask_monitoring = 0;
			rst_ctrl.bits.reset_ovfl_pmds = 0;

			if (state == PFM_CTX_LOADED)
				ret = pfm_buf_fmt_restart_active(fmt, task, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
			else
				ret = pfm_buf_fmt_restart(fmt, task, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
		} else {
			rst_ctrl.bits.mask_monitoring = 0;
			rst_ctrl.bits.reset_ovfl_pmds = 1;
		}

		if (ret == 0) {
			if (rst_ctrl.bits.reset_ovfl_pmds)
				pfm_reset_regs(ctx, ctx->ctx_ovfl_regs, PFM_PMD_LONG_RESET);

			if (rst_ctrl.bits.mask_monitoring == 0) {
				DPRINT(("resuming monitoring for [%d]\n", task->pid));

				if (state == PFM_CTX_MASKED) pfm_restore_monitoring(task);
			} else {
				DPRINT(("keeping monitoring stopped for [%d]\n", task->pid));

				// cannot use pfm_stop_monitoring(task, regs);
			}
		}
		/*
		 * clear overflowed PMD mask to remove any stale information
		 */
		ctx->ctx_ovfl_regs[0] = 0UL;

		/*
		 * back to LOADED state
		 */
		ctx->ctx_state = PFM_CTX_LOADED;

		/*
		 * XXX: not really useful for self monitoring
		 */
		ctx->ctx_fl_can_restart = 0;

		return 0;
	}

	/*
	 * restart another task
	 */

	/*
	 * When PFM_CTX_MASKED, we cannot issue a restart before the previous
	 * one is seen by the task.
	 */
	if (state == PFM_CTX_MASKED) {
		if (ctx->ctx_fl_can_restart == 0) return -EINVAL;
		/*
		 * will prevent subsequent restart before this one is
		 * seen by other task
		 */
		ctx->ctx_fl_can_restart = 0;
	}

	/*
	 * if blocking, then post the semaphore if PFM_CTX_MASKED, i.e.
	 * the task is blocked or on its way to block. That's the normal
	 * restart path. If the monitoring is not masked, then the task
	 * can be actively monitoring and we cannot directly intervene.
	 * Therefore we use the trap mechanism to catch the task and
	 * force it to reset the buffer/reset PMDs.
	 *
	 * if non-blocking, then we ensure that the task will go into
	 * pfm_handle_work() before returning to user mode.
	 *
	 * We cannot explicitly reset another task, it MUST always
	 * be done by the task itself. This works for system wide because
	 * the tool that is controlling the session is logically doing
	 * "self-monitoring".
	 */
	if (CTX_OVFL_NOBLOCK(ctx) == 0 && state == PFM_CTX_MASKED) {
		DPRINT(("unblocking [%d] \n", task->pid));
		up(&ctx->ctx_restart_sem);
	} else {
		DPRINT(("[%d] armed exit trap\n", task->pid));

		ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_RESET;

		PFM_SET_WORK_PENDING(task, 1);

		pfm_set_task_notify(task);

		/*
		 * XXX: send reschedule if task runs on another CPU
		 */
	}
	return 0;
}
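/*
 * Typical user-level cycle that ends up in this function (sketch, error
 * handling omitted):
 *
 *	pfm_msg_t msg;
 *	read(ctx_fd, &msg, sizeof(msg));	// wait for overflow message
 *	// ... process the sampling buffer ...
 *	perfmonctl(ctx_fd, PFM_RESTART, NULL, 0);
 *
 * For a PFM_FL_NOTIFY_BLOCK context the restart posts ctx_restart_sem
 * and unblocks the monitored task; otherwise the task is trapped on its
 * way back to user mode and performs the reset itself in
 * pfm_handle_work().
 */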
static int
pfm_debug(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	unsigned int m = *(unsigned int *)arg;

	pfm_sysctl.debug = m == 0 ? 0 : 1;

	printk(KERN_INFO "perfmon debugging %s (timing reset)\n", pfm_sysctl.debug ? "on" : "off");

	if (m == 0) {
		memset(pfm_stats, 0, sizeof(pfm_stats));
		for(m=0; m < NR_CPUS; m++) pfm_stats[m].pfm_ovfl_intr_cycles_min = ~0UL;
	}
	return 0;
}
/*
 * arg can be NULL and count can be zero for this function
 */
static int
pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	struct thread_struct *thread = NULL;
	struct task_struct *task;
	pfarg_dbreg_t *req = (pfarg_dbreg_t *)arg;
	unsigned long flags;
	dbreg_t dbreg;
	unsigned int rnum;
	int first_time;
	int ret = 0, state;
	int i, can_access_pmu = 0;
	int is_system, is_loaded;

	if (pmu_conf->use_rr_dbregs == 0) return -EINVAL;

	state     = ctx->ctx_state;
	is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
	is_system = ctx->ctx_fl_system;
	task      = ctx->ctx_task;

	if (state == PFM_CTX_ZOMBIE) return -EINVAL;

	/*
	 * on both UP and SMP, we can only write to the PMC when the task is
	 * the owner of the local PMU.
	 */
	if (is_loaded) {
		thread = &task->thread;
		/*
		 * In system wide and when the context is loaded, access can only happen
		 * when the caller is running on the CPU being monitored by the session.
		 * It does not have to be the owner (ctx_task) of the context per se.
		 */
		if (unlikely(is_system && ctx->ctx_cpu != smp_processor_id())) {
			DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
			return -EBUSY;
		}
		can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;
	}

	/*
	 * we do not need to check for ipsr.db because we do clear ibr.x, dbr.r, and dbr.w
	 * ensuring that no real breakpoint can be installed via this call.
	 *
	 * IMPORTANT: regs can be NULL in this function
	 */

	first_time = ctx->ctx_fl_using_dbreg == 0;

	/*
	 * don't bother if we are loaded and task is being debugged
	 */
	if (is_loaded && (thread->flags & IA64_THREAD_DBG_VALID) != 0) {
		DPRINT(("debug registers already in use for [%d]\n", task->pid));
		return -EBUSY;
	}

	/*
	 * check for debug registers in system wide mode
	 *
	 * Even though a check is done in pfm_context_load(),
	 * we must repeat it here, in case the registers are
	 * written after the context is loaded
	 */
	if (is_loaded) {
		LOCK_PFS(flags);

		if (first_time && is_system) {
			if (pfm_sessions.pfs_ptrace_use_dbregs)
				ret = -EBUSY;
			else
				pfm_sessions.pfs_sys_use_dbregs++;
		}
		UNLOCK_PFS(flags);
	}

	if (ret != 0) return ret;

	/*
	 * mark ourself as user of the debug registers for
	 * perfmon purposes.
	 */
	ctx->ctx_fl_using_dbreg = 1;

	/*
	 * clear hardware registers to make sure we don't
	 * pick up stale state.
	 *
	 * for a system wide session, we do not use
	 * thread.dbr, thread.ibr because this process
	 * never leaves the current CPU and the state
	 * is shared by all processes running on it
	 */
	if (first_time && can_access_pmu) {
		DPRINT(("[%d] clearing ibrs, dbrs\n", task->pid));
		for (i=0; i < pmu_conf->num_ibrs; i++) {
			ia64_set_ibr(i, 0UL);
			ia64_dv_serialize_instruction();
		}
		ia64_srlz_i();
		for (i=0; i < pmu_conf->num_dbrs; i++) {
			ia64_set_dbr(i, 0UL);
			ia64_dv_serialize_data();
		}
		ia64_srlz_d();
	}

	ret = -EINVAL;

	/*
	 * Now install the values into the registers
	 */
	for (i = 0; i < count; i++, req++) {

		rnum      = req->dbreg_num;
		dbreg.val = req->dbreg_value;

		ret = -EINVAL;

		if ((mode == PFM_CODE_RR && rnum >= PFM_NUM_IBRS) || ((mode == PFM_DATA_RR) && rnum >= PFM_NUM_DBRS)) {
			DPRINT(("invalid register %u val=0x%lx mode=%d i=%d count=%d\n",
				  rnum, dbreg.val, mode, i, count));

			goto abort_mission;
		}

		/*
		 * make sure we do not install enabled breakpoint
		 */
		if (rnum & 0x1) {
			if (mode == PFM_CODE_RR)
				dbreg.ibr.ibr_x = 0;
			else
				dbreg.dbr.dbr_r = dbreg.dbr.dbr_w = 0;
		}

		PFM_REG_RETFLAG_SET(req->dbreg_flags, 0);

		/*
		 * Debug registers, just like PMC, can only be modified
		 * by a kernel call. Moreover, perfmon() access to those
		 * registers are centralized in this routine. The hardware
		 * does not modify the value of these registers, therefore,
		 * if we save them as they are written, we can avoid having
		 * to save them on context switch out. This is made possible
		 * by the fact that when perfmon uses debug registers, ptrace()
		 * won't be able to modify them concurrently.
		 */
		if (mode == PFM_CODE_RR) {
			CTX_USED_IBR(ctx, rnum);

			if (can_access_pmu) {
				ia64_set_ibr(rnum, dbreg.val);
				ia64_dv_serialize_instruction();
			}

			ctx->ctx_ibrs[rnum] = dbreg.val;

			DPRINT(("write ibr%u=0x%lx used_ibrs=0x%x ld=%d apmu=%d\n",
				rnum, dbreg.val, ctx->ctx_used_ibrs[0], is_loaded, can_access_pmu));
		} else {
			CTX_USED_DBR(ctx, rnum);

			if (can_access_pmu) {
				ia64_set_dbr(rnum, dbreg.val);
				ia64_dv_serialize_data();
			}
			ctx->ctx_dbrs[rnum] = dbreg.val;

			DPRINT(("write dbr%u=0x%lx used_dbrs=0x%x ld=%d apmu=%d\n",
				rnum, dbreg.val, ctx->ctx_used_dbrs[0], is_loaded, can_access_pmu));
		}
	}

	return 0;

abort_mission:
	/*
	 * in case it was our first attempt, we undo the global modifications
	 */
	if (first_time) {
		LOCK_PFS(flags);
		if (ctx->ctx_fl_system) {
			pfm_sessions.pfs_sys_use_dbregs--;
		}
		UNLOCK_PFS(flags);
		ctx->ctx_fl_using_dbreg = 0;
	}
	/*
	 * install error return flag
	 */
	PFM_REG_RETFLAG_SET(req->dbreg_flags, PFM_REG_RETFL_EINVAL);

	return ret;
}
static int
pfm_write_ibrs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	return pfm_write_ibr_dbr(PFM_CODE_RR, ctx, arg, count, regs);
}

static int
pfm_write_dbrs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	return pfm_write_ibr_dbr(PFM_DATA_RR, ctx, arg, count, regs);
}
long
pfm_mod_write_ibrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
{
	pfm_context_t *ctx;

	if (req == NULL) return -EINVAL;

	ctx = GET_PMU_CTX();

	if (ctx == NULL) return -EINVAL;

	/*
	 * for now limit to current task, which is enough when calling
	 * from overflow handler
	 */
	if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;

	return pfm_write_ibrs(ctx, req, nreq, regs);
}
EXPORT_SYMBOL(pfm_mod_write_ibrs);

long
pfm_mod_write_dbrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
{
	pfm_context_t *ctx;

	if (req == NULL) return -EINVAL;

	ctx = GET_PMU_CTX();

	if (ctx == NULL) return -EINVAL;

	/*
	 * for now limit to current task, which is enough when calling
	 * from overflow handler
	 */
	if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;

	return pfm_write_dbrs(ctx, req, nreq, regs);
}
EXPORT_SYMBOL(pfm_mod_write_dbrs);
static int
pfm_get_features(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	pfarg_features_t *req = (pfarg_features_t *)arg;

	req->ft_version = PFM_VERSION;
	return 0;
}
static int
pfm_stop(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	struct pt_regs *tregs;
	struct task_struct *task = PFM_CTX_TASK(ctx);
	int state, is_system;

	state     = ctx->ctx_state;
	is_system = ctx->ctx_fl_system;

	/*
	 * context must be attached to issue the stop command (includes LOADED,MASKED,ZOMBIE)
	 */
	if (state == PFM_CTX_UNLOADED) return -EINVAL;

	/*
	 * In system wide and when the context is loaded, access can only happen
	 * when the caller is running on the CPU being monitored by the session.
	 * It does not have to be the owner (ctx_task) of the context per se.
	 */
	if (is_system && ctx->ctx_cpu != smp_processor_id()) {
		DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
		return -EBUSY;
	}
	DPRINT(("task [%d] ctx_state=%d is_system=%d\n",
		PFM_CTX_TASK(ctx)->pid,
		state,
		is_system));

	/*
	 * in system mode, we need to update the PMU directly
	 * and the user level state of the caller, which may not
	 * necessarily be the creator of the context.
	 */
	if (is_system) {
		/*
		 * Update local PMU first
		 *
		 * disable dcr pp
		 */
		ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) & ~IA64_DCR_PP);
		ia64_srlz_i();

		/*
		 * update local cpuinfo
		 */
		PFM_CPUINFO_CLEAR(PFM_CPUINFO_DCR_PP);

		/*
		 * stop monitoring, does srlz.i
		 */
		pfm_clear_psr_pp();

		/*
		 * stop monitoring in the caller
		 */
		ia64_psr(regs)->pp = 0;

		return 0;
	}
	/*
	 * per-task mode
	 */

	if (task == current) {
		/* stop monitoring at kernel level */
		pfm_clear_psr_up();

		/*
		 * stop monitoring at the user level
		 */
		ia64_psr(regs)->up = 0;
	} else {
		tregs = ia64_task_regs(task);

		/*
		 * stop monitoring at the user level
		 */
		ia64_psr(tregs)->up = 0;

		/*
		 * monitoring disabled in kernel at next reschedule
		 */
		ctx->ctx_saved_psr_up = 0;
		DPRINT(("task=[%d]\n", task->pid));
	}
	return 0;
}
static int
pfm_start(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	struct pt_regs *tregs;
	int state, is_system;

	state     = ctx->ctx_state;
	is_system = ctx->ctx_fl_system;

	if (state != PFM_CTX_LOADED) return -EINVAL;

	/*
	 * In system wide and when the context is loaded, access can only happen
	 * when the caller is running on the CPU being monitored by the session.
	 * It does not have to be the owner (ctx_task) of the context per se.
	 */
	if (is_system && ctx->ctx_cpu != smp_processor_id()) {
		DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
		return -EBUSY;
	}

	/*
	 * in system mode, we need to update the PMU directly
	 * and the user level state of the caller, which may not
	 * necessarily be the creator of the context.
	 */
	if (is_system) {
		/*
		 * set user level psr.pp for the caller
		 */
		ia64_psr(regs)->pp = 1;

		/*
		 * now update the local PMU and cpuinfo
		 */
		PFM_CPUINFO_SET(PFM_CPUINFO_DCR_PP);

		/*
		 * start monitoring at kernel level
		 */
		pfm_set_psr_pp();

		/* enable dcr pp */
		ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) | IA64_DCR_PP);
		ia64_srlz_i();

		return 0;
	}

	/*
	 * per-process mode
	 */

	if (ctx->ctx_task == current) {

		/* start monitoring at kernel level */
		pfm_set_psr_up();

		/*
		 * activate monitoring at user level
		 */
		ia64_psr(regs)->up = 1;

	} else {
		tregs = ia64_task_regs(ctx->ctx_task);

		/*
		 * start monitoring at the kernel level the next
		 * time the task is scheduled
		 */
		ctx->ctx_saved_psr_up = IA64_PSR_UP;

		/*
		 * activate monitoring at user level
		 */
		ia64_psr(tregs)->up = 1;
	}
	return 0;
}
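/*
 * Summary of the enable bits toggled by pfm_start()/pfm_stop():
 * system-wide sessions gate privileged monitors (pmc.pm=1) through
 * dcr.pp and psr.pp; per-task sessions gate user monitors (pmc.pm=0)
 * through psr.up. When the target task is not current, only the saved
 * copies (tregs, ctx_saved_psr_up) are written and the bits take effect
 * at the next context switch in.
 */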
static int
pfm_get_pmc_reset(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	pfarg_reg_t *req = (pfarg_reg_t *)arg;
	unsigned int cnum;
	int i;
	int ret = -EINVAL;

	for (i = 0; i < count; i++, req++) {

		cnum = req->reg_num;

		if (!PMC_IS_IMPL(cnum)) goto abort_mission;

		req->reg_value = PMC_DFL_VAL(cnum);

		PFM_REG_RETFLAG_SET(req->reg_flags, 0);

		DPRINT(("pmc_reset_val pmc[%u]=0x%lx\n", cnum, req->reg_value));
	}
	return 0;

abort_mission:
	PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
	return ret;
}
static int
pfm_check_task_exist(pfm_context_t *ctx)
{
	struct task_struct *g, *t;
	int ret = -ESRCH;

	read_lock(&tasklist_lock);

	do_each_thread (g, t) {
		if (t->thread.pfm_context == ctx) {
			ret = 0;
			break;
		}
	} while_each_thread (g, t);

	read_unlock(&tasklist_lock);

	DPRINT(("pfm_check_task_exist: ret=%d ctx=%p\n", ret, ctx));

	return ret;
}
static int
pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	struct task_struct *task;
	struct thread_struct *thread;
	struct pfm_context_t *old;
	unsigned long flags;
#ifndef CONFIG_SMP
	struct task_struct *owner_task = NULL;
#endif
	pfarg_load_t *req = (pfarg_load_t *)arg;
	unsigned long *pmcs_source, *pmds_source;
	int the_cpu;
	int ret = 0;
	int state, is_system, set_dbregs = 0;

	state     = ctx->ctx_state;
	is_system = ctx->ctx_fl_system;
	/*
	 * can only load from unloaded or terminated state
	 */
	if (state != PFM_CTX_UNLOADED) {
		DPRINT(("cannot load to [%d], invalid ctx_state=%d\n",
			req->load_pid,
			ctx->ctx_state));
		return -EINVAL;
	}

	DPRINT(("load_pid [%d] using_dbreg=%d\n", req->load_pid, ctx->ctx_fl_using_dbreg));

	if (CTX_OVFL_NOBLOCK(ctx) == 0 && req->load_pid == current->pid) {
		DPRINT(("cannot use blocking mode on self\n"));
		return -EINVAL;
	}

	ret = pfm_get_task(ctx, req->load_pid, &task);
	if (ret) {
		DPRINT(("load_pid [%d] get_task=%d\n", req->load_pid, ret));
		return ret;
	}

	ret = -EINVAL;

	/*
	 * system wide is self monitoring only
	 */
	if (is_system && task != current) {
		DPRINT(("system wide is self monitoring only load_pid=%d\n",
			req->load_pid));
		goto error;
	}

	thread = &task->thread;

	ret = 0;
	/*
	 * cannot load a context which is using range restrictions,
	 * into a task that is being debugged.
	 */
	if (ctx->ctx_fl_using_dbreg) {
		if (thread->flags & IA64_THREAD_DBG_VALID) {
			ret = -EBUSY;
			DPRINT(("load_pid [%d] task is debugged, cannot load range restrictions\n", req->load_pid));
			goto error;
		}
		LOCK_PFS(flags);

		if (is_system) {
			if (pfm_sessions.pfs_ptrace_use_dbregs) {
				DPRINT(("cannot load [%d] dbregs in use\n", task->pid));
				ret = -EBUSY;
			} else {
				pfm_sessions.pfs_sys_use_dbregs++;
				DPRINT(("load [%d] increased sys_use_dbreg=%u\n", task->pid, pfm_sessions.pfs_sys_use_dbregs));
				set_dbregs = 1;
			}
		}

		UNLOCK_PFS(flags);

		if (ret) goto error;
	}

	/*
	 * SMP system-wide monitoring implies self-monitoring.
	 *
	 * The programming model expects the task to
	 * be pinned on a CPU throughout the session.
	 * Here we take note of the current CPU at the
	 * time the context is loaded. No call from
	 * another CPU will be allowed.
	 *
	 * The pinning via sched_setaffinity()
	 * must be done by the calling task prior
	 * to this call.
	 *
	 * systemwide: keep track of CPU this session is supposed to run on
	 */
	the_cpu = ctx->ctx_cpu = smp_processor_id();

	ret = -EBUSY;
	/*
	 * now reserve the session
	 */
	ret = pfm_reserve_session(current, is_system, the_cpu);
	if (ret) goto error;

	/*
	 * task is necessarily stopped at this point.
	 *
	 * If the previous context was zombie, then it got removed in
	 * pfm_save_regs(). Therefore we should not see it here.
	 * If we see a context, then this is an active context
	 *
	 * XXX: needs to be atomic
	 */
	DPRINT(("before cmpxchg() old_ctx=%p new_ctx=%p\n",
		thread->pfm_context, ctx));

	old = ia64_cmpxchg(acq, &thread->pfm_context, NULL, ctx, sizeof(pfm_context_t *));
	if (old != NULL) {
		DPRINT(("load_pid [%d] already has a context\n", req->load_pid));
		goto error_unres;
	}

	pfm_reset_msgq(ctx);

	ctx->ctx_state = PFM_CTX_LOADED;

	/*
	 * link context to task
	 */
	ctx->ctx_task = task;

	if (is_system) {
		/*
		 * we load as stopped
		 */
		PFM_CPUINFO_SET(PFM_CPUINFO_SYST_WIDE);
		PFM_CPUINFO_CLEAR(PFM_CPUINFO_DCR_PP);

		if (ctx->ctx_fl_excl_idle) PFM_CPUINFO_SET(PFM_CPUINFO_EXCL_IDLE);
	} else {
		thread->flags |= IA64_THREAD_PM_VALID;
	}

	/*
	 * propagate into thread-state
	 */
	pfm_copy_pmds(task, ctx);
	pfm_copy_pmcs(task, ctx);

	pmcs_source = thread->pmcs;
	pmds_source = thread->pmds;

	/*
	 * always the case for system-wide
	 */
	if (task == current) {

		if (is_system == 0) {

			/* allow user level control */
			ia64_psr(regs)->sp = 0;
			DPRINT(("clearing psr.sp for [%d]\n", task->pid));

			SET_LAST_CPU(ctx, smp_processor_id());
			INC_ACTIVATION();
			SET_ACTIVATION(ctx);
#ifndef CONFIG_SMP
			/*
			 * push the other task out, if any
			 */
			owner_task = GET_PMU_OWNER();
			if (owner_task) pfm_lazy_save_regs(owner_task);
#endif
		}
		/*
		 * load all PMD from ctx to PMU (as opposed to thread state)
		 * restore all PMC from ctx to PMU
		 */
		pfm_restore_pmds(pmds_source, ctx->ctx_all_pmds[0]);
		pfm_restore_pmcs(pmcs_source, ctx->ctx_all_pmcs[0]);

		ctx->ctx_reload_pmcs[0] = 0UL;
		ctx->ctx_reload_pmds[0] = 0UL;

		/*
		 * guaranteed safe by earlier check against DBG_VALID
		 */
		if (ctx->ctx_fl_using_dbreg) {
			pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
			pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
		}
		/*
		 * set new ownership
		 */
		SET_PMU_OWNER(task, ctx);

		DPRINT(("context loaded on PMU for [%d]\n", task->pid));
	} else {
		/*
		 * when not current, task MUST be stopped, so this is safe
		 */
		regs = ia64_task_regs(task);

		/* force a full reload */
		ctx->ctx_last_activation = PFM_INVALID_ACTIVATION;
		SET_LAST_CPU(ctx, -1);

		/* initial saved psr (stopped) */
		ctx->ctx_saved_psr_up = 0UL;
		ia64_psr(regs)->up = ia64_psr(regs)->pp = 0;
	}

	ret = 0;

error_unres:
	if (ret) pfm_unreserve_session(ctx, ctx->ctx_fl_system, the_cpu);
error:
	/*
	 * we must undo the dbregs setting (for system-wide)
	 */
	if (ret && set_dbregs) {
		LOCK_PFS(flags);
		pfm_sessions.pfs_sys_use_dbregs--;
		UNLOCK_PFS(flags);
	}
	/*
	 * release task, there is now a link with the context
	 */
	if (is_system == 0 && task != current) {
		pfm_put_task(task);

		if (ret == 0) {
			ret = pfm_check_task_exist(ctx);
			if (ret) {
				ctx->ctx_state = PFM_CTX_UNLOADED;
				ctx->ctx_task  = NULL;
			}
		}
	}
	return ret;
}
/*
 * in this function, we do not need to increase the use count
 * for the task via get_task_struct(), because we hold the
 * context lock. If the task were to disappear while having
 * a context attached, it would go through pfm_exit_thread()
 * which also grabs the context lock and would therefore be blocked
 * until we are here.
 */
static void pfm_flush_pmds(struct task_struct *, pfm_context_t *ctx);

static int
pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	struct task_struct *task = PFM_CTX_TASK(ctx);
	struct pt_regs *tregs;
	int prev_state, is_system;
	int ret;

	DPRINT(("ctx_state=%d task [%d]\n", ctx->ctx_state, task ? task->pid : -1));

	prev_state = ctx->ctx_state;
	is_system  = ctx->ctx_fl_system;

	/*
	 * unload only when necessary
	 */
	if (prev_state == PFM_CTX_UNLOADED) {
		DPRINT(("ctx_state=%d, nothing to do\n", prev_state));
		return 0;
	}

	/*
	 * clear psr and dcr bits
	 */
	ret = pfm_stop(ctx, NULL, 0, regs);
	if (ret) return ret;

	ctx->ctx_state = PFM_CTX_UNLOADED;

	/*
	 * in system mode, we need to update the PMU directly
	 * and the user level state of the caller, which may not
	 * necessarily be the creator of the context.
	 */
	if (is_system) {

		/*
		 * Update cpuinfo
		 *
		 * local PMU is taken care of in pfm_stop()
		 */
		PFM_CPUINFO_CLEAR(PFM_CPUINFO_SYST_WIDE);
		PFM_CPUINFO_CLEAR(PFM_CPUINFO_EXCL_IDLE);

		/*
		 * save PMDs in context
		 * release ownership
		 */
		pfm_flush_pmds(current, ctx);

		/*
		 * at this point we are done with the PMU
		 * so we can unreserve the resource.
		 */
		if (prev_state != PFM_CTX_ZOMBIE)
			pfm_unreserve_session(ctx, 1 , ctx->ctx_cpu);

		/*
		 * disconnect context from task
		 */
		task->thread.pfm_context = NULL;
		/*
		 * disconnect task from context
		 */
		ctx->ctx_task = NULL;

		/*
		 * There is nothing more to cleanup here.
		 */
		return 0;
	}

	/*
	 * per-task mode
	 */
	tregs = task == current ? regs : ia64_task_regs(task);

	if (task == current) {
		/*
		 * cancel user level control
		 */
		ia64_psr(regs)->sp = 1;

		DPRINT(("setting psr.sp for [%d]\n", task->pid));
	}
	/*
	 * save PMDs to context
	 * release ownership
	 */
	pfm_flush_pmds(task, ctx);

	/*
	 * at this point we are done with the PMU
	 * so we can unreserve the resource.
	 *
	 * when state was ZOMBIE, we have already unreserved.
	 */
	if (prev_state != PFM_CTX_ZOMBIE)
		pfm_unreserve_session(ctx, 0 , ctx->ctx_cpu);

	/*
	 * reset activation counter and psr
	 */
	ctx->ctx_last_activation = PFM_INVALID_ACTIVATION;
	SET_LAST_CPU(ctx, -1);

	/*
	 * PMU state will not be restored
	 */
	task->thread.flags &= ~IA64_THREAD_PM_VALID;

	/*
	 * break links between context and task
	 */
	task->thread.pfm_context = NULL;
	ctx->ctx_task            = NULL;

	PFM_SET_WORK_PENDING(task, 0);

	ctx->ctx_fl_trap_reason  = PFM_TRAP_REASON_NONE;
	ctx->ctx_fl_can_restart  = 0;
	ctx->ctx_fl_going_zombie = 0;

	DPRINT(("disconnected [%d] from context\n", task->pid));

	return 0;
}
/*
 * called only from exit_thread(): task == current
 * we come here only if current has a context attached (loaded or masked)
 */
void
pfm_exit_thread(struct task_struct *task)
{
	pfm_context_t *ctx;
	unsigned long flags;
	struct pt_regs *regs = ia64_task_regs(task);
	int ret, state;
	int free_ok = 0;

	ctx = PFM_GET_CTX(task);

	PROTECT_CTX(ctx, flags);

	DPRINT(("state=%d task [%d]\n", ctx->ctx_state, task->pid));

	state = ctx->ctx_state;
	switch(state) {
		case PFM_CTX_UNLOADED:
			/*
			 * only comes to this function if pfm_context is not NULL, i.e., cannot
			 * be in unloaded state
			 */
			printk(KERN_ERR "perfmon: pfm_exit_thread [%d] ctx unloaded\n", task->pid);
			break;
		case PFM_CTX_LOADED:
		case PFM_CTX_MASKED:
			ret = pfm_context_unload(ctx, NULL, 0, regs);
			if (ret) {
				printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task->pid, state, ret);
			}
			DPRINT(("ctx unloaded for current state was %d\n", state));

			pfm_end_notify_user(ctx);
			break;
		case PFM_CTX_ZOMBIE:
			ret = pfm_context_unload(ctx, NULL, 0, regs);
			if (ret) {
				printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task->pid, state, ret);
			}
			free_ok = 1;
			break;
		default:
			printk(KERN_ERR "perfmon: pfm_exit_thread [%d] unexpected state=%d\n", task->pid, state);
			break;
	}
	UNPROTECT_CTX(ctx, flags);

	{ u64 psr = pfm_get_psr();
	  BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP));
	  BUG_ON(GET_PMU_OWNER());
	  BUG_ON(ia64_psr(regs)->up);
	  BUG_ON(ia64_psr(regs)->pp);
	}

	/*
	 * All memory free operations (especially for vmalloc'ed memory)
	 * MUST be done with interrupts ENABLED.
	 */
	if (free_ok) pfm_context_free(ctx);
}
/*
 * functions MUST be listed in the increasing order of their index (see perfmon.h)
 */
#define PFM_CMD(name, flags, arg_count, arg_type, getsz) { name, #name, flags, arg_count, sizeof(arg_type), getsz }
#define PFM_CMD_S(name, flags) { name, #name, flags, 0, 0, NULL }
#define PFM_CMD_PCLRWS	(PFM_CMD_FD|PFM_CMD_ARG_RW|PFM_CMD_STOP)
#define PFM_CMD_PCLRW	(PFM_CMD_FD|PFM_CMD_ARG_RW)
#define PFM_CMD_NONE	{ NULL, "no-cmd", 0, 0, 0, NULL}

static pfm_cmd_desc_t pfm_cmd_tab[]={
/* 0  */PFM_CMD_NONE,
/* 1  */PFM_CMD(pfm_write_pmcs, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
/* 2  */PFM_CMD(pfm_write_pmds, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
/* 3  */PFM_CMD(pfm_read_pmds, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
/* 4  */PFM_CMD_S(pfm_stop, PFM_CMD_PCLRWS),
/* 5  */PFM_CMD_S(pfm_start, PFM_CMD_PCLRWS),
/* 6  */PFM_CMD_NONE,
/* 7  */PFM_CMD_NONE,
/* 8  */PFM_CMD(pfm_context_create, PFM_CMD_ARG_RW, 1, pfarg_context_t, pfm_ctx_getsize),
/* 9  */PFM_CMD_NONE,
/* 10 */PFM_CMD_S(pfm_restart, PFM_CMD_PCLRW),
/* 11 */PFM_CMD_NONE,
/* 12 */PFM_CMD(pfm_get_features, PFM_CMD_ARG_RW, 1, pfarg_features_t, NULL),
/* 13 */PFM_CMD(pfm_debug, 0, 1, unsigned int, NULL),
/* 14 */PFM_CMD_NONE,
/* 15 */PFM_CMD(pfm_get_pmc_reset, PFM_CMD_ARG_RW, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
/* 16 */PFM_CMD(pfm_context_load, PFM_CMD_PCLRWS, 1, pfarg_load_t, NULL),
/* 17 */PFM_CMD_S(pfm_context_unload, PFM_CMD_PCLRWS),
/* 18 */PFM_CMD_NONE,
/* 19 */PFM_CMD_NONE,
/* 20 */PFM_CMD_NONE,
/* 21 */PFM_CMD_NONE,
/* 22 */PFM_CMD_NONE,
/* 23 */PFM_CMD_NONE,
/* 24 */PFM_CMD_NONE,
/* 25 */PFM_CMD_NONE,
/* 26 */PFM_CMD_NONE,
/* 27 */PFM_CMD_NONE,
/* 28 */PFM_CMD_NONE,
/* 29 */PFM_CMD_NONE,
/* 30 */PFM_CMD_NONE,
/* 31 */PFM_CMD_NONE,
/* 32 */PFM_CMD(pfm_write_ibrs, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_dbreg_t, NULL),
/* 33 */PFM_CMD(pfm_write_dbrs, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_dbreg_t, NULL)
};
#define PFM_CMD_COUNT	(sizeof(pfm_cmd_tab)/sizeof(pfm_cmd_desc_t))
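/*
 * The table above is what sys_perfmonctl() dispatches on. A minimal
 * self-monitoring session, seen from user level (sketch, no error
 * handling; PMC/PMD programming details omitted):
 *
 *	pfarg_context_t c; pfarg_load_t l;
 *	memset(&c, 0, sizeof(c)); memset(&l, 0, sizeof(l));
 *	perfmonctl(0, PFM_CREATE_CONTEXT, &c, 1);	// -> c.ctx_fd
 *	// PFM_WRITE_PMCS / PFM_WRITE_PMDS here
 *	l.load_pid = getpid();
 *	perfmonctl(c.ctx_fd, PFM_LOAD_CONTEXT, &l, 1);
 *	perfmonctl(c.ctx_fd, PFM_START, NULL, 0);
 *	// ... workload ...
 *	perfmonctl(c.ctx_fd, PFM_STOP, NULL, 0);
 *	// PFM_READ_PMDS to collect 64-bit virtualized counts
 */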
static int
pfm_check_task_state(pfm_context_t *ctx, int cmd, unsigned long flags)
{
	struct task_struct *task;
	int state, old_state;

recheck:
	state = ctx->ctx_state;
	task  = ctx->ctx_task;

	if (task == NULL) {
		DPRINT(("context %d no task, state=%d\n", ctx->ctx_fd, state));
		return 0;
	}

	DPRINT(("context %d state=%d [%d] task_state=%ld must_stop=%d\n",
		ctx->ctx_fd,
		state,
		task->pid,
		task->state, PFM_CMD_STOPPED(cmd)));

	/*
	 * self-monitoring always ok.
	 *
	 * for system-wide the caller can either be the creator of the
	 * context (the one to which the context is attached) OR
	 * a task running on the same CPU as the session.
	 */
	if (task == current || ctx->ctx_fl_system) return 0;

	/*
	 * we are monitoring another thread
	 */
	switch(state) {
		case PFM_CTX_UNLOADED:
			/*
			 * if context is UNLOADED we are safe to go
			 */
			return 0;
		case PFM_CTX_ZOMBIE:
			/*
			 * no command can operate on a zombie context
			 */
			DPRINT(("cmd %d state zombie cannot operate on context\n", cmd));
			return -EINVAL;
		case PFM_CTX_MASKED:
			/*
			 * PMU state has been saved to software even though
			 * the thread may still be running.
			 */
			if (cmd != PFM_UNLOAD_CONTEXT) return 0;
	}

	/*
	 * context is LOADED or MASKED. Some commands may need to have
	 * the task stopped.
	 *
	 * We could lift this restriction for UP but it would mean that
	 * the user has no guarantee the task would not run between
	 * two successive calls to perfmonctl(). That's probably OK.
	 * If this user wants to ensure the task does not run, then
	 * the task must be stopped.
	 */
	if (PFM_CMD_STOPPED(cmd)) {
		if ((task->state != TASK_STOPPED) && (task->state != TASK_TRACED)) {
			DPRINT(("[%d] task not in stopped state\n", task->pid));
			return -EBUSY;
		}
		/*
		 * task is now stopped, wait for ctxsw out
		 *
		 * This is an interesting point in the code.
		 * We need to unprotect the context because
		 * the pfm_save_regs() routine needs to grab
		 * the same lock. There is danger in doing
		 * this because it leaves a window open for
		 * another task to get access to the context
		 * and possibly change its state. The one thing
		 * that is not possible is for the context to disappear
		 * because we are protected by the VFS layer, i.e.,
		 * get_fd()/put_fd().
		 */
		old_state = state;

		UNPROTECT_CTX(ctx, flags);

		wait_task_inactive(task);

		PROTECT_CTX(ctx, flags);

		/*
		 * we must recheck to verify if state has changed
		 */
		if (ctx->ctx_state != old_state) {
			DPRINT(("old_state=%d new_state=%d\n", old_state, ctx->ctx_state));
			goto recheck;
		}
	}
	return 0;
}
/*
 * system-call entry point (must return long)
 */
asmlinkage long
sys_perfmonctl (int fd, int cmd, void __user *arg, int count)
{
	struct file *file = NULL;
	pfm_context_t *ctx = NULL;
	unsigned long flags = 0UL;
	void *args_k = NULL;
	long ret; /* will expand int return types */
	size_t base_sz, sz, xtra_sz = 0;
	int narg, completed_args = 0, call_made = 0, cmd_flags;
	int (*func)(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
	int (*getsize)(void *arg, size_t *sz);
#define PFM_MAX_ARGSIZE	4096

	/*
	 * reject any call if perfmon was disabled at initialization
	 */
	if (unlikely(pmu_conf == NULL)) return -ENOSYS;

	if (unlikely(cmd < 0 || cmd >= PFM_CMD_COUNT)) {
		DPRINT(("invalid cmd=%d\n", cmd));
		return -EINVAL;
	}

	func      = pfm_cmd_tab[cmd].cmd_func;
	narg      = pfm_cmd_tab[cmd].cmd_narg;
	base_sz   = pfm_cmd_tab[cmd].cmd_argsize;
	getsize   = pfm_cmd_tab[cmd].cmd_getsize;
	cmd_flags = pfm_cmd_tab[cmd].cmd_flags;

	if (unlikely(func == NULL)) {
		DPRINT(("invalid cmd=%d\n", cmd));
		return -EINVAL;
	}

	DPRINT(("cmd=%s idx=%d narg=0x%x argsz=%lu count=%d\n",
		PFM_CMD_NAME(cmd),
		cmd,
		narg,
		base_sz,
		count));

	/*
	 * check if number of arguments matches what the command expects
	 */
	if (unlikely((narg == PFM_CMD_ARG_MANY && count <= 0) || (narg > 0 && narg != count)))
		return -EINVAL;

restart_args:
	sz = xtra_sz + base_sz*count;
	/*
	 * limit abuse to min page size
	 */
	if (unlikely(sz > PFM_MAX_ARGSIZE)) {
		printk(KERN_ERR "perfmon: [%d] argument too big %lu\n", current->pid, sz);
		return -E2BIG;
	}

	/*
	 * allocate default-sized argument buffer
	 */
	if (likely(count && args_k == NULL)) {
		args_k = kmalloc(PFM_MAX_ARGSIZE, GFP_KERNEL);
		if (args_k == NULL) return -ENOMEM;
	}

	ret = -EFAULT;

	/*
	 * copy arguments
	 *
	 * assume sz = 0 for command without parameters
	 */
	if (sz && copy_from_user(args_k, arg, sz)) {
		DPRINT(("cannot copy_from_user %lu bytes @%p\n", sz, arg));
		goto error_args;
	}

	/*
	 * check if command supports extra parameters
	 */
	if (completed_args == 0 && getsize) {
		/*
		 * get extra parameters size (based on main argument)
		 */
		ret = (*getsize)(args_k, &xtra_sz);
		if (ret) goto error_args;

		completed_args = 1;

		DPRINT(("restart_args sz=%lu xtra_sz=%lu\n", sz, xtra_sz));

		/* retry if necessary */
		if (likely(xtra_sz)) goto restart_args;
	}

	if (unlikely((cmd_flags & PFM_CMD_FD) == 0)) goto skip_fd;

	ret = -EBADF;

	file = fget(fd);
	if (unlikely(file == NULL)) {
		DPRINT(("invalid fd %d\n", fd));
		goto error_args;
	}
	if (unlikely(PFM_IS_FILE(file) == 0)) {
		DPRINT(("fd %d not related to perfmon\n", fd));
		goto error_args;
	}

	ctx = (pfm_context_t *)file->private_data;
	if (unlikely(ctx == NULL)) {
		DPRINT(("no context for fd %d\n", fd));
		goto error_args;
	}
	prefetch(&ctx->ctx_state);

	PROTECT_CTX(ctx, flags);

	/*
	 * check task is stopped
	 */
	ret = pfm_check_task_state(ctx, cmd, flags);
	if (unlikely(ret)) goto abort_locked;

skip_fd:
	ret = (*func)(ctx, args_k, count, ia64_task_regs(current));

	call_made = 1;

abort_locked:
	if (likely(ctx)) {
		DPRINT(("context unlocked\n"));
		UNPROTECT_CTX(ctx, flags);
	}

	/* copy argument back to user, if needed */
	if (call_made && PFM_CMD_RW_ARG(cmd) && copy_to_user(arg, args_k, base_sz*count)) ret = -EFAULT;

error_args:
	if (file)
		fput(file);
	if (args_k) kfree(args_k);

	DPRINT(("cmd=%s ret=%ld\n", PFM_CMD_NAME(cmd), ret));

	return ret;
}
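/*
 * Illustrative sketch (not compiled): the shape of a cmd_getsize callback
 * used by the restart_args two-pass copy above. pfm_ctx_getsize() plays
 * this role for PFM_CREATE_CONTEXT; the sampling-format lookup below is
 * paraphrased, not the verbatim implementation.
 */
#if 0
static int
example_ctx_getsize(void *arg, size_t *sz)
{
	pfarg_context_t *req = (pfarg_context_t *)arg;
	pfm_buffer_fmt_t *fmt;

	*sz = 0;

	/* no sampling buffer requested: the main argument is enough */
	if (!pfm_uuid_cmp(req->ctx_smpl_buf_id, pfm_null_uuid)) return 0;

	fmt = pfm_find_buffer_fmt(req->ctx_smpl_buf_id);
	if (fmt == NULL) return -EINVAL;

	*sz = fmt->fmt_arg_size;	/* extra bytes fetched on the second pass */
	return 0;
}
#endif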
static void
pfm_resume_after_ovfl(pfm_context_t *ctx, unsigned long ovfl_regs, struct pt_regs *regs)
{
	pfm_buffer_fmt_t *fmt = ctx->ctx_buf_fmt;
	pfm_ovfl_ctrl_t rst_ctrl;
	int state;
	int ret = 0;

	state = ctx->ctx_state;

	/*
	 * Unlock sampling buffer and reset index atomically
	 * XXX: not really needed when blocking
	 */
	if (CTX_HAS_SMPL(ctx)) {

		rst_ctrl.bits.mask_monitoring = 0;
		rst_ctrl.bits.reset_ovfl_pmds = 0;

		if (state == PFM_CTX_LOADED)
			ret = pfm_buf_fmt_restart_active(fmt, current, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
		else
			ret = pfm_buf_fmt_restart(fmt, current, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
	} else {
		rst_ctrl.bits.mask_monitoring = 0;
		rst_ctrl.bits.reset_ovfl_pmds = 1;
	}

	if (ret == 0) {
		if (rst_ctrl.bits.reset_ovfl_pmds) {
			pfm_reset_regs(ctx, &ovfl_regs, PFM_PMD_LONG_RESET);
		}
		if (rst_ctrl.bits.mask_monitoring == 0) {
			DPRINT(("resuming monitoring\n"));
			if (ctx->ctx_state == PFM_CTX_MASKED) pfm_restore_monitoring(current);
		} else {
			DPRINT(("stopping monitoring\n"));
			//pfm_stop_monitoring(current, regs);
		}
		ctx->ctx_state = PFM_CTX_LOADED;
	}
}
/*
 * context MUST BE LOCKED when calling
 * can only be called for current
 */
static void
pfm_context_force_terminate(pfm_context_t *ctx, struct pt_regs *regs)
{
	int ret;

	DPRINT(("entering for [%d]\n", current->pid));

	ret = pfm_context_unload(ctx, NULL, 0, regs);
	if (ret) {
		printk(KERN_ERR "pfm_context_force_terminate: [%d] unload failed with %d\n", current->pid, ret);
	}

	/*
	 * and wakeup controlling task, indicating we are now disconnected
	 */
	wake_up_interruptible(&ctx->ctx_zombieq);

	/*
	 * given that context is still locked, the controlling
	 * task will only get access when we return from
	 * pfm_handle_work().
	 */
}
static int pfm_ovfl_notify_user(pfm_context_t *ctx, unsigned long ovfl_pmds);

 /*
  * pfm_handle_work() can be called with interrupts enabled
  * (TIF_NEED_RESCHED) or disabled. The down_interruptible
  * call may sleep, therefore we must re-enable interrupts
  * to avoid deadlocks. It is safe to do so because this function
  * is called ONLY when returning to user level (PUStk=1), in which case
  * there is no risk of kernel stack overflow due to deep
  * interrupt nesting.
  */
void
pfm_handle_work(void)
{
	pfm_context_t *ctx;
	struct pt_regs *regs;
	unsigned long flags, dummy_flags;
	unsigned long ovfl_regs;
	unsigned int reason;
	int ret;

	ctx = PFM_GET_CTX(current);
	if (ctx == NULL) {
		printk(KERN_ERR "perfmon: [%d] has no PFM context\n", current->pid);
		return;
	}

	PROTECT_CTX(ctx, flags);

	PFM_SET_WORK_PENDING(current, 0);

	pfm_clear_task_notify();

	regs = ia64_task_regs(current);

	/*
	 * extract reason for being here and clear
	 */
	reason = ctx->ctx_fl_trap_reason;
	ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_NONE;
	ovfl_regs = ctx->ctx_ovfl_regs[0];

	DPRINT(("reason=%d state=%d\n", reason, ctx->ctx_state));

	/*
	 * must be done before we check for simple-reset mode
	 */
	if (ctx->ctx_fl_going_zombie || ctx->ctx_state == PFM_CTX_ZOMBIE) goto do_zombie;

	//if (CTX_OVFL_NOBLOCK(ctx)) goto skip_blocking;
	if (reason == PFM_TRAP_REASON_RESET) goto skip_blocking;

	/*
	 * restore interrupt mask to what it was on entry.
	 * Could be enabled/disabled.
	 */
	UNPROTECT_CTX(ctx, flags);

	/*
	 * force interrupt enable because of down_interruptible()
	 */
	local_irq_enable();

	DPRINT(("before block sleeping\n"));

	/*
	 * may go through without blocking on SMP systems
	 * if restart has been received already by the time we call down()
	 */
	ret = down_interruptible(&ctx->ctx_restart_sem);

	DPRINT(("after block sleeping ret=%d\n", ret));

	/*
	 * lock context and mask interrupts again
	 * We save flags into a dummy because we may have
	 * altered the interrupt mask compared to entry in this
	 * function.
	 */
	PROTECT_CTX(ctx, dummy_flags);

	/*
	 * we need to read the ovfl_regs only after wake-up
	 * because we may have had pfm_write_pmds() in between
	 * and that can change PMD values and therefore
	 * ovfl_regs is reset for these new PMD values.
	 */
	ovfl_regs = ctx->ctx_ovfl_regs[0];

	if (ctx->ctx_fl_going_zombie) {
do_zombie:
		DPRINT(("context is zombie, bailing out\n"));
		pfm_context_force_terminate(ctx, regs);
		goto nothing_to_do;
	}
	/*
	 * in case of interruption of down() we don't restart anything
	 */
	if (ret < 0) goto nothing_to_do;

skip_blocking:
	pfm_resume_after_ovfl(ctx, ovfl_regs, regs);
	ctx->ctx_ovfl_regs[0] = 0UL;

nothing_to_do:
	/*
	 * restore flags as they were upon entry
	 */
	UNPROTECT_CTX(ctx, flags);
}
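/*
 * Overview of the blocking-notification round trip handled above, kept here
 * as orientation (all names are the ones used in this file):
 *
 *   1. a counter overflows; pfm_overflow_handler() sets
 *      PFM_TRAP_REASON_BLOCK and flags work pending for the monitored task;
 *   2. on its way back to user level the monitored task enters
 *      pfm_handle_work() and sleeps in down_interruptible(&ctx->ctx_restart_sem);
 *   3. the controlling task reads the overflow message, processes samples,
 *      then issues PFM_RESTART, whose handler does up(&ctx->ctx_restart_sem);
 *   4. the monitored task wakes up, resets the overflowed PMDs via
 *      pfm_resume_after_ovfl() and resumes monitoring.
 */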
static int
pfm_notify_user(pfm_context_t *ctx, pfm_msg_t *msg)
{
	if (ctx->ctx_state == PFM_CTX_ZOMBIE) {
		DPRINT(("ignoring overflow notification, owner is zombie\n"));
		return 0;
	}

	DPRINT(("waking up somebody\n"));

	if (msg) wake_up_interruptible(&ctx->ctx_msgq_wait);

	/*
	 * safe, we are not in intr handler, nor in ctxsw when
	 * we come here
	 */
	kill_fasync (&ctx->ctx_async_queue, SIGIO, POLL_IN);

	return 0;
}
static int
pfm_ovfl_notify_user(pfm_context_t *ctx, unsigned long ovfl_pmds)
{
	pfm_msg_t *msg = NULL;

	if (ctx->ctx_fl_no_msg == 0) {
		msg = pfm_get_new_msg(ctx);
		if (msg == NULL) {
			printk(KERN_ERR "perfmon: pfm_ovfl_notify_user no more notification msgs\n");
			return -1;
		}

		msg->pfm_ovfl_msg.msg_type         = PFM_MSG_OVFL;
		msg->pfm_ovfl_msg.msg_ctx_fd       = ctx->ctx_fd;
		msg->pfm_ovfl_msg.msg_active_set   = 0;
		msg->pfm_ovfl_msg.msg_ovfl_pmds[0] = ovfl_pmds;
		msg->pfm_ovfl_msg.msg_ovfl_pmds[1] = 0UL;
		msg->pfm_ovfl_msg.msg_ovfl_pmds[2] = 0UL;
		msg->pfm_ovfl_msg.msg_ovfl_pmds[3] = 0UL;
		msg->pfm_ovfl_msg.msg_tstamp       = 0UL;
	}

	DPRINT(("ovfl msg: msg=%p no_msg=%d fd=%d ovfl_pmds=0x%lx\n",
		msg,
		ctx->ctx_fl_no_msg,
		ctx->ctx_fd,
		ovfl_pmds));

	return pfm_notify_user(ctx, msg);
}
static int
pfm_end_notify_user(pfm_context_t *ctx)
{
	pfm_msg_t *msg;

	msg = pfm_get_new_msg(ctx);
	if (msg == NULL) {
		printk(KERN_ERR "perfmon: pfm_end_notify_user no more notification msgs\n");
		return -1;
	}
	/* no leak */
	memset(msg, 0, sizeof(*msg));

	msg->pfm_end_msg.msg_type    = PFM_MSG_END;
	msg->pfm_end_msg.msg_ctx_fd  = ctx->ctx_fd;
	msg->pfm_ovfl_msg.msg_tstamp = 0UL;

	DPRINT(("end msg: msg=%p no_msg=%d ctx_fd=%d\n",
		msg,
		ctx->ctx_fl_no_msg,
		ctx->ctx_fd));

	return pfm_notify_user(ctx, msg);
}
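/*
 * Illustrative sketch (not compiled): how the controlling task consumes the
 * messages queued above. The context fd supports read(2), poll(2) and
 * SIGIO/fasync; a minimal user-level reader, assuming the pfm_msg_t union
 * from <asm/perfmon.h>, could look like this.
 */
#if 0
	pfm_msg_t msg;

	for (;;) {
		if (read(ctx_fd, &msg, sizeof(msg)) != sizeof(msg)) break;

		switch(msg.pfm_gen_msg.msg_type) {
		case PFM_MSG_OVFL:
			/* process samples, then let the monitored task go */
			perfmonctl(ctx_fd, PFM_RESTART, NULL, 0);
			break;
		case PFM_MSG_END:
			/* monitored task exited, stop reading */
			goto done;
		}
	}
done:	;
#endif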
/*
 * main overflow processing routine.
 * it can be called from the interrupt path or explicitly during the context switch code
 */
static void
pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, struct pt_regs *regs)
{
	pfm_ovfl_arg_t *ovfl_arg;
	unsigned long mask;
	unsigned long old_val, ovfl_val, new_val;
	unsigned long ovfl_notify = 0UL, ovfl_pmds = 0UL, smpl_pmds = 0UL, reset_pmds;
	unsigned long tstamp;
	pfm_ovfl_ctrl_t ovfl_ctrl;
	unsigned int i, has_smpl;
	int must_notify = 0;

	if (unlikely(ctx->ctx_state == PFM_CTX_ZOMBIE)) goto stop_monitoring;

	/*
	 * sanity test. Should never happen
	 */
	if (unlikely((pmc0 & 0x1) == 0)) goto sanity_check;

	tstamp   = ia64_get_itc();
	mask     = pmc0 >> PMU_FIRST_COUNTER;
	ovfl_val = pmu_conf->ovfl_val;
	has_smpl = CTX_HAS_SMPL(ctx);

	DPRINT_ovfl(("pmc0=0x%lx pid=%d iip=0x%lx, %s "
		     "used_pmds=0x%lx\n",
			pmc0,
			task ? task->pid : -1,
			(regs ? regs->cr_iip : 0),
			CTX_OVFL_NOBLOCK(ctx) ? "nonblocking" : "blocking",
			ctx->ctx_used_pmds[0]));

	/*
	 * first we update the virtual counters
	 * assume there was a prior ia64_srlz_d() issued
	 */
	for (i = PMU_FIRST_COUNTER; mask; i++, mask >>= 1) {

		/* skip pmd which did not overflow */
		if ((mask & 0x1) == 0) continue;

		/*
		 * Note that the pmd is not necessarily 0 at this point as qualified events
		 * may have happened before the PMU was frozen. The residual count is not
		 * taken into consideration here but will be with any read of the pmd via
		 * pfm_read_pmds().
		 */
		old_val              = new_val = ctx->ctx_pmds[i].val;
		new_val             += 1 + ovfl_val;
		ctx->ctx_pmds[i].val = new_val;

		/*
		 * check for overflow condition
		 */
		if (likely(old_val > new_val)) {
			ovfl_pmds |= 1UL << i;
			if (PMC_OVFL_NOTIFY(ctx, i)) ovfl_notify |= 1UL << i;
		}

		DPRINT_ovfl(("ctx_pmd[%d].val=0x%lx old_val=0x%lx pmd=0x%lx ovfl_pmds=0x%lx ovfl_notify=0x%lx\n",
			i,
			new_val,
			old_val,
			ia64_get_pmd(i) & ovfl_val,
			ovfl_pmds,
			ovfl_notify));
	}

	/*
	 * there was no 64-bit overflow, nothing else to do
	 */
	if (ovfl_pmds == 0UL) return;
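
	/*
	 * Worked example of the virtual-counter arithmetic above, assuming a
	 * PMU with 47-bit hardware counters (ovfl_val = 2^47 - 1):
	 *
	 *   ctx_pmds[i].val carries the upper bits of the emulated 64-bit
	 *   counter, the live hardware pmd carries the low 47 bits. On each
	 *   hardware overflow we add 1 + ovfl_val = 2^47 to the software
	 *   value. Wrap-around of that 64-bit sum (old_val > new_val) is the
	 *   only genuine 64-bit overflow and is what triggers notification.
	 *   A later pfm_read_pmds()/pfm_flush_pmds() recombines the pieces:
	 *
	 *     full_value = ctx_pmds[i].val + (ia64_get_pmd(i) & ovfl_val);
	 */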
	/*
	 * reset all control bits
	 */
	ovfl_ctrl.val = 0;
	reset_pmds    = 0UL;

	/*
	 * if a sampling format module exists, then we "cache" the overflow by
	 * calling the module's handler() routine.
	 */
	if (has_smpl) {
		unsigned long start_cycles, end_cycles;
		unsigned long pmd_mask;
		int j, k, ret = 0;
		int this_cpu = smp_processor_id();

		pmd_mask = ovfl_pmds >> PMU_FIRST_COUNTER;
		ovfl_arg = &ctx->ctx_ovfl_arg;

		prefetch(ctx->ctx_smpl_hdr);

		for(i=PMU_FIRST_COUNTER; pmd_mask && ret == 0; i++, pmd_mask >>=1) {

			mask = 1UL << i;

			if ((pmd_mask & 0x1) == 0) continue;

			ovfl_arg->ovfl_pmd      = (unsigned char )i;
			ovfl_arg->ovfl_notify   = ovfl_notify & mask ? 1 : 0;
			ovfl_arg->active_set    = 0;
			ovfl_arg->ovfl_ctrl.val = 0; /* module must fill in all fields */
			ovfl_arg->smpl_pmds[0]  = smpl_pmds = ctx->ctx_pmds[i].smpl_pmds[0];

			ovfl_arg->pmd_value      = ctx->ctx_pmds[i].val;
			ovfl_arg->pmd_last_reset = ctx->ctx_pmds[i].lval;
			ovfl_arg->pmd_eventid    = ctx->ctx_pmds[i].eventid;

			/*
			 * copy values of pmds of interest. Sampling format may copy them
			 * into sampling buffer.
			 */
			if (smpl_pmds) {
				for(j=0, k=0; smpl_pmds; j++, smpl_pmds >>=1) {
					if ((smpl_pmds & 0x1) == 0) continue;
					ovfl_arg->smpl_pmds_values[k++] = PMD_IS_COUNTING(j) ? pfm_read_soft_counter(ctx, j) : ia64_get_pmd(j);
					DPRINT_ovfl(("smpl_pmd[%d]=pmd%u=0x%lx\n", k-1, j, ovfl_arg->smpl_pmds_values[k-1]));
				}
			}

			pfm_stats[this_cpu].pfm_smpl_handler_calls++;

			start_cycles = ia64_get_itc();

			/*
			 * call custom buffer format record (handler) routine
			 */
			ret = (*ctx->ctx_buf_fmt->fmt_handler)(task, ctx->ctx_smpl_hdr, ovfl_arg, regs, tstamp);

			end_cycles = ia64_get_itc();

			/*
			 * For those controls, we take the union because they have
			 * an all or nothing behavior.
			 */
			ovfl_ctrl.bits.notify_user     |= ovfl_arg->ovfl_ctrl.bits.notify_user;
			ovfl_ctrl.bits.block_task      |= ovfl_arg->ovfl_ctrl.bits.block_task;
			ovfl_ctrl.bits.mask_monitoring |= ovfl_arg->ovfl_ctrl.bits.mask_monitoring;

			/*
			 * build the bitmask of pmds to reset now
			 */
			if (ovfl_arg->ovfl_ctrl.bits.reset_ovfl_pmds) reset_pmds |= mask;

			pfm_stats[this_cpu].pfm_smpl_handler_cycles += end_cycles - start_cycles;
		}
		/*
		 * when the module cannot handle the rest of the overflows, we abort right here
		 */
		if (ret && pmd_mask) {
			DPRINT(("handler aborts leftover ovfl_pmds=0x%lx\n",
				pmd_mask<<PMU_FIRST_COUNTER));
		}
		/*
		 * remove the pmds we reset now from the set of pmds to reset in pfm_restart()
		 */
		ovfl_pmds &= ~reset_pmds;
	} else {
		/*
		 * when no sampling module is used, then the default
		 * is to notify on overflow if requested by user
		 */
		ovfl_ctrl.bits.notify_user     = ovfl_notify ? 1 : 0;
		ovfl_ctrl.bits.block_task      = ovfl_notify ? 1 : 0;
		ovfl_ctrl.bits.mask_monitoring = ovfl_notify ? 1 : 0; /* XXX: change for saturation */
		ovfl_ctrl.bits.reset_ovfl_pmds = ovfl_notify ? 0 : 1;
		/*
		 * if needed, we reset all overflowed pmds
		 */
		if (ovfl_notify == 0) reset_pmds = ovfl_pmds;
	}

	DPRINT_ovfl(("ovfl_pmds=0x%lx reset_pmds=0x%lx\n", ovfl_pmds, reset_pmds));
	/*
	 * reset the requested PMD registers using the short reset values
	 */
	if (reset_pmds) {
		unsigned long bm = reset_pmds;
		pfm_reset_regs(ctx, &bm, PFM_PMD_SHORT_RESET);
	}

	if (ovfl_notify && ovfl_ctrl.bits.notify_user) {
		/*
		 * keep track of what to reset when unblocking
		 */
		ctx->ctx_ovfl_regs[0] = ovfl_pmds;

		/*
		 * check for blocking context
		 */
		if (CTX_OVFL_NOBLOCK(ctx) == 0 && ovfl_ctrl.bits.block_task) {

			ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_BLOCK;

			/*
			 * set the perfmon specific checking pending work for the task
			 */
			PFM_SET_WORK_PENDING(task, 1);

			/*
			 * when coming from ctxsw, current still points to the
			 * previous task, therefore we must work with task and not current.
			 */
			pfm_set_task_notify(task);
		}
		/*
		 * defer until state is changed (shorten spin window). the context is locked
		 * anyway, so the signal receiver would come spin for nothing.
		 */
		must_notify = 1;
	}

	DPRINT_ovfl(("owner [%d] pending=%ld reason=%u ovfl_pmds=0x%lx ovfl_notify=0x%lx masked=%d\n",
			GET_PMU_OWNER() ? GET_PMU_OWNER()->pid : -1,
			PFM_GET_WORK_PENDING(task),
			ctx->ctx_fl_trap_reason,
			ovfl_pmds,
			ovfl_notify,
			ovfl_ctrl.bits.mask_monitoring ? 1 : 0));
	/*
	 * in case monitoring must be stopped, we toggle the psr bits
	 */
	if (ovfl_ctrl.bits.mask_monitoring) {
		pfm_mask_monitoring(task);
		ctx->ctx_state = PFM_CTX_MASKED;
		ctx->ctx_fl_can_restart = 1;
	}

	/*
	 * send notification now
	 */
	if (must_notify) pfm_ovfl_notify_user(ctx, ovfl_notify);

	return;
sanity_check:
	printk(KERN_ERR "perfmon: CPU%d overflow handler [%d] pmc0=0x%lx\n",
			smp_processor_id(),
			task ? task->pid : -1,
			pmc0);
	return;

stop_monitoring:
	/*
	 * in SMP, zombie context is never restored but reclaimed in pfm_load_regs().
	 * Moreover, zombies are also reclaimed in pfm_save_regs(). Therefore we can
	 * come here as zombie only if the task is the current task. In which case, we
	 * can access the PMU hardware directly.
	 *
	 * Note that zombies do have PM_VALID set. So here we do the minimal.
	 *
	 * In case the context was zombified it could not be reclaimed at the time
	 * the monitoring program exited. At this point, the PMU reservation has been
	 * returned, the sampling buffer has been freed. We must convert this call
	 * into a spurious interrupt. However, we must also avoid infinite overflows
	 * by stopping monitoring for this task. We can only come here for a per-task
	 * context. All we need to do is to stop monitoring using the psr bits which
	 * are always task private. By re-enabling secure monitoring, we ensure that
	 * the monitored task will not be able to re-activate monitoring.
	 * The task will eventually be context switched out, at which point the context
	 * will be reclaimed (that includes releasing ownership of the PMU).
	 *
	 * So there might be a window of time where the number of per-task sessions is zero
	 * yet one PMU might have an owner and get at most one overflow interrupt for a zombie
	 * context. This is safe because if a per-task session comes in, it will push this one
	 * out and by the virtue of pfm_save_regs(), this one will disappear. If a system wide
	 * session is forced on that CPU, given that we use task pinning, pfm_save_regs() will
	 * also push our zombie context out.
	 *
	 * Overall pretty hairy stuff....
	 */
	DPRINT(("ctx is zombie for [%d], converted to spurious\n", task ? task->pid : -1));
	pfm_clear_psr_up();
	ia64_psr(regs)->up = 0;
	ia64_psr(regs)->sp = 1;
	return;
}
static int
pfm_do_interrupt_handler(int irq, void *arg, struct pt_regs *regs)
{
	struct task_struct *task;
	pfm_context_t *ctx;
	unsigned long flags;
	u64 pmc0;
	int this_cpu = smp_processor_id();
	int retval = 0;

	pfm_stats[this_cpu].pfm_ovfl_intr_count++;

	/*
	 * srlz.d done before arriving here
	 */
	pmc0 = ia64_get_pmc(0);

	task = GET_PMU_OWNER();
	ctx  = GET_PMU_CTX();

	/*
	 * if we have some pending bits set
	 * assumes : if any PMC0.bit[63-1] is set, then PMC0.fr = 1
	 */
	if (PMC0_HAS_OVFL(pmc0) && task) {
		/*
		 * we assume that pmc0.fr is always set here
		 */

		/* sanity check */
		if (!ctx) goto report_spurious1;

		if (ctx->ctx_fl_system == 0 && (task->thread.flags & IA64_THREAD_PM_VALID) == 0)
			goto report_spurious2;

		PROTECT_CTX_NOPRINT(ctx, flags);

		pfm_overflow_handler(task, ctx, pmc0, regs);

		UNPROTECT_CTX_NOPRINT(ctx, flags);

	} else {
		pfm_stats[this_cpu].pfm_spurious_ovfl_intr_count++;
		retval = -1;
	}
	/*
	 * keep it unfrozen at all times
	 */
	pfm_unfreeze_pmu();

	return retval;

report_spurious1:
	printk(KERN_INFO "perfmon: spurious overflow interrupt on CPU%d: process %d has no PFM context\n",
		this_cpu, task->pid);
	pfm_unfreeze_pmu();
	return -1;
report_spurious2:
	printk(KERN_INFO "perfmon: spurious overflow interrupt on CPU%d: process %d, invalid flag\n",
		this_cpu,
		task->pid);
	pfm_unfreeze_pmu();
	return -1;
}
static irqreturn_t
pfm_interrupt_handler(int irq, void *arg, struct pt_regs *regs)
{
	unsigned long start_cycles, total_cycles;
	unsigned long min, max;
	int this_cpu;
	int ret;

	this_cpu = get_cpu();
	if (likely(!pfm_alt_intr_handler)) {
		min = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min;
		max = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max;

		start_cycles = ia64_get_itc();

		ret = pfm_do_interrupt_handler(irq, arg, regs);

		total_cycles = ia64_get_itc();

		/*
		 * don't measure spurious interrupts
		 */
		if (likely(ret == 0)) {
			total_cycles -= start_cycles;

			if (total_cycles < min) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min = total_cycles;
			if (total_cycles > max) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max = total_cycles;

			pfm_stats[this_cpu].pfm_ovfl_intr_cycles += total_cycles;
		}
	} else {
		(*pfm_alt_intr_handler->handler)(irq, arg, regs);
	}

	put_cpu_no_resched();
	return IRQ_HANDLED;
}
/*
 * /proc/perfmon interface, for debug only
 */

#define PFM_PROC_SHOW_HEADER	((void *)NR_CPUS+1)

static void *
pfm_proc_start(struct seq_file *m, loff_t *pos)
{
	if (*pos == 0) {
		return PFM_PROC_SHOW_HEADER;
	}

	while (*pos <= NR_CPUS) {
		if (cpu_online(*pos - 1)) {
			return (void *)*pos;
		}
		++*pos;
	}
	return NULL;
}

static void *
pfm_proc_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return pfm_proc_start(m, pos);
}

static void
pfm_proc_stop(struct seq_file *m, void *v)
{
}
static void
pfm_proc_show_header(struct seq_file *m)
{
	struct list_head * pos;
	pfm_buffer_fmt_t * entry;
	unsigned long flags;

	seq_printf(m,
		"perfmon version           : %u.%u\n"
		"model                     : %s\n"
		"fastctxsw                 : %s\n"
		"expert mode               : %s\n"
		"ovfl_mask                 : 0x%lx\n"
		"PMU flags                 : 0x%x\n",
		PFM_VERSION_MAJ, PFM_VERSION_MIN,
		pmu_conf->pmu_name,
		pfm_sysctl.fastctxsw > 0 ? "Yes": "No",
		pfm_sysctl.expert_mode > 0 ? "Yes": "No",
		pmu_conf->ovfl_val,
		pmu_conf->flags);

	LOCK_PFS(flags);

	seq_printf(m,
		"proc_sessions             : %u\n"
		"sys_sessions              : %u\n"
		"sys_use_dbregs            : %u\n"
		"ptrace_use_dbregs         : %u\n",
		pfm_sessions.pfs_task_sessions,
		pfm_sessions.pfs_sys_sessions,
		pfm_sessions.pfs_sys_use_dbregs,
		pfm_sessions.pfs_ptrace_use_dbregs);

	UNLOCK_PFS(flags);

	spin_lock(&pfm_buffer_fmt_lock);

	list_for_each(pos, &pfm_buffer_fmt_list) {
		entry = list_entry(pos, pfm_buffer_fmt_t, fmt_list);
		seq_printf(m, "format                    : %02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x %s\n",
			entry->fmt_uuid[0],
			entry->fmt_uuid[1],
			entry->fmt_uuid[2],
			entry->fmt_uuid[3],
			entry->fmt_uuid[4],
			entry->fmt_uuid[5],
			entry->fmt_uuid[6],
			entry->fmt_uuid[7],
			entry->fmt_uuid[8],
			entry->fmt_uuid[9],
			entry->fmt_uuid[10],
			entry->fmt_uuid[11],
			entry->fmt_uuid[12],
			entry->fmt_uuid[13],
			entry->fmt_uuid[14],
			entry->fmt_uuid[15],
			entry->fmt_name);
	}
	spin_unlock(&pfm_buffer_fmt_lock);
}
static int
pfm_proc_show(struct seq_file *m, void *v)
{
	unsigned long psr;
	unsigned int i;
	int cpu;

	if (v == PFM_PROC_SHOW_HEADER) {
		pfm_proc_show_header(m);
		return 0;
	}

	/* show info for CPU (v - 1) */

	cpu = (long)v - 1;
	seq_printf(m,
		"CPU%-2d overflow intrs      : %lu\n"
		"CPU%-2d overflow cycles     : %lu\n"
		"CPU%-2d overflow min        : %lu\n"
		"CPU%-2d overflow max        : %lu\n"
		"CPU%-2d smpl handler calls  : %lu\n"
		"CPU%-2d smpl handler cycles : %lu\n"
		"CPU%-2d spurious intrs      : %lu\n"
		"CPU%-2d replay intrs        : %lu\n"
		"CPU%-2d syst_wide           : %d\n"
		"CPU%-2d dcr_pp              : %d\n"
		"CPU%-2d exclude idle        : %d\n"
		"CPU%-2d owner               : %d\n"
		"CPU%-2d context             : %p\n"
		"CPU%-2d activations         : %lu\n",
		cpu, pfm_stats[cpu].pfm_ovfl_intr_count,
		cpu, pfm_stats[cpu].pfm_ovfl_intr_cycles,
		cpu, pfm_stats[cpu].pfm_ovfl_intr_cycles_min,
		cpu, pfm_stats[cpu].pfm_ovfl_intr_cycles_max,
		cpu, pfm_stats[cpu].pfm_smpl_handler_calls,
		cpu, pfm_stats[cpu].pfm_smpl_handler_cycles,
		cpu, pfm_stats[cpu].pfm_spurious_ovfl_intr_count,
		cpu, pfm_stats[cpu].pfm_replay_ovfl_intr_count,
		cpu, pfm_get_cpu_data(pfm_syst_info, cpu) & PFM_CPUINFO_SYST_WIDE ? 1 : 0,
		cpu, pfm_get_cpu_data(pfm_syst_info, cpu) & PFM_CPUINFO_DCR_PP ? 1 : 0,
		cpu, pfm_get_cpu_data(pfm_syst_info, cpu) & PFM_CPUINFO_EXCL_IDLE ? 1 : 0,
		cpu, pfm_get_cpu_data(pmu_owner, cpu) ? pfm_get_cpu_data(pmu_owner, cpu)->pid : -1,
		cpu, pfm_get_cpu_data(pmu_ctx, cpu),
		cpu, pfm_get_cpu_data(pmu_activation_number, cpu));

	if (num_online_cpus() == 1 && pfm_sysctl.debug > 0) {

		psr = pfm_get_psr();

		ia64_srlz_d();

		seq_printf(m,
			"CPU%-2d psr                 : 0x%lx\n"
			"CPU%-2d pmc0                : 0x%lx\n",
			cpu, psr,
			cpu, ia64_get_pmc(0));

		for (i=0; PMC_IS_LAST(i) == 0; i++) {
			if (PMC_IS_COUNTING(i) == 0) continue;
			seq_printf(m,
				"CPU%-2d pmc%u                : 0x%lx\n"
				"CPU%-2d pmd%u                : 0x%lx\n",
				cpu, i, ia64_get_pmc(i),
				cpu, i, ia64_get_pmd(i));
		}
	}
	return 0;
}
struct seq_operations pfm_seq_ops = {
	.start =	pfm_proc_start,
	.next =		pfm_proc_next,
	.stop =		pfm_proc_stop,
	.show =		pfm_proc_show
};

static int
pfm_proc_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &pfm_seq_ops);
}
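/*
 * Shape of the per-CPU block emitted above (placeholder numbers, not
 * captured output); a quick way to exercise it is: cat /proc/perfmon
 *
 *   CPU0  overflow intrs      : 12345
 *   CPU0  overflow cycles     : 678901
 *   ...
 *   CPU0  owner               : -1
 *   CPU0  context             : 0000000000000000
 */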
/*
 * we come here as soon as local_cpu_data->pfm_syst_wide is set. this happens
 * during pfm_enable() hence before pfm_start(). We cannot assume monitoring
 * is active or inactive based on the mode. We must rely on the value in
 * local_cpu_data->pfm_syst_info
 */
void
pfm_syst_wide_update_task(struct task_struct *task, unsigned long info, int is_ctxswin)
{
	struct pt_regs *regs;
	unsigned long dcr;
	unsigned long dcr_pp;

	dcr_pp = info & PFM_CPUINFO_DCR_PP ? 1 : 0;

	/*
	 * pid 0 is guaranteed to be the idle task. There is one such task with pid 0
	 * on every CPU, so we can rely on the pid to identify the idle task.
	 */
	if ((info & PFM_CPUINFO_EXCL_IDLE) == 0 || task->pid) {
		regs = ia64_task_regs(task);
		ia64_psr(regs)->pp = is_ctxswin ? dcr_pp : 0;
		return;
	}
	/*
	 * if monitoring has started
	 */
	if (dcr_pp) {
		dcr = ia64_getreg(_IA64_REG_CR_DCR);
		/*
		 * context switching in?
		 */
		if (is_ctxswin) {
			/* mask monitoring for the idle task */
			ia64_setreg(_IA64_REG_CR_DCR, dcr & ~IA64_DCR_PP);
			pfm_clear_psr_pp();
			ia64_srlz_i();
			return;
		}
		/*
		 * context switching out
		 * restore monitoring for next task
		 *
		 * Due to inlining this odd if-then-else construction generates
		 * better code.
		 */
		ia64_setreg(_IA64_REG_CR_DCR, dcr | IA64_DCR_PP);
		pfm_set_psr_pp();
		ia64_srlz_i();
	}
}
static void
pfm_force_cleanup(pfm_context_t *ctx, struct pt_regs *regs)
{
	struct task_struct *task = ctx->ctx_task;

	ia64_psr(regs)->up = 0;
	ia64_psr(regs)->sp = 1;

	if (GET_PMU_OWNER() == task) {
		DPRINT(("cleared ownership for [%d]\n", ctx->ctx_task->pid));
		SET_PMU_OWNER(NULL, NULL);
	}

	/*
	 * disconnect the task from the context and vice-versa
	 */
	PFM_SET_WORK_PENDING(task, 0);

	task->thread.pfm_context  = NULL;
	task->thread.flags       &= ~IA64_THREAD_PM_VALID;

	DPRINT(("force cleanup for [%d]\n", task->pid));
}
#ifdef CONFIG_SMP
/*
 * in 2.6, interrupts are masked when we come here and the runqueue lock is held
 */
void
pfm_save_regs(struct task_struct *task)
{
	pfm_context_t *ctx;
	struct thread_struct *t;
	unsigned long flags;
	u64 psr;

	ctx = PFM_GET_CTX(task);
	if (ctx == NULL) return;
	t = &task->thread;

	/*
	 * we always come here with interrupts ALREADY disabled by
	 * the scheduler. So we simply need to protect against concurrent
	 * access, not CPU concurrency.
	 */
	flags = pfm_protect_ctx_ctxsw(ctx);

	if (ctx->ctx_state == PFM_CTX_ZOMBIE) {
		struct pt_regs *regs = ia64_task_regs(task);

		pfm_clear_psr_up();

		pfm_force_cleanup(ctx, regs);

		BUG_ON(ctx->ctx_smpl_hdr);

		pfm_unprotect_ctx_ctxsw(ctx, flags);

		pfm_context_free(ctx);

		return;
	}

	/*
	 * save current PSR: needed because we modify it
	 */
	ia64_srlz_d();
	psr = pfm_get_psr();

	BUG_ON(psr & (IA64_PSR_I));

	/*
	 * stop monitoring:
	 * This is the last instruction which may generate an overflow
	 *
	 * We do not need to set psr.sp because, it is irrelevant in kernel.
	 * It will be restored from ipsr when going back to user level
	 */
	pfm_clear_psr_up();

	/*
	 * keep a copy of psr.up (for reload)
	 */
	ctx->ctx_saved_psr_up = psr & IA64_PSR_UP;

	/*
	 * release ownership of this PMU.
	 * PM interrupts are masked, so nothing
	 * can happen.
	 */
	SET_PMU_OWNER(NULL, NULL);

	/*
	 * we systematically save the PMDs as we have no
	 * guarantee we will be scheduled on that same
	 * CPU again.
	 */
	pfm_save_pmds(t->pmds, ctx->ctx_used_pmds[0]);

	/*
	 * save pmc0 ia64_srlz_d() done in pfm_save_pmds()
	 * we will need it on the restore path to check
	 * for pending overflow.
	 */
	t->pmcs[0] = ia64_get_pmc(0);

	/*
	 * unfreeze PMU if had pending overflows
	 */
	if (t->pmcs[0] & ~0x1UL) pfm_unfreeze_pmu();

	/*
	 * finally, allow context access.
	 * interrupts will still be masked after this call.
	 */
	pfm_unprotect_ctx_ctxsw(ctx, flags);
}
#else /* !CONFIG_SMP */
void
pfm_save_regs(struct task_struct *task)
{
	pfm_context_t *ctx;
	u64 psr;

	ctx = PFM_GET_CTX(task);
	if (ctx == NULL) return;

	/*
	 * save current PSR: needed because we modify it
	 */
	psr = pfm_get_psr();

	BUG_ON(psr & (IA64_PSR_I));

	/*
	 * stop monitoring:
	 * This is the last instruction which may generate an overflow
	 *
	 * We do not need to set psr.sp because, it is irrelevant in kernel.
	 * It will be restored from ipsr when going back to user level
	 */
	pfm_clear_psr_up();

	/*
	 * keep a copy of psr.up (for reload)
	 */
	ctx->ctx_saved_psr_up = psr & IA64_PSR_UP;
}
static void
pfm_lazy_save_regs (struct task_struct *task)
{
	pfm_context_t *ctx;
	struct thread_struct *t;
	unsigned long flags;

	{ u64 psr = pfm_get_psr();
	  BUG_ON(psr & IA64_PSR_UP);
	}

	ctx = PFM_GET_CTX(task);
	t   = &task->thread;

	/*
	 * we need to mask PMU overflow here to
	 * make sure that we maintain pmc0 until
	 * we save it. overflow interrupts are
	 * treated as spurious if there is no
	 * owner.
	 *
	 * XXX: I don't think this is necessary
	 */
	PROTECT_CTX(ctx,flags);

	/*
	 * release ownership of this PMU.
	 * must be done before we save the registers.
	 *
	 * after this call any PMU interrupt is treated
	 * as spurious.
	 */
	SET_PMU_OWNER(NULL, NULL);

	/*
	 * save all the pmds we use
	 */
	pfm_save_pmds(t->pmds, ctx->ctx_used_pmds[0]);

	/*
	 * save pmc0 ia64_srlz_d() done in pfm_save_pmds()
	 * it is needed to check for pending overflow
	 * on the restore path
	 */
	t->pmcs[0] = ia64_get_pmc(0);

	/*
	 * unfreeze PMU if had pending overflows
	 */
	if (t->pmcs[0] & ~0x1UL) pfm_unfreeze_pmu();

	/*
	 * now we can unmask PMU interrupts, they will
	 * be treated as purely spurious and we will not
	 * lose any information
	 */
	UNPROTECT_CTX(ctx,flags);
}
#endif /* CONFIG_SMP */
#ifdef CONFIG_SMP
/*
 * in 2.6, interrupts are masked when we come here and the runqueue lock is held
 */
void
pfm_load_regs (struct task_struct *task)
{
	pfm_context_t *ctx;
	struct thread_struct *t;
	unsigned long pmc_mask = 0UL, pmd_mask = 0UL;
	unsigned long flags;
	u64 psr, psr_up;
	int need_irq_resend;

	ctx = PFM_GET_CTX(task);
	if (unlikely(ctx == NULL)) return;

	BUG_ON(GET_PMU_OWNER());

	t = &task->thread;
	/*
	 * possible on unload
	 */
	if (unlikely((t->flags & IA64_THREAD_PM_VALID) == 0)) return;

	/*
	 * we always come here with interrupts ALREADY disabled by
	 * the scheduler. So we simply need to protect against concurrent
	 * access, not CPU concurrency.
	 */
	flags = pfm_protect_ctx_ctxsw(ctx);
	psr   = pfm_get_psr();

	need_irq_resend = pmu_conf->flags & PFM_PMU_IRQ_RESEND;

	BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP));
	BUG_ON(psr & IA64_PSR_I);

	if (unlikely(ctx->ctx_state == PFM_CTX_ZOMBIE)) {
		struct pt_regs *regs = ia64_task_regs(task);

		BUG_ON(ctx->ctx_smpl_hdr);

		pfm_force_cleanup(ctx, regs);

		pfm_unprotect_ctx_ctxsw(ctx, flags);

		/*
		 * this one (kmalloc'ed) is fine with interrupts disabled
		 */
		pfm_context_free(ctx);

		return;
	}

	/*
	 * we restore ALL the debug registers to avoid picking up
	 * stale state.
	 */
	if (ctx->ctx_fl_using_dbreg) {
		pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
		pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
	}
	/*
	 * retrieve saved psr.up
	 */
	psr_up = ctx->ctx_saved_psr_up;

	/*
	 * if we were the last user of the PMU on that CPU,
	 * then nothing to do except restore psr
	 */
	if (GET_LAST_CPU(ctx) == smp_processor_id() && ctx->ctx_last_activation == GET_ACTIVATION()) {
		/*
		 * retrieve partial reload masks (due to user modifications)
		 */
		pmc_mask = ctx->ctx_reload_pmcs[0];
		pmd_mask = ctx->ctx_reload_pmds[0];

	} else {
		/*
		 * To avoid leaking information to the user level when psr.sp=0,
		 * we must reload ALL implemented pmds (even the ones we don't use).
		 * In the kernel we only allow PFM_READ_PMDS on registers which
		 * we initialized or requested (sampling) so there is no risk there.
		 */
		pmd_mask = pfm_sysctl.fastctxsw ? ctx->ctx_used_pmds[0] : ctx->ctx_all_pmds[0];

		/*
		 * ALL accessible PMCs are systematically reloaded, unused registers
		 * get their default (from pfm_reset_pmu_state()) values to avoid picking
		 * up stale configuration.
		 *
		 * PMC0 is never in the mask. It is always restored separately.
		 */
		pmc_mask = ctx->ctx_all_pmcs[0];
	}
	/*
	 * when context is MASKED, we will restore PMC with plm=0
	 * and PMD with stale information, but that's ok, nothing
	 * will be captured.
	 *
	 * XXX: optimize here
	 */
	if (pmd_mask) pfm_restore_pmds(t->pmds, pmd_mask);
	if (pmc_mask) pfm_restore_pmcs(t->pmcs, pmc_mask);

	/*
	 * check for pending overflow at the time the state
	 * was saved.
	 */
	if (unlikely(PMC0_HAS_OVFL(t->pmcs[0]))) {
		/*
		 * reload pmc0 with the overflow information
		 * On McKinley PMU, this will trigger a PMU interrupt
		 */
		ia64_set_pmc(0, t->pmcs[0]);
		ia64_srlz_d();
		t->pmcs[0] = 0UL;

		/*
		 * will replay the PMU interrupt
		 */
		if (need_irq_resend) hw_resend_irq(NULL, IA64_PERFMON_VECTOR);

		pfm_stats[smp_processor_id()].pfm_replay_ovfl_intr_count++;
	}

	/*
	 * we just did a reload, so we reset the partial reload fields
	 */
	ctx->ctx_reload_pmcs[0] = 0UL;
	ctx->ctx_reload_pmds[0] = 0UL;

	SET_LAST_CPU(ctx, smp_processor_id());

	/*
	 * bump activation value for this PMU
	 */
	INC_ACTIVATION();
	/*
	 * record current activation for this context
	 */
	SET_ACTIVATION(ctx);

	/*
	 * establish new ownership.
	 */
	SET_PMU_OWNER(task, ctx);

	/*
	 * restore the psr.up bit. measurement
	 * is active again.
	 * no PMU interrupt can happen at this point
	 * because we still have interrupts disabled.
	 */
	if (likely(psr_up)) pfm_set_psr_up();

	/*
	 * allow concurrent access to context
	 */
	pfm_unprotect_ctx_ctxsw(ctx, flags);
}
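/*
 * Worked example of the activation shortcut above: task T runs on CPU2
 * (activation #41), is preempted by a non-monitoring task, then resumes
 * on CPU2 with no other perfmon user in between. GET_LAST_CPU(ctx) == 2
 * and ctx_last_activation == GET_ACTIVATION() still hold, so only the
 * partial ctx_reload_* masks (registers the user touched while the task
 * was switched out) are written back, instead of the full PMC/PMD set.
 */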
#else /* !CONFIG_SMP */
/*
 * reload PMU state for UP kernels
 * in 2.5 we come here with interrupts disabled
 */
void
pfm_load_regs (struct task_struct *task)
{
	struct thread_struct *t;
	pfm_context_t *ctx;
	struct task_struct *owner;
	unsigned long pmd_mask, pmc_mask;
	u64 psr, psr_up;
	int need_irq_resend;

	owner = GET_PMU_OWNER();
	ctx   = PFM_GET_CTX(task);
	t     = &task->thread;
	psr   = pfm_get_psr();

	BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP));
	BUG_ON(psr & IA64_PSR_I);

	/*
	 * we restore ALL the debug registers to avoid picking up
	 * stale state.
	 *
	 * This must be done even when the task is still the owner
	 * as the registers may have been modified via ptrace()
	 * (not perfmon) by the previous task.
	 */
	if (ctx->ctx_fl_using_dbreg) {
		pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
		pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
	}

	/*
	 * retrieve saved psr.up
	 */
	psr_up = ctx->ctx_saved_psr_up;
	need_irq_resend = pmu_conf->flags & PFM_PMU_IRQ_RESEND;

	/*
	 * short path, our state is still there, just
	 * need to restore psr and we go
	 *
	 * we do not touch either PMC nor PMD. the psr is not touched
	 * by the overflow_handler. So we are safe w.r.t. interrupt
	 * concurrency even without interrupt masking.
	 */
	if (likely(owner == task)) {
		if (likely(psr_up)) pfm_set_psr_up();
		return;
	}

	/*
	 * someone else is still using the PMU, first push it out and
	 * then we'll be able to install our stuff !
	 *
	 * Upon return, there will be no owner for the current PMU
	 */
	if (owner) pfm_lazy_save_regs(owner);

	/*
	 * To avoid leaking information to the user level when psr.sp=0,
	 * we must reload ALL implemented pmds (even the ones we don't use).
	 * In the kernel we only allow PFM_READ_PMDS on registers which
	 * we initialized or requested (sampling) so there is no risk there.
	 */
	pmd_mask = pfm_sysctl.fastctxsw ? ctx->ctx_used_pmds[0] : ctx->ctx_all_pmds[0];

	/*
	 * ALL accessible PMCs are systematically reloaded, unused registers
	 * get their default (from pfm_reset_pmu_state()) values to avoid picking
	 * up stale configuration.
	 *
	 * PMC0 is never in the mask. It is always restored separately
	 */
	pmc_mask = ctx->ctx_all_pmcs[0];

	pfm_restore_pmds(t->pmds, pmd_mask);
	pfm_restore_pmcs(t->pmcs, pmc_mask);

	/*
	 * check for pending overflow at the time the state
	 * was saved.
	 */
	if (unlikely(PMC0_HAS_OVFL(t->pmcs[0]))) {
		/*
		 * reload pmc0 with the overflow information
		 * On McKinley PMU, this will trigger a PMU interrupt
		 */
		ia64_set_pmc(0, t->pmcs[0]);
		ia64_srlz_d();

		t->pmcs[0] = 0UL;

		/*
		 * will replay the PMU interrupt
		 */
		if (need_irq_resend) hw_resend_irq(NULL, IA64_PERFMON_VECTOR);

		pfm_stats[smp_processor_id()].pfm_replay_ovfl_intr_count++;
	}

	/*
	 * establish new ownership.
	 */
	SET_PMU_OWNER(task, ctx);

	/*
	 * restore the psr.up bit. measurement
	 * is active again.
	 * no PMU interrupt can happen at this point
	 * because we still have interrupts disabled.
	 */
	if (likely(psr_up)) pfm_set_psr_up();
}
#endif /* CONFIG_SMP */
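/*
 * UP lazy-save timeline for the code above (orientation only):
 *
 *   switch out A (owner)  -> pfm_save_regs(): clear psr.up, PMU keeps A's state
 *   switch in  B (no ctx) -> nothing to do, A still owns the hardware
 *   switch in  A again    -> owner == task: just restore psr.up (short path)
 *   switch in  C (ctx)    -> pfm_lazy_save_regs(A) pushes A's state out to
 *                            thread.pmds[]/pmcs[0], then C reloads the PMU
 */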
/*
 * this function assumes monitoring is stopped
 */
static void
pfm_flush_pmds(struct task_struct *task, pfm_context_t *ctx)
{
	u64 pmc0;
	unsigned long mask2, val, pmd_val, ovfl_val;
	int i, can_access_pmu = 0;
	int is_self;

	/*
	 * is the caller the task being monitored (or which initiated the
	 * session for system wide measurements)
	 */
	is_self = ctx->ctx_task == task ? 1 : 0;

	/*
	 * can access PMU if task is the owner of the PMU state on the current CPU
	 * or if we are running on the CPU bound to the context in system-wide mode
	 * (that is not necessarily the task the context is attached to in this mode).
	 * In system-wide we always have can_access_pmu true because a task running on an
	 * invalid processor is flagged earlier in the call stack (see pfm_stop).
	 */
	can_access_pmu = (GET_PMU_OWNER() == task) || (ctx->ctx_fl_system && ctx->ctx_cpu == smp_processor_id());
	if (can_access_pmu) {
		/*
		 * Mark the PMU as not owned
		 * This will cause the interrupt handler to do nothing in case an overflow
		 * interrupt was in-flight
		 * This also guarantees that pmc0 will contain the final state
		 * It virtually gives us full control over overflow processing from that point
		 * on.
		 */
		SET_PMU_OWNER(NULL, NULL);
		DPRINT(("releasing ownership\n"));

		/*
		 * read current overflow status:
		 *
		 * we are guaranteed to read the final stable state
		 */
		ia64_srlz_d();
		pmc0 = ia64_get_pmc(0); /* slow */

		/*
		 * reset freeze bit, overflow status information destroyed
		 */
		pfm_unfreeze_pmu();
	} else {
		pmc0 = task->thread.pmcs[0];
		/*
		 * clear whatever overflow status bits there were
		 */
		task->thread.pmcs[0] = 0;
	}
	ovfl_val = pmu_conf->ovfl_val;
	/*
	 * we save all the used pmds
	 * we take care of overflows for counting PMDs
	 *
	 * XXX: sampling situation is not taken into account here
	 */
	mask2 = ctx->ctx_used_pmds[0];

	DPRINT(("is_self=%d ovfl_val=0x%lx mask2=0x%lx\n", is_self, ovfl_val, mask2));

	for (i = 0; mask2; i++, mask2>>=1) {

		/* skip non used pmds */
		if ((mask2 & 0x1) == 0) continue;

		/*
		 * can access PMU always true in system wide mode
		 */
		val = pmd_val = can_access_pmu ? ia64_get_pmd(i) : task->thread.pmds[i];

		if (PMD_IS_COUNTING(i)) {
			DPRINT(("[%d] pmd[%d] ctx_pmd=0x%lx hw_pmd=0x%lx\n",
				task->pid,
				i,
				ctx->ctx_pmds[i].val,
				val & ovfl_val));

			/*
			 * we rebuild the full 64 bit value of the counter
			 */
			val = ctx->ctx_pmds[i].val + (val & ovfl_val);

			/*
			 * now everything is in ctx_pmds[] and we need
			 * to clear the saved context from save_regs() such that
			 * pfm_read_pmds() gets the correct value
			 */
			pmd_val = 0UL;

			/*
			 * take care of overflow inline
			 */
			if (pmc0 & (1UL << i)) {
				val += 1 + ovfl_val;
				DPRINT(("[%d] pmd[%d] overflowed\n", task->pid, i));
			}
		}

		DPRINT(("[%d] ctx_pmd[%d]=0x%lx pmd_val=0x%lx\n", task->pid, i, val, pmd_val));

		if (is_self) task->thread.pmds[i] = pmd_val;

		ctx->ctx_pmds[i].val = val;
	}
}
static struct irqaction perfmon_irqaction = {
	.handler = pfm_interrupt_handler,
	.flags   = SA_INTERRUPT,
	.name    = "perfmon"
};
static void
pfm_alt_save_pmu_state(void *data)
{
	struct pt_regs *regs;

	regs = ia64_task_regs(current);

	DPRINT(("called\n"));

	/*
	 * should not be necessary but
	 * let's take no risk
	 */
	pfm_clear_psr_up();
	pfm_clear_psr_pp();
	ia64_psr(regs)->pp = 0;

	/*
	 * This call is required
	 * May cause a spurious interrupt on some processors
	 */
	pfm_freeze_pmu();

	ia64_srlz_d();
}
static void
pfm_alt_restore_pmu_state(void *data)
{
	struct pt_regs *regs;

	regs = ia64_task_regs(current);

	DPRINT(("called\n"));

	/*
	 * put PMU back in state expected
	 * by perfmon ctxsw
	 */
	pfm_clear_psr_up();
	pfm_clear_psr_pp();
	ia64_psr(regs)->pp = 0;

	/*
	 * perfmon runs with PMU unfrozen at all times
	 */
	pfm_unfreeze_pmu();

	ia64_srlz_d();
}
int
pfm_install_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl)
{
	int ret, i;
	int reserve_cpu;

	/* some sanity checks */
	if (hdl == NULL || hdl->handler == NULL) return -EINVAL;

	/* do the easy test first */
	if (pfm_alt_intr_handler) return -EBUSY;

	/* one at a time in the install or remove, just fail the others */
	if (!spin_trylock(&pfm_alt_install_check)) {
		return -EBUSY;
	}

	/* reserve our session */
	for_each_online_cpu(reserve_cpu) {
		ret = pfm_reserve_session(NULL, 1, reserve_cpu);
		if (ret) goto cleanup_reserve;
	}

	/* save the current system wide pmu states */
	ret = on_each_cpu(pfm_alt_save_pmu_state, NULL, 0, 1);
	if (ret) {
		DPRINT(("on_each_cpu() failed: %d\n", ret));
		goto cleanup_reserve;
	}

	/* officially change to the alternate interrupt handler */
	pfm_alt_intr_handler = hdl;

	spin_unlock(&pfm_alt_install_check);

	return 0;

cleanup_reserve:
	for_each_online_cpu(i) {
		/* don't unreserve more than we reserved */
		if (i >= reserve_cpu) break;

		pfm_unreserve_session(NULL, 1, i);
	}

	spin_unlock(&pfm_alt_install_check);

	return ret;
}
EXPORT_SYMBOL_GPL(pfm_install_alt_pmu_interrupt);
int
pfm_remove_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl)
{
	int i;
	int ret;

	if (hdl == NULL) return -EINVAL;

	/* cannot remove someone else's handler! */
	if (pfm_alt_intr_handler != hdl) return -EINVAL;

	/* one at a time in the install or remove, just fail the others */
	if (!spin_trylock(&pfm_alt_install_check)) {
		return -EBUSY;
	}

	pfm_alt_intr_handler = NULL;

	ret = on_each_cpu(pfm_alt_restore_pmu_state, NULL, 0, 1);
	if (ret) {
		DPRINT(("on_each_cpu() failed: %d\n", ret));
	}

	for_each_online_cpu(i) {
		pfm_unreserve_session(NULL, 1, i);
	}

	spin_unlock(&pfm_alt_install_check);

	return 0;
}
EXPORT_SYMBOL_GPL(pfm_remove_alt_pmu_interrupt);
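/*
 * Illustrative sketch (not compiled): how an in-kernel client, such as an
 * ia64 OProfile driver, might take over the PMU interrupt via the pair of
 * exports above. The handler body is a placeholder.
 */
#if 0
static int
my_pmu_handler(int irq, void *arg, struct pt_regs *regs)
{
	/* inspect PMC0/PMDs, record samples, leave the PMU unfrozen, ... */
	return 0;
}

static pfm_intr_handler_desc_t my_desc = {
	.handler = my_pmu_handler,
};

	/* module init */
	if (pfm_install_alt_pmu_interrupt(&my_desc)) return -EBUSY;
	/* module exit */
	pfm_remove_alt_pmu_interrupt(&my_desc);
#endif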
/*
 * perfmon initialization routine, called from the initcall() table
 */
static int init_pfm_fs(void);

static int __init
pfm_probe_pmu(void)
{
	pmu_config_t **p;
	int family;

	family = local_cpu_data->family;
	p      = pmu_confs;

	while(*p) {
		if ((*p)->probe) {
			if ((*p)->probe() == 0) goto found;
		} else if ((*p)->pmu_family == family || (*p)->pmu_family == 0xff) {
			goto found;
		}
		p++;
	}
	return -1;
found:
	pmu_conf = *p;
	return 0;
}

static struct file_operations pfm_proc_fops = {
	.open		= pfm_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

int __init
pfm_init(void)
{
	unsigned int n, n_counters, i;

	printk("perfmon: version %u.%u IRQ %u\n",
		PFM_VERSION_MAJ,
		PFM_VERSION_MIN,
		IA64_PERFMON_VECTOR);

	if (pfm_probe_pmu()) {
		printk(KERN_INFO "perfmon: disabled, there is no support for processor family %d\n",
		       local_cpu_data->family);
		return -ENODEV;
	}
	/*
	 * compute the number of implemented PMD/PMC from the
	 * description tables
	 */
	n = 0;
	for (i=0; PMC_IS_LAST(i) == 0; i++) {
		if (PMC_IS_IMPL(i) == 0) continue;
		pmu_conf->impl_pmcs[i>>6] |= 1UL << (i&63);
		n++;
	}
	pmu_conf->num_pmcs = n;

	n = 0; n_counters = 0;
	for (i=0; PMD_IS_LAST(i) == 0; i++) {
		if (PMD_IS_IMPL(i) == 0) continue;
		pmu_conf->impl_pmds[i>>6] |= 1UL << (i&63);
		n++;
		if (PMD_IS_COUNTING(i)) n_counters++;
	}
	pmu_conf->num_pmds     = n;
	pmu_conf->num_counters = n_counters;
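
	/*
	 * Worked example of the bitmap indexing above: the implemented-register
	 * masks are arrays of 64-bit words, so register i lands in word i>>6 at
	 * bit i&63. For instance, PMD10 sets bit 10 of impl_pmds[0], while a
	 * hypothetical register 70 would set bit 6 of impl_pmds[1].
	 */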
	/*
	 * sanity checks on the number of debug registers
	 */
	if (pmu_conf->use_rr_dbregs) {
		if (pmu_conf->num_ibrs > IA64_NUM_DBG_REGS) {
			printk(KERN_INFO "perfmon: unsupported number of code debug registers (%u)\n", pmu_conf->num_ibrs);
			pmu_conf = NULL;
			return -1;
		}
		if (pmu_conf->num_dbrs > IA64_NUM_DBG_REGS) {
			printk(KERN_INFO "perfmon: unsupported number of data debug registers (%u)\n", pmu_conf->num_dbrs);
			pmu_conf = NULL;
			return -1;
		}
	}

	printk("perfmon: %s PMU detected, %u PMCs, %u PMDs, %u counters (%lu bits)\n",
	       pmu_conf->pmu_name,
	       pmu_conf->num_pmcs,
	       pmu_conf->num_pmds,
	       pmu_conf->num_counters,
	       ffz(pmu_conf->ovfl_val));

	/* sanity check */
	if (pmu_conf->num_pmds >= IA64_NUM_PMD_REGS || pmu_conf->num_pmcs >= IA64_NUM_PMC_REGS) {
		printk(KERN_ERR "perfmon: not enough pmc/pmd, perfmon disabled\n");
		pmu_conf = NULL;
		return -1;
	}

	/*
	 * create /proc/perfmon (mostly for debugging purposes)
	 */
	perfmon_dir = create_proc_entry("perfmon", S_IRUGO, NULL);
	if (perfmon_dir == NULL) {
		printk(KERN_ERR "perfmon: cannot create /proc entry, perfmon disabled\n");
		pmu_conf = NULL;
		return -1;
	}
	/*
	 * install customized file operations for /proc/perfmon entry
	 */
	perfmon_dir->proc_fops = &pfm_proc_fops;

	/*
	 * create /proc/sys/kernel/perfmon (for debugging purposes)
	 */
	pfm_sysctl_header = register_sysctl_table(pfm_sysctl_root, 0);

	/*
	 * initialize all our spinlocks
	 */
	spin_lock_init(&pfm_sessions.pfs_lock);
	spin_lock_init(&pfm_buffer_fmt_lock);

	init_pfm_fs();

	for(i=0; i < NR_CPUS; i++) pfm_stats[i].pfm_ovfl_intr_cycles_min = ~0UL;

	return 0;
}

__initcall(pfm_init);
/*
 * this function is called before pfm_init()
 */
void
pfm_init_percpu (void)
{
	/*
	 * make sure no measurement is active
	 * (may inherit programmed PMCs from EFI).
	 */
	pfm_clear_psr_pp();
	pfm_clear_psr_up();

	/*
	 * we run with the PMU not frozen at all times
	 */
	pfm_unfreeze_pmu();

	if (smp_processor_id() == 0)
		register_percpu_irq(IA64_PERFMON_VECTOR, &perfmon_irqaction);

	ia64_setreg(_IA64_REG_CR_PMV, IA64_PERFMON_VECTOR);
	ia64_srlz_d();
}
/*
 * used for debug purposes only
 */
void
dump_pmu_state(const char *from)
{
	struct task_struct *task;
	struct thread_struct *t;
	struct pt_regs *regs;
	pfm_context_t *ctx;
	unsigned long psr, dcr, info, flags;
	int i, this_cpu;

	local_irq_save(flags);

	this_cpu = smp_processor_id();
	regs     = ia64_task_regs(current);
	info     = PFM_CPUINFO_GET();
	dcr      = ia64_getreg(_IA64_REG_CR_DCR);

	if (info == 0 && ia64_psr(regs)->pp == 0 && (dcr & IA64_DCR_PP) == 0) {
		local_irq_restore(flags);
		return;
	}

	printk("CPU%d from %s() current [%d] iip=0x%lx %s\n",
		this_cpu,
		from,
		current->pid,
		regs->cr_iip,
		current->comm);

	task = GET_PMU_OWNER();
	ctx  = GET_PMU_CTX();

	printk("->CPU%d owner [%d] ctx=%p\n", this_cpu, task ? task->pid : -1, ctx);

	psr = pfm_get_psr();

	printk("->CPU%d pmc0=0x%lx psr.pp=%d psr.up=%d dcr.pp=%d syst_info=0x%lx user_psr.up=%d user_psr.pp=%d\n",
		this_cpu,
		ia64_get_pmc(0),
		psr & IA64_PSR_PP ? 1 : 0,
		psr & IA64_PSR_UP ? 1 : 0,
		dcr & IA64_DCR_PP ? 1 : 0,
		info,
		ia64_psr(regs)->up,
		ia64_psr(regs)->pp);

	ia64_psr(regs)->up = 0;
	ia64_psr(regs)->pp = 0;

	t = &current->thread;

	for (i=1; PMC_IS_LAST(i) == 0; i++) {
		if (PMC_IS_IMPL(i) == 0) continue;
		printk("->CPU%d pmc[%d]=0x%lx thread_pmc[%d]=0x%lx\n", this_cpu, i, ia64_get_pmc(i), i, t->pmcs[i]);
	}

	for (i=1; PMD_IS_LAST(i) == 0; i++) {
		if (PMD_IS_IMPL(i) == 0) continue;
		printk("->CPU%d pmd[%d]=0x%lx thread_pmd[%d]=0x%lx\n", this_cpu, i, ia64_get_pmd(i), i, t->pmds[i]);
	}

	if (ctx) {
		printk("->CPU%d ctx_state=%d vaddr=%p addr=%p fd=%d ctx_task=[%d] saved_psr_up=0x%lx\n",
			this_cpu,
			ctx->ctx_state,
			ctx->ctx_smpl_vaddr,
			ctx->ctx_smpl_hdr,
			ctx->ctx_fd,
			ctx->ctx_task ? ctx->ctx_task->pid : -1,
			ctx->ctx_saved_psr_up);
	}
	local_irq_restore(flags);
}
/*
 * called from process.c:copy_thread(). task is new child.
 */
void
pfm_inherit(struct task_struct *task, struct pt_regs *regs)
{
	struct thread_struct *thread;

	DPRINT(("perfmon: pfm_inherit clearing state for [%d]\n", task->pid));

	thread = &task->thread;

	/*
	 * cut links inherited from parent (current)
	 */
	thread->pfm_context = NULL;

	PFM_SET_WORK_PENDING(task, 0);

	/*
	 * the psr bits are already set properly in copy_threads()
	 */
}
#else  /* !CONFIG_PERFMON */
asmlinkage long
sys_perfmonctl (int fd, int cmd, void *arg, int count)
{
	return -ENOSYS;
}
#endif /* CONFIG_PERFMON */