arch/ia64/kernel/perfmon.c
1 /*
2 * This file implements the perfmon-2 subsystem which is used
3 * to program the IA-64 Performance Monitoring Unit (PMU).
5 * The initial version of perfmon.c was written by
6 * Ganesh Venkitachalam, IBM Corp.
8 * Then it was modified for perfmon-1.x by Stephane Eranian and
9 * David Mosberger, Hewlett Packard Co.
11 * Version Perfmon-2.x is a rewrite of perfmon-1.x
12 * by Stephane Eranian, Hewlett Packard Co.
14 * Copyright (C) 1999-2005 Hewlett Packard Co
15 * Stephane Eranian <eranian@hpl.hp.com>
16 * David Mosberger-Tang <davidm@hpl.hp.com>
18 * More information about perfmon available at:
19 * http://www.hpl.hp.com/research/linux/perfmon
22 #include <linux/module.h>
23 #include <linux/kernel.h>
24 #include <linux/sched.h>
25 #include <linux/interrupt.h>
26 #include <linux/proc_fs.h>
27 #include <linux/seq_file.h>
28 #include <linux/init.h>
29 #include <linux/vmalloc.h>
30 #include <linux/mm.h>
31 #include <linux/sysctl.h>
32 #include <linux/list.h>
33 #include <linux/file.h>
34 #include <linux/poll.h>
35 #include <linux/vfs.h>
36 #include <linux/smp.h>
37 #include <linux/pagemap.h>
38 #include <linux/mount.h>
39 #include <linux/bitops.h>
40 #include <linux/capability.h>
41 #include <linux/rcupdate.h>
42 #include <linux/completion.h>
43 #include <linux/tracehook.h>
44 #include <linux/slab.h>
46 #include <asm/errno.h>
47 #include <asm/intrinsics.h>
48 #include <asm/page.h>
49 #include <asm/perfmon.h>
50 #include <asm/processor.h>
51 #include <asm/signal.h>
52 #include <asm/system.h>
53 #include <asm/uaccess.h>
54 #include <asm/delay.h>
56 #ifdef CONFIG_PERFMON
58 * perfmon context state
60 #define PFM_CTX_UNLOADED 1 /* context is not loaded onto any task */
61 #define PFM_CTX_LOADED 2 /* context is loaded onto a task */
62 #define PFM_CTX_MASKED 3 /* context is loaded but monitoring is masked due to overflow */
63 #define PFM_CTX_ZOMBIE 4 /* owner of the context is closing it */
65 #define PFM_INVALID_ACTIVATION (~0UL)
67 #define PFM_NUM_PMC_REGS 64 /* PMC save area for ctxsw */
68 #define PFM_NUM_PMD_REGS 64 /* PMD save area for ctxsw */
71 * depth of message queue
73 #define PFM_MAX_MSGS 32
74 #define PFM_CTXQ_EMPTY(g) ((g)->ctx_msgq_head == (g)->ctx_msgq_tail)
77 * type of a PMU register (bitmask).
78 * bitmask structure:
79 * bit0 : register implemented
80 * bit1 : end marker
81 * bit2-3 : reserved
82 * bit4 : pmc has pmc.pm
83 * bit5 : pmc controls a counter (has pmc.oi), pmd is used as counter
84 * bit6-7 : register type
85 * bit8-31: reserved
87 #define PFM_REG_NOTIMPL 0x0 /* not implemented at all */
88 #define PFM_REG_IMPL 0x1 /* register implemented */
89 #define PFM_REG_END 0x2 /* end marker */
90 #define PFM_REG_MONITOR (0x1<<4|PFM_REG_IMPL) /* a PMC with a pmc.pm field only */
91 #define PFM_REG_COUNTING (0x2<<4|PFM_REG_MONITOR) /* a monitor + pmc.oi+ PMD used as a counter */
92 #define PFM_REG_CONTROL (0x4<<4|PFM_REG_IMPL) /* PMU control register */
93 #define PFM_REG_CONFIG (0x8<<4|PFM_REG_IMPL) /* configuration register */
94 #define PFM_REG_BUFFER (0xc<<4|PFM_REG_IMPL) /* PMD used as buffer */
96 #define PMC_IS_LAST(i) (pmu_conf->pmc_desc[i].type & PFM_REG_END)
97 #define PMD_IS_LAST(i) (pmu_conf->pmd_desc[i].type & PFM_REG_END)
99 #define PMC_OVFL_NOTIFY(ctx, i) ((ctx)->ctx_pmds[i].flags & PFM_REGFL_OVFL_NOTIFY)
101 /* i assumed unsigned */
102 #define PMC_IS_IMPL(i) (i< PMU_MAX_PMCS && (pmu_conf->pmc_desc[i].type & PFM_REG_IMPL))
103 #define PMD_IS_IMPL(i) (i< PMU_MAX_PMDS && (pmu_conf->pmd_desc[i].type & PFM_REG_IMPL))
105 /* XXX: these assume that register i is implemented */
106 #define PMD_IS_COUNTING(i) ((pmu_conf->pmd_desc[i].type & PFM_REG_COUNTING) == PFM_REG_COUNTING)
107 #define PMC_IS_COUNTING(i) ((pmu_conf->pmc_desc[i].type & PFM_REG_COUNTING) == PFM_REG_COUNTING)
108 #define PMC_IS_MONITOR(i) ((pmu_conf->pmc_desc[i].type & PFM_REG_MONITOR) == PFM_REG_MONITOR)
109 #define PMC_IS_CONTROL(i) ((pmu_conf->pmc_desc[i].type & PFM_REG_CONTROL) == PFM_REG_CONTROL)
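/*
 * Worked example (illustrative, derived from the defines above): a counting
 * PMC has type PFM_REG_COUNTING = (0x2<<4)|PFM_REG_MONITOR = 0x31, so both
 * PMC_IS_COUNTING() and PMC_IS_MONITOR() are true for it, while a plain
 * monitor (type 0x11) only satisfies PMC_IS_MONITOR().
 */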
111 #define PMC_DFL_VAL(i) pmu_conf->pmc_desc[i].default_value
112 #define PMC_RSVD_MASK(i) pmu_conf->pmc_desc[i].reserved_mask
113 #define PMD_PMD_DEP(i) pmu_conf->pmd_desc[i].dep_pmd[0]
114 #define PMC_PMD_DEP(i) pmu_conf->pmc_desc[i].dep_pmd[0]
116 #define PFM_NUM_IBRS IA64_NUM_DBG_REGS
117 #define PFM_NUM_DBRS IA64_NUM_DBG_REGS
119 #define CTX_OVFL_NOBLOCK(c) ((c)->ctx_fl_block == 0)
120 #define CTX_HAS_SMPL(c) ((c)->ctx_fl_is_sampling)
121 #define PFM_CTX_TASK(h) (h)->ctx_task
123 #define PMU_PMC_OI 5 /* position of pmc.oi bit */
125 /* XXX: does not support more than 64 PMDs */
126 #define CTX_USED_PMD(ctx, mask) (ctx)->ctx_used_pmds[0] |= (mask)
127 #define CTX_IS_USED_PMD(ctx, c) (((ctx)->ctx_used_pmds[0] & (1UL << (c))) != 0UL)
129 #define CTX_USED_MONITOR(ctx, mask) (ctx)->ctx_used_monitors[0] |= (mask)
131 #define CTX_USED_IBR(ctx,n) (ctx)->ctx_used_ibrs[(n)>>6] |= 1UL<< ((n) % 64)
132 #define CTX_USED_DBR(ctx,n) (ctx)->ctx_used_dbrs[(n)>>6] |= 1UL<< ((n) % 64)
133 #define CTX_USES_DBREGS(ctx) (((pfm_context_t *)(ctx))->ctx_fl_using_dbreg==1)
134 #define PFM_CODE_RR 0 /* requesting code range restriction */
135 #define PFM_DATA_RR 1 /* requesting data range restriction */
137 #define PFM_CPUINFO_CLEAR(v) pfm_get_cpu_var(pfm_syst_info) &= ~(v)
138 #define PFM_CPUINFO_SET(v) pfm_get_cpu_var(pfm_syst_info) |= (v)
139 #define PFM_CPUINFO_GET() pfm_get_cpu_var(pfm_syst_info)
141 #define RDEP(x) (1UL<<(x))
144 * context protection macros
145 * in SMP:
146 * - we need to protect against CPU concurrency (spin_lock)
147 * - we need to protect against PMU overflow interrupts (local_irq_disable)
148 * in UP:
149 * - we need to protect against PMU overflow interrupts (local_irq_disable)
151 * spin_lock_irqsave()/spin_unlock_irqrestore():
152 * in SMP: local_irq_disable + spin_lock
153 * in UP : local_irq_disable
155 * spin_lock()/spin_unlock():
156 * in UP : removed automatically
157 * in SMP: protect against context accesses from other CPU. interrupts
158 * are not masked. This is useful for the PMU interrupt handler
159 * because we know we will not get PMU concurrency in that code.
161 #define PROTECT_CTX(c, f) \
162 do { \
163 DPRINT(("spinlock_irq_save ctx %p by [%d]\n", c, task_pid_nr(current))); \
164 spin_lock_irqsave(&(c)->ctx_lock, f); \
165 DPRINT(("spinlocked ctx %p by [%d]\n", c, task_pid_nr(current))); \
166 } while(0)
168 #define UNPROTECT_CTX(c, f) \
169 do { \
170 DPRINT(("spinlock_irq_restore ctx %p by [%d]\n", c, task_pid_nr(current))); \
171 spin_unlock_irqrestore(&(c)->ctx_lock, f); \
172 } while(0)
174 #define PROTECT_CTX_NOPRINT(c, f) \
175 do { \
176 spin_lock_irqsave(&(c)->ctx_lock, f); \
177 } while(0)
180 #define UNPROTECT_CTX_NOPRINT(c, f) \
181 do { \
182 spin_unlock_irqrestore(&(c)->ctx_lock, f); \
183 } while(0)
186 #define PROTECT_CTX_NOIRQ(c) \
187 do { \
188 spin_lock(&(c)->ctx_lock); \
189 } while(0)
191 #define UNPROTECT_CTX_NOIRQ(c) \
192 do { \
193 spin_unlock(&(c)->ctx_lock); \
194 } while(0)
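/*
 * Minimal usage sketch of the protection macros above (illustrative only,
 * not part of the original file; pfm_example_touch_ctx is a made-up name).
 * The flags variable receives the saved interrupt state from
 * spin_lock_irqsave() and must be handed back on unlock.
 */
#if 0
static void pfm_example_touch_ctx(pfm_context_t *ctx)
{
	unsigned long flags;

	PROTECT_CTX(ctx, flags);	/* ctx_lock held, local irqs masked */
	ctx->ctx_msgq_head = ctx->ctx_msgq_tail = 0;
	UNPROTECT_CTX(ctx, flags);	/* drop lock, restore irq state */
}
#endif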
197 #ifdef CONFIG_SMP
199 #define GET_ACTIVATION() pfm_get_cpu_var(pmu_activation_number)
200 #define INC_ACTIVATION() pfm_get_cpu_var(pmu_activation_number)++
201 #define SET_ACTIVATION(c) (c)->ctx_last_activation = GET_ACTIVATION()
203 #else /* !CONFIG_SMP */
204 #define SET_ACTIVATION(t) do {} while(0)
205 #define GET_ACTIVATION(t) do {} while(0)
206 #define INC_ACTIVATION(t) do {} while(0)
207 #endif /* CONFIG_SMP */
209 #define SET_PMU_OWNER(t, c) do { pfm_get_cpu_var(pmu_owner) = (t); pfm_get_cpu_var(pmu_ctx) = (c); } while(0)
210 #define GET_PMU_OWNER() pfm_get_cpu_var(pmu_owner)
211 #define GET_PMU_CTX() pfm_get_cpu_var(pmu_ctx)
213 #define LOCK_PFS(g) spin_lock_irqsave(&pfm_sessions.pfs_lock, g)
214 #define UNLOCK_PFS(g) spin_unlock_irqrestore(&pfm_sessions.pfs_lock, g)
216 #define PFM_REG_RETFLAG_SET(flags, val) do { flags &= ~PFM_REG_RETFL_MASK; flags |= (val); } while(0)
219 * cmp0 must be the value of pmc0
221 #define PMC0_HAS_OVFL(cmp0) (cmp0 & ~0x1UL)
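/*
 * Illustrative note: bit 0 of pmc0 is the freeze bit (see pfm_freeze_pmu()
 * below, which writes 1UL to PMC0), so PMC0_HAS_OVFL() masks it off and
 * reports whether any of the remaining (overflow) bits are set, e.g.
 * PMC0_HAS_OVFL(0x11) is non-zero because bit 4 is also set.
 */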
223 #define PFMFS_MAGIC 0xa0b4d889
226 * debugging
228 #define PFM_DEBUGGING 1
229 #ifdef PFM_DEBUGGING
230 #define DPRINT(a) \
231 do { \
232 if (unlikely(pfm_sysctl.debug >0)) { printk("%s.%d: CPU%d [%d] ", __func__, __LINE__, smp_processor_id(), task_pid_nr(current)); printk a; } \
233 } while (0)
235 #define DPRINT_ovfl(a) \
236 do { \
237 if (unlikely(pfm_sysctl.debug > 0 && pfm_sysctl.debug_ovfl >0)) { printk("%s.%d: CPU%d [%d] ", __func__, __LINE__, smp_processor_id(), task_pid_nr(current)); printk a; } \
238 } while (0)
239 #endif
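/*
 * Usage note (illustrative): DPRINT takes one parenthesized printf-style
 * argument list, hence the double parentheses at call sites, e.g.
 *	DPRINT(("ctx=%p state=%d\n", ctx, ctx->ctx_state));
 * Output only appears when the kernel.perfmon.debug sysctl is non-zero.
 */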
242 * 64-bit software counter structure
244 * the next_reset_type is applied to the next call to pfm_reset_regs()
246 typedef struct {
247 unsigned long val; /* virtual 64bit counter value */
248 unsigned long lval; /* last reset value */
249 unsigned long long_reset; /* reset value on sampling overflow */
250 unsigned long short_reset; /* reset value on overflow */
251 unsigned long reset_pmds[4]; /* which other pmds to reset when this counter overflows */
252 unsigned long smpl_pmds[4]; /* which pmds are accessed when this counter overflows */
253 unsigned long seed; /* seed for random-number generator */
254 unsigned long mask; /* mask for random-number generator */
255 unsigned int flags; /* notify/do not notify */
256 unsigned long eventid; /* overflow event identifier */
257 } pfm_counter_t;
260 * context flags
262 typedef struct {
263 unsigned int block:1; /* when 1, task will be blocked on user notifications */
264 unsigned int system:1; /* do system wide monitoring */
265 unsigned int using_dbreg:1; /* using range restrictions (debug registers) */
266 unsigned int is_sampling:1; /* true if using a custom format */
267 unsigned int excl_idle:1; /* exclude idle task in system wide session */
268 unsigned int going_zombie:1; /* context is zombie (MASKED+blocking) */
269 unsigned int trap_reason:2; /* reason for going into pfm_handle_work() */
270 unsigned int no_msg:1; /* no message sent on overflow */
271 unsigned int can_restart:1; /* allowed to issue a PFM_RESTART */
272 unsigned int reserved:22;
273 } pfm_context_flags_t;
275 #define PFM_TRAP_REASON_NONE 0x0 /* default value */
276 #define PFM_TRAP_REASON_BLOCK 0x1 /* we need to block on overflow */
277 #define PFM_TRAP_REASON_RESET 0x2 /* we need to reset PMDs */
281 * perfmon context: encapsulates all the state of a monitoring session
284 typedef struct pfm_context {
285 spinlock_t ctx_lock; /* context protection */
287 pfm_context_flags_t ctx_flags; /* bitmask of flags (block reason incl.) */
288 unsigned int ctx_state; /* state: active/inactive (no bitfield) */
290 struct task_struct *ctx_task; /* task to which context is attached */
292 unsigned long ctx_ovfl_regs[4]; /* which registers overflowed (notification) */
294 struct completion ctx_restart_done; /* use for blocking notification mode */
296 unsigned long ctx_used_pmds[4]; /* bitmask of PMD used */
297 unsigned long ctx_all_pmds[4]; /* bitmask of all accessible PMDs */
298 unsigned long ctx_reload_pmds[4]; /* bitmask of force reload PMD on ctxsw in */
300 unsigned long ctx_all_pmcs[4]; /* bitmask of all accessible PMCs */
301 unsigned long ctx_reload_pmcs[4]; /* bitmask of force reload PMC on ctxsw in */
302 unsigned long ctx_used_monitors[4]; /* bitmask of monitor PMC being used */
304 unsigned long ctx_pmcs[PFM_NUM_PMC_REGS]; /* saved copies of PMC values */
306 unsigned int ctx_used_ibrs[1]; /* bitmask of used IBR (speedup ctxsw in) */
307 unsigned int ctx_used_dbrs[1]; /* bitmask of used DBR (speedup ctxsw in) */
308 unsigned long ctx_dbrs[IA64_NUM_DBG_REGS]; /* DBR values (cache) when not loaded */
309 unsigned long ctx_ibrs[IA64_NUM_DBG_REGS]; /* IBR values (cache) when not loaded */
311 pfm_counter_t ctx_pmds[PFM_NUM_PMD_REGS]; /* software state for PMDS */
313 unsigned long th_pmcs[PFM_NUM_PMC_REGS]; /* PMC thread save state */
314 unsigned long th_pmds[PFM_NUM_PMD_REGS]; /* PMD thread save state */
316 unsigned long ctx_saved_psr_up; /* only contains psr.up value */
318 unsigned long ctx_last_activation; /* context last activation number for last_cpu */
319 unsigned int ctx_last_cpu; /* CPU id of current or last CPU used (SMP only) */
320 unsigned int ctx_cpu; /* cpu to which perfmon is applied (system wide) */
322 int ctx_fd; /* file descriptor used by this context */
323 pfm_ovfl_arg_t ctx_ovfl_arg; /* argument to custom buffer format handler */
325 pfm_buffer_fmt_t *ctx_buf_fmt; /* buffer format callbacks */
326 void *ctx_smpl_hdr; /* points to sampling buffer header kernel vaddr */
327 unsigned long ctx_smpl_size; /* size of sampling buffer */
328 void *ctx_smpl_vaddr; /* user level virtual address of smpl buffer */
330 wait_queue_head_t ctx_msgq_wait;
331 pfm_msg_t ctx_msgq[PFM_MAX_MSGS];
332 int ctx_msgq_head;
333 int ctx_msgq_tail;
334 struct fasync_struct *ctx_async_queue;
336 wait_queue_head_t ctx_zombieq; /* termination cleanup wait queue */
337 } pfm_context_t;
340 * magic number used to verify that structure is really
341 * a perfmon context
343 #define PFM_IS_FILE(f) ((f)->f_op == &pfm_file_ops)
345 #define PFM_GET_CTX(t) ((pfm_context_t *)(t)->thread.pfm_context)
347 #ifdef CONFIG_SMP
348 #define SET_LAST_CPU(ctx, v) (ctx)->ctx_last_cpu = (v)
349 #define GET_LAST_CPU(ctx) (ctx)->ctx_last_cpu
350 #else
351 #define SET_LAST_CPU(ctx, v) do {} while(0)
352 #define GET_LAST_CPU(ctx) do {} while(0)
353 #endif
356 #define ctx_fl_block ctx_flags.block
357 #define ctx_fl_system ctx_flags.system
358 #define ctx_fl_using_dbreg ctx_flags.using_dbreg
359 #define ctx_fl_is_sampling ctx_flags.is_sampling
360 #define ctx_fl_excl_idle ctx_flags.excl_idle
361 #define ctx_fl_going_zombie ctx_flags.going_zombie
362 #define ctx_fl_trap_reason ctx_flags.trap_reason
363 #define ctx_fl_no_msg ctx_flags.no_msg
364 #define ctx_fl_can_restart ctx_flags.can_restart
366 #define PFM_SET_WORK_PENDING(t, v) do { (t)->thread.pfm_needs_checking = v; } while(0);
367 #define PFM_GET_WORK_PENDING(t) (t)->thread.pfm_needs_checking
370 * global information about all sessions
371 * mostly used to synchronize between system wide and per-process
373 typedef struct {
374 spinlock_t pfs_lock; /* lock the structure */
376 unsigned int pfs_task_sessions; /* number of per task sessions */
377 unsigned int pfs_sys_sessions; /* number of per system wide sessions */
378 unsigned int pfs_sys_use_dbregs; /* incremented when a system wide session uses debug regs */
379 unsigned int pfs_ptrace_use_dbregs; /* incremented when a process uses debug regs */
380 struct task_struct *pfs_sys_session[NR_CPUS]; /* point to task owning a system-wide session */
381 } pfm_session_t;
384 * information about a PMC or PMD.
385 * dep_pmd[]: a bitmask of dependent PMD registers
386 * dep_pmc[]: a bitmask of dependent PMC registers
388 typedef int (*pfm_reg_check_t)(struct task_struct *task, pfm_context_t *ctx, unsigned int cnum, unsigned long *val, struct pt_regs *regs);
389 typedef struct {
390 unsigned int type;
391 int pm_pos;
392 unsigned long default_value; /* power-on default value */
393 unsigned long reserved_mask; /* bitmask of reserved bits */
394 pfm_reg_check_t read_check;
395 pfm_reg_check_t write_check;
396 unsigned long dep_pmd[4];
397 unsigned long dep_pmc[4];
398 } pfm_reg_desc_t;
400 /* assume cnum is a valid monitor */
401 #define PMC_PM(cnum, val) (((val) >> (pmu_conf->pmc_desc[cnum].pm_pos)) & 0x1)
404 * This structure is initialized at boot time and contains
405 * a description of the PMU main characteristics.
407 * If the probe function is defined, detection is based
408 * on its return value:
409 * - 0 means recognized PMU
410 * - anything else means not supported
411 * When the probe function is not defined, then the pmu_family field
412 * is used and it must match the host CPU family such that:
413 * - cpu->family & config->pmu_family != 0
415 typedef struct {
416 unsigned long ovfl_val; /* overflow value for counters */
418 pfm_reg_desc_t *pmc_desc; /* detailed PMC register dependencies descriptions */
419 pfm_reg_desc_t *pmd_desc; /* detailed PMD register dependencies descriptions */
421 unsigned int num_pmcs; /* number of PMCS: computed at init time */
422 unsigned int num_pmds; /* number of PMDS: computed at init time */
423 unsigned long impl_pmcs[4]; /* bitmask of implemented PMCS */
424 unsigned long impl_pmds[4]; /* bitmask of implemented PMDS */
426 char *pmu_name; /* PMU family name */
427 unsigned int pmu_family; /* cpuid family pattern used to identify pmu */
428 unsigned int flags; /* pmu specific flags */
429 unsigned int num_ibrs; /* number of IBRS: computed at init time */
430 unsigned int num_dbrs; /* number of DBRS: computed at init time */
431 unsigned int num_counters; /* PMC/PMD counting pairs : computed at init time */
432 int (*probe)(void); /* customized probe routine */
433 unsigned int use_rr_dbregs:1; /* set if debug registers used for range restriction */
434 } pmu_config_t;
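/*
 * Sketch of a probe routine as described above (illustrative only; the
 * family value and the use of local_cpu_data are hypothetical, not taken
 * from this file): return 0 when the running CPU's PMU is the one this
 * pmu_config_t describes, anything else to reject it.
 */
#if 0
static int pfm_example_probe(void)
{
	return local_cpu_data->family == 0x20 ? 0 : -1;
}
#endif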
436 * PMU specific flags
438 #define PFM_PMU_IRQ_RESEND 1 /* PMU needs explicit IRQ resend */
441 * debug register related type definitions
443 typedef struct {
444 unsigned long ibr_mask:56;
445 unsigned long ibr_plm:4;
446 unsigned long ibr_ig:3;
447 unsigned long ibr_x:1;
448 } ibr_mask_reg_t;
450 typedef struct {
451 unsigned long dbr_mask:56;
452 unsigned long dbr_plm:4;
453 unsigned long dbr_ig:2;
454 unsigned long dbr_w:1;
455 unsigned long dbr_r:1;
456 } dbr_mask_reg_t;
458 typedef union {
459 unsigned long val;
460 ibr_mask_reg_t ibr;
461 dbr_mask_reg_t dbr;
462 } dbreg_t;
466 * perfmon command descriptions
468 typedef struct {
469 int (*cmd_func)(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
470 char *cmd_name;
471 int cmd_flags;
472 unsigned int cmd_narg;
473 size_t cmd_argsize;
474 int (*cmd_getsize)(void *arg, size_t *sz);
475 } pfm_cmd_desc_t;
477 #define PFM_CMD_FD 0x01 /* command requires a file descriptor */
478 #define PFM_CMD_ARG_READ 0x02 /* command must read argument(s) */
479 #define PFM_CMD_ARG_RW 0x04 /* command must read/write argument(s) */
480 #define PFM_CMD_STOP 0x08 /* command does not work on zombie context */
483 #define PFM_CMD_NAME(cmd) pfm_cmd_tab[(cmd)].cmd_name
484 #define PFM_CMD_READ_ARG(cmd) (pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_ARG_READ)
485 #define PFM_CMD_RW_ARG(cmd) (pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_ARG_RW)
486 #define PFM_CMD_USE_FD(cmd) (pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_FD)
487 #define PFM_CMD_STOPPED(cmd) (pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_STOP)
489 #define PFM_CMD_ARG_MANY -1 /* cannot be zero */
491 typedef struct {
492 unsigned long pfm_spurious_ovfl_intr_count; /* keep track of spurious ovfl interrupts */
493 unsigned long pfm_replay_ovfl_intr_count; /* keep track of replayed ovfl interrupts */
494 unsigned long pfm_ovfl_intr_count; /* keep track of ovfl interrupts */
495 unsigned long pfm_ovfl_intr_cycles; /* cycles spent processing ovfl interrupts */
496 unsigned long pfm_ovfl_intr_cycles_min; /* min cycles spent processing ovfl interrupts */
497 unsigned long pfm_ovfl_intr_cycles_max; /* max cycles spent processing ovfl interrupts */
498 unsigned long pfm_smpl_handler_calls;
499 unsigned long pfm_smpl_handler_cycles;
500 char pad[SMP_CACHE_BYTES] ____cacheline_aligned;
501 } pfm_stats_t;
504 * perfmon internal variables
506 static pfm_stats_t pfm_stats[NR_CPUS];
507 static pfm_session_t pfm_sessions; /* global sessions information */
509 static DEFINE_SPINLOCK(pfm_alt_install_check);
510 static pfm_intr_handler_desc_t *pfm_alt_intr_handler;
512 static struct proc_dir_entry *perfmon_dir;
513 static pfm_uuid_t pfm_null_uuid = {0,};
515 static spinlock_t pfm_buffer_fmt_lock;
516 static LIST_HEAD(pfm_buffer_fmt_list);
518 static pmu_config_t *pmu_conf;
520 /* sysctl() controls */
521 pfm_sysctl_t pfm_sysctl;
522 EXPORT_SYMBOL(pfm_sysctl);
524 static ctl_table pfm_ctl_table[]={
526 .procname = "debug",
527 .data = &pfm_sysctl.debug,
528 .maxlen = sizeof(int),
529 .mode = 0666,
530 .proc_handler = proc_dointvec,
533 .procname = "debug_ovfl",
534 .data = &pfm_sysctl.debug_ovfl,
535 .maxlen = sizeof(int),
536 .mode = 0666,
537 .proc_handler = proc_dointvec,
540 .procname = "fastctxsw",
541 .data = &pfm_sysctl.fastctxsw,
542 .maxlen = sizeof(int),
543 .mode = 0600,
544 .proc_handler = proc_dointvec,
547 .procname = "expert_mode",
548 .data = &pfm_sysctl.expert_mode,
549 .maxlen = sizeof(int),
550 .mode = 0600,
551 .proc_handler = proc_dointvec,
555 static ctl_table pfm_sysctl_dir[] = {
557 .procname = "perfmon",
558 .mode = 0555,
559 .child = pfm_ctl_table,
563 static ctl_table pfm_sysctl_root[] = {
565 .procname = "kernel",
566 .mode = 0555,
567 .child = pfm_sysctl_dir,
571 static struct ctl_table_header *pfm_sysctl_header;
573 static int pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
575 #define pfm_get_cpu_var(v) __ia64_per_cpu_var(v)
576 #define pfm_get_cpu_data(a,b) per_cpu(a, b)
578 static inline void
579 pfm_put_task(struct task_struct *task)
581 if (task != current) put_task_struct(task);
584 static inline void
585 pfm_reserve_page(unsigned long a)
587 SetPageReserved(vmalloc_to_page((void *)a));
589 static inline void
590 pfm_unreserve_page(unsigned long a)
592 ClearPageReserved(vmalloc_to_page((void*)a));
595 static inline unsigned long
596 pfm_protect_ctx_ctxsw(pfm_context_t *x)
598 spin_lock(&(x)->ctx_lock);
599 return 0UL;
602 static inline void
603 pfm_unprotect_ctx_ctxsw(pfm_context_t *x, unsigned long f)
605 spin_unlock(&(x)->ctx_lock);
608 static inline unsigned int
609 pfm_do_munmap(struct mm_struct *mm, unsigned long addr, size_t len, int acct)
611 return do_munmap(mm, addr, len);
614 static inline unsigned long
615 pfm_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags, unsigned long exec)
617 return get_unmapped_area(file, addr, len, pgoff, flags);
620 /* forward declaration */
621 static const struct dentry_operations pfmfs_dentry_operations;
623 static struct dentry *
624 pfmfs_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data)
626 return mount_pseudo(fs_type, "pfm:", NULL, &pfmfs_dentry_operations,
627 PFMFS_MAGIC);
630 static struct file_system_type pfm_fs_type = {
631 .name = "pfmfs",
632 .mount = pfmfs_mount,
633 .kill_sb = kill_anon_super,
636 DEFINE_PER_CPU(unsigned long, pfm_syst_info);
637 DEFINE_PER_CPU(struct task_struct *, pmu_owner);
638 DEFINE_PER_CPU(pfm_context_t *, pmu_ctx);
639 DEFINE_PER_CPU(unsigned long, pmu_activation_number);
640 EXPORT_PER_CPU_SYMBOL_GPL(pfm_syst_info);
643 /* forward declaration */
644 static const struct file_operations pfm_file_ops;
647 * forward declarations
649 #ifndef CONFIG_SMP
650 static void pfm_lazy_save_regs (struct task_struct *ta);
651 #endif
653 void dump_pmu_state(const char *);
654 static int pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
656 #include "perfmon_itanium.h"
657 #include "perfmon_mckinley.h"
658 #include "perfmon_montecito.h"
659 #include "perfmon_generic.h"
661 static pmu_config_t *pmu_confs[]={
662 &pmu_conf_mont,
663 &pmu_conf_mck,
664 &pmu_conf_ita,
665 &pmu_conf_gen, /* must be last */
666 NULL
670 static int pfm_end_notify_user(pfm_context_t *ctx);
672 static inline void
673 pfm_clear_psr_pp(void)
675 ia64_rsm(IA64_PSR_PP);
676 ia64_srlz_i();
679 static inline void
680 pfm_set_psr_pp(void)
682 ia64_ssm(IA64_PSR_PP);
683 ia64_srlz_i();
686 static inline void
687 pfm_clear_psr_up(void)
689 ia64_rsm(IA64_PSR_UP);
690 ia64_srlz_i();
693 static inline void
694 pfm_set_psr_up(void)
696 ia64_ssm(IA64_PSR_UP);
697 ia64_srlz_i();
700 static inline unsigned long
701 pfm_get_psr(void)
703 unsigned long tmp;
704 tmp = ia64_getreg(_IA64_REG_PSR);
705 ia64_srlz_i();
706 return tmp;
709 static inline void
710 pfm_set_psr_l(unsigned long val)
712 ia64_setreg(_IA64_REG_PSR_L, val);
713 ia64_srlz_i();
716 static inline void
717 pfm_freeze_pmu(void)
719 ia64_set_pmc(0,1UL);
720 ia64_srlz_d();
723 static inline void
724 pfm_unfreeze_pmu(void)
726 ia64_set_pmc(0,0UL);
727 ia64_srlz_d();
730 static inline void
731 pfm_restore_ibrs(unsigned long *ibrs, unsigned int nibrs)
733 int i;
735 for (i=0; i < nibrs; i++) {
736 ia64_set_ibr(i, ibrs[i]);
737 ia64_dv_serialize_instruction();
739 ia64_srlz_i();
742 static inline void
743 pfm_restore_dbrs(unsigned long *dbrs, unsigned int ndbrs)
745 int i;
747 for (i=0; i < ndbrs; i++) {
748 ia64_set_dbr(i, dbrs[i]);
749 ia64_dv_serialize_data();
751 ia64_srlz_d();
755 * PMD[i] must be a counter. no check is made
757 static inline unsigned long
758 pfm_read_soft_counter(pfm_context_t *ctx, int i)
760 return ctx->ctx_pmds[i].val + (ia64_get_pmd(i) & pmu_conf->ovfl_val);
764 * PMD[i] must be a counter. no check is made
766 static inline void
767 pfm_write_soft_counter(pfm_context_t *ctx, int i, unsigned long val)
769 unsigned long ovfl_val = pmu_conf->ovfl_val;
771 ctx->ctx_pmds[i].val = val & ~ovfl_val;
773 * writing to the unimplemented part is ignored, so we do not need to
774 * mask off top part
776 ia64_set_pmd(i, val & ovfl_val);
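/*
 * Worked example (illustrative, assuming a 47-bit hardware counter, i.e.
 * ovfl_val == (1UL<<47)-1): pfm_write_soft_counter(ctx, i, 0x0001000000001234UL)
 * keeps 0x0001000000000000 in ctx_pmds[i].val and puts 0x1234 in the hardware
 * PMD; pfm_read_soft_counter() later adds the two halves back together to
 * return the full 64-bit virtual value.
 */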
779 static pfm_msg_t *
780 pfm_get_new_msg(pfm_context_t *ctx)
782 int idx, next;
784 next = (ctx->ctx_msgq_tail+1) % PFM_MAX_MSGS;
786 DPRINT(("ctx_fd=%p head=%d tail=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail));
787 if (next == ctx->ctx_msgq_head) return NULL;
789 idx = ctx->ctx_msgq_tail;
790 ctx->ctx_msgq_tail = next;
792 DPRINT(("ctx=%p head=%d tail=%d msg=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail, idx));
794 return ctx->ctx_msgq+idx;
797 static pfm_msg_t *
798 pfm_get_next_msg(pfm_context_t *ctx)
800 pfm_msg_t *msg;
802 DPRINT(("ctx=%p head=%d tail=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail));
804 if (PFM_CTXQ_EMPTY(ctx)) return NULL;
807 * get oldest message
809 msg = ctx->ctx_msgq+ctx->ctx_msgq_head;
812 * and move forward
814 ctx->ctx_msgq_head = (ctx->ctx_msgq_head+1) % PFM_MAX_MSGS;
816 DPRINT(("ctx=%p head=%d tail=%d type=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail, msg->pfm_gen_msg.msg_type));
818 return msg;
821 static void
822 pfm_reset_msgq(pfm_context_t *ctx)
824 ctx->ctx_msgq_head = ctx->ctx_msgq_tail = 0;
825 DPRINT(("ctx=%p msgq reset\n", ctx));
828 static void *
829 pfm_rvmalloc(unsigned long size)
831 void *mem;
832 unsigned long addr;
834 size = PAGE_ALIGN(size);
835 mem = vzalloc(size);
836 if (mem) {
837 //printk("perfmon: CPU%d pfm_rvmalloc(%ld)=%p\n", smp_processor_id(), size, mem);
838 addr = (unsigned long)mem;
839 while (size > 0) {
840 pfm_reserve_page(addr);
841 addr+=PAGE_SIZE;
842 size-=PAGE_SIZE;
845 return mem;
848 static void
849 pfm_rvfree(void *mem, unsigned long size)
851 unsigned long addr;
853 if (mem) {
854 DPRINT(("freeing physical buffer @%p size=%lu\n", mem, size));
855 addr = (unsigned long) mem;
856 while ((long) size > 0) {
857 pfm_unreserve_page(addr);
858 addr+=PAGE_SIZE;
859 size-=PAGE_SIZE;
861 vfree(mem);
863 return;
866 static pfm_context_t *
867 pfm_context_alloc(int ctx_flags)
869 pfm_context_t *ctx;
872 * allocate context descriptor
873 * must be able to free with interrupts disabled
875 ctx = kzalloc(sizeof(pfm_context_t), GFP_KERNEL);
876 if (ctx) {
877 DPRINT(("alloc ctx @%p\n", ctx));
880 * init context protection lock
882 spin_lock_init(&ctx->ctx_lock);
885 * context is unloaded
887 ctx->ctx_state = PFM_CTX_UNLOADED;
890 * initialization of context's flags
892 ctx->ctx_fl_block = (ctx_flags & PFM_FL_NOTIFY_BLOCK) ? 1 : 0;
893 ctx->ctx_fl_system = (ctx_flags & PFM_FL_SYSTEM_WIDE) ? 1: 0;
894 ctx->ctx_fl_no_msg = (ctx_flags & PFM_FL_OVFL_NO_MSG) ? 1: 0;
896 * will move to set properties
897 * ctx->ctx_fl_excl_idle = (ctx_flags & PFM_FL_EXCL_IDLE) ? 1: 0;
901 * init restart semaphore to locked
903 init_completion(&ctx->ctx_restart_done);
906 * activation is used in SMP only
908 ctx->ctx_last_activation = PFM_INVALID_ACTIVATION;
909 SET_LAST_CPU(ctx, -1);
912 * initialize notification message queue
914 ctx->ctx_msgq_head = ctx->ctx_msgq_tail = 0;
915 init_waitqueue_head(&ctx->ctx_msgq_wait);
916 init_waitqueue_head(&ctx->ctx_zombieq);
919 return ctx;
922 static void
923 pfm_context_free(pfm_context_t *ctx)
925 if (ctx) {
926 DPRINT(("free ctx @%p\n", ctx));
927 kfree(ctx);
931 static void
932 pfm_mask_monitoring(struct task_struct *task)
934 pfm_context_t *ctx = PFM_GET_CTX(task);
935 unsigned long mask, val, ovfl_mask;
936 int i;
938 DPRINT_ovfl(("masking monitoring for [%d]\n", task_pid_nr(task)));
940 ovfl_mask = pmu_conf->ovfl_val;
942 * monitoring can only be masked as a result of a valid
943 * counter overflow. In UP, it means that the PMU still
944 * has an owner. Note that the owner can be different
945 * from the current task. However the PMU state belongs
946 * to the owner.
947 * In SMP, a valid overflow only happens when task is
948 * current. Therefore if we come here, we know that
949 * the PMU state belongs to the current task, therefore
950 * we can access the live registers.
952 * So in both cases, the live register contains the owner's
953 * state. We can ONLY touch the PMU registers and NOT the PSR.
955 * As a consequence to this call, the ctx->th_pmds[] array
956 * contains stale information which must be ignored
957 * when context is reloaded AND monitoring is active (see
958 * pfm_restart).
960 mask = ctx->ctx_used_pmds[0];
961 for (i = 0; mask; i++, mask>>=1) {
962 /* skip non used pmds */
963 if ((mask & 0x1) == 0) continue;
964 val = ia64_get_pmd(i);
966 if (PMD_IS_COUNTING(i)) {
968 * we rebuild the full 64 bit value of the counter
970 ctx->ctx_pmds[i].val += (val & ovfl_mask);
971 } else {
972 ctx->ctx_pmds[i].val = val;
974 DPRINT_ovfl(("pmd[%d]=0x%lx hw_pmd=0x%lx\n",
976 ctx->ctx_pmds[i].val,
977 val & ovfl_mask));
980 * mask monitoring by setting the privilege level to 0
981 * we cannot use psr.pp/psr.up for this, it is controlled by
982 * the user
984 * if task is current, modify actual registers, otherwise modify
985 * thread save state, i.e., what will be restored in pfm_load_regs()
987 mask = ctx->ctx_used_monitors[0] >> PMU_FIRST_COUNTER;
988 for(i= PMU_FIRST_COUNTER; mask; i++, mask>>=1) {
989 if ((mask & 0x1) == 0UL) continue;
990 ia64_set_pmc(i, ctx->th_pmcs[i] & ~0xfUL);
991 ctx->th_pmcs[i] &= ~0xfUL;
992 DPRINT_ovfl(("pmc[%d]=0x%lx\n", i, ctx->th_pmcs[i]));
995 * make all of this visible
997 ia64_srlz_d();
1001 * must always be done with task == current
1003 * context must be in MASKED state when calling
1005 static void
1006 pfm_restore_monitoring(struct task_struct *task)
1008 pfm_context_t *ctx = PFM_GET_CTX(task);
1009 unsigned long mask, ovfl_mask;
1010 unsigned long psr, val;
1011 int i, is_system;
1013 is_system = ctx->ctx_fl_system;
1014 ovfl_mask = pmu_conf->ovfl_val;
1016 if (task != current) {
1017 printk(KERN_ERR "perfmon.%d: invalid task[%d] current[%d]\n", __LINE__, task_pid_nr(task), task_pid_nr(current));
1018 return;
1020 if (ctx->ctx_state != PFM_CTX_MASKED) {
1021 printk(KERN_ERR "perfmon.%d: task[%d] current[%d] invalid state=%d\n", __LINE__,
1022 task_pid_nr(task), task_pid_nr(current), ctx->ctx_state);
1023 return;
1025 psr = pfm_get_psr();
1027 * monitoring is masked via the PMC.
1028 * As we restore their value, we do not want each counter to
1029 * restart right away. We stop monitoring using the PSR,
1030 * restore the PMC (and PMD) and then re-establish the psr
1031 * as it was. Note that there can be no pending overflow at
1032 * this point, because monitoring was MASKED.
1034 * system-wide session are pinned and self-monitoring
1036 if (is_system && (PFM_CPUINFO_GET() & PFM_CPUINFO_DCR_PP)) {
1037 /* disable dcr pp */
1038 ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) & ~IA64_DCR_PP);
1039 pfm_clear_psr_pp();
1040 } else {
1041 pfm_clear_psr_up();
1044 * first, we restore the PMD
1046 mask = ctx->ctx_used_pmds[0];
1047 for (i = 0; mask; i++, mask>>=1) {
1048 /* skip non used pmds */
1049 if ((mask & 0x1) == 0) continue;
1051 if (PMD_IS_COUNTING(i)) {
1053 * we split the 64bit value according to
1054 * counter width
1056 val = ctx->ctx_pmds[i].val & ovfl_mask;
1057 ctx->ctx_pmds[i].val &= ~ovfl_mask;
1058 } else {
1059 val = ctx->ctx_pmds[i].val;
1061 ia64_set_pmd(i, val);
1063 DPRINT(("pmd[%d]=0x%lx hw_pmd=0x%lx\n",
1065 ctx->ctx_pmds[i].val,
1066 val));
1069 * restore the PMCs
1071 mask = ctx->ctx_used_monitors[0] >> PMU_FIRST_COUNTER;
1072 for(i= PMU_FIRST_COUNTER; mask; i++, mask>>=1) {
1073 if ((mask & 0x1) == 0UL) continue;
1074 ctx->th_pmcs[i] = ctx->ctx_pmcs[i];
1075 ia64_set_pmc(i, ctx->th_pmcs[i]);
1076 DPRINT(("[%d] pmc[%d]=0x%lx\n",
1077 task_pid_nr(task), i, ctx->th_pmcs[i]));
1079 ia64_srlz_d();
1082 * must restore DBR/IBR because could be modified while masked
1083 * XXX: need to optimize
1085 if (ctx->ctx_fl_using_dbreg) {
1086 pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
1087 pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
1091 * now restore PSR
1093 if (is_system && (PFM_CPUINFO_GET() & PFM_CPUINFO_DCR_PP)) {
1094 /* enable dcr pp */
1095 ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) | IA64_DCR_PP);
1096 ia64_srlz_i();
1098 pfm_set_psr_l(psr);
1101 static inline void
1102 pfm_save_pmds(unsigned long *pmds, unsigned long mask)
1104 int i;
1106 ia64_srlz_d();
1108 for (i=0; mask; i++, mask>>=1) {
1109 if (mask & 0x1) pmds[i] = ia64_get_pmd(i);
1114 * reload from thread state (used for ctxsw only)
1116 static inline void
1117 pfm_restore_pmds(unsigned long *pmds, unsigned long mask)
1119 int i;
1120 unsigned long val, ovfl_val = pmu_conf->ovfl_val;
1122 for (i=0; mask; i++, mask>>=1) {
1123 if ((mask & 0x1) == 0) continue;
1124 val = PMD_IS_COUNTING(i) ? pmds[i] & ovfl_val : pmds[i];
1125 ia64_set_pmd(i, val);
1127 ia64_srlz_d();
1131 * propagate PMD from context to thread-state
1133 static inline void
1134 pfm_copy_pmds(struct task_struct *task, pfm_context_t *ctx)
1136 unsigned long ovfl_val = pmu_conf->ovfl_val;
1137 unsigned long mask = ctx->ctx_all_pmds[0];
1138 unsigned long val;
1139 int i;
1141 DPRINT(("mask=0x%lx\n", mask));
1143 for (i=0; mask; i++, mask>>=1) {
1145 val = ctx->ctx_pmds[i].val;
1148 * We break up the 64 bit value into 2 pieces
1149 * the lower bits go to the machine state in the
1150 * thread (will be reloaded on ctxsw in).
1151 * The upper part stays in the soft-counter.
1153 if (PMD_IS_COUNTING(i)) {
1154 ctx->ctx_pmds[i].val = val & ~ovfl_val;
1155 val &= ovfl_val;
1157 ctx->th_pmds[i] = val;
1159 DPRINT(("pmd[%d]=0x%lx soft_val=0x%lx\n",
1161 ctx->th_pmds[i],
1162 ctx->ctx_pmds[i].val));
1167 * propagate PMC from context to thread-state
1169 static inline void
1170 pfm_copy_pmcs(struct task_struct *task, pfm_context_t *ctx)
1172 unsigned long mask = ctx->ctx_all_pmcs[0];
1173 int i;
1175 DPRINT(("mask=0x%lx\n", mask));
1177 for (i=0; mask; i++, mask>>=1) {
1178 /* masking 0 with ovfl_val yields 0 */
1179 ctx->th_pmcs[i] = ctx->ctx_pmcs[i];
1180 DPRINT(("pmc[%d]=0x%lx\n", i, ctx->th_pmcs[i]));
1186 static inline void
1187 pfm_restore_pmcs(unsigned long *pmcs, unsigned long mask)
1189 int i;
1191 for (i=0; mask; i++, mask>>=1) {
1192 if ((mask & 0x1) == 0) continue;
1193 ia64_set_pmc(i, pmcs[i]);
1195 ia64_srlz_d();
1198 static inline int
1199 pfm_uuid_cmp(pfm_uuid_t a, pfm_uuid_t b)
1201 return memcmp(a, b, sizeof(pfm_uuid_t));
1204 static inline int
1205 pfm_buf_fmt_exit(pfm_buffer_fmt_t *fmt, struct task_struct *task, void *buf, struct pt_regs *regs)
1207 int ret = 0;
1208 if (fmt->fmt_exit) ret = (*fmt->fmt_exit)(task, buf, regs);
1209 return ret;
1212 static inline int
1213 pfm_buf_fmt_getsize(pfm_buffer_fmt_t *fmt, struct task_struct *task, unsigned int flags, int cpu, void *arg, unsigned long *size)
1215 int ret = 0;
1216 if (fmt->fmt_getsize) ret = (*fmt->fmt_getsize)(task, flags, cpu, arg, size);
1217 return ret;
1221 static inline int
1222 pfm_buf_fmt_validate(pfm_buffer_fmt_t *fmt, struct task_struct *task, unsigned int flags,
1223 int cpu, void *arg)
1225 int ret = 0;
1226 if (fmt->fmt_validate) ret = (*fmt->fmt_validate)(task, flags, cpu, arg);
1227 return ret;
1230 static inline int
1231 pfm_buf_fmt_init(pfm_buffer_fmt_t *fmt, struct task_struct *task, void *buf, unsigned int flags,
1232 int cpu, void *arg)
1234 int ret = 0;
1235 if (fmt->fmt_init) ret = (*fmt->fmt_init)(task, buf, flags, cpu, arg);
1236 return ret;
1239 static inline int
1240 pfm_buf_fmt_restart(pfm_buffer_fmt_t *fmt, struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs)
1242 int ret = 0;
1243 if (fmt->fmt_restart) ret = (*fmt->fmt_restart)(task, ctrl, buf, regs);
1244 return ret;
1247 static inline int
1248 pfm_buf_fmt_restart_active(pfm_buffer_fmt_t *fmt, struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs)
1250 int ret = 0;
1251 if (fmt->fmt_restart_active) ret = (*fmt->fmt_restart_active)(task, ctrl, buf, regs);
1252 return ret;
1255 static pfm_buffer_fmt_t *
1256 __pfm_find_buffer_fmt(pfm_uuid_t uuid)
1258 struct list_head * pos;
1259 pfm_buffer_fmt_t * entry;
1261 list_for_each(pos, &pfm_buffer_fmt_list) {
1262 entry = list_entry(pos, pfm_buffer_fmt_t, fmt_list);
1263 if (pfm_uuid_cmp(uuid, entry->fmt_uuid) == 0)
1264 return entry;
1266 return NULL;
1270 * find a buffer format based on its uuid
1272 static pfm_buffer_fmt_t *
1273 pfm_find_buffer_fmt(pfm_uuid_t uuid)
1275 pfm_buffer_fmt_t * fmt;
1276 spin_lock(&pfm_buffer_fmt_lock);
1277 fmt = __pfm_find_buffer_fmt(uuid);
1278 spin_unlock(&pfm_buffer_fmt_lock);
1279 return fmt;
1283 pfm_register_buffer_fmt(pfm_buffer_fmt_t *fmt)
1285 int ret = 0;
1287 /* some sanity checks */
1288 if (fmt == NULL || fmt->fmt_name == NULL) return -EINVAL;
1290 /* we need at least a handler */
1291 if (fmt->fmt_handler == NULL) return -EINVAL;
1294 * XXX: need check validity of fmt_arg_size
1297 spin_lock(&pfm_buffer_fmt_lock);
1299 if (__pfm_find_buffer_fmt(fmt->fmt_uuid)) {
1300 printk(KERN_ERR "perfmon: duplicate sampling format: %s\n", fmt->fmt_name);
1301 ret = -EBUSY;
1302 goto out;
1304 list_add(&fmt->fmt_list, &pfm_buffer_fmt_list);
1305 printk(KERN_INFO "perfmon: added sampling format %s\n", fmt->fmt_name);
1307 out:
1308 spin_unlock(&pfm_buffer_fmt_lock);
1309 return ret;
1311 EXPORT_SYMBOL(pfm_register_buffer_fmt);
1314 pfm_unregister_buffer_fmt(pfm_uuid_t uuid)
1316 pfm_buffer_fmt_t *fmt;
1317 int ret = 0;
1319 spin_lock(&pfm_buffer_fmt_lock);
1321 fmt = __pfm_find_buffer_fmt(uuid);
1322 if (!fmt) {
1323 printk(KERN_ERR "perfmon: cannot unregister format, not found\n");
1324 ret = -EINVAL;
1325 goto out;
1327 list_del_init(&fmt->fmt_list);
1328 printk(KERN_INFO "perfmon: removed sampling format: %s\n", fmt->fmt_name);
1330 out:
1331 spin_unlock(&pfm_buffer_fmt_lock);
1332 return ret;
1335 EXPORT_SYMBOL(pfm_unregister_buffer_fmt);
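/*
 * Illustrative sketch (not part of this file) of how a sampling format
 * module registers itself; only fields referenced above are shown, and the
 * UUID bytes, names and handler are made up.
 */
#if 0
static pfm_buffer_fmt_t example_fmt = {
	.fmt_name    = "example-sampling-format",
	.fmt_uuid    = { 0xde, 0xad, 0xbe, 0xef },	/* must be unique */
	.fmt_handler = example_ovfl_handler,		/* mandatory, see check above */
};

static int __init example_fmt_init(void)
{
	/* returns -EBUSY if a format with the same uuid is already registered */
	return pfm_register_buffer_fmt(&example_fmt);
}
#endif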
1337 extern void update_pal_halt_status(int);
1339 static int
1340 pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu)
1342 unsigned long flags;
1344 * validity checks on cpu_mask have been done upstream
1346 LOCK_PFS(flags);
1348 DPRINT(("in sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
1349 pfm_sessions.pfs_sys_sessions,
1350 pfm_sessions.pfs_task_sessions,
1351 pfm_sessions.pfs_sys_use_dbregs,
1352 is_syswide,
1353 cpu));
1355 if (is_syswide) {
1357 * cannot mix system wide and per-task sessions
1359 if (pfm_sessions.pfs_task_sessions > 0UL) {
1360 DPRINT(("system wide not possible, %u conflicting task_sessions\n",
1361 pfm_sessions.pfs_task_sessions));
1362 goto abort;
1365 if (pfm_sessions.pfs_sys_session[cpu]) goto error_conflict;
1367 DPRINT(("reserving system wide session on CPU%u currently on CPU%u\n", cpu, smp_processor_id()));
1369 pfm_sessions.pfs_sys_session[cpu] = task;
1371 pfm_sessions.pfs_sys_sessions++ ;
1373 } else {
1374 if (pfm_sessions.pfs_sys_sessions) goto abort;
1375 pfm_sessions.pfs_task_sessions++;
1378 DPRINT(("out sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
1379 pfm_sessions.pfs_sys_sessions,
1380 pfm_sessions.pfs_task_sessions,
1381 pfm_sessions.pfs_sys_use_dbregs,
1382 is_syswide,
1383 cpu));
1386 * disable default_idle() to go to PAL_HALT
1388 update_pal_halt_status(0);
1390 UNLOCK_PFS(flags);
1392 return 0;
1394 error_conflict:
1395 DPRINT(("system wide not possible, conflicting session [%d] on CPU%d\n",
1396 task_pid_nr(pfm_sessions.pfs_sys_session[cpu]),
1397 cpu));
1398 abort:
1399 UNLOCK_PFS(flags);
1401 return -EBUSY;
1405 static int
1406 pfm_unreserve_session(pfm_context_t *ctx, int is_syswide, unsigned int cpu)
1408 unsigned long flags;
1410 * validity checks on cpu_mask have been done upstream
1412 LOCK_PFS(flags);
1414 DPRINT(("in sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
1415 pfm_sessions.pfs_sys_sessions,
1416 pfm_sessions.pfs_task_sessions,
1417 pfm_sessions.pfs_sys_use_dbregs,
1418 is_syswide,
1419 cpu));
1422 if (is_syswide) {
1423 pfm_sessions.pfs_sys_session[cpu] = NULL;
1425 * would not work with perfmon+more than one bit in cpu_mask
1427 if (ctx && ctx->ctx_fl_using_dbreg) {
1428 if (pfm_sessions.pfs_sys_use_dbregs == 0) {
1429 printk(KERN_ERR "perfmon: invalid release for ctx %p sys_use_dbregs=0\n", ctx);
1430 } else {
1431 pfm_sessions.pfs_sys_use_dbregs--;
1434 pfm_sessions.pfs_sys_sessions--;
1435 } else {
1436 pfm_sessions.pfs_task_sessions--;
1438 DPRINT(("out sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
1439 pfm_sessions.pfs_sys_sessions,
1440 pfm_sessions.pfs_task_sessions,
1441 pfm_sessions.pfs_sys_use_dbregs,
1442 is_syswide,
1443 cpu));
1446 * if possible, enable default_idle() to go into PAL_HALT
1448 if (pfm_sessions.pfs_task_sessions == 0 && pfm_sessions.pfs_sys_sessions == 0)
1449 update_pal_halt_status(1);
1451 UNLOCK_PFS(flags);
1453 return 0;
1457 * removes virtual mapping of the sampling buffer.
1458 * IMPORTANT: cannot be called with interrupts disabled, e.g. inside
1459 * a PROTECT_CTX() section.
1461 static int
1462 pfm_remove_smpl_mapping(struct task_struct *task, void *vaddr, unsigned long size)
1464 int r;
1466 /* sanity checks */
1467 if (task->mm == NULL || size == 0UL || vaddr == NULL) {
1468 printk(KERN_ERR "perfmon: pfm_remove_smpl_mapping [%d] invalid context mm=%p\n", task_pid_nr(task), task->mm);
1469 return -EINVAL;
1472 DPRINT(("smpl_vaddr=%p size=%lu\n", vaddr, size));
1475 * does the actual unmapping
1477 down_write(&task->mm->mmap_sem);
1479 DPRINT(("down_write done smpl_vaddr=%p size=%lu\n", vaddr, size));
1481 r = pfm_do_munmap(task->mm, (unsigned long)vaddr, size, 0);
1483 up_write(&task->mm->mmap_sem);
1484 if (r !=0) {
1485 printk(KERN_ERR "perfmon: [%d] unable to unmap sampling buffer @%p size=%lu\n", task_pid_nr(task), vaddr, size);
1488 DPRINT(("do_unmap(%p, %lu)=%d\n", vaddr, size, r));
1490 return 0;
1494 * free actual physical storage used by sampling buffer
1496 #if 0
1497 static int
1498 pfm_free_smpl_buffer(pfm_context_t *ctx)
1500 pfm_buffer_fmt_t *fmt;
1502 if (ctx->ctx_smpl_hdr == NULL) goto invalid_free;
1505 * we won't use the buffer format anymore
1507 fmt = ctx->ctx_buf_fmt;
1509 DPRINT(("sampling buffer @%p size %lu vaddr=%p\n",
1510 ctx->ctx_smpl_hdr,
1511 ctx->ctx_smpl_size,
1512 ctx->ctx_smpl_vaddr));
1514 pfm_buf_fmt_exit(fmt, current, NULL, NULL);
1517 * free the buffer
1519 pfm_rvfree(ctx->ctx_smpl_hdr, ctx->ctx_smpl_size);
1521 ctx->ctx_smpl_hdr = NULL;
1522 ctx->ctx_smpl_size = 0UL;
1524 return 0;
1526 invalid_free:
1527 printk(KERN_ERR "perfmon: pfm_free_smpl_buffer [%d] no buffer\n", task_pid_nr(current));
1528 return -EINVAL;
1530 #endif
1532 static inline void
1533 pfm_exit_smpl_buffer(pfm_buffer_fmt_t *fmt)
1535 if (fmt == NULL) return;
1537 pfm_buf_fmt_exit(fmt, current, NULL, NULL);
1542 * pfmfs should _never_ be mounted by userland - too much of a security hassle,
1543 * no real gain from having the whole whorehouse mounted. So we don't need
1544 * any operations on the root directory. However, we need a non-trivial
1545 * d_name - pfm: will go nicely and kill the special-casing in procfs.
1547 static struct vfsmount *pfmfs_mnt __read_mostly;
1549 static int __init
1550 init_pfm_fs(void)
1552 int err = register_filesystem(&pfm_fs_type);
1553 if (!err) {
1554 pfmfs_mnt = kern_mount(&pfm_fs_type);
1555 err = PTR_ERR(pfmfs_mnt);
1556 if (IS_ERR(pfmfs_mnt))
1557 unregister_filesystem(&pfm_fs_type);
1558 else
1559 err = 0;
1561 return err;
1564 static ssize_t
1565 pfm_read(struct file *filp, char __user *buf, size_t size, loff_t *ppos)
1567 pfm_context_t *ctx;
1568 pfm_msg_t *msg;
1569 ssize_t ret;
1570 unsigned long flags;
1571 DECLARE_WAITQUEUE(wait, current);
1572 if (PFM_IS_FILE(filp) == 0) {
1573 printk(KERN_ERR "perfmon: pfm_poll: bad magic [%d]\n", task_pid_nr(current));
1574 return -EINVAL;
1577 ctx = filp->private_data;
1578 if (ctx == NULL) {
1579 printk(KERN_ERR "perfmon: pfm_read: NULL ctx [%d]\n", task_pid_nr(current));
1580 return -EINVAL;
1584 * check even when there is no message
1586 if (size < sizeof(pfm_msg_t)) {
1587 DPRINT(("message is too small ctx=%p (>=%ld)\n", ctx, sizeof(pfm_msg_t)));
1588 return -EINVAL;
1591 PROTECT_CTX(ctx, flags);
1594 * put ourselves on the wait queue
1596 add_wait_queue(&ctx->ctx_msgq_wait, &wait);
1599 for(;;) {
1601 * check wait queue
1604 set_current_state(TASK_INTERRUPTIBLE);
1606 DPRINT(("head=%d tail=%d\n", ctx->ctx_msgq_head, ctx->ctx_msgq_tail));
1608 ret = 0;
1609 if(PFM_CTXQ_EMPTY(ctx) == 0) break;
1611 UNPROTECT_CTX(ctx, flags);
1614 * check non-blocking read
1616 ret = -EAGAIN;
1617 if(filp->f_flags & O_NONBLOCK) break;
1620 * check pending signals
1622 if(signal_pending(current)) {
1623 ret = -EINTR;
1624 break;
1627 * no message, so wait
1629 schedule();
1631 PROTECT_CTX(ctx, flags);
1633 DPRINT(("[%d] back to running ret=%ld\n", task_pid_nr(current), ret));
1634 set_current_state(TASK_RUNNING);
1635 remove_wait_queue(&ctx->ctx_msgq_wait, &wait);
1637 if (ret < 0) goto abort;
1639 ret = -EINVAL;
1640 msg = pfm_get_next_msg(ctx);
1641 if (msg == NULL) {
1642 printk(KERN_ERR "perfmon: pfm_read no msg for ctx=%p [%d]\n", ctx, task_pid_nr(current));
1643 goto abort_locked;
1646 DPRINT(("fd=%d type=%d\n", msg->pfm_gen_msg.msg_ctx_fd, msg->pfm_gen_msg.msg_type));
1648 ret = -EFAULT;
1649 if(copy_to_user(buf, msg, sizeof(pfm_msg_t)) == 0) ret = sizeof(pfm_msg_t);
1651 abort_locked:
1652 UNPROTECT_CTX(ctx, flags);
1653 abort:
1654 return ret;
1657 static ssize_t
1658 pfm_write(struct file *file, const char __user *ubuf,
1659 size_t size, loff_t *ppos)
1661 DPRINT(("pfm_write called\n"));
1662 return -EINVAL;
1665 static unsigned int
1666 pfm_poll(struct file *filp, poll_table * wait)
1668 pfm_context_t *ctx;
1669 unsigned long flags;
1670 unsigned int mask = 0;
1672 if (PFM_IS_FILE(filp) == 0) {
1673 printk(KERN_ERR "perfmon: pfm_poll: bad magic [%d]\n", task_pid_nr(current));
1674 return 0;
1677 ctx = filp->private_data;
1678 if (ctx == NULL) {
1679 printk(KERN_ERR "perfmon: pfm_poll: NULL ctx [%d]\n", task_pid_nr(current));
1680 return 0;
1684 DPRINT(("pfm_poll ctx_fd=%d before poll_wait\n", ctx->ctx_fd));
1686 poll_wait(filp, &ctx->ctx_msgq_wait, wait);
1688 PROTECT_CTX(ctx, flags);
1690 if (PFM_CTXQ_EMPTY(ctx) == 0)
1691 mask = POLLIN | POLLRDNORM;
1693 UNPROTECT_CTX(ctx, flags);
1695 DPRINT(("pfm_poll ctx_fd=%d mask=0x%x\n", ctx->ctx_fd, mask));
1697 return mask;
1700 static long
1701 pfm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1703 DPRINT(("pfm_ioctl called\n"));
1704 return -EINVAL;
1708 * interrupt cannot be masked when coming here
1710 static inline int
1711 pfm_do_fasync(int fd, struct file *filp, pfm_context_t *ctx, int on)
1713 int ret;
1715 ret = fasync_helper (fd, filp, on, &ctx->ctx_async_queue);
1717 DPRINT(("pfm_fasync called by [%d] on ctx_fd=%d on=%d async_queue=%p ret=%d\n",
1718 task_pid_nr(current),
1721 ctx->ctx_async_queue, ret));
1723 return ret;
1726 static int
1727 pfm_fasync(int fd, struct file *filp, int on)
1729 pfm_context_t *ctx;
1730 int ret;
1732 if (PFM_IS_FILE(filp) == 0) {
1733 printk(KERN_ERR "perfmon: pfm_fasync bad magic [%d]\n", task_pid_nr(current));
1734 return -EBADF;
1737 ctx = filp->private_data;
1738 if (ctx == NULL) {
1739 printk(KERN_ERR "perfmon: pfm_fasync NULL ctx [%d]\n", task_pid_nr(current));
1740 return -EBADF;
1743 * we cannot mask interrupts during this call because this may
1744 * go to sleep if memory is not readily available.
1746 * We are protected from the context disappearing by the get_fd()/put_fd()
1747 * done in caller. Serialization of this function is ensured by caller.
1749 ret = pfm_do_fasync(fd, filp, ctx, on);
1752 DPRINT(("pfm_fasync called on ctx_fd=%d on=%d async_queue=%p ret=%d\n",
1755 ctx->ctx_async_queue, ret));
1757 return ret;
1760 #ifdef CONFIG_SMP
1762 * this function is exclusively called from pfm_close().
1763 * The context is not protected at that time, nor are interrupts
1764 * on the remote CPU. That's necessary to avoid deadlocks.
1766 static void
1767 pfm_syswide_force_stop(void *info)
1769 pfm_context_t *ctx = (pfm_context_t *)info;
1770 struct pt_regs *regs = task_pt_regs(current);
1771 struct task_struct *owner;
1772 unsigned long flags;
1773 int ret;
1775 if (ctx->ctx_cpu != smp_processor_id()) {
1776 printk(KERN_ERR "perfmon: pfm_syswide_force_stop for CPU%d but on CPU%d\n",
1777 ctx->ctx_cpu,
1778 smp_processor_id());
1779 return;
1781 owner = GET_PMU_OWNER();
1782 if (owner != ctx->ctx_task) {
1783 printk(KERN_ERR "perfmon: pfm_syswide_force_stop CPU%d unexpected owner [%d] instead of [%d]\n",
1784 smp_processor_id(),
1785 task_pid_nr(owner), task_pid_nr(ctx->ctx_task));
1786 return;
1788 if (GET_PMU_CTX() != ctx) {
1789 printk(KERN_ERR "perfmon: pfm_syswide_force_stop CPU%d unexpected ctx %p instead of %p\n",
1790 smp_processor_id(),
1791 GET_PMU_CTX(), ctx);
1792 return;
1795 DPRINT(("on CPU%d forcing system wide stop for [%d]\n", smp_processor_id(), task_pid_nr(ctx->ctx_task)));
1797 * the context is already protected in pfm_close(), we simply
1798 * need to mask interrupts to avoid a PMU interrupt race on
1799 * this CPU
1801 local_irq_save(flags);
1803 ret = pfm_context_unload(ctx, NULL, 0, regs);
1804 if (ret) {
1805 DPRINT(("context_unload returned %d\n", ret));
1809 * unmask interrupts, PMU interrupts are now spurious here
1811 local_irq_restore(flags);
1814 static void
1815 pfm_syswide_cleanup_other_cpu(pfm_context_t *ctx)
1817 int ret;
1819 DPRINT(("calling CPU%d for cleanup\n", ctx->ctx_cpu));
1820 ret = smp_call_function_single(ctx->ctx_cpu, pfm_syswide_force_stop, ctx, 1);
1821 DPRINT(("called CPU%d for cleanup ret=%d\n", ctx->ctx_cpu, ret));
1823 #endif /* CONFIG_SMP */
1826 * called for each close(). Partially free resources.
1827 * When caller is self-monitoring, the context is unloaded.
1829 static int
1830 pfm_flush(struct file *filp, fl_owner_t id)
1832 pfm_context_t *ctx;
1833 struct task_struct *task;
1834 struct pt_regs *regs;
1835 unsigned long flags;
1836 unsigned long smpl_buf_size = 0UL;
1837 void *smpl_buf_vaddr = NULL;
1838 int state, is_system;
1840 if (PFM_IS_FILE(filp) == 0) {
1841 DPRINT(("bad magic for\n"));
1842 return -EBADF;
1845 ctx = filp->private_data;
1846 if (ctx == NULL) {
1847 printk(KERN_ERR "perfmon: pfm_flush: NULL ctx [%d]\n", task_pid_nr(current));
1848 return -EBADF;
1852 * remove our file from the async queue, if we use this mode.
1853 * This can be done without the context being protected. We come
1854 * here when the context has become unreachable by other tasks.
1856 * We may still have active monitoring at this point and we may
1857 * end up in pfm_overflow_handler(). However, fasync_helper()
1858 * operates with interrupts disabled and it cleans up the
1859 * queue. If the PMU handler is called prior to entering
1860 * fasync_helper() then it will send a signal. If it is
1861 * invoked after, it will find an empty queue and no
1862 * signal will be sent. In both cases, we are safe
1864 PROTECT_CTX(ctx, flags);
1866 state = ctx->ctx_state;
1867 is_system = ctx->ctx_fl_system;
1869 task = PFM_CTX_TASK(ctx);
1870 regs = task_pt_regs(task);
1872 DPRINT(("ctx_state=%d is_current=%d\n",
1873 state,
1874 task == current ? 1 : 0));
1877 * if state == UNLOADED, then task is NULL
1881 * we must stop and unload because we are losing access to the context.
1883 if (task == current) {
1884 #ifdef CONFIG_SMP
1886 * the task IS the owner but it migrated to another CPU: that's bad
1887 * but we must handle this cleanly. Unfortunately, the kernel does
1888 * not provide a mechanism to block migration (while the context is loaded).
1890 * We need to release the resource on the ORIGINAL cpu.
1892 if (is_system && ctx->ctx_cpu != smp_processor_id()) {
1894 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
1896 * keep context protected but unmask interrupt for IPI
1898 local_irq_restore(flags);
1900 pfm_syswide_cleanup_other_cpu(ctx);
1903 * restore interrupt masking
1905 local_irq_save(flags);
1908 * context is unloaded at this point
1910 } else
1911 #endif /* CONFIG_SMP */
1914 DPRINT(("forcing unload\n"));
1916 * stop and unload, returning with state UNLOADED
1917 * and session unreserved.
1919 pfm_context_unload(ctx, NULL, 0, regs);
1921 DPRINT(("ctx_state=%d\n", ctx->ctx_state));
1926 * remove virtual mapping, if any, for the calling task.
1927 * cannot reset ctx field until last user is calling close().
1929 * ctx_smpl_vaddr must never be cleared because it is needed
1930 * by every task with access to the context
1932 * When called from do_exit(), the mm context is gone already, therefore
1933 * mm is NULL, i.e., the VMA is already gone and we do not have to
1934 * do anything here
1936 if (ctx->ctx_smpl_vaddr && current->mm) {
1937 smpl_buf_vaddr = ctx->ctx_smpl_vaddr;
1938 smpl_buf_size = ctx->ctx_smpl_size;
1941 UNPROTECT_CTX(ctx, flags);
1944 * if there was a mapping, then we systematically remove it
1945 * at this point. Cannot be done inside critical section
1946 * because some VM function reenables interrupts.
1949 if (smpl_buf_vaddr) pfm_remove_smpl_mapping(current, smpl_buf_vaddr, smpl_buf_size);
1951 return 0;
1954 * called either on explicit close() or from exit_files().
1955 * Only the LAST user of the file gets to this point, i.e., it is
1956 * called only ONCE.
1958 * IMPORTANT: we get called ONLY when the refcnt on the file gets to zero
1959 * (fput()), i.e., last task to access the file. Nobody else can access the
1960 * file at this point.
1962 * When called from exit_files(), the VMA has been freed because exit_mm()
1963 * is executed before exit_files().
1965 * When called from exit_files(), the current task is not yet ZOMBIE but we
1966 * flush the PMU state to the context.
1968 static int
1969 pfm_close(struct inode *inode, struct file *filp)
1971 pfm_context_t *ctx;
1972 struct task_struct *task;
1973 struct pt_regs *regs;
1974 DECLARE_WAITQUEUE(wait, current);
1975 unsigned long flags;
1976 unsigned long smpl_buf_size = 0UL;
1977 void *smpl_buf_addr = NULL;
1978 int free_possible = 1;
1979 int state, is_system;
1981 DPRINT(("pfm_close called private=%p\n", filp->private_data));
1983 if (PFM_IS_FILE(filp) == 0) {
1984 DPRINT(("bad magic\n"));
1985 return -EBADF;
1988 ctx = filp->private_data;
1989 if (ctx == NULL) {
1990 printk(KERN_ERR "perfmon: pfm_close: NULL ctx [%d]\n", task_pid_nr(current));
1991 return -EBADF;
1994 PROTECT_CTX(ctx, flags);
1996 state = ctx->ctx_state;
1997 is_system = ctx->ctx_fl_system;
1999 task = PFM_CTX_TASK(ctx);
2000 regs = task_pt_regs(task);
2002 DPRINT(("ctx_state=%d is_current=%d\n",
2003 state,
2004 task == current ? 1 : 0));
2007 * if task == current, then pfm_flush() unloaded the context
2009 if (state == PFM_CTX_UNLOADED) goto doit;
2012 * context is loaded/masked and task != current, we need to
2013 * either force an unload or go zombie
2017 * The task is currently blocked or will block after an overflow.
2018 * we must force it to wakeup to get out of the
2019 * MASKED state and transition to the unloaded state by itself.
2021 * This situation is only possible for per-task mode
2023 if (state == PFM_CTX_MASKED && CTX_OVFL_NOBLOCK(ctx) == 0) {
2026 * set a "partial" zombie state to be checked
2027 * upon return from down() in pfm_handle_work().
2029 * We cannot use the ZOMBIE state, because it is checked
2030 * by pfm_load_regs() which is called upon wakeup from down().
2031 * In such case, it would free the context and then we would
2032 * return to pfm_handle_work() which would access the
2033 * stale context. Instead, we set a flag invisible to pfm_load_regs()
2034 * but visible to pfm_handle_work().
2036 * For some window of time, we have a zombie context with
2037 * ctx_state = MASKED and not ZOMBIE
2039 ctx->ctx_fl_going_zombie = 1;
2042 * force task to wake up from MASKED state
2044 complete(&ctx->ctx_restart_done);
2046 DPRINT(("waking up ctx_state=%d\n", state));
2049 * put ourself to sleep waiting for the other
2050 * task to report completion
2052 * the context is protected by mutex, therefore there
2053 * is no risk of being notified of completion before
2054 * being actually on the waitq.
2056 set_current_state(TASK_INTERRUPTIBLE);
2057 add_wait_queue(&ctx->ctx_zombieq, &wait);
2059 UNPROTECT_CTX(ctx, flags);
2062 * XXX: check for signals :
2063 * - ok for explicit close
2064 * - not ok when coming from exit_files()
2066 schedule();
2069 PROTECT_CTX(ctx, flags);
2072 remove_wait_queue(&ctx->ctx_zombieq, &wait);
2073 set_current_state(TASK_RUNNING);
2076 * context is unloaded at this point
2078 DPRINT(("after zombie wakeup ctx_state=%d for\n", state));
2080 else if (task != current) {
2081 #ifdef CONFIG_SMP
2083 * switch context to zombie state
2085 ctx->ctx_state = PFM_CTX_ZOMBIE;
2087 DPRINT(("zombie ctx for [%d]\n", task_pid_nr(task)));
2089 * cannot free the context on the spot. deferred until
2090 * the task notices the ZOMBIE state
2092 free_possible = 0;
2093 #else
2094 pfm_context_unload(ctx, NULL, 0, regs);
2095 #endif
2098 doit:
2099 /* reload state, may have changed during opening of critical section */
2100 state = ctx->ctx_state;
2103 * the context is still attached to a task (possibly current), so
2104 * we cannot destroy it right now
2108 * we must free the sampling buffer right here because
2109 * we cannot rely on it being cleaned up later by the
2110 * monitored task. It is not possible to free vmalloc'ed
2111 * memory in pfm_load_regs(). Instead, we remove the buffer
2112 * now. Should there be a subsequent PMU overflow originally
2113 * meant for sampling, it will be converted to spurious
2114 * and that's fine because the monitoring tool is gone anyway.
2116 if (ctx->ctx_smpl_hdr) {
2117 smpl_buf_addr = ctx->ctx_smpl_hdr;
2118 smpl_buf_size = ctx->ctx_smpl_size;
2119 /* no more sampling */
2120 ctx->ctx_smpl_hdr = NULL;
2121 ctx->ctx_fl_is_sampling = 0;
2124 DPRINT(("ctx_state=%d free_possible=%d addr=%p size=%lu\n",
2125 state,
2126 free_possible,
2127 smpl_buf_addr,
2128 smpl_buf_size));
2130 if (smpl_buf_addr) pfm_exit_smpl_buffer(ctx->ctx_buf_fmt);
2133 * unreserve the session only if ZOMBIE; in the UNLOADED state the session has already been unreserved.
2135 if (state == PFM_CTX_ZOMBIE) {
2136 pfm_unreserve_session(ctx, ctx->ctx_fl_system , ctx->ctx_cpu);
2140 * disconnecting the file descriptor from the context must be done
2141 * before we unlock.
2143 filp->private_data = NULL;
2146 * if we free on the spot, the context is now completely unreachable
2147 * from the caller's side. The monitored task side is also cut, so we
2148 * can free it safely.
2150 * If we have a deferred free, only the caller side is disconnected.
2152 UNPROTECT_CTX(ctx, flags);
2155 * All memory free operations (especially for vmalloc'ed memory)
2156 * MUST be done with interrupts ENABLED.
2158 if (smpl_buf_addr) pfm_rvfree(smpl_buf_addr, smpl_buf_size);
2161 * return the memory used by the context
2163 if (free_possible) pfm_context_free(ctx);
2165 return 0;
2168 static int
2169 pfm_no_open(struct inode *irrelevant, struct file *dontcare)
2171 DPRINT(("pfm_no_open called\n"));
2172 return -ENXIO;
2177 static const struct file_operations pfm_file_ops = {
2178 .llseek = no_llseek,
2179 .read = pfm_read,
2180 .write = pfm_write,
2181 .poll = pfm_poll,
2182 .unlocked_ioctl = pfm_ioctl,
2183 .open = pfm_no_open, /* special open code to disallow open via /proc */
2184 .fasync = pfm_fasync,
2185 .release = pfm_close,
2186 .flush = pfm_flush
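/*
 * Note on lifetime: ->flush() runs on every close() of a descriptor
 * referring to the context, while ->release() (pfm_close) runs only
 * once, when the last reference to the file is dropped. open() is
 * disallowed because perfmon contexts are only created through
 * perfmonctl(), never by opening a pfmfs file directly.
 */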
2189 static int
2190 pfmfs_delete_dentry(const struct dentry *dentry)
2192 return 1;
2195 static char *pfmfs_dname(struct dentry *dentry, char *buffer, int buflen)
2197 return dynamic_dname(dentry, buffer, buflen, "pfm:[%lu]",
2198 dentry->d_inode->i_ino);
2201 static const struct dentry_operations pfmfs_dentry_operations = {
2202 .d_delete = pfmfs_delete_dentry,
2203 .d_dname = pfmfs_dname,
2207 static struct file *
2208 pfm_alloc_file(pfm_context_t *ctx)
2210 struct file *file;
2211 struct inode *inode;
2212 struct path path;
2213 struct qstr this = { .name = "" };
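/*
 * Build an anonymous file on the internal pfmfs mount: a fresh inode,
 * a dentry named "pfm:[ino]" (see pfmfs_dname), and a read-only
 * struct file wired to pfm_file_ops, with file->private_data pointing
 * back at the perfmon context.
 */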
2216 * allocate a new inode
2218 inode = new_inode(pfmfs_mnt->mnt_sb);
2219 if (!inode)
2220 return ERR_PTR(-ENOMEM);
2222 DPRINT(("new inode ino=%ld @%p\n", inode->i_ino, inode));
2224 inode->i_mode = S_IFCHR|S_IRUGO;
2225 inode->i_uid = current_fsuid();
2226 inode->i_gid = current_fsgid();
2229 * allocate a new dcache entry
2231 path.dentry = d_alloc(pfmfs_mnt->mnt_sb->s_root, &this);
2232 if (!path.dentry) {
2233 iput(inode);
2234 return ERR_PTR(-ENOMEM);
2236 path.mnt = mntget(pfmfs_mnt);
2238 d_add(path.dentry, inode);
2240 file = alloc_file(&path, FMODE_READ, &pfm_file_ops);
2241 if (!file) {
2242 path_put(&path);
2243 return ERR_PTR(-ENFILE);
2246 file->f_flags = O_RDONLY;
2247 file->private_data = ctx;
2249 return file;
2252 static int
2253 pfm_remap_buffer(struct vm_area_struct *vma, unsigned long buf, unsigned long addr, unsigned long size)
2255 DPRINT(("CPU%d buf=0x%lx addr=0x%lx size=%ld\n", smp_processor_id(), buf, addr, size));
2257 while (size > 0) {
2258 unsigned long pfn = ia64_tpa(buf) >> PAGE_SHIFT;
2261 if (remap_pfn_range(vma, addr, pfn, PAGE_SIZE, PAGE_READONLY))
2262 return -ENOMEM;
2264 addr += PAGE_SIZE;
2265 buf += PAGE_SIZE;
2266 size -= PAGE_SIZE;
2268 return 0;
2272 * allocates a sampling buffer and remaps it into the user address space of the task
2274 static int
2275 pfm_smpl_buffer_alloc(struct task_struct *task, struct file *filp, pfm_context_t *ctx, unsigned long rsize, void **user_vaddr)
2277 struct mm_struct *mm = task->mm;
2278 struct vm_area_struct *vma = NULL;
2279 unsigned long size;
2280 void *smpl_buf;
2284 * the fixed header + requested size and align to page boundary
2286 size = PAGE_ALIGN(rsize);
2288 DPRINT(("sampling buffer rsize=%lu size=%lu bytes\n", rsize, size));
2291 * check requested size to avoid Denial-of-service attacks
2292 * XXX: may have to refine this test
2293 * Check against address space limit.
2295 * if ((mm->total_vm << PAGE_SHIFT) + len> task->rlim[RLIMIT_AS].rlim_cur)
2296 * return -ENOMEM;
2298 if (size > task_rlimit(task, RLIMIT_MEMLOCK))
2299 return -ENOMEM;
2302 * We do the easy-to-undo allocations first.
2304 * pfm_rvmalloc() clears the buffer, so there is no leak
2306 smpl_buf = pfm_rvmalloc(size);
2307 if (smpl_buf == NULL) {
2308 DPRINT(("Can't allocate sampling buffer\n"));
2309 return -ENOMEM;
2312 DPRINT(("smpl_buf @%p\n", smpl_buf));
2314 /* allocate vma */
2315 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
2316 if (!vma) {
2317 DPRINT(("Cannot allocate vma\n"));
2318 goto error_kmem;
2320 INIT_LIST_HEAD(&vma->anon_vma_chain);
2323 * partially initialize the vma for the sampling buffer
2325 vma->vm_mm = mm;
2326 vma->vm_file = filp;
2327 vma->vm_flags = VM_READ | VM_MAYREAD | VM_RESERVED;
2328 vma->vm_page_prot = PAGE_READONLY; /* XXX may need to change */
2331 * Now we have everything we need and we can initialize
2332 * and connect all the data structures
2335 ctx->ctx_smpl_hdr = smpl_buf;
2336 ctx->ctx_smpl_size = size; /* aligned size */
2339 * Let's do the difficult operations next.
2341 * now we atomically find some area in the address space and
2342 * remap the buffer in it.
2344 down_write(&task->mm->mmap_sem);
2346 /* find some free area in address space, must have mmap sem held */
2347 vma->vm_start = pfm_get_unmapped_area(NULL, 0, size, 0, MAP_PRIVATE|MAP_ANONYMOUS, 0);
2348 if (vma->vm_start == 0UL) {
2349 DPRINT(("Cannot find unmapped area for size %ld\n", size));
2350 up_write(&task->mm->mmap_sem);
2351 goto error;
2353 vma->vm_end = vma->vm_start + size;
2354 vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT;
2356 DPRINT(("aligned size=%ld, hdr=%p mapped @0x%lx\n", size, ctx->ctx_smpl_hdr, vma->vm_start));
2358 /* can only be applied to current task, need to have the mm semaphore held when called */
2359 if (pfm_remap_buffer(vma, (unsigned long)smpl_buf, vma->vm_start, size)) {
2360 DPRINT(("Can't remap buffer\n"));
2361 up_write(&task->mm->mmap_sem);
2362 goto error;
2365 get_file(filp);
2368 * now insert the vma in the vm list for the process, must be
2369 * done with mmap lock held
2371 insert_vm_struct(mm, vma);
2373 mm->total_vm += size >> PAGE_SHIFT;
2374 vm_stat_account(vma->vm_mm, vma->vm_flags, vma->vm_file,
2375 vma_pages(vma));
2376 up_write(&task->mm->mmap_sem);
2379 * keep track of user level virtual address
2381 ctx->ctx_smpl_vaddr = (void *)vma->vm_start;
2382 *(unsigned long *)user_vaddr = vma->vm_start;
2384 return 0;
2386 error:
2387 kmem_cache_free(vm_area_cachep, vma);
2388 error_kmem:
2389 pfm_rvfree(smpl_buf, size);
2391 return -ENOMEM;
2395 * XXX: do something better here
2397 static int
2398 pfm_bad_permissions(struct task_struct *task)
2400 const struct cred *tcred;
2401 uid_t uid = current_uid();
2402 gid_t gid = current_gid();
2403 int ret;
2405 rcu_read_lock();
2406 tcred = __task_cred(task);
2408 /* inspired by ptrace_attach() */
2409 DPRINT(("cur: uid=%d gid=%d task: euid=%d suid=%d uid=%d egid=%d sgid=%d\n",
2410 uid,
2411 gid,
2412 tcred->euid,
2413 tcred->suid,
2414 tcred->uid,
2415 tcred->egid,
2416 tcred->sgid));
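/*
 * Mirror the ptrace_attach() policy: attaching is allowed only if the
 * caller's uid/gid match the target's real, effective and saved ids,
 * or if the caller has CAP_SYS_PTRACE. A non-zero return value means
 * permission denied.
 */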
2418 ret = ((uid != tcred->euid)
2419 || (uid != tcred->suid)
2420 || (uid != tcred->uid)
2421 || (gid != tcred->egid)
2422 || (gid != tcred->sgid)
2423 || (gid != tcred->gid)) && !capable(CAP_SYS_PTRACE);
2425 rcu_read_unlock();
2426 return ret;
2429 static int
2430 pfarg_is_sane(struct task_struct *task, pfarg_context_t *pfx)
2432 int ctx_flags;
2434 /* valid signal */
2436 ctx_flags = pfx->ctx_flags;
2438 if (ctx_flags & PFM_FL_SYSTEM_WIDE) {
2441 * cannot block in this mode
2443 if (ctx_flags & PFM_FL_NOTIFY_BLOCK) {
2444 DPRINT(("cannot use blocking mode when in system wide monitoring\n"));
2445 return -EINVAL;
2447 } else {
2449 /* probably more to add here */
2451 return 0;
2454 static int
2455 pfm_setup_buffer_fmt(struct task_struct *task, struct file *filp, pfm_context_t *ctx, unsigned int ctx_flags,
2456 unsigned int cpu, pfarg_context_t *arg)
2458 pfm_buffer_fmt_t *fmt = NULL;
2459 unsigned long size = 0UL;
2460 void *uaddr = NULL;
2461 void *fmt_arg = NULL;
2462 int ret = 0;
2463 #define PFM_CTXARG_BUF_ARG(a) (pfm_buffer_fmt_t *)(a+1)
2465 /* invoke and lock buffer format, if found */
2466 fmt = pfm_find_buffer_fmt(arg->ctx_smpl_buf_id);
2467 if (fmt == NULL) {
2468 DPRINT(("[%d] cannot find buffer format\n", task_pid_nr(task)));
2469 return -EINVAL;
2473 * buffer argument MUST be contiguous to pfarg_context_t
2475 if (fmt->fmt_arg_size) fmt_arg = PFM_CTXARG_BUF_ARG(arg);
2477 ret = pfm_buf_fmt_validate(fmt, task, ctx_flags, cpu, fmt_arg);
2479 DPRINT(("[%d] after validate(0x%x,%d,%p)=%d\n", task_pid_nr(task), ctx_flags, cpu, fmt_arg, ret));
2481 if (ret) goto error;
2483 /* link buffer format and context */
2484 ctx->ctx_buf_fmt = fmt;
2485 ctx->ctx_fl_is_sampling = 1; /* assume record() is defined */
2488 * check if buffer format wants to use perfmon buffer allocation/mapping service
2490 ret = pfm_buf_fmt_getsize(fmt, task, ctx_flags, cpu, fmt_arg, &size);
2491 if (ret) goto error;
2493 if (size) {
2495 * buffer is always remapped into the caller's address space
2497 ret = pfm_smpl_buffer_alloc(current, filp, ctx, size, &uaddr);
2498 if (ret) goto error;
2500 /* keep track of user address of buffer */
2501 arg->ctx_smpl_vaddr = uaddr;
2503 ret = pfm_buf_fmt_init(fmt, task, ctx->ctx_smpl_hdr, ctx_flags, cpu, fmt_arg);
2505 error:
2506 return ret;
2509 static void
2510 pfm_reset_pmu_state(pfm_context_t *ctx)
2512 int i;
2515 * install reset values for PMC.
2517 for (i=1; PMC_IS_LAST(i) == 0; i++) {
2518 if (PMC_IS_IMPL(i) == 0) continue;
2519 ctx->ctx_pmcs[i] = PMC_DFL_VAL(i);
2520 DPRINT(("pmc[%d]=0x%lx\n", i, ctx->ctx_pmcs[i]));
2523 * PMD registers are set to 0UL when the context is memset()
2527 * On context switch restore, we must restore ALL pmc and ALL pmd even
2528 * when they are not actively used by the task. In UP, the incoming process
2529 * may otherwise pick up left over PMC, PMD state from the previous process.
2530 * As opposed to PMD, stale PMC can cause harm to the incoming
2531 * process because they may change what is being measured.
2532 * Therefore, we must systematically reinstall the entire
2533 * PMC state. In SMP, the same thing is possible on the
2534 * same CPU but also between 2 CPUs.
2536 * The problem with PMDs is information leaking, especially
2537 * to user level when psr.sp=0
2539 * There is unfortunately no easy way to avoid this problem
2540 * on either UP or SMP. This definitely slows down the
2541 * pfm_load_regs() function.
2545 * bitmask of all PMCs accessible to this context
2547 * PMC0 is treated differently.
2549 ctx->ctx_all_pmcs[0] = pmu_conf->impl_pmcs[0] & ~0x1;
2552 * bitmask of all PMDs that are accessible to this context
2554 ctx->ctx_all_pmds[0] = pmu_conf->impl_pmds[0];
2556 DPRINT(("<%d> all_pmcs=0x%lx all_pmds=0x%lx\n", ctx->ctx_fd, ctx->ctx_all_pmcs[0],ctx->ctx_all_pmds[0]));
2559 * useful in case of re-enable after disable
2561 ctx->ctx_used_ibrs[0] = 0UL;
2562 ctx->ctx_used_dbrs[0] = 0UL;
2565 static int
2566 pfm_ctx_getsize(void *arg, size_t *sz)
2568 pfarg_context_t *req = (pfarg_context_t *)arg;
2569 pfm_buffer_fmt_t *fmt;
2571 *sz = 0;
2573 if (!pfm_uuid_cmp(req->ctx_smpl_buf_id, pfm_null_uuid)) return 0;
2575 fmt = pfm_find_buffer_fmt(req->ctx_smpl_buf_id);
2576 if (fmt == NULL) {
2577 DPRINT(("cannot find buffer format\n"));
2578 return -EINVAL;
2580 /* get just enough to copy in user parameters */
2581 *sz = fmt->fmt_arg_size;
2582 DPRINT(("arg_size=%lu\n", *sz));
2584 return 0;
2590 * cannot attach if :
2591 * - kernel task
2592 * - task not owned by caller
2593 * - task incompatible with context mode
2595 static int
2596 pfm_task_incompatible(pfm_context_t *ctx, struct task_struct *task)
2599 * no kernel task or task not owned by caller
2601 if (task->mm == NULL) {
2602 DPRINT(("task [%d] has not memory context (kernel thread)\n", task_pid_nr(task)));
2603 return -EPERM;
2605 if (pfm_bad_permissions(task)) {
2606 DPRINT(("no permission to attach to [%d]\n", task_pid_nr(task)));
2607 return -EPERM;
2610 * cannot block in self-monitoring mode
2612 if (CTX_OVFL_NOBLOCK(ctx) == 0 && task == current) {
2613 DPRINT(("cannot load a blocking context on self for [%d]\n", task_pid_nr(task)));
2614 return -EINVAL;
2617 if (task->exit_state == EXIT_ZOMBIE) {
2618 DPRINT(("cannot attach to zombie task [%d]\n", task_pid_nr(task)));
2619 return -EBUSY;
2623 * always ok for self
2625 if (task == current) return 0;
2627 if (!task_is_stopped_or_traced(task)) {
2628 DPRINT(("cannot attach to non-stopped task [%d] state=%ld\n", task_pid_nr(task), task->state));
2629 return -EBUSY;
2632 * make sure the task is off any CPU
2634 wait_task_inactive(task, 0);
2636 /* more to come... */
2638 return 0;
2641 static int
2642 pfm_get_task(pfm_context_t *ctx, pid_t pid, struct task_struct **task)
2644 struct task_struct *p = current;
2645 int ret;
2647 /* XXX: need to add more checks here */
2648 if (pid < 2) return -EPERM;
2650 if (pid != task_pid_vnr(current)) {
2652 read_lock(&tasklist_lock);
2654 p = find_task_by_vpid(pid);
2656 /* make sure task cannot go away while we operate on it */
2657 if (p) get_task_struct(p);
2659 read_unlock(&tasklist_lock);
2661 if (p == NULL) return -ESRCH;
2664 ret = pfm_task_incompatible(ctx, p);
2665 if (ret == 0) {
2666 *task = p;
2667 } else if (p != current) {
2668 pfm_put_task(p);
2670 return ret;
2675 static int
2676 pfm_context_create(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
2678 pfarg_context_t *req = (pfarg_context_t *)arg;
2679 struct file *filp;
2680 struct path path;
2681 int ctx_flags;
2682 int fd;
2683 int ret;
2685 /* let's check the arguments first */
2686 ret = pfarg_is_sane(current, req);
2687 if (ret < 0)
2688 return ret;
2690 ctx_flags = req->ctx_flags;
2692 ret = -ENOMEM;
2694 fd = get_unused_fd();
2695 if (fd < 0)
2696 return fd;
2698 ctx = pfm_context_alloc(ctx_flags);
2699 if (!ctx)
2700 goto error;
2702 filp = pfm_alloc_file(ctx);
2703 if (IS_ERR(filp)) {
2704 ret = PTR_ERR(filp);
2705 goto error_file;
2708 req->ctx_fd = ctx->ctx_fd = fd;
2711 * does the user want to sample?
2713 if (pfm_uuid_cmp(req->ctx_smpl_buf_id, pfm_null_uuid)) {
2714 ret = pfm_setup_buffer_fmt(current, filp, ctx, ctx_flags, 0, req);
2715 if (ret)
2716 goto buffer_error;
2719 DPRINT(("ctx=%p flags=0x%x system=%d notify_block=%d excl_idle=%d no_msg=%d ctx_fd=%d\n",
2720 ctx,
2721 ctx_flags,
2722 ctx->ctx_fl_system,
2723 ctx->ctx_fl_block,
2724 ctx->ctx_fl_excl_idle,
2725 ctx->ctx_fl_no_msg,
2726 ctx->ctx_fd));
2729 * initialize soft PMU state
2731 pfm_reset_pmu_state(ctx);
2733 fd_install(fd, filp);
2735 return 0;
2737 buffer_error:
2738 path = filp->f_path;
2739 put_filp(filp);
2740 path_put(&path);
2742 if (ctx->ctx_buf_fmt) {
2743 pfm_buf_fmt_exit(ctx->ctx_buf_fmt, current, NULL, regs);
2745 error_file:
2746 pfm_context_free(ctx);
2748 error:
2749 put_unused_fd(fd);
2750 return ret;
2753 static inline unsigned long
2754 pfm_new_counter_value (pfm_counter_t *reg, int is_long_reset)
2756 unsigned long val = is_long_reset ? reg->long_reset : reg->short_reset;
2757 unsigned long new_seed, old_seed = reg->seed, mask = reg->mask;
2758 extern unsigned long carta_random32 (unsigned long seed);
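/*
 * PMD counters count up and interrupt on overflow, so the reset value
 * stored here is effectively the negative of the desired sampling
 * period. With PFM_REGFL_RANDOM, the period is perturbed by
 * subtracting a pseudo-random quantity (bounded by reg->mask) derived
 * from the per-counter seed via carta_random32().
 */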
2760 if (reg->flags & PFM_REGFL_RANDOM) {
2761 new_seed = carta_random32(old_seed);
2762 val -= (old_seed & mask); /* counter values are negative numbers! */
2763 if ((mask >> 32) != 0)
2764 /* construct a full 64-bit random value: */
2765 new_seed |= carta_random32(old_seed >> 32) << 32;
2766 reg->seed = new_seed;
2768 reg->lval = val;
2769 return val;
2772 static void
2773 pfm_reset_regs_masked(pfm_context_t *ctx, unsigned long *ovfl_regs, int is_long_reset)
2775 unsigned long mask = ovfl_regs[0];
2776 unsigned long reset_others = 0UL;
2777 unsigned long val;
2778 int i;
2781 * now restore reset value on sampling overflowed counters
2783 mask >>= PMU_FIRST_COUNTER;
2784 for(i = PMU_FIRST_COUNTER; mask; i++, mask >>= 1) {
2786 if ((mask & 0x1UL) == 0UL) continue;
2788 ctx->ctx_pmds[i].val = val = pfm_new_counter_value(ctx->ctx_pmds+ i, is_long_reset);
2789 reset_others |= ctx->ctx_pmds[i].reset_pmds[0];
2791 DPRINT_ovfl((" %s reset ctx_pmds[%d]=%lx\n", is_long_reset ? "long" : "short", i, val));
2795 * Now take care of resetting the other registers
2797 for(i = 0; reset_others; i++, reset_others >>= 1) {
2799 if ((reset_others & 0x1) == 0) continue;
2801 ctx->ctx_pmds[i].val = val = pfm_new_counter_value(ctx->ctx_pmds + i, is_long_reset);
2803 DPRINT_ovfl(("%s reset_others pmd[%d]=%lx\n",
2804 is_long_reset ? "long" : "short", i, val));
2808 static void
2809 pfm_reset_regs(pfm_context_t *ctx, unsigned long *ovfl_regs, int is_long_reset)
2811 unsigned long mask = ovfl_regs[0];
2812 unsigned long reset_others = 0UL;
2813 unsigned long val;
2814 int i;
2816 DPRINT_ovfl(("ovfl_regs=0x%lx is_long_reset=%d\n", ovfl_regs[0], is_long_reset));
2818 if (ctx->ctx_state == PFM_CTX_MASKED) {
2819 pfm_reset_regs_masked(ctx, ovfl_regs, is_long_reset);
2820 return;
2824 * now restore reset value on sampling overflowed counters
2826 mask >>= PMU_FIRST_COUNTER;
2827 for(i = PMU_FIRST_COUNTER; mask; i++, mask >>= 1) {
2829 if ((mask & 0x1UL) == 0UL) continue;
2831 val = pfm_new_counter_value(ctx->ctx_pmds+ i, is_long_reset);
2832 reset_others |= ctx->ctx_pmds[i].reset_pmds[0];
2834 DPRINT_ovfl((" %s reset ctx_pmds[%d]=%lx\n", is_long_reset ? "long" : "short", i, val));
2836 pfm_write_soft_counter(ctx, i, val);
2840 * Now take care of resetting the other registers
2842 for(i = 0; reset_others; i++, reset_others >>= 1) {
2844 if ((reset_others & 0x1) == 0) continue;
2846 val = pfm_new_counter_value(ctx->ctx_pmds + i, is_long_reset);
2848 if (PMD_IS_COUNTING(i)) {
2849 pfm_write_soft_counter(ctx, i, val);
2850 } else {
2851 ia64_set_pmd(i, val);
2853 DPRINT_ovfl(("%s reset_others pmd[%d]=%lx\n",
2854 is_long_reset ? "long" : "short", i, val));
2856 ia64_srlz_d();
2859 static int
2860 pfm_write_pmcs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
2862 struct task_struct *task;
2863 pfarg_reg_t *req = (pfarg_reg_t *)arg;
2864 unsigned long value, pmc_pm;
2865 unsigned long smpl_pmds, reset_pmds, impl_pmds;
2866 unsigned int cnum, reg_flags, flags, pmc_type;
2867 int i, can_access_pmu = 0, is_loaded, is_system, expert_mode;
2868 int is_monitor, is_counting, state;
2869 int ret = -EINVAL;
2870 pfm_reg_check_t wr_func;
2871 #define PFM_CHECK_PMC_PM(x, y, z) ((x)->ctx_fl_system ^ PMC_PM(y, z))
2873 state = ctx->ctx_state;
2874 is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
2875 is_system = ctx->ctx_fl_system;
2876 task = ctx->ctx_task;
2877 impl_pmds = pmu_conf->impl_pmds[0];
2879 if (state == PFM_CTX_ZOMBIE) return -EINVAL;
2881 if (is_loaded) {
2883 * In system wide and when the context is loaded, access can only happen
2884 * when the caller is running on the CPU being monitored by the session.
2885 * It does not have to be the owner (ctx_task) of the context per se.
2887 if (is_system && ctx->ctx_cpu != smp_processor_id()) {
2888 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
2889 return -EBUSY;
2891 can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;
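/*
 * can_access_pmu is set when the PMU hardware can be touched directly
 * from this CPU: either the context's task currently owns the local
 * PMU, or this is the system-wide session bound to this CPU. Otherwise
 * the new values are only staged in the context (and in th_pmcs[] for
 * per-task sessions) and reach the hardware at the next reload.
 */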
2893 expert_mode = pfm_sysctl.expert_mode;
2895 for (i = 0; i < count; i++, req++) {
2897 cnum = req->reg_num;
2898 reg_flags = req->reg_flags;
2899 value = req->reg_value;
2900 smpl_pmds = req->reg_smpl_pmds[0];
2901 reset_pmds = req->reg_reset_pmds[0];
2902 flags = 0;
2905 if (cnum >= PMU_MAX_PMCS) {
2906 DPRINT(("pmc%u is invalid\n", cnum));
2907 goto error;
2910 pmc_type = pmu_conf->pmc_desc[cnum].type;
2911 pmc_pm = (value >> pmu_conf->pmc_desc[cnum].pm_pos) & 0x1;
2912 is_counting = (pmc_type & PFM_REG_COUNTING) == PFM_REG_COUNTING ? 1 : 0;
2913 is_monitor = (pmc_type & PFM_REG_MONITOR) == PFM_REG_MONITOR ? 1 : 0;
2916 * we reject all non implemented PMC as well
2917 * as attempts to modify PMC[0-3] which are used
2918 * as status registers by the PMU
2920 if ((pmc_type & PFM_REG_IMPL) == 0 || (pmc_type & PFM_REG_CONTROL) == PFM_REG_CONTROL) {
2921 DPRINT(("pmc%u is unimplemented or no-access pmc_type=%x\n", cnum, pmc_type));
2922 goto error;
2924 wr_func = pmu_conf->pmc_desc[cnum].write_check;
2926 * If the PMC is a monitor, then if the value is not the default:
2927 * - system-wide session: PMCx.pm=1 (privileged monitor)
2928 * - per-task : PMCx.pm=0 (user monitor)
2930 if (is_monitor && value != PMC_DFL_VAL(cnum) && is_system ^ pmc_pm) {
2931 DPRINT(("pmc%u pmc_pm=%lu is_system=%d\n",
2932 cnum,
2933 pmc_pm,
2934 is_system));
2935 goto error;
2938 if (is_counting) {
2940 * enforce generation of overflow interrupt. Necessary on all
2941 * CPUs.
2943 value |= 1 << PMU_PMC_OI;
2945 if (reg_flags & PFM_REGFL_OVFL_NOTIFY) {
2946 flags |= PFM_REGFL_OVFL_NOTIFY;
2949 if (reg_flags & PFM_REGFL_RANDOM) flags |= PFM_REGFL_RANDOM;
2951 /* verify validity of smpl_pmds */
2952 if ((smpl_pmds & impl_pmds) != smpl_pmds) {
2953 DPRINT(("invalid smpl_pmds 0x%lx for pmc%u\n", smpl_pmds, cnum));
2954 goto error;
2957 /* verify validity of reset_pmds */
2958 if ((reset_pmds & impl_pmds) != reset_pmds) {
2959 DPRINT(("invalid reset_pmds 0x%lx for pmc%u\n", reset_pmds, cnum));
2960 goto error;
2962 } else {
2963 if (reg_flags & (PFM_REGFL_OVFL_NOTIFY|PFM_REGFL_RANDOM)) {
2964 DPRINT(("cannot set ovfl_notify or random on pmc%u\n", cnum));
2965 goto error;
2967 /* eventid on non-counting monitors is ignored */
2971 * execute write checker, if any
2973 if (likely(expert_mode == 0 && wr_func)) {
2974 ret = (*wr_func)(task, ctx, cnum, &value, regs);
2975 if (ret) goto error;
2976 ret = -EINVAL;
2980 * no error on this register
2982 PFM_REG_RETFLAG_SET(req->reg_flags, 0);
2985 * Now we commit the changes to the software state
2989 * update overflow information
2991 if (is_counting) {
2993 * full flag update each time a register is programmed
2995 ctx->ctx_pmds[cnum].flags = flags;
2997 ctx->ctx_pmds[cnum].reset_pmds[0] = reset_pmds;
2998 ctx->ctx_pmds[cnum].smpl_pmds[0] = smpl_pmds;
2999 ctx->ctx_pmds[cnum].eventid = req->reg_smpl_eventid;
3002 * Mark all PMDS to be accessed as used.
3004 * We do not keep track of PMC because we have to
3005 * systematically restore ALL of them.
3007 * We do not update the used_monitors mask, because
3008 * if we have not programmed them, they will be in
3009 * a quiescent state, therefore we will not need to
3010 * mask/restore them when the context is MASKED.
3012 CTX_USED_PMD(ctx, reset_pmds);
3013 CTX_USED_PMD(ctx, smpl_pmds);
3015 * make sure we do not try to reset on
3016 * restart because we have established new values
3018 if (state == PFM_CTX_MASKED) ctx->ctx_ovfl_regs[0] &= ~1UL << cnum;
3021 * Needed in case the user does not initialize the equivalent
3022 * PMD. Clearing is done indirectly via pfm_reset_pmu_state() so there is no
3023 * possible leak here.
3025 CTX_USED_PMD(ctx, pmu_conf->pmc_desc[cnum].dep_pmd[0]);
3028 * keep track of the monitor PMC that we are using.
3029 * we save the value of the pmc in ctx_pmcs[] and if
3030 * the monitoring is not stopped for the context we also
3031 * place it in the saved state area so that it will be
3032 * picked up later by the context switch code.
3034 * The value in ctx_pmcs[] can only be changed in pfm_write_pmcs().
3036 * The value in th_pmcs[] may be modified on overflow, i.e., when
3037 * monitoring needs to be stopped.
3039 if (is_monitor) CTX_USED_MONITOR(ctx, 1UL << cnum);
3042 * update context state
3044 ctx->ctx_pmcs[cnum] = value;
3046 if (is_loaded) {
3048 * write thread state
3050 if (is_system == 0) ctx->th_pmcs[cnum] = value;
3053 * write hardware register if we can
3055 if (can_access_pmu) {
3056 ia64_set_pmc(cnum, value);
3058 #ifdef CONFIG_SMP
3059 else {
3061 * per-task SMP only here
3063 * we are guaranteed that the task is not running on the other CPU,
3064 * we indicate that this PMD will need to be reloaded if the task
3065 * is rescheduled on the CPU it ran last on.
3067 ctx->ctx_reload_pmcs[0] |= 1UL << cnum;
3069 #endif
3072 DPRINT(("pmc[%u]=0x%lx ld=%d apmu=%d flags=0x%x all_pmcs=0x%lx used_pmds=0x%lx eventid=%ld smpl_pmds=0x%lx reset_pmds=0x%lx reloads_pmcs=0x%lx used_monitors=0x%lx ovfl_regs=0x%lx\n",
3073 cnum,
3074 value,
3075 is_loaded,
3076 can_access_pmu,
3077 flags,
3078 ctx->ctx_all_pmcs[0],
3079 ctx->ctx_used_pmds[0],
3080 ctx->ctx_pmds[cnum].eventid,
3081 smpl_pmds,
3082 reset_pmds,
3083 ctx->ctx_reload_pmcs[0],
3084 ctx->ctx_used_monitors[0],
3085 ctx->ctx_ovfl_regs[0]));
3089 * make sure the changes are visible
3091 if (can_access_pmu) ia64_srlz_d();
3093 return 0;
3094 error:
3095 PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
3096 return ret;
3099 static int
3100 pfm_write_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3102 struct task_struct *task;
3103 pfarg_reg_t *req = (pfarg_reg_t *)arg;
3104 unsigned long value, hw_value, ovfl_mask;
3105 unsigned int cnum;
3106 int i, can_access_pmu = 0, state;
3107 int is_counting, is_loaded, is_system, expert_mode;
3108 int ret = -EINVAL;
3109 pfm_reg_check_t wr_func;
3112 state = ctx->ctx_state;
3113 is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
3114 is_system = ctx->ctx_fl_system;
3115 ovfl_mask = pmu_conf->ovfl_val;
3116 task = ctx->ctx_task;
3118 if (unlikely(state == PFM_CTX_ZOMBIE)) return -EINVAL;
3121 * on both UP and SMP, we can only write to the PMD when the task is
3122 * the owner of the local PMU.
3124 if (likely(is_loaded)) {
3126 * In system wide and when the context is loaded, access can only happen
3127 * when the caller is running on the CPU being monitored by the session.
3128 * It does not have to be the owner (ctx_task) of the context per se.
3130 if (unlikely(is_system && ctx->ctx_cpu != smp_processor_id())) {
3131 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
3132 return -EBUSY;
3134 can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;
3136 expert_mode = pfm_sysctl.expert_mode;
3138 for (i = 0; i < count; i++, req++) {
3140 cnum = req->reg_num;
3141 value = req->reg_value;
3143 if (!PMD_IS_IMPL(cnum)) {
3144 DPRINT(("pmd[%u] is unimplemented or invalid\n", cnum));
3145 goto abort_mission;
3147 is_counting = PMD_IS_COUNTING(cnum);
3148 wr_func = pmu_conf->pmd_desc[cnum].write_check;
3151 * execute write checker, if any
3153 if (unlikely(expert_mode == 0 && wr_func)) {
3154 unsigned long v = value;
3156 ret = (*wr_func)(task, ctx, cnum, &v, regs);
3157 if (ret) goto abort_mission;
3159 value = v;
3160 ret = -EINVAL;
3164 * no error on this register
3166 PFM_REG_RETFLAG_SET(req->reg_flags, 0);
3169 * now commit changes to software state
3171 hw_value = value;
3174 * update virtualized (64bits) counter
3176 if (is_counting) {
3178 * write context state
3180 ctx->ctx_pmds[cnum].lval = value;
3183 * when the context is loaded we use the split value
3185 if (is_loaded) {
3186 hw_value = value & ovfl_mask;
3187 value = value & ~ovfl_mask;
3191 * update reset values (not just for counters)
3193 ctx->ctx_pmds[cnum].long_reset = req->reg_long_reset;
3194 ctx->ctx_pmds[cnum].short_reset = req->reg_short_reset;
3197 * update randomization parameters (not just for counters)
3199 ctx->ctx_pmds[cnum].seed = req->reg_random_seed;
3200 ctx->ctx_pmds[cnum].mask = req->reg_random_mask;
3203 * update context value
3205 ctx->ctx_pmds[cnum].val = value;
3208 * Keep track of what we use
3210 * We do not keep track of PMC because we have to
3211 * systematically restore ALL of them.
3213 CTX_USED_PMD(ctx, PMD_PMD_DEP(cnum));
3216 * mark this PMD register used as well
3218 CTX_USED_PMD(ctx, RDEP(cnum));
3221 * make sure we do not try to reset on
3222 * restart because we have established new values
3224 if (is_counting && state == PFM_CTX_MASKED) {
3225 ctx->ctx_ovfl_regs[0] &= ~1UL << cnum;
3228 if (is_loaded) {
3230 * write thread state
3232 if (is_system == 0) ctx->th_pmds[cnum] = hw_value;
3235 * write hardware register if we can
3237 if (can_access_pmu) {
3238 ia64_set_pmd(cnum, hw_value);
3239 } else {
3240 #ifdef CONFIG_SMP
3242 * we are guaranteed that the task is not running on the other CPU,
3243 * we indicate that this PMD will need to be reloaded if the task
3244 * is rescheduled on the CPU it ran last on.
3246 ctx->ctx_reload_pmds[0] |= 1UL << cnum;
3247 #endif
3251 DPRINT(("pmd[%u]=0x%lx ld=%d apmu=%d, hw_value=0x%lx ctx_pmd=0x%lx short_reset=0x%lx "
3252 "long_reset=0x%lx notify=%c seed=0x%lx mask=0x%lx used_pmds=0x%lx reset_pmds=0x%lx reload_pmds=0x%lx all_pmds=0x%lx ovfl_regs=0x%lx\n",
3253 cnum,
3254 value,
3255 is_loaded,
3256 can_access_pmu,
3257 hw_value,
3258 ctx->ctx_pmds[cnum].val,
3259 ctx->ctx_pmds[cnum].short_reset,
3260 ctx->ctx_pmds[cnum].long_reset,
3261 PMC_OVFL_NOTIFY(ctx, cnum) ? 'Y':'N',
3262 ctx->ctx_pmds[cnum].seed,
3263 ctx->ctx_pmds[cnum].mask,
3264 ctx->ctx_used_pmds[0],
3265 ctx->ctx_pmds[cnum].reset_pmds[0],
3266 ctx->ctx_reload_pmds[0],
3267 ctx->ctx_all_pmds[0],
3268 ctx->ctx_ovfl_regs[0]));
3272 * make changes visible
3274 if (can_access_pmu) ia64_srlz_d();
3276 return 0;
3278 abort_mission:
3280 * for now, we have only one possibility for error
3282 PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
3283 return ret;
3287 * By way of PROTECT_CONTEXT(), interrupts are masked while we are in this function.
3288 * Therefore we know we do not have to worry about the PMU overflow interrupt. If an
3289 * interrupt is delivered during the call, it will be kept pending until we leave, making
3290 * it appear as if it had been generated at the UNPROTECT_CONTEXT(). At least we are
3291 * guaranteed to return consistent data to the user; it may simply be old. It is not
3292 * trivial to treat the overflow while inside the call because you may end up in
3293 * some module sampling buffer code causing deadlocks.
3295 static int
3296 pfm_read_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3298 struct task_struct *task;
3299 unsigned long val = 0UL, lval, ovfl_mask, sval;
3300 pfarg_reg_t *req = (pfarg_reg_t *)arg;
3301 unsigned int cnum, reg_flags = 0;
3302 int i, can_access_pmu = 0, state;
3303 int is_loaded, is_system, is_counting, expert_mode;
3304 int ret = -EINVAL;
3305 pfm_reg_check_t rd_func;
3308 * access is possible when loaded only for
3309 * self-monitoring tasks or in UP mode
3312 state = ctx->ctx_state;
3313 is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
3314 is_system = ctx->ctx_fl_system;
3315 ovfl_mask = pmu_conf->ovfl_val;
3316 task = ctx->ctx_task;
3318 if (state == PFM_CTX_ZOMBIE) return -EINVAL;
3320 if (likely(is_loaded)) {
3322 * In system wide and when the context is loaded, access can only happen
3323 * when the caller is running on the CPU being monitored by the session.
3324 * It does not have to be the owner (ctx_task) of the context per se.
3326 if (unlikely(is_system && ctx->ctx_cpu != smp_processor_id())) {
3327 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
3328 return -EBUSY;
3331 * this can be true even when not self-monitoring, but only in UP
3333 can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;
3335 if (can_access_pmu) ia64_srlz_d();
3337 expert_mode = pfm_sysctl.expert_mode;
3339 DPRINT(("ld=%d apmu=%d ctx_state=%d\n",
3340 is_loaded,
3341 can_access_pmu,
3342 state));
3345 * on both UP and SMP, we can only read the PMD from the hardware register when
3346 * the task is the owner of the local PMU.
3349 for (i = 0; i < count; i++, req++) {
3351 cnum = req->reg_num;
3352 reg_flags = req->reg_flags;
3354 if (unlikely(!PMD_IS_IMPL(cnum))) goto error;
3356 * we can only read the registers that we use. That includes
3357 * the ones we explicitly initialize AND the ones we want included
3358 * in the sampling buffer (smpl_regs).
3360 * Having this restriction allows optimization in the ctxsw routine
3361 * without compromising security (leaks)
3363 if (unlikely(!CTX_IS_USED_PMD(ctx, cnum))) goto error;
3365 sval = ctx->ctx_pmds[cnum].val;
3366 lval = ctx->ctx_pmds[cnum].lval;
3367 is_counting = PMD_IS_COUNTING(cnum);
3370 * If the task is not the current one, then we check if the
3371 * PMU state is still in the local live register due to lazy ctxsw.
3372 * If true, then we read directly from the registers.
3374 if (can_access_pmu){
3375 val = ia64_get_pmd(cnum);
3376 } else {
3378 * context has been saved
3379 * if context is zombie, then task does not exist anymore.
3380 * In this case, we use the full value saved in the context (pfm_flush_regs()).
3382 val = is_loaded ? ctx->th_pmds[cnum] : 0UL;
3384 rd_func = pmu_conf->pmd_desc[cnum].read_check;
3386 if (is_counting) {
3388 * XXX: need to check for overflow when loaded
3390 val &= ovfl_mask;
3391 val += sval;
3395 * execute read checker, if any
3397 if (unlikely(expert_mode == 0 && rd_func)) {
3398 unsigned long v = val;
3399 ret = (*rd_func)(ctx->ctx_task, ctx, cnum, &v, regs);
3400 if (ret) goto error;
3401 val = v;
3402 ret = -EINVAL;
3405 PFM_REG_RETFLAG_SET(reg_flags, 0);
3407 DPRINT(("pmd[%u]=0x%lx\n", cnum, val));
3410 * update register return value, abort all if problem during copy.
3411 * we only modify the reg_flags field. no check mode is fine because
3412 * access has been verified upfront in sys_perfmonctl().
3414 req->reg_value = val;
3415 req->reg_flags = reg_flags;
3416 req->reg_last_reset_val = lval;
3419 return 0;
3421 error:
3422 PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
3423 return ret;
3427 long pfm_mod_write_pmcs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
3429 pfm_context_t *ctx;
3431 if (req == NULL) return -EINVAL;
3433 ctx = GET_PMU_CTX();
3435 if (ctx == NULL) return -EINVAL;
3438 * for now limit to current task, which is enough when calling
3439 * from overflow handler
3441 if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;
3443 return pfm_write_pmcs(ctx, req, nreq, regs);
3445 EXPORT_SYMBOL(pfm_mod_write_pmcs);
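/*
 * pfm_mod_write_pmcs()/pfm_mod_read_pmds() are the in-kernel entry
 * points meant for sampling-format modules, typically invoked from
 * their overflow handler. They operate on the context currently owned
 * by this CPU and, unless the session is system wide, only accept the
 * current task. For example, a format handler could program a PMC
 * with something like:
 *
 *	pfarg_reg_t req = { .reg_num = 4, .reg_value = val };
 *	pfm_mod_write_pmcs(current, &req, 1, regs);
 *
 * (illustrative values only; reg_num 4 is just an example PMC)
 */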
3448 long pfm_mod_read_pmds(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
3450 pfm_context_t *ctx;
3452 if (req == NULL) return -EINVAL;
3454 ctx = GET_PMU_CTX();
3456 if (ctx == NULL) return -EINVAL;
3459 * for now limit to current task, which is enough when calling
3460 * from overflow handler
3462 if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;
3464 return pfm_read_pmds(ctx, req, nreq, regs);
3466 EXPORT_SYMBOL(pfm_mod_read_pmds);
3469 * Only call this function when a process is trying to
3470 * write the debug registers (reading is always allowed)
3473 int pfm_use_debug_registers(struct task_struct *task)
3475 pfm_context_t *ctx = task->thread.pfm_context;
3476 unsigned long flags;
3477 int ret = 0;
3479 if (pmu_conf->use_rr_dbregs == 0) return 0;
3481 DPRINT(("called for [%d]\n", task_pid_nr(task)));
3484 * do it only once
3486 if (task->thread.flags & IA64_THREAD_DBG_VALID) return 0;
3489 * Even on SMP, we do not need to use an atomic here because
3490 * the only way in is via ptrace() and this is possible only when the
3491 * process is stopped. Even in the case where the ctxsw out is not totally
3492 * completed by the time we come here, there is no way the 'stopped' process
3493 * could be in the middle of fiddling with the pfm_write_ibr_dbr() routine.
3494 * So this is always safe.
3496 if (ctx && ctx->ctx_fl_using_dbreg == 1) return -1;
3498 LOCK_PFS(flags);
3501 * We cannot allow setting breakpoints when system wide monitoring
3502 * sessions are using the debug registers.
3504 if (pfm_sessions.pfs_sys_use_dbregs > 0)
3505 ret = -1;
3506 else
3507 pfm_sessions.pfs_ptrace_use_dbregs++;
3509 DPRINT(("ptrace_use_dbregs=%u sys_use_dbregs=%u by [%d] ret = %d\n",
3510 pfm_sessions.pfs_ptrace_use_dbregs,
3511 pfm_sessions.pfs_sys_use_dbregs,
3512 task_pid_nr(task), ret));
3514 UNLOCK_PFS(flags);
3516 return ret;
3520 * This function is called for every task that exits with the
3521 * IA64_THREAD_DBG_VALID set. This indicates a task which was
3522 * able to use the debug registers for debugging purposes via
3523 * ptrace(). Therefore we know it was not using them for
3524 * performance monitoring, so we only decrement the number
3525 * of "ptraced" debug register users to keep the count up to date
3528 int pfm_release_debug_registers(struct task_struct *task)
3530 unsigned long flags;
3531 int ret;
3533 if (pmu_conf->use_rr_dbregs == 0) return 0;
3535 LOCK_PFS(flags);
3536 if (pfm_sessions.pfs_ptrace_use_dbregs == 0) {
3537 printk(KERN_ERR "perfmon: invalid release for [%d] ptrace_use_dbregs=0\n", task_pid_nr(task));
3538 ret = -1;
3539 } else {
3540 pfm_sessions.pfs_ptrace_use_dbregs--;
3541 ret = 0;
3543 UNLOCK_PFS(flags);
3545 return ret;
3548 static int
3549 pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3551 struct task_struct *task;
3552 pfm_buffer_fmt_t *fmt;
3553 pfm_ovfl_ctrl_t rst_ctrl;
3554 int state, is_system;
3555 int ret = 0;
3557 state = ctx->ctx_state;
3558 fmt = ctx->ctx_buf_fmt;
3559 is_system = ctx->ctx_fl_system;
3560 task = PFM_CTX_TASK(ctx);
3562 switch(state) {
3563 case PFM_CTX_MASKED:
3564 break;
3565 case PFM_CTX_LOADED:
3566 if (CTX_HAS_SMPL(ctx) && fmt->fmt_restart_active) break;
3567 /* fall through */
3568 case PFM_CTX_UNLOADED:
3569 case PFM_CTX_ZOMBIE:
3570 DPRINT(("invalid state=%d\n", state));
3571 return -EBUSY;
3572 default:
3573 DPRINT(("state=%d, cannot operate (no active_restart handler)\n", state));
3574 return -EINVAL;
3578 * In system wide and when the context is loaded, access can only happen
3579 * when the caller is running on the CPU being monitored by the session.
3580 * It does not have to be the owner (ctx_task) of the context per se.
3582 if (is_system && ctx->ctx_cpu != smp_processor_id()) {
3583 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
3584 return -EBUSY;
3587 /* sanity check */
3588 if (unlikely(task == NULL)) {
3589 printk(KERN_ERR "perfmon: [%d] pfm_restart no task\n", task_pid_nr(current));
3590 return -EINVAL;
3593 if (task == current || is_system) {
3595 fmt = ctx->ctx_buf_fmt;
3597 DPRINT(("restarting self %d ovfl=0x%lx\n",
3598 task_pid_nr(task),
3599 ctx->ctx_ovfl_regs[0]));
3601 if (CTX_HAS_SMPL(ctx)) {
3603 prefetch(ctx->ctx_smpl_hdr);
3605 rst_ctrl.bits.mask_monitoring = 0;
3606 rst_ctrl.bits.reset_ovfl_pmds = 0;
3608 if (state == PFM_CTX_LOADED)
3609 ret = pfm_buf_fmt_restart_active(fmt, task, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
3610 else
3611 ret = pfm_buf_fmt_restart(fmt, task, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
3612 } else {
3613 rst_ctrl.bits.mask_monitoring = 0;
3614 rst_ctrl.bits.reset_ovfl_pmds = 1;
3617 if (ret == 0) {
3618 if (rst_ctrl.bits.reset_ovfl_pmds)
3619 pfm_reset_regs(ctx, ctx->ctx_ovfl_regs, PFM_PMD_LONG_RESET);
3621 if (rst_ctrl.bits.mask_monitoring == 0) {
3622 DPRINT(("resuming monitoring for [%d]\n", task_pid_nr(task)));
3624 if (state == PFM_CTX_MASKED) pfm_restore_monitoring(task);
3625 } else {
3626 DPRINT(("keeping monitoring stopped for [%d]\n", task_pid_nr(task)));
3628 // cannot use pfm_stop_monitoring(task, regs);
3632 * clear overflowed PMD mask to remove any stale information
3634 ctx->ctx_ovfl_regs[0] = 0UL;
3637 * back to LOADED state
3639 ctx->ctx_state = PFM_CTX_LOADED;
3642 * XXX: not really useful for self monitoring
3644 ctx->ctx_fl_can_restart = 0;
3646 return 0;
3650 * restart another task
3654 * When PFM_CTX_MASKED, we cannot issue a restart before the previous
3655 * one is seen by the task.
3657 if (state == PFM_CTX_MASKED) {
3658 if (ctx->ctx_fl_can_restart == 0) return -EINVAL;
3660 * will prevent subsequent restart before this one is
3661 * seen by the other task
3663 ctx->ctx_fl_can_restart = 0;
3667 * if blocking, then post the semaphore if PFM_CTX_MASKED, i.e.
3668 * the task is blocked or on its way to block. That's the normal
3669 * restart path. If the monitoring is not masked, then the task
3670 * can be actively monitoring and we cannot directly intervene.
3671 * Therefore we use the trap mechanism to catch the task and
3672 * force it to reset the buffer/reset PMDs.
3674 * if non-blocking, then we ensure that the task will go into
3675 * pfm_handle_work() before returning to user mode.
3677 * We cannot explicitly reset another task, it MUST always
3678 * be done by the task itself. This works for system wide because
3679 * the tool that is controlling the session is logically doing
3680 * "self-monitoring".
3682 if (CTX_OVFL_NOBLOCK(ctx) == 0 && state == PFM_CTX_MASKED) {
3683 DPRINT(("unblocking [%d]\n", task_pid_nr(task)));
3684 complete(&ctx->ctx_restart_done);
3685 } else {
3686 DPRINT(("[%d] armed exit trap\n", task_pid_nr(task)));
3688 ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_RESET;
3690 PFM_SET_WORK_PENDING(task, 1);
3692 set_notify_resume(task);
3695 * XXX: send reschedule if task runs on another CPU
3698 return 0;
3701 static int
3702 pfm_debug(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3704 unsigned int m = *(unsigned int *)arg;
3706 pfm_sysctl.debug = m == 0 ? 0 : 1;
3708 printk(KERN_INFO "perfmon debugging %s (timing reset)\n", pfm_sysctl.debug ? "on" : "off");
3710 if (m == 0) {
3711 memset(pfm_stats, 0, sizeof(pfm_stats));
3712 for(m=0; m < NR_CPUS; m++) pfm_stats[m].pfm_ovfl_intr_cycles_min = ~0UL;
3714 return 0;
3718 * arg can be NULL and count can be zero for this function
3720 static int
3721 pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3723 struct thread_struct *thread = NULL;
3724 struct task_struct *task;
3725 pfarg_dbreg_t *req = (pfarg_dbreg_t *)arg;
3726 unsigned long flags;
3727 dbreg_t dbreg;
3728 unsigned int rnum;
3729 int first_time;
3730 int ret = 0, state;
3731 int i, can_access_pmu = 0;
3732 int is_system, is_loaded;
3734 if (pmu_conf->use_rr_dbregs == 0) return -EINVAL;
3736 state = ctx->ctx_state;
3737 is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
3738 is_system = ctx->ctx_fl_system;
3739 task = ctx->ctx_task;
3741 if (state == PFM_CTX_ZOMBIE) return -EINVAL;
3744 * on both UP and SMP, we can only write to the debug registers when the task is
3745 * the owner of the local PMU.
3747 if (is_loaded) {
3748 thread = &task->thread;
3750 * In system wide and when the context is loaded, access can only happen
3751 * when the caller is running on the CPU being monitored by the session.
3752 * It does not have to be the owner (ctx_task) of the context per se.
3754 if (unlikely(is_system && ctx->ctx_cpu != smp_processor_id())) {
3755 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
3756 return -EBUSY;
3758 can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;
3762 * we do not need to check for ipsr.db because we do clear ibr.x, dbr.r, and dbr.w
3763 * ensuring that no real breakpoint can be installed via this call.
3765 * IMPORTANT: regs can be NULL in this function
3768 first_time = ctx->ctx_fl_using_dbreg == 0;
3771 * don't bother if we are loaded and task is being debugged
3773 if (is_loaded && (thread->flags & IA64_THREAD_DBG_VALID) != 0) {
3774 DPRINT(("debug registers already in use for [%d]\n", task_pid_nr(task)));
3775 return -EBUSY;
3779 * check for debug registers in system wide mode
3781 * Even though a check is done in pfm_context_load(),
3782 * we must repeat it here, in case the registers are
3783 * written after the context is loaded
3785 if (is_loaded) {
3786 LOCK_PFS(flags);
3788 if (first_time && is_system) {
3789 if (pfm_sessions.pfs_ptrace_use_dbregs)
3790 ret = -EBUSY;
3791 else
3792 pfm_sessions.pfs_sys_use_dbregs++;
3794 UNLOCK_PFS(flags);
3797 if (ret != 0) return ret;
3800 * mark ourself as user of the debug registers for
3801 * perfmon purposes.
3803 ctx->ctx_fl_using_dbreg = 1;
3806 * clear hardware registers to make sure we don't
3807 * pick up stale state.
3809 * for a system wide session, we do not use
3810 * thread.dbr, thread.ibr because this process
3811 * never leaves the current CPU and the state
3812 * is shared by all processes running on it
3814 if (first_time && can_access_pmu) {
3815 DPRINT(("[%d] clearing ibrs, dbrs\n", task_pid_nr(task)));
3816 for (i=0; i < pmu_conf->num_ibrs; i++) {
3817 ia64_set_ibr(i, 0UL);
3818 ia64_dv_serialize_instruction();
3820 ia64_srlz_i();
3821 for (i=0; i < pmu_conf->num_dbrs; i++) {
3822 ia64_set_dbr(i, 0UL);
3823 ia64_dv_serialize_data();
3825 ia64_srlz_d();
3829 * Now install the values into the registers
3831 for (i = 0; i < count; i++, req++) {
3833 rnum = req->dbreg_num;
3834 dbreg.val = req->dbreg_value;
3836 ret = -EINVAL;
3838 if ((mode == PFM_CODE_RR && rnum >= PFM_NUM_IBRS) || ((mode == PFM_DATA_RR) && rnum >= PFM_NUM_DBRS)) {
3839 DPRINT(("invalid register %u val=0x%lx mode=%d i=%d count=%d\n",
3840 rnum, dbreg.val, mode, i, count));
3842 goto abort_mission;
3846 * make sure we do not install an enabled breakpoint
3848 if (rnum & 0x1) {
3849 if (mode == PFM_CODE_RR)
3850 dbreg.ibr.ibr_x = 0;
3851 else
3852 dbreg.dbr.dbr_r = dbreg.dbr.dbr_w = 0;
3855 PFM_REG_RETFLAG_SET(req->dbreg_flags, 0);
3858 * Debug registers, just like PMC, can only be modified
3859 * by a kernel call. Moreover, perfmon() accesses to those
3860 * registers are centralized in this routine. The hardware
3861 * does not modify the value of these registers, therefore,
3862 * if we save them as they are written, we can avoid having
3863 * to save them on context switch out. This is made possible
3864 * by the fact that when perfmon uses debug registers, ptrace()
3865 * won't be able to modify them concurrently.
3867 if (mode == PFM_CODE_RR) {
3868 CTX_USED_IBR(ctx, rnum);
3870 if (can_access_pmu) {
3871 ia64_set_ibr(rnum, dbreg.val);
3872 ia64_dv_serialize_instruction();
3875 ctx->ctx_ibrs[rnum] = dbreg.val;
3877 DPRINT(("write ibr%u=0x%lx used_ibrs=0x%x ld=%d apmu=%d\n",
3878 rnum, dbreg.val, ctx->ctx_used_ibrs[0], is_loaded, can_access_pmu));
3879 } else {
3880 CTX_USED_DBR(ctx, rnum);
3882 if (can_access_pmu) {
3883 ia64_set_dbr(rnum, dbreg.val);
3884 ia64_dv_serialize_data();
3886 ctx->ctx_dbrs[rnum] = dbreg.val;
3888 DPRINT(("write dbr%u=0x%lx used_dbrs=0x%x ld=%d apmu=%d\n",
3889 rnum, dbreg.val, ctx->ctx_used_dbrs[0], is_loaded, can_access_pmu));
3893 return 0;
3895 abort_mission:
3897 * in case it was our first attempt, we undo the global modifications
3899 if (first_time) {
3900 LOCK_PFS(flags);
3901 if (ctx->ctx_fl_system) {
3902 pfm_sessions.pfs_sys_use_dbregs--;
3904 UNLOCK_PFS(flags);
3905 ctx->ctx_fl_using_dbreg = 0;
3908 * install error return flag
3910 PFM_REG_RETFLAG_SET(req->dbreg_flags, PFM_REG_RETFL_EINVAL);
3912 return ret;
3915 static int
3916 pfm_write_ibrs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3918 return pfm_write_ibr_dbr(PFM_CODE_RR, ctx, arg, count, regs);
3921 static int
3922 pfm_write_dbrs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3924 return pfm_write_ibr_dbr(PFM_DATA_RR, ctx, arg, count, regs);
3928 long pfm_mod_write_ibrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
3930 pfm_context_t *ctx;
3932 if (req == NULL) return -EINVAL;
3934 ctx = GET_PMU_CTX();
3936 if (ctx == NULL) return -EINVAL;
3939 * for now limit to current task, which is enough when calling
3940 * from overflow handler
3942 if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;
3944 return pfm_write_ibrs(ctx, req, nreq, regs);
3946 EXPORT_SYMBOL(pfm_mod_write_ibrs);
3949 long pfm_mod_write_dbrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
3951 pfm_context_t *ctx;
3953 if (req == NULL) return -EINVAL;
3955 ctx = GET_PMU_CTX();
3957 if (ctx == NULL) return -EINVAL;
3960 * for now limit to current task, which is enough when calling
3961 * from overflow handler
3963 if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;
3965 return pfm_write_dbrs(ctx, req, nreq, regs);
3967 EXPORT_SYMBOL(pfm_mod_write_dbrs);
3970 static int
3971 pfm_get_features(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3973 pfarg_features_t *req = (pfarg_features_t *)arg;
3975 req->ft_version = PFM_VERSION;
3976 return 0;
3979 static int
3980 pfm_stop(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3982 struct pt_regs *tregs;
3983 struct task_struct *task = PFM_CTX_TASK(ctx);
3984 int state, is_system;
3986 state = ctx->ctx_state;
3987 is_system = ctx->ctx_fl_system;
3990 * context must be attached to issue the stop command (includes LOADED, MASKED, ZOMBIE)
3992 if (state == PFM_CTX_UNLOADED) return -EINVAL;
3995 * In system wide and when the context is loaded, access can only happen
3996 * when the caller is running on the CPU being monitored by the session.
3997 * It does not have to be the owner (ctx_task) of the context per se.
3999 if (is_system && ctx->ctx_cpu != smp_processor_id()) {
4000 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
4001 return -EBUSY;
4003 DPRINT(("task [%d] ctx_state=%d is_system=%d\n",
4004 task_pid_nr(PFM_CTX_TASK(ctx)),
4005 state,
4006 is_system));
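/*
 * Two distinct enable bits are involved: psr.pp/dcr.pp gate
 * system-wide (privileged) monitors, while psr.up gates per-task
 * (user) monitors. pfm_stop() clears the relevant bit both in the
 * live PSR and in the saved user-level state so monitoring stays
 * off after the return to user mode.
 */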
4008 * in system mode, we need to update the PMU directly
4009 * and the user level state of the caller, which may not
4010 * necessarily be the creator of the context.
4012 if (is_system) {
4014 * Update local PMU first
4016 * disable dcr pp
4018 ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) & ~IA64_DCR_PP);
4019 ia64_srlz_i();
4022 * update local cpuinfo
4024 PFM_CPUINFO_CLEAR(PFM_CPUINFO_DCR_PP);
4027 * stop monitoring, does srlz.i
4029 pfm_clear_psr_pp();
4032 * stop monitoring in the caller
4034 ia64_psr(regs)->pp = 0;
4036 return 0;
4039 * per-task mode
4042 if (task == current) {
4043 /* stop monitoring at kernel level */
4044 pfm_clear_psr_up();
4047 * stop monitoring at the user level
4049 ia64_psr(regs)->up = 0;
4050 } else {
4051 tregs = task_pt_regs(task);
4054 * stop monitoring at the user level
4056 ia64_psr(tregs)->up = 0;
4059 * monitoring disabled in kernel at next reschedule
4061 ctx->ctx_saved_psr_up = 0;
4062 DPRINT(("task=[%d]\n", task_pid_nr(task)));
4064 return 0;
4068 static int
4069 pfm_start(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
4071 struct pt_regs *tregs;
4072 int state, is_system;
4074 state = ctx->ctx_state;
4075 is_system = ctx->ctx_fl_system;
4077 if (state != PFM_CTX_LOADED) return -EINVAL;
4080 * In system wide and when the context is loaded, access can only happen
4081 * when the caller is running on the CPU being monitored by the session.
4082 * It does not have to be the owner (ctx_task) of the context per se.
4084 if (is_system && ctx->ctx_cpu != smp_processor_id()) {
4085 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
4086 return -EBUSY;
4090 * in system mode, we need to update the PMU directly
4091 * and the user level state of the caller, which may not
4092 * necessarily be the creator of the context.
4094 if (is_system) {
4097 * set user level psr.pp for the caller
4099 ia64_psr(regs)->pp = 1;
4102 * now update the local PMU and cpuinfo
4104 PFM_CPUINFO_SET(PFM_CPUINFO_DCR_PP);
4107 * start monitoring at kernel level
4109 pfm_set_psr_pp();
4111 /* enable dcr pp */
4112 ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) | IA64_DCR_PP);
4113 ia64_srlz_i();
4115 return 0;
4119 * per-process mode
4122 if (ctx->ctx_task == current) {
4124 /* start monitoring at kernel level */
4125 pfm_set_psr_up();
4128 * activate monitoring at user level
4130 ia64_psr(regs)->up = 1;
4132 } else {
4133 tregs = task_pt_regs(ctx->ctx_task);
4136 * start monitoring at the kernel level the next
4137 * time the task is scheduled
4139 ctx->ctx_saved_psr_up = IA64_PSR_UP;
4142 * activate monitoring at user level
4144 ia64_psr(tregs)->up = 1;
4146 return 0;
4149 static int
4150 pfm_get_pmc_reset(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
4152 pfarg_reg_t *req = (pfarg_reg_t *)arg;
4153 unsigned int cnum;
4154 int i;
4155 int ret = -EINVAL;
4157 for (i = 0; i < count; i++, req++) {
4159 cnum = req->reg_num;
4161 if (!PMC_IS_IMPL(cnum)) goto abort_mission;
4163 req->reg_value = PMC_DFL_VAL(cnum);
4165 PFM_REG_RETFLAG_SET(req->reg_flags, 0);
4167 DPRINT(("pmc_reset_val pmc[%u]=0x%lx\n", cnum, req->reg_value));
4169 return 0;
4171 abort_mission:
4172 PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
4173 return ret;
4176 static int
4177 pfm_check_task_exist(pfm_context_t *ctx)
4179 struct task_struct *g, *t;
4180 int ret = -ESRCH;
4182 read_lock(&tasklist_lock);
4184 do_each_thread (g, t) {
4185 if (t->thread.pfm_context == ctx) {
4186 ret = 0;
4187 goto out;
4189 } while_each_thread (g, t);
4190 out:
4191 read_unlock(&tasklist_lock);
4193 DPRINT(("pfm_check_task_exist: ret=%d ctx=%p\n", ret, ctx));
4195 return ret;
4198 static int
4199 pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
4201 struct task_struct *task;
4202 struct thread_struct *thread;
4203 struct pfm_context_t *old;
4204 unsigned long flags;
4205 #ifndef CONFIG_SMP
4206 struct task_struct *owner_task = NULL;
4207 #endif
4208 pfarg_load_t *req = (pfarg_load_t *)arg;
4209 unsigned long *pmcs_source, *pmds_source;
4210 int the_cpu;
4211 int ret = 0;
4212 int state, is_system, set_dbregs = 0;
4214 state = ctx->ctx_state;
4215 is_system = ctx->ctx_fl_system;
4217 * can only load from unloaded or terminated state
4219 if (state != PFM_CTX_UNLOADED) {
4220 DPRINT(("cannot load to [%d], invalid ctx_state=%d\n",
4221 req->load_pid,
4222 ctx->ctx_state));
4223 return -EBUSY;
4226 DPRINT(("load_pid [%d] using_dbreg=%d\n", req->load_pid, ctx->ctx_fl_using_dbreg));
4228 if (CTX_OVFL_NOBLOCK(ctx) == 0 && req->load_pid == current->pid) {
4229 DPRINT(("cannot use blocking mode on self\n"));
4230 return -EINVAL;
4233 ret = pfm_get_task(ctx, req->load_pid, &task);
4234 if (ret) {
4235 DPRINT(("load_pid [%d] get_task=%d\n", req->load_pid, ret));
4236 return ret;
4239 ret = -EINVAL;
4242 * system wide is self monitoring only
4244 if (is_system && task != current) {
4245 DPRINT(("system wide is self monitoring only load_pid=%d\n",
4246 req->load_pid));
4247 goto error;
4250 thread = &task->thread;
4252 ret = 0;
4254 * cannot load a context which is using range restrictions,
4255 * into a task that is being debugged.
4257 if (ctx->ctx_fl_using_dbreg) {
4258 if (thread->flags & IA64_THREAD_DBG_VALID) {
4259 ret = -EBUSY;
4260 DPRINT(("load_pid [%d] task is debugged, cannot load range restrictions\n", req->load_pid));
4261 goto error;
4263 LOCK_PFS(flags);
4265 if (is_system) {
4266 if (pfm_sessions.pfs_ptrace_use_dbregs) {
4267 DPRINT(("cannot load [%d] dbregs in use\n",
4268 task_pid_nr(task)));
4269 ret = -EBUSY;
4270 } else {
4271 pfm_sessions.pfs_sys_use_dbregs++;
4272 DPRINT(("load [%d] increased sys_use_dbreg=%u\n", task_pid_nr(task), pfm_sessions.pfs_sys_use_dbregs));
4273 set_dbregs = 1;
4277 UNLOCK_PFS(flags);
4279 if (ret) goto error;
4283 * SMP system-wide monitoring implies self-monitoring.
4285 * The programming model expects the task to
4286 * be pinned on a CPU throughout the session.
4287 * Here we take note of the current CPU at the
4288 * time the context is loaded. No call from
4289 * another CPU will be allowed.
4291 * The pinning via sched_setaffinity()
4292 * must be done by the calling task prior
4293 * to this call.
4295 * systemwide: keep track of CPU this session is supposed to run on
4297 the_cpu = ctx->ctx_cpu = smp_processor_id();
4299 ret = -EBUSY;
4301 * now reserve the session
4303 ret = pfm_reserve_session(current, is_system, the_cpu);
4304 if (ret) goto error;
4307 * task is necessarily stopped at this point.
4309 * If the previous context was zombie, then it got removed in
4310 * pfm_save_regs(). Therefore we should not see it here.
4311 * If we see a context, then this is an active context
4313 * XXX: needs to be atomic
4315 DPRINT(("before cmpxchg() old_ctx=%p new_ctx=%p\n",
4316 thread->pfm_context, ctx));
4318 ret = -EBUSY;
4319 old = ia64_cmpxchg(acq, &thread->pfm_context, NULL, ctx, sizeof(pfm_context_t *));
4320 if (old != NULL) {
4321 DPRINT(("load_pid [%d] already has a context\n", req->load_pid));
4322 goto error_unres;
4325 pfm_reset_msgq(ctx);
4327 ctx->ctx_state = PFM_CTX_LOADED;
4330 * link context to task
4332 ctx->ctx_task = task;
4334 if (is_system) {
4336 * we load as stopped
4338 PFM_CPUINFO_SET(PFM_CPUINFO_SYST_WIDE);
4339 PFM_CPUINFO_CLEAR(PFM_CPUINFO_DCR_PP);
4341 if (ctx->ctx_fl_excl_idle) PFM_CPUINFO_SET(PFM_CPUINFO_EXCL_IDLE);
4342 } else {
4343 thread->flags |= IA64_THREAD_PM_VALID;
4347 * propagate into thread-state
4349 pfm_copy_pmds(task, ctx);
4350 pfm_copy_pmcs(task, ctx);
4352 pmcs_source = ctx->th_pmcs;
4353 pmds_source = ctx->th_pmds;
4356 * always the case for system-wide
4358 if (task == current) {
4360 if (is_system == 0) {
4362 /* allow user level control */
4363 ia64_psr(regs)->sp = 0;
4364 DPRINT(("clearing psr.sp for [%d]\n", task_pid_nr(task)));
4366 SET_LAST_CPU(ctx, smp_processor_id());
4367 INC_ACTIVATION();
4368 SET_ACTIVATION(ctx);
4369 #ifndef CONFIG_SMP
4371 * push the other task out, if any
4373 owner_task = GET_PMU_OWNER();
4374 if (owner_task) pfm_lazy_save_regs(owner_task);
4375 #endif
4378 * load all PMD from ctx to PMU (as opposed to thread state)
4379 * restore all PMC from ctx to PMU
4381 pfm_restore_pmds(pmds_source, ctx->ctx_all_pmds[0]);
4382 pfm_restore_pmcs(pmcs_source, ctx->ctx_all_pmcs[0]);
4384 ctx->ctx_reload_pmcs[0] = 0UL;
4385 ctx->ctx_reload_pmds[0] = 0UL;
4388 * guaranteed safe by earlier check against DBG_VALID
4390 if (ctx->ctx_fl_using_dbreg) {
4391 pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
4392 pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
4395 * set new ownership
4397 SET_PMU_OWNER(task, ctx);
4399 DPRINT(("context loaded on PMU for [%d]\n", task_pid_nr(task)));
4400 } else {
4402 * when not current, task MUST be stopped, so this is safe
4404 regs = task_pt_regs(task);
4406 /* force a full reload */
4407 ctx->ctx_last_activation = PFM_INVALID_ACTIVATION;
4408 SET_LAST_CPU(ctx, -1);
4410 /* initial saved psr (stopped) */
4411 ctx->ctx_saved_psr_up = 0UL;
4412 ia64_psr(regs)->up = ia64_psr(regs)->pp = 0;
4415 ret = 0;
4417 error_unres:
4418 if (ret) pfm_unreserve_session(ctx, ctx->ctx_fl_system, the_cpu);
4419 error:
4421 * we must undo the dbregs setting (for system-wide)
4423 if (ret && set_dbregs) {
4424 LOCK_PFS(flags);
4425 pfm_sessions.pfs_sys_use_dbregs--;
4426 UNLOCK_PFS(flags);
4429 * release task, there is now a link with the context
4431 if (is_system == 0 && task != current) {
4432 pfm_put_task(task);
4434 if (ret == 0) {
4435 ret = pfm_check_task_exist(ctx);
4436 if (ret) {
4437 ctx->ctx_state = PFM_CTX_UNLOADED;
4438 ctx->ctx_task = NULL;
4442 return ret;
4446 * in this function, we do not need to increase the use count
4447 * for the task via get_task_struct(), because we hold the
4448 * context lock. If the task were to disappear while having
4449 * a context attached, it would go through pfm_exit_thread()
4450 * which also grabs the context lock and would therefore be blocked
4451 * until we are done here.
4453 static void pfm_flush_pmds(struct task_struct *, pfm_context_t *ctx);
4455 static int
4456 pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
4458 struct task_struct *task = PFM_CTX_TASK(ctx);
4459 struct pt_regs *tregs;
4460 int prev_state, is_system;
4461 int ret;
4463 DPRINT(("ctx_state=%d task [%d]\n", ctx->ctx_state, task ? task_pid_nr(task) : -1));
4465 prev_state = ctx->ctx_state;
4466 is_system = ctx->ctx_fl_system;
4469 * unload only when necessary
4471 if (prev_state == PFM_CTX_UNLOADED) {
4472 DPRINT(("ctx_state=%d, nothing to do\n", prev_state));
4473 return 0;
4477 * clear psr and dcr bits
4479 ret = pfm_stop(ctx, NULL, 0, regs);
4480 if (ret) return ret;
4482 ctx->ctx_state = PFM_CTX_UNLOADED;
4485 * in system mode, we need to update the PMU directly
4486 * and the user level state of the caller, which may not
4487 * necessarily be the creator of the context.
4489 if (is_system) {
4492 * Update cpuinfo
4494 * local PMU is taken care of in pfm_stop()
4496 PFM_CPUINFO_CLEAR(PFM_CPUINFO_SYST_WIDE);
4497 PFM_CPUINFO_CLEAR(PFM_CPUINFO_EXCL_IDLE);
4500 * save PMDs in context
4501 * release ownership
4503 pfm_flush_pmds(current, ctx);
4506 * at this point we are done with the PMU
4507 * so we can unreserve the resource.
4509 if (prev_state != PFM_CTX_ZOMBIE)
4510 pfm_unreserve_session(ctx, 1 , ctx->ctx_cpu);
4513 * disconnect context from task
4515 task->thread.pfm_context = NULL;
4517 * disconnect task from context
4519 ctx->ctx_task = NULL;
4522 * There is nothing more to cleanup here.
4524 return 0;
4528 * per-task mode
4530 tregs = task == current ? regs : task_pt_regs(task);
4532 if (task == current) {
4534 * cancel user level control
4536 ia64_psr(regs)->sp = 1;
4538 DPRINT(("setting psr.sp for [%d]\n", task_pid_nr(task)));
4541 * save PMDs to context
4542 * release ownership
4544 pfm_flush_pmds(task, ctx);
4547 * at this point we are done with the PMU
4548 * so we can unreserve the resource.
4550 * when state was ZOMBIE, we have already unreserved.
4552 if (prev_state != PFM_CTX_ZOMBIE)
4553 pfm_unreserve_session(ctx, 0 , ctx->ctx_cpu);
4556 * reset activation counter and psr
4558 ctx->ctx_last_activation = PFM_INVALID_ACTIVATION;
4559 SET_LAST_CPU(ctx, -1);
4562 * PMU state will not be restored
4564 task->thread.flags &= ~IA64_THREAD_PM_VALID;
4567 * break links between context and task
4569 task->thread.pfm_context = NULL;
4570 ctx->ctx_task = NULL;
4572 PFM_SET_WORK_PENDING(task, 0);
4574 ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_NONE;
4575 ctx->ctx_fl_can_restart = 0;
4576 ctx->ctx_fl_going_zombie = 0;
4578 DPRINT(("disconnected [%d] from context\n", task_pid_nr(task)));
4580 return 0;
4585 * called only from exit_thread(): task == current
4586 * we come here only if current has a context attached (loaded or masked)
4588 void
4589 pfm_exit_thread(struct task_struct *task)
4591 pfm_context_t *ctx;
4592 unsigned long flags;
4593 struct pt_regs *regs = task_pt_regs(task);
4594 int ret, state;
4595 int free_ok = 0;
4597 ctx = PFM_GET_CTX(task);
4599 PROTECT_CTX(ctx, flags);
4601 DPRINT(("state=%d task [%d]\n", ctx->ctx_state, task_pid_nr(task)));
4603 state = ctx->ctx_state;
4604 switch(state) {
4605 case PFM_CTX_UNLOADED:
4607 * only comes to this function if pfm_context is not NULL, i.e., cannot
4608 * be in unloaded state
4610 printk(KERN_ERR "perfmon: pfm_exit_thread [%d] ctx unloaded\n", task_pid_nr(task));
4611 break;
4612 case PFM_CTX_LOADED:
4613 case PFM_CTX_MASKED:
4614 ret = pfm_context_unload(ctx, NULL, 0, regs);
4615 if (ret) {
4616 printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task_pid_nr(task), state, ret);
4618 DPRINT(("ctx unloaded for current state was %d\n", state));
4620 pfm_end_notify_user(ctx);
4621 break;
4622 case PFM_CTX_ZOMBIE:
4623 ret = pfm_context_unload(ctx, NULL, 0, regs);
4624 if (ret) {
4625 printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task_pid_nr(task), state, ret);
4627 free_ok = 1;
4628 break;
4629 default:
4630 printk(KERN_ERR "perfmon: pfm_exit_thread [%d] unexpected state=%d\n", task_pid_nr(task), state);
4631 break;
4633 UNPROTECT_CTX(ctx, flags);
4635 { u64 psr = pfm_get_psr();
4636 BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP));
4637 BUG_ON(GET_PMU_OWNER());
4638 BUG_ON(ia64_psr(regs)->up);
4639 BUG_ON(ia64_psr(regs)->pp);
4643 * All memory free operations (especially for vmalloc'ed memory)
4644 * MUST be done with interrupts ENABLED.
4646 if (free_ok) pfm_context_free(ctx);
4650 * functions MUST be listed in the increasing order of their index (see perfmon.h)
4652 #define PFM_CMD(name, flags, arg_count, arg_type, getsz) { name, #name, flags, arg_count, sizeof(arg_type), getsz }
4653 #define PFM_CMD_S(name, flags) { name, #name, flags, 0, 0, NULL }
4654 #define PFM_CMD_PCLRWS (PFM_CMD_FD|PFM_CMD_ARG_RW|PFM_CMD_STOP)
4655 #define PFM_CMD_PCLRW (PFM_CMD_FD|PFM_CMD_ARG_RW)
4656 #define PFM_CMD_NONE { NULL, "no-cmd", 0, 0, 0, NULL}
4658 static pfm_cmd_desc_t pfm_cmd_tab[]={
4659 /* 0 */PFM_CMD_NONE,
4660 /* 1 */PFM_CMD(pfm_write_pmcs, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
4661 /* 2 */PFM_CMD(pfm_write_pmds, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
4662 /* 3 */PFM_CMD(pfm_read_pmds, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
4663 /* 4 */PFM_CMD_S(pfm_stop, PFM_CMD_PCLRWS),
4664 /* 5 */PFM_CMD_S(pfm_start, PFM_CMD_PCLRWS),
4665 /* 6 */PFM_CMD_NONE,
4666 /* 7 */PFM_CMD_NONE,
4667 /* 8 */PFM_CMD(pfm_context_create, PFM_CMD_ARG_RW, 1, pfarg_context_t, pfm_ctx_getsize),
4668 /* 9 */PFM_CMD_NONE,
4669 /* 10 */PFM_CMD_S(pfm_restart, PFM_CMD_PCLRW),
4670 /* 11 */PFM_CMD_NONE,
4671 /* 12 */PFM_CMD(pfm_get_features, PFM_CMD_ARG_RW, 1, pfarg_features_t, NULL),
4672 /* 13 */PFM_CMD(pfm_debug, 0, 1, unsigned int, NULL),
4673 /* 14 */PFM_CMD_NONE,
4674 /* 15 */PFM_CMD(pfm_get_pmc_reset, PFM_CMD_ARG_RW, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
4675 /* 16 */PFM_CMD(pfm_context_load, PFM_CMD_PCLRWS, 1, pfarg_load_t, NULL),
4676 /* 17 */PFM_CMD_S(pfm_context_unload, PFM_CMD_PCLRWS),
4677 /* 18 */PFM_CMD_NONE,
4678 /* 19 */PFM_CMD_NONE,
4679 /* 20 */PFM_CMD_NONE,
4680 /* 21 */PFM_CMD_NONE,
4681 /* 22 */PFM_CMD_NONE,
4682 /* 23 */PFM_CMD_NONE,
4683 /* 24 */PFM_CMD_NONE,
4684 /* 25 */PFM_CMD_NONE,
4685 /* 26 */PFM_CMD_NONE,
4686 /* 27 */PFM_CMD_NONE,
4687 /* 28 */PFM_CMD_NONE,
4688 /* 29 */PFM_CMD_NONE,
4689 /* 30 */PFM_CMD_NONE,
4690 /* 31 */PFM_CMD_NONE,
4691 /* 32 */PFM_CMD(pfm_write_ibrs, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_dbreg_t, NULL),
4692 /* 33 */PFM_CMD(pfm_write_dbrs, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_dbreg_t, NULL)
4694 #define PFM_CMD_COUNT (sizeof(pfm_cmd_tab)/sizeof(pfm_cmd_desc_t))
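/*
 * Rough user-level sketch of how this table is typically driven through
 * perfmonctl(2) for a simple self-monitoring session. This is only an
 * illustration: the command names, the pfarg_* layouts and the libc
 * perfmonctl() wrapper are assumed to come from <asm/perfmon.h> and the ia64
 * libc, and the PMC/PMD numbers and event encodings are PMU-model specific.
 *
 *	pfarg_context_t c  = { .ctx_flags = 0 };
 *	pfarg_reg_t     pc = { .reg_num = 4 };          // a counting PMC, event/plm encoding in .reg_value
 *	pfarg_reg_t     pd = { .reg_num = 4 };          // its paired PMD
 *	pfarg_load_t    ld = { .load_pid = getpid() };
 *
 *	perfmonctl(0, PFM_CREATE_CONTEXT, &c, 1);          // cmd  8, returns c.ctx_fd
 *	perfmonctl(c.ctx_fd, PFM_WRITE_PMCS, &pc, 1);      // cmd  1
 *	perfmonctl(c.ctx_fd, PFM_WRITE_PMDS, &pd, 1);      // cmd  2
 *	perfmonctl(c.ctx_fd, PFM_LOAD_CONTEXT, &ld, 1);    // cmd 16
 *	perfmonctl(c.ctx_fd, PFM_START, NULL, 0);          // cmd  5
 *	// ... run the workload ...
 *	perfmonctl(c.ctx_fd, PFM_STOP, NULL, 0);           // cmd  4
 *	perfmonctl(c.ctx_fd, PFM_READ_PMDS, &pd, 1);       // cmd  3, pd.reg_value holds the count
 *	perfmonctl(c.ctx_fd, PFM_UNLOAD_CONTEXT, NULL, 0); // cmd 17
 */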
4696 static int
4697 pfm_check_task_state(pfm_context_t *ctx, int cmd, unsigned long flags)
4699 struct task_struct *task;
4700 int state, old_state;
4702 recheck:
4703 state = ctx->ctx_state;
4704 task = ctx->ctx_task;
4706 if (task == NULL) {
4707 DPRINT(("context %d no task, state=%d\n", ctx->ctx_fd, state));
4708 return 0;
4711 DPRINT(("context %d state=%d [%d] task_state=%ld must_stop=%d\n",
4712 ctx->ctx_fd,
4713 state,
4714 task_pid_nr(task),
4715 task->state, PFM_CMD_STOPPED(cmd)));
4718 * self-monitoring always ok.
4720 * for system-wide the caller can either be the creator of the
4721 * context (the one to which the context is attached) OR
4722 * a task running on the same CPU as the session.
4724 if (task == current || ctx->ctx_fl_system) return 0;
4727 * we are monitoring another thread
4729 switch(state) {
4730 case PFM_CTX_UNLOADED:
4732 * if context is UNLOADED we are safe to go
4734 return 0;
4735 case PFM_CTX_ZOMBIE:
4737 * no command can operate on a zombie context
4739 DPRINT(("cmd %d state zombie cannot operate on context\n", cmd));
4740 return -EINVAL;
4741 case PFM_CTX_MASKED:
4743 * PMU state has been saved to software even though
4744 * the thread may still be running.
4746 if (cmd != PFM_UNLOAD_CONTEXT) return 0;
4750 * context is LOADED or MASKED. Some commands may need to have
4751 * the task stopped.
4753 * We could lift this restriction for UP but it would mean that
4754 * the user has no guarantee the task would not run between
4755 * two successive calls to perfmonctl(). That's probably OK.
4756 * If the user wants to ensure the task does not run, then
4757 * the task must be stopped.
4759 if (PFM_CMD_STOPPED(cmd)) {
4760 if (!task_is_stopped_or_traced(task)) {
4761 DPRINT(("[%d] task not in stopped state\n", task_pid_nr(task)));
4762 return -EBUSY;
4765 * task is now stopped, wait for ctxsw out
4767 * This is an interesting point in the code.
4768 * We need to unprotect the context because
4769 * the pfm_save_regs() routine needs to grab
4770 * the same lock. There is danger in doing
4771 * this because it leaves a window open for
4772 * another task to get access to the context
4773 * and possibly change its state. The one thing
4774 * that is not possible is for the context to disappear
4775 * because we are protected by the VFS layer, i.e.,
4776 * get_fd()/put_fd().
4778 old_state = state;
4780 UNPROTECT_CTX(ctx, flags);
4782 wait_task_inactive(task, 0);
4784 PROTECT_CTX(ctx, flags);
4787 * we must recheck to verify if state has changed
4789 if (ctx->ctx_state != old_state) {
4790 DPRINT(("old_state=%d new_state=%d\n", old_state, ctx->ctx_state));
4791 goto recheck;
4794 return 0;
4798 * system-call entry point (must return long)
4800 asmlinkage long
4801 sys_perfmonctl (int fd, int cmd, void __user *arg, int count)
4803 struct file *file = NULL;
4804 pfm_context_t *ctx = NULL;
4805 unsigned long flags = 0UL;
4806 void *args_k = NULL;
4807 long ret; /* will expand int return types */
4808 size_t base_sz, sz, xtra_sz = 0;
4809 int narg, completed_args = 0, call_made = 0, cmd_flags;
4810 int (*func)(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
4811 int (*getsize)(void *arg, size_t *sz);
4812 #define PFM_MAX_ARGSIZE 4096
4815 * reject any call if perfmon was disabled at initialization
4817 if (unlikely(pmu_conf == NULL)) return -ENOSYS;
4819 if (unlikely(cmd < 0 || cmd >= PFM_CMD_COUNT)) {
4820 DPRINT(("invalid cmd=%d\n", cmd));
4821 return -EINVAL;
4824 func = pfm_cmd_tab[cmd].cmd_func;
4825 narg = pfm_cmd_tab[cmd].cmd_narg;
4826 base_sz = pfm_cmd_tab[cmd].cmd_argsize;
4827 getsize = pfm_cmd_tab[cmd].cmd_getsize;
4828 cmd_flags = pfm_cmd_tab[cmd].cmd_flags;
4830 if (unlikely(func == NULL)) {
4831 DPRINT(("invalid cmd=%d\n", cmd));
4832 return -EINVAL;
4835 DPRINT(("cmd=%s idx=%d narg=0x%x argsz=%lu count=%d\n",
4836 PFM_CMD_NAME(cmd),
4837 cmd,
4838 narg,
4839 base_sz,
4840 count));
4843 * check if number of arguments matches what the command expects
4845 if (unlikely((narg == PFM_CMD_ARG_MANY && count <= 0) || (narg > 0 && narg != count)))
4846 return -EINVAL;
4848 restart_args:
4849 sz = xtra_sz + base_sz*count;
4851 * limit abuse to min page size
4853 if (unlikely(sz > PFM_MAX_ARGSIZE)) {
4854 printk(KERN_ERR "perfmon: [%d] argument too big %lu\n", task_pid_nr(current), sz);
4855 return -E2BIG;
4859 * allocate default-sized argument buffer
4861 if (likely(count && args_k == NULL)) {
4862 args_k = kmalloc(PFM_MAX_ARGSIZE, GFP_KERNEL);
4863 if (args_k == NULL) return -ENOMEM;
4866 ret = -EFAULT;
4869 * copy arguments
4871 * assume sz = 0 for command without parameters
4873 if (sz && copy_from_user(args_k, arg, sz)) {
4874 DPRINT(("cannot copy_from_user %lu bytes @%p\n", sz, arg));
4875 goto error_args;
4879 * check if command supports extra parameters
4881 if (completed_args == 0 && getsize) {
4883 * get extra parameters size (based on main argument)
4885 ret = (*getsize)(args_k, &xtra_sz);
4886 if (ret) goto error_args;
4888 completed_args = 1;
4890 DPRINT(("restart_args sz=%lu xtra_sz=%lu\n", sz, xtra_sz));
4892 /* retry if necessary */
4893 if (likely(xtra_sz)) goto restart_args;
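/*
 * Illustration of the two-pass copy above, taking PFM_CREATE_CONTEXT as the
 * example: the main argument (pfarg_context_t) names a sampling-buffer format
 * by UUID, the command's getsize() callback (pfm_ctx_getsize) looks that
 * format up and reports the size of its format-specific argument, and the
 * user buffer is then re-copied with xtra_sz added. User level is therefore
 * expected to pass one contiguous buffer laid out roughly as below (the names
 * are illustrative; the real extra argument type is defined by the sampling
 * format module in use):
 *
 *	struct {
 *		pfarg_context_t ctx;        // main argument, base_sz bytes
 *		unsigned char   fmt_arg[];  // format-specific argument, xtra_sz bytes
 *	} req;
 *
 *	perfmonctl(0, PFM_CREATE_CONTEXT, &req, 1);
 */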
4896 if (unlikely((cmd_flags & PFM_CMD_FD) == 0)) goto skip_fd;
4898 ret = -EBADF;
4900 file = fget(fd);
4901 if (unlikely(file == NULL)) {
4902 DPRINT(("invalid fd %d\n", fd));
4903 goto error_args;
4905 if (unlikely(PFM_IS_FILE(file) == 0)) {
4906 DPRINT(("fd %d not related to perfmon\n", fd));
4907 goto error_args;
4910 ctx = file->private_data;
4911 if (unlikely(ctx == NULL)) {
4912 DPRINT(("no context for fd %d\n", fd));
4913 goto error_args;
4915 prefetch(&ctx->ctx_state);
4917 PROTECT_CTX(ctx, flags);
4920 * check task is stopped
4922 ret = pfm_check_task_state(ctx, cmd, flags);
4923 if (unlikely(ret)) goto abort_locked;
4925 skip_fd:
4926 ret = (*func)(ctx, args_k, count, task_pt_regs(current));
4928 call_made = 1;
4930 abort_locked:
4931 if (likely(ctx)) {
4932 DPRINT(("context unlocked\n"));
4933 UNPROTECT_CTX(ctx, flags);
4936 /* copy argument back to user, if needed */
4937 if (call_made && PFM_CMD_RW_ARG(cmd) && copy_to_user(arg, args_k, base_sz*count)) ret = -EFAULT;
4939 error_args:
4940 if (file)
4941 fput(file);
4943 kfree(args_k);
4945 DPRINT(("cmd=%s ret=%ld\n", PFM_CMD_NAME(cmd), ret));
4947 return ret;
4950 static void
4951 pfm_resume_after_ovfl(pfm_context_t *ctx, unsigned long ovfl_regs, struct pt_regs *regs)
4953 pfm_buffer_fmt_t *fmt = ctx->ctx_buf_fmt;
4954 pfm_ovfl_ctrl_t rst_ctrl;
4955 int state;
4956 int ret = 0;
4958 state = ctx->ctx_state;
4960 * Unlock sampling buffer and reset index atomically
4961 * XXX: not really needed when blocking
4963 if (CTX_HAS_SMPL(ctx)) {
4965 rst_ctrl.bits.mask_monitoring = 0;
4966 rst_ctrl.bits.reset_ovfl_pmds = 0;
4968 if (state == PFM_CTX_LOADED)
4969 ret = pfm_buf_fmt_restart_active(fmt, current, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
4970 else
4971 ret = pfm_buf_fmt_restart(fmt, current, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
4972 } else {
4973 rst_ctrl.bits.mask_monitoring = 0;
4974 rst_ctrl.bits.reset_ovfl_pmds = 1;
4977 if (ret == 0) {
4978 if (rst_ctrl.bits.reset_ovfl_pmds) {
4979 pfm_reset_regs(ctx, &ovfl_regs, PFM_PMD_LONG_RESET);
4981 if (rst_ctrl.bits.mask_monitoring == 0) {
4982 DPRINT(("resuming monitoring\n"));
4983 if (ctx->ctx_state == PFM_CTX_MASKED) pfm_restore_monitoring(current);
4984 } else {
4985 DPRINT(("stopping monitoring\n"));
4986 //pfm_stop_monitoring(current, regs);
4988 ctx->ctx_state = PFM_CTX_LOADED;
4993 * context MUST BE LOCKED when calling
4994 * can only be called for current
4996 static void
4997 pfm_context_force_terminate(pfm_context_t *ctx, struct pt_regs *regs)
4999 int ret;
5001 DPRINT(("entering for [%d]\n", task_pid_nr(current)));
5003 ret = pfm_context_unload(ctx, NULL, 0, regs);
5004 if (ret) {
5005 printk(KERN_ERR "pfm_context_force_terminate: [%d] unload failed with %d\n", task_pid_nr(current), ret);
5009 * and wakeup controlling task, indicating we are now disconnected
5011 wake_up_interruptible(&ctx->ctx_zombieq);
5014 * given that context is still locked, the controlling
5015 * task will only get access when we return from
5016 * pfm_handle_work().
5020 static int pfm_ovfl_notify_user(pfm_context_t *ctx, unsigned long ovfl_pmds);
5023 * pfm_handle_work() can be called with interrupts enabled
5024 * (TIF_NEED_RESCHED) or disabled. The down_interruptible
5025 * call may sleep, therefore we must re-enable interrupts
5026 * to avoid deadlocks. It is safe to do so because this function
5027 * is called ONLY when returning to user level (pUStk=1), in which case
5028 * there is no risk of kernel stack overflow due to deep
5029 * interrupt nesting.
5031 void
5032 pfm_handle_work(void)
5034 pfm_context_t *ctx;
5035 struct pt_regs *regs;
5036 unsigned long flags, dummy_flags;
5037 unsigned long ovfl_regs;
5038 unsigned int reason;
5039 int ret;
5041 ctx = PFM_GET_CTX(current);
5042 if (ctx == NULL) {
5043 printk(KERN_ERR "perfmon: [%d] has no PFM context\n",
5044 task_pid_nr(current));
5045 return;
5048 PROTECT_CTX(ctx, flags);
5050 PFM_SET_WORK_PENDING(current, 0);
5052 regs = task_pt_regs(current);
5055 * extract reason for being here and clear
5057 reason = ctx->ctx_fl_trap_reason;
5058 ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_NONE;
5059 ovfl_regs = ctx->ctx_ovfl_regs[0];
5061 DPRINT(("reason=%d state=%d\n", reason, ctx->ctx_state));
5064 * must be done before we check for simple-reset mode
5066 if (ctx->ctx_fl_going_zombie || ctx->ctx_state == PFM_CTX_ZOMBIE)
5067 goto do_zombie;
5069 //if (CTX_OVFL_NOBLOCK(ctx)) goto skip_blocking;
5070 if (reason == PFM_TRAP_REASON_RESET)
5071 goto skip_blocking;
5074 * restore interrupt mask to what it was on entry.
5075 * Could be enabled/disabled.
5077 UNPROTECT_CTX(ctx, flags);
5080 * force interrupt enable because of down_interruptible()
5082 local_irq_enable();
5084 DPRINT(("before block sleeping\n"));
5087 * may go through without blocking on SMP systems
5088 * if restart has been received already by the time we call down()
5090 ret = wait_for_completion_interruptible(&ctx->ctx_restart_done);
5092 DPRINT(("after block sleeping ret=%d\n", ret));
5095 * lock context and mask interrupts again
5096 * We save flags into a dummy because we may have
5097 * altered the interrupt mask compared to entry in this
5098 * function.
5100 PROTECT_CTX(ctx, dummy_flags);
5103 * we need to read the ovfl_regs only after wake-up
5104 * because we may have had pfm_write_pmds() in between
5105 * and that can change PMD values and therefore
5106 * ovfl_regs is reset for these new PMD values.
5108 ovfl_regs = ctx->ctx_ovfl_regs[0];
5110 if (ctx->ctx_fl_going_zombie) {
5111 do_zombie:
5112 DPRINT(("context is zombie, bailing out\n"));
5113 pfm_context_force_terminate(ctx, regs);
5114 goto nothing_to_do;
5117 * in case of interruption of down() we don't restart anything
5119 if (ret < 0)
5120 goto nothing_to_do;
5122 skip_blocking:
5123 pfm_resume_after_ovfl(ctx, ovfl_regs, regs);
5124 ctx->ctx_ovfl_regs[0] = 0UL;
5126 nothing_to_do:
5128 * restore flags as they were upon entry
5130 UNPROTECT_CTX(ctx, flags);
5133 static int
5134 pfm_notify_user(pfm_context_t *ctx, pfm_msg_t *msg)
5136 if (ctx->ctx_state == PFM_CTX_ZOMBIE) {
5137 DPRINT(("ignoring overflow notification, owner is zombie\n"));
5138 return 0;
5141 DPRINT(("waking up somebody\n"));
5143 if (msg) wake_up_interruptible(&ctx->ctx_msgq_wait);
5146 * safe, we are not in intr handler, nor in ctxsw when
5147 * we come here
5149 kill_fasync (&ctx->ctx_async_queue, SIGIO, POLL_IN);
5151 return 0;
5154 static int
5155 pfm_ovfl_notify_user(pfm_context_t *ctx, unsigned long ovfl_pmds)
5157 pfm_msg_t *msg = NULL;
5159 if (ctx->ctx_fl_no_msg == 0) {
5160 msg = pfm_get_new_msg(ctx);
5161 if (msg == NULL) {
5162 printk(KERN_ERR "perfmon: pfm_ovfl_notify_user no more notification msgs\n");
5163 return -1;
5166 msg->pfm_ovfl_msg.msg_type = PFM_MSG_OVFL;
5167 msg->pfm_ovfl_msg.msg_ctx_fd = ctx->ctx_fd;
5168 msg->pfm_ovfl_msg.msg_active_set = 0;
5169 msg->pfm_ovfl_msg.msg_ovfl_pmds[0] = ovfl_pmds;
5170 msg->pfm_ovfl_msg.msg_ovfl_pmds[1] = 0UL;
5171 msg->pfm_ovfl_msg.msg_ovfl_pmds[2] = 0UL;
5172 msg->pfm_ovfl_msg.msg_ovfl_pmds[3] = 0UL;
5173 msg->pfm_ovfl_msg.msg_tstamp = 0UL;
5176 DPRINT(("ovfl msg: msg=%p no_msg=%d fd=%d ovfl_pmds=0x%lx\n",
5177 msg,
5178 ctx->ctx_fl_no_msg,
5179 ctx->ctx_fd,
5180 ovfl_pmds));
5182 return pfm_notify_user(ctx, msg);
5185 static int
5186 pfm_end_notify_user(pfm_context_t *ctx)
5188 pfm_msg_t *msg;
5190 msg = pfm_get_new_msg(ctx);
5191 if (msg == NULL) {
5192 printk(KERN_ERR "perfmon: pfm_end_notify_user no more notification msgs\n");
5193 return -1;
5195 /* no leak */
5196 memset(msg, 0, sizeof(*msg));
5198 msg->pfm_end_msg.msg_type = PFM_MSG_END;
5199 msg->pfm_end_msg.msg_ctx_fd = ctx->ctx_fd;
5200 msg->pfm_ovfl_msg.msg_tstamp = 0UL;
5202 DPRINT(("end msg: msg=%p no_msg=%d ctx_fd=%d\n",
5203 msg,
5204 ctx->ctx_fl_no_msg,
5205 ctx->ctx_fd));
5207 return pfm_notify_user(ctx, msg);
5211 * main overflow processing routine.
5212 * it can be called from the interrupt path or explicitly during the context switch code
5214 static void pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx,
5215 unsigned long pmc0, struct pt_regs *regs)
5217 pfm_ovfl_arg_t *ovfl_arg;
5218 unsigned long mask;
5219 unsigned long old_val, ovfl_val, new_val;
5220 unsigned long ovfl_notify = 0UL, ovfl_pmds = 0UL, smpl_pmds = 0UL, reset_pmds;
5221 unsigned long tstamp;
5222 pfm_ovfl_ctrl_t ovfl_ctrl;
5223 unsigned int i, has_smpl;
5224 int must_notify = 0;
5226 if (unlikely(ctx->ctx_state == PFM_CTX_ZOMBIE)) goto stop_monitoring;
5229 * sanity test. Should never happen
5231 if (unlikely((pmc0 & 0x1) == 0)) goto sanity_check;
5233 tstamp = ia64_get_itc();
5234 mask = pmc0 >> PMU_FIRST_COUNTER;
5235 ovfl_val = pmu_conf->ovfl_val;
5236 has_smpl = CTX_HAS_SMPL(ctx);
5238 DPRINT_ovfl(("pmc0=0x%lx pid=%d iip=0x%lx, %s "
5239 "used_pmds=0x%lx\n",
5240 pmc0,
5241 task ? task_pid_nr(task): -1,
5242 (regs ? regs->cr_iip : 0),
5243 CTX_OVFL_NOBLOCK(ctx) ? "nonblocking" : "blocking",
5244 ctx->ctx_used_pmds[0]));
5248 * first we update the virtual counters
5249 * assume there was a prior ia64_srlz_d() issued
5251 for (i = PMU_FIRST_COUNTER; mask ; i++, mask >>= 1) {
5253 /* skip pmd which did not overflow */
5254 if ((mask & 0x1) == 0) continue;
5257 * Note that the pmd is not necessarily 0 at this point as qualified events
5258 * may have happened before the PMU was frozen. The residual count is not
5259 * taken into consideration here but will be with any read of the pmd via
5260 * pfm_read_pmds().
5262 old_val = new_val = ctx->ctx_pmds[i].val;
5263 new_val += 1 + ovfl_val;
5264 ctx->ctx_pmds[i].val = new_val;
5267 * check for overflow condition
5269 if (likely(old_val > new_val)) {
5270 ovfl_pmds |= 1UL << i;
5271 if (PMC_OVFL_NOTIFY(ctx, i)) ovfl_notify |= 1UL << i;
5274 DPRINT_ovfl(("ctx_pmd[%d].val=0x%lx old_val=0x%lx pmd=0x%lx ovfl_pmds=0x%lx ovfl_notify=0x%lx\n",
5276 new_val,
5277 old_val,
5278 ia64_get_pmd(i) & ovfl_val,
5279 ovfl_pmds,
5280 ovfl_notify));
5284 * there was no 64-bit overflow, nothing else to do
5286 if (ovfl_pmds == 0UL) return;
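/*
 * Worked example of the update above, assuming 47-bit wide counters
 * (ovfl_val = 2^47 - 1, e.g. McKinley): to be notified every 100000 events
 * the user writes -100000 into the PMD, so ctx_pmds[i].val starts at
 * 0xfffffffffffe7960 and the hardware PMD is loaded with its low 47 bits,
 * i.e. 2^47 - 100000. The hardware counter wraps (and freezes the PMU) after
 * exactly 100000 events; the soft value then gains 1 + ovfl_val = 2^47, which
 * makes the 64-bit value wrap as well, so old_val > new_val holds and the PMD
 * is recorded in ovfl_pmds/ovfl_notify.
 */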
5289 * reset all control bits
5291 ovfl_ctrl.val = 0;
5292 reset_pmds = 0UL;
5295 * if a sampling format module exists, then we "cache" the overflow by
5296 * calling the module's handler() routine.
5298 if (has_smpl) {
5299 unsigned long start_cycles, end_cycles;
5300 unsigned long pmd_mask;
5301 int j, k, ret = 0;
5302 int this_cpu = smp_processor_id();
5304 pmd_mask = ovfl_pmds >> PMU_FIRST_COUNTER;
5305 ovfl_arg = &ctx->ctx_ovfl_arg;
5307 prefetch(ctx->ctx_smpl_hdr);
5309 for(i=PMU_FIRST_COUNTER; pmd_mask && ret == 0; i++, pmd_mask >>=1) {
5311 mask = 1UL << i;
5313 if ((pmd_mask & 0x1) == 0) continue;
5315 ovfl_arg->ovfl_pmd = (unsigned char )i;
5316 ovfl_arg->ovfl_notify = ovfl_notify & mask ? 1 : 0;
5317 ovfl_arg->active_set = 0;
5318 ovfl_arg->ovfl_ctrl.val = 0; /* module must fill in all fields */
5319 ovfl_arg->smpl_pmds[0] = smpl_pmds = ctx->ctx_pmds[i].smpl_pmds[0];
5321 ovfl_arg->pmd_value = ctx->ctx_pmds[i].val;
5322 ovfl_arg->pmd_last_reset = ctx->ctx_pmds[i].lval;
5323 ovfl_arg->pmd_eventid = ctx->ctx_pmds[i].eventid;
5326 * copy values of pmds of interest. Sampling format may copy them
5327 * into sampling buffer.
5329 if (smpl_pmds) {
5330 for(j=0, k=0; smpl_pmds; j++, smpl_pmds >>=1) {
5331 if ((smpl_pmds & 0x1) == 0) continue;
5332 ovfl_arg->smpl_pmds_values[k++] = PMD_IS_COUNTING(j) ? pfm_read_soft_counter(ctx, j) : ia64_get_pmd(j);
5333 DPRINT_ovfl(("smpl_pmd[%d]=pmd%u=0x%lx\n", k-1, j, ovfl_arg->smpl_pmds_values[k-1]));
5337 pfm_stats[this_cpu].pfm_smpl_handler_calls++;
5339 start_cycles = ia64_get_itc();
5342 * call custom buffer format record (handler) routine
5344 ret = (*ctx->ctx_buf_fmt->fmt_handler)(task, ctx->ctx_smpl_hdr, ovfl_arg, regs, tstamp);
5346 end_cycles = ia64_get_itc();
5349 * For those controls, we take the union because they have
5350 * an all or nothing behavior.
5352 ovfl_ctrl.bits.notify_user |= ovfl_arg->ovfl_ctrl.bits.notify_user;
5353 ovfl_ctrl.bits.block_task |= ovfl_arg->ovfl_ctrl.bits.block_task;
5354 ovfl_ctrl.bits.mask_monitoring |= ovfl_arg->ovfl_ctrl.bits.mask_monitoring;
5356 * build the bitmask of pmds to reset now
5358 if (ovfl_arg->ovfl_ctrl.bits.reset_ovfl_pmds) reset_pmds |= mask;
5360 pfm_stats[this_cpu].pfm_smpl_handler_cycles += end_cycles - start_cycles;
5363 * when the module cannot handle the rest of the overflows, we abort right here
5365 if (ret && pmd_mask) {
5366 DPRINT(("handler aborts leftover ovfl_pmds=0x%lx\n",
5367 pmd_mask<<PMU_FIRST_COUNTER));
5370 * remove the pmds we reset now from the set of pmds to reset in pfm_restart()
5372 ovfl_pmds &= ~reset_pmds;
5373 } else {
5375 * when no sampling module is used, then the default
5376 * is to notify on overflow if requested by user
5378 ovfl_ctrl.bits.notify_user = ovfl_notify ? 1 : 0;
5379 ovfl_ctrl.bits.block_task = ovfl_notify ? 1 : 0;
5380 ovfl_ctrl.bits.mask_monitoring = ovfl_notify ? 1 : 0; /* XXX: change for saturation */
5381 ovfl_ctrl.bits.reset_ovfl_pmds = ovfl_notify ? 0 : 1;
5383 * if needed, we reset all overflowed pmds
5385 if (ovfl_notify == 0) reset_pmds = ovfl_pmds;
5388 DPRINT_ovfl(("ovfl_pmds=0x%lx reset_pmds=0x%lx\n", ovfl_pmds, reset_pmds));
5391 * reset the requested PMD registers using the short reset values
5393 if (reset_pmds) {
5394 unsigned long bm = reset_pmds;
5395 pfm_reset_regs(ctx, &bm, PFM_PMD_SHORT_RESET);
5398 if (ovfl_notify && ovfl_ctrl.bits.notify_user) {
5400 * keep track of what to reset when unblocking
5402 ctx->ctx_ovfl_regs[0] = ovfl_pmds;
5405 * check for blocking context
5407 if (CTX_OVFL_NOBLOCK(ctx) == 0 && ovfl_ctrl.bits.block_task) {
5409 ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_BLOCK;
5412 * flag perfmon-specific pending work for the task
5414 PFM_SET_WORK_PENDING(task, 1);
5417 * when coming from ctxsw, current still points to the
5418 * previous task, therefore we must work with task and not current.
5420 set_notify_resume(task);
5423 * defer until state is changed (shorten spin window). the context is locked
5424 * anyway, so the signal receiver would just spin for nothing.
5426 must_notify = 1;
5429 DPRINT_ovfl(("owner [%d] pending=%ld reason=%u ovfl_pmds=0x%lx ovfl_notify=0x%lx masked=%d\n",
5430 GET_PMU_OWNER() ? task_pid_nr(GET_PMU_OWNER()) : -1,
5431 PFM_GET_WORK_PENDING(task),
5432 ctx->ctx_fl_trap_reason,
5433 ovfl_pmds,
5434 ovfl_notify,
5435 ovfl_ctrl.bits.mask_monitoring ? 1 : 0));
5437 * in case monitoring must be stopped, we toggle the psr bits
5439 if (ovfl_ctrl.bits.mask_monitoring) {
5440 pfm_mask_monitoring(task);
5441 ctx->ctx_state = PFM_CTX_MASKED;
5442 ctx->ctx_fl_can_restart = 1;
5446 * send notification now
5448 if (must_notify) pfm_ovfl_notify_user(ctx, ovfl_notify);
5450 return;
5452 sanity_check:
5453 printk(KERN_ERR "perfmon: CPU%d overflow handler [%d] pmc0=0x%lx\n",
5454 smp_processor_id(),
5455 task ? task_pid_nr(task) : -1,
5456 pmc0);
5457 return;
5459 stop_monitoring:
5461 * in SMP, zombie context is never restored but reclaimed in pfm_load_regs().
5462 * Moreover, zombies are also reclaimed in pfm_save_regs(). Therefore we can
5463 * come here as zombie only if the task is the current task. In which case, we
5464 * can access the PMU hardware directly.
5466 * Note that zombies do have PM_VALID set. So here we do the minimal.
5468 * In case the context was zombified it could not be reclaimed at the time
5469 * the monitoring program exited. At this point, the PMU reservation has been
5470 * returned, the sampling buffer has been freed. We must convert this call
5471 * into a spurious interrupt. However, we must also avoid infinite overflows
5472 * by stopping monitoring for this task. We can only come here for a per-task
5473 * context. All we need to do is to stop monitoring using the psr bits which
5474 * are always task private. By re-enabling secure monitoring, we ensure that
5475 * the monitored task will not be able to re-activate monitoring.
5476 * The task will eventually be context switched out, at which point the context
5477 * will be reclaimed (that includes releasing ownership of the PMU).
5479 * So there might be a window of time where the number of per-task sessions is zero
5480 * yet one PMU might have an owner and get at most one overflow interrupt for a zombie
5481 * context. This is safe because if a per-task session comes in, it will push this one
5482 * out and, by virtue of pfm_save_regs(), this one will disappear. If a system wide
5483 * session is forced onto that CPU, given that we use task pinning, pfm_save_regs() will
5484 * also push our zombie context out.
5486 * Overall pretty hairy stuff....
5488 DPRINT(("ctx is zombie for [%d], converted to spurious\n", task ? task_pid_nr(task): -1));
5489 pfm_clear_psr_up();
5490 ia64_psr(regs)->up = 0;
5491 ia64_psr(regs)->sp = 1;
5492 return;
5495 static int
5496 pfm_do_interrupt_handler(void *arg, struct pt_regs *regs)
5498 struct task_struct *task;
5499 pfm_context_t *ctx;
5500 unsigned long flags;
5501 u64 pmc0;
5502 int this_cpu = smp_processor_id();
5503 int retval = 0;
5505 pfm_stats[this_cpu].pfm_ovfl_intr_count++;
5508 * srlz.d done before arriving here
5510 pmc0 = ia64_get_pmc(0);
5512 task = GET_PMU_OWNER();
5513 ctx = GET_PMU_CTX();
5516 * if we have some pending bits set
5517 * assumes : if any PMC0.bit[63-1] is set, then PMC0.fr = 1
5519 if (PMC0_HAS_OVFL(pmc0) && task) {
5521 * we assume that pmc0.fr is always set here
5524 /* sanity check */
5525 if (!ctx) goto report_spurious1;
5527 if (ctx->ctx_fl_system == 0 && (task->thread.flags & IA64_THREAD_PM_VALID) == 0)
5528 goto report_spurious2;
5530 PROTECT_CTX_NOPRINT(ctx, flags);
5532 pfm_overflow_handler(task, ctx, pmc0, regs);
5534 UNPROTECT_CTX_NOPRINT(ctx, flags);
5536 } else {
5537 pfm_stats[this_cpu].pfm_spurious_ovfl_intr_count++;
5538 retval = -1;
5541 * keep it unfrozen at all times
5543 pfm_unfreeze_pmu();
5545 return retval;
5547 report_spurious1:
5548 printk(KERN_INFO "perfmon: spurious overflow interrupt on CPU%d: process %d has no PFM context\n",
5549 this_cpu, task_pid_nr(task));
5550 pfm_unfreeze_pmu();
5551 return -1;
5552 report_spurious2:
5553 printk(KERN_INFO "perfmon: spurious overflow interrupt on CPU%d: process %d, invalid flag\n",
5554 this_cpu,
5555 task_pid_nr(task));
5556 pfm_unfreeze_pmu();
5557 return -1;
5560 static irqreturn_t
5561 pfm_interrupt_handler(int irq, void *arg)
5563 unsigned long start_cycles, total_cycles;
5564 unsigned long min, max;
5565 int this_cpu;
5566 int ret;
5567 struct pt_regs *regs = get_irq_regs();
5569 this_cpu = get_cpu();
5570 if (likely(!pfm_alt_intr_handler)) {
5571 min = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min;
5572 max = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max;
5574 start_cycles = ia64_get_itc();
5576 ret = pfm_do_interrupt_handler(arg, regs);
5578 total_cycles = ia64_get_itc();
5581 * don't measure spurious interrupts
5583 if (likely(ret == 0)) {
5584 total_cycles -= start_cycles;
5586 if (total_cycles < min) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min = total_cycles;
5587 if (total_cycles > max) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max = total_cycles;
5589 pfm_stats[this_cpu].pfm_ovfl_intr_cycles += total_cycles;
5592 else {
5593 (*pfm_alt_intr_handler->handler)(irq, arg, regs);
5596 put_cpu();
5597 return IRQ_HANDLED;
5601 * /proc/perfmon interface, for debug only
5604 #define PFM_PROC_SHOW_HEADER ((void *)(long)nr_cpu_ids+1)
5606 static void *
5607 pfm_proc_start(struct seq_file *m, loff_t *pos)
5609 if (*pos == 0) {
5610 return PFM_PROC_SHOW_HEADER;
5613 while (*pos <= nr_cpu_ids) {
5614 if (cpu_online(*pos - 1)) {
5615 return (void *)*pos;
5617 ++*pos;
5619 return NULL;
5622 static void *
5623 pfm_proc_next(struct seq_file *m, void *v, loff_t *pos)
5625 ++*pos;
5626 return pfm_proc_start(m, pos);
5629 static void
5630 pfm_proc_stop(struct seq_file *m, void *v)
5634 static void
5635 pfm_proc_show_header(struct seq_file *m)
5637 struct list_head * pos;
5638 pfm_buffer_fmt_t * entry;
5639 unsigned long flags;
5641 seq_printf(m,
5642 "perfmon version : %u.%u\n"
5643 "model : %s\n"
5644 "fastctxsw : %s\n"
5645 "expert mode : %s\n"
5646 "ovfl_mask : 0x%lx\n"
5647 "PMU flags : 0x%x\n",
5648 PFM_VERSION_MAJ, PFM_VERSION_MIN,
5649 pmu_conf->pmu_name,
5650 pfm_sysctl.fastctxsw > 0 ? "Yes": "No",
5651 pfm_sysctl.expert_mode > 0 ? "Yes": "No",
5652 pmu_conf->ovfl_val,
5653 pmu_conf->flags);
5655 LOCK_PFS(flags);
5657 seq_printf(m,
5658 "proc_sessions : %u\n"
5659 "sys_sessions : %u\n"
5660 "sys_use_dbregs : %u\n"
5661 "ptrace_use_dbregs : %u\n",
5662 pfm_sessions.pfs_task_sessions,
5663 pfm_sessions.pfs_sys_sessions,
5664 pfm_sessions.pfs_sys_use_dbregs,
5665 pfm_sessions.pfs_ptrace_use_dbregs);
5667 UNLOCK_PFS(flags);
5669 spin_lock(&pfm_buffer_fmt_lock);
5671 list_for_each(pos, &pfm_buffer_fmt_list) {
5672 entry = list_entry(pos, pfm_buffer_fmt_t, fmt_list);
5673 seq_printf(m, "format : %02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x %s\n",
5674 entry->fmt_uuid[0],
5675 entry->fmt_uuid[1],
5676 entry->fmt_uuid[2],
5677 entry->fmt_uuid[3],
5678 entry->fmt_uuid[4],
5679 entry->fmt_uuid[5],
5680 entry->fmt_uuid[6],
5681 entry->fmt_uuid[7],
5682 entry->fmt_uuid[8],
5683 entry->fmt_uuid[9],
5684 entry->fmt_uuid[10],
5685 entry->fmt_uuid[11],
5686 entry->fmt_uuid[12],
5687 entry->fmt_uuid[13],
5688 entry->fmt_uuid[14],
5689 entry->fmt_uuid[15],
5690 entry->fmt_name);
5692 spin_unlock(&pfm_buffer_fmt_lock);
5696 static int
5697 pfm_proc_show(struct seq_file *m, void *v)
5699 unsigned long psr;
5700 unsigned int i;
5701 int cpu;
5703 if (v == PFM_PROC_SHOW_HEADER) {
5704 pfm_proc_show_header(m);
5705 return 0;
5708 /* show info for CPU (v - 1) */
5710 cpu = (long)v - 1;
5711 seq_printf(m,
5712 "CPU%-2d overflow intrs : %lu\n"
5713 "CPU%-2d overflow cycles : %lu\n"
5714 "CPU%-2d overflow min : %lu\n"
5715 "CPU%-2d overflow max : %lu\n"
5716 "CPU%-2d smpl handler calls : %lu\n"
5717 "CPU%-2d smpl handler cycles : %lu\n"
5718 "CPU%-2d spurious intrs : %lu\n"
5719 "CPU%-2d replay intrs : %lu\n"
5720 "CPU%-2d syst_wide : %d\n"
5721 "CPU%-2d dcr_pp : %d\n"
5722 "CPU%-2d exclude idle : %d\n"
5723 "CPU%-2d owner : %d\n"
5724 "CPU%-2d context : %p\n"
5725 "CPU%-2d activations : %lu\n",
5726 cpu, pfm_stats[cpu].pfm_ovfl_intr_count,
5727 cpu, pfm_stats[cpu].pfm_ovfl_intr_cycles,
5728 cpu, pfm_stats[cpu].pfm_ovfl_intr_cycles_min,
5729 cpu, pfm_stats[cpu].pfm_ovfl_intr_cycles_max,
5730 cpu, pfm_stats[cpu].pfm_smpl_handler_calls,
5731 cpu, pfm_stats[cpu].pfm_smpl_handler_cycles,
5732 cpu, pfm_stats[cpu].pfm_spurious_ovfl_intr_count,
5733 cpu, pfm_stats[cpu].pfm_replay_ovfl_intr_count,
5734 cpu, pfm_get_cpu_data(pfm_syst_info, cpu) & PFM_CPUINFO_SYST_WIDE ? 1 : 0,
5735 cpu, pfm_get_cpu_data(pfm_syst_info, cpu) & PFM_CPUINFO_DCR_PP ? 1 : 0,
5736 cpu, pfm_get_cpu_data(pfm_syst_info, cpu) & PFM_CPUINFO_EXCL_IDLE ? 1 : 0,
5737 cpu, pfm_get_cpu_data(pmu_owner, cpu) ? pfm_get_cpu_data(pmu_owner, cpu)->pid: -1,
5738 cpu, pfm_get_cpu_data(pmu_ctx, cpu),
5739 cpu, pfm_get_cpu_data(pmu_activation_number, cpu));
5741 if (num_online_cpus() == 1 && pfm_sysctl.debug > 0) {
5743 psr = pfm_get_psr();
5745 ia64_srlz_d();
5747 seq_printf(m,
5748 "CPU%-2d psr : 0x%lx\n"
5749 "CPU%-2d pmc0 : 0x%lx\n",
5750 cpu, psr,
5751 cpu, ia64_get_pmc(0));
5753 for (i=0; PMC_IS_LAST(i) == 0; i++) {
5754 if (PMC_IS_COUNTING(i) == 0) continue;
5755 seq_printf(m,
5756 "CPU%-2d pmc%u : 0x%lx\n"
5757 "CPU%-2d pmd%u : 0x%lx\n",
5758 cpu, i, ia64_get_pmc(i),
5759 cpu, i, ia64_get_pmd(i));
5762 return 0;
5765 const struct seq_operations pfm_seq_ops = {
5766 .start = pfm_proc_start,
5767 .next = pfm_proc_next,
5768 .stop = pfm_proc_stop,
5769 .show = pfm_proc_show
5772 static int
5773 pfm_proc_open(struct inode *inode, struct file *file)
5775 return seq_open(file, &pfm_seq_ops);
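/*
 * For reference, the header part of /proc/perfmon produced by
 * pfm_proc_show_header() above looks roughly like this (values are purely
 * illustrative and depend on the PMU model, sysctl settings and the sessions
 * currently open):
 *
 *	perfmon version    : 2.0
 *	model              : Itanium 2
 *	fastctxsw          : No
 *	expert mode        : No
 *	ovfl_mask          : 0x7fffffffffff
 *	PMU flags          : 0x0
 *	proc_sessions      : 1
 *	sys_sessions       : 0
 *	sys_use_dbregs     : 0
 *	ptrace_use_dbregs  : 0
 *	format             : <16 uuid bytes> <format name>
 */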
5780 * we come here as soon as local_cpu_data->pfm_syst_wide is set. this happens
5781 * during pfm_enable() hence before pfm_start(). We cannot assume monitoring
5782 * is active or inactive based on mode. We must rely on the value in
5783 * local_cpu_data->pfm_syst_info
5785 void
5786 pfm_syst_wide_update_task(struct task_struct *task, unsigned long info, int is_ctxswin)
5788 struct pt_regs *regs;
5789 unsigned long dcr;
5790 unsigned long dcr_pp;
5792 dcr_pp = info & PFM_CPUINFO_DCR_PP ? 1 : 0;
5795 * pid 0 is guaranteed to be the idle task. There is one such task with pid 0
5796 * on every CPU, so we can rely on the pid to identify the idle task.
5798 if ((info & PFM_CPUINFO_EXCL_IDLE) == 0 || task->pid) {
5799 regs = task_pt_regs(task);
5800 ia64_psr(regs)->pp = is_ctxswin ? dcr_pp : 0;
5801 return;
5804 * if monitoring has started
5806 if (dcr_pp) {
5807 dcr = ia64_getreg(_IA64_REG_CR_DCR);
5809 * context switching in?
5811 if (is_ctxswin) {
5812 /* mask monitoring for the idle task */
5813 ia64_setreg(_IA64_REG_CR_DCR, dcr & ~IA64_DCR_PP);
5814 pfm_clear_psr_pp();
5815 ia64_srlz_i();
5816 return;
5819 * context switching out
5820 * restore monitoring for next task
5822 * Due to inlining this odd if-then-else construction generates
5823 * better code.
5825 ia64_setreg(_IA64_REG_CR_DCR, dcr |IA64_DCR_PP);
5826 pfm_set_psr_pp();
5827 ia64_srlz_i();
5831 #ifdef CONFIG_SMP
5833 static void
5834 pfm_force_cleanup(pfm_context_t *ctx, struct pt_regs *regs)
5836 struct task_struct *task = ctx->ctx_task;
5838 ia64_psr(regs)->up = 0;
5839 ia64_psr(regs)->sp = 1;
5841 if (GET_PMU_OWNER() == task) {
5842 DPRINT(("cleared ownership for [%d]\n",
5843 task_pid_nr(ctx->ctx_task)));
5844 SET_PMU_OWNER(NULL, NULL);
5848 * disconnect the task from the context and vice-versa
5850 PFM_SET_WORK_PENDING(task, 0);
5852 task->thread.pfm_context = NULL;
5853 task->thread.flags &= ~IA64_THREAD_PM_VALID;
5855 DPRINT(("force cleanup for [%d]\n", task_pid_nr(task)));
5860 * in 2.6, interrupts are masked when we come here and the runqueue lock is held
5862 void
5863 pfm_save_regs(struct task_struct *task)
5865 pfm_context_t *ctx;
5866 unsigned long flags;
5867 u64 psr;
5870 ctx = PFM_GET_CTX(task);
5871 if (ctx == NULL) return;
5874 * we always come here with interrupts ALREADY disabled by
5875 * the scheduler. So we simply need to protect against concurrent
5876 * access, not CPU concurrency.
5878 flags = pfm_protect_ctx_ctxsw(ctx);
5880 if (ctx->ctx_state == PFM_CTX_ZOMBIE) {
5881 struct pt_regs *regs = task_pt_regs(task);
5883 pfm_clear_psr_up();
5885 pfm_force_cleanup(ctx, regs);
5887 BUG_ON(ctx->ctx_smpl_hdr);
5889 pfm_unprotect_ctx_ctxsw(ctx, flags);
5891 pfm_context_free(ctx);
5892 return;
5896 * save current PSR: needed because we modify it
5898 ia64_srlz_d();
5899 psr = pfm_get_psr();
5901 BUG_ON(psr & (IA64_PSR_I));
5904 * stop monitoring:
5905 * This is the last instruction which may generate an overflow
5907 * We do not need to set psr.sp because it is irrelevant in the kernel.
5908 * It will be restored from ipsr when going back to user level
5910 pfm_clear_psr_up();
5913 * keep a copy of psr.up (for reload)
5915 ctx->ctx_saved_psr_up = psr & IA64_PSR_UP;
5918 * release ownership of this PMU.
5919 * PM interrupts are masked, so nothing
5920 * can happen.
5922 SET_PMU_OWNER(NULL, NULL);
5925 * we systematically save the PMD as we have no
5926 * guarantee we will be scheduled on that same
5927 * CPU again.
5929 pfm_save_pmds(ctx->th_pmds, ctx->ctx_used_pmds[0]);
5932 * save pmc0 ia64_srlz_d() done in pfm_save_pmds()
5933 * we will need it on the restore path to check
5934 * for pending overflow.
5936 ctx->th_pmcs[0] = ia64_get_pmc(0);
5939 * unfreeze PMU if it had pending overflows
5941 if (ctx->th_pmcs[0] & ~0x1UL) pfm_unfreeze_pmu();
5944 * finally, allow context access.
5945 * interrupts will still be masked after this call.
5947 pfm_unprotect_ctx_ctxsw(ctx, flags);
5950 #else /* !CONFIG_SMP */
5951 void
5952 pfm_save_regs(struct task_struct *task)
5954 pfm_context_t *ctx;
5955 u64 psr;
5957 ctx = PFM_GET_CTX(task);
5958 if (ctx == NULL) return;
5961 * save current PSR: needed because we modify it
5963 psr = pfm_get_psr();
5965 BUG_ON(psr & (IA64_PSR_I));
5968 * stop monitoring:
5969 * This is the last instruction which may generate an overflow
5971 * We do not need to set psr.sp because it is irrelevant in the kernel.
5972 * It will be restored from ipsr when going back to user level
5974 pfm_clear_psr_up();
5977 * keep a copy of psr.up (for reload)
5979 ctx->ctx_saved_psr_up = psr & IA64_PSR_UP;
5982 static void
5983 pfm_lazy_save_regs (struct task_struct *task)
5985 pfm_context_t *ctx;
5986 unsigned long flags;
5988 { u64 psr = pfm_get_psr();
5989 BUG_ON(psr & IA64_PSR_UP);
5992 ctx = PFM_GET_CTX(task);
5995 * we need to mask PMU overflow here to
5996 * make sure that we maintain pmc0 until
5997 * we save it. overflow interrupts are
5998 * treated as spurious if there is no
5999 * owner.
6001 * XXX: I don't think this is necessary
6003 PROTECT_CTX(ctx,flags);
6006 * release ownership of this PMU.
6007 * must be done before we save the registers.
6009 * after this call any PMU interrupt is treated
6010 * as spurious.
6012 SET_PMU_OWNER(NULL, NULL);
6015 * save all the pmds we use
6017 pfm_save_pmds(ctx->th_pmds, ctx->ctx_used_pmds[0]);
6020 * save pmc0 ia64_srlz_d() done in pfm_save_pmds()
6021 * it is needed to check for pending overflow
6022 * on the restore path
6024 ctx->th_pmcs[0] = ia64_get_pmc(0);
6027 * unfreeze PMU if it had pending overflows
6029 if (ctx->th_pmcs[0] & ~0x1UL) pfm_unfreeze_pmu();
6032 * now we can unmask PMU interrupts, they will
6033 * be treated as purely spurious and we will not
6034 * lose any information
6036 UNPROTECT_CTX(ctx,flags);
6038 #endif /* CONFIG_SMP */
6040 #ifdef CONFIG_SMP
6042 * in 2.6, interrupts are masked when we come here and the runqueue lock is held
6044 void
6045 pfm_load_regs (struct task_struct *task)
6047 pfm_context_t *ctx;
6048 unsigned long pmc_mask = 0UL, pmd_mask = 0UL;
6049 unsigned long flags;
6050 u64 psr, psr_up;
6051 int need_irq_resend;
6053 ctx = PFM_GET_CTX(task);
6054 if (unlikely(ctx == NULL)) return;
6056 BUG_ON(GET_PMU_OWNER());
6059 * possible on unload
6061 if (unlikely((task->thread.flags & IA64_THREAD_PM_VALID) == 0)) return;
6064 * we always come here with interrupts ALREADY disabled by
6065 * the scheduler. So we simply need to protect against concurrent
6066 * access, not CPU concurrency.
6068 flags = pfm_protect_ctx_ctxsw(ctx);
6069 psr = pfm_get_psr();
6071 need_irq_resend = pmu_conf->flags & PFM_PMU_IRQ_RESEND;
6073 BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP));
6074 BUG_ON(psr & IA64_PSR_I);
6076 if (unlikely(ctx->ctx_state == PFM_CTX_ZOMBIE)) {
6077 struct pt_regs *regs = task_pt_regs(task);
6079 BUG_ON(ctx->ctx_smpl_hdr);
6081 pfm_force_cleanup(ctx, regs);
6083 pfm_unprotect_ctx_ctxsw(ctx, flags);
6086 * this one (kmalloc'ed) is fine with interrupts disabled
6088 pfm_context_free(ctx);
6090 return;
6094 * we restore ALL the debug registers to avoid picking up
6095 * stale state.
6097 if (ctx->ctx_fl_using_dbreg) {
6098 pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
6099 pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
6102 * retrieve saved psr.up
6104 psr_up = ctx->ctx_saved_psr_up;
6107 * if we were the last user of the PMU on that CPU,
6108 * then nothing to do except restore psr
6110 if (GET_LAST_CPU(ctx) == smp_processor_id() && ctx->ctx_last_activation == GET_ACTIVATION()) {
6113 * retrieve partial reload masks (due to user modifications)
6115 pmc_mask = ctx->ctx_reload_pmcs[0];
6116 pmd_mask = ctx->ctx_reload_pmds[0];
6118 } else {
6120 * To avoid leaking information to the user level when psr.sp=0,
6121 * we must reload ALL implemented pmds (even the ones we don't use).
6122 * In the kernel we only allow PFM_READ_PMDS on registers which
6123 * we initialized or requested (sampling) so there is no risk there.
6125 pmd_mask = pfm_sysctl.fastctxsw ? ctx->ctx_used_pmds[0] : ctx->ctx_all_pmds[0];
6128 * ALL accessible PMCs are systematically reloaded, unused registers
6129 * get their default (from pfm_reset_pmu_state()) values to avoid picking
6130 * up stale configuration.
6132 * PMC0 is never in the mask. It is always restored separately.
6134 pmc_mask = ctx->ctx_all_pmcs[0];
6137 * when context is MASKED, we will restore PMC with plm=0
6138 * and PMD with stale information, but that's ok, nothing
6139 * will be captured.
6141 * XXX: optimize here
6143 if (pmd_mask) pfm_restore_pmds(ctx->th_pmds, pmd_mask);
6144 if (pmc_mask) pfm_restore_pmcs(ctx->th_pmcs, pmc_mask);
6147 * check for pending overflow at the time the state
6148 * was saved.
6150 if (unlikely(PMC0_HAS_OVFL(ctx->th_pmcs[0]))) {
6152 * reload pmc0 with the overflow information
6153 * On McKinley PMU, this will trigger a PMU interrupt
6155 ia64_set_pmc(0, ctx->th_pmcs[0]);
6156 ia64_srlz_d();
6157 ctx->th_pmcs[0] = 0UL;
6160 * will replay the PMU interrupt
6162 if (need_irq_resend) ia64_resend_irq(IA64_PERFMON_VECTOR);
6164 pfm_stats[smp_processor_id()].pfm_replay_ovfl_intr_count++;
6168 * we just did a reload, so we reset the partial reload fields
6170 ctx->ctx_reload_pmcs[0] = 0UL;
6171 ctx->ctx_reload_pmds[0] = 0UL;
6173 SET_LAST_CPU(ctx, smp_processor_id());
6176 * bump activation value for this PMU
6178 INC_ACTIVATION();
6180 * record current activation for this context
6182 SET_ACTIVATION(ctx);
6185 * establish new ownership.
6187 SET_PMU_OWNER(task, ctx);
6190 * restore the psr.up bit. measurement
6191 * is active again.
6192 * no PMU interrupt can happen at this point
6193 * because we still have interrupts disabled.
6195 if (likely(psr_up)) pfm_set_psr_up();
6198 * allow concurrent access to context
6200 pfm_unprotect_ctx_ctxsw(ctx, flags);
6202 #else /* !CONFIG_SMP */
6204 * reload PMU state for UP kernels
6205 * in 2.5 we come here with interrupts disabled
6207 void
6208 pfm_load_regs (struct task_struct *task)
6210 pfm_context_t *ctx;
6211 struct task_struct *owner;
6212 unsigned long pmd_mask, pmc_mask;
6213 u64 psr, psr_up;
6214 int need_irq_resend;
6216 owner = GET_PMU_OWNER();
6217 ctx = PFM_GET_CTX(task);
6218 psr = pfm_get_psr();
6220 BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP));
6221 BUG_ON(psr & IA64_PSR_I);
6224 * we restore ALL the debug registers to avoid picking up
6225 * stale state.
6227 * This must be done even when the task is still the owner
6228 * as the registers may have been modified via ptrace()
6229 * (not perfmon) by the previous task.
6231 if (ctx->ctx_fl_using_dbreg) {
6232 pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
6233 pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
6237 * retrieve saved psr.up
6239 psr_up = ctx->ctx_saved_psr_up;
6240 need_irq_resend = pmu_conf->flags & PFM_PMU_IRQ_RESEND;
6243 * short path, our state is still there, just
6244 * need to restore psr and we go
6246 * we do not touch either PMC nor PMD. the psr is not touched
6247 * by the overflow_handler. So we are safe w.r.t. interrupt
6248 * concurrency even without interrupt masking.
6250 if (likely(owner == task)) {
6251 if (likely(psr_up)) pfm_set_psr_up();
6252 return;
6256 * someone else is still using the PMU, first push it out and
6257 * then we'll be able to install our stuff !
6259 * Upon return, there will be no owner for the current PMU
6261 if (owner) pfm_lazy_save_regs(owner);
6264 * To avoid leaking information to the user level when psr.sp=0,
6265 * we must reload ALL implemented pmds (even the ones we don't use).
6266 * In the kernel we only allow PFM_READ_PMDS on registers which
6267 * we initialized or requested (sampling) so there is no risk there.
6269 pmd_mask = pfm_sysctl.fastctxsw ? ctx->ctx_used_pmds[0] : ctx->ctx_all_pmds[0];
6272 * ALL accessible PMCs are systematically reloaded, unused registers
6273 * get their default (from pfm_reset_pmu_state()) values to avoid picking
6274 * up stale configuration.
6276 * PMC0 is never in the mask. It is always restored separately
6278 pmc_mask = ctx->ctx_all_pmcs[0];
6280 pfm_restore_pmds(ctx->th_pmds, pmd_mask);
6281 pfm_restore_pmcs(ctx->th_pmcs, pmc_mask);
6284 * check for pending overflow at the time the state
6285 * was saved.
6287 if (unlikely(PMC0_HAS_OVFL(ctx->th_pmcs[0]))) {
6289 * reload pmc0 with the overflow information
6290 * On McKinley PMU, this will trigger a PMU interrupt
6292 ia64_set_pmc(0, ctx->th_pmcs[0]);
6293 ia64_srlz_d();
6295 ctx->th_pmcs[0] = 0UL;
6298 * will replay the PMU interrupt
6300 if (need_irq_resend) ia64_resend_irq(IA64_PERFMON_VECTOR);
6302 pfm_stats[smp_processor_id()].pfm_replay_ovfl_intr_count++;
6306 * establish new ownership.
6308 SET_PMU_OWNER(task, ctx);
6311 * restore the psr.up bit. measurement
6312 * is active again.
6313 * no PMU interrupt can happen at this point
6314 * because we still have interrupts disabled.
6316 if (likely(psr_up)) pfm_set_psr_up();
6318 #endif /* CONFIG_SMP */
6321 * this function assumes monitoring is stopped
6323 static void
6324 pfm_flush_pmds(struct task_struct *task, pfm_context_t *ctx)
6326 u64 pmc0;
6327 unsigned long mask2, val, pmd_val, ovfl_val;
6328 int i, can_access_pmu = 0;
6329 int is_self;
6332 * is the caller the task being monitored (or which initiated the
6333 * session for system wide measurements)
6335 is_self = ctx->ctx_task == task ? 1 : 0;
6338 * can access PMU if task is the owner of the PMU state on the current CPU
6339 * or if we are running on the CPU bound to the context in system-wide mode
6340 * (that is not necessarily the task the context is attached to in this mode).
6341 * In system-wide we always have can_access_pmu true because a task running on an
6342 * invalid processor is flagged earlier in the call stack (see pfm_stop).
6344 can_access_pmu = (GET_PMU_OWNER() == task) || (ctx->ctx_fl_system && ctx->ctx_cpu == smp_processor_id());
6345 if (can_access_pmu) {
6347 * Mark the PMU as not owned
6348 * This will cause the interrupt handler to do nothing in case an overflow
6349 * interrupt was in-flight
6350 * This also guarantees that pmc0 will contain the final state
6351 * It virtually gives us full control on overflow processing from that point
6352 * on.
6354 SET_PMU_OWNER(NULL, NULL);
6355 DPRINT(("releasing ownership\n"));
6358 * read current overflow status:
6360 * we are guaranteed to read the final stable state
6362 ia64_srlz_d();
6363 pmc0 = ia64_get_pmc(0); /* slow */
6366 * reset freeze bit, overflow status information destroyed
6368 pfm_unfreeze_pmu();
6369 } else {
6370 pmc0 = ctx->th_pmcs[0];
6372 * clear whatever overflow status bits there were
6374 ctx->th_pmcs[0] = 0;
6376 ovfl_val = pmu_conf->ovfl_val;
6378 * we save all the used pmds
6379 * we take care of overflows for counting PMDs
6381 * XXX: sampling situation is not taken into account here
6383 mask2 = ctx->ctx_used_pmds[0];
6385 DPRINT(("is_self=%d ovfl_val=0x%lx mask2=0x%lx\n", is_self, ovfl_val, mask2));
6387 for (i = 0; mask2; i++, mask2>>=1) {
6389 /* skip unused pmds */
6390 if ((mask2 & 0x1) == 0) continue;
6393 * can access PMU always true in system wide mode
6395 val = pmd_val = can_access_pmu ? ia64_get_pmd(i) : ctx->th_pmds[i];
6397 if (PMD_IS_COUNTING(i)) {
6398 DPRINT(("[%d] pmd[%d] ctx_pmd=0x%lx hw_pmd=0x%lx\n",
6399 task_pid_nr(task),
6401 ctx->ctx_pmds[i].val,
6402 val & ovfl_val));
6405 * we rebuild the full 64 bit value of the counter
6407 val = ctx->ctx_pmds[i].val + (val & ovfl_val);
6410 * now everything is in ctx_pmds[] and we need
6411 * to clear the saved context from save_regs() such that
6412 * pfm_read_pmds() gets the correct value
6414 pmd_val = 0UL;
6417 * take care of overflow inline
6419 if (pmc0 & (1UL << i)) {
6420 val += 1 + ovfl_val;
6421 DPRINT(("[%d] pmd[%d] overflowed\n", task_pid_nr(task), i));
6425 DPRINT(("[%d] ctx_pmd[%d]=0x%lx pmd_val=0x%lx\n", task_pid_nr(task), i, val, pmd_val));
6427 if (is_self) ctx->th_pmds[i] = pmd_val;
6429 ctx->ctx_pmds[i].val = val;
6433 static struct irqaction perfmon_irqaction = {
6434 .handler = pfm_interrupt_handler,
6435 .flags = IRQF_DISABLED,
6436 .name = "perfmon"
6439 static void
6440 pfm_alt_save_pmu_state(void *data)
6442 struct pt_regs *regs;
6444 regs = task_pt_regs(current);
6446 DPRINT(("called\n"));
6449 * should not be necessary but
6450 * let's take no risk
6452 pfm_clear_psr_up();
6453 pfm_clear_psr_pp();
6454 ia64_psr(regs)->pp = 0;
6457 * This call is required
6458 * May cause a spurious interrupt on some processors
6460 pfm_freeze_pmu();
6462 ia64_srlz_d();
6465 void
6466 pfm_alt_restore_pmu_state(void *data)
6468 struct pt_regs *regs;
6470 regs = task_pt_regs(current);
6472 DPRINT(("called\n"));
6475 * put PMU back in state expected
6476 * by perfmon
6478 pfm_clear_psr_up();
6479 pfm_clear_psr_pp();
6480 ia64_psr(regs)->pp = 0;
6483 * perfmon runs with PMU unfrozen at all times
6485 pfm_unfreeze_pmu();
6487 ia64_srlz_d();
int
pfm_install_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl)
{
	int ret, i;
	int reserve_cpu;

	/* some sanity checks */
	if (hdl == NULL || hdl->handler == NULL) return -EINVAL;

	/* do the easy test first */
	if (pfm_alt_intr_handler) return -EBUSY;

	/* one at a time in the install or remove, just fail the others */
	if (!spin_trylock(&pfm_alt_install_check)) {
		return -EBUSY;
	}

	/* reserve our session */
	for_each_online_cpu(reserve_cpu) {
		ret = pfm_reserve_session(NULL, 1, reserve_cpu);
		if (ret) goto cleanup_reserve;
	}

	/* save the current system wide pmu states */
	ret = on_each_cpu(pfm_alt_save_pmu_state, NULL, 1);
	if (ret) {
		DPRINT(("on_each_cpu() failed: %d\n", ret));
		goto cleanup_reserve;
	}

	/* officially change to the alternate interrupt handler */
	pfm_alt_intr_handler = hdl;

	spin_unlock(&pfm_alt_install_check);

	return 0;

cleanup_reserve:
	for_each_online_cpu(i) {
		/* don't unreserve more than we reserved */
		if (i >= reserve_cpu) break;

		pfm_unreserve_session(NULL, 1, i);
	}

	spin_unlock(&pfm_alt_install_check);

	return ret;
}
EXPORT_SYMBOL_GPL(pfm_install_alt_pmu_interrupt);

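/*
 * Tear-down counterpart of the install path: detach the alternate handler,
 * restore the perfmon PMU state on every CPU, and release the per-CPU
 * system-wide sessions.
 */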
int
pfm_remove_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl)
{
	int i;
	int ret;

	if (hdl == NULL) return -EINVAL;

	/* cannot remove someone else's handler! */
	if (pfm_alt_intr_handler != hdl) return -EINVAL;

	/* one at a time in the install or remove, just fail the others */
	if (!spin_trylock(&pfm_alt_install_check)) {
		return -EBUSY;
	}

	pfm_alt_intr_handler = NULL;

	ret = on_each_cpu(pfm_alt_restore_pmu_state, NULL, 1);
	if (ret) {
		DPRINT(("on_each_cpu() failed: %d\n", ret));
	}

	for_each_online_cpu(i) {
		pfm_unreserve_session(NULL, 1, i);
	}

	spin_unlock(&pfm_alt_install_check);

	return 0;
}
EXPORT_SYMBOL_GPL(pfm_remove_alt_pmu_interrupt);

/*
 * perfmon initialization routine, called from the initcall() table
 */
static int init_pfm_fs(void);

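/*
 * Walk the table of known PMU descriptions (pmu_confs) and select the first
 * entry that matches: either its probe() hook succeeds, or its pmu_family
 * matches the CPU family (0xff acting as a generic/wildcard entry).
 */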
static int __init
pfm_probe_pmu(void)
{
	pmu_config_t **p;
	int family;

	family = local_cpu_data->family;
	p      = pmu_confs;

	while(*p) {
		if ((*p)->probe) {
			if ((*p)->probe() == 0) goto found;
		} else if ((*p)->pmu_family == family || (*p)->pmu_family == 0xff) {
			goto found;
		}
		p++;
	}
	return -1;
found:
	pmu_conf = *p;
	return 0;
}

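/*
 * seq_file operations backing /proc/perfmon, created in pfm_init() below.
 */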
static const struct file_operations pfm_proc_fops = {
	.open		= pfm_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

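/*
 * pfm_init(): probe the PMU, build the implemented PMC/PMD bitmaps and
 * sanity-check the configuration, then create the /proc and sysctl entries
 * and initialize locks and the perfmon pseudo-filesystem.
 */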
int __init
pfm_init(void)
{
	unsigned int n, n_counters, i;

	printk("perfmon: version %u.%u IRQ %u\n",
		PFM_VERSION_MAJ,
		PFM_VERSION_MIN,
		IA64_PERFMON_VECTOR);

	if (pfm_probe_pmu()) {
		printk(KERN_INFO "perfmon: disabled, there is no support for processor family %d\n",
		       local_cpu_data->family);
		return -ENODEV;
	}

	/*
	 * compute the number of implemented PMD/PMC from the
	 * description tables
	 */
	n = 0;
	for (i=0; PMC_IS_LAST(i) == 0; i++) {
		if (PMC_IS_IMPL(i) == 0) continue;
		pmu_conf->impl_pmcs[i>>6] |= 1UL << (i&63);
		n++;
	}
	pmu_conf->num_pmcs = n;

	n = 0; n_counters = 0;
	for (i=0; PMD_IS_LAST(i) == 0; i++) {
		if (PMD_IS_IMPL(i) == 0) continue;
		pmu_conf->impl_pmds[i>>6] |= 1UL << (i&63);
		n++;
		if (PMD_IS_COUNTING(i)) n_counters++;
	}
	pmu_conf->num_pmds     = n;
	pmu_conf->num_counters = n_counters;

	/*
	 * sanity checks on the number of debug registers
	 */
	if (pmu_conf->use_rr_dbregs) {
		if (pmu_conf->num_ibrs > IA64_NUM_DBG_REGS) {
			printk(KERN_INFO "perfmon: unsupported number of code debug registers (%u)\n", pmu_conf->num_ibrs);
			pmu_conf = NULL;
			return -1;
		}
		if (pmu_conf->num_dbrs > IA64_NUM_DBG_REGS) {
			printk(KERN_INFO "perfmon: unsupported number of data debug registers (%u)\n", pmu_conf->num_dbrs);
			pmu_conf = NULL;
			return -1;
		}
	}

	printk("perfmon: %s PMU detected, %u PMCs, %u PMDs, %u counters (%lu bits)\n",
	       pmu_conf->pmu_name,
	       pmu_conf->num_pmcs,
	       pmu_conf->num_pmds,
	       pmu_conf->num_counters,
	       ffz(pmu_conf->ovfl_val));

	/* sanity check */
	if (pmu_conf->num_pmds >= PFM_NUM_PMD_REGS || pmu_conf->num_pmcs >= PFM_NUM_PMC_REGS) {
		printk(KERN_ERR "perfmon: not enough pmc/pmd, perfmon disabled\n");
		pmu_conf = NULL;
		return -1;
	}

	/*
	 * create /proc/perfmon (mostly for debugging purposes)
	 */
	perfmon_dir = proc_create("perfmon", S_IRUGO, NULL, &pfm_proc_fops);
	if (perfmon_dir == NULL) {
		printk(KERN_ERR "perfmon: cannot create /proc entry, perfmon disabled\n");
		pmu_conf = NULL;
		return -1;
	}

	/*
	 * create /proc/sys/kernel/perfmon (for debugging purposes)
	 */
	pfm_sysctl_header = register_sysctl_table(pfm_sysctl_root);

	/*
	 * initialize all our spinlocks
	 */
	spin_lock_init(&pfm_sessions.pfs_lock);
	spin_lock_init(&pfm_buffer_fmt_lock);

	init_pfm_fs();

	for(i=0; i < NR_CPUS; i++) pfm_stats[i].pfm_ovfl_intr_cycles_min = ~0UL;

	return 0;
}
__initcall(pfm_init);

/*
 * this function is called before pfm_init()
 */
void
pfm_init_percpu (void)
{
	static int first_time=1;
	/*
	 * make sure no measurement is active
	 * (may inherit programmed PMCs from EFI).
	 */
	pfm_clear_psr_pp();
	pfm_clear_psr_up();

	/*
	 * we run with the PMU not frozen at all times
	 */
	pfm_unfreeze_pmu();

	if (first_time) {
		register_percpu_irq(IA64_PERFMON_VECTOR, &perfmon_irqaction);
		first_time=0;
	}

	ia64_setreg(_IA64_REG_CR_PMV, IA64_PERFMON_VECTOR);
	ia64_srlz_d();
}

/*
 * used for debug purposes only
 */
void
dump_pmu_state(const char *from)
{
	struct task_struct *task;
	struct pt_regs *regs;
	pfm_context_t *ctx;
	unsigned long psr, dcr, info, flags;
	int i, this_cpu;

	local_irq_save(flags);

	this_cpu = smp_processor_id();
	regs     = task_pt_regs(current);
	info     = PFM_CPUINFO_GET();
	dcr      = ia64_getreg(_IA64_REG_CR_DCR);

	if (info == 0 && ia64_psr(regs)->pp == 0 && (dcr & IA64_DCR_PP) == 0) {
		local_irq_restore(flags);
		return;
	}

	printk("CPU%d from %s() current [%d] iip=0x%lx %s\n",
		this_cpu,
		from,
		task_pid_nr(current),
		regs->cr_iip,
		current->comm);

	task = GET_PMU_OWNER();
	ctx  = GET_PMU_CTX();

	printk("->CPU%d owner [%d] ctx=%p\n", this_cpu, task ? task_pid_nr(task) : -1, ctx);

	psr = pfm_get_psr();

	printk("->CPU%d pmc0=0x%lx psr.pp=%d psr.up=%d dcr.pp=%d syst_info=0x%lx user_psr.up=%d user_psr.pp=%d\n",
		this_cpu,
		ia64_get_pmc(0),
		psr & IA64_PSR_PP ? 1 : 0,
		psr & IA64_PSR_UP ? 1 : 0,
		dcr & IA64_DCR_PP ? 1 : 0,
		info,
		ia64_psr(regs)->up,
		ia64_psr(regs)->pp);

	ia64_psr(regs)->up = 0;
	ia64_psr(regs)->pp = 0;

	for (i=1; PMC_IS_LAST(i) == 0; i++) {
		if (PMC_IS_IMPL(i) == 0) continue;
		printk("->CPU%d pmc[%d]=0x%lx thread_pmc[%d]=0x%lx\n", this_cpu, i, ia64_get_pmc(i), i, ctx->th_pmcs[i]);
	}

	for (i=1; PMD_IS_LAST(i) == 0; i++) {
		if (PMD_IS_IMPL(i) == 0) continue;
		printk("->CPU%d pmd[%d]=0x%lx thread_pmd[%d]=0x%lx\n", this_cpu, i, ia64_get_pmd(i), i, ctx->th_pmds[i]);
	}

	if (ctx) {
		printk("->CPU%d ctx_state=%d vaddr=%p addr=%p msgq_head=%d msgq_tail=%d saved_psr_up=0x%lx\n",
				this_cpu,
				ctx->ctx_state,
				ctx->ctx_smpl_vaddr,
				ctx->ctx_smpl_hdr,
				ctx->ctx_msgq_head,
				ctx->ctx_msgq_tail,
				ctx->ctx_saved_psr_up);
	}
	local_irq_restore(flags);
}

/*
 * called from process.c:copy_thread(). task is new child.
 */
void
pfm_inherit(struct task_struct *task, struct pt_regs *regs)
{
	struct thread_struct *thread;

	DPRINT(("perfmon: pfm_inherit clearing state for [%d]\n", task_pid_nr(task)));

	thread = &task->thread;

	/*
	 * cut links inherited from parent (current)
	 */
	thread->pfm_context = NULL;

	PFM_SET_WORK_PENDING(task, 0);

	/*
	 * the psr bits are already set properly in copy_threads()
	 */
}
#else  /* !CONFIG_PERFMON */
asmlinkage long
sys_perfmonctl (int fd, int cmd, void *arg, int count)
{
	return -ENOSYS;
}
#endif /* CONFIG_PERFMON */