/*
 *  linux/kernel/profile.c
 *  Simple profiling. Manages a direct-mapped profile hit count buffer,
 *  with configurable resolution, support for restricting the cpus on
 *  which profiling is done, and switching between cpu time and
 *  schedule() calls via kernel command line parameters passed at boot.
 *
 *  Scheduler profiling support, Arjan van de Ven and Ingo Molnar,
 *	Red Hat, July 2004
 *  Consolidation of architecture support code for profiling,
 *	William Irwin, Oracle, July 2004
 *  Amortized hit count accounting via per-cpu open-addressed hashtables
 *	to resolve timer interrupt livelocks, William Irwin, Oracle, 2004
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/profile.h>
#include <linux/bootmem.h>
#include <linux/notifier.h>
#include <linux/mm.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/highmem.h>
#include <asm/sections.h>
#include <asm/semaphore.h>
struct profile_hit {
	u32 pc, hits;
};
#define PROFILE_GRPSHIFT	3
#define PROFILE_GRPSZ		(1 << PROFILE_GRPSHIFT)
#define NR_PROFILE_HIT		(PAGE_SIZE/sizeof(struct profile_hit))
#define NR_PROFILE_GRP		(NR_PROFILE_HIT/PROFILE_GRPSZ)
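/*
 * For orientation (illustrative numbers, assuming 4 KiB pages and the
 * 8-byte struct profile_hit above): each per-cpu hashtable is one page,
 * so NR_PROFILE_HIT = 4096/8 = 512 entries, organized as
 * NR_PROFILE_GRP = 512/8 = 64 groups of PROFILE_GRPSZ = 8 entries each.
 */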
static atomic_t *prof_buffer;
static unsigned long prof_len, prof_shift;
static int prof_on;
static cpumask_t prof_cpu_mask = CPU_MASK_ALL;
#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct profile_hit *[2], cpu_profile_hits);
static DEFINE_PER_CPU(int, cpu_profile_flip);
static DECLARE_MUTEX(profile_flip_mutex);
#endif /* CONFIG_SMP */
static int __init profile_setup(char * str)
{
	int par;

	if (!strncmp(str, "schedule", 8)) {
		prof_on = SCHED_PROFILING;
		printk(KERN_INFO "kernel schedule profiling enabled\n");
		if (str[7] == ',')
			str += 8;
	}
	if (get_option(&str, &par)) {
		prof_shift = par;
		prof_on = CPU_PROFILING;
		printk(KERN_INFO "kernel profiling enabled (shift: %ld)\n",
			prof_shift);
	}
	return 1;
}
__setup("profile=", profile_setup);
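/*
 * Usage sketch (not part of this file): booting with "profile=2" enables
 * CPU_PROFILING with prof_shift = 2, i.e. one bucket per 1 << 2 = 4
 * bytes of kernel text; booting with "profile=schedule" profiles
 * schedule() calls instead of timer ticks.
 */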
void __init profile_init(void)
{
	if (!prof_on)
		return;

	/* only text is profiled */
	prof_len = (_etext - _stext) >> prof_shift;
	prof_buffer = alloc_bootmem(prof_len * sizeof(atomic_t));
}
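/*
 * Sizing example (illustrative, assumed numbers): with 4 MiB of kernel
 * text and prof_shift = 2, prof_len = 4 MiB >> 2 = 1M buckets, so
 * prof_buffer occupies 1M * sizeof(atomic_t) = 4 MiB given a 4-byte
 * atomic_t.
 */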
/* Profile event notifications */

#ifdef CONFIG_PROFILING

static DECLARE_RWSEM(profile_rwsem);
static rwlock_t handoff_lock = RW_LOCK_UNLOCKED;
static struct notifier_block * task_exit_notifier;
static struct notifier_block * task_free_notifier;
static struct notifier_block * munmap_notifier;

void profile_task_exit(struct task_struct * task)
{
	down_read(&profile_rwsem);
	notifier_call_chain(&task_exit_notifier, 0, task);
	up_read(&profile_rwsem);
}
int profile_handoff_task(struct task_struct * task)
{
	int ret;

	read_lock(&handoff_lock);
	ret = notifier_call_chain(&task_free_notifier, 0, task);
	read_unlock(&handoff_lock);
	return (ret == NOTIFY_OK) ? 1 : 0;
}
void profile_munmap(unsigned long addr)
{
	down_read(&profile_rwsem);
	notifier_call_chain(&munmap_notifier, 0, (void *)addr);
	up_read(&profile_rwsem);
}
int task_handoff_register(struct notifier_block * n)
{
	int err = -EINVAL;

	write_lock(&handoff_lock);
	err = notifier_chain_register(&task_free_notifier, n);
	write_unlock(&handoff_lock);
	return err;
}

int task_handoff_unregister(struct notifier_block * n)
{
	int err = -EINVAL;

	write_lock(&handoff_lock);
	err = notifier_chain_unregister(&task_free_notifier, n);
	write_unlock(&handoff_lock);
	return err;
}
int profile_event_register(enum profile_type type, struct notifier_block * n)
{
	int err = -EINVAL;

	down_write(&profile_rwsem);

	switch (type) {
	case PROFILE_TASK_EXIT:
		err = notifier_chain_register(&task_exit_notifier, n);
		break;
	case PROFILE_MUNMAP:
		err = notifier_chain_register(&munmap_notifier, n);
		break;
	}

	up_write(&profile_rwsem);

	return err;
}
int profile_event_unregister(enum profile_type type, struct notifier_block * n)
{
	int err = -EINVAL;

	down_write(&profile_rwsem);

	switch (type) {
	case PROFILE_TASK_EXIT:
		err = notifier_chain_unregister(&task_exit_notifier, n);
		break;
	case PROFILE_MUNMAP:
		err = notifier_chain_unregister(&munmap_notifier, n);
		break;
	}

	up_write(&profile_rwsem);
	return err;
}
static struct notifier_block * profile_listeners;
static rwlock_t profile_lock = RW_LOCK_UNLOCKED;
int register_profile_notifier(struct notifier_block * nb)
{
	int err;

	write_lock_irq(&profile_lock);
	err = notifier_chain_register(&profile_listeners, nb);
	write_unlock_irq(&profile_lock);
	return err;
}
int unregister_profile_notifier(struct notifier_block * nb)
{
	int err;

	write_lock_irq(&profile_lock);
	err = notifier_chain_unregister(&profile_listeners, nb);
	write_unlock_irq(&profile_lock);
	return err;
}
void profile_hook(struct pt_regs * regs)
{
	read_lock(&profile_lock);
	notifier_call_chain(&profile_listeners, 0, regs);
	read_unlock(&profile_lock);
}
EXPORT_SYMBOL_GPL(register_profile_notifier);
EXPORT_SYMBOL_GPL(unregister_profile_notifier);
EXPORT_SYMBOL_GPL(task_handoff_register);
EXPORT_SYMBOL_GPL(task_handoff_unregister);

#endif /* CONFIG_PROFILING */

EXPORT_SYMBOL_GPL(profile_event_register);
EXPORT_SYMBOL_GPL(profile_event_unregister);
#ifdef CONFIG_SMP
/*
 * Each cpu has a pair of open-addressed hashtables for pending
 * profile hits. read_profile() IPI's all cpus to request them
 * to flip buffers and flushes their contents to prof_buffer itself.
 * Flip requests are serialized by the profile_flip_mutex. The sole
 * purpose of the second hashtable is to avoid the cacheline
 * contention that would otherwise occur when flushing pending
 * profile hits (required for the accuracy of reported profile hits)
 * and thereby resurrect the interrupt livelock issue.
 *
 * The open-addressed hashtables are indexed by profile buffer slot
 * and each entry holds the number of pending hits to that profile
 * buffer slot on a cpu. When the hashtable overflows, all pending
 * hits are accounted to their corresponding profile buffer slots
 * with atomic_add() and the hashtable emptied. As numerous pending
 * hits may be accounted to a profile buffer slot in a hashtable
 * entry, this amortizes a number of atomic profile buffer increments
 * likely to be far larger than the number of entries in the
 * hashtable, particularly since the number of distinct profile
 * buffer positions to which hits are accounted during short
 * intervals (e.g. several seconds) is usually very small. Exclusion
 * from buffer flipping is provided by interrupt disablement (note
 * that for SCHED_PROFILING profile_hit() may be called from process
 * context). The hash function is meant to be lightweight as opposed
 * to strong, and was vaguely inspired by ppc64 firmware-supported
 * inverted pagetable hash functions, but uses a full hashtable of
 * finite collision chains, not just pairs of them.
 *
 * -- wli
 */
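/*
 * Worked example (illustrative, assuming 4 KiB pages so that
 * NR_PROFILE_HIT == 512 and NR_PROFILE_GRP == 64): for pc == 42,
 *	primary   = (42 & 63) << 3         = 336	(group 42)
 *	secondary = (~(42 << 1) & 63) << 3 = 344	(group stride 43)
 * Since pc << 1 is even, ~(pc << 1) is odd, so the group stride is
 * always odd; with a power-of-two group count, the probe sequence
 * therefore visits every group before returning to the primary one.
 */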
static void __profile_flip_buffers(void *unused)
{
	int cpu = smp_processor_id();

	per_cpu(cpu_profile_flip, cpu) = !per_cpu(cpu_profile_flip, cpu);
}
static void profile_flip_buffers(void)
{
	int i, j, cpu;

	down(&profile_flip_mutex);
	j = per_cpu(cpu_profile_flip, get_cpu());
	put_cpu();
	on_each_cpu(__profile_flip_buffers, NULL, 0, 1);
	for_each_online_cpu(cpu) {
		struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[j];
		for (i = 0; i < NR_PROFILE_HIT; ++i) {
			if (!hits[i].hits) {
				if (hits[i].pc)
					hits[i].pc = 0;
				continue;
			}
			atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
			hits[i].hits = hits[i].pc = 0;
		}
	}
	up(&profile_flip_mutex);
}
static void profile_discard_flip_buffers(void)
{
	int i, cpu;

	down(&profile_flip_mutex);
	i = per_cpu(cpu_profile_flip, get_cpu());
	put_cpu();
	on_each_cpu(__profile_flip_buffers, NULL, 0, 1);
	for_each_online_cpu(cpu) {
		struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[i];
		memset(hits, 0, NR_PROFILE_HIT * sizeof(struct profile_hit));
	}
	up(&profile_flip_mutex);
}
void profile_hit(int type, void *__pc)
{
	unsigned long primary, secondary, flags, pc = (unsigned long)__pc;
	int i, j, cpu;
	struct profile_hit *hits;

	if (prof_on != type || !prof_buffer)
		return;
	pc = min((pc - (unsigned long)_stext) >> prof_shift, prof_len - 1);
	i = primary = (pc & (NR_PROFILE_GRP - 1)) << PROFILE_GRPSHIFT;
	secondary = (~(pc << 1) & (NR_PROFILE_GRP - 1)) << PROFILE_GRPSHIFT;
	cpu = get_cpu();
	hits = per_cpu(cpu_profile_hits, cpu)[per_cpu(cpu_profile_flip, cpu)];
	if (!hits) {
		put_cpu();
		return;
	}
	local_irq_save(flags);
	do {
		for (j = 0; j < PROFILE_GRPSZ; ++j) {
			if (hits[i + j].pc == pc) {
				hits[i + j].hits++;
				goto out;
			} else if (!hits[i + j].hits) {
				hits[i + j].pc = pc;
				hits[i + j].hits = 1;
				goto out;
			}
		}
		i = (i + secondary) & (NR_PROFILE_HIT - 1);
	} while (i != primary);
	/*
	 * The hashtable is full: account this hit directly and flush
	 * all pending hits to the global buffer.
	 */
	atomic_inc(&prof_buffer[pc]);
	for (i = 0; i < NR_PROFILE_HIT; ++i) {
		atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
		hits[i].pc = hits[i].hits = 0;
	}
out:
	local_irq_restore(flags);
	put_cpu();
}
#ifdef CONFIG_HOTPLUG_CPU
static int __devinit profile_cpu_callback(struct notifier_block *info,
					unsigned long action, void *__cpu)
{
	int node, cpu = (unsigned long)__cpu;
	struct page *page;

	switch (action) {
	case CPU_UP_PREPARE:
		node = cpu_to_node(cpu);
		per_cpu(cpu_profile_flip, cpu) = 0;
		if (!per_cpu(cpu_profile_hits, cpu)[1]) {
			page = alloc_pages_node(node, GFP_KERNEL, 0);
			if (!page)
				return NOTIFY_BAD;
			clear_highpage(page);
			per_cpu(cpu_profile_hits, cpu)[1] = page_address(page);
		}
		if (!per_cpu(cpu_profile_hits, cpu)[0]) {
			page = alloc_pages_node(node, GFP_KERNEL, 0);
			if (!page)
				goto out_free;
			clear_highpage(page);
			per_cpu(cpu_profile_hits, cpu)[0] = page_address(page);
		}
		break;
	out_free:
		page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]);
		per_cpu(cpu_profile_hits, cpu)[1] = NULL;
		__free_page(page);
		return NOTIFY_BAD;
	case CPU_ONLINE:
		cpu_set(cpu, prof_cpu_mask);
		break;
	case CPU_UP_CANCELED:
	case CPU_DEAD:
		cpu_clear(cpu, prof_cpu_mask);
		if (per_cpu(cpu_profile_hits, cpu)[0]) {
			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[0]);
			per_cpu(cpu_profile_hits, cpu)[0] = NULL;
			__free_page(page);
		}
		if (per_cpu(cpu_profile_hits, cpu)[1]) {
			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]);
			per_cpu(cpu_profile_hits, cpu)[1] = NULL;
			__free_page(page);
		}
		break;
	}
	return NOTIFY_OK;
}
#endif /* CONFIG_HOTPLUG_CPU */
#else /* !CONFIG_SMP */
#define profile_flip_buffers()		do { } while (0)
#define profile_discard_flip_buffers()	do { } while (0)

void profile_hit(int type, void *__pc)
{
	unsigned long pc;

	if (prof_on != type || !prof_buffer)
		return;
	pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
	atomic_inc(&prof_buffer[min(pc, prof_len - 1)]);
}
#endif /* !CONFIG_SMP */
void profile_tick(int type, struct pt_regs *regs)
{
	if (type == CPU_PROFILING)
		profile_hook(regs);
	if (!user_mode(regs) && cpu_isset(smp_processor_id(), prof_cpu_mask))
		profile_hit(type, (void *)profile_pc(regs));
}
#ifdef CONFIG_PROC_FS
#include <linux/proc_fs.h>
#include <asm/uaccess.h>
#include <asm/ptrace.h>
static int prof_cpu_mask_read_proc (char *page, char **start, off_t off,
			int count, int *eof, void *data)
{
	int len = cpumask_scnprintf(page, count, *(cpumask_t *)data);
	if (count - len < 2)
		return -EINVAL;
	len += sprintf(page + len, "\n");
	return len;
}
static int prof_cpu_mask_write_proc (struct file *file, const char __user *buffer,
					unsigned long count, void *data)
{
	cpumask_t *mask = (cpumask_t *)data;
	unsigned long full_count = count, err;
	cpumask_t new_value;

	err = cpumask_parse(buffer, count, new_value);
	if (err)
		return err;

	*mask = new_value;
	return full_count;
}
void create_prof_cpu_mask(struct proc_dir_entry *root_irq_dir)
{
	struct proc_dir_entry *entry;

	/* create /proc/irq/prof_cpu_mask */
	if (!(entry = create_proc_entry("prof_cpu_mask", 0600, root_irq_dir)))
		return;

	entry->data = (void *)&prof_cpu_mask;
	entry->read_proc = prof_cpu_mask_read_proc;
	entry->write_proc = prof_cpu_mask_write_proc;
}
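/*
 * Usage sketch (not part of this file): profiling can be restricted to
 * a subset of cpus by writing a bitmask in the same hex format used by
 * /proc/irq/N/smp_affinity, e.g. restricting it to cpu 0:
 *
 *	int fd = open("/proc/irq/prof_cpu_mask", O_WRONLY);
 *	write(fd, "1", 1);
 *	close(fd);
 */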
/*
 * This function accesses profiling information. The returned data is
 * binary: the sampling step and the actual contents of the profile
 * buffer. Use of the program readprofile is recommended in order to
 * get meaningful info out of these data.
 */
static ssize_t
read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t read;
	char * pnt;
	unsigned int sample_step = 1 << prof_shift;

	profile_flip_buffers();
	if (p >= (prof_len+1)*sizeof(unsigned int))
		return 0;
	if (count > (prof_len+1)*sizeof(unsigned int) - p)
		count = (prof_len+1)*sizeof(unsigned int) - p;
	read = 0;

	while (p < sizeof(unsigned int) && count > 0) {
		put_user(*((char *)(&sample_step)+p), buf);
		buf++; p++; count--; read++;
	}
	pnt = (char *)prof_buffer + p - sizeof(atomic_t);
	if (copy_to_user(buf, (void *)pnt, count))
		return -EFAULT;
	read += count;
	*ppos += read;
	return read;
}
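/*
 * Usage sketch (not part of this file): a reader of /proc/profile sees
 * one unsigned int sampling step followed by prof_len hit counters:
 *
 *	unsigned int step;
 *	int fd = open("/proc/profile", O_RDONLY);
 *	read(fd, &step, sizeof(step));	step is 1 << prof_shift
 *	... hit counts follow ...
 *
 * The readprofile(1) utility performs this decoding and maps buffer
 * slots back to kernel symbols.
 */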
/*
 * Writing to /proc/profile resets the counters.
 *
 * Writing a 'profiling multiplier' value into it also re-sets the profiling
 * interrupt frequency, on architectures that support this.
 */
static ssize_t write_profile(struct file *file, const char __user *buf,
			     size_t count, loff_t *ppos)
{
#ifdef CONFIG_SMP
	extern int setup_profiling_timer (unsigned int multiplier);

	if (count == sizeof(int)) {
		unsigned int multiplier;

		if (copy_from_user(&multiplier, buf, sizeof(int)))
			return -EFAULT;

		if (setup_profiling_timer(multiplier))
			return -EINVAL;
	}
#endif
	profile_discard_flip_buffers();
	memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
	return count;
}
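/*
 * Usage sketch (not part of this file): any write resets the counters;
 * a write that is not exactly sizeof(int) bytes skips the multiplier
 * path and only resets:
 *
 *	int fd = open("/proc/profile", O_WRONLY);
 *	write(fd, "0", 1);
 *	close(fd);
 */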
static struct file_operations proc_profile_operations = {
	.read		= read_profile,
	.write		= write_profile,
};
#ifdef CONFIG_SMP
static void __init profile_nop(void *unused)
{
}

static int __init create_hash_tables(void)
{
	int cpu;

	for_each_online_cpu(cpu) {
		int node = cpu_to_node(cpu);
		struct page *page;

		/* one page per hashtable, allocated on the cpu's node */
		page = alloc_pages_node(node, GFP_KERNEL, 0);
		if (!page)
			goto out_cleanup;
		clear_highpage(page);
		per_cpu(cpu_profile_hits, cpu)[1]
				= (struct profile_hit *)page_address(page);
		page = alloc_pages_node(node, GFP_KERNEL, 0);
		if (!page)
			goto out_cleanup;
		clear_highpage(page);
		per_cpu(cpu_profile_hits, cpu)[0]
				= (struct profile_hit *)page_address(page);
	}
	return 0;
out_cleanup:
	prof_on = 0;
	mb();
	on_each_cpu(profile_nop, NULL, 0, 1);
	for_each_online_cpu(cpu) {
		struct page *page;

		if (per_cpu(cpu_profile_hits, cpu)[0]) {
			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[0]);
			per_cpu(cpu_profile_hits, cpu)[0] = NULL;
			__free_page(page);
		}
		if (per_cpu(cpu_profile_hits, cpu)[1]) {
			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]);
			per_cpu(cpu_profile_hits, cpu)[1] = NULL;
			__free_page(page);
		}
	}
	return -1;
}
#else
#define create_hash_tables()			({ 0; })
#endif
static int __init create_proc_profile(void)
{
	struct proc_dir_entry *entry;

	if (!prof_on)
		return 0;
	if (create_hash_tables())
		return -1;
	if (!(entry = create_proc_entry("profile", S_IWUSR | S_IRUGO, NULL)))
		return 0;
	entry->proc_fops = &proc_profile_operations;
	entry->size = (1+prof_len) * sizeof(atomic_t);
	hotcpu_notifier(profile_cpu_callback, 0);
	return 0;
}
module_init(create_proc_profile);
#endif /* CONFIG_PROC_FS */