MOXA linux-2.6.x / linux-2.6.9-uc0 from sdlinux-moxaart.tgz
[linux-2.6.9-moxart.git] / kernel / profile.c
blob cab14764cea0a62459094c2593eed83bd6a67884
1 /*
2 * linux/kernel/profile.c
3 */
5 #include <linux/config.h>
6 #include <linux/module.h>
7 #include <linux/profile.h>
8 #include <linux/bootmem.h>
9 #include <linux/notifier.h>
10 #include <linux/mm.h>
11 #include <linux/cpumask.h>
12 #include <linux/profile.h>
13 #include <asm/sections.h>
15 static atomic_t *prof_buffer;
16 static unsigned long prof_len, prof_shift;
17 static int prof_on;
18 static cpumask_t prof_cpu_mask = CPU_MASK_ALL;
20 static int __init profile_setup(char * str)
22 int par;
24 if (!strncmp(str, "schedule", 8)) {
25 prof_on = 2;
26 printk(KERN_INFO "kernel schedule profiling enabled\n");
27 if (str[7] == ',')
28 str += 8;
30 if (get_option(&str,&par)) {
31 prof_shift = par;
32 prof_on = 1;
33 printk(KERN_INFO "kernel profiling enabled (shift: %ld)\n",
34 prof_shift);
36 return 1;
38 __setup("profile=", profile_setup);
41 void __init profile_init(void)
43 if (!prof_on)
44 return;
46 /* only text is profiled */
47 prof_len = (_etext - _stext) >> prof_shift;
48 prof_buffer = alloc_bootmem(prof_len*sizeof(atomic_t));
51 /* Profile event notifications */
53 #ifdef CONFIG_PROFILING
55 static DECLARE_RWSEM(profile_rwsem);
56 static rwlock_t handoff_lock = RW_LOCK_UNLOCKED;
57 static struct notifier_block * task_exit_notifier;
58 static struct notifier_block * task_free_notifier;
59 static struct notifier_block * munmap_notifier;
61 void profile_task_exit(struct task_struct * task)
63 down_read(&profile_rwsem);
64 notifier_call_chain(&task_exit_notifier, 0, task);
65 up_read(&profile_rwsem);
68 int profile_handoff_task(struct task_struct * task)
70 int ret;
71 read_lock(&handoff_lock);
72 ret = notifier_call_chain(&task_free_notifier, 0, task);
73 read_unlock(&handoff_lock);
74 return (ret == NOTIFY_OK) ? 1 : 0;
77 void profile_munmap(unsigned long addr)
79 down_read(&profile_rwsem);
80 notifier_call_chain(&munmap_notifier, 0, (void *)addr);
81 up_read(&profile_rwsem);
84 int task_handoff_register(struct notifier_block * n)
86 int err = -EINVAL;
88 write_lock(&handoff_lock);
89 err = notifier_chain_register(&task_free_notifier, n);
90 write_unlock(&handoff_lock);
91 return err;
94 int task_handoff_unregister(struct notifier_block * n)
96 int err = -EINVAL;
98 write_lock(&handoff_lock);
99 err = notifier_chain_unregister(&task_free_notifier, n);
100 write_unlock(&handoff_lock);
101 return err;
104 int profile_event_register(enum profile_type type, struct notifier_block * n)
106 int err = -EINVAL;
108 down_write(&profile_rwsem);
110 switch (type) {
111 case PROFILE_TASK_EXIT:
112 err = notifier_chain_register(&task_exit_notifier, n);
113 break;
114 case PROFILE_MUNMAP:
115 err = notifier_chain_register(&munmap_notifier, n);
116 break;
119 up_write(&profile_rwsem);
121 return err;
125 int profile_event_unregister(enum profile_type type, struct notifier_block * n)
127 int err = -EINVAL;
129 down_write(&profile_rwsem);
131 switch (type) {
132 case PROFILE_TASK_EXIT:
133 err = notifier_chain_unregister(&task_exit_notifier, n);
134 break;
135 case PROFILE_MUNMAP:
136 err = notifier_chain_unregister(&munmap_notifier, n);
137 break;
140 up_write(&profile_rwsem);
141 return err;
144 static struct notifier_block * profile_listeners;
145 static rwlock_t profile_lock = RW_LOCK_UNLOCKED;
147 int register_profile_notifier(struct notifier_block * nb)
149 int err;
150 write_lock_irq(&profile_lock);
151 err = notifier_chain_register(&profile_listeners, nb);
152 write_unlock_irq(&profile_lock);
153 return err;
157 int unregister_profile_notifier(struct notifier_block * nb)
159 int err;
160 write_lock_irq(&profile_lock);
161 err = notifier_chain_unregister(&profile_listeners, nb);
162 write_unlock_irq(&profile_lock);
163 return err;
167 void profile_hook(struct pt_regs * regs)
169 read_lock(&profile_lock);
170 notifier_call_chain(&profile_listeners, 0, regs);
171 read_unlock(&profile_lock);
174 EXPORT_SYMBOL_GPL(register_profile_notifier);
175 EXPORT_SYMBOL_GPL(unregister_profile_notifier);
176 EXPORT_SYMBOL_GPL(task_handoff_register);
177 EXPORT_SYMBOL_GPL(task_handoff_unregister);
179 #endif /* CONFIG_PROFILING */
181 EXPORT_SYMBOL_GPL(profile_event_register);
182 EXPORT_SYMBOL_GPL(profile_event_unregister);
184 void profile_hit(int type, void *__pc)
186 unsigned long pc;
188 if (prof_on != type || !prof_buffer)
189 return;
190 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
191 atomic_inc(&prof_buffer[min(pc, prof_len - 1)]);
194 void profile_tick(int type, struct pt_regs *regs)
196 if (type == CPU_PROFILING)
197 profile_hook(regs);
198 if (!user_mode(regs) && cpu_isset(smp_processor_id(), prof_cpu_mask))
199 profile_hit(type, (void *)profile_pc(regs));
202 #ifdef CONFIG_PROC_FS
203 #include <linux/proc_fs.h>
204 #include <asm/uaccess.h>
205 #include <asm/ptrace.h>
207 static int prof_cpu_mask_read_proc (char *page, char **start, off_t off,
208 int count, int *eof, void *data)
210 int len = cpumask_scnprintf(page, count, *(cpumask_t *)data);
211 if (count - len < 2)
212 return -EINVAL;
213 len += sprintf(page + len, "\n");
214 return len;
217 static int prof_cpu_mask_write_proc (struct file *file, const char __user *buffer,
218 unsigned long count, void *data)
220 cpumask_t *mask = (cpumask_t *)data;
221 unsigned long full_count = count, err;
222 cpumask_t new_value;
224 err = cpumask_parse(buffer, count, new_value);
225 if (err)
226 return err;
228 *mask = new_value;
229 return full_count;
232 void create_prof_cpu_mask(struct proc_dir_entry *root_irq_dir)
234 struct proc_dir_entry *entry;
236 /* create /proc/irq/prof_cpu_mask */
237 if (!(entry = create_proc_entry("prof_cpu_mask", 0600, root_irq_dir)))
238 return;
239 entry->nlink = 1;
240 entry->data = (void *)&prof_cpu_mask;
241 entry->read_proc = prof_cpu_mask_read_proc;
242 entry->write_proc = prof_cpu_mask_write_proc;
246 * This function accesses profiling information. The returned data is
247 * binary: the sampling step and the actual contents of the profile
248 * buffer. Use of the program readprofile is recommended in order to
249 * get meaningful info out of these data.
251 static ssize_t
252 read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
254 unsigned long p = *ppos;
255 ssize_t read;
256 char * pnt;
257 unsigned int sample_step = 1 << prof_shift;
259 if (p >= (prof_len+1)*sizeof(unsigned int))
260 return 0;
261 if (count > (prof_len+1)*sizeof(unsigned int) - p)
262 count = (prof_len+1)*sizeof(unsigned int) - p;
263 read = 0;
265 while (p < sizeof(unsigned int) && count > 0) {
266 put_user(*((char *)(&sample_step)+p),buf);
267 buf++; p++; count--; read++;
269 pnt = (char *)prof_buffer + p - sizeof(atomic_t);
270 if (copy_to_user(buf,(void *)pnt,count))
271 return -EFAULT;
272 read += count;
273 *ppos += read;
274 return read;
278 * Writing to /proc/profile resets the counters
280 * Writing a 'profiling multiplier' value into it also re-sets the profiling
281 * interrupt frequency, on architectures that support this.
283 static ssize_t write_profile(struct file *file, const char __user *buf,
284 size_t count, loff_t *ppos)
286 #ifdef CONFIG_SMP
287 extern int setup_profiling_timer (unsigned int multiplier);
289 if (count == sizeof(int)) {
290 unsigned int multiplier;
292 if (copy_from_user(&multiplier, buf, sizeof(int)))
293 return -EFAULT;
295 if (setup_profiling_timer(multiplier))
296 return -EINVAL;
298 #endif
300 memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
301 return count;
304 static struct file_operations proc_profile_operations = {
305 .read = read_profile,
306 .write = write_profile,
309 static int __init create_proc_profile(void)
311 struct proc_dir_entry *entry;
313 if (!prof_on)
314 return 0;
315 if (!(entry = create_proc_entry("profile", S_IWUSR | S_IRUGO, NULL)))
316 return 0;
317 entry->proc_fops = &proc_profile_operations;
318 entry->size = (1+prof_len) * sizeof(atomic_t);
319 return 0;
321 module_init(create_proc_profile);
322 #endif /* CONFIG_PROC_FS */