/*
 * Workqueue statistical tracer.
 *
 * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com>
 *
 */

#include <trace/workqueue.h>
#include <linux/list.h>
#include <linux/percpu.h>
#include "trace_stat.h"
#include "trace.h"

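/*
 * Overview: the probes below hook the workqueue tracepoints to count,
 * per cpu-workqueue thread, how many works were inserted and how many
 * were executed.  The counters are exported through the trace_stat
 * framework; assuming debugfs is mounted at /sys/kernel/debug, they
 * should show up as /sys/kernel/debug/tracing/trace_stat/workqueues.
 */
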
/* A cpu workqueue thread */
struct cpu_workqueue_stats {
	struct list_head	list;
	/* Useful to know if we print the cpu headers */
	bool			first_entry;
	int			cpu;
	pid_t			pid;
	/* Can be inserted from interrupt or user context, needs to be atomic */
	atomic_t		inserted;
	/*
	 * Doesn't need to be atomic; works are serialized by a single
	 * workqueue thread on a single CPU.
	 */
	unsigned int		executed;
};

/* List of workqueue threads on one cpu */
struct workqueue_global_stats {
	struct list_head	list;
	spinlock_t		lock;
};

/*
 * No global lock is needed: these per-cpu structures are allocated
 * before any workqueue is created, and they are never freed.
 */
static DEFINE_PER_CPU(struct workqueue_global_stats, all_workqueue_stat);
#define workqueue_cpu_stat(cpu)	(&per_cpu(all_workqueue_stat, cpu))

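/*
 * Lookup scheme: each cpu-workqueue thread is bound to a single CPU,
 * so cpumask_first(&wq_thread->cpus_allowed) recovers the CPU whose
 * stat list holds its entry; the probes below then match on pid
 * within that per-cpu list.
 */
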
/* Insertion of a work */
static void
probe_workqueue_insertion(struct task_struct *wq_thread,
			  struct work_struct *work)
{
	int cpu = cpumask_first(&wq_thread->cpus_allowed);
	struct cpu_workqueue_stats *node, *next;
	unsigned long flags;

	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
	list_for_each_entry_safe(node, next, &workqueue_cpu_stat(cpu)->list,
							list) {
		if (node->pid == wq_thread->pid) {
			atomic_inc(&node->inserted);
			goto found;
		}
	}
	pr_debug("trace_workqueue: entry not found\n");
found:
	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
}

/* Execution of a work */
static void
probe_workqueue_execution(struct task_struct *wq_thread,
			  struct work_struct *work)
{
	int cpu = cpumask_first(&wq_thread->cpus_allowed);
	struct cpu_workqueue_stats *node, *next;
	unsigned long flags;

	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
	list_for_each_entry_safe(node, next, &workqueue_cpu_stat(cpu)->list,
							list) {
		if (node->pid == wq_thread->pid) {
			node->executed++;
			goto found;
		}
	}
	pr_debug("trace_workqueue: entry not found\n");
found:
	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
}

/* Creation of a cpu workqueue thread */
static void probe_workqueue_creation(struct task_struct *wq_thread, int cpu)
{
	struct cpu_workqueue_stats *cws;
	unsigned long flags;

	WARN_ON(cpu < 0);

	/* Workqueues are sometimes created in atomic context */
	cws = kzalloc(sizeof(struct cpu_workqueue_stats), GFP_ATOMIC);
	if (!cws) {
		pr_warning("trace_workqueue: not enough memory\n");
		return;
	}
	INIT_LIST_HEAD(&cws->list);
	cws->cpu = cpu;

	cws->pid = wq_thread->pid;

	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
	if (list_empty(&workqueue_cpu_stat(cpu)->list))
		cws->first_entry = true;
	list_add_tail(&cws->list, &workqueue_cpu_stat(cpu)->list);
	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
}

/* Destruction of a cpu workqueue thread */
static void probe_workqueue_destruction(struct task_struct *wq_thread)
{
	/* A workqueue thread executes on only one cpu */
	int cpu = cpumask_first(&wq_thread->cpus_allowed);
	struct cpu_workqueue_stats *node, *next;
	unsigned long flags;

	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
	list_for_each_entry_safe(node, next, &workqueue_cpu_stat(cpu)->list,
							list) {
		if (node->pid == wq_thread->pid) {
			list_del(&node->list);
			kfree(node);
			goto found;
		}
	}

	pr_debug("trace_workqueue: no workqueue found to destroy\n");
found:
	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
}

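/*
 * Note that unlinking and freeing happen under the per-cpu lock, which
 * serializes destruction against the probes above.  The stat iteration
 * helpers below, however, drop that lock before the entry they return
 * is consumed, so a reader racing with destruction remains possible in
 * principle.
 */
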
static struct cpu_workqueue_stats *workqueue_stat_start_cpu(int cpu)
{
	unsigned long flags;
	struct cpu_workqueue_stats *ret = NULL;

	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);

	if (!list_empty(&workqueue_cpu_stat(cpu)->list))
		ret = list_entry(workqueue_cpu_stat(cpu)->list.next,
				 struct cpu_workqueue_stats, list);

	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);

	return ret;
}

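/*
 * The trace_stat core drives the iteration: stat_start() returns the
 * first entry of the first non-empty per-cpu list, and stat_next()
 * walks that list, hopping to the next possible CPU once the current
 * list is exhausted.
 */
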
static void *workqueue_stat_start(void)
{
	int cpu;
	void *ret = NULL;

	for_each_possible_cpu(cpu) {
		ret = workqueue_stat_start_cpu(cpu);
		if (ret)
			return ret;
	}
	return NULL;
}

static void *workqueue_stat_next(void *prev, int idx)
{
	struct cpu_workqueue_stats *prev_cws = prev;
	int cpu = prev_cws->cpu;
	unsigned long flags;
	void *ret = NULL;

	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
	if (list_is_last(&prev_cws->list, &workqueue_cpu_stat(cpu)->list)) {
		spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
		do {
			cpu = cpumask_next(cpu, cpu_possible_mask);
			if (cpu >= nr_cpu_ids)
				return NULL;
		} while (!(ret = workqueue_stat_start_cpu(cpu)));
		return ret;
	}
	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);

	return list_entry(prev_cws->list.next, struct cpu_workqueue_stats,
			  list);
}

static int workqueue_stat_show(struct seq_file *s, void *p)
{
	struct cpu_workqueue_stats *cws = p;
	unsigned long flags;
	int cpu = cws->cpu;
	struct pid *pid;
	struct task_struct *tsk;

	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
	if (&cws->list == workqueue_cpu_stat(cpu)->list.next)
		seq_printf(s, "\n");
	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);

	pid = find_get_pid(cws->pid);
	if (pid) {
		tsk = get_pid_task(pid, PIDTYPE_PID);
		if (tsk) {
			seq_printf(s, "%3d %6d     %6u       %s\n", cws->cpu,
				   atomic_read(&cws->inserted), cws->executed,
				   tsk->comm);
			put_task_struct(tsk);
		}
		put_pid(pid);
	}

	return 0;
}

static int workqueue_stat_headers(struct seq_file *s)
{
	seq_printf(s, "# CPU  INSERTED  EXECUTED   NAME\n");
	seq_printf(s, "# |      |         |          |\n");
	return 0;
}

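/*
 * Illustrative output (the counts and thread names here are made up,
 * but the column layout matches the format strings above):
 *
 * # CPU  INSERTED  EXECUTED   NAME
 * # |      |         |          |
 *     0   4565       4565       events/0
 *     0     12         12       kblockd/0
 */
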
struct tracer_stat workqueue_stats __read_mostly = {
	.name = "workqueues",
	.stat_start = workqueue_stat_start,
	.stat_next = workqueue_stat_next,
	.stat_show = workqueue_stat_show,
	.stat_headers = workqueue_stat_headers
};

int __init stat_workqueue_init(void)
{
	if (register_stat_tracer(&workqueue_stats)) {
		pr_warning("Unable to register workqueue stat tracer\n");
		return 1;
	}

	return 0;
}
fs_initcall(stat_workqueue_init);

/*
 * Workqueues are created very early, just after the pre-smp initcalls.
 * So we must register our tracepoints at this stage.
 */
int __init trace_workqueue_early_init(void)
{
	int ret, cpu;

	ret = register_trace_workqueue_insertion(probe_workqueue_insertion);
	if (ret)
		goto out;

	ret = register_trace_workqueue_execution(probe_workqueue_execution);
	if (ret)
		goto no_insertion;

	ret = register_trace_workqueue_creation(probe_workqueue_creation);
	if (ret)
		goto no_execution;

	ret = register_trace_workqueue_destruction(probe_workqueue_destruction);
	if (ret)
		goto no_creation;

	for_each_possible_cpu(cpu) {
		spin_lock_init(&workqueue_cpu_stat(cpu)->lock);
		INIT_LIST_HEAD(&workqueue_cpu_stat(cpu)->list);
	}

	return 0;

no_creation:
	unregister_trace_workqueue_creation(probe_workqueue_creation);
no_execution:
	unregister_trace_workqueue_execution(probe_workqueue_execution);
no_insertion:
	unregister_trace_workqueue_insertion(probe_workqueue_insertion);
out:
	pr_warning("trace_workqueue: unable to trace workqueues\n");

	return 1;
}
early_initcall(trace_workqueue_early_init);