kernel/trace/trace_workqueue.c
/*
 * Workqueue statistical tracer.
 *
 * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com>
 *
 */
#include <trace/events/workqueue.h>
#include <linux/list.h>
#include <linux/percpu.h>
#include <linux/kref.h>
#include "trace_stat.h"
#include "trace.h"
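/*
 * Added overview (not in the original header): this tracer keeps one
 * cpu_workqueue_stats entry per cpu workqueue thread and counts how many
 * works were inserted into and executed by each thread; the totals are
 * exported through the trace_stat framework declared in "trace_stat.h".
 */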
/* A cpu workqueue thread */
struct cpu_workqueue_stats {
	struct list_head	list;
	struct kref		kref;
	int			cpu;
	pid_t			pid;
	/* Can be inserted from interrupt or user context, needs to be atomic */
	atomic_t		inserted;
	/*
	 * Doesn't need to be atomic, works are serialized in a single
	 * workqueue thread on a single CPU.
	 */
	unsigned int		executed;
};
/* List of workqueue threads on one cpu */
struct workqueue_global_stats {
	struct list_head	list;
	spinlock_t		lock;
};
/*
 * No global lock needed: these are allocated before any workqueue is
 * created, and never freed.
 */
static DEFINE_PER_CPU(struct workqueue_global_stats, all_workqueue_stat);
#define workqueue_cpu_stat(cpu)	(&per_cpu(all_workqueue_stat, cpu))
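/*
 * Illustrative usage sketch (added commentary, mirroring the probes below):
 * every access to a per-cpu stat list follows the same pattern,
 *
 *	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
 *	list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list)
 *		...;
 *	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
 */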
static void cpu_workqueue_stat_free(struct kref *kref)
{
	kfree(container_of(kref, struct cpu_workqueue_stats, kref));
}
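/*
 * Lifetime note (added commentary): an entry is unlinked from its list in
 * probe_workqueue_destruction(), but the memory is only freed once the last
 * reference is dropped -- either there or in workqueue_stat_release() -- so
 * a stat reader iterating the list never sees an entry freed under it.
 */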
/* Insertion of a work */
static void
probe_workqueue_insertion(struct task_struct *wq_thread,
			  struct work_struct *work)
{
	int cpu = cpumask_first(&wq_thread->cpus_allowed);
	struct cpu_workqueue_stats *node;
	unsigned long flags;

	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
	list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
		if (node->pid == wq_thread->pid) {
			atomic_inc(&node->inserted);
			goto found;
		}
	}
	pr_debug("trace_workqueue: entry not found\n");
found:
	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
}
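/*
 * Added commentary: each cpu workqueue thread is bound to a single CPU, so
 * cpumask_first(&wq_thread->cpus_allowed) recovers the CPU whose stat list
 * the thread lives on; the pid then selects the entry itself. The execution
 * probe below relies on the same lookup.
 */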
/* Execution of a work */
static void
probe_workqueue_execution(struct task_struct *wq_thread,
			  struct work_struct *work)
{
	int cpu = cpumask_first(&wq_thread->cpus_allowed);
	struct cpu_workqueue_stats *node;
	unsigned long flags;

	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
	list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
		if (node->pid == wq_thread->pid) {
			node->executed++;
			goto found;
		}
	}
	pr_debug("trace_workqueue: entry not found\n");
found:
	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
}
/* Creation of a cpu workqueue thread */
static void probe_workqueue_creation(struct task_struct *wq_thread, int cpu)
{
	struct cpu_workqueue_stats *cws;
	unsigned long flags;

	WARN_ON(cpu < 0);

	/* Workqueues are sometimes created in atomic context */
	cws = kzalloc(sizeof(struct cpu_workqueue_stats), GFP_ATOMIC);
	if (!cws) {
		pr_warning("trace_workqueue: not enough memory\n");
		return;
	}
	INIT_LIST_HEAD(&cws->list);
	kref_init(&cws->kref);
	cws->cpu = cpu;
	cws->pid = wq_thread->pid;

	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
	list_add_tail(&cws->list, &workqueue_cpu_stat(cpu)->list);
	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
}
/* Destruction of a cpu workqueue thread */
static void probe_workqueue_destruction(struct task_struct *wq_thread)
{
	/* Workqueues only execute on one cpu */
	int cpu = cpumask_first(&wq_thread->cpus_allowed);
	struct cpu_workqueue_stats *node, *next;
	unsigned long flags;

	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
	list_for_each_entry_safe(node, next, &workqueue_cpu_stat(cpu)->list,
				 list) {
		if (node->pid == wq_thread->pid) {
			list_del(&node->list);
			kref_put(&node->kref, cpu_workqueue_stat_free);
			goto found;
		}
	}

	pr_debug("trace_workqueue: couldn't find workqueue to destroy\n");
found:
	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
}
static struct cpu_workqueue_stats *workqueue_stat_start_cpu(int cpu)
{
	unsigned long flags;
	struct cpu_workqueue_stats *ret = NULL;

	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);

	if (!list_empty(&workqueue_cpu_stat(cpu)->list)) {
		ret = list_entry(workqueue_cpu_stat(cpu)->list.next,
				 struct cpu_workqueue_stats, list);
		kref_get(&ret->kref);
	}

	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);

	return ret;
}
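/*
 * Added commentary: the trace_stat core drives these callbacks as an
 * iterator -- stat_start() yields the first entry, stat_next() each
 * subsequent one, stat_show() prints it, and stat_release() drops the
 * reference taken here via kref_get(), which is what keeps an entry alive
 * while a reader holds it.
 */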
static void *workqueue_stat_start(struct tracer_stat *trace)
{
	int cpu;
	void *ret = NULL;

	for_each_possible_cpu(cpu) {
		ret = workqueue_stat_start_cpu(cpu);
		if (ret)
			return ret;
	}
	return NULL;
}
static void *workqueue_stat_next(void *prev, int idx)
{
	struct cpu_workqueue_stats *prev_cws = prev;
	struct cpu_workqueue_stats *ret;
	int cpu = prev_cws->cpu;
	unsigned long flags;

	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
	if (list_is_last(&prev_cws->list, &workqueue_cpu_stat(cpu)->list)) {
		spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
		do {
			cpu = cpumask_next(cpu, cpu_possible_mask);
			if (cpu >= nr_cpu_ids)
				return NULL;
		} while (!(ret = workqueue_stat_start_cpu(cpu)));
		return ret;
	} else {
		ret = list_entry(prev_cws->list.next,
				 struct cpu_workqueue_stats, list);
		kref_get(&ret->kref);
	}
	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);

	return ret;
}
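/*
 * Added commentary: when the current CPU's list is exhausted, the iterator
 * hops to the next possible CPU, skipping CPUs whose lists are empty, and
 * terminates once cpumask_next() runs past nr_cpu_ids.
 */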
static int workqueue_stat_show(struct seq_file *s, void *p)
{
	struct cpu_workqueue_stats *cws = p;
	struct pid *pid;
	struct task_struct *tsk;

	pid = find_get_pid(cws->pid);
	if (pid) {
		tsk = get_pid_task(pid, PIDTYPE_PID);
		if (tsk) {
			seq_printf(s, "%3d %6d     %6u       %s\n", cws->cpu,
				   atomic_read(&cws->inserted), cws->executed,
				   tsk->comm);
			put_task_struct(tsk);
		}
		put_pid(pid);
	}

	return 0;
}
static void workqueue_stat_release(void *stat)
{
	struct cpu_workqueue_stats *node = stat;

	kref_put(&node->kref, cpu_workqueue_stat_free);
}
static int workqueue_stat_headers(struct seq_file *s)
{
	seq_printf(s, "# CPU  INSERTED  EXECUTED   NAME\n");
	seq_printf(s, "# |      |         |          |\n");
	return 0;
}
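/*
 * Illustrative output (values invented for the example): reading
 * <debugfs>/tracing/trace_stat/workqueues would show something like
 *
 *	# CPU  INSERTED  EXECUTED   NAME
 *	# |      |         |          |
 *	    0      4842      4842     events/0
 *	    0        51        51     khelper
 */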
struct tracer_stat workqueue_stats __read_mostly = {
	.name = "workqueues",
	.stat_start = workqueue_stat_start,
	.stat_next = workqueue_stat_next,
	.stat_show = workqueue_stat_show,
	.stat_release = workqueue_stat_release,
	.stat_headers = workqueue_stat_headers
};
int __init stat_workqueue_init(void)
{
	if (register_stat_tracer(&workqueue_stats)) {
		pr_warning("Unable to register workqueue stat tracer\n");
		return 1;
	}

	return 0;
}
fs_initcall(stat_workqueue_init);
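/*
 * Added commentary: early_initcall() runs before fs_initcall(), so the
 * probes below are already registered and counting by the time
 * stat_workqueue_init() exposes the "workqueues" stat file.
 */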
/*
 * Workqueues are created very early, just after pre-smp initcalls.
 * So we must register our tracepoints at this stage.
 */
int __init trace_workqueue_early_init(void)
{
	int ret, cpu;

	ret = register_trace_workqueue_insertion(probe_workqueue_insertion);
	if (ret)
		goto out;

	ret = register_trace_workqueue_execution(probe_workqueue_execution);
	if (ret)
		goto no_insertion;

	ret = register_trace_workqueue_creation(probe_workqueue_creation);
	if (ret)
		goto no_execution;

	ret = register_trace_workqueue_destruction(probe_workqueue_destruction);
	if (ret)
		goto no_creation;

	for_each_possible_cpu(cpu) {
		spin_lock_init(&workqueue_cpu_stat(cpu)->lock);
		INIT_LIST_HEAD(&workqueue_cpu_stat(cpu)->list);
	}

	return 0;

no_creation:
	unregister_trace_workqueue_creation(probe_workqueue_creation);
no_execution:
	unregister_trace_workqueue_execution(probe_workqueue_execution);
no_insertion:
	unregister_trace_workqueue_insertion(probe_workqueue_insertion);
out:
	pr_warning("trace_workqueue: unable to trace workqueues\n");

	return 1;
}
early_initcall(trace_workqueue_early_init);