kernel/trace/trace_workqueue.c
/*
 * Workqueue statistical tracer.
 *
 * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com>
 *
 */

#include <trace/events/workqueue.h>
#include <linux/list.h>
#include <linux/percpu.h>
#include "trace_stat.h"
#include "trace.h"

/* A cpu workqueue thread */
struct cpu_workqueue_stats {
        struct list_head        list;
        int                     cpu;
        pid_t                   pid;
        /* Can be inserted from interrupt or user context, needs to be atomic */
        atomic_t                inserted;
        /*
         * Doesn't need to be atomic, works are serialized in a single
         * workqueue thread on a single CPU.
         */
        unsigned int            executed;
};
/* List of workqueue threads on one cpu */
struct workqueue_global_stats {
        struct list_head        list;
        spinlock_t              lock;
};

/*
 * Doesn't need a global lock because it is allocated before the workqueues,
 * and never freed.
 */
static DEFINE_PER_CPU(struct workqueue_global_stats, all_workqueue_stat);

#define workqueue_cpu_stat(cpu) (&per_cpu(all_workqueue_stat, cpu))
/* Insertion of a work */
static void
probe_workqueue_insertion(struct task_struct *wq_thread,
                          struct work_struct *work)
{
        int cpu = cpumask_first(&wq_thread->cpus_allowed);
        struct cpu_workqueue_stats *node;
        unsigned long flags;

        spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
        list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
                if (node->pid == wq_thread->pid) {
                        atomic_inc(&node->inserted);
                        goto found;
                }
        }
        pr_debug("trace_workqueue: entry not found\n");
found:
        spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
}
/* Execution of a work */
static void
probe_workqueue_execution(struct task_struct *wq_thread,
                          struct work_struct *work)
{
        int cpu = cpumask_first(&wq_thread->cpus_allowed);
        struct cpu_workqueue_stats *node;
        unsigned long flags;

        spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
        list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
                if (node->pid == wq_thread->pid) {
                        node->executed++;
                        goto found;
                }
        }
        pr_debug("trace_workqueue: entry not found\n");
found:
        spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
}
/* Creation of a cpu workqueue thread */
static void probe_workqueue_creation(struct task_struct *wq_thread, int cpu)
{
        struct cpu_workqueue_stats *cws;
        unsigned long flags;

        WARN_ON(cpu < 0);

        /* Workqueues are sometimes created in atomic context */
        cws = kzalloc(sizeof(struct cpu_workqueue_stats), GFP_ATOMIC);
        if (!cws) {
                pr_warning("trace_workqueue: not enough memory\n");
                return;
        }
        INIT_LIST_HEAD(&cws->list);
        cws->cpu = cpu;

        cws->pid = wq_thread->pid;

        spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
        list_add_tail(&cws->list, &workqueue_cpu_stat(cpu)->list);
        spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
}
/* Destruction of a cpu workqueue thread */
static void probe_workqueue_destruction(struct task_struct *wq_thread)
{
        /* A workqueue thread only executes on one cpu */
        int cpu = cpumask_first(&wq_thread->cpus_allowed);
        struct cpu_workqueue_stats *node, *next;
        unsigned long flags;

        spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
        list_for_each_entry_safe(node, next, &workqueue_cpu_stat(cpu)->list,
                                 list) {
                if (node->pid == wq_thread->pid) {
                        list_del(&node->list);
                        kfree(node);
                        goto found;
                }
        }

        pr_debug("trace_workqueue: couldn't find workqueue to destroy\n");
found:
        spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
}
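
/*
 * Iteration helpers for the stat framework: stat_start is expected to hand
 * back the first entry and stat_next each following one until NULL is
 * returned.  Here the per-cpu lists are simply chained in possible-cpu order.
 */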
static struct cpu_workqueue_stats *workqueue_stat_start_cpu(int cpu)
{
        unsigned long flags;
        struct cpu_workqueue_stats *ret = NULL;

        spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);

        if (!list_empty(&workqueue_cpu_stat(cpu)->list))
                ret = list_entry(workqueue_cpu_stat(cpu)->list.next,
                                 struct cpu_workqueue_stats, list);

        spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);

        return ret;
}
static void *workqueue_stat_start(struct tracer_stat *trace)
{
        int cpu;
        void *ret = NULL;

        for_each_possible_cpu(cpu) {
                ret = workqueue_stat_start_cpu(cpu);
                if (ret)
                        return ret;
        }
        return NULL;
}
static void *workqueue_stat_next(void *prev, int idx)
{
        struct cpu_workqueue_stats *prev_cws = prev;
        int cpu = prev_cws->cpu;
        unsigned long flags;
        void *ret = NULL;

        spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
        if (list_is_last(&prev_cws->list, &workqueue_cpu_stat(cpu)->list)) {
                spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
                /* No more entries on this cpu, move on to the next one */
                do {
                        cpu = cpumask_next(cpu, cpu_possible_mask);
                        if (cpu >= nr_cpu_ids)
                                return NULL;
                } while (!(ret = workqueue_stat_start_cpu(cpu)));
                return ret;
        }
        spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);

        return list_entry(prev_cws->list.next, struct cpu_workqueue_stats,
                          list);
}
static int workqueue_stat_show(struct seq_file *s, void *p)
{
        struct cpu_workqueue_stats *cws = p;
        struct pid *pid;
        struct task_struct *tsk;

        pid = find_get_pid(cws->pid);
        if (pid) {
                tsk = get_pid_task(pid, PIDTYPE_PID);
                if (tsk) {
                        seq_printf(s, "%3d %6d     %6u       %s\n", cws->cpu,
                                   atomic_read(&cws->inserted), cws->executed,
                                   tsk->comm);
                        put_task_struct(tsk);
                }
                put_pid(pid);
        }

        return 0;
}
static int workqueue_stat_headers(struct seq_file *s)
{
        seq_printf(s, "# CPU  INSERTED  EXECUTED   NAME\n");
        seq_printf(s, "# |      |         |          |\n");

        return 0;
}
struct tracer_stat workqueue_stats __read_mostly = {
        .name = "workqueues",
        .stat_start = workqueue_stat_start,
        .stat_next = workqueue_stat_next,
        .stat_show = workqueue_stat_show,
        .stat_headers = workqueue_stat_headers
};
int __init stat_workqueue_init(void)
{
        if (register_stat_tracer(&workqueue_stats)) {
                pr_warning("Unable to register workqueue stat tracer\n");
                return 1;
        }

        return 0;
}
fs_initcall(stat_workqueue_init);
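
/*
 * Once the stat tracer is registered, the trace_stat framework typically
 * exposes these counters as a read-only file named after .name, e.g.
 * <debugfs>/tracing/trace_stat/workqueues (the exact path depends on where
 * debugfs is mounted).
 */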
/*
 * Workqueues are created very early, just after pre-smp initcalls.
 * So we must register our tracepoints at this stage.
 */
int __init trace_workqueue_early_init(void)
{
        int ret, cpu;

        ret = register_trace_workqueue_insertion(probe_workqueue_insertion);
        if (ret)
                goto out;

        ret = register_trace_workqueue_execution(probe_workqueue_execution);
        if (ret)
                goto no_insertion;

        ret = register_trace_workqueue_creation(probe_workqueue_creation);
        if (ret)
                goto no_execution;

        ret = register_trace_workqueue_destruction(probe_workqueue_destruction);
        if (ret)
                goto no_creation;

        for_each_possible_cpu(cpu) {
                spin_lock_init(&workqueue_cpu_stat(cpu)->lock);
                INIT_LIST_HEAD(&workqueue_cpu_stat(cpu)->list);
        }

        return 0;

no_creation:
        unregister_trace_workqueue_creation(probe_workqueue_creation);
no_execution:
        unregister_trace_workqueue_execution(probe_workqueue_execution);
no_insertion:
        unregister_trace_workqueue_insertion(probe_workqueue_insertion);
out:
        pr_warning("trace_workqueue: unable to trace workqueues\n");

        return 1;
}
early_initcall(trace_workqueue_early_init);
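
/*
 * Illustrative output sketch (values are hypothetical), following the format
 * produced by workqueue_stat_headers() and workqueue_stat_show() above:
 *
 * # CPU  INSERTED  EXECUTED   NAME
 * # |      |         |          |
 *     0     503        503      events/0
 *     1     123        123      events/1
 */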