/*
 * Workqueue statistical tracer.
 *
 * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com>
 */
#include <trace/events/workqueue.h>
#include <linux/list.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/kref.h>
#include "trace_stat.h"
#include "trace.h"
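
/*
 * The counters collected here are reported through the stat tracer
 * framework (trace_stat.h). With debugfs mounted, the output is expected
 * to appear as a "workqueues" file under the tracing trace_stat
 * directory (exact path may vary with the kernel configuration).
 */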
/* A cpu workqueue thread */
struct cpu_workqueue_stats {
	struct list_head	list;
	struct kref		kref;
	int			cpu;
	pid_t			pid;
	/* Can be inserted from interrupt or user context, need to be atomic */
	atomic_t		inserted;
	/*
	 * Don't need to be atomic, works are serialized in a single workqueue
	 * thread on a single cpu.
	 */
	unsigned int		executed;
};
/* List of workqueue threads on one cpu */
struct workqueue_global_stats {
	struct list_head	list;
	spinlock_t		lock;
};
/* Don't need a global lock because allocated before the workqueues, and
 * never freed.
 */
static DEFINE_PER_CPU(struct workqueue_global_stats, all_workqueue_stat);
#define workqueue_cpu_stat(cpu) (&per_cpu(all_workqueue_stat, cpu))
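
/*
 * Note: the probes below locate the right per-cpu stat list via
 * cpumask_first(&wq_thread->cpus_allowed), relying on each cpu workqueue
 * thread being bound to a single cpu.
 */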
static void cpu_workqueue_stat_free(struct kref *kref)
{
	kfree(container_of(kref, struct cpu_workqueue_stats, kref));
}
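
/*
 * Lifetime: an entry holds one reference for its presence on the per-cpu
 * list (dropped by probe_workqueue_destruction) and one per in-flight stat
 * iteration (taken by workqueue_stat_start_cpu/workqueue_stat_next and
 * dropped by workqueue_stat_release), so an entry stays valid for readers
 * even after its workqueue thread has gone away.
 */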
/* Insertion of a work */
static void
probe_workqueue_insertion(struct task_struct *wq_thread,
			  struct work_struct *work)
{
	int cpu = cpumask_first(&wq_thread->cpus_allowed);
	struct cpu_workqueue_stats *node;
	unsigned long flags;

	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
	list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
		if (node->pid == wq_thread->pid) {
			atomic_inc(&node->inserted);
			goto found;
		}
	}
	pr_debug("trace_workqueue: entry not found\n");
found:
	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
}
/* Execution of a work */
static void
probe_workqueue_execution(struct task_struct *wq_thread,
			  struct work_struct *work)
{
	int cpu = cpumask_first(&wq_thread->cpus_allowed);
	struct cpu_workqueue_stats *node;
	unsigned long flags;

	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
	list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
		if (node->pid == wq_thread->pid) {
			node->executed++;
			goto found;
		}
	}
	pr_debug("trace_workqueue: entry not found\n");
found:
	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
}
/* Creation of a cpu workqueue thread */
static void probe_workqueue_creation(struct task_struct *wq_thread, int cpu)
{
	struct cpu_workqueue_stats *cws;
	unsigned long flags;

	WARN_ON(cpu < 0);

	/* Workqueues are sometimes created in atomic context */
	cws = kzalloc(sizeof(struct cpu_workqueue_stats), GFP_ATOMIC);
	if (!cws) {
		pr_warning("trace_workqueue: not enough memory\n");
		return;
	}
	INIT_LIST_HEAD(&cws->list);
	kref_init(&cws->kref);
	cws->cpu = cpu;
	cws->pid = wq_thread->pid;

	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
	list_add_tail(&cws->list, &workqueue_cpu_stat(cpu)->list);
	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
}
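
/*
 * cws->pid is the key the insertion/execution probes match against, while
 * cws->cpu lets the stat iterator resume on the right per-cpu list.
 */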
/* Destruction of a cpu workqueue thread */
static void probe_workqueue_destruction(struct task_struct *wq_thread)
{
	/* Workqueues only execute on one cpu */
	int cpu = cpumask_first(&wq_thread->cpus_allowed);
	struct cpu_workqueue_stats *node, *next;
	unsigned long flags;

	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
	list_for_each_entry_safe(node, next, &workqueue_cpu_stat(cpu)->list,
				 list) {
		if (node->pid == wq_thread->pid) {
			list_del(&node->list);
			kref_put(&node->kref, cpu_workqueue_stat_free);
			goto found;
		}
	}

	pr_debug("trace_workqueue: don't find workqueue to destroy\n");
found:
	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
}
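
/*
 * list_del() under the per-cpu lock prevents new lookups from finding the
 * entry; the final kfree() is deferred through kref_put(), so a stat reader
 * that already took a reference can keep using the entry safely.
 */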
static struct cpu_workqueue_stats *workqueue_stat_start_cpu(int cpu)
{
	unsigned long flags;
	struct cpu_workqueue_stats *ret = NULL;

	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);

	if (!list_empty(&workqueue_cpu_stat(cpu)->list)) {
		ret = list_entry(workqueue_cpu_stat(cpu)->list.next,
				 struct cpu_workqueue_stats, list);
		kref_get(&ret->kref);
	}

	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);

	return ret;
}
static void *workqueue_stat_start(struct tracer_stat *trace)
{
	int cpu;
	void *ret = NULL;

	for_each_possible_cpu(cpu) {
		ret = workqueue_stat_start_cpu(cpu);
		if (ret)
			return ret;
	}

	return NULL;
}
static void *workqueue_stat_next(void *prev, int idx)
{
	struct cpu_workqueue_stats *prev_cws = prev;
	struct cpu_workqueue_stats *ret;
	int cpu = prev_cws->cpu;
	unsigned long flags;

	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
	if (list_is_last(&prev_cws->list, &workqueue_cpu_stat(cpu)->list)) {
		spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
		do {
			cpu = cpumask_next(cpu, cpu_possible_mask);
			if (cpu >= nr_cpu_ids)
				return NULL;
		} while (!(ret = workqueue_stat_start_cpu(cpu)));
		return ret;
	} else {
		ret = list_entry(prev_cws->list.next,
				 struct cpu_workqueue_stats, list);
		kref_get(&ret->kref);
	}
	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);

	return ret;
}
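
/*
 * Iterator protocol: stat_start returns the first entry of the first
 * non-empty per-cpu list, stat_next walks the current list and then moves
 * on to the next possible cpu; every entry handed out carries a reference
 * that stat_release drops once the stat core is done with it.
 */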
static int workqueue_stat_show(struct seq_file *s, void *p)
{
	struct cpu_workqueue_stats *cws = p;
	struct pid *pid;
	struct task_struct *tsk;

	pid = find_get_pid(cws->pid);
	if (pid) {
		tsk = get_pid_task(pid, PIDTYPE_PID);
		if (tsk) {
			seq_printf(s, "%3d %6d     %6u       %s\n", cws->cpu,
				   atomic_read(&cws->inserted), cws->executed,
				   tsk->comm);
			put_task_struct(tsk);
		}
		put_pid(pid);
	}

	return 0;
}
static void workqueue_stat_release(void *stat)
{
	struct cpu_workqueue_stats *node = stat;

	kref_put(&node->kref, cpu_workqueue_stat_free);
}
static int workqueue_stat_headers(struct seq_file *s)
{
	seq_printf(s, "# CPU  INSERTED  EXECUTED   NAME\n");
	seq_printf(s, "# |      |         |          |\n");
	return 0;
}
struct tracer_stat workqueue_stats __read_mostly = {
	.name = "workqueues",
	.stat_start = workqueue_stat_start,
	.stat_next = workqueue_stat_next,
	.stat_show = workqueue_stat_show,
	.stat_release = workqueue_stat_release,
	.stat_headers = workqueue_stat_headers
};
int __init stat_workqueue_init(void)
{
	if (register_stat_tracer(&workqueue_stats)) {
		pr_warning("Unable to register workqueue stat tracer\n");
		return 1;
	}

	return 0;
}
fs_initcall(stat_workqueue_init);
/*
 * Workqueues are created very early, just after pre-smp initcalls.
 * So we must register our tracepoints at this stage.
 */
int __init trace_workqueue_early_init(void)
{
	int ret, cpu;

	ret = register_trace_workqueue_insertion(probe_workqueue_insertion);
	if (ret)
		goto out;

	ret = register_trace_workqueue_execution(probe_workqueue_execution);
	if (ret)
		goto no_insertion;

	ret = register_trace_workqueue_creation(probe_workqueue_creation);
	if (ret)
		goto no_execution;

	ret = register_trace_workqueue_destruction(probe_workqueue_destruction);
	if (ret)
		goto no_creation;

	for_each_possible_cpu(cpu) {
		spin_lock_init(&workqueue_cpu_stat(cpu)->lock);
		INIT_LIST_HEAD(&workqueue_cpu_stat(cpu)->list);
	}

	return 0;

no_creation:
	unregister_trace_workqueue_creation(probe_workqueue_creation);
no_execution:
	unregister_trace_workqueue_execution(probe_workqueue_execution);
no_insertion:
	unregister_trace_workqueue_insertion(probe_workqueue_insertion);
out:
	pr_warning("trace_workqueue: unable to trace workqueues\n");

	return 1;
}
early_initcall(trace_workqueue_early_init);