/*
 * Workqueue statistical tracer.
 *
 * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com>
 *
 */

#include <trace/events/workqueue.h>
#include <linux/list.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/kref.h>
#include "trace_stat.h"
#include "trace.h"
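/*
 * This tracer exposes a "workqueues" stat file (see workqueue_stats below),
 * listing every cpu workqueue thread together with the number of works
 * inserted into it and the number of works it has executed.
 */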
/* A cpu workqueue thread */
struct cpu_workqueue_stats {
	struct list_head	list;
	struct kref		kref;
	int			cpu;
	pid_t			pid;
/* Can be inserted from interrupt or user context, need to be atomic */
	atomic_t		inserted;
/*
 * Don't need to be atomic, works are serialized in a single workqueue thread
 * on a single CPU.
 */
	unsigned int		executed;
};
/* List of workqueue threads on one cpu */
struct workqueue_global_stats {
	struct list_head	list;
	spinlock_t		lock;
};

/* Don't need a global lock because allocated before the workqueues, and
   only one cpu per workqueue */
static DEFINE_PER_CPU(struct workqueue_global_stats, all_workqueue_stat);
#define workqueue_cpu_stat(cpu) (&per_cpu(all_workqueue_stat, cpu))
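/*
 * Every possible cpu owns a spinlock-protected list of cpu_workqueue_stats
 * nodes, one per workqueue thread bound to that cpu.  The probes below find
 * the right list through the cpu the workqueue thread is affine to.
 */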
static void cpu_workqueue_stat_free(struct kref *kref)
{
	kfree(container_of(kref, struct cpu_workqueue_stats, kref));
}
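/*
 * A stats node is refcounted: the per-cpu list owns one reference (set up by
 * kref_init()), and each stat iteration pass takes another with kref_get(),
 * dropped again in workqueue_stat_release().  An entry removed by
 * probe_workqueue_destruction() therefore stays valid for a reader that
 * still holds it.
 */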
/* Insertion of a work */
static void
probe_workqueue_insertion(struct task_struct *wq_thread,
			  struct work_struct *work)
{
	int cpu = cpumask_first(&wq_thread->cpus_allowed);
	struct cpu_workqueue_stats *node;
	unsigned long flags;

	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
	list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
		if (node->pid == wq_thread->pid) {
			atomic_inc(&node->inserted);
			goto found;
		}
	}
	pr_debug("trace_workqueue: entry not found\n");
found:
	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
}
/* Execution of a work */
static void
probe_workqueue_execution(struct task_struct *wq_thread,
			  struct work_struct *work)
{
	int cpu = cpumask_first(&wq_thread->cpus_allowed);
	struct cpu_workqueue_stats *node;
	unsigned long flags;

	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
	list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
		if (node->pid == wq_thread->pid) {
			node->executed++;
			goto found;
		}
	}
	pr_debug("trace_workqueue: entry not found\n");
found:
	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
}
/* Creation of a cpu workqueue thread */
static void probe_workqueue_creation(struct task_struct *wq_thread, int cpu)
{
	struct cpu_workqueue_stats *cws;
	unsigned long flags;

	/* Workqueues are sometimes created in atomic context */
	cws = kzalloc(sizeof(struct cpu_workqueue_stats), GFP_ATOMIC);
	if (!cws) {
		pr_warning("trace_workqueue: not enough memory\n");
		return;
	}
	INIT_LIST_HEAD(&cws->list);
	kref_init(&cws->kref);
	cws->cpu = cpu;
	cws->pid = wq_thread->pid;

	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
	list_add_tail(&cws->list, &workqueue_cpu_stat(cpu)->list);
	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
}
/* Destruction of a cpu workqueue thread */
static void probe_workqueue_destruction(struct task_struct *wq_thread)
{
	/* Workqueues only execute on one cpu */
	int cpu = cpumask_first(&wq_thread->cpus_allowed);
	struct cpu_workqueue_stats *node, *next;
	unsigned long flags;

	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
	list_for_each_entry_safe(node, next, &workqueue_cpu_stat(cpu)->list,
							list) {
		if (node->pid == wq_thread->pid) {
			list_del(&node->list);
			kref_put(&node->kref, cpu_workqueue_stat_free);
			goto found;
		}
	}

	pr_debug("trace_workqueue: don't find workqueue to destroy\n");
found:
	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
}
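/*
 * Stat iteration: workqueue_stat_start() returns the first entry of the
 * first possible cpu that has one, workqueue_stat_next() walks the current
 * cpu's list and, once it hits the end, moves on to the next possible cpu.
 * Every entry handed out carries a reference taken under the per-cpu lock.
 */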
static struct cpu_workqueue_stats *workqueue_stat_start_cpu(int cpu)
{
	unsigned long flags;
	struct cpu_workqueue_stats *ret = NULL;

	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);

	if (!list_empty(&workqueue_cpu_stat(cpu)->list)) {
		ret = list_entry(workqueue_cpu_stat(cpu)->list.next,
				 struct cpu_workqueue_stats, list);
		kref_get(&ret->kref);
	}

	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);

	return ret;
}
static void *workqueue_stat_start(struct tracer_stat *trace)
{
	int cpu;
	void *ret = NULL;

	for_each_possible_cpu(cpu) {
		ret = workqueue_stat_start_cpu(cpu);
		if (ret)
			return ret;
	}

	return NULL;
}
static void *workqueue_stat_next(void *prev, int idx)
{
	struct cpu_workqueue_stats *prev_cws = prev;
	struct cpu_workqueue_stats *ret;
	int cpu = prev_cws->cpu;
	unsigned long flags;

	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
	if (list_is_last(&prev_cws->list, &workqueue_cpu_stat(cpu)->list)) {
		spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
		do {
			cpu = cpumask_next(cpu, cpu_possible_mask);
			if (cpu >= nr_cpu_ids)
				return NULL;
		} while (!(ret = workqueue_stat_start_cpu(cpu)));
		return ret;
	} else {
		ret = list_entry(prev_cws->list.next,
				 struct cpu_workqueue_stats, list);
		kref_get(&ret->kref);
	}
	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);

	return ret;
}
static int workqueue_stat_show(struct seq_file *s, void *p)
{
	struct cpu_workqueue_stats *cws = p;
	struct pid *pid;
	struct task_struct *tsk;

	pid = find_get_pid(cws->pid);
	if (pid) {
		tsk = get_pid_task(pid, PIDTYPE_PID);
		if (tsk) {
			seq_printf(s, "%3d %6d     %6u       %s\n", cws->cpu,
				   atomic_read(&cws->inserted), cws->executed,
				   tsk->comm);
			put_task_struct(tsk);
		}
		put_pid(pid);
	}

	return 0;
}
static void workqueue_stat_release(void *stat)
{
	struct cpu_workqueue_stats *node = stat;

	kref_put(&node->kref, cpu_workqueue_stat_free);
}
static int workqueue_stat_headers(struct seq_file *s)
{
	seq_printf(s, "# CPU  INSERTED  EXECUTED   NAME\n");
	seq_printf(s, "# |      |         |          |\n");

	return 0;
}
struct tracer_stat workqueue_stats __read_mostly = {
	.name = "workqueues",
	.stat_start = workqueue_stat_start,
	.stat_next = workqueue_stat_next,
	.stat_show = workqueue_stat_show,
	.stat_release = workqueue_stat_release,
	.stat_headers = workqueue_stat_headers
};
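/*
 * The resulting trace_stat/workqueues file looks roughly like this
 * (values are purely illustrative):
 *
 * # CPU  INSERTED  EXECUTED   NAME
 * # |      |         |          |
 *     0      157       157       events/0
 *     1       42        42       kblockd/1
 */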
int __init stat_workqueue_init(void)
{
	if (register_stat_tracer(&workqueue_stats)) {
		pr_warning("Unable to register workqueue stat tracer\n");
		return 1;
	}

	return 0;
}
fs_initcall(stat_workqueue_init);
/*
 * Workqueues are created very early, just after pre-smp initcalls.
 * So we must register our tracepoints at this stage.
 */
int __init trace_workqueue_early_init(void)
{
	int ret, cpu;

	ret = register_trace_workqueue_insertion(probe_workqueue_insertion);
	if (ret)
		goto out;

	ret = register_trace_workqueue_execution(probe_workqueue_execution);
	if (ret)
		goto no_insertion;

	ret = register_trace_workqueue_creation(probe_workqueue_creation);
	if (ret)
		goto no_execution;

	ret = register_trace_workqueue_destruction(probe_workqueue_destruction);
	if (ret)
		goto no_creation;

	for_each_possible_cpu(cpu) {
		spin_lock_init(&workqueue_cpu_stat(cpu)->lock);
		INIT_LIST_HEAD(&workqueue_cpu_stat(cpu)->list);
	}

	return 0;

no_creation:
	unregister_trace_workqueue_creation(probe_workqueue_creation);
no_execution:
	unregister_trace_workqueue_execution(probe_workqueue_execution);
no_insertion:
	unregister_trace_workqueue_insertion(probe_workqueue_insertion);
out:
	pr_warning("trace_workqueue: unable to trace workqueues\n");

	return 1;
}
early_initcall(trace_workqueue_early_init);