/*
 * Workqueue statistical tracer.
 *
 * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com>
 *
 */

#include <trace/workqueue.h>
#include <linux/list.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include "trace_stat.h"
/* A cpu workqueue thread */
struct cpu_workqueue_stats {
	struct list_head	list;
	/* Useful to know if we print the cpu headers */
	bool			first_entry;
	int			cpu;
	pid_t			pid;
	/* Can be inserted from interrupt or user context, needs to be atomic */
	atomic_t		inserted;
	/*
	 * Doesn't need to be atomic: works are serialized by a single
	 * workqueue thread on a single CPU.
	 */
	unsigned int		executed;
};
/* List of workqueue threads on one cpu */
struct workqueue_global_stats {
	struct list_head	list;
	spinlock_t		lock;
};

/*
 * No global lock needed: this per-cpu data is allocated before the
 * workqueues are created, and never freed.
 */
static DEFINE_PER_CPU(struct workqueue_global_stats, all_workqueue_stat);
#define workqueue_cpu_stat(cpu) (&per_cpu(all_workqueue_stat, cpu))
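
/*
 * Each possible CPU thus gets its own list and lock, so a probe only
 * takes the lock of the CPU whose workqueue thread fired the
 * tracepoint.
 */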
/* Insertion of a work */
static void
probe_workqueue_insertion(struct task_struct *wq_thread,
			  struct work_struct *work)
{
	int cpu = cpumask_first(&wq_thread->cpus_allowed);
	struct cpu_workqueue_stats *node, *next;
	unsigned long flags;

	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
	list_for_each_entry_safe(node, next, &workqueue_cpu_stat(cpu)->list,
				 list) {
		if (node->pid == wq_thread->pid) {
			atomic_inc(&node->inserted);
			goto found;
		}
	}
	pr_debug("trace_workqueue: entry not found\n");
found:
	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
}
/* Execution of a work */
static void
probe_workqueue_execution(struct task_struct *wq_thread,
			  struct work_struct *work)
{
	int cpu = cpumask_first(&wq_thread->cpus_allowed);
	struct cpu_workqueue_stats *node, *next;
	unsigned long flags;

	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
	list_for_each_entry_safe(node, next, &workqueue_cpu_stat(cpu)->list,
				 list) {
		if (node->pid == wq_thread->pid) {
			node->executed++;
			goto found;
		}
	}
	pr_debug("trace_workqueue: entry not found\n");
found:
	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
}
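
/*
 * Together the two probes above give a per-thread backlog measure: a
 * gap between "inserted" and "executed" means works have been queued
 * but not yet run by this workqueue thread.
 */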
/* Creation of a cpu workqueue thread */
static void probe_workqueue_creation(struct task_struct *wq_thread, int cpu)
{
	struct cpu_workqueue_stats *cws;
	unsigned long flags;

	/* Workqueues are sometimes created in atomic context */
	cws = kzalloc(sizeof(struct cpu_workqueue_stats), GFP_ATOMIC);
	if (!cws) {
		pr_warning("trace_workqueue: not enough memory\n");
		return;
	}

	INIT_LIST_HEAD(&cws->list);
	cws->cpu = cpu;
	cws->pid = wq_thread->pid;

	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
	if (list_empty(&workqueue_cpu_stat(cpu)->list))
		cws->first_entry = true;
	list_add_tail(&cws->list, &workqueue_cpu_stat(cpu)->list);
	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
}
/* Destruction of a cpu workqueue thread */
static void probe_workqueue_destruction(struct task_struct *wq_thread)
{
	/* A workqueue thread only executes on one cpu */
	int cpu = cpumask_first(&wq_thread->cpus_allowed);
	struct cpu_workqueue_stats *node, *next;
	unsigned long flags;

	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
	list_for_each_entry_safe(node, next, &workqueue_cpu_stat(cpu)->list,
				 list) {
		if (node->pid == wq_thread->pid) {
			list_del(&node->list);
			kfree(node);
			goto found;
		}
	}

	pr_debug("trace_workqueue: couldn't find workqueue to destroy\n");
found:
	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
}
static struct cpu_workqueue_stats *workqueue_stat_start_cpu(int cpu)
{
	struct cpu_workqueue_stats *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);

	if (!list_empty(&workqueue_cpu_stat(cpu)->list))
		ret = list_entry(workqueue_cpu_stat(cpu)->list.next,
				 struct cpu_workqueue_stats, list);

	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);

	return ret;
}
static void *workqueue_stat_start(void)
{
	int cpu;
	void *ret = NULL;

	for_each_possible_cpu(cpu) {
		ret = workqueue_stat_start_cpu(cpu);
		if (ret)
			return ret;
	}

	return NULL;
}
static void *workqueue_stat_next(void *prev, int idx)
{
	struct cpu_workqueue_stats *prev_cws = prev;
	int cpu = prev_cws->cpu;
	unsigned long flags;
	void *ret = NULL;

	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
	if (list_is_last(&prev_cws->list, &workqueue_cpu_stat(cpu)->list)) {
		spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
		do {
			cpu = cpumask_next(cpu, cpu_possible_mask);
			if (cpu >= nr_cpu_ids)
				return NULL;
		} while (!(ret = workqueue_stat_start_cpu(cpu)));
		return ret;
	}
	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);

	return list_entry(prev_cws->list.next, struct cpu_workqueue_stats,
			  list);
}
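
/*
 * stat_start/stat_next thus yield every cpu_workqueue_stats entry in
 * turn: one CPU's list is walked to its end before moving on to the
 * next possible CPU that has entries.
 */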
static int workqueue_stat_show(struct seq_file *s, void *p)
{
	struct cpu_workqueue_stats *cws = p;
	unsigned long flags;
	int cpu = cws->cpu;
	struct pid *pid;
	struct task_struct *tsk;

	pid = find_get_pid(cws->pid);
	if (pid) {
		tsk = get_pid_task(pid, PIDTYPE_PID);
		if (tsk) {
			seq_printf(s, "%3d %6d     %6u       %s\n", cws->cpu,
				   atomic_read(&cws->inserted), cws->executed,
				   tsk->comm);
			put_task_struct(tsk);
		}
		put_pid(pid);
	}

	/* Separate each cpu's entries with a blank line */
	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
	if (&cws->list == workqueue_cpu_stat(cpu)->list.next)
		seq_printf(s, "\n");
	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);

	return 0;
}
static int workqueue_stat_headers(struct seq_file *s)
{
	seq_printf(s, "# CPU  INSERTED  EXECUTED   NAME\n");
	seq_printf(s, "#  |      |         |          |\n\n");
	return 0;
}
struct tracer_stat workqueue_stats __read_mostly = {
	.name = "workqueues",
	.stat_start = workqueue_stat_start,
	.stat_next = workqueue_stat_next,
	.stat_show = workqueue_stat_show,
	.stat_headers = workqueue_stat_headers
};
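
/*
 * Once the tracer is registered (below), the stat file rendered by
 * these callbacks might look like this; the sample values are
 * illustrative only:
 *
 * # CPU  INSERTED  EXECUTED   NAME
 * #  |      |         |          |
 *
 *   0    1004      1004       events/0
 *   0      72        72       kblockd/0
 */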
int __init stat_workqueue_init(void)
{
	if (register_stat_tracer(&workqueue_stats)) {
		pr_warning("Unable to register workqueue stat tracer\n");
		return 1;
	}

	return 0;
}
fs_initcall(stat_workqueue_init);
/*
 * Workqueues are created very early, just after the pre-smp initcalls,
 * so we must register our tracepoints at that stage.
 */
int __init trace_workqueue_early_init(void)
{
	int ret, cpu;

	ret = register_trace_workqueue_insertion(probe_workqueue_insertion);
	if (ret)
		goto out;

	ret = register_trace_workqueue_execution(probe_workqueue_execution);
	if (ret)
		goto no_insertion;

	ret = register_trace_workqueue_creation(probe_workqueue_creation);
	if (ret)
		goto no_execution;

	ret = register_trace_workqueue_destruction(probe_workqueue_destruction);
	if (ret)
		goto no_creation;

	for_each_possible_cpu(cpu) {
		spin_lock_init(&workqueue_cpu_stat(cpu)->lock);
		INIT_LIST_HEAD(&workqueue_cpu_stat(cpu)->list);
	}

	return 0;

no_creation:
	unregister_trace_workqueue_creation(probe_workqueue_creation);
no_execution:
	unregister_trace_workqueue_execution(probe_workqueue_execution);
no_insertion:
	unregister_trace_workqueue_insertion(probe_workqueue_insertion);
out:
	pr_warning("trace_workqueue: unable to trace workqueues\n");

	return 1;
}
early_initcall(trace_workqueue_early_init);
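
/*
 * Usage sketch, assuming a kernel built with this tracer and debugfs
 * mounted at /sys/kernel/debug:
 *
 *   cat /sys/kernel/debug/tracing/trace_stat/workqueues
 *
 * The probes are registered by the early initcall above, so the
 * counters accumulate from boot without any runtime switch.
 */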