/*
 * Workqueue statistical tracer.
 *
 * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com>
 */

#include <trace/events/workqueue.h>
#include <linux/list.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include "trace_stat.h"
#include "trace.h"

/* A cpu workqueue thread */
struct cpu_workqueue_stats {
	struct list_head	list;
	int			cpu;
	pid_t			pid;
	/* Can be inserted from interrupt or user context, needs to be atomic */
	atomic_t		inserted;
	/*
	 * Doesn't need to be atomic, works are serialized in a single
	 * workqueue thread on a single CPU.
	 */
	unsigned int		executed;
};

/* List of workqueue threads on one cpu */
struct workqueue_global_stats {
	struct list_head	list;
	spinlock_t		lock;
};

/*
 * Doesn't need a global lock because it is allocated before the
 * workqueues, and never freed.
 */
static DEFINE_PER_CPU(struct workqueue_global_stats, all_workqueue_stat);
#define workqueue_cpu_stat(cpu) (&per_cpu(all_workqueue_stat, cpu))

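/*
 * Layout sketch (inferred from the definitions above): each possible CPU
 * owns one workqueue_global_stats head, and every cpu_workqueue_stats node
 * hanging off it describes one workqueue thread bound to that CPU:
 *
 *   all_workqueue_stat[cpu].list -> cws("events/0") -> cws("kblockd/0") -> ...
 */
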
/* Insertion of a work */
static void
probe_workqueue_insertion(struct task_struct *wq_thread,
			  struct work_struct *work)
{
	int cpu = cpumask_first(&wq_thread->cpus_allowed);
	struct cpu_workqueue_stats *node;
	unsigned long flags;

	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
	list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
		if (node->pid == wq_thread->pid) {
			atomic_inc(&node->inserted);
			goto found;
		}
	}
	pr_debug("trace_workqueue: entry not found\n");
found:
	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
}

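/*
 * Each probe locates its per-cpu list with
 * cpumask_first(&wq_thread->cpus_allowed). This assumes a cpu workqueue
 * thread is bound to exactly one CPU, so the first bit of its cpus_allowed
 * mask identifies the CPU it serves.
 */
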
/* Execution of a work */
static void
probe_workqueue_execution(struct task_struct *wq_thread,
			  struct work_struct *work)
{
	int cpu = cpumask_first(&wq_thread->cpus_allowed);
	struct cpu_workqueue_stats *node;
	unsigned long flags;

	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
	list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
		if (node->pid == wq_thread->pid) {
			node->executed++;
			goto found;
		}
	}
	pr_debug("trace_workqueue: entry not found\n");
found:
	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
}

/* Creation of a cpu workqueue thread */
static void probe_workqueue_creation(struct task_struct *wq_thread, int cpu)
{
	struct cpu_workqueue_stats *cws;
	unsigned long flags;

	WARN_ON(cpu < 0);

	/* Workqueues are sometimes created in atomic context */
	cws = kzalloc(sizeof(struct cpu_workqueue_stats), GFP_ATOMIC);
	if (!cws) {
		pr_warning("trace_workqueue: not enough memory\n");
		return;
	}
	INIT_LIST_HEAD(&cws->list);
	cws->cpu = cpu;

	cws->pid = wq_thread->pid;

	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
	list_add_tail(&cws->list, &workqueue_cpu_stat(cpu)->list);
	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
}

/* Destruction of a cpu workqueue thread */
static void probe_workqueue_destruction(struct task_struct *wq_thread)
{
	/* A workqueue thread executes on only one cpu */
	int cpu = cpumask_first(&wq_thread->cpus_allowed);
	struct cpu_workqueue_stats *node, *next;
	unsigned long flags;

	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
	list_for_each_entry_safe(node, next, &workqueue_cpu_stat(cpu)->list,
							list) {
		if (node->pid == wq_thread->pid) {
			list_del(&node->list);
			kfree(node);
			goto found;
		}
	}

	pr_debug("trace_workqueue: couldn't find workqueue to destroy\n");
found:
	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
}

static struct cpu_workqueue_stats *workqueue_stat_start_cpu(int cpu)
{
	unsigned long flags;
	struct cpu_workqueue_stats *ret = NULL;

	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);

	if (!list_empty(&workqueue_cpu_stat(cpu)->list))
		ret = list_entry(workqueue_cpu_stat(cpu)->list.next,
				 struct cpu_workqueue_stats, list);

	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);

	return ret;
}

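/*
 * The stat_start/stat_next pair below implements the iterator that
 * trace_stat expects: stat_start returns the first entry of the first
 * non-empty per-cpu list, and stat_next walks the current list, hopping
 * to the next possible CPU once the current list is exhausted.
 */
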
static void *workqueue_stat_start(struct tracer_stat *trace)
{
	int cpu;
	void *ret = NULL;

	for_each_possible_cpu(cpu) {
		ret = workqueue_stat_start_cpu(cpu);
		if (ret)
			return ret;
	}
	return NULL;
}

static void *workqueue_stat_next(void *prev, int idx)
{
	struct cpu_workqueue_stats *prev_cws = prev;
	int cpu = prev_cws->cpu;
	unsigned long flags;
	void *ret = NULL;

	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
	if (list_is_last(&prev_cws->list, &workqueue_cpu_stat(cpu)->list)) {
		spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
		do {
			cpu = cpumask_next(cpu, cpu_possible_mask);
			if (cpu >= nr_cpu_ids)
				return NULL;
		} while (!(ret = workqueue_stat_start_cpu(cpu)));
		return ret;
	}
	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);

	return list_entry(prev_cws->list.next, struct cpu_workqueue_stats,
			  list);
}

static int workqueue_stat_show(struct seq_file *s, void *p)
{
	struct cpu_workqueue_stats *cws = p;
	struct pid *pid;
	struct task_struct *tsk;

	pid = find_get_pid(cws->pid);
	if (pid) {
		tsk = get_pid_task(pid, PIDTYPE_PID);
		if (tsk) {
			seq_printf(s, "%3d %6d     %6u       %s\n", cws->cpu,
				   atomic_read(&cws->inserted), cws->executed,
				   tsk->comm);
			put_task_struct(tsk);
		}
		put_pid(pid);
	}

	return 0;
}

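/*
 * find_get_pid() and get_pid_task() each take a reference, so the show
 * handler above must drop them with put_pid() and put_task_struct() once
 * the task name has been printed.
 */
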
static int workqueue_stat_headers(struct seq_file *s)
{
	seq_printf(s, "# CPU  INSERTED  EXECUTED   NAME\n");
	seq_printf(s, "# |      |         |          |\n");
	return 0;
}

struct tracer_stat workqueue_stats __read_mostly = {
	.name		= "workqueues",
	.stat_start	= workqueue_stat_start,
	.stat_next	= workqueue_stat_next,
	.stat_show	= workqueue_stat_show,
	.stat_headers	= workqueue_stat_headers
};

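/*
 * Usage sketch (assuming debugfs is mounted at /sys/kernel/debug): once
 * the stat tracer is registered, the per-thread counters can be read as
 * a table; the values below are illustrative only:
 *
 *   # cat /sys/kernel/debug/tracing/trace_stat/workqueues
 *   # CPU  INSERTED  EXECUTED   NAME
 *   # |      |         |          |
 *       0     5849      5849     events/0
 *       0       64        64     kblockd/0
 */
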
int __init stat_workqueue_init(void)
{
	if (register_stat_tracer(&workqueue_stats)) {
		pr_warning("Unable to register workqueue stat tracer\n");
		return 1;
	}

	return 0;
}
fs_initcall(stat_workqueue_init);

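/*
 * Note the two-stage setup: the tracepoint probes are hooked at
 * early_initcall time (see trace_workqueue_early_init() below) so that no
 * early workqueue creation is missed, while the stat file itself is only
 * registered later, at fs_initcall time.
 */
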
/*
 * Workqueues are created very early, just after pre-smp initcalls.
 * So we must register our tracepoints at this stage.
 */
int __init trace_workqueue_early_init(void)
{
	int ret, cpu;

	ret = register_trace_workqueue_insertion(probe_workqueue_insertion);
	if (ret)
		goto out;

	ret = register_trace_workqueue_execution(probe_workqueue_execution);
	if (ret)
		goto no_insertion;

	ret = register_trace_workqueue_creation(probe_workqueue_creation);
	if (ret)
		goto no_execution;

	ret = register_trace_workqueue_destruction(probe_workqueue_destruction);
	if (ret)
		goto no_creation;

	for_each_possible_cpu(cpu) {
		spin_lock_init(&workqueue_cpu_stat(cpu)->lock);
		INIT_LIST_HEAD(&workqueue_cpu_stat(cpu)->list);
	}

	return 0;

no_creation:
	unregister_trace_workqueue_creation(probe_workqueue_creation);
no_execution:
	unregister_trace_workqueue_execution(probe_workqueue_execution);
no_insertion:
	unregister_trace_workqueue_insertion(probe_workqueue_insertion);
out:
	pr_warning("trace_workqueue: unable to trace workqueues\n");

	return 1;
}
early_initcall(trace_workqueue_early_init);