#ifdef CONFIG_SCHEDSTATS
/*
 * bump this up when changing the output format or the meaning of an existing
 * format, so that tools can adapt (or abort)
 */
#define SCHEDSTAT_VERSION 14
static int show_schedstat(struct seq_file *seq, void *v)
{
	int cpu;
	int mask_len = DIV_ROUND_UP(NR_CPUS, 32) * 9;
	char *mask_str = kmalloc(mask_len, GFP_KERNEL);

	if (mask_str == NULL)
		return -ENOMEM;
	seq_printf(seq, "version %d\n", SCHEDSTAT_VERSION);
	seq_printf(seq, "timestamp %lu\n", jiffies);
	for_each_online_cpu(cpu) {
		struct rq *rq = cpu_rq(cpu);
#ifdef CONFIG_SMP
		struct sched_domain *sd;
		int dcount = 0;
#endif
		/* runqueue-specific stats */
		seq_printf(seq,
		    "cpu%d %u %u %u %u %u %u %u %u %u %llu %llu %lu",
		    cpu, rq->yld_both_empty,
		    rq->yld_act_empty, rq->yld_exp_empty, rq->yld_count,
		    rq->sched_switch, rq->sched_count, rq->sched_goidle,
		    rq->ttwu_count, rq->ttwu_local,
		    rq->rq_cpu_time,
		    rq->rq_sched_info.run_delay, rq->rq_sched_info.pcount);

		seq_printf(seq, "\n");
#ifdef CONFIG_SMP
		/* domain-specific stats */
		preempt_disable();
		for_each_domain(cpu, sd) {
			enum cpu_idle_type itype;

			cpumask_scnprintf(mask_str, mask_len,
					  sched_domain_span(sd));
			seq_printf(seq, "domain%d %s", dcount++, mask_str);
			for (itype = CPU_IDLE; itype < CPU_MAX_IDLE_TYPES;
					itype++) {
				seq_printf(seq, " %u %u %u %u %u %u %u %u",
				    sd->lb_count[itype],
				    sd->lb_balanced[itype],
				    sd->lb_failed[itype],
				    sd->lb_imbalance[itype],
				    sd->lb_gained[itype],
				    sd->lb_hot_gained[itype],
				    sd->lb_nobusyq[itype],
				    sd->lb_nobusyg[itype]);
			}
61 " %u %u %u %u %u %u %u %u %u %u %u %u\n",
62 sd
->alb_count
, sd
->alb_failed
, sd
->alb_pushed
,
63 sd
->sbe_count
, sd
->sbe_balanced
, sd
->sbe_pushed
,
64 sd
->sbf_count
, sd
->sbf_balanced
, sd
->sbf_pushed
,
65 sd
->ttwu_wake_remote
, sd
->ttwu_move_affine
,
66 sd
->ttwu_move_balance
);
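
/*
 * Illustration (not verbatim kernel output; the counter values below are
 * made up): with the version-14 format emitted above, the first lines of
 * /proc/schedstat would look roughly like
 *
 *	version 14
 *	timestamp 4294937553
 *	cpu0 1 2 3 4 5 6 7 8 9 1000000 2000000 300
 *	domain0 00000003 0 0 0 0 0 0 0 0 ...
 *
 * where the nine %u fields on the cpu line are the yield/switch/wakeup
 * counters, the two %llu fields are rq_cpu_time and rq_sched_info.run_delay,
 * and the trailing %lu is rq_sched_info.pcount.
 */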
static int schedstat_open(struct inode *inode, struct file *file)
{
	unsigned int size = PAGE_SIZE * (1 + num_online_cpus() / 32);
	char *buf = kmalloc(size, GFP_KERNEL);
	struct seq_file *m;
	int res;

	if (!buf)
		return -ENOMEM;
	res = single_open(file, show_schedstat, NULL);
	if (!res) {
		m = file->private_data;
		m->buf = buf;
		m->size = size;
	} else
		kfree(buf);
	return res;
}
static const struct file_operations proc_schedstat_operations = {
	.open    = schedstat_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};
static int __init proc_schedstat_init(void)
{
	proc_create("schedstat", 0, NULL, &proc_schedstat_operations);
	return 0;
}
module_init(proc_schedstat_init);
/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{
	if (rq) {
		rq->rq_sched_info.run_delay += delta;
		rq->rq_sched_info.pcount++;
	}
}
/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_cpu_time += delta;
}
static inline void
rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_sched_info.run_delay += delta;
}
# define schedstat_inc(rq, field)	do { (rq)->field++; } while (0)
# define schedstat_add(rq, field, amt)	do { (rq)->field += (amt); } while (0)
# define schedstat_set(var, val)	do { var = (val); } while (0)
#else /* !CONFIG_SCHEDSTATS */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{}
static inline void
rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
{}
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{}
# define schedstat_inc(rq, field)	do { } while (0)
# define schedstat_add(rq, field, amt)	do { } while (0)
# define schedstat_set(var, val)	do { } while (0)
#endif /* CONFIG_SCHEDSTATS */
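
/*
 * Usage sketch for the accessors above (illustrative only; field names are
 * drawn from the output code in this file, but the real call sites live
 * elsewhere in the scheduler).  Because the macros compile to no-ops when
 * CONFIG_SCHEDSTATS is off, callers update counters unconditionally:
 *
 *	schedstat_inc(rq, ttwu_count);
 *	schedstat_add(sd, lb_imbalance[CPU_NOT_IDLE], imbalance);
 *	schedstat_set(p->se.wait_start, rq->clock);
 */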
#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
static inline void sched_info_reset_dequeued(struct task_struct *t)
{
	t->sched_info.last_queued = 0;
}
/*
 * Called when a process is dequeued from the active array and given
 * the cpu.  We should note that with the exception of interactive
 * tasks, the expired queue will become the active queue after the active
 * queue is empty, without explicitly dequeuing and requeuing tasks in the
 * expired queue.  (Interactive tasks may be requeued directly to the
 * active queue, thus delaying tasks in the expired queue from running;
 * see scheduler_tick()).
 *
 * Though we are interested in knowing how long it was from the *first* time a
 * task was queued to the time that it finally hit a cpu, we call this routine
 * from dequeue_task() to account for possible rq->clock skew across cpus. The
 * delta taken on each cpu would annul the skew.
 */
static inline void sched_info_dequeued(struct task_struct *t)
{
	unsigned long long now = task_rq(t)->clock, delta = 0;

	if (unlikely(sched_info_on()))
		if (t->sched_info.last_queued)
			delta = now - t->sched_info.last_queued;
	sched_info_reset_dequeued(t);
	t->sched_info.run_delay += delta;

	rq_sched_info_dequeued(task_rq(t), delta);
}
/*
 * Called when a task finally hits the cpu.  We can now calculate how
 * long it was waiting to run.  We also note when it began so that we
 * can keep stats on how long its timeslice is.
 */
static void sched_info_arrive(struct task_struct *t)
{
	unsigned long long now = task_rq(t)->clock, delta = 0;

	if (t->sched_info.last_queued)
		delta = now - t->sched_info.last_queued;
	sched_info_reset_dequeued(t);
	t->sched_info.run_delay += delta;
	t->sched_info.last_arrival = now;
	t->sched_info.pcount++;

	rq_sched_info_arrive(task_rq(t), delta);
}
/*
 * Called when a process is queued into either the active or expired
 * array.  The time is noted and later used to determine how long the task
 * had to wait before reaching the cpu.  Since the expired queue will
 * become the active queue after active queue is empty, without dequeuing
 * and requeuing any tasks, we are interested in queuing to either. It
 * is unusual but not impossible for tasks to be dequeued and immediately
 * requeued in the same or another array: this can happen in sched_yield(),
 * set_user_nice(), and even load_balance() as it moves tasks from runqueue
 * to runqueue.
 *
 * This function is only called from enqueue_task(), but also only updates
 * the timestamp if it is not already set.  It's assumed that
 * sched_info_dequeued() will clear that stamp when appropriate.
 */
static inline void sched_info_queued(struct task_struct *t)
{
	if (unlikely(sched_info_on()))
		if (!t->sched_info.last_queued)
			t->sched_info.last_queued = task_rq(t)->clock;
}
/*
 * Called when a process ceases being the active-running process, either
 * voluntarily or involuntarily.  Now we can calculate how long we ran.
 * Also, if the process is still in the TASK_RUNNING state, call
 * sched_info_queued() to mark that it has now again started waiting on
 * the runqueue.
 */
static inline void sched_info_depart(struct task_struct *t)
{
	unsigned long long delta = task_rq(t)->clock -
					t->sched_info.last_arrival;

	rq_sched_info_depart(task_rq(t), delta);

	if (t->state == TASK_RUNNING)
		sched_info_queued(t);
}
/*
 * Called when tasks are switched involuntarily due, typically, to expiring
 * their time slice.  (This may also be called when switching to or from
 * the idle task.)  We are only called when prev != next.
 */
static inline void
__sched_info_switch(struct task_struct *prev, struct task_struct *next)
{
	struct rq *rq = task_rq(prev);

	/*
	 * prev now departs the cpu.  It's not interesting to record
	 * stats about how efficient we were at scheduling the idle
	 * process, however.
	 */
	if (prev != rq->idle)
		sched_info_depart(prev);

	if (next != rq->idle)
		sched_info_arrive(next);
}
static inline void
sched_info_switch(struct task_struct *prev, struct task_struct *next)
{
	if (unlikely(sched_info_on()))
		__sched_info_switch(prev, next);
}
#else
#define sched_info_queued(t)			do { } while (0)
#define sched_info_reset_dequeued(t)		do { } while (0)
#define sched_info_dequeued(t)			do { } while (0)
#define sched_info_switch(t, next)		do { } while (0)
#endif /* CONFIG_SCHEDSTATS || CONFIG_TASK_DELAY_ACCT */
/*
 * The following are functions that support scheduler-internal time accounting.
 * These functions are generally called at the timer tick.  None of this depends
 * on CONFIG_SCHEDSTATS.
 */
/**
 * account_group_user_time - Maintain utime for a thread group.
 *
 * @tsk:	Pointer to task structure.
 * @cputime:	Time value by which to increment the utime field of the
 *		thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the utime field there.
 */
static inline void account_group_user_time(struct task_struct *tsk,
					   cputime_t cputime)
{
	struct thread_group_cputimer *cputimer;

	/* tsk == current, ensure it is safe to use ->signal */
	if (unlikely(tsk->exit_state))
		return;

	cputimer = &tsk->signal->cputimer;

	if (!cputimer->running)
		return;

	spin_lock(&cputimer->lock);
	cputimer->cputime.utime =
		cputime_add(cputimer->cputime.utime, cputime);
	spin_unlock(&cputimer->lock);
}
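
/*
 * Usage sketch (an assumed call site; per-task accounting lives in the
 * scheduler core): the per-task and per-group updates travel together,
 * roughly:
 *
 *	p->utime = cputime_add(p->utime, cputime);
 *	account_group_user_time(p, cputime);
 *
 * Gating the group total on cputimer->running keeps the spinlock off the
 * hot path unless a POSIX CPU timer is actually armed for the group.
 */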
/**
 * account_group_system_time - Maintain stime for a thread group.
 *
 * @tsk:	Pointer to task structure.
 * @cputime:	Time value by which to increment the stime field of the
 *		thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the stime field there.
 */
static inline void account_group_system_time(struct task_struct *tsk,
					     cputime_t cputime)
{
	struct thread_group_cputimer *cputimer;

	/* tsk == current, ensure it is safe to use ->signal */
	if (unlikely(tsk->exit_state))
		return;

	cputimer = &tsk->signal->cputimer;

	if (!cputimer->running)
		return;

	spin_lock(&cputimer->lock);
	cputimer->cputime.stime =
		cputime_add(cputimer->cputime.stime, cputime);
	spin_unlock(&cputimer->lock);
}
/**
 * account_group_exec_runtime - Maintain exec runtime for a thread group.
 *
 * @tsk:	Pointer to task structure.
 * @ns:		Time value by which to increment the sum_exec_runtime field
 *		of the thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the sum_exec_runtime field there.
 */
static inline void account_group_exec_runtime(struct task_struct *tsk,
					      unsigned long long ns)
{
	struct thread_group_cputimer *cputimer;
	struct signal_struct *sig;

	sig = tsk->signal;
	/* see __exit_signal()->task_rq_unlock_wait() */
	barrier();
	if (unlikely(!sig))
		return;

	cputimer = &sig->cputimer;

	if (!cputimer->running)
		return;

	spin_lock(&cputimer->lock);
	cputimer->cputime.sum_exec_runtime += ns;
	spin_unlock(&cputimer->lock);
}
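
/*
 * Usage sketch (an assumed call site in the fair scheduling class): the
 * tick-time runtime update charges the group alongside the running entity,
 * roughly:
 *
 *	curr->sum_exec_runtime += delta_exec;
 *	...
 *	account_group_exec_runtime(task_of(curr), delta_exec);
 *
 * Unlike the utime/stime helpers above, this path checks ->signal for NULL
 * (with barrier() ordering the load) because it can run while the task is
 * exiting and racing with __exit_signal() releasing the signal struct.
 */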