#ifdef CONFIG_SCHEDSTATS
/*
 * bump this up when changing the output format or the meaning of an existing
 * format, so that tools can adapt (or abort)
 */
#define SCHEDSTAT_VERSION 15
static int show_schedstat(struct seq_file *seq, void *v)
{
	int cpu;
	int mask_len = DIV_ROUND_UP(NR_CPUS, 32) * 9;
	char *mask_str = kmalloc(mask_len, GFP_KERNEL);

	if (mask_str == NULL)
		return -ENOMEM;
	seq_printf(seq, "version %d\n", SCHEDSTAT_VERSION);
	seq_printf(seq, "timestamp %lu\n", jiffies);
	for_each_online_cpu(cpu) {
		struct rq *rq = cpu_rq(cpu);
#ifdef CONFIG_SMP
		struct sched_domain *sd;
		int dcount = 0;
#endif
		/* runqueue-specific stats */
		seq_printf(seq,
		    "cpu%d %u %u %u %u %u %u %llu %llu %lu",
		    cpu, rq->yld_count,
		    rq->sched_switch, rq->sched_count, rq->sched_goidle,
		    rq->ttwu_count, rq->ttwu_local,
		    rq->rq_cpu_time,
		    rq->rq_sched_info.run_delay, rq->rq_sched_info.pcount);

		seq_printf(seq, "\n");
#ifdef CONFIG_SMP
		/* domain-specific stats */
		preempt_disable();
		for_each_domain(cpu, sd) {
			enum cpu_idle_type itype;

			cpumask_scnprintf(mask_str, mask_len,
					  sched_domain_span(sd));
			seq_printf(seq, "domain%d %s", dcount++, mask_str);
			for (itype = CPU_IDLE; itype < CPU_MAX_IDLE_TYPES;
					itype++) {
				seq_printf(seq, " %u %u %u %u %u %u %u %u",
				    sd->lb_count[itype],
				    sd->lb_balanced[itype],
				    sd->lb_failed[itype],
				    sd->lb_imbalance[itype],
				    sd->lb_gained[itype],
				    sd->lb_hot_gained[itype],
				    sd->lb_nobusyq[itype],
				    sd->lb_nobusyg[itype]);
			}
			seq_printf(seq,
			    " %u %u %u %u %u %u %u %u %u %u %u %u\n",
			    sd->alb_count, sd->alb_failed, sd->alb_pushed,
			    sd->sbe_count, sd->sbe_balanced, sd->sbe_pushed,
			    sd->sbf_count, sd->sbf_balanced, sd->sbf_pushed,
			    sd->ttwu_wake_remote, sd->ttwu_move_affine,
			    sd->ttwu_move_balance);
		}
		preempt_enable();
#endif
	}
	kfree(mask_str);
	return 0;
}
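
/*
 * Illustrative only: a /proc/schedstat dump produced by the code above has
 * roughly the following shape (the field values here are made up and the
 * domain line is truncated):
 *
 *	version 15
 *	timestamp 4294892871
 *	cpu0 0 0 0 0 0 0 1234567890 123456789 12345
 *	domain0 00000003 0 0 0 0 0 0 0 0 ...
 *
 * One "cpu" line is emitted per online CPU, followed (on SMP) by one
 * "domain" line per scheduling domain attached to that CPU.
 */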
static int schedstat_open(struct inode *inode, struct file *file)
{
	unsigned int size = PAGE_SIZE * (1 + num_online_cpus() / 32);
	char *buf = kmalloc(size, GFP_KERNEL);
	struct seq_file *m;
	int res;

	if (!buf)
		return -ENOMEM;
	res = single_open(file, show_schedstat, NULL);
	if (!res) {
		m = file->private_data;
		m->buf = buf;
		m->size = size;
	} else
		kfree(buf);
	return res;
}
static const struct file_operations proc_schedstat_operations = {
	.open	 = schedstat_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = single_release,
};
static int __init proc_schedstat_init(void)
{
	proc_create("schedstat", 0, NULL, &proc_schedstat_operations);
	return 0;
}
module_init(proc_schedstat_init);
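
/*
 * Usage sketch (not part of this file): the statistics are consumed from
 * userspace simply by reading the proc file registered above, e.g.
 *
 *	$ cat /proc/schedstat
 *
 * Tools that parse the output should check the leading "version" line
 * against SCHEDSTAT_VERSION before trusting the field layout.
 */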
/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{
	if (rq) {
		rq->rq_sched_info.run_delay += delta;
		rq->rq_sched_info.pcount++;
	}
}
/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_cpu_time += delta;
}
static inline void
rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_sched_info.run_delay += delta;
}
# define schedstat_inc(rq, field)	do { (rq)->field++; } while (0)
# define schedstat_add(rq, field, amt)	do { (rq)->field += (amt); } while (0)
# define schedstat_set(var, val)	do { var = (val); } while (0)
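
/*
 * Usage sketch (illustrative, not code from this file): scheduler code bumps
 * counters through these wrappers so the accounting compiles away entirely
 * when CONFIG_SCHEDSTATS is off, e.g.
 *
 *	schedstat_inc(rq, yld_count);
 *	schedstat_add(sd, lb_imbalance[idle], imbalance);
 *
 * The field names above are examples only; the actual call sites live in
 * kernel/sched*.c.
 */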
#else /* !CONFIG_SCHEDSTATS */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{}
static inline void
rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
{}
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{}
# define schedstat_inc(rq, field)	do { } while (0)
# define schedstat_add(rq, field, amt)	do { } while (0)
# define schedstat_set(var, val)	do { } while (0)
#endif /* CONFIG_SCHEDSTATS */

#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
static inline void sched_info_reset_dequeued(struct task_struct *t)
{
	t->sched_info.last_queued = 0;
}
/*
 * Called when a process is dequeued from the active array and given
 * the cpu.  We should note that with the exception of interactive
 * tasks, the expired queue will become the active queue after the active
 * queue is empty, without explicitly dequeuing and requeuing tasks in the
 * expired queue.  (Interactive tasks may be requeued directly to the
 * active queue, thus delaying tasks in the expired queue from running;
 * see scheduler_tick()).
 *
 * Though we are interested in knowing how long it was from the *first* time a
 * task was queued to the time that it finally hit a cpu, we call this routine
 * from dequeue_task() to account for possible rq->clock skew across cpus. The
 * delta taken on each cpu would annul the skew.
 */
static inline void sched_info_dequeued(struct task_struct *t)
{
	unsigned long long now = task_rq(t)->clock, delta = 0;

	if (unlikely(sched_info_on()))
		if (t->sched_info.last_queued)
			delta = now - t->sched_info.last_queued;
	sched_info_reset_dequeued(t);
	t->sched_info.run_delay += delta;

	rq_sched_info_dequeued(task_rq(t), delta);
}
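
/*
 * Worked example (numbers made up) for the skew argument above: suppose a
 * task is queued on CPU1 at rq->clock == 1000 while CPU1's clock runs 50
 * ahead of CPU0's.  If the task is dequeued on CPU1 at clock 1300, the
 * delta of 300 is measured entirely against CPU1's clock, so the 50 of
 * skew never enters run_delay.  Summing per-cpu deltas this way avoids
 * ever having to compare clocks across cpus.
 */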
/*
 * Called when a task finally hits the cpu.  We can now calculate how
 * long it was waiting to run.  We also note when it began so that we
 * can keep stats on how long its timeslice is.
 */
static void sched_info_arrive(struct task_struct *t)
{
	unsigned long long now = task_rq(t)->clock, delta = 0;

	if (t->sched_info.last_queued)
		delta = now - t->sched_info.last_queued;
	sched_info_reset_dequeued(t);
	t->sched_info.run_delay += delta;
	t->sched_info.last_arrival = now;
	t->sched_info.pcount++;

	rq_sched_info_arrive(task_rq(t), delta);
}
/*
 * Called when a process is queued into either the active or expired
 * array.  The time is noted and later used to determine how long we
 * had to wait for us to reach the cpu.  Since the expired queue will
 * become the active queue after active queue is empty, without dequeuing
 * and requeuing any tasks, we are interested in queuing to either.  It
 * is unusual but not impossible for tasks to be dequeued and immediately
 * requeued in the same or another array: this can happen in sched_yield(),
 * set_user_nice(), and even load_balance() as it moves tasks from runqueue
 * to runqueue.
 *
 * This function is only called from enqueue_task(), but also only updates
 * the timestamp if it is not already set.  It's assumed that
 * sched_info_dequeued() will clear that stamp when appropriate.
 */
static inline void sched_info_queued(struct task_struct *t)
{
	if (unlikely(sched_info_on()))
		if (!t->sched_info.last_queued)
			t->sched_info.last_queued = task_rq(t)->clock;
}
/*
 * Called when a process ceases being the active-running process, either
 * voluntarily or involuntarily.  Now we can calculate how long we ran.
 * Also, if the process is still in the TASK_RUNNING state, call
 * sched_info_queued() to mark that it has now again started waiting on
 * the runqueue.
 */
static inline void sched_info_depart(struct task_struct *t)
{
	unsigned long long delta = task_rq(t)->clock -
					t->sched_info.last_arrival;

	rq_sched_info_depart(task_rq(t), delta);

	if (t->state == TASK_RUNNING)
		sched_info_queued(t);
}
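
/*
 * Illustrative timeline (values made up) of how the hooks above combine
 * for one task on one cpu:
 *
 *	t = 100	 sched_info_queued()	last_queued = 100
 *	t = 160	 sched_info_arrive()	run_delay += 60, last_arrival = 160,
 *					last_queued reset to 0
 *	t = 220	 sched_info_depart()	rq_cpu_time += 60 (220 - 160);
 *					re-marked as queued if still TASK_RUNNING
 *
 * run_delay therefore accumulates time spent waiting on a runqueue, while
 * last_arrival anchors the time actually spent on the cpu.
 */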
/*
 * Called when tasks are switched involuntarily due, typically, to expiring
 * their time slice.  (This may also be called when switching to or from
 * the idle task.)  We are only called when prev != next.
 */
static inline void
__sched_info_switch(struct task_struct *prev, struct task_struct *next)
{
	struct rq *rq = task_rq(prev);

	/*
	 * prev now departs the cpu.  It's not interesting to record
	 * stats about how efficient we were at scheduling the idle
	 * process, however.
	 */
	if (prev != rq->idle)
		sched_info_depart(prev);

	if (next != rq->idle)
		sched_info_arrive(next);
}
static inline void
sched_info_switch(struct task_struct *prev, struct task_struct *next)
{
	if (unlikely(sched_info_on()))
		__sched_info_switch(prev, next);
}
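
/*
 * Call-path sketch (illustrative, simplified from the scheduler core rather
 * than taken verbatim): the core invokes this hook once it has decided to
 * switch away from prev, roughly as
 *
 *	if (likely(prev != next)) {
 *		sched_info_switch(prev, next);
 *		...
 *		context_switch(rq, prev, next);
 *	}
 *
 * which is why the prev != next precondition documented above holds.
 */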
#else
#define sched_info_queued(t)			do { } while (0)
#define sched_info_reset_dequeued(t)		do { } while (0)
#define sched_info_dequeued(t)			do { } while (0)
#define sched_info_switch(t, next)		do { } while (0)
#endif /* CONFIG_SCHEDSTATS || CONFIG_TASK_DELAY_ACCT */
/*
 * The following are functions that support scheduler-internal time accounting.
 * These functions are generally called at the timer tick.  None of this depends
 * on CONFIG_SCHEDSTATS.
 */
/**
 * account_group_user_time - Maintain utime for a thread group.
 *
 * @tsk:	Pointer to task structure.
 * @cputime:	Time value by which to increment the utime field of the
 *		thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the utime field there.
 */
static inline void account_group_user_time(struct task_struct *tsk,
					   cputime_t cputime)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

	if (!cputimer->running)
		return;

	spin_lock(&cputimer->lock);
	cputimer->cputime.utime =
		cputime_add(cputimer->cputime.utime, cputime);
	spin_unlock(&cputimer->lock);
}
/**
 * account_group_system_time - Maintain stime for a thread group.
 *
 * @tsk:	Pointer to task structure.
 * @cputime:	Time value by which to increment the stime field of the
 *		thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the stime field there.
 */
static inline void account_group_system_time(struct task_struct *tsk,
					     cputime_t cputime)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

	if (!cputimer->running)
		return;

	spin_lock(&cputimer->lock);
	cputimer->cputime.stime =
		cputime_add(cputimer->cputime.stime, cputime);
	spin_unlock(&cputimer->lock);
}
/**
 * account_group_exec_runtime - Maintain exec runtime for a thread group.
 *
 * @tsk:	Pointer to task structure.
 * @ns:		Time value by which to increment the sum_exec_runtime field
 *		of the thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the sum_exec_runtime field there.
 */
static inline void account_group_exec_runtime(struct task_struct *tsk,
					      unsigned long long ns)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

	if (!cputimer->running)
		return;

	spin_lock(&cputimer->lock);
	cputimer->cputime.sum_exec_runtime += ns;
	spin_unlock(&cputimer->lock);
}
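
/*
 * Usage sketch (illustrative; the real call sites are in the tick accounting
 * code, not in this file): the per-tick accounting path charges the running
 * task and then mirrors the charge into the thread-group totals, roughly as
 *
 *	void account_user_time(struct task_struct *p, cputime_t cputime, ...)
 *	{
 *		p->utime = cputime_add(p->utime, cputime);
 *		account_group_user_time(p, cputime);
 *		...
 *	}
 *
 * account_group_system_time() and account_group_exec_runtime() are wired up
 * analogously for system time and for the scheduler's sum_exec_runtime.
 */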