#ifdef CONFIG_SCHEDSTATS
/*
 * bump this up when changing the output format or the meaning of an existing
 * format, so that tools can adapt (or abort)
 */
#define SCHEDSTAT_VERSION 14
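
/*
 * The output starts with a "version %d" line and a "timestamp %lu" line,
 * followed by one "cpuN ..." line per online cpu and one "domainN ..." line
 * per scheduling domain of that cpu (see show_schedstat() below).
 *
 * Illustrative sketch only, not part of the kernel: a userspace consumer
 * would typically check the version line (it should match SCHEDSTAT_VERSION,
 * currently 14) before parsing the rest. The helper name is hypothetical.
 *
 *      #include <stdio.h>
 *
 *      static int schedstat_version_ok(void)
 *      {
 *              unsigned int ver = 0;
 *              FILE *f = fopen("/proc/schedstat", "r");
 *
 *              if (f) {
 *                      if (fscanf(f, "version %u", &ver) != 1)
 *                              ver = 0;
 *                      fclose(f);
 *              }
 *              return ver == 14;
 *      }
 */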

static int show_schedstat(struct seq_file *seq, void *v)
{
        int cpu;
        /* 9 chars per 32-bit word of the cpumask: 8 hex digits plus a separator */
        int mask_len = NR_CPUS/32 * 9;
        char *mask_str = kmalloc(mask_len, GFP_KERNEL);

        if (mask_str == NULL)
                return -ENOMEM;

        seq_printf(seq, "version %d\n", SCHEDSTAT_VERSION);
        seq_printf(seq, "timestamp %lu\n", jiffies);
        for_each_online_cpu(cpu) {
                struct rq *rq = cpu_rq(cpu);
                struct sched_domain *sd;
                int dcount = 0; /* index of the domain lines printed below */

                /* runqueue-specific stats */
                seq_printf(seq,
                    "cpu%d %u %u %u %u %u %u %u %u %u %llu %llu %lu",
                    cpu, rq->yld_both_empty,
                    rq->yld_act_empty, rq->yld_exp_empty, rq->yld_count,
                    rq->sched_switch, rq->sched_count, rq->sched_goidle,
                    rq->ttwu_count, rq->ttwu_local,
                    rq->rq_sched_info.cpu_time,
                    rq->rq_sched_info.run_delay, rq->rq_sched_info.pcount);

                seq_printf(seq, "\n");

                /* domain-specific stats */
                for_each_domain(cpu, sd) {
                        enum cpu_idle_type itype;

                        cpumask_scnprintf(mask_str, mask_len, sd->span);
                        seq_printf(seq, "domain%d %s", dcount++, mask_str);
                        for (itype = CPU_IDLE; itype < CPU_MAX_IDLE_TYPES;
                                        itype++) {
                                seq_printf(seq, " %u %u %u %u %u %u %u %u",
                                    sd->lb_count[itype],
                                    sd->lb_balanced[itype],
                                    sd->lb_failed[itype],
                                    sd->lb_imbalance[itype],
                                    sd->lb_gained[itype],
                                    sd->lb_hot_gained[itype],
                                    sd->lb_nobusyq[itype],
                                    sd->lb_nobusyg[itype]);
                        }
60 " %u %u %u %u %u %u %u %u %u %u %u %u\n",
61 sd
->alb_count
, sd
->alb_failed
, sd
->alb_pushed
,
62 sd
->sbe_count
, sd
->sbe_balanced
, sd
->sbe_pushed
,
63 sd
->sbf_count
, sd
->sbf_balanced
, sd
->sbf_pushed
,
64 sd
->ttwu_wake_remote
, sd
->ttwu_move_affine
,
65 sd
->ttwu_move_balance
);

static int schedstat_open(struct inode *inode, struct file *file)
{
        unsigned int size = PAGE_SIZE * (1 + num_online_cpus() / 32);
        char *buf = kmalloc(size, GFP_KERNEL);
        struct seq_file *m;
        int res;

        if (!buf)
                return -ENOMEM;
        res = single_open(file, show_schedstat, NULL);
        if (!res) {
                /* hand the preallocated, suitably sized buffer to seq_file */
                m = file->private_data;
                m->buf = buf;
                m->size = size;
        } else
                kfree(buf);
        return res;
}

const struct file_operations proc_schedstat_operations = {
        .open    = schedstat_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = single_release,
};
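
/*
 * Registration happens elsewhere in the kernel; illustratively (not code
 * from this file), something along the lines of:
 *
 *      proc_create("schedstat", 0, NULL, &proc_schedstat_operations);
 */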

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{
        if (rq) {
                rq->rq_sched_info.run_delay += delta;
                rq->rq_sched_info.pcount++;
        }
}

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{
        if (rq)
                rq->rq_sched_info.cpu_time += delta;
}

/* run_delay accrued by a task that is dequeued before it ever reached the cpu */
static inline void
rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
{
        if (rq)
                rq->rq_sched_info.run_delay += delta;
}
# define schedstat_inc(rq, field)       do { (rq)->field++; } while (0)
# define schedstat_add(rq, field, amt)  do { (rq)->field += (amt); } while (0)
# define schedstat_set(var, val)        do { var = (val); } while (0)
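
/*
 * Illustrative use only (the callers live in the scheduler core), e.g.
 * accounting a wakeup on the runqueue that handles it:
 *
 *      schedstat_inc(rq, ttwu_count);
 */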
#else /* !CONFIG_SCHEDSTATS */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{}
static inline void
rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
{}
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{}
# define schedstat_inc(rq, field)       do { } while (0)
# define schedstat_add(rq, field, amt)  do { } while (0)
# define schedstat_set(var, val)        do { } while (0)
#endif /* CONFIG_SCHEDSTATS */

#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
static inline void sched_info_reset_dequeued(struct task_struct *t)
{
        t->sched_info.last_queued = 0;
}

/*
 * Called when a process is dequeued from the active array and given
 * the cpu.  We should note that with the exception of interactive
 * tasks, the expired queue will become the active queue after the active
 * queue is empty, without explicitly dequeuing and requeuing tasks in the
 * expired queue.  (Interactive tasks may be requeued directly to the
 * active queue, thus delaying tasks in the expired queue from running;
 * see scheduler_tick()).
 *
 * Though we are interested in knowing how long it was from the *first* time a
 * task was queued to the time that it finally hit a cpu, we call this routine
 * from dequeue_task() to account for possible rq->clock skew across cpus. The
 * delta taken on each cpu would annul the skew.
 */
static inline void sched_info_dequeued(struct task_struct *t)
{
        unsigned long long now = task_rq(t)->clock, delta = 0;

        if (unlikely(sched_info_on()))
                if (t->sched_info.last_queued)
                        delta = now - t->sched_info.last_queued;
        sched_info_reset_dequeued(t);
        t->sched_info.run_delay += delta;

        rq_sched_info_dequeued(task_rq(t), delta);
}

/*
 * Called when a task finally hits the cpu.  We can now calculate how
 * long it was waiting to run.  We also note when it began so that we
 * can keep stats on how long its timeslice is.
 */
static void sched_info_arrive(struct task_struct *t)
{
        unsigned long long now = task_rq(t)->clock, delta = 0;

        if (t->sched_info.last_queued)
                delta = now - t->sched_info.last_queued;
        sched_info_reset_dequeued(t);
        t->sched_info.run_delay += delta;
        t->sched_info.last_arrival = now;
        t->sched_info.pcount++;

        rq_sched_info_arrive(task_rq(t), delta);
}

/*
 * Called when a process is queued into either the active or expired
 * array.  The time is noted and later used to determine how long we
 * had to wait to reach the cpu.  Since the expired queue will
 * become the active queue after the active queue is empty, without dequeuing
 * and requeuing any tasks, we are interested in queuing to either.  It
 * is unusual but not impossible for tasks to be dequeued and immediately
 * requeued in the same or another array: this can happen in sched_yield(),
 * set_user_nice(), and even load_balance() as it moves tasks from runqueue
 * to runqueue.
 *
 * This function is only called from enqueue_task(), but also only updates
 * the timestamp if it is not already set.  It's assumed that
 * sched_info_dequeued() will clear that stamp when appropriate.
 */
static inline void sched_info_queued(struct task_struct *t)
{
        if (unlikely(sched_info_on()))
                if (!t->sched_info.last_queued)
                        t->sched_info.last_queued = task_rq(t)->clock;
}

/*
 * Called when a process ceases being the active-running process, either
 * voluntarily or involuntarily.  Now we can calculate how long we ran.
 * Also, if the process is still in the TASK_RUNNING state, call
 * sched_info_queued() to mark that it has now again started waiting on
 * the runqueue.
 */
static inline void sched_info_depart(struct task_struct *t)
{
        unsigned long long delta = task_rq(t)->clock -
                                        t->sched_info.last_arrival;

        t->sched_info.cpu_time += delta;
        rq_sched_info_depart(task_rq(t), delta);

        if (t->state == TASK_RUNNING)
                sched_info_queued(t);
}
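
/*
 * Illustrative timeline (made-up clock values): a task queued at
 * rq->clock == 100 that first runs at 130 and stops running at 180 gets
 * run_delay += 30 and pcount++ in sched_info_arrive(), and cpu_time += 50
 * here in sched_info_depart().
 */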

/*
 * Called when tasks are switched involuntarily due, typically, to expiring
 * their time slice.  (This may also be called when switching to or from
 * the idle task.)  We are only called when prev != next.
 */
static inline void
__sched_info_switch(struct task_struct *prev, struct task_struct *next)
{
        struct rq *rq = task_rq(prev);

        /*
         * prev now departs the cpu.  It's not interesting to record
         * stats about how efficient we were at scheduling the idle
         * process, however.
         */
        if (prev != rq->idle)
                sched_info_depart(prev);

        if (next != rq->idle)
                sched_info_arrive(next);
}
static inline void
sched_info_switch(struct task_struct *prev, struct task_struct *next)
{
        if (unlikely(sched_info_on()))
                __sched_info_switch(prev, next);
}
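
/*
 * Note: the callers live in the scheduler core; sched_info_switch() is
 * invoked around a context switch with prev != next, as stated above, and
 * the rq_sched_info_*() helpers it reaches expect the runqueue lock to be
 * held at that point.
 */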
#else
#define sched_info_queued(t)            do { } while (0)
#define sched_info_reset_dequeued(t)    do { } while (0)
#define sched_info_dequeued(t)          do { } while (0)
#define sched_info_switch(t, next)      do { } while (0)
#endif /* CONFIG_SCHEDSTATS || CONFIG_TASK_DELAY_ACCT */