kernel/sched/stats.h
#ifdef CONFIG_SCHEDSTATS

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{
	if (rq) {
		rq->rq_sched_info.run_delay += delta;
		rq->rq_sched_info.pcount++;
	}
}

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_cpu_time += delta;
}

static inline void
rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_sched_info.run_delay += delta;
}

# define schedstat_inc(rq, field)	do { (rq)->field++; } while (0)
# define schedstat_add(rq, field, amt)	do { (rq)->field += (amt); } while (0)
# define schedstat_set(var, val)	do { var = (val); } while (0)
#else /* !CONFIG_SCHEDSTATS */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{}
static inline void
rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
{}
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{}
# define schedstat_inc(rq, field)	do { } while (0)
# define schedstat_add(rq, field, amt)	do { } while (0)
# define schedstat_set(var, val)	do { } while (0)
#endif
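
/*
 * Usage sketch (illustrative only, not part of this header): scheduler code
 * bumps per-runqueue counters through the macros above, so call sites
 * compile away entirely when CONFIG_SCHEDSTATS is off. The surrounding
 * caller below is a simplified assumption modelled on sys_sched_yield():
 *
 *	struct rq *rq = this_rq_lock();
 *
 *	schedstat_inc(rq, yld_count);	// one more sched_yield() on this rq
 */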
#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
static inline void sched_info_reset_dequeued(struct task_struct *t)
{
	t->sched_info.last_queued = 0;
}

/*
 * We are interested in knowing how long it was from the *first* time a
 * task was queued to the time that it finally hit a cpu. We call this routine
 * from dequeue_task() to account for possible rq->clock skew across cpus. The
 * delta taken on each cpu would annul the skew.
 */
static inline void sched_info_dequeued(struct task_struct *t)
{
	unsigned long long now = task_rq(t)->clock, delta = 0;

	if (unlikely(sched_info_on()))
		if (t->sched_info.last_queued)
			delta = now - t->sched_info.last_queued;
	sched_info_reset_dequeued(t);
	t->sched_info.run_delay += delta;

	rq_sched_info_dequeued(task_rq(t), delta);
}
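
/*
 * Worked example of the skew cancellation described above (the numbers are
 * made up for illustration): a task is queued on CPU0 at rq->clock = 1000
 * and dequeued there at 1400, then queued on CPU1 at rq->clock = 9000 and
 * finally runs at 9100. The two deltas (400 and 100) are each computed
 * against a single CPU's clock, so a constant offset between the two CPUs'
 * clocks never leaks into run_delay; only genuine wait time accumulates.
 */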
/*
 * Called when a task finally hits the cpu. We can now calculate how
 * long it was waiting to run. We also note when it began so that we
 * can keep stats on how long its timeslice is.
 */
static void sched_info_arrive(struct task_struct *t)
{
	unsigned long long now = task_rq(t)->clock, delta = 0;

	if (t->sched_info.last_queued)
		delta = now - t->sched_info.last_queued;
	sched_info_reset_dequeued(t);
	t->sched_info.run_delay += delta;
	t->sched_info.last_arrival = now;
	t->sched_info.pcount++;

	rq_sched_info_arrive(task_rq(t), delta);
}
/*
 * This function is only called from enqueue_task(), but also only updates
 * the timestamp if it is not already set. It's assumed that
 * sched_info_dequeued() will clear that stamp when appropriate.
 */
static inline void sched_info_queued(struct task_struct *t)
{
	if (unlikely(sched_info_on()))
		if (!t->sched_info.last_queued)
			t->sched_info.last_queued = task_rq(t)->clock;
}
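
/*
 * Caller sketch (a simplified assumption, not copied from this tree):
 * enqueue_task() is expected to stamp the start of the wait before handing
 * the task to its scheduling class, roughly:
 *
 *	static void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
 *	{
 *		update_rq_clock(rq);
 *		sched_info_queued(p);
 *		p->sched_class->enqueue_task(rq, p, flags);
 *	}
 */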
/*
 * Called when a process ceases being the active-running process, either
 * voluntarily or involuntarily. Now we can calculate how long we ran.
 * Also, if the process is still in the TASK_RUNNING state, call
 * sched_info_queued() to mark that it has now again started waiting on
 * the runqueue.
 */
static inline void sched_info_depart(struct task_struct *t)
{
	unsigned long long delta = task_rq(t)->clock -
					t->sched_info.last_arrival;

	rq_sched_info_depart(task_rq(t), delta);

	if (t->state == TASK_RUNNING)
		sched_info_queued(t);
}
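
/*
 * Illustrative timeline (numbers made up): sched_info_arrive() records
 * last_arrival when the task gets the CPU, say at rq->clock = 5000. If the
 * task is switched out at rq->clock = 8000, sched_info_depart() charges the
 * 3000 it ran to the runqueue via rq_sched_info_depart() and, if the task
 * is still runnable, immediately restarts the wait clock through
 * sched_info_queued().
 */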
/*
 * Called when tasks are switched involuntarily due, typically, to expiring
 * their time slice. (This may also be called when switching to or from
 * the idle task.) We are only called when prev != next.
 */
static inline void
__sched_info_switch(struct task_struct *prev, struct task_struct *next)
{
	struct rq *rq = task_rq(prev);

	/*
	 * prev now departs the cpu. It's not interesting to record
	 * stats about how efficient we were at scheduling the idle
	 * process, however.
	 */
	if (prev != rq->idle)
		sched_info_depart(prev);

	if (next != rq->idle)
		sched_info_arrive(next);
}

static inline void
sched_info_switch(struct task_struct *prev, struct task_struct *next)
{
	if (unlikely(sched_info_on()))
		__sched_info_switch(prev, next);
}
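
/*
 * Caller sketch (a simplified assumption, not copied from this tree): the
 * core scheduler's context-switch path is expected to invoke
 * sched_info_switch() exactly once per switch, before the switch happens,
 * roughly:
 *
 *	static inline void
 *	prepare_task_switch(struct rq *rq, struct task_struct *prev,
 *			    struct task_struct *next)
 *	{
 *		sched_info_switch(prev, next);
 *		...
 *	}
 */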
#else
#define sched_info_queued(t)			do { } while (0)
#define sched_info_reset_dequeued(t)		do { } while (0)
#define sched_info_dequeued(t)			do { } while (0)
#define sched_info_switch(t, next)		do { } while (0)
#endif /* CONFIG_SCHEDSTATS || CONFIG_TASK_DELAY_ACCT */
/*
 * The following are functions that support scheduler-internal time accounting.
 * These functions are generally called at the timer tick. None of this depends
 * on CONFIG_SCHEDSTATS.
 */

/**
 * account_group_user_time - Maintain utime for a thread group.
 *
 * @tsk:	Pointer to task structure.
 * @cputime:	Time value by which to increment the utime field of the
 *		thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the utime field there.
 */
static inline void account_group_user_time(struct task_struct *tsk,
					   cputime_t cputime)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

	if (!cputimer->running)
		return;

	raw_spin_lock(&cputimer->lock);
	cputimer->cputime.utime += cputime;
	raw_spin_unlock(&cputimer->lock);
}
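
/*
 * Illustrative call path (an assumption, not part of this header): the tick
 * accounting code that credits a task's user time is expected to forward the
 * same amount to the thread group, roughly:
 *
 *	void account_user_time(struct task_struct *p, cputime_t cputime, ...)
 *	{
 *		p->utime += cputime;
 *		account_group_user_time(p, cputime);
 *		...
 *	}
 */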
/**
 * account_group_system_time - Maintain stime for a thread group.
 *
 * @tsk:	Pointer to task structure.
 * @cputime:	Time value by which to increment the stime field of the
 *		thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the stime field there.
 */
static inline void account_group_system_time(struct task_struct *tsk,
					     cputime_t cputime)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

	if (!cputimer->running)
		return;

	raw_spin_lock(&cputimer->lock);
	cputimer->cputime.stime += cputime;
	raw_spin_unlock(&cputimer->lock);
}
/**
 * account_group_exec_runtime - Maintain exec runtime for a thread group.
 *
 * @tsk:	Pointer to task structure.
 * @ns:		Time value by which to increment the sum_exec_runtime field
 *		of the thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the sum_exec_runtime field there.
 */
static inline void account_group_exec_runtime(struct task_struct *tsk,
					      unsigned long long ns)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

	if (!cputimer->running)
		return;

	raw_spin_lock(&cputimer->lock);
	cputimer->cputime.sum_exec_runtime += ns;
	raw_spin_unlock(&cputimer->lock);
}
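
/*
 * Illustrative call path (an assumption, not part of this header): the fair
 * class's update_curr() is expected to charge the elapsed runtime to both
 * the running task and its thread group, roughly:
 *
 *	curtask->se.sum_exec_runtime += delta_exec;
 *	account_group_exec_runtime(curtask, delta_exec);
 *
 * The lock/unlock pair above keeps cputimer->cputime consistent for
 * concurrent readers such as the POSIX CPU-timer code.
 */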