/*
 * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
 *
 * Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Interactivity improvements by Mike Galbraith
 * (C) 2007 Mike Galbraith <efault@gmx.de>
 *
 * Various enhancements by Dmitry Adamushko.
 * (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
 *
 * Group scheduling enhancements by Srivatsa Vaddagiri
 * Copyright IBM Corporation, 2007
 * Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
 *
 * Scaled math optimizations by Thomas Gleixner
 * Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
 *
 * Adaptive scheduling granularity, math enhancements by Peter Zijlstra
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 */

#include <linux/latencytop.h>
/*
 * Targeted preemption latency for CPU-bound tasks:
 * (default: 20ms * (1 + ilog(ncpus)), units: nanoseconds)
 *
 * NOTE: this latency value is not the same as the concept of
 * 'timeslice length' - timeslices in CFS are of variable length
 * and have no persistent notion like in traditional, time-slice
 * based scheduling concepts.
 *
 * (to see the precise effective timeslice length of your workload,
 *  run vmstat and monitor the context-switches (cs) field)
 */
unsigned int sysctl_sched_latency = 20000000ULL;
/*
 * Minimal preemption granularity for CPU-bound tasks:
 * (default: 4 msec * (1 + ilog(ncpus)), units: nanoseconds)
 */
unsigned int sysctl_sched_min_granularity = 4000000ULL;

/*
 * is kept at sysctl_sched_latency / sysctl_sched_min_granularity
 */
static unsigned int sched_nr_latency = 5;
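
/*
 * Illustrative note (not part of the scheduler): with the defaults above,
 * 20000000 / 4000000 == 5, which is where the initial value of
 * sched_nr_latency comes from; sched_nr_latency_handler() further down
 * recomputes it with DIV_ROUND_UP whenever either sysctl changes.
 */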
/*
 * After fork, child runs first. (default) If set to 0 then
 * parent will (try to) run first.
 */
const_debug unsigned int sysctl_sched_child_runs_first = 1;

/*
 * sys_sched_yield() compat mode
 *
 * This option switches the aggressive yield implementation of the
 * old scheduler back on.
 */
unsigned int __read_mostly sysctl_sched_compat_yield;
/*
 * SCHED_OTHER wake-up granularity.
 * (default: 5 msec * (1 + ilog(ncpus)), units: nanoseconds)
 *
 * This option delays the preemption effects of decoupled workloads
 * and reduces their over-scheduling. Synchronous workloads will still
 * have immediate wakeup/sleep latencies.
 */
unsigned int sysctl_sched_wakeup_granularity = 5000000UL;

const_debug unsigned int sysctl_sched_migration_cost = 500000UL;

static const struct sched_class fair_sched_class;
/**************************************************************
 * CFS operations on generic schedulable entities:
 */

#ifdef CONFIG_FAIR_GROUP_SCHED

/* cpu runqueue to which this cfs_rq is attached */
static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
	return cfs_rq->rq;
}

/* An entity is a task if it doesn't "own" a runqueue */
#define entity_is_task(se)	(!se->my_q)
static inline struct task_struct *task_of(struct sched_entity *se)
{
#ifdef CONFIG_SCHED_DEBUG
	WARN_ON_ONCE(!entity_is_task(se));
#endif
	return container_of(se, struct task_struct, se);
}

/* Walk up scheduling entities hierarchy */
#define for_each_sched_entity(se) \
		for (; se; se = se->parent)
static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
	return p->se.cfs_rq;
}

/* runqueue on which this entity is (to be) queued */
static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
	return se->cfs_rq;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
	return grp->my_q;
}
/* Given a group's cfs_rq on one cpu, return its corresponding cfs_rq on
 * another cpu ('this_cpu')
 */
static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
{
	return cfs_rq->tg->cfs_rq[this_cpu];
}

/* Iterate through all leaf cfs_rq's on a runqueue */
#define for_each_leaf_cfs_rq(rq, cfs_rq) \
	list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
/* Do the two (enqueued) entities belong to the same group ? */
static inline int
is_same_group(struct sched_entity *se, struct sched_entity *pse)
{
	if (se->cfs_rq == pse->cfs_rq)
		return 1;

	return 0;
}
static inline struct sched_entity *parent_entity(struct sched_entity *se)
{
	return se->parent;
}

/* return depth at which a sched entity is present in the hierarchy */
static inline int depth_se(struct sched_entity *se)
{
	int depth = 0;

	for_each_sched_entity(se)
		depth++;

	return depth;
}
static void
find_matching_se(struct sched_entity **se, struct sched_entity **pse)
{
	int se_depth, pse_depth;

	/*
	 * preemption test can be made between sibling entities who are in the
	 * same cfs_rq i.e who have a common parent. Walk up the hierarchy of
	 * both tasks until we find their ancestors who are siblings of common
	 * parent.
	 */

	/* First walk up until both entities are at same depth */
	se_depth = depth_se(*se);
	pse_depth = depth_se(*pse);

	while (se_depth > pse_depth) {
		se_depth--;
		*se = parent_entity(*se);
	}

	while (pse_depth > se_depth) {
		pse_depth--;
		*pse = parent_entity(*pse);
	}

	while (!is_same_group(*se, *pse)) {
		*se = parent_entity(*se);
		*pse = parent_entity(*pse);
	}
}
#else	/* !CONFIG_FAIR_GROUP_SCHED */

static inline struct task_struct *task_of(struct sched_entity *se)
{
	return container_of(se, struct task_struct, se);
}

static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
	return container_of(cfs_rq, struct rq, cfs);
}

#define entity_is_task(se)	1

#define for_each_sched_entity(se) \
		for (; se; se = NULL)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
	return &task_rq(p)->cfs;
}

static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
	struct task_struct *p = task_of(se);
	struct rq *rq = task_rq(p);

	return &rq->cfs;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
	return NULL;
}

static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
{
	return &cpu_rq(this_cpu)->cfs;
}

#define for_each_leaf_cfs_rq(rq, cfs_rq) \
		for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)

static inline int
is_same_group(struct sched_entity *se, struct sched_entity *pse)
{
	return 1;
}

static inline struct sched_entity *parent_entity(struct sched_entity *se)
{
	return NULL;
}

static inline void
find_matching_se(struct sched_entity **se, struct sched_entity **pse)
{
}

#endif	/* CONFIG_FAIR_GROUP_SCHED */
/**************************************************************
 * Scheduling class tree data structure manipulation methods:
 */

static inline u64 max_vruntime(u64 min_vruntime, u64 vruntime)
{
	s64 delta = (s64)(vruntime - min_vruntime);
	if (delta > 0)
		min_vruntime = vruntime;

	return min_vruntime;
}
static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
{
	s64 delta = (s64)(vruntime - min_vruntime);
	if (delta < 0)
		min_vruntime = vruntime;

	return min_vruntime;
}
static inline int entity_before(struct sched_entity *a,
				struct sched_entity *b)
{
	return (s64)(a->vruntime - b->vruntime) < 0;
}

static inline s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	return se->vruntime - cfs_rq->min_vruntime;
}
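
/*
 * Illustrative sketch (not scheduler code): the helpers above compare
 * vruntimes through a signed difference so ordering stays correct even
 * after the unsigned 64-bit counters wrap around.  The example_before()
 * name and values below are invented for the illustration only.
 */
#if 0	/* example only */
#include <stdint.h>

static int example_before(uint64_t a, uint64_t b)
{
	/* same idiom as entity_before(): take a signed view of the delta */
	return (int64_t)(a - b) < 0;
}

/*
 * example_before(UINT64_MAX - 5, 10) returns 1: a value just below the
 * wrap point is still ordered before a freshly wrapped value, whereas a
 * plain 'a < b' comparison would claim the opposite.
 */
#endif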
static void update_min_vruntime(struct cfs_rq *cfs_rq)
{
	u64 vruntime = cfs_rq->min_vruntime;

	if (cfs_rq->curr)
		vruntime = cfs_rq->curr->vruntime;

	if (cfs_rq->rb_leftmost) {
		struct sched_entity *se = rb_entry(cfs_rq->rb_leftmost,
						   struct sched_entity,
						   run_node);

		if (!cfs_rq->curr)
			vruntime = se->vruntime;
		else
			vruntime = min_vruntime(vruntime, se->vruntime);
	}

	cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
}
/*
 * Enqueue an entity into the rb-tree:
 */
static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
	struct rb_node *parent = NULL;
	struct sched_entity *entry;
	s64 key = entity_key(cfs_rq, se);
	int leftmost = 1;

	/*
	 * Find the right place in the rbtree:
	 */
	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct sched_entity, run_node);
		/*
		 * We don't care about collisions. Nodes with
		 * the same key stay together.
		 */
		if (key < entity_key(cfs_rq, entry)) {
			link = &parent->rb_left;
		} else {
			link = &parent->rb_right;
			leftmost = 0;
		}
	}

	/*
	 * Maintain a cache of leftmost tree entries (it is frequently
	 * used):
	 */
	if (leftmost)
		cfs_rq->rb_leftmost = &se->run_node;

	rb_link_node(&se->run_node, parent, link);
	rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
}
static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	if (cfs_rq->rb_leftmost == &se->run_node) {
		struct rb_node *next_node;

		next_node = rb_next(&se->run_node);
		cfs_rq->rb_leftmost = next_node;
	}

	rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
}
static struct sched_entity *__pick_next_entity(struct cfs_rq *cfs_rq)
{
	struct rb_node *left = cfs_rq->rb_leftmost;

	if (!left)
		return NULL;

	return rb_entry(left, struct sched_entity, run_node);
}

static struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
{
	struct rb_node *last = rb_last(&cfs_rq->tasks_timeline);

	if (!last)
		return NULL;

	return rb_entry(last, struct sched_entity, run_node);
}
/**************************************************************
 * Scheduling class statistics methods:
 */

#ifdef CONFIG_SCHED_DEBUG
int sched_nr_latency_handler(struct ctl_table *table, int write,
		struct file *filp, void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int ret = proc_dointvec_minmax(table, write, filp, buffer, lenp, ppos);

	if (ret || !write)
		return ret;

	sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
					sysctl_sched_min_granularity);

	return 0;
}
#endif
static inline unsigned long
calc_delta_fair(unsigned long delta, struct sched_entity *se)
{
	if (unlikely(se->load.weight != NICE_0_LOAD))
		delta = calc_delta_mine(delta, NICE_0_LOAD, &se->load);

	return delta;
}
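
/*
 * Illustrative sketch (not scheduler code): calc_delta_fair() scales a
 * wall-clock delta by NICE_0_LOAD / weight, so heavier (lower nice)
 * tasks accumulate vruntime more slowly.  The constant 1024 and the
 * example_weighted_delta() name are used for the illustration only; the
 * real code goes through calc_delta_mine() and its inverse-weight
 * fixed-point arithmetic.
 */
#if 0	/* example only */
static unsigned long example_weighted_delta(unsigned long delta_ns,
					    unsigned long weight)
{
	/* NICE_0_LOAD is 1024; a nice-0 task sees vruntime == real time */
	return (unsigned long)((1024ULL * delta_ns) / weight);
}

/*
 * example_weighted_delta(1000000, 2048) == 500000: a task with twice the
 * nice-0 weight advances its vruntime at half the wall-clock rate.
 */
#endif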
/*
 * The idea is to set a period in which each task runs once.
 *
 * When there are too many tasks (sysctl_sched_nr_latency) we have to stretch
 * this period because otherwise the slices get too small.
 *
 * p = (nr <= nl) ? l : l*nr/nl
 */
static u64 __sched_period(unsigned long nr_running)
{
	u64 period = sysctl_sched_latency;
	unsigned long nr_latency = sched_nr_latency;

	if (unlikely(nr_running > nr_latency)) {
		period = sysctl_sched_min_granularity;
		period *= nr_running;
	}

	return period;
}
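
/*
 * Worked example (illustration only, using the default values declared
 * above and ignoring the ilog(ncpus) scaling): up to 5 runnable tasks
 * share a 20ms period; beyond that the period stretches so every task
 * still gets at least the 4ms minimum granularity.  The example_period()
 * name is invented for the sketch.
 */
#if 0	/* example only */
static unsigned long long example_period(unsigned long nr_running)
{
	unsigned long long latency = 20000000ULL;	/* 20 ms */
	unsigned long long min_gran = 4000000ULL;	/*  4 ms */
	unsigned long nr_latency = 5;

	if (nr_running > nr_latency)
		return min_gran * nr_running;	/* e.g. 8 tasks -> 32 ms */

	return latency;				/* <= 5 tasks -> 20 ms */
}
#endif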
/*
 * We calculate the wall-time slice from the period by taking a part
 * proportional to the weight.
 *
 * s = p*P[w/rw]
 */
static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);

	for_each_sched_entity(se) {
		struct load_weight *load;
		struct load_weight lw;

		cfs_rq = cfs_rq_of(se);
		load = &cfs_rq->load;

		if (unlikely(!se->on_rq)) {
			lw = cfs_rq->load;

			update_load_add(&lw, se->load.weight);
			load = &lw;
		}
		slice = calc_delta_mine(slice, se->load.weight, load);
	}
	return slice;
}
/*
 * We calculate the vruntime slice of a to-be-inserted task.
 *
 * vs = s/w
 */
static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	return calc_delta_fair(sched_slice(cfs_rq, se), se);
}
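
/*
 * Illustrative sketch (not scheduler code): sched_slice() hands each
 * entity a share of the period proportional to its weight within its
 * cfs_rq.  The example_slice() helper is made up for the illustration
 * and ignores the fixed-point inverse-weight arithmetic used by
 * calc_delta_mine().
 */
#if 0	/* example only */
static unsigned long long example_slice(unsigned long long period_ns,
					unsigned long weight,
					unsigned long rq_weight)
{
	return period_ns * weight / rq_weight;
}

/*
 * Two nice-0 tasks (weight 1024 each) sharing a 20ms period each get
 * example_slice(20000000, 1024, 2048) == 10ms of wall time.
 */
#endif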
/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static inline void
__update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
	      unsigned long delta_exec)
{
	unsigned long delta_exec_weighted;

	schedstat_set(curr->exec_max, max((u64)delta_exec, curr->exec_max));

	curr->sum_exec_runtime += delta_exec;
	schedstat_add(cfs_rq, exec_clock, delta_exec);
	delta_exec_weighted = calc_delta_fair(delta_exec, curr);
	curr->vruntime += delta_exec_weighted;
	update_min_vruntime(cfs_rq);
}
static void update_curr(struct cfs_rq *cfs_rq)
{
	struct sched_entity *curr = cfs_rq->curr;
	u64 now = rq_of(cfs_rq)->clock;
	unsigned long delta_exec;

	if (unlikely(!curr))
		return;

	/*
	 * Get the amount of time the current task was running
	 * since the last time we changed load (this cannot
	 * overflow on 32 bits):
	 */
	delta_exec = (unsigned long)(now - curr->exec_start);
	if (!delta_exec)
		return;

	__update_curr(cfs_rq, curr, delta_exec);
	curr->exec_start = now;

	if (entity_is_task(curr)) {
		struct task_struct *curtask = task_of(curr);

		cpuacct_charge(curtask, delta_exec);
		account_group_exec_runtime(curtask, delta_exec);
	}
}
static inline void
update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	schedstat_set(se->wait_start, rq_of(cfs_rq)->clock);
}
/*
 * Task is being enqueued - update stats:
 */
static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/*
	 * Are we enqueueing a waiting task? (for current tasks
	 * a dequeue/enqueue event is a NOP)
	 */
	if (se != cfs_rq->curr)
		update_stats_wait_start(cfs_rq, se);
}
static void
update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	schedstat_set(se->wait_max, max(se->wait_max,
			rq_of(cfs_rq)->clock - se->wait_start));
	schedstat_set(se->wait_count, se->wait_count + 1);
	schedstat_set(se->wait_sum, se->wait_sum +
			rq_of(cfs_rq)->clock - se->wait_start);
#ifdef CONFIG_SCHEDSTATS
	if (entity_is_task(se)) {
		trace_sched_stat_wait(task_of(se),
			rq_of(cfs_rq)->clock - se->wait_start);
	}
#endif
	/* clear wait_start only after the tracepoint has consumed it */
	schedstat_set(se->wait_start, 0);
}
static inline void
update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/*
	 * Mark the end of the wait period if dequeueing a
	 * waiting task:
	 */
	if (se != cfs_rq->curr)
		update_stats_wait_end(cfs_rq, se);
}
/*
 * We are picking a new current task - update its stats:
 */
static inline void
update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/*
	 * We are starting a new run period:
	 */
	se->exec_start = rq_of(cfs_rq)->clock;
}
/**************************************************
 * Scheduling class queueing methods:
 */

#if defined CONFIG_SMP && defined CONFIG_FAIR_GROUP_SCHED
static void
add_cfs_task_weight(struct cfs_rq *cfs_rq, unsigned long weight)
{
	cfs_rq->task_weight += weight;
}
#else
static inline void
add_cfs_task_weight(struct cfs_rq *cfs_rq, unsigned long weight)
{
}
#endif
static void
account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	update_load_add(&cfs_rq->load, se->load.weight);
	if (!parent_entity(se))
		inc_cpu_load(rq_of(cfs_rq), se->load.weight);
	if (entity_is_task(se)) {
		add_cfs_task_weight(cfs_rq, se->load.weight);
		list_add(&se->group_node, &cfs_rq->tasks);
	}
	cfs_rq->nr_running++;
}
static void
account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	update_load_sub(&cfs_rq->load, se->load.weight);
	if (!parent_entity(se))
		dec_cpu_load(rq_of(cfs_rq), se->load.weight);
	if (entity_is_task(se)) {
		add_cfs_task_weight(cfs_rq, -se->load.weight);
		list_del_init(&se->group_node);
	}
	cfs_rq->nr_running--;
}
static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
#ifdef CONFIG_SCHEDSTATS
	struct task_struct *tsk = NULL;

	if (entity_is_task(se))
		tsk = task_of(se);

	if (se->sleep_start) {
		u64 delta = rq_of(cfs_rq)->clock - se->sleep_start;

		if (unlikely(delta > se->sleep_max))
			se->sleep_max = delta;

		se->sleep_start = 0;
		se->sum_sleep_runtime += delta;

		if (tsk) {
			account_scheduler_latency(tsk, delta >> 10, 1);
			trace_sched_stat_sleep(tsk, delta);
		}
	}
	if (se->block_start) {
		u64 delta = rq_of(cfs_rq)->clock - se->block_start;

		if (unlikely(delta > se->block_max))
			se->block_max = delta;

		se->block_start = 0;
		se->sum_sleep_runtime += delta;

		if (tsk) {
			if (tsk->in_iowait) {
				se->iowait_sum += delta;
				trace_sched_stat_iowait(tsk, delta);
			}

			/*
			 * Blocking time is in units of nanosecs, so shift by
			 * 20 to get a milliseconds-range estimation of the
			 * amount of time that the task spent sleeping:
			 */
			if (unlikely(prof_on == SLEEP_PROFILING)) {
				profile_hits(SLEEP_PROFILING,
						(void *)get_wchan(tsk),
						delta >> 20);
			}
			account_scheduler_latency(tsk, delta >> 10, 0);
		}
	}
#endif
}
static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
#ifdef CONFIG_SCHED_DEBUG
	s64 d = se->vruntime - cfs_rq->min_vruntime;

	if (d < 0)
		d = -d;

	if (d > 3*sysctl_sched_latency)
		schedstat_inc(cfs_rq, nr_spread_over);
#endif
}
static void
place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
{
	u64 vruntime = cfs_rq->min_vruntime;

	/*
	 * The 'current' period is already promised to the current tasks,
	 * however the extra weight of the new task will slow them down a
	 * little, place the new task so that it fits in the slot that
	 * stays open at the end.
	 */
	if (initial && sched_feat(START_DEBIT))
		vruntime += sched_vslice(cfs_rq, se);

	if (!initial) {
		/* sleeps up to a single latency don't count. */
		if (sched_feat(NEW_FAIR_SLEEPERS)) {
			unsigned long thresh = sysctl_sched_latency;

			/*
			 * Convert the sleeper threshold into virtual time.
			 * SCHED_IDLE is a special sub-class. We care about
			 * fairness only relative to other SCHED_IDLE tasks,
			 * all of which have the same weight.
			 */
			if (sched_feat(NORMALIZED_SLEEPER) &&
					(!entity_is_task(se) ||
					 task_of(se)->policy != SCHED_IDLE))
				thresh = calc_delta_fair(thresh, se);

			vruntime -= thresh;
		}

		/* ensure we never gain time by being placed backwards. */
		vruntime = max_vruntime(se->vruntime, vruntime);
	}

	se->vruntime = vruntime;
}
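
/*
 * Illustrative sketch (not scheduler code) of the placement policy above:
 * a newly forked task is debited one vslice past min_vruntime
 * (START_DEBIT), while a waking sleeper is credited up to one latency
 * period, clamped so it never moves backwards in virtual time.  The
 * example_place() name and signed arithmetic are invented for the
 * illustration; the real code uses wraparound-safe max_vruntime() and
 * the NORMALIZED_SLEEPER scaling.
 */
#if 0	/* example only */
static long long example_place(long long min_vruntime, long long se_vruntime,
			       long long vslice, long long latency,
			       int initial)
{
	long long vruntime = min_vruntime;

	if (initial) {
		vruntime += vslice;		/* new task: start in debt */
	} else {
		vruntime -= latency;		/* sleeper: bounded credit */
		if (vruntime < se_vruntime)	/* never gain time */
			vruntime = se_vruntime;
	}
	return vruntime;
}
#endif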
static void
enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int wakeup)
{
	/*
	 * Update run-time statistics of the 'current'.
	 */
	update_curr(cfs_rq);
	account_entity_enqueue(cfs_rq, se);

	if (wakeup) {
		place_entity(cfs_rq, se, 0);
		enqueue_sleeper(cfs_rq, se);
	}

	update_stats_enqueue(cfs_rq, se);
	check_spread(cfs_rq, se);
	if (se != cfs_rq->curr)
		__enqueue_entity(cfs_rq, se);
}
static void __clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	if (cfs_rq->last == se)
		cfs_rq->last = NULL;

	if (cfs_rq->next == se)
		cfs_rq->next = NULL;
}

static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	for_each_sched_entity(se)
		__clear_buddies(cfs_rq_of(se), se);
}
static void
dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
{
	/*
	 * Update run-time statistics of the 'current'.
	 */
	update_curr(cfs_rq);

	update_stats_dequeue(cfs_rq, se);
	if (sleep) {
#ifdef CONFIG_SCHEDSTATS
		if (entity_is_task(se)) {
			struct task_struct *tsk = task_of(se);

			if (tsk->state & TASK_INTERRUPTIBLE)
				se->sleep_start = rq_of(cfs_rq)->clock;
			if (tsk->state & TASK_UNINTERRUPTIBLE)
				se->block_start = rq_of(cfs_rq)->clock;
		}
#endif
	}

	clear_buddies(cfs_rq, se);

	if (se != cfs_rq->curr)
		__dequeue_entity(cfs_rq, se);
	account_entity_dequeue(cfs_rq, se);
	update_min_vruntime(cfs_rq);
}
/*
 * Preempt the current task with a newly woken task if needed:
 */
static void
check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
{
	unsigned long ideal_runtime, delta_exec;

	ideal_runtime = sched_slice(cfs_rq, curr);
	delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
	if (delta_exec > ideal_runtime) {
		resched_task(rq_of(cfs_rq)->curr);
		/*
		 * The current task ran long enough, ensure it doesn't get
		 * re-elected due to buddy favours.
		 */
		clear_buddies(cfs_rq, curr);
	}
}
static void
set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/* 'current' is not kept within the tree. */
	if (se->on_rq) {
		/*
		 * Any task has to be enqueued before it gets to execute on
		 * a CPU. So account for the time it spent waiting on the
		 * runqueue.
		 */
		update_stats_wait_end(cfs_rq, se);
		__dequeue_entity(cfs_rq, se);
	}

	update_stats_curr_start(cfs_rq, se);
	cfs_rq->curr = se;
#ifdef CONFIG_SCHEDSTATS
	/*
	 * Track our maximum slice length, if the CPU's load is at
	 * least twice that of our own weight (i.e. don't track it
	 * when there are only lesser-weight tasks around):
	 */
	if (rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
		se->slice_max = max(se->slice_max,
			se->sum_exec_runtime - se->prev_sum_exec_runtime);
	}
#endif
	se->prev_sum_exec_runtime = se->sum_exec_runtime;
}
static int
wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);

static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
{
	struct sched_entity *se = __pick_next_entity(cfs_rq);

	if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, se) < 1)
		return cfs_rq->next;

	if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, se) < 1)
		return cfs_rq->last;

	return se;
}
static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
{
	/*
	 * If still on the runqueue then deactivate_task()
	 * was not called and update_curr() has to be done:
	 */
	if (prev->on_rq)
		update_curr(cfs_rq);

	check_spread(cfs_rq, prev);
	if (prev->on_rq) {
		update_stats_wait_start(cfs_rq, prev);
		/* Put 'current' back into the tree. */
		__enqueue_entity(cfs_rq, prev);
	}
	cfs_rq->curr = NULL;
}
static void
entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
{
	/*
	 * Update run-time statistics of the 'current'.
	 */
	update_curr(cfs_rq);

#ifdef CONFIG_SCHED_HRTICK
	/*
	 * queued ticks are scheduled to match the slice, so don't bother
	 * validating it and just reschedule.
	 */
	if (queued) {
		resched_task(rq_of(cfs_rq)->curr);
		return;
	}
	/*
	 * don't let the period tick interfere with the hrtick preemption
	 */
	if (!sched_feat(DOUBLE_TICK) &&
			hrtimer_active(&rq_of(cfs_rq)->hrtick_timer))
		return;
#endif

	if (cfs_rq->nr_running > 1 || !sched_feat(WAKEUP_PREEMPT))
		check_preempt_tick(cfs_rq, curr);
}
/**************************************************
 * CFS operations on tasks:
 */

#ifdef CONFIG_SCHED_HRTICK
static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
{
	struct sched_entity *se = &p->se;
	struct cfs_rq *cfs_rq = cfs_rq_of(se);

	WARN_ON(task_rq(p) != rq);

	if (hrtick_enabled(rq) && cfs_rq->nr_running > 1) {
		u64 slice = sched_slice(cfs_rq, se);
		u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
		s64 delta = slice - ran;

		if (delta < 0) {
			if (rq->curr == p)
				resched_task(p);
			return;
		}

		/*
		 * Don't schedule slices shorter than 10000ns, that just
		 * doesn't make sense. Rely on vruntime for fairness.
		 */
		if (rq->curr != p)
			delta = max_t(s64, 10000LL, delta);

		hrtick_start(rq, delta);
	}
}
/*
 * called from enqueue/dequeue and updates the hrtick when the
 * current task is from our class and nr_running is low enough
 * to matter.
 */
static void hrtick_update(struct rq *rq)
{
	struct task_struct *curr = rq->curr;

	if (curr->sched_class != &fair_sched_class)
		return;

	if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency)
		hrtick_start_fair(rq, curr);
}
#else /* !CONFIG_SCHED_HRTICK */
static inline void
hrtick_start_fair(struct rq *rq, struct task_struct *p)
{
}

static inline void hrtick_update(struct rq *rq)
{
}
#endif
/*
 * The enqueue_task method is called before nr_running is
 * increased. Here we update the fair scheduling stats and
 * then put the task into the rbtree:
 */
static void enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup)
{
	struct cfs_rq *cfs_rq;
	struct sched_entity *se = &p->se;

	for_each_sched_entity(se) {
		if (se->on_rq)
			break;
		cfs_rq = cfs_rq_of(se);
		enqueue_entity(cfs_rq, se, wakeup);
		wakeup = 1;
	}

	hrtick_update(rq);
}
/*
 * The dequeue_task method is called before nr_running is
 * decreased. We remove the task from the rbtree and
 * update the fair scheduling stats:
 */
static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int sleep)
{
	struct cfs_rq *cfs_rq;
	struct sched_entity *se = &p->se;

	for_each_sched_entity(se) {
		cfs_rq = cfs_rq_of(se);
		dequeue_entity(cfs_rq, se, sleep);
		/* Don't dequeue parent if it has other entities besides us */
		if (cfs_rq->load.weight)
			break;
		sleep = 1;
	}

	hrtick_update(rq);
}
/*
 * sched_yield() support is very simple - we dequeue and enqueue.
 *
 * If compat_yield is turned on then we requeue to the end of the tree.
 */
static void yield_task_fair(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
	struct sched_entity *rightmost, *se = &curr->se;

	/*
	 * Are we the only task in the tree?
	 */
	if (unlikely(cfs_rq->nr_running == 1))
		return;

	clear_buddies(cfs_rq, se);

	if (likely(!sysctl_sched_compat_yield) && curr->policy != SCHED_BATCH) {
		update_rq_clock(rq);
		/*
		 * Update run-time statistics of the 'current'.
		 */
		update_curr(cfs_rq);

		return;
	}
	/*
	 * Find the rightmost entry in the rbtree:
	 */
	rightmost = __pick_last_entity(cfs_rq);
	/*
	 * Already in the rightmost position?
	 */
	if (unlikely(!rightmost || entity_before(rightmost, se)))
		return;

	/*
	 * Minimally necessary key value to be last in the tree:
	 * Upon rescheduling, sched_class::put_prev_task() will place
	 * 'current' within the tree based on its new key value.
	 */
	se->vruntime = rightmost->vruntime + 1;
}
#ifdef CONFIG_SMP

/*
 * wake_idle() will wake a task on an idle cpu if task->cpu is
 * not idle and an idle cpu is available. The span of cpus to
 * search starts with cpus closest then further out as needed,
 * so we always favor a closer, idle cpu.
 * Domains may include CPUs that are not usable for migration,
 * hence we need to mask them out (rq->rd->online)
 *
 * Returns the CPU we should wake onto.
 */
#if defined(ARCH_HAS_SCHED_WAKE_IDLE)

#define cpu_rd_active(cpu, rq) cpumask_test_cpu(cpu, rq->rd->online)

static int wake_idle(int cpu, struct task_struct *p)
{
	struct sched_domain *sd;
	int i;
	unsigned int chosen_wakeup_cpu;
	int this_cpu;
	struct rq *task_rq = task_rq(p);

	/*
	 * At POWERSAVINGS_BALANCE_WAKEUP level, if both this_cpu and prev_cpu
	 * are idle and this is not a kernel thread and this task's affinity
	 * allows it to be moved to preferred cpu, then just move!
	 */

	this_cpu = smp_processor_id();
	chosen_wakeup_cpu =
		cpu_rq(this_cpu)->rd->sched_mc_preferred_wakeup_cpu;

	if (sched_mc_power_savings >= POWERSAVINGS_BALANCE_WAKEUP &&
		idle_cpu(cpu) && idle_cpu(this_cpu) &&
		p->mm && !(p->flags & PF_KTHREAD) &&
		cpu_isset(chosen_wakeup_cpu, p->cpus_allowed))
		return chosen_wakeup_cpu;

	/*
	 * If it is idle, then it is the best cpu to run this task.
	 *
	 * This cpu is also the best, if it has more than one task already.
	 * Siblings must be also busy(in most cases) as they didn't already
	 * pickup the extra load from this cpu and hence we need not check
	 * sibling runqueue info. This will avoid the checks and cache miss
	 * penalties associated with that.
	 */
	if (idle_cpu(cpu) || cpu_rq(cpu)->cfs.nr_running > 1)
		return cpu;

	for_each_domain(cpu, sd) {
		if ((sd->flags & SD_WAKE_IDLE)
		    || ((sd->flags & SD_WAKE_IDLE_FAR)
			&& !task_hot(p, task_rq->clock, sd))) {
			for_each_cpu_and(i, sched_domain_span(sd),
					 &p->cpus_allowed) {
				if (cpu_rd_active(i, task_rq) && idle_cpu(i)) {
					if (i != task_cpu(p)) {
						schedstat_inc(p,
						       se.nr_wakeups_idle);
					}
					return i;
				}
			}
		} else {
			break;
		}
	}
	return cpu;
}
#else /* !ARCH_HAS_SCHED_WAKE_IDLE*/
static inline int wake_idle(int cpu, struct task_struct *p)
{
	return cpu;
}
#endif
#ifdef CONFIG_FAIR_GROUP_SCHED
/*
 * effective_load() calculates the load change as seen from the root_task_group
 *
 * Adding load to a group doesn't make a group heavier, but can cause movement
 * of group shares between cpus. Assuming the shares were perfectly aligned one
 * can calculate the shift in shares.
 *
 * The problem is that perfectly aligning the shares is rather expensive, hence
 * we try to avoid doing that too often - see update_shares(), which ratelimits
 * this.
 *
 * We compensate this by not only taking the current delta into account, but
 * also considering the delta between when the shares were last adjusted and
 * now.
 *
 * We still saw a performance dip; some tracing showed that between
 * cgroup:/ and cgroup:/foo balancing the number of affine wakeups increased
 * significantly. Therefore try to bias the error in direction of failing
 * the affine wakeup.
 */
static long effective_load(struct task_group *tg, int cpu,
		long wl, long wg)
{
	struct sched_entity *se = tg->se[cpu];

	if (!tg->parent)
		return wl;

	/*
	 * By not taking the decrease of shares on the other cpu into
	 * account our error leans towards reducing the affine wakeups.
	 */
	if (!wl && sched_feat(ASYM_EFF_LOAD))
		return wl;

	for_each_sched_entity(se) {
		long S, rw, s, a, b;
		long more_w;

		/*
		 * Instead of using this increment, also add the difference
		 * between when the shares were last updated and now.
		 */
		more_w = se->my_q->load.weight - se->my_q->rq_weight;
		wl += more_w;
		wg += more_w;

		S = se->my_q->tg->shares;
		s = se->my_q->shares;
		rw = se->my_q->rq_weight;

		a = S*(rw + wl);
		b = S*rw + s*wg;

		wl = s*(a-b);

		if (likely(b))
			wl /= b;

		/*
		 * Assume the group is already running and will
		 * thus already be accounted for in the weight.
		 *
		 * That is, moving shares between CPUs, does not
		 * alter the group weight.
		 */
		wg = 0;
	}

	return wl;
}
#else

static inline unsigned long effective_load(struct task_group *tg, int cpu,
		unsigned long wl, unsigned long wg)
{
	return wl;
}

#endif
static int
wake_affine(struct sched_domain *this_sd, struct rq *this_rq,
	    struct task_struct *p, int prev_cpu, int this_cpu, int sync,
	    int idx, unsigned long load, unsigned long this_load,
	    unsigned int imbalance)
{
	struct task_struct *curr = this_rq->curr;
	struct task_group *tg;
	unsigned long tl = this_load;
	unsigned long tl_per_task;
	unsigned long weight;
	int balanced;

	if (!(this_sd->flags & SD_WAKE_AFFINE) || !sched_feat(AFFINE_WAKEUPS))
		return 0;

	if (sync && (curr->se.avg_overlap > sysctl_sched_migration_cost ||
			p->se.avg_overlap > sysctl_sched_migration_cost))
		sync = 0;

	/*
	 * If sync wakeup then subtract the (maximum possible)
	 * effect of the currently running task from the load
	 * of the current CPU:
	 */
	if (sync) {
		tg = task_group(current);
		weight = current->se.load.weight;

		tl += effective_load(tg, this_cpu, -weight, -weight);
		load += effective_load(tg, prev_cpu, 0, -weight);
	}

	tg = task_group(p);
	weight = p->se.load.weight;

	balanced = 100*(tl + effective_load(tg, this_cpu, weight, weight)) <=
		imbalance*(load + effective_load(tg, prev_cpu, 0, weight));

	/*
	 * If the currently running task will sleep within
	 * a reasonable amount of time then attract this newly
	 * woken task:
	 */
	if (sync && balanced)
		return 1;

	schedstat_inc(p, se.nr_wakeups_affine_attempts);
	tl_per_task = cpu_avg_load_per_task(this_cpu);

	if (balanced || (tl <= load && tl + target_load(prev_cpu, idx) <=
			tl_per_task)) {
		/*
		 * This domain has SD_WAKE_AFFINE and
		 * p is cache cold in this domain, and
		 * there is no bad imbalance.
		 */
		schedstat_inc(this_sd, ttwu_move_affine);
		schedstat_inc(p, se.nr_wakeups_affine);

		return 1;
	}
	return 0;
}
static int select_task_rq_fair(struct task_struct *p, int sync)
{
	struct sched_domain *sd, *this_sd = NULL;
	int prev_cpu, this_cpu, new_cpu;
	unsigned long load, this_load;
	struct rq *this_rq;
	unsigned int imbalance;
	int idx;

	prev_cpu = task_cpu(p);
	this_cpu = smp_processor_id();
	this_rq	 = cpu_rq(this_cpu);
	new_cpu	 = prev_cpu;

	if (prev_cpu == this_cpu)
		goto out;
	/*
	 * 'this_sd' is the first domain that both
	 * this_cpu and prev_cpu are present in:
	 */
	for_each_domain(this_cpu, sd) {
		if (cpumask_test_cpu(prev_cpu, sched_domain_span(sd))) {
			this_sd = sd;
			break;
		}
	}

	if (unlikely(!cpumask_test_cpu(this_cpu, &p->cpus_allowed)))
		goto out;

	/*
	 * Check for affine wakeup and passive balancing possibilities.
	 */
	if (!this_sd)
		goto out;

	idx = this_sd->wake_idx;

	imbalance = 100 + (this_sd->imbalance_pct - 100) / 2;

	load = source_load(prev_cpu, idx);
	this_load = target_load(this_cpu, idx);

	if (wake_affine(this_sd, this_rq, p, prev_cpu, this_cpu, sync, idx,
				     load, this_load, imbalance))
		return this_cpu;

	/*
	 * Start passive balancing when half the imbalance_pct
	 * limit is reached.
	 */
	if (this_sd->flags & SD_WAKE_BALANCE) {
		if (imbalance*this_load <= 100*load) {
			schedstat_inc(this_sd, ttwu_move_balance);
			schedstat_inc(p, se.nr_wakeups_passive);
			return this_cpu;
		}
	}

out:
	return wake_idle(new_cpu, p);
}
#endif /* CONFIG_SMP */
/*
 * Adaptive granularity
 *
 * se->avg_wakeup gives the average time a task runs until it does a wakeup,
 * with the limit of wakeup_gran -- when it never does a wakeup.
 *
 * So the smaller avg_wakeup is the faster we want this task to preempt,
 * but we don't want to treat the preemptee unfairly and therefore allow it
 * to run for at least the amount of time we'd like to run.
 *
 * NOTE: we use 2*avg_wakeup to increase the probability of actually doing one
 *       wakeup in that window.
 *
 * NOTE: we use *nr_running to scale with load, this nicely matches the
 *       degrading latency on load.
 */
static unsigned long
adaptive_gran(struct sched_entity *curr, struct sched_entity *se)
{
	u64 this_run = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
	u64 expected_wakeup = 2*se->avg_wakeup * cfs_rq_of(se)->nr_running;
	u64 gran = 0;

	if (this_run < expected_wakeup)
		gran = expected_wakeup - this_run;

	return min_t(s64, gran, sysctl_sched_wakeup_granularity);
}
static unsigned long
wakeup_gran(struct sched_entity *curr, struct sched_entity *se)
{
	unsigned long gran = sysctl_sched_wakeup_granularity;

	if (cfs_rq_of(curr)->curr && sched_feat(ADAPTIVE_GRAN))
		gran = adaptive_gran(curr, se);

	/*
	 * Since it's curr running now, convert the gran from real-time
	 * to virtual-time in its units.
	 */
	if (sched_feat(ASYM_GRAN)) {
		/*
		 * By using 'se' instead of 'curr' we penalize light tasks, so
		 * they get preempted easier. That is, if 'se' < 'curr' then
		 * the resulting gran will be larger, therefore penalizing the
		 * lighter, if otoh 'se' > 'curr' then the resulting gran will
		 * be smaller, again penalizing the lighter task.
		 *
		 * This is especially important for buddies when the leftmost
		 * task is higher priority than the buddy.
		 */
		if (unlikely(se->load.weight != NICE_0_LOAD))
			gran = calc_delta_fair(gran, se);
	} else {
		if (unlikely(curr->load.weight != NICE_0_LOAD))
			gran = calc_delta_fair(gran, curr);
	}

	return gran;
}
/*
 * Should 'se' preempt 'curr'.
 */
static int
wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
{
	s64 gran, vdiff = curr->vruntime - se->vruntime;

	if (vdiff <= 0)
		return -1;

	gran = wakeup_gran(curr, se);
	if (vdiff > gran)
		return 1;

	return 0;
}
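
/*
 * Illustrative sketch (not scheduler code): the helper above compares the
 * waking entity's virtual-time lead against the wakeup granularity and
 * returns -1 when 'se' is not ahead of 'curr', 0 when it is ahead by less
 * than the granularity, and 1 when it is ahead by more (preempt).  The
 * example_should_preempt() name below is invented for the illustration.
 */
#if 0	/* example only */
static int example_should_preempt(long long curr_vruntime,
				  long long se_vruntime, long long gran)
{
	long long vdiff = curr_vruntime - se_vruntime;

	if (vdiff <= 0)
		return -1;
	if (vdiff > gran)
		return 1;
	return 0;
}
#endif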
static void set_last_buddy(struct sched_entity *se)
{
	if (likely(task_of(se)->policy != SCHED_IDLE)) {
		for_each_sched_entity(se)
			cfs_rq_of(se)->last = se;
	}
}

static void set_next_buddy(struct sched_entity *se)
{
	if (likely(task_of(se)->policy != SCHED_IDLE)) {
		for_each_sched_entity(se)
			cfs_rq_of(se)->next = se;
	}
}
/*
 * Preempt the current task with a newly woken task if needed:
 */
static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int sync)
{
	struct task_struct *curr = rq->curr;
	struct sched_entity *se = &curr->se, *pse = &p->se;
	struct cfs_rq *cfs_rq = task_cfs_rq(curr);

	update_curr(cfs_rq);

	if (unlikely(rt_prio(p->prio))) {
		resched_task(curr);
		return;
	}

	if (unlikely(p->sched_class != &fair_sched_class))
		return;

	if (unlikely(se == pse))
		return;

	/*
	 * Only set the backward buddy when the current task is still on the
	 * rq. This can happen when a wakeup gets interleaved with schedule on
	 * the ->pre_schedule() or idle_balance() point, either of which can
	 * drop the rq lock.
	 *
	 * Also, during early boot the idle thread is in the fair class, for
	 * obvious reasons it's a bad idea to schedule back to the idle thread.
	 */
	if (sched_feat(LAST_BUDDY) && likely(se->on_rq && curr != rq->idle))
		set_last_buddy(se);
	set_next_buddy(pse);

	/*
	 * We can come here with TIF_NEED_RESCHED already set from new task
	 * wake up path.
	 */
	if (test_tsk_need_resched(curr))
		return;

	/*
	 * Batch and idle tasks do not preempt (their preemption is driven by
	 * the tick):
	 */
	if (unlikely(p->policy != SCHED_NORMAL))
		return;

	/* Idle tasks are by definition preempted by everybody. */
	if (unlikely(curr->policy == SCHED_IDLE)) {
		resched_task(curr);
		return;
	}

	if (!sched_feat(WAKEUP_PREEMPT))
		return;

	if (sched_feat(WAKEUP_OVERLAP) && (sync ||
			(se->avg_overlap < sysctl_sched_migration_cost &&
			 pse->avg_overlap < sysctl_sched_migration_cost))) {
		resched_task(curr);
		return;
	}

	find_matching_se(&se, &pse);

	if (wakeup_preempt_entity(se, pse) == 1)
		resched_task(curr);
}
static struct task_struct *pick_next_task_fair(struct rq *rq)
{
	struct task_struct *p;
	struct cfs_rq *cfs_rq = &rq->cfs;
	struct sched_entity *se;

	if (unlikely(!cfs_rq->nr_running))
		return NULL;

	do {
		se = pick_next_entity(cfs_rq);
		/*
		 * If se was a buddy, clear it so that it will have to earn
		 * the favour again.
		 */
		__clear_buddies(cfs_rq, se);
		set_next_entity(cfs_rq, se);
		cfs_rq = group_cfs_rq(se);
	} while (cfs_rq);

	p = task_of(se);
	hrtick_start_fair(rq, p);

	return p;
}
/*
 * Account for a descheduled task:
 */
static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
{
	struct sched_entity *se = &prev->se;
	struct cfs_rq *cfs_rq;

	for_each_sched_entity(se) {
		cfs_rq = cfs_rq_of(se);
		put_prev_entity(cfs_rq, se);
	}
}
/**************************************************
 * Fair scheduling class load-balancing methods:
 */

#ifdef CONFIG_SMP

/*
 * Load-balancing iterator. Note: while the runqueue stays locked
 * during the whole iteration, the current task might be
 * dequeued so the iterator has to be dequeue-safe. Here we
 * achieve that by always pre-iterating before returning
 * the current task:
 */
static struct task_struct *
__load_balance_iterator(struct cfs_rq *cfs_rq, struct list_head *next)
{
	struct task_struct *p = NULL;
	struct sched_entity *se;

	if (next == &cfs_rq->tasks)
		return NULL;

	se = list_entry(next, struct sched_entity, group_node);
	p = task_of(se);
	cfs_rq->balance_iterator = next->next;

	return p;
}
static struct task_struct *load_balance_start_fair(void *arg)
{
	struct cfs_rq *cfs_rq = arg;

	return __load_balance_iterator(cfs_rq, cfs_rq->tasks.next);
}

static struct task_struct *load_balance_next_fair(void *arg)
{
	struct cfs_rq *cfs_rq = arg;

	return __load_balance_iterator(cfs_rq, cfs_rq->balance_iterator);
}
static unsigned long
__load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
		unsigned long max_load_move, struct sched_domain *sd,
		enum cpu_idle_type idle, int *all_pinned, int *this_best_prio,
		struct cfs_rq *cfs_rq)
{
	struct rq_iterator cfs_rq_iterator;

	cfs_rq_iterator.start = load_balance_start_fair;
	cfs_rq_iterator.next = load_balance_next_fair;
	cfs_rq_iterator.arg = cfs_rq;

	return balance_tasks(this_rq, this_cpu, busiest,
			max_load_move, sd, idle, all_pinned,
			this_best_prio, &cfs_rq_iterator);
}
#ifdef CONFIG_FAIR_GROUP_SCHED
static unsigned long
load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
		  unsigned long max_load_move,
		  struct sched_domain *sd, enum cpu_idle_type idle,
		  int *all_pinned, int *this_best_prio)
{
	long rem_load_move = max_load_move;
	int busiest_cpu = cpu_of(busiest);
	struct task_group *tg;

	rcu_read_lock();
	update_h_load(busiest_cpu);

	list_for_each_entry_rcu(tg, &task_groups, list) {
		struct cfs_rq *busiest_cfs_rq = tg->cfs_rq[busiest_cpu];
		unsigned long busiest_h_load = busiest_cfs_rq->h_load;
		unsigned long busiest_weight = busiest_cfs_rq->load.weight;
		u64 rem_load, moved_load;

		/*
		 * empty group
		 */
		if (!busiest_cfs_rq->task_weight)
			continue;

		rem_load = (u64)rem_load_move * busiest_weight;
		rem_load = div_u64(rem_load, busiest_h_load + 1);

		moved_load = __load_balance_fair(this_rq, this_cpu, busiest,
				rem_load, sd, idle, all_pinned, this_best_prio,
				tg->cfs_rq[busiest_cpu]);
		if (!moved_load)
			continue;

		moved_load *= busiest_h_load;
		moved_load = div_u64(moved_load, busiest_weight + 1);

		rem_load_move -= moved_load;
		if (rem_load_move < 0)
			break;
	}
	rcu_read_unlock();

	return max_load_move - rem_load_move;
}
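
/*
 * Illustrative sketch (not scheduler code): the group-aware balancer above
 * converts the amount still to move into each group's local weight units
 * before calling __load_balance_fair(), then scales the moved amount back.
 * The helper names below are invented for the example; the real code uses
 * div_u64() and the +1 terms to avoid dividing by zero.
 */
#if 0	/* example only */
static unsigned long long example_to_group_units(unsigned long long rem_load,
						 unsigned long group_weight,
						 unsigned long group_h_load)
{
	return rem_load * group_weight / (group_h_load + 1);
}

static unsigned long long example_to_global_units(unsigned long long moved,
						  unsigned long group_weight,
						  unsigned long group_h_load)
{
	return moved * group_h_load / (group_weight + 1);
}
#endif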
#else
static unsigned long
load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
		  unsigned long max_load_move,
		  struct sched_domain *sd, enum cpu_idle_type idle,
		  int *all_pinned, int *this_best_prio)
{
	return __load_balance_fair(this_rq, this_cpu, busiest,
			max_load_move, sd, idle, all_pinned,
			this_best_prio, &busiest->cfs);
}
#endif
static int
move_one_task_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
		   struct sched_domain *sd, enum cpu_idle_type idle)
{
	struct cfs_rq *busy_cfs_rq;
	struct rq_iterator cfs_rq_iterator;

	cfs_rq_iterator.start = load_balance_start_fair;
	cfs_rq_iterator.next = load_balance_next_fair;

	for_each_leaf_cfs_rq(busiest, busy_cfs_rq) {
		/*
		 * pass busy_cfs_rq argument into
		 * load_balance_[start|next]_fair iterators
		 */
		cfs_rq_iterator.arg = busy_cfs_rq;
		if (iter_move_one_task(this_rq, this_cpu, busiest, sd, idle,
				       &cfs_rq_iterator))
			return 1;
	}

	return 0;
}
#endif /* CONFIG_SMP */
/*
 * scheduler tick hitting a task of our scheduling class:
 */
static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
{
	struct cfs_rq *cfs_rq;
	struct sched_entity *se = &curr->se;

	for_each_sched_entity(se) {
		cfs_rq = cfs_rq_of(se);
		entity_tick(cfs_rq, se, queued);
	}
}
/*
 * Share the fairness runtime between parent and child, thus the
 * total amount of pressure for CPU stays equal - new tasks
 * get a chance to run but frequent forkers are not allowed to
 * monopolize the CPU. Note: the parent runqueue is locked,
 * the child is not running yet.
 */
static void task_new_fair(struct rq *rq, struct task_struct *p)
{
	struct cfs_rq *cfs_rq = task_cfs_rq(p);
	struct sched_entity *se = &p->se, *curr = cfs_rq->curr;
	int this_cpu = smp_processor_id();

	sched_info_queued(p);

	update_curr(cfs_rq);
	place_entity(cfs_rq, se, 1);

	/* 'curr' will be NULL if the child belongs to a different group */
	if (sysctl_sched_child_runs_first && this_cpu == task_cpu(p) &&
			curr && entity_before(curr, se)) {
		/*
		 * Upon rescheduling, sched_class::put_prev_task() will place
		 * 'current' within the tree based on its new key value.
		 */
		swap(curr->vruntime, se->vruntime);
		resched_task(rq->curr);
	}

	enqueue_task_fair(rq, p, 0);
}
/*
 * Priority of the task has changed. Check to see if we preempt
 * the current task.
 */
static void prio_changed_fair(struct rq *rq, struct task_struct *p,
			      int oldprio, int running)
{
	/*
	 * Reschedule if we are currently running on this runqueue and
	 * our priority decreased, or if we are not currently running on
	 * this runqueue and our priority is higher than the current's
	 */
	if (running) {
		if (p->prio > oldprio)
			resched_task(rq->curr);
	} else
		check_preempt_curr(rq, p, 0);
}
/*
 * We switched to the sched_fair class.
 */
static void switched_to_fair(struct rq *rq, struct task_struct *p,
			     int running)
{
	/*
	 * We were most likely switched from sched_rt, so
	 * kick off the schedule if running, otherwise just see
	 * if we can still preempt the current task.
	 */
	if (running)
		resched_task(rq->curr);
	else
		check_preempt_curr(rq, p, 0);
}
/* Account for a task changing its policy or group.
 *
 * This routine is mostly called to set cfs_rq->curr field when a task
 * migrates between groups/classes.
 */
static void set_curr_task_fair(struct rq *rq)
{
	struct sched_entity *se = &rq->curr->se;

	for_each_sched_entity(se)
		set_next_entity(cfs_rq_of(se), se);
}
#ifdef CONFIG_FAIR_GROUP_SCHED
static void moved_group_fair(struct task_struct *p)
{
	struct cfs_rq *cfs_rq = task_cfs_rq(p);

	update_curr(cfs_rq);
	place_entity(cfs_rq, &p->se, 1);
}
#endif
/*
 * All the scheduling class methods:
 */
static const struct sched_class fair_sched_class = {
	.next			= &idle_sched_class,
	.enqueue_task		= enqueue_task_fair,
	.dequeue_task		= dequeue_task_fair,
	.yield_task		= yield_task_fair,

	.check_preempt_curr	= check_preempt_wakeup,

	.pick_next_task		= pick_next_task_fair,
	.put_prev_task		= put_prev_task_fair,

#ifdef CONFIG_SMP
	.select_task_rq		= select_task_rq_fair,

	.load_balance		= load_balance_fair,
	.move_one_task		= move_one_task_fair,
#endif

	.set_curr_task		= set_curr_task_fair,
	.task_tick		= task_tick_fair,
	.task_new		= task_new_fair,

	.prio_changed		= prio_changed_fair,
	.switched_to		= switched_to_fair,

#ifdef CONFIG_FAIR_GROUP_SCHED
	.moved_group		= moved_group_fair,
#endif
};
#ifdef CONFIG_SCHED_DEBUG
static void print_cfs_stats(struct seq_file *m, int cpu)
{
	struct cfs_rq *cfs_rq;

	rcu_read_lock();
	for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
		print_cfs_rq(m, cpu, cfs_rq);
	rcu_read_unlock();
}
#endif