/*
 * Kernel scheduler and related syscalls
 *
 * Copyright (C) 1991-2002  Linus Torvalds
 *
 * 1996-12-23  Modified by Dave Grothe to fix bugs in semaphores and
 *             make semaphores SMP safe
 * 1998-11-19  Implemented schedule_timeout() and related stuff
 * 2002-01-04  New ultra-scalable O(1) scheduler by Ingo Molnar:
 *             hybrid priority-list and round-robin design with
 *             an array-switch method of distributing timeslices
 *             and per-CPU runqueues.  Cleanups and useful suggestions
 *             by Davide Libenzi, preemptible kernel bits by Robert Love.
 */
#include <linux/nmi.h>
#include <linux/init.h>
#include <asm/uaccess.h>
#include <linux/highmem.h>
#include <linux/smp_lock.h>
#include <asm/mmu_context.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/kernel_stat.h>
#include <linux/security.h>
#include <linux/notifier.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/rcupdate.h>
#include <linux/cpu.h>

#ifdef CONFIG_NUMA
#define cpu_to_node_mask(cpu) node_to_cpumask(cpu_to_node(cpu))
#else
#define cpu_to_node_mask(cpu) (cpu_online_map)
#endif
/*
 * Convert user-nice values [ -20 ... 0 ... 19 ]
 * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
 * and back.
 */
#define NICE_TO_PRIO(nice)	(MAX_RT_PRIO + (nice) + 20)
#define PRIO_TO_NICE(prio)	((prio) - MAX_RT_PRIO - 20)
#define TASK_NICE(p)		PRIO_TO_NICE((p)->static_prio)
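/*
 * Worked example of the mapping above (with the usual values
 * MAX_RT_PRIO == 100 and MAX_PRIO == 140, so static priorities
 * 100..139 correspond to nice -20..+19):
 *
 *	NICE_TO_PRIO(-20) == 100	PRIO_TO_NICE(100) == -20
 *	NICE_TO_PRIO(  0) == 120	PRIO_TO_NICE(120) ==   0
 *	NICE_TO_PRIO(+19) == 139	PRIO_TO_NICE(139) == +19
 */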
/*
 * 'User priority' is the nice value converted to something we
 * can work with better when scaling various scheduler parameters;
 * it's a [ 0 ... 39 ] range.
 */
#define USER_PRIO(p)		((p)-MAX_RT_PRIO)
#define TASK_USER_PRIO(p)	USER_PRIO((p)->static_prio)
#define MAX_USER_PRIO		(USER_PRIO(MAX_PRIO))

/*
 * These are the 'tuning knobs' of the scheduler:
 *
 * Minimum timeslice is 10 msecs, default timeslice is 100 msecs,
 * maximum timeslice is 200 msecs. Timeslices get refilled after
 * they expire.
 */
#define MIN_TIMESLICE		( 10 * HZ / 1000)
#define MAX_TIMESLICE		(200 * HZ / 1000)
#define CHILD_PENALTY		50
#define PARENT_PENALTY		100
#define EXIT_WEIGHT		3
#define PRIO_BONUS_RATIO	25
#define INTERACTIVE_DELTA	2
#define MAX_SLEEP_AVG		(10*HZ)
#define STARVATION_LIMIT	(10*HZ)
#define NODE_THRESHOLD		125
/*
 * If a task is 'interactive' then we reinsert it in the active
 * array after it has expired its current timeslice. (it will not
 * continue to run immediately, it will still roundrobin with
 * other interactive tasks.)
 *
 * This part scales the interactivity limit depending on niceness.
 *
 * We scale it linearly, offset by the INTERACTIVE_DELTA delta.
 * Here are a few examples of different nice levels:
 *
 *  TASK_INTERACTIVE(-20): [1,1,1,1,1,1,1,1,1,0,0]
 *  TASK_INTERACTIVE(-10): [1,1,1,1,1,1,1,0,0,0,0]
 *  TASK_INTERACTIVE(  0): [1,1,1,1,0,0,0,0,0,0,0]
 *  TASK_INTERACTIVE( 10): [1,1,0,0,0,0,0,0,0,0,0]
 *  TASK_INTERACTIVE( 19): [0,0,0,0,0,0,0,0,0,0,0]
 *
 * (the X axis represents the possible -5 ... 0 ... +5 dynamic
 *  priority range a task can explore, a value of '1' means the
 *  task is rated interactive.)
 *
 * Ie. nice +19 tasks can never get 'interactive' enough to be
 * reinserted into the active array. And only heavily CPU-hog nice -20
 * tasks will be expired. Default nice 0 tasks are somewhere in between;
 * it takes some effort for them to get interactive, but it's not
 * too hard.
 */

#define SCALE(v1,v1_max,v2_max) \
	(v1) * (v2_max) / (v1_max)

#define DELTA(p) \
	(SCALE(TASK_NICE(p), 40, MAX_USER_PRIO*PRIO_BONUS_RATIO/100) + \
		INTERACTIVE_DELTA)

#define TASK_INTERACTIVE(p) \
	((p)->prio <= (p)->static_prio - DELTA(p))
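/*
 * Worked example of the macros above (with MAX_USER_PRIO == 40 and
 * PRIO_BONUS_RATIO == 25, so the scale target is 10): for a nice 0
 * task, DELTA(p) = SCALE(0, 40, 10) + 2 = 2, so the task is rated
 * interactive only while its dynamic priority is at least 2 below
 * its static priority, i.e. once it has earned a sleep bonus of 2 or
 * more.  For nice +19, DELTA(p) = 19*10/40 + 2 = 6, which exceeds the
 * maximum bonus of 5 - such a task can never be rated interactive,
 * matching the all-zero row in the table above.
 */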
/*
 * BASE_TIMESLICE scales user-nice values [ -20 ... 19 ]
 * to time slice values.
 *
 * The higher a thread's priority, the bigger timeslices
 * it gets during one round of execution. But even the lowest
 * priority thread gets MIN_TIMESLICE worth of execution time.
 *
 * task_timeslice() is the interface that is used by the scheduler.
 */

#define BASE_TIMESLICE(p) (MIN_TIMESLICE + \
	((MAX_TIMESLICE - MIN_TIMESLICE) * (MAX_PRIO-1-(p)->static_prio)/(MAX_USER_PRIO - 1)))

static inline unsigned int task_timeslice(task_t *p)
{
	return BASE_TIMESLICE(p);
}
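/*
 * Worked example (assuming HZ=1000, so MIN_TIMESLICE == 10 ticks and
 * MAX_TIMESLICE == 200 ticks, with static priorities 100..139):
 *
 *	nice -20 (static_prio 100): 10 + 190*39/39 == 200 ticks (200ms)
 *	nice   0 (static_prio 120): 10 + 190*19/39 == 102 ticks (~100ms)
 *	nice +19 (static_prio 139): 10 + 190* 0/39 ==  10 ticks (10ms)
 */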
/*
 * These are the runqueue data structures:
 */

#define BITMAP_SIZE ((((MAX_PRIO+1+7)/8)+sizeof(long)-1)/sizeof(long))

typedef struct runqueue runqueue_t;

struct prio_array {
	int nr_active;
	unsigned long bitmap[BITMAP_SIZE];
	struct list_head queue[MAX_PRIO];
};

/*
 * This is the main, per-CPU runqueue data structure.
 *
 * Locking rule: code that wants to lock multiple runqueues
 * (such as the load balancing or the thread migration code)
 * must order lock acquire operations by ascending &runqueue.
 */
struct runqueue {
	spinlock_t lock;
	unsigned long nr_running, nr_switches, expired_timestamp,
			nr_uninterruptible;
	task_t *curr, *idle;
	struct mm_struct *prev_mm;
	prio_array_t *active, *expired, arrays[2];
	int prev_cpu_load[NR_CPUS];
#ifdef CONFIG_NUMA
	atomic_t *node_nr_running;
	int prev_node_load[MAX_NUMNODES];
#endif
	task_t *migration_thread;
	struct list_head migration_queue;

	atomic_t nr_iowait;
} ____cacheline_aligned;

static struct runqueue runqueues[NR_CPUS] __cacheline_aligned;
#define cpu_rq(cpu)		(runqueues + (cpu))
#define this_rq()		cpu_rq(smp_processor_id())
#define task_rq(p)		cpu_rq(task_cpu(p))
#define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
#define rt_task(p)		((p)->prio < MAX_RT_PRIO)
/*
 * Default context-switch locking:
 */
#ifndef prepare_arch_switch
# define prepare_arch_switch(rq, next)	do { } while(0)
# define finish_arch_switch(rq, next)	spin_unlock_irq(&(rq)->lock)
# define task_running(rq, p)		((rq)->curr == (p))
#endif
#ifdef CONFIG_NUMA

/*
 * Keep track of running tasks.
 */

static atomic_t node_nr_running[MAX_NUMNODES] ____cacheline_maxaligned_in_smp =
	{[0 ...MAX_NUMNODES-1] = ATOMIC_INIT(0)};

static inline void nr_running_init(struct runqueue *rq)
{
	rq->node_nr_running = &node_nr_running[0];
}

static inline void nr_running_inc(runqueue_t *rq)
{
	atomic_inc(rq->node_nr_running);
	rq->nr_running++;
}

static inline void nr_running_dec(runqueue_t *rq)
{
	atomic_dec(rq->node_nr_running);
	rq->nr_running--;
}

__init void node_nr_running_init(void)
{
	int i;

	for (i = 0; i < NR_CPUS; i++) {
		cpu_rq(i)->node_nr_running =
			&node_nr_running[cpu_to_node(i)];
	}
}

#else /* !CONFIG_NUMA */

# define nr_running_init(rq)	do { } while (0)
# define nr_running_inc(rq)	do { (rq)->nr_running++; } while (0)
# define nr_running_dec(rq)	do { (rq)->nr_running--; } while (0)

#endif /* CONFIG_NUMA */
/*
 * task_rq_lock - lock the runqueue a given task resides on and disable
 * interrupts. Note the ordering: we can safely lookup the task_rq without
 * explicitly disabling preemption.
 */
static inline runqueue_t *task_rq_lock(task_t *p, unsigned long *flags)
{
	struct runqueue *rq;

repeat_lock_task:
	local_irq_save(*flags);
	rq = task_rq(p);
	spin_lock(&rq->lock);
	if (unlikely(rq != task_rq(p))) {
		spin_unlock_irqrestore(&rq->lock, *flags);
		goto repeat_lock_task;
	}
	return rq;
}

static inline void task_rq_unlock(runqueue_t *rq, unsigned long *flags)
{
	spin_unlock_irqrestore(&rq->lock, *flags);
}

/*
 * rq_lock - lock a given runqueue and disable interrupts.
 */
static inline runqueue_t *this_rq_lock(void)
{
	runqueue_t *rq;

	local_irq_disable();
	rq = this_rq();
	spin_lock(&rq->lock);

	return rq;
}

static inline void rq_unlock(runqueue_t *rq)
{
	spin_unlock_irq(&rq->lock);
}
/*
 * Adding/removing a task to/from a priority array:
 */
static inline void dequeue_task(struct task_struct *p, prio_array_t *array)
{
	array->nr_active--;
	list_del(&p->run_list);
	if (list_empty(array->queue + p->prio))
		__clear_bit(p->prio, array->bitmap);
}

static inline void enqueue_task(struct task_struct *p, prio_array_t *array)
{
	list_add_tail(&p->run_list, array->queue + p->prio);
	__set_bit(p->prio, array->bitmap);
	array->nr_active++;
	p->array = array;
}
/*
 * effective_prio - return the priority that is based on the static
 * priority but is modified by bonuses/penalties.
 *
 * We scale the actual sleep average [0 .... MAX_SLEEP_AVG]
 * into the -5 ... 0 ... +5 bonus/penalty range.
 *
 * We use 25% of the full 0...39 priority range so that:
 *
 * 1) nice +19 interactive tasks do not preempt nice 0 CPU hogs.
 * 2) nice -20 CPU hogs do not get preempted by nice 0 tasks.
 *
 * Both properties are important to certain workloads.
 */
static int effective_prio(task_t *p)
{
	int bonus, prio;

	bonus = MAX_USER_PRIO*PRIO_BONUS_RATIO*p->sleep_avg/MAX_SLEEP_AVG/100 -
			MAX_USER_PRIO*PRIO_BONUS_RATIO/100/2;

	prio = p->static_prio - bonus;
	if (prio < MAX_RT_PRIO)
		prio = MAX_RT_PRIO;
	if (prio > MAX_PRIO-1)
		prio = MAX_PRIO-1;
	return prio;
}
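/*
 * Worked example of the bonus formula (with MAX_USER_PRIO == 40 and
 * PRIO_BONUS_RATIO == 25, i.e. a bonus span of 10): a task with
 * sleep_avg == MAX_SLEEP_AVG gets bonus = 10 - 5 = +5 (maximum boost),
 * a pure CPU hog with sleep_avg == 0 gets bonus = 0 - 5 = -5 (maximum
 * penalty), and a half-slept task lands at 0.  So a nice 0 task's
 * dynamic priority moves within 115..125 around its static 120.
 */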
/*
 * __activate_task - move a task to the runqueue.
 */
static inline void __activate_task(task_t *p, runqueue_t *rq)
{
	enqueue_task(p, rq->active);
	nr_running_inc(rq);
}

/*
 * activate_task - move a task to the runqueue and do priority recalculation
 *
 * Update all the scheduling statistics stuff. (sleep average
 * calculation, priority modifiers, etc.)
 */
static inline void activate_task(task_t *p, runqueue_t *rq)
{
	long sleep_time = jiffies - p->last_run - 1;

	if (sleep_time > 0) {
		int sleep_avg;

		/*
		 * This code gives a bonus to interactive tasks.
		 *
		 * The boost works by updating the 'average sleep time'
		 * value here, based on ->last_run. The more time a task
		 * spends sleeping, the higher the average gets - and the
		 * higher the priority boost gets as well.
		 */
		sleep_avg = p->sleep_avg + sleep_time;

		/*
		 * 'Overflow' bonus ticks go to the waker as well, so the
		 * ticks are not lost. This has the effect of further
		 * boosting tasks that are related to maximum-interactive
		 * tasks.
		 */
		if (sleep_avg > MAX_SLEEP_AVG)
			sleep_avg = MAX_SLEEP_AVG;
		if (p->sleep_avg != sleep_avg) {
			p->sleep_avg = sleep_avg;
			p->prio = effective_prio(p);
		}
	}
	__activate_task(p, rq);
}
/*
 * deactivate_task - remove a task from the runqueue.
 */
static inline void deactivate_task(struct task_struct *p, runqueue_t *rq)
{
	nr_running_dec(rq);
	if (p->state == TASK_UNINTERRUPTIBLE)
		rq->nr_uninterruptible++;
	dequeue_task(p, p->array);
	p->array = NULL;
}

/*
 * resched_task - mark a task 'to be rescheduled now'.
 *
 * On UP this means the setting of the need_resched flag, on SMP it
 * might also involve a cross-CPU call to trigger the scheduler on
 * the target CPU.
 */
#ifdef CONFIG_SMP
static inline void resched_task(task_t *p)
{
	int need_resched, nrpolling;

	preempt_disable();
	/* minimise the chance of sending an interrupt to poll_idle() */
	nrpolling = test_tsk_thread_flag(p,TIF_POLLING_NRFLAG);
	need_resched = test_and_set_tsk_thread_flag(p,TIF_NEED_RESCHED);
	nrpolling |= test_tsk_thread_flag(p,TIF_POLLING_NRFLAG);

	if (!need_resched && !nrpolling && (task_cpu(p) != smp_processor_id()))
		smp_send_reschedule(task_cpu(p));
	preempt_enable();
}
#else
static inline void resched_task(task_t *p)
{
	set_tsk_need_resched(p);
}
#endif
/*
 * wait_task_inactive - wait for a thread to unschedule.
 *
 * The caller must ensure that the task *will* unschedule sometime soon,
 * else this function might spin for a *long* time. This function can't
 * be called with interrupts off, or it may introduce deadlock with
 * smp_call_function() if an IPI is sent by the same process we are
 * waiting to become inactive.
 */
void wait_task_inactive(task_t * p)
{
	unsigned long flags;
	runqueue_t *rq;

repeat:
	preempt_disable();
	rq = task_rq(p);
	if (unlikely(task_running(rq, p))) {
		cpu_relax();
		/*
		 * enable/disable preemption just to make this
		 * a preemption point - we are busy-waiting
		 * anyway.
		 */
		preempt_enable();
		goto repeat;
	}
	rq = task_rq_lock(p, &flags);
	if (unlikely(task_running(rq, p))) {
		task_rq_unlock(rq, &flags);
		preempt_enable();
		goto repeat;
	}
	task_rq_unlock(rq, &flags);
	preempt_enable();
}
/*
 * try_to_wake_up - wake up a thread
 * @p: the to-be-woken-up thread
 * @state: the mask of task states that can be woken
 * @sync: do a synchronous wakeup?
 * @kick: kick the CPU if the task is already running?
 *
 * Put it on the run-queue if it's not already there. The "current"
 * thread is always on the run-queue (except when the actual
 * re-schedule is in progress), and as such you're allowed to do
 * the simpler "current->state = TASK_RUNNING" to mark yourself
 * runnable without the overhead of this.
 *
 * returns failure only if the task is already active.
 */
static int try_to_wake_up(task_t * p, unsigned int state, int sync, int kick)
{
	int success = 0;
	unsigned long flags;
	long old_state;
	runqueue_t *rq;

repeat_lock_task:
	rq = task_rq_lock(p, &flags);
	old_state = p->state;
	if (old_state & state) {
		if (!p->array) {
			/*
			 * Fast-migrate the task if it's not running or runnable
			 * currently. Do not violate hard affinity.
			 */
			if (unlikely(sync && !task_running(rq, p) &&
				(task_cpu(p) != smp_processor_id()) &&
				(p->cpus_allowed & (1UL << smp_processor_id())))) {

				set_task_cpu(p, smp_processor_id());
				task_rq_unlock(rq, &flags);
				goto repeat_lock_task;
			}
			if (old_state == TASK_UNINTERRUPTIBLE)
				rq->nr_uninterruptible--;
			if (sync)
				__activate_task(p, rq);
			else {
				activate_task(p, rq);
				if (p->prio < rq->curr->prio)
					resched_task(rq->curr);
			}
			success = 1;
		}
#ifdef CONFIG_SMP
		else if (unlikely(kick) && task_running(rq, p) && (p->thread_info->cpu != smp_processor_id()))
			smp_send_reschedule(p->thread_info->cpu);
#endif
		p->state = TASK_RUNNING;
	}
	task_rq_unlock(rq, &flags);

	return success;
}
int wake_up_process(task_t * p)
{
	return try_to_wake_up(p, TASK_STOPPED | TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE, 0, 0);
}

int wake_up_process_kick(task_t * p)
{
	return try_to_wake_up(p, TASK_STOPPED | TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE, 0, 1);
}

int wake_up_state(task_t *p, unsigned int state)
{
	return try_to_wake_up(p, state, 0, 0);
}
/*
 * wake_up_forked_process - wake up a freshly forked process.
 *
 * This function will do some initial scheduler statistics housekeeping
 * that must be done for every newly created process.
 */
void wake_up_forked_process(task_t * p)
{
	unsigned long flags;
	runqueue_t *rq = task_rq_lock(current, &flags);

	p->state = TASK_RUNNING;
	/*
	 * We decrease the sleep average of forking parents
	 * and children as well, to keep max-interactive tasks
	 * from forking tasks that are max-interactive.
	 */
	current->sleep_avg = current->sleep_avg * PARENT_PENALTY / 100;
	p->sleep_avg = p->sleep_avg * CHILD_PENALTY / 100;
	p->prio = effective_prio(p);
	set_task_cpu(p, smp_processor_id());

	if (unlikely(!current->array))
		__activate_task(p, rq);
	else {
		p->prio = current->prio;
		list_add_tail(&p->run_list, &current->run_list);
		p->array = current->array;
		p->array->nr_active++;
		nr_running_inc(rq);
	}
	task_rq_unlock(rq, &flags);
}
/*
 * Potentially available exiting-child timeslices are
 * retrieved here - this way the parent does not get
 * penalized for creating too many threads.
 *
 * (this cannot be used to 'generate' timeslices
 * artificially, because any timeslice recovered here
 * was given away by the parent in the first place.)
 */
void sched_exit(task_t * p)
{
	unsigned long flags;

	local_irq_save(flags);
	if (p->first_time_slice) {
		p->parent->time_slice += p->time_slice;
		if (unlikely(p->parent->time_slice > MAX_TIMESLICE))
			p->parent->time_slice = MAX_TIMESLICE;
	}
	local_irq_restore(flags);
	/*
	 * If the child was a (relative-) CPU hog then decrease
	 * the sleep_avg of the parent as well.
	 */
	if (p->sleep_avg < p->parent->sleep_avg)
		p->parent->sleep_avg = (p->parent->sleep_avg * EXIT_WEIGHT +
			p->sleep_avg) / (EXIT_WEIGHT + 1);
}
/*
 * finish_task_switch - clean up after a task-switch
 * @prev: the thread we just switched away from.
 *
 * We enter this with the runqueue still locked, and finish_arch_switch()
 * will unlock it along with doing any other architecture-specific cleanup
 * actions.
 *
 * Note that we may have delayed dropping an mm in context_switch(). If
 * so, we finish that here outside of the runqueue lock. (Doing it
 * with the lock held can cause deadlocks; see schedule() for
 * details.)
 */
static inline void finish_task_switch(task_t *prev)
{
	runqueue_t *rq = this_rq();
	struct mm_struct *mm = rq->prev_mm;

	rq->prev_mm = NULL;
	finish_arch_switch(rq, prev);
	if (mm)
		mmdrop(mm);
	if (prev->state & (TASK_DEAD | TASK_ZOMBIE))
		put_task_struct(prev);
}

/*
 * schedule_tail - first thing a freshly forked thread must call.
 * @prev: the thread we just switched away from.
 */
asmlinkage void schedule_tail(task_t *prev)
{
	finish_task_switch(prev);

	if (current->set_child_tid)
		put_user(current->pid, current->set_child_tid);
}
/*
 * context_switch - switch to the new MM and the new
 * thread's register state.
 */
static inline task_t * context_switch(runqueue_t *rq, task_t *prev, task_t *next)
{
	struct mm_struct *mm = next->mm;
	struct mm_struct *oldmm = prev->active_mm;

	if (unlikely(!mm)) {
		next->active_mm = oldmm;
		atomic_inc(&oldmm->mm_count);
		enter_lazy_tlb(oldmm, next, smp_processor_id());
	} else
		switch_mm(oldmm, mm, next, smp_processor_id());

	if (unlikely(!prev->mm)) {
		prev->active_mm = NULL;
		WARN_ON(rq->prev_mm);
		rq->prev_mm = oldmm;
	}

	/* Here we just switch the register state and the stack. */
	switch_to(prev, next, prev);

	return prev;
}
/*
 * nr_running, nr_uninterruptible and nr_context_switches:
 *
 * externally visible scheduler statistics: current number of runnable
 * threads, current number of uninterruptible-sleeping threads, total
 * number of context switches performed since bootup.
 */
unsigned long nr_running(void)
{
	unsigned long i, sum = 0;

	for (i = 0; i < NR_CPUS; i++)
		sum += cpu_rq(i)->nr_running;

	return sum;
}

unsigned long nr_uninterruptible(void)
{
	unsigned long i, sum = 0;

	for (i = 0; i < NR_CPUS; i++) {
		if (!cpu_online(i))
			continue;
		sum += cpu_rq(i)->nr_uninterruptible;
	}
	return sum;
}

unsigned long nr_context_switches(void)
{
	unsigned long i, sum = 0;

	for (i = 0; i < NR_CPUS; i++) {
		if (!cpu_online(i))
			continue;
		sum += cpu_rq(i)->nr_switches;
	}
	return sum;
}

unsigned long nr_iowait(void)
{
	unsigned long i, sum = 0;

	for (i = 0; i < NR_CPUS; ++i) {
		if (!cpu_online(i))
			continue;
		sum += atomic_read(&cpu_rq(i)->nr_iowait);
	}
	return sum;
}
/*
 * double_rq_lock - safely lock two runqueues
 *
 * Note this does not disable interrupts like task_rq_lock,
 * you need to do so manually before calling.
 */
static inline void double_rq_lock(runqueue_t *rq1, runqueue_t *rq2)
{
	if (rq1 == rq2)
		spin_lock(&rq1->lock);
	else {
		if (rq1 < rq2) {
			spin_lock(&rq1->lock);
			spin_lock(&rq2->lock);
		} else {
			spin_lock(&rq2->lock);
			spin_lock(&rq1->lock);
		}
	}
}

/*
 * double_rq_unlock - safely unlock two runqueues
 *
 * Note this does not restore interrupts like task_rq_unlock,
 * you need to do so manually after calling.
 */
static inline void double_rq_unlock(runqueue_t *rq1, runqueue_t *rq2)
{
	spin_unlock(&rq1->lock);
	if (rq1 != rq2)
		spin_unlock(&rq2->lock);
}
#ifdef CONFIG_NUMA
/*
 * If dest_cpu is allowed for this process, migrate the task to it.
 * This is accomplished by forcing the cpus_allowed mask to only
 * allow dest_cpu, which will force the task onto dest_cpu. Then
 * the cpus_allowed mask is restored.
 */
static void sched_migrate_task(task_t *p, int dest_cpu)
{
	unsigned long old_mask;

	old_mask = p->cpus_allowed;
	if (!(old_mask & (1UL << dest_cpu)))
		return;
	/* force the process onto the specified CPU */
	set_cpus_allowed(p, 1UL << dest_cpu);

	/* restore the cpus allowed mask */
	set_cpus_allowed(p, old_mask);
}
/*
 * Find the least loaded CPU. Slightly favor the current CPU by
 * setting its runqueue length as the minimum to start.
 */
static int sched_best_cpu(struct task_struct *p)
{
	int i, minload, load, best_cpu, node = 0;
	unsigned long cpumask;

	best_cpu = task_cpu(p);
	if (cpu_rq(best_cpu)->nr_running <= 2)
		return best_cpu;

	minload = 10000000;
	for_each_node_with_cpus(i) {
		load = atomic_read(&node_nr_running[i]);
		if (load < minload) {
			minload = load;
			node = i;
		}
	}

	minload = 10000000;
	cpumask = node_to_cpumask(node);
	for (i = 0; i < NR_CPUS; ++i) {
		if (!(cpumask & (1UL << i)))
			continue;
		if (cpu_rq(i)->nr_running < minload) {
			best_cpu = i;
			minload = cpu_rq(i)->nr_running;
		}
	}
	return best_cpu;
}

void sched_balance_exec(void)
{
	int new_cpu;

	if (numnodes > 1) {
		new_cpu = sched_best_cpu(current);
		if (new_cpu != smp_processor_id())
			sched_migrate_task(current, new_cpu);
	}
}
/*
 * Find the busiest node. All previous node loads contribute with a
 * geometrically decaying weight to the load measure:
 *	load_{t} = load_{t-1}/2 + nr_node_running_{t}
 * This way sudden load peaks are flattened out a bit.
 */
static int find_busiest_node(int this_node)
{
	int i, node = -1, load, this_load, maxload;

	this_load = maxload = (this_rq()->prev_node_load[this_node] >> 1)
		+ atomic_read(&node_nr_running[this_node]);
	this_rq()->prev_node_load[this_node] = this_load;
	for (i = 0; i < numnodes; i++) {
		if (i == this_node)
			continue;
		load = (this_rq()->prev_node_load[i] >> 1)
			+ atomic_read(&node_nr_running[i]);
		this_rq()->prev_node_load[i] = load;
		if (load > maxload && (100*load > NODE_THRESHOLD*this_load)) {
			maxload = load;
			node = i;
		}
	}
	return node;
}
#endif /* CONFIG_NUMA */
#ifdef CONFIG_SMP

/*
 * double_lock_balance - lock the busiest runqueue
 *
 * this_rq is locked already. Recalculate nr_running if we have to
 * drop the runqueue lock.
 */
static inline unsigned int double_lock_balance(runqueue_t *this_rq,
	runqueue_t *busiest, int this_cpu, int idle, unsigned int nr_running)
{
	if (unlikely(!spin_trylock(&busiest->lock))) {
		if (busiest < this_rq) {
			spin_unlock(&this_rq->lock);
			spin_lock(&busiest->lock);
			spin_lock(&this_rq->lock);
			/* Need to recalculate nr_running */
			if (idle || (this_rq->nr_running > this_rq->prev_cpu_load[this_cpu]))
				nr_running = this_rq->nr_running;
			else
				nr_running = this_rq->prev_cpu_load[this_cpu];
		} else
			spin_lock(&busiest->lock);
	}
	return nr_running;
}
/*
 * find_busiest_queue - find the busiest runqueue among the cpus in cpumask.
 */
static inline runqueue_t *find_busiest_queue(runqueue_t *this_rq, int this_cpu, int idle, int *imbalance, unsigned long cpumask)
{
	int nr_running, load, max_load, i;
	runqueue_t *busiest, *rq_src;

	/*
	 * We search all runqueues to find the most busy one.
	 * We do this lockless to reduce cache-bouncing overhead,
	 * we re-check the 'best' source CPU later on again, with
	 * the lock held.
	 *
	 * We fend off statistical fluctuations in runqueue lengths by
	 * saving the runqueue length during the previous load-balancing
	 * operation and using the smaller one of the current and saved lengths.
	 * If a runqueue is long enough for a longer amount of time then
	 * we recognize it and pull tasks from it.
	 *
	 * The 'current runqueue length' is a statistical maximum variable,
	 * for that one we take the longer one - to avoid fluctuations in
	 * the other direction. So for a load-balance to happen it needs
	 * stable long runqueue on the target CPU and stable short runqueue
	 * on the local runqueue.
	 *
	 * We make an exception if this CPU is about to become idle - in
	 * that case we are less picky about moving a task across CPUs and
	 * take what can be taken.
	 */
	if (idle || (this_rq->nr_running > this_rq->prev_cpu_load[this_cpu]))
		nr_running = this_rq->nr_running;
	else
		nr_running = this_rq->prev_cpu_load[this_cpu];

	busiest = NULL;
	max_load = 1;
	for (i = 0; i < NR_CPUS; i++) {
		if (!((1UL << i) & cpumask))
			continue;

		rq_src = cpu_rq(i);
		if (idle || (rq_src->nr_running < this_rq->prev_cpu_load[i]))
			load = rq_src->nr_running;
		else
			load = this_rq->prev_cpu_load[i];
		this_rq->prev_cpu_load[i] = rq_src->nr_running;

		if ((load > max_load) && (rq_src != this_rq)) {
			busiest = rq_src;
			max_load = load;
		}
	}

	if (likely(!busiest))
		goto out;

	*imbalance = (max_load - nr_running) / 2;

	/* It needs an at least ~25% imbalance to trigger balancing. */
	if (!idle && (*imbalance < (max_load + 3)/4)) {
		busiest = NULL;
		goto out;
	}

	nr_running = double_lock_balance(this_rq, busiest, this_cpu, idle, nr_running);
	/*
	 * Make sure nothing changed since we checked the
	 * runqueue length.
	 */
	if (busiest->nr_running <= nr_running + 1) {
		spin_unlock(&busiest->lock);
		busiest = NULL;
	}
out:
	return busiest;
}
/*
 * pull_task - move a task from a remote runqueue to the local runqueue.
 * Both runqueues must be locked.
 */
static inline void pull_task(runqueue_t *src_rq, prio_array_t *src_array, task_t *p, runqueue_t *this_rq, int this_cpu)
{
	dequeue_task(p, src_array);
	nr_running_dec(src_rq);
	set_task_cpu(p, this_cpu);
	nr_running_inc(this_rq);
	enqueue_task(p, this_rq->active);
	/*
	 * Note that idle threads have a prio of MAX_PRIO, for this test
	 * to be always true for them.
	 */
	if (p->prio < this_rq->curr->prio)
		set_need_resched();
	else {
		if (p->prio == this_rq->curr->prio &&
				p->time_slice > this_rq->curr->time_slice)
			set_need_resched();
	}
}
/*
 * Current runqueue is empty, or rebalance tick: if there is an
 * imbalance (current runqueue is too short) then pull from
 * busiest runqueue(s).
 *
 * We call this with the current runqueue locked,
 * irqs disabled.
 */
static void load_balance(runqueue_t *this_rq, int idle, unsigned long cpumask)
{
	int imbalance, idx, this_cpu = smp_processor_id();
	runqueue_t *busiest;
	prio_array_t *array;
	struct list_head *head, *curr;
	task_t *tmp;

	busiest = find_busiest_queue(this_rq, this_cpu, idle, &imbalance, cpumask);
	if (!busiest)
		goto out;

	/*
	 * We first consider expired tasks. Those will likely not be
	 * executed in the near future, and they are most likely to
	 * be cache-cold, thus switching CPUs has the least effect
	 * on them.
	 */
	if (busiest->expired->nr_active)
		array = busiest->expired;
	else
		array = busiest->active;

new_array:
	/* Start searching at priority 0: */
	idx = 0;
skip_bitmap:
	if (!idx)
		idx = sched_find_first_bit(array->bitmap);
	else
		idx = find_next_bit(array->bitmap, MAX_PRIO, idx);
	if (idx >= MAX_PRIO) {
		if (array == busiest->expired) {
			array = busiest->active;
			goto new_array;
		}
		goto out_unlock;
	}

	head = array->queue + idx;
	curr = head->prev;
skip_queue:
	tmp = list_entry(curr, task_t, run_list);

	/*
	 * We do not migrate tasks that are:
	 * 1) running (obviously), or
	 * 2) cannot be migrated to this CPU due to cpus_allowed, or
	 * 3) are cache-hot on their current CPU.
	 */

#define CAN_MIGRATE_TASK(p,rq,this_cpu)				\
	((jiffies - (p)->last_run > cache_decay_ticks) &&	\
		!task_running(rq, p) &&				\
		((p)->cpus_allowed & (1UL << (this_cpu))))

	curr = curr->prev;

	if (!CAN_MIGRATE_TASK(tmp, busiest, this_cpu)) {
		if (curr != head)
			goto skip_queue;
		idx++;
		goto skip_bitmap;
	}
	pull_task(busiest, array, tmp, this_rq, this_cpu);
	if (!idle && --imbalance) {
		if (curr != head)
			goto skip_queue;
		idx++;
		goto skip_bitmap;
	}
out_unlock:
	spin_unlock(&busiest->lock);
out:
	;
}
/*
 * One of the idle_cpu_tick() and busy_cpu_tick() functions will
 * get called every timer tick, on every CPU. Our balancing action
 * frequency and balancing aggressiveness depends on whether the CPU is
 * idle or not.
 *
 * busy-rebalance every 200 msecs. idle-rebalance every 1 msec. (or on
 * systems with HZ=100, every 10 msecs.)
 *
 * On NUMA, do a node-rebalance every 400 msecs.
 */
#define IDLE_REBALANCE_TICK (HZ/1000 ?: 1)
#define BUSY_REBALANCE_TICK (HZ/5 ?: 1)
#define IDLE_NODE_REBALANCE_TICK (IDLE_REBALANCE_TICK * 5)
#define BUSY_NODE_REBALANCE_TICK (BUSY_REBALANCE_TICK * 2)
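/*
 * Worked example (assuming HZ=1000): IDLE_REBALANCE_TICK == 1 tick
 * (1ms), BUSY_REBALANCE_TICK == 200 ticks (200ms),
 * IDLE_NODE_REBALANCE_TICK == 5 ticks and BUSY_NODE_REBALANCE_TICK ==
 * 400 ticks (400ms).  The GNU C `?:` extension substitutes 1 whenever
 * the division truncates to 0: with HZ=100, HZ/1000 == 0 becomes 1
 * tick, i.e. a 10ms idle-rebalance interval, as the comment above
 * notes.
 */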
#ifdef CONFIG_NUMA
static void balance_node(runqueue_t *this_rq, int idle, int this_cpu)
{
	int node = find_busiest_node(cpu_to_node(this_cpu));
	unsigned long cpumask, this_cpumask = 1UL << this_cpu;

	if (node >= 0) {
		cpumask = node_to_cpumask(node) | this_cpumask;
		spin_lock(&this_rq->lock);
		load_balance(this_rq, idle, cpumask);
		spin_unlock(&this_rq->lock);
	}
}
#endif
static void rebalance_tick(runqueue_t *this_rq, int idle)
{
	int this_cpu = smp_processor_id();
	unsigned long j = jiffies;

	/*
	 * First do inter-node rebalancing, then intra-node rebalancing,
	 * if both events happen in the same tick. The inter-node
	 * rebalancing does not necessarily have to create a perfect
	 * balance within the node, since we load-balance the most loaded
	 * node with the current CPU. (ie. other CPUs in the local node
	 * are not balanced.)
	 */
	if (idle) {
#ifdef CONFIG_NUMA
		if (!(j % IDLE_NODE_REBALANCE_TICK))
			balance_node(this_rq, idle, this_cpu);
#endif
		if (!(j % IDLE_REBALANCE_TICK)) {
			spin_lock(&this_rq->lock);
			load_balance(this_rq, idle, cpu_to_node_mask(this_cpu));
			spin_unlock(&this_rq->lock);
		}
		return;
	}
#ifdef CONFIG_NUMA
	if (!(j % BUSY_NODE_REBALANCE_TICK))
		balance_node(this_rq, idle, this_cpu);
#endif
	if (!(j % BUSY_REBALANCE_TICK)) {
		spin_lock(&this_rq->lock);
		load_balance(this_rq, idle, cpu_to_node_mask(this_cpu));
		spin_unlock(&this_rq->lock);
	}
}
#else
/*
 * on UP we do not need to balance between CPUs:
 */
static inline void rebalance_tick(runqueue_t *this_rq, int idle)
{
}
#endif
DEFINE_PER_CPU(struct kernel_stat, kstat) = { { 0 } };
/*
 * We place interactive tasks back into the active array, if possible.
 *
 * To guarantee that this does not starve expired tasks we ignore the
 * interactivity of a task if the first expired task had to wait more
 * than a 'reasonable' amount of time. This deadline timeout is
 * load-dependent, as the frequency of array switches decreases with
 * increasing number of running tasks:
 */
#define EXPIRED_STARVING(rq) \
		(STARVATION_LIMIT && ((rq)->expired_timestamp && \
		(jiffies - (rq)->expired_timestamp >= \
			STARVATION_LIMIT * ((rq)->nr_running) + 1)))
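/*
 * Worked example (assuming HZ=1000, so STARVATION_LIMIT == 10*HZ ==
 * 10000 ticks): with one runnable task the deadline is ~10s; with 3
 * runnable tasks interactive reinsertion is suppressed once the first
 * expired task has waited more than 10000*3+1 ticks (~30s).  The more
 * tasks are runnable, the longer expired tasks may legitimately wait
 * for an array switch.
 */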
/*
 * This function gets called by the timer code, with HZ frequency.
 * We call it with interrupts disabled.
 *
 * It also gets called by the fork code, when changing the parent's
 * timeslices.
 */
void scheduler_tick(int user_ticks, int sys_ticks)
{
	int cpu = smp_processor_id();
	runqueue_t *rq = this_rq();
	task_t *p = current;

	if (rcu_pending(cpu))
		rcu_check_callbacks(cpu, user_ticks);

	if (p == rq->idle) {
		/* note: this timer irq context must be accounted for as well */
		if (irq_count() - HARDIRQ_OFFSET >= SOFTIRQ_OFFSET)
			kstat_cpu(cpu).cpustat.system += sys_ticks;
		else if (atomic_read(&rq->nr_iowait) > 0)
			kstat_cpu(cpu).cpustat.iowait += sys_ticks;
		else
			kstat_cpu(cpu).cpustat.idle += sys_ticks;
		rebalance_tick(rq, 1);
		return;
	}
	if (TASK_NICE(p) > 0)
		kstat_cpu(cpu).cpustat.nice += user_ticks;
	else
		kstat_cpu(cpu).cpustat.user += user_ticks;
	kstat_cpu(cpu).cpustat.system += sys_ticks;

	/* Task might have expired already, but not scheduled off yet */
	if (p->array != rq->active) {
		set_tsk_need_resched(p);
		goto out;
	}
	spin_lock(&rq->lock);
	/*
	 * The task was running during this tick - update the
	 * time slice counter and the sleep average. Note: we
	 * do not update a thread's priority until it either
	 * goes to sleep or uses up its timeslice. This makes
	 * it possible for interactive tasks to use up their
	 * timeslices at their highest priority levels.
	 */
	if (unlikely(rt_task(p))) {
		/*
		 * RR tasks need a special form of timeslice management.
		 * FIFO tasks have no timeslices.
		 */
		if ((p->policy == SCHED_RR) && !--p->time_slice) {
			p->time_slice = task_timeslice(p);
			p->first_time_slice = 0;
			set_tsk_need_resched(p);

			/* put it at the end of the queue: */
			dequeue_task(p, rq->active);
			enqueue_task(p, rq->active);
		}
		goto out_unlock;
	}
	if (!--p->time_slice) {
		dequeue_task(p, rq->active);
		set_tsk_need_resched(p);
		p->prio = effective_prio(p);
		p->time_slice = task_timeslice(p);
		p->first_time_slice = 0;

		if (!TASK_INTERACTIVE(p) || EXPIRED_STARVING(rq)) {
			if (!rq->expired_timestamp)
				rq->expired_timestamp = jiffies;
			enqueue_task(p, rq->expired);
		} else
			enqueue_task(p, rq->active);
	}
out_unlock:
	spin_unlock(&rq->lock);
out:
	rebalance_tick(rq, 0);
}
void scheduling_functions_start_here(void) { }
/*
 * schedule() is the main scheduler function.
 */
asmlinkage void schedule(void)
{
	task_t *prev, *next;
	runqueue_t *rq;
	prio_array_t *array;
	struct list_head *queue;
	int idx;

	/*
	 * Test if we are atomic. Since do_exit() needs to call into
	 * schedule() atomically, we ignore that path for now.
	 * Otherwise, whine if we are scheduling when we should not be.
	 */
	if (likely(!(current->state & (TASK_DEAD | TASK_ZOMBIE)))) {
		if (unlikely(in_atomic())) {
			printk(KERN_ERR "bad: scheduling while atomic!\n");
			dump_stack();
		}
	}

need_resched:
	preempt_disable();
	prev = current;
	rq = this_rq();

	release_kernel_lock(prev);
	prev->last_run = jiffies;
	spin_lock_irq(&rq->lock);

	/*
	 * if entering off of a kernel preemption go straight
	 * to picking the next task.
	 */
	if (unlikely(preempt_count() & PREEMPT_ACTIVE))
		goto pick_next_task;

	switch (prev->state) {
	case TASK_INTERRUPTIBLE:
		if (unlikely(signal_pending(prev))) {
			prev->state = TASK_RUNNING;
			break;
		}
	default:
		deactivate_task(prev, rq);
	case TASK_RUNNING:
		;
	}

pick_next_task:
	if (unlikely(!rq->nr_running)) {
#ifdef CONFIG_SMP
		load_balance(rq, 1, cpu_to_node_mask(smp_processor_id()));
		if (rq->nr_running)
			goto pick_next_task;
#endif
		next = rq->idle;
		rq->expired_timestamp = 0;
		goto switch_tasks;
	}

	array = rq->active;
	if (unlikely(!array->nr_active)) {
		/*
		 * Switch the active and expired arrays.
		 */
		rq->active = rq->expired;
		rq->expired = array;
		array = rq->active;
		rq->expired_timestamp = 0;
	}

	idx = sched_find_first_bit(array->bitmap);
	queue = array->queue + idx;
	next = list_entry(queue->next, task_t, run_list);

switch_tasks:
	prefetch(next);
	clear_tsk_need_resched(prev);
	RCU_qsctr(prev->thread_info->cpu)++;

	if (likely(prev != next)) {
		rq->nr_switches++;
		rq->curr = next;

		prepare_arch_switch(rq, next);
		prev = context_switch(rq, prev, next);
		barrier();

		finish_task_switch(prev);
	} else
		spin_unlock_irq(&rq->lock);

	reacquire_kernel_lock(current);
	preempt_enable_no_resched();
	if (test_thread_flag(TIF_NEED_RESCHED))
		goto need_resched;
}
#ifdef CONFIG_PREEMPT
/*
 * this is the entry point to schedule() from in-kernel preemption
 * off of preempt_enable. Kernel preemptions off of return from
 * interrupt occur there and call schedule directly.
 */
asmlinkage void preempt_schedule(void)
{
	struct thread_info *ti = current_thread_info();

	/*
	 * If there is a non-zero preempt_count or interrupts are disabled,
	 * we do not want to preempt the current task. Just return..
	 */
	if (unlikely(ti->preempt_count || irqs_disabled()))
		return;

need_resched:
	ti->preempt_count = PREEMPT_ACTIVE;
	schedule();
	ti->preempt_count = 0;

	/* we could miss a preemption opportunity between schedule and now */
	barrier();
	if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
		goto need_resched;
}
#endif /* CONFIG_PREEMPT */
int default_wake_function(wait_queue_t *curr, unsigned mode, int sync)
{
	task_t *p = curr->task;
	return try_to_wake_up(p, mode, sync, 0);
}
/*
 * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
 * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
 * number) then we wake all the non-exclusive tasks and one exclusive task.
 *
 * There are circumstances in which we can try to wake a task which has already
 * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
 * zero in this (rare) case, and we handle it by continuing to scan the queue.
 */
static void __wake_up_common(wait_queue_head_t *q, unsigned int mode, int nr_exclusive, int sync)
{
	struct list_head *tmp, *next;

	list_for_each_safe(tmp, next, &q->task_list) {
		wait_queue_t *curr;
		unsigned flags;

		curr = list_entry(tmp, wait_queue_t, task_list);
		flags = curr->flags;
		if (curr->func(curr, mode, sync) &&
		    (flags & WQ_FLAG_EXCLUSIVE) &&
		    !--nr_exclusive)
			break;
	}
}
/**
 * __wake_up - wake up threads blocked on a waitqueue.
 * @q: the waitqueue
 * @mode: which threads
 * @nr_exclusive: how many wake-one or wake-many threads to wake up
 */
void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
{
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	__wake_up_common(q, mode, nr_exclusive, 0);
	spin_unlock_irqrestore(&q->lock, flags);
}

/*
 * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
 */
void __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
{
	__wake_up_common(q, mode, 1, 0);
}
/**
 * __wake_up_sync - wake up threads blocked on a waitqueue, synchronously.
 * @q: the waitqueue
 * @mode: which threads
 * @nr_exclusive: how many wake-one or wake-many threads to wake up
 *
 * The sync wakeup differs in that the waker knows that it will schedule
 * away soon, so while the target thread will be woken up, it will not
 * be migrated to another CPU - ie. the two threads are 'synchronized'
 * with each other. This can prevent needless bouncing between CPUs.
 *
 * On UP it can prevent extra preemption.
 */
void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
{
	unsigned long flags;

	if (unlikely(!q))
		return;

	spin_lock_irqsave(&q->lock, flags);
	if (likely(nr_exclusive))
		__wake_up_common(q, mode, nr_exclusive, 1);
	else
		__wake_up_common(q, mode, nr_exclusive, 0);
	spin_unlock_irqrestore(&q->lock, flags);
}
void complete(struct completion *x)
{
	unsigned long flags;

	spin_lock_irqsave(&x->wait.lock, flags);
	x->done++;
	__wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1, 0);
	spin_unlock_irqrestore(&x->wait.lock, flags);
}

void complete_all(struct completion *x)
{
	unsigned long flags;

	spin_lock_irqsave(&x->wait.lock, flags);
	x->done += UINT_MAX/2;
	__wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 0, 0);
	spin_unlock_irqrestore(&x->wait.lock, flags);
}
void wait_for_completion(struct completion *x)
{
	spin_lock_irq(&x->wait.lock);
	if (!x->done) {
		DECLARE_WAITQUEUE(wait, current);

		wait.flags |= WQ_FLAG_EXCLUSIVE;
		__add_wait_queue_tail(&x->wait, &wait);
		do {
			__set_current_state(TASK_UNINTERRUPTIBLE);
			spin_unlock_irq(&x->wait.lock);
			schedule();
			spin_lock_irq(&x->wait.lock);
		} while (!x->done);
		__remove_wait_queue(&x->wait, &wait);
	}
	x->done--;
	spin_unlock_irq(&x->wait.lock);
}
#define SLEEP_ON_VAR					\
	unsigned long flags;				\
	wait_queue_t wait;				\
	init_waitqueue_entry(&wait, current);

#define SLEEP_ON_HEAD					\
	spin_lock_irqsave(&q->lock,flags);		\
	__add_wait_queue(q, &wait);			\
	spin_unlock(&q->lock);

#define SLEEP_ON_TAIL					\
	spin_lock_irq(&q->lock);			\
	__remove_wait_queue(q, &wait);			\
	spin_unlock_irqrestore(&q->lock, flags);
void interruptible_sleep_on(wait_queue_head_t *q)
{
	SLEEP_ON_VAR

	current->state = TASK_INTERRUPTIBLE;

	SLEEP_ON_HEAD
	schedule();
	SLEEP_ON_TAIL
}

long interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
{
	SLEEP_ON_VAR

	current->state = TASK_INTERRUPTIBLE;

	SLEEP_ON_HEAD
	timeout = schedule_timeout(timeout);
	SLEEP_ON_TAIL

	return timeout;
}

void sleep_on(wait_queue_head_t *q)
{
	SLEEP_ON_VAR

	current->state = TASK_UNINTERRUPTIBLE;

	SLEEP_ON_HEAD
	schedule();
	SLEEP_ON_TAIL
}

long sleep_on_timeout(wait_queue_head_t *q, long timeout)
{
	SLEEP_ON_VAR

	current->state = TASK_UNINTERRUPTIBLE;

	SLEEP_ON_HEAD
	timeout = schedule_timeout(timeout);
	SLEEP_ON_TAIL

	return timeout;
}

void scheduling_functions_end_here(void) { }
void set_user_nice(task_t *p, long nice)
{
	unsigned long flags;
	prio_array_t *array;
	runqueue_t *rq;

	if (TASK_NICE(p) == nice || nice < -20 || nice > 19)
		return;
	/*
	 * We have to be careful, if called from sys_setpriority(),
	 * the task might be in the middle of scheduling on another CPU.
	 */
	rq = task_rq_lock(p, &flags);
	if (rt_task(p)) {
		p->static_prio = NICE_TO_PRIO(nice);
		goto out_unlock;
	}
	array = p->array;
	if (array)
		dequeue_task(p, array);
	p->static_prio = NICE_TO_PRIO(nice);
	p->prio = NICE_TO_PRIO(nice);
	if (array) {
		enqueue_task(p, array);
		/*
		 * If the task is running and lowered its priority,
		 * or increased its priority then reschedule its CPU:
		 */
		if ((NICE_TO_PRIO(nice) < p->static_prio) ||
							task_running(rq, p))
			resched_task(rq->curr);
	}
out_unlock:
	task_rq_unlock(rq, &flags);
}
/*
 * sys_nice - change the priority of the current process.
 * @increment: priority increment
 *
 * sys_setpriority is a more generic, but much slower function that
 * does similar things.
 */
asmlinkage long sys_nice(int increment)
{
	int retval;
	long nice;

	/*
	 * Setpriority might change our priority at the same moment.
	 * We don't have to worry. Conceptually one call occurs first
	 * and we have a single winner.
	 */
	if (increment < 0) {
		if (!capable(CAP_SYS_NICE))
			return -EPERM;
		if (increment < -40)
			increment = -40;
	}
	if (increment > 40)
		increment = 40;

	nice = PRIO_TO_NICE(current->static_prio) + increment;
	if (nice < -20)
		nice = -20;
	if (nice > 19)
		nice = 19;

	retval = security_task_setnice(current, nice);
	if (retval)
		return retval;

	set_user_nice(current, nice);
	return 0;
}
/**
 * task_prio - return the priority value of a given task.
 * @p: the task in question.
 *
 * This is the priority value as seen by users in /proc.
 * RT tasks are offset by -200. Normal tasks are centered
 * around 0, value goes from -16 to +15.
 */
int task_prio(task_t *p)
{
	return p->prio - MAX_RT_PRIO;
}

/**
 * task_nice - return the nice value of a given task.
 * @p: the task in question.
 */
int task_nice(task_t *p)
{
	return TASK_NICE(p);
}

/**
 * task_curr - is this task currently executing on a CPU?
 * @p: the task in question.
 */
int task_curr(task_t *p)
{
	return cpu_curr(task_cpu(p)) == p;
}

/**
 * idle_cpu - is a given cpu idle currently?
 * @cpu: the processor in question.
 */
int idle_cpu(int cpu)
{
	return cpu_curr(cpu) == cpu_rq(cpu)->idle;
}

/**
 * find_process_by_pid - find a process with a matching PID value.
 * @pid: the pid in question.
 */
static inline task_t *find_process_by_pid(pid_t pid)
{
	return pid ? find_task_by_pid(pid) : current;
}
/*
 * setscheduler - change the scheduling policy and/or RT priority of a thread.
 */
static int setscheduler(pid_t pid, int policy, struct sched_param __user *param)
{
	struct sched_param lp;
	int retval = -EINVAL;
	prio_array_t *array;
	unsigned long flags;
	runqueue_t *rq;
	task_t *p;

	if (!param || pid < 0)
		goto out_nounlock;

	retval = -EFAULT;
	if (copy_from_user(&lp, param, sizeof(struct sched_param)))
		goto out_nounlock;

	/*
	 * We play safe to avoid deadlocks.
	 */
	read_lock_irq(&tasklist_lock);

	p = find_process_by_pid(pid);

	retval = -ESRCH;
	if (!p)
		goto out_unlock_tasklist;

	/*
	 * To be able to change p->policy safely, the appropriate
	 * runqueue lock must be held.
	 */
	rq = task_rq_lock(p, &flags);

	if (policy < 0)
		policy = p->policy;
	else {
		retval = -EINVAL;
		if (policy != SCHED_FIFO && policy != SCHED_RR &&
				policy != SCHED_NORMAL)
			goto out_unlock;
	}

	/*
	 * Valid priorities for SCHED_FIFO and SCHED_RR are
	 * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL is 0.
	 */
	retval = -EINVAL;
	if (lp.sched_priority < 0 || lp.sched_priority > MAX_USER_RT_PRIO-1)
		goto out_unlock;
	if ((policy == SCHED_NORMAL) != (lp.sched_priority == 0))
		goto out_unlock;

	retval = -EPERM;
	if ((policy == SCHED_FIFO || policy == SCHED_RR) &&
	    !capable(CAP_SYS_NICE))
		goto out_unlock;
	if ((current->euid != p->euid) && (current->euid != p->uid) &&
	    !capable(CAP_SYS_NICE))
		goto out_unlock;

	retval = security_task_setscheduler(p, policy, &lp);
	if (retval)
		goto out_unlock;

	array = p->array;
	if (array)
		deactivate_task(p, task_rq(p));
	retval = 0;
	p->policy = policy;
	p->rt_priority = lp.sched_priority;
	if (policy != SCHED_NORMAL)
		p->prio = MAX_USER_RT_PRIO-1 - p->rt_priority;
	else
		p->prio = p->static_prio;
	if (array)
		__activate_task(p, task_rq(p));

out_unlock:
	task_rq_unlock(rq, &flags);
out_unlock_tasklist:
	read_unlock_irq(&tasklist_lock);

out_nounlock:
	return retval;
}
/**
 * sys_sched_setscheduler - set/change the scheduler policy and RT priority
 * @pid: the pid in question.
 * @policy: new policy
 * @param: structure containing the new RT priority.
 */
asmlinkage long sys_sched_setscheduler(pid_t pid, int policy,
				       struct sched_param __user *param)
{
	return setscheduler(pid, policy, param);
}

/**
 * sys_sched_setparam - set/change the RT priority of a thread
 * @pid: the pid in question.
 * @param: structure containing the new RT priority.
 */
asmlinkage long sys_sched_setparam(pid_t pid, struct sched_param __user *param)
{
	return setscheduler(pid, -1, param);
}
/**
 * sys_sched_getscheduler - get the policy (scheduling class) of a thread
 * @pid: the pid in question.
 */
asmlinkage long sys_sched_getscheduler(pid_t pid)
{
	int retval = -EINVAL;
	task_t *p;

	if (pid < 0)
		goto out_nounlock;

	retval = -ESRCH;
	read_lock(&tasklist_lock);
	p = find_process_by_pid(pid);
	if (p) {
		retval = security_task_getscheduler(p);
		if (!retval)
			retval = p->policy;
	}
	read_unlock(&tasklist_lock);

out_nounlock:
	return retval;
}
/**
 * sys_sched_getparam - get the RT priority of a thread
 * @pid: the pid in question.
 * @param: structure containing the RT priority.
 */
asmlinkage long sys_sched_getparam(pid_t pid, struct sched_param __user *param)
{
	struct sched_param lp;
	int retval = -EINVAL;
	task_t *p;

	if (!param || pid < 0)
		goto out_nounlock;

	read_lock(&tasklist_lock);
	p = find_process_by_pid(pid);
	retval = -ESRCH;
	if (!p)
		goto out_unlock;

	retval = security_task_getscheduler(p);
	if (retval)
		goto out_unlock;

	lp.sched_priority = p->rt_priority;
	read_unlock(&tasklist_lock);

	/*
	 * This one might sleep, we cannot do it with a spinlock held ...
	 */
	retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;

out_nounlock:
	return retval;

out_unlock:
	read_unlock(&tasklist_lock);
	return retval;
}
/**
 * sys_sched_setaffinity - set the cpu affinity of a process
 * @pid: pid of the process
 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
 * @user_mask_ptr: user-space pointer to the new cpu mask
 */
asmlinkage long sys_sched_setaffinity(pid_t pid, unsigned int len,
				      unsigned long __user *user_mask_ptr)
{
	unsigned long new_mask;
	int retval;
	task_t *p;

	if (len < sizeof(new_mask))
		return -EINVAL;

	if (copy_from_user(&new_mask, user_mask_ptr, sizeof(new_mask)))
		return -EFAULT;

	read_lock(&tasklist_lock);

	p = find_process_by_pid(pid);
	if (!p) {
		read_unlock(&tasklist_lock);
		return -ESRCH;
	}

	/*
	 * It is not safe to call set_cpus_allowed with the
	 * tasklist_lock held. We will bump the task_struct's
	 * usage count and then drop tasklist_lock.
	 */
	get_task_struct(p);
	read_unlock(&tasklist_lock);

	retval = -EPERM;
	if ((current->euid != p->euid) && (current->euid != p->uid) &&
			!capable(CAP_SYS_NICE))
		goto out_unlock;

	retval = set_cpus_allowed(p, new_mask);

out_unlock:
	put_task_struct(p);
	return retval;
}
/**
 * sys_sched_getaffinity - get the cpu affinity of a process
 * @pid: pid of the process
 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
 * @user_mask_ptr: user-space pointer to hold the current cpu mask
 */
asmlinkage long sys_sched_getaffinity(pid_t pid, unsigned int len,
				      unsigned long __user *user_mask_ptr)
{
	unsigned int real_len;
	unsigned long mask;
	int retval;
	task_t *p;

	real_len = sizeof(mask);
	if (len < real_len)
		return -EINVAL;

	read_lock(&tasklist_lock);

	retval = -ESRCH;
	p = find_process_by_pid(pid);
	if (!p)
		goto out_unlock;

	retval = 0;
	mask = p->cpus_allowed & cpu_online_map;

out_unlock:
	read_unlock(&tasklist_lock);
	if (retval)
		return retval;
	if (copy_to_user(user_mask_ptr, &mask, real_len))
		return -EFAULT;
	return real_len;
}
/**
 * sys_sched_yield - yield the current processor to other threads.
 *
 * this function yields the current CPU by moving the calling thread
 * to the expired array. If there are no other threads running on this
 * CPU then this function will return.
 */
asmlinkage long sys_sched_yield(void)
{
	runqueue_t *rq = this_rq_lock();
	prio_array_t *array = current->array;

	/*
	 * We implement yielding by moving the task into the expired
	 * queue.
	 *
	 * (special rule: RT tasks will just roundrobin in the active
	 *  array.)
	 */
	if (likely(!rt_task(current))) {
		dequeue_task(current, array);
		enqueue_task(current, rq->expired);
	} else {
		list_del(&current->run_list);
		list_add_tail(&current->run_list, array->queue + current->prio);
	}
	/*
	 * Since we are going to call schedule() anyway, there's
	 * no need to preempt:
	 */
	_raw_spin_unlock(&rq->lock);
	preempt_enable_no_resched();

	schedule();

	return 0;
}

void __cond_resched(void)
{
	set_current_state(TASK_RUNNING);
	schedule();
}
/**
 * yield - yield the current processor to other threads.
 *
 * this is a shortcut for kernel-space yielding - it marks the
 * thread runnable and calls sys_sched_yield().
 */
void yield(void)
{
	set_current_state(TASK_RUNNING);
	sys_sched_yield();
}
/*
 * This task is about to go to sleep on IO. Increment rq->nr_iowait so
 * that process accounting knows that this is a task in IO wait state.
 *
 * But don't do that if it is a deliberate, throttling IO wait (this task
 * has set its backing_dev_info: the queue against which it should throttle)
 */
void io_schedule(void)
{
	struct runqueue *rq = this_rq();

	atomic_inc(&rq->nr_iowait);
	schedule();
	atomic_dec(&rq->nr_iowait);
}

long io_schedule_timeout(long timeout)
{
	struct runqueue *rq = this_rq();
	long ret;

	atomic_inc(&rq->nr_iowait);
	ret = schedule_timeout(timeout);
	atomic_dec(&rq->nr_iowait);
	return ret;
}
/**
 * sys_sched_get_priority_max - return maximum RT priority.
 * @policy: scheduling class.
 *
 * this syscall returns the maximum rt_priority that can be used
 * by a given scheduling class.
 */
asmlinkage long sys_sched_get_priority_max(int policy)
{
	int ret = -EINVAL;

	switch (policy) {
	case SCHED_FIFO:
	case SCHED_RR:
		ret = MAX_USER_RT_PRIO-1;
		break;
	case SCHED_NORMAL:
		ret = 0;
		break;
	}
	return ret;
}

/**
 * sys_sched_get_priority_min - return minimum RT priority.
 * @policy: scheduling class.
 *
 * this syscall returns the minimum rt_priority that can be used
 * by a given scheduling class.
 */
asmlinkage long sys_sched_get_priority_min(int policy)
{
	int ret = -EINVAL;

	switch (policy) {
	case SCHED_FIFO:
	case SCHED_RR:
		ret = 1;
		break;
	case SCHED_NORMAL:
		ret = 0;
		break;
	}
	return ret;
}
/**
 * sys_sched_rr_get_interval - return the default timeslice of a process.
 * @pid: pid of the process.
 * @interval: userspace pointer to the timeslice value.
 *
 * this syscall writes the default timeslice value of a given process
 * into the user-space timespec buffer. A value of '0' means infinity.
 */
asmlinkage long sys_sched_rr_get_interval(pid_t pid, struct timespec __user *interval)
{
	int retval = -EINVAL;
	struct timespec t;
	task_t *p;

	if (pid < 0)
		goto out_nounlock;

	retval = -ESRCH;
	read_lock(&tasklist_lock);
	p = find_process_by_pid(pid);
	if (!p)
		goto out_unlock;

	retval = security_task_getscheduler(p);
	if (retval)
		goto out_unlock;

	jiffies_to_timespec(p->policy & SCHED_FIFO ?
				0 : task_timeslice(p), &t);
	read_unlock(&tasklist_lock);
	retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
out_nounlock:
	return retval;
out_unlock:
	read_unlock(&tasklist_lock);
	return retval;
}
static inline struct task_struct *eldest_child(struct task_struct *p)
{
	if (list_empty(&p->children))
		return NULL;
	return list_entry(p->children.next, struct task_struct, sibling);
}

static inline struct task_struct *older_sibling(struct task_struct *p)
{
	if (p->sibling.prev == &p->parent->children)
		return NULL;
	return list_entry(p->sibling.prev, struct task_struct, sibling);
}

static inline struct task_struct *younger_sibling(struct task_struct *p)
{
	if (p->sibling.next == &p->parent->children)
		return NULL;
	return list_entry(p->sibling.next, struct task_struct, sibling);
}
static void show_task(task_t * p)
{
	unsigned long free = 0;
	task_t *relative;
	int state;
	static const char * stat_nam[] = { "R", "S", "D", "T", "Z", "W" };

	printk("%-13.13s ", p->comm);
	state = p->state ? __ffs(p->state) + 1 : 0;
	if (((unsigned) state) < sizeof(stat_nam)/sizeof(char *))
		printk(stat_nam[state]);
	else
		printk("?");
#if (BITS_PER_LONG == 32)
	if (p == current)
		printk(" current  ");
	else
		printk(" %08lX ", thread_saved_pc(p));
#else
	if (p == current)
		printk("   current task   ");
	else
		printk(" %016lx ", thread_saved_pc(p));
#endif
	{
		unsigned long * n = (unsigned long *) (p->thread_info+1);
		while (!*n)
			n++;
		free = (unsigned long) n - (unsigned long)(p+1);
	}
	printk("%5lu %5d %6d ", free, p->pid, p->parent->pid);
	if ((relative = eldest_child(p)))
		printk("%5d ", relative->pid);
	else
		printk("      ");
	if ((relative = younger_sibling(p)))
		printk("%7d", relative->pid);
	else
		printk("       ");
	if ((relative = older_sibling(p)))
		printk(" %5d", relative->pid);
	else
		printk("      ");
	if (!p->mm)
		printk(" (L-TLB)\n");
	else
		printk(" (NOTLB)\n");

	show_stack(p, NULL);
}
void show_state(void)
{
	task_t *g, *p;

#if (BITS_PER_LONG == 32)
	printk("  task             PC    stack   pid father child younger older\n");
#else
	printk("  task                 PC        stack   pid father child younger older\n");
#endif
	read_lock(&tasklist_lock);
	do_each_thread(g, p) {
		/*
		 * reset the NMI-timeout, listing all files on a slow
		 * console might take a lot of time:
		 */
		touch_nmi_watchdog();
		show_task(p);
	} while_each_thread(g, p);

	read_unlock(&tasklist_lock);
}
void __init init_idle(task_t *idle, int cpu)
{
	runqueue_t *idle_rq = cpu_rq(cpu), *rq = cpu_rq(task_cpu(idle));
	unsigned long flags;

	local_irq_save(flags);
	double_rq_lock(idle_rq, rq);

	idle_rq->curr = idle_rq->idle = idle;
	deactivate_task(idle, rq);
	idle->array = NULL;
	idle->prio = MAX_PRIO;
	idle->state = TASK_RUNNING;
	set_task_cpu(idle, cpu);
	double_rq_unlock(idle_rq, rq);
	set_tsk_need_resched(idle);
	local_irq_restore(flags);

	/* Set the preempt count _outside_ the spinlocks! */
#ifdef CONFIG_PREEMPT
	idle->thread_info->preempt_count = (idle->lock_depth >= 0);
#else
	idle->thread_info->preempt_count = 0;
#endif
}
/*
 * This is how migration works:
 *
 * 1) we queue a migration_req_t structure in the source CPU's
 *    runqueue and wake up that CPU's migration thread.
 * 2) we down() the locked semaphore => thread blocks.
 * 3) migration thread wakes up (implicitly it forces the migrated
 *    thread off the CPU)
 * 4) it gets the migration request and checks whether the migrated
 *    task is still in the wrong runqueue.
 * 5) if it's in the wrong runqueue then the migration thread removes
 *    it and puts it into the right queue.
 * 6) migration thread up()s the semaphore.
 * 7) we wake up and the migration is done.
 */

typedef struct {
	struct list_head list;
	task_t *task;
	struct completion done;
} migration_req_t;
/*
 * Change a given task's CPU affinity. Migrate the thread to a
 * proper CPU and schedule it away if the CPU it's executing on
 * is removed from the allowed bitmask.
 *
 * NOTE: the caller must have a valid reference to the task, the
 * task must not exit() & deallocate itself prematurely. The
 * call is not atomic; no spinlocks may be held.
 */
int set_cpus_allowed(task_t *p, unsigned long new_mask)
{
	unsigned long flags;
	migration_req_t req;
	runqueue_t *rq;

	if (any_online_cpu(new_mask) == NR_CPUS)
		return -EINVAL;

	rq = task_rq_lock(p, &flags);
	p->cpus_allowed = new_mask;
	/*
	 * Can the task run on the task's current CPU? If not then
	 * migrate the thread off to a proper CPU.
	 */
	if (new_mask & (1UL << task_cpu(p))) {
		task_rq_unlock(rq, &flags);
		return 0;
	}
	/*
	 * If the task is not on a runqueue (and not running), then
	 * it is sufficient to simply update the task's cpu field.
	 */
	if (!p->array && !task_running(rq, p)) {
		set_task_cpu(p, any_online_cpu(p->cpus_allowed));
		task_rq_unlock(rq, &flags);
		return 0;
	}
	init_completion(&req.done);
	req.task = p;
	list_add(&req.list, &rq->migration_queue);
	task_rq_unlock(rq, &flags);

	wake_up_process(rq->migration_thread);

	wait_for_completion(&req.done);
	return 0;
}
/* Move (not current) task off this cpu, onto dest cpu. */
static void move_task_away(struct task_struct *p, int dest_cpu)
{
	runqueue_t *rq_dest;
	unsigned long flags;

	rq_dest = cpu_rq(dest_cpu);

	local_irq_save(flags);
	double_rq_lock(this_rq(), rq_dest);
	if (task_cpu(p) != smp_processor_id())
		goto out; /* Already moved */

	set_task_cpu(p, dest_cpu);
	if (p->array) {
		deactivate_task(p, this_rq());
		activate_task(p, rq_dest);
		if (p->prio < rq_dest->curr->prio)
			resched_task(rq_dest->curr);
	}
out:
	double_rq_unlock(this_rq(), rq_dest);
	local_irq_restore(flags);
}
/*
 * migration_thread - this is a highprio system thread that performs
 * thread migration by bumping thread off CPU then 'pushing' onto
 * another runqueue.
 */
static int migration_thread(void * data)
{
	/* Marking "param" __user is ok, since we do a set_fs(KERNEL_DS); */
	struct sched_param __user param = { .sched_priority = MAX_RT_PRIO-1 };
	int cpu = (long) data;
	runqueue_t *rq;
	int ret;

	daemonize("migration/%d", cpu);
	set_fs(KERNEL_DS);

	/*
	 * Either we are running on the right CPU, or there's a
	 * migration thread on this CPU, guaranteed (we're started
	 * serially).
	 */
	set_cpus_allowed(current, 1UL << cpu);

	ret = setscheduler(0, SCHED_FIFO, &param);

	rq = this_rq();
	rq->migration_thread = current;

	for (;;) {
		struct list_head *head;
		migration_req_t *req;

		spin_lock_irq(&rq->lock);
		head = &rq->migration_queue;
		current->state = TASK_INTERRUPTIBLE;
		if (list_empty(head)) {
			spin_unlock_irq(&rq->lock);
			schedule();
			continue;
		}
		req = list_entry(head->next, migration_req_t, list);
		list_del_init(head->next);
		spin_unlock_irq(&rq->lock);

		move_task_away(req->task,
			       any_online_cpu(req->task->cpus_allowed));
		complete(&req->done);
	}
}
/*
 * migration_call - callback that gets triggered when a CPU is added.
 * Here we can start up the necessary migration thread for the new CPU.
 */
static int migration_call(struct notifier_block *nfb,
			  unsigned long action,
			  void *hcpu)
{
	switch (action) {
	case CPU_ONLINE:
		printk("Starting migration thread for cpu %li\n",
			(long)hcpu);
		kernel_thread(migration_thread, hcpu, CLONE_KERNEL);
		while (!cpu_rq((long)hcpu)->migration_thread)
			yield();
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block migration_notifier = { &migration_call, NULL, 0 };

__init int migration_init(void)
{
	/* Start one for boot CPU. */
	migration_call(&migration_notifier, CPU_ONLINE,
			(void *)(long)smp_processor_id());
	register_cpu_notifier(&migration_notifier);
	return 0;
}
#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
/*
 * The 'big kernel lock'
 *
 * This spinlock is taken and released recursively by lock_kernel()
 * and unlock_kernel(). It is transparently dropped and reacquired
 * over schedule(). It is used to protect legacy code that hasn't
 * been migrated to a proper locking design yet.
 *
 * Don't use in new code.
 */
spinlock_t kernel_flag __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
#endif
static void kstat_init_cpu(int cpu)
{
	/* Add any initialisation to kstat here */
	/* Useful when cpu offlining logic is added.. */
}

static int __devinit kstat_cpu_notify(struct notifier_block *self,
				      unsigned long action, void *hcpu)
{
	int cpu = (unsigned long)hcpu;

	switch (action) {
	case CPU_UP_PREPARE:
		kstat_init_cpu(cpu);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __devinitdata kstat_nb = {
	.notifier_call	= kstat_cpu_notify,
	.next		= NULL,
};

__init static void init_kstat(void) {
	kstat_cpu_notify(&kstat_nb, (unsigned long)CPU_UP_PREPARE,
			 (void *)(long)smp_processor_id());
	register_cpu_notifier(&kstat_nb);
}
void __init sched_init(void)
{
	runqueue_t *rq;
	int i, j, k;

	/* Init the kstat counters */
	init_kstat();

	for (i = 0; i < NR_CPUS; i++) {
		prio_array_t *array;

		rq = cpu_rq(i);
		rq->active = rq->arrays;
		rq->expired = rq->arrays + 1;
		spin_lock_init(&rq->lock);
		INIT_LIST_HEAD(&rq->migration_queue);
		atomic_set(&rq->nr_iowait, 0);
		nr_running_init(rq);

		for (j = 0; j < 2; j++) {
			array = rq->arrays + j;
			for (k = 0; k < MAX_PRIO; k++) {
				INIT_LIST_HEAD(array->queue + k);
				__clear_bit(k, array->bitmap);
			}
			// delimiter for bitsearch
			__set_bit(MAX_PRIO, array->bitmap);
		}
	}
	/*
	 * We have to do a little magic to get the first
	 * thread right in SMP mode.
	 */
	rq = this_rq();
	rq->curr = current;
	rq->idle = current;
	set_task_cpu(current, smp_processor_id());
	wake_up_forked_process(current);

	/*
	 * The boot idle thread does lazy MMU switching as well:
	 */
	atomic_inc(&init_mm.mm_count);
	enter_lazy_tlb(&init_mm, current, smp_processor_id());
}
#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
void __might_sleep(char *file, int line)
{
#if defined(in_atomic)
	static unsigned long prev_jiffy;	/* ratelimiting */

	if (in_atomic() || irqs_disabled()) {
		if (time_before(jiffies, prev_jiffy + HZ))
			return;
		prev_jiffy = jiffies;
		printk(KERN_ERR "Debug: sleeping function called from illegal"
				" context at %s:%d\n", file, line);
		dump_stack();
	}
#endif
}
#endif
#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT)
/*
 * This could be a long-held lock. If another CPU holds it for a long time,
 * and that CPU is not asked to reschedule then *this* CPU will spin on the
 * lock for a long time, even if *this* CPU is asked to reschedule.
 *
 * So what we do here, in the slow (contended) path is to spin on the lock by
 * hand while permitting preemption.
 *
 * Called inside preempt_disable().
 */
void __preempt_spin_lock(spinlock_t *lock)
{
	if (preempt_count() > 1) {
		_raw_spin_lock(lock);
		return;
	}
	do {
		preempt_enable();
		while (spin_is_locked(lock))
			cpu_relax();
		preempt_disable();
	} while (!_raw_spin_trylock(lock));
}

void __preempt_write_lock(rwlock_t *lock)
{
	if (preempt_count() > 1) {
		_raw_write_lock(lock);
		return;
	}

	do {
		preempt_enable();
		while (rwlock_is_locked(lock))
			cpu_relax();
		preempt_disable();
	} while (!_raw_write_trylock(lock));
}
#endif