[linux-2.6/history.git] / kernel / sched.c
1 /*
2 * kernel/sched.c
4 * Kernel scheduler and related syscalls
6 * Copyright (C) 1991-2002 Linus Torvalds
8 * 1996-12-23 Modified by Dave Grothe to fix bugs in semaphores and
9 * make semaphores SMP safe
10 * 1998-11-19 Implemented schedule_timeout() and related stuff
11 * by Andrea Arcangeli
12 * 2002-01-04 New ultra-scalable O(1) scheduler by Ingo Molnar:
13 * hybrid priority-list and round-robin design with
14 * an array-switch method of distributing timeslices
15 * and per-CPU runqueues. Cleanups and useful suggestions
16 * by Davide Libenzi, preemptible kernel bits by Robert Love.
19 #include <linux/mm.h>
20 #include <linux/nmi.h>
21 #include <linux/init.h>
22 #include <asm/uaccess.h>
23 #include <linux/highmem.h>
24 #include <linux/smp_lock.h>
25 #include <asm/mmu_context.h>
26 #include <linux/interrupt.h>
27 #include <linux/completion.h>
28 #include <linux/kernel_stat.h>
29 #include <linux/security.h>
30 #include <linux/notifier.h>
31 #include <linux/delay.h>
32 #include <linux/timer.h>
35 * Convert user-nice values [ -20 ... 0 ... 19 ]
36 * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
37 * and back.
39 #define NICE_TO_PRIO(nice) (MAX_RT_PRIO + (nice) + 20)
40 #define PRIO_TO_NICE(prio) ((prio) - MAX_RT_PRIO - 20)
41 #define TASK_NICE(p) PRIO_TO_NICE((p)->static_prio)
44 * 'User priority' is the nice value converted to something we
45 * can work with better when scaling various scheduler parameters,
46 * it's a [ 0 ... 39 ] range.
48 #define USER_PRIO(p) ((p)-MAX_RT_PRIO)
49 #define TASK_USER_PRIO(p) USER_PRIO((p)->static_prio)
50 #define MAX_USER_PRIO (USER_PRIO(MAX_PRIO))
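/*
 * Worked example of the mappings above, assuming MAX_RT_PRIO == 100 and
 * MAX_PRIO == 140 (the values in this kernel's <linux/sched.h>):
 *
 *	NICE_TO_PRIO(-20) == 100	USER_PRIO(100) ==  0
 *	NICE_TO_PRIO(  0) == 120	USER_PRIO(120) == 20
 *	NICE_TO_PRIO( 19) == 139	USER_PRIO(139) == 39
 *
 * so MAX_USER_PRIO evaluates to 40, one slot per nice level.
 */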
53 * These are the 'tuning knobs' of the scheduler:
55 * Minimum timeslice is 10 msecs, default timeslice is 150 msecs,
56 * maximum timeslice is 300 msecs. Timeslices get refilled after
57 * they expire.
59 #define MIN_TIMESLICE ( 10 * HZ / 1000)
60 #define MAX_TIMESLICE (300 * HZ / 1000)
61 #define CHILD_PENALTY 95
62 #define PARENT_PENALTY 100
63 #define EXIT_WEIGHT 3
64 #define PRIO_BONUS_RATIO 25
65 #define INTERACTIVE_DELTA 2
66 #define MAX_SLEEP_AVG (2*HZ)
67 #define STARVATION_LIMIT (2*HZ)
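/*
 * For illustration, assuming HZ == 1000: MIN_TIMESLICE == 10 ticks (10 ms),
 * MAX_TIMESLICE == 300 ticks (300 ms), and MAX_SLEEP_AVG == STARVATION_LIMIT
 * == 2000 ticks (2 seconds). With HZ == 100 the tick counts scale accordingly.
 */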
70 * If a task is 'interactive' then we reinsert it in the active
71 * array after it has expired its current timeslice. (it will not
72 * continue to run immediately, it will still roundrobin with
73 * other interactive tasks.)
75 * This part scales the interactivity limit depending on niceness.
77 * We scale it linearly, offset by the INTERACTIVE_DELTA delta.
78 * Here are a few examples of different nice levels:
80 * TASK_INTERACTIVE(-20): [1,1,1,1,1,1,1,1,1,0,0]
81 * TASK_INTERACTIVE(-10): [1,1,1,1,1,1,1,0,0,0,0]
82 * TASK_INTERACTIVE( 0): [1,1,1,1,0,0,0,0,0,0,0]
83 * TASK_INTERACTIVE( 10): [1,1,0,0,0,0,0,0,0,0,0]
84 * TASK_INTERACTIVE( 19): [0,0,0,0,0,0,0,0,0,0,0]
86 * (the X axis represents the possible -5 ... 0 ... +5 dynamic
87 * priority range a task can explore, a value of '1' means the
88 * task is rated interactive.)
90 * Ie. nice +19 tasks can never get 'interactive' enough to be
91 * reinserted into the active array, and only heavily CPU-hogging nice -20
92 * tasks will ever be expired. Default nice 0 tasks are somewhere in
93 * between: it takes some effort for them to get interactive, but it's not
94 * too hard.
97 #define SCALE(v1,v1_max,v2_max) \
98 (v1) * (v2_max) / (v1_max)
100 #define DELTA(p) \
101 (SCALE(TASK_NICE(p), 40, MAX_USER_PRIO*PRIO_BONUS_RATIO/100) + \
102 INTERACTIVE_DELTA)
104 #define TASK_INTERACTIVE(p) \
105 ((p)->prio <= (p)->static_prio - DELTA(p))
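/*
 * Worked example of DELTA(), assuming MAX_USER_PRIO == 40 so that
 * MAX_USER_PRIO*PRIO_BONUS_RATIO/100 == 10:
 *
 *	nice -20: DELTA == -20*10/40 + 2 == -3  ->  interactive while
 *	          prio <= static_prio + 3
 *	nice   0: DELTA ==   0*10/40 + 2 ==  2  ->  interactive while
 *	          prio <= static_prio - 2
 *	nice  19: DELTA ==  19*10/40 + 2 ==  6  ->  never interactive,
 *	          since the maximum dynamic bonus is only 5
 *
 * which reproduces the TASK_INTERACTIVE() table above.
 */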
108 * BASE_TIMESLICE scales user-nice values [ -20 ... 19 ]
109 * to time slice values.
111 * The higher a thread's priority, the bigger timeslices
112 * it gets during one round of execution. But even the lowest
113 * priority thread gets MIN_TIMESLICE worth of execution time.
115 * task_timeslice() is the interface that is used by the scheduler.
118 #define BASE_TIMESLICE(p) (MIN_TIMESLICE + \
119 ((MAX_TIMESLICE - MIN_TIMESLICE) * (MAX_PRIO-1-(p)->static_prio)/(MAX_USER_PRIO - 1)))
121 static inline unsigned int task_timeslice(task_t *p)
123 return BASE_TIMESLICE(p);
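/*
 * Worked example of BASE_TIMESLICE(), assuming MAX_RT_PRIO == 100,
 * MAX_PRIO == 140 and HZ == 1000 (so MAX_USER_PRIO == 40):
 *
 *	nice -20 (static_prio 100): 10 + 290*39/39 == 300 ticks (300 ms)
 *	nice   0 (static_prio 120): 10 + 290*19/39 == 151 ticks (~150 ms)
 *	nice  19 (static_prio 139): 10 + 290* 0/39 ==  10 ticks  (10 ms)
 *
 * matching the min/default/max timeslices documented above.
 */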
127 * These are the runqueue data structures:
130 #define BITMAP_SIZE ((((MAX_PRIO+1+7)/8)+sizeof(long)-1)/sizeof(long))
132 typedef struct runqueue runqueue_t;
134 struct prio_array {
135 int nr_active;
136 unsigned long bitmap[BITMAP_SIZE];
137 struct list_head queue[MAX_PRIO];
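/*
 * Size check for the bitmap above, assuming MAX_PRIO == 140: it must hold
 * MAX_PRIO+1 == 141 bits (one per priority plus the delimiter bit set in
 * sched_init()). (140+1+7)/8 == 18 bytes, so BITMAP_SIZE comes out as
 * 5 longs on 32-bit (160 bits) and 3 longs on 64-bit (192 bits).
 */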
141 * This is the main, per-CPU runqueue data structure.
143 * Locking rule: those places that want to lock multiple runqueues
144 * (such as the load balancing or the thread migration code) must
145 * order their lock-acquire operations by ascending &runqueue address.
147 struct runqueue {
148 spinlock_t lock;
149 unsigned long nr_running, nr_switches, expired_timestamp,
150 nr_uninterruptible;
151 task_t *curr, *idle;
152 prio_array_t *active, *expired, arrays[2];
153 int prev_nr_running[NR_CPUS];
155 task_t *migration_thread;
156 struct list_head migration_queue;
158 } ____cacheline_aligned;
160 static struct runqueue runqueues[NR_CPUS] __cacheline_aligned;
162 #define cpu_rq(cpu) (runqueues + (cpu))
163 #define this_rq() cpu_rq(smp_processor_id())
164 #define task_rq(p) cpu_rq(task_cpu(p))
165 #define cpu_curr(cpu) (cpu_rq(cpu)->curr)
166 #define rt_task(p) ((p)->prio < MAX_RT_PRIO)
169 * Default context-switch locking:
171 #ifndef prepare_arch_switch
172 # define prepare_arch_switch(rq, next) do { } while(0)
173 # define finish_arch_switch(rq, next) spin_unlock_irq(&(rq)->lock)
174 # define task_running(rq, p) ((rq)->curr == (p))
175 #endif
178 * task_rq_lock - lock the runqueue a given task resides on and disable
179 * interrupts. Note the ordering: we can safely lookup the task_rq without
180 * explicitly disabling preemption.
182 static inline runqueue_t *task_rq_lock(task_t *p, unsigned long *flags)
184 struct runqueue *rq;
186 repeat_lock_task:
187 local_irq_save(*flags);
188 rq = task_rq(p);
189 spin_lock(&rq->lock);
190 if (unlikely(rq != task_rq(p))) {
191 spin_unlock_irqrestore(&rq->lock, *flags);
192 goto repeat_lock_task;
194 return rq;
197 static inline void task_rq_unlock(runqueue_t *rq, unsigned long *flags)
199 spin_unlock_irqrestore(&rq->lock, *flags);
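/*
 * Usage sketch (hypothetical caller) - the pair is always used
 * bracket-style:
 *
 *	unsigned long flags;
 *	runqueue_t *rq = task_rq_lock(p, &flags);
 *	... inspect or modify p's scheduling state ...
 *	task_rq_unlock(rq, &flags);
 *
 * The re-check of task_rq(p) under the lock is what makes this safe even
 * if p is migrated to another CPU between the lookup and the spin_lock().
 */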
203 * rq_lock - lock a given runqueue and disable interrupts.
205 static inline runqueue_t *this_rq_lock(void)
207 runqueue_t *rq;
209 local_irq_disable();
210 rq = this_rq();
211 spin_lock(&rq->lock);
213 return rq;
216 static inline void rq_unlock(runqueue_t *rq)
218 spin_unlock_irq(&rq->lock);
222 * Adding/removing a task to/from a priority array:
224 static inline void dequeue_task(struct task_struct *p, prio_array_t *array)
226 array->nr_active--;
227 list_del(&p->run_list);
228 if (list_empty(array->queue + p->prio))
229 __clear_bit(p->prio, array->bitmap);
232 static inline void enqueue_task(struct task_struct *p, prio_array_t *array)
234 list_add_tail(&p->run_list, array->queue + p->prio);
235 __set_bit(p->prio, array->bitmap);
236 array->nr_active++;
237 p->array = array;
241 * effective_prio - return the priority that is based on the static
242 * priority but is modified by bonuses/penalties.
244 * We scale the actual sleep average [0 .... MAX_SLEEP_AVG]
245 * into the -5 ... 0 ... +5 bonus/penalty range.
247 * We use 25% of the full 0...39 priority range so that:
249 * 1) nice +19 interactive tasks do not preempt nice 0 CPU hogs.
250 * 2) nice -20 CPU hogs do not get preempted by nice 0 tasks.
252 * Both properties are important to certain workloads.
254 static inline int effective_prio(task_t *p)
256 int bonus, prio;
258 bonus = MAX_USER_PRIO*PRIO_BONUS_RATIO*p->sleep_avg/MAX_SLEEP_AVG/100 -
259 MAX_USER_PRIO*PRIO_BONUS_RATIO/100/2;
261 prio = p->static_prio - bonus;
262 if (prio < MAX_RT_PRIO)
263 prio = MAX_RT_PRIO;
264 if (prio > MAX_PRIO-1)
265 prio = MAX_PRIO-1;
266 return prio;
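/*
 * Worked example of the bonus above, assuming MAX_USER_PRIO == 40 (so
 * MAX_USER_PRIO*PRIO_BONUS_RATIO/100 == 10):
 *
 *	sleep_avg == 0			-> bonus == -5, prio = static_prio + 5
 *	sleep_avg == MAX_SLEEP_AVG/2	-> bonus ==  0, prio = static_prio
 *	sleep_avg == MAX_SLEEP_AVG	-> bonus == +5, prio = static_prio - 5
 *
 * i.e. the full sleep-average range maps onto the -5 ... +5 window,
 * clamped to [MAX_RT_PRIO, MAX_PRIO-1] by effective_prio() above.
 */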
270 * activate_task - move a task to the runqueue.
272 * Also update all the scheduling statistics stuff. (sleep average
273 * calculation, priority modifiers, etc.)
275 static inline void activate_task(task_t *p, runqueue_t *rq)
277 unsigned long sleep_time = jiffies - p->sleep_timestamp;
278 prio_array_t *array = rq->active;
280 if (!rt_task(p) && sleep_time) {
282 * This code gives a bonus to interactive tasks. We update
283 * an 'average sleep time' value here, based on
284 * sleep_timestamp. The more time a task spends sleeping,
285 * the higher the average gets - and the higher the priority
286 * boost gets as well.
288 p->sleep_avg += sleep_time;
289 if (p->sleep_avg > MAX_SLEEP_AVG)
290 p->sleep_avg = MAX_SLEEP_AVG;
291 p->prio = effective_prio(p);
293 enqueue_task(p, array);
294 rq->nr_running++;
298 * deactivate_task - remove a task from the runqueue.
300 static inline void deactivate_task(struct task_struct *p, runqueue_t *rq)
302 rq->nr_running--;
303 if (p->state == TASK_UNINTERRUPTIBLE)
304 rq->nr_uninterruptible++;
305 dequeue_task(p, p->array);
306 p->array = NULL;
310 * resched_task - mark a task 'to be rescheduled now'.
312 * On UP this means the setting of the need_resched flag, on SMP it
313 * might also involve a cross-CPU call to trigger the scheduler on
314 * the target CPU.
316 static inline void resched_task(task_t *p)
318 #ifdef CONFIG_SMP
319 int need_resched, nrpolling;
321 preempt_disable();
322 /* minimise the chance of sending an interrupt to poll_idle() */
323 nrpolling = test_tsk_thread_flag(p,TIF_POLLING_NRFLAG);
324 need_resched = test_and_set_tsk_thread_flag(p,TIF_NEED_RESCHED);
325 nrpolling |= test_tsk_thread_flag(p,TIF_POLLING_NRFLAG);
327 if (!need_resched && !nrpolling && (task_cpu(p) != smp_processor_id()))
328 smp_send_reschedule(task_cpu(p));
329 preempt_enable();
330 #else
331 set_tsk_need_resched(p);
332 #endif
335 #ifdef CONFIG_SMP
338 * wait_task_inactive - wait for a thread to unschedule.
340 * The caller must ensure that the task *will* unschedule sometime soon,
341 * else this function might spin for a *long* time.
343 void wait_task_inactive(task_t * p)
345 unsigned long flags;
346 runqueue_t *rq;
348 repeat:
349 preempt_disable();
350 rq = task_rq(p);
351 if (unlikely(task_running(rq, p))) {
352 cpu_relax();
354 * enable/disable preemption just to make this
355 * a preemption point - we are busy-waiting
356 * anyway.
358 preempt_enable();
359 goto repeat;
361 rq = task_rq_lock(p, &flags);
362 if (unlikely(task_running(rq, p))) {
363 task_rq_unlock(rq, &flags);
364 preempt_enable();
365 goto repeat;
367 task_rq_unlock(rq, &flags);
368 preempt_enable();
370 #endif
373 * kick_if_running - kick the remote CPU if the task is running currently.
375 * This code is used by the signal code to signal tasks
376 * which are in user-mode, as quickly as possible.
378 * (Note that we do this lockless - if the task does anything
379 * while the message is in flight then it will notice the
380 * sigpending condition anyway.)
382 void kick_if_running(task_t * p)
384 if ((task_running(task_rq(p), p)) && (task_cpu(p) != smp_processor_id()))
385 resched_task(p);
388 /***
389 * try_to_wake_up - wake up a thread
390 * @p: the to-be-woken-up thread
391 * @sync: do a synchronous wakeup?
393 * Put it on the run-queue if it's not already there. The "current"
394 * thread is always on the run-queue (except when the actual
395 * re-schedule is in progress), and as such you're allowed to do
396 * the simpler "current->state = TASK_RUNNING" to mark yourself
397 * runnable without the overhead of this.
399 * returns failure only if the task is already active.
401 static int try_to_wake_up(task_t * p, int sync)
403 unsigned long flags;
404 int success = 0;
405 long old_state;
406 runqueue_t *rq;
408 repeat_lock_task:
409 rq = task_rq_lock(p, &flags);
410 old_state = p->state;
411 if (!p->array) {
413 * Fast-migrate the task if it's not running or runnable
414 * currently. Do not violate hard affinity.
416 if (unlikely(sync && !task_running(rq, p) &&
417 (task_cpu(p) != smp_processor_id()) &&
418 (p->cpus_allowed & (1UL << smp_processor_id())))) {
420 set_task_cpu(p, smp_processor_id());
421 task_rq_unlock(rq, &flags);
422 goto repeat_lock_task;
424 if (old_state == TASK_UNINTERRUPTIBLE)
425 rq->nr_uninterruptible--;
426 activate_task(p, rq);
428 if (p->prio < rq->curr->prio)
429 resched_task(rq->curr);
430 success = 1;
432 p->state = TASK_RUNNING;
433 task_rq_unlock(rq, &flags);
435 return success;
438 int wake_up_process(task_t * p)
440 return try_to_wake_up(p, 0);
444 * wake_up_forked_process - wake up a freshly forked process.
446 * This function will do some initial scheduler statistics housekeeping
447 * that must be done for every newly created process.
449 void wake_up_forked_process(task_t * p)
451 runqueue_t *rq = this_rq_lock();
453 p->state = TASK_RUNNING;
454 if (!rt_task(p)) {
456 * We decrease the sleep average of forking parents
457 * and children as well, to keep max-interactive tasks
458 * from forking tasks that are max-interactive.
460 current->sleep_avg = current->sleep_avg * PARENT_PENALTY / 100;
461 p->sleep_avg = p->sleep_avg * CHILD_PENALTY / 100;
462 p->prio = effective_prio(p);
464 set_task_cpu(p, smp_processor_id());
465 activate_task(p, rq);
467 rq_unlock(rq);
471 * Potentially available exiting-child timeslices are
472 * retrieved here - this way the parent does not get
473 * penalized for creating too many threads.
475 * (this cannot be used to 'generate' timeslices
476 * artificially, because any timeslice recovered here
477 * was given away by the parent in the first place.)
479 void sched_exit(task_t * p)
481 unsigned long flags;
483 local_irq_save(flags);
484 if (p->first_time_slice) {
485 p->parent->time_slice += p->time_slice;
486 if (unlikely(p->parent->time_slice > MAX_TIMESLICE))
487 p->parent->time_slice = MAX_TIMESLICE;
489 local_irq_restore(flags);
491 * If the child was a (relative-) CPU hog then decrease
492 * the sleep_avg of the parent as well.
494 if (p->sleep_avg < p->parent->sleep_avg)
495 p->parent->sleep_avg = (p->parent->sleep_avg * EXIT_WEIGHT +
496 p->sleep_avg) / (EXIT_WEIGHT + 1);
500 * schedule_tail - first thing a freshly forked thread must call.
501 * @prev: the thread we just switched away from.
503 #if CONFIG_SMP || CONFIG_PREEMPT
504 asmlinkage void schedule_tail(task_t *prev)
506 finish_arch_switch(this_rq(), prev);
508 #endif
511 * context_switch - switch to the new MM and the new
512 * thread's register state.
514 static inline task_t * context_switch(task_t *prev, task_t *next)
516 struct mm_struct *mm = next->mm;
517 struct mm_struct *oldmm = prev->active_mm;
519 if (unlikely(!mm)) {
520 next->active_mm = oldmm;
521 atomic_inc(&oldmm->mm_count);
522 enter_lazy_tlb(oldmm, next, smp_processor_id());
523 } else
524 switch_mm(oldmm, mm, next, smp_processor_id());
526 if (unlikely(!prev->mm)) {
527 prev->active_mm = NULL;
528 mmdrop(oldmm);
531 /* Here we just switch the register state and the stack. */
532 switch_to(prev, next, prev);
534 return prev;
538 * nr_running, nr_uninterruptible and nr_context_switches:
540 * externally visible scheduler statistics: current number of runnable
541 * threads, current number of uninterruptible-sleeping threads, total
542 * number of context switches performed since bootup.
544 unsigned long nr_running(void)
546 unsigned long i, sum = 0;
548 for (i = 0; i < NR_CPUS; i++)
549 sum += cpu_rq(i)->nr_running;
551 return sum;
554 unsigned long nr_uninterruptible(void)
556 unsigned long i, sum = 0;
558 for (i = 0; i < NR_CPUS; i++)
559 sum += cpu_rq(i)->nr_uninterruptible;
561 return sum;
564 unsigned long nr_context_switches(void)
566 unsigned long i, sum = 0;
568 for (i = 0; i < NR_CPUS; i++)
569 sum += cpu_rq(i)->nr_switches;
571 return sum;
575 * double_rq_lock - safely lock two runqueues
577 * Note this does not disable interrupts like task_rq_lock,
578 * you need to do so manually before calling.
580 static inline void double_rq_lock(runqueue_t *rq1, runqueue_t *rq2)
582 if (rq1 == rq2)
583 spin_lock(&rq1->lock);
584 else {
585 if (rq1 < rq2) {
586 spin_lock(&rq1->lock);
587 spin_lock(&rq2->lock);
588 } else {
589 spin_lock(&rq2->lock);
590 spin_lock(&rq1->lock);
596 * double_rq_unlock - safely unlock two runqueues
598 * Note this does not restore interrupts like task_rq_unlock,
599 * you need to do so manually after calling.
601 static inline void double_rq_unlock(runqueue_t *rq1, runqueue_t *rq2)
603 spin_unlock(&rq1->lock);
604 if (rq1 != rq2)
605 spin_unlock(&rq2->lock);
608 #if CONFIG_SMP
611 * double_lock_balance - lock the busiest runqueue
613 * this_rq is locked already. Recalculate nr_running if we have to
614 * drop the runqueue lock.
616 static inline unsigned int double_lock_balance(runqueue_t *this_rq,
617 runqueue_t *busiest, int this_cpu, int idle, unsigned int nr_running)
619 if (unlikely(!spin_trylock(&busiest->lock))) {
620 if (busiest < this_rq) {
621 spin_unlock(&this_rq->lock);
622 spin_lock(&busiest->lock);
623 spin_lock(&this_rq->lock);
624 /* Need to recalculate nr_running */
625 if (idle || (this_rq->nr_running > this_rq->prev_nr_running[this_cpu]))
626 nr_running = this_rq->nr_running;
627 else
628 nr_running = this_rq->prev_nr_running[this_cpu];
629 } else
630 spin_lock(&busiest->lock);
632 return nr_running;
636 * find_busiest_queue - find the busiest runqueue.
638 static inline runqueue_t *find_busiest_queue(runqueue_t *this_rq, int this_cpu, int idle, int *imbalance)
640 int nr_running, load, max_load, i;
641 runqueue_t *busiest, *rq_src;
644 * We search all runqueues to find the most busy one.
645 * We do this lockless to reduce cache-bouncing overhead,
646 * we re-check the 'best' source CPU later on again, with
647 * the lock held.
649 * We fend off statistical fluctuations in runqueue lengths by
650 * saving the runqueue length during the previous load-balancing
651 * operation and using the smaller of the current and saved lengths.
652 * If a runqueue has been long for a sufficiently long time then
653 * we recognize it and pull tasks from it.
655 * The 'current runqueue length' is a statistical maximum variable,
656 * for that one we take the longer one - to avoid fluctuations in
657 * the other direction. So for a load-balance to happen it needs
658 * stable long runqueue on the target CPU and stable short runqueue
659 * on the local runqueue.
661 * We make an exception if this CPU is about to become idle - in
662 * that case we are less picky about moving a task across CPUs and
663 * take what can be taken.
665 if (idle || (this_rq->nr_running > this_rq->prev_nr_running[this_cpu]))
666 nr_running = this_rq->nr_running;
667 else
668 nr_running = this_rq->prev_nr_running[this_cpu];
670 busiest = NULL;
671 max_load = 1;
672 for (i = 0; i < NR_CPUS; i++) {
673 if (!cpu_online(i))
674 continue;
676 rq_src = cpu_rq(i);
677 if (idle || (rq_src->nr_running < this_rq->prev_nr_running[i]))
678 load = rq_src->nr_running;
679 else
680 load = this_rq->prev_nr_running[i];
681 this_rq->prev_nr_running[i] = rq_src->nr_running;
683 if ((load > max_load) && (rq_src != this_rq)) {
684 busiest = rq_src;
685 max_load = load;
689 if (likely(!busiest))
690 goto out;
692 *imbalance = (max_load - nr_running) / 2;
694 /* It needs at least a ~25% imbalance to trigger balancing. */
695 if (!idle && (*imbalance < (max_load + 3)/4)) {
696 busiest = NULL;
697 goto out;
700 nr_running = double_lock_balance(this_rq, busiest, this_cpu, idle, nr_running);
702 * Make sure nothing changed since we checked the
703 * runqueue length.
705 if (busiest->nr_running <= nr_running + 1) {
706 spin_unlock(&busiest->lock);
707 busiest = NULL;
709 out:
710 return busiest;
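/*
 * Worked example of the ~25% threshold in find_busiest_queue(): with a
 * local length of 4 and a busiest length of 8, *imbalance == (8-4)/2 == 2
 * and the threshold (8+3)/4 == 2, so balancing goes ahead; with a busiest
 * length of 6 the imbalance is only 1 against a threshold of 2, so a busy
 * (non-idle) CPU leaves the queues alone.
 */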
714 * pull_task - move a task from a remote runqueue to the local runqueue.
715 * Both runqueues must be locked.
717 static inline void pull_task(runqueue_t *src_rq, prio_array_t *src_array, task_t *p, runqueue_t *this_rq, int this_cpu)
719 dequeue_task(p, src_array);
720 src_rq->nr_running--;
721 set_task_cpu(p, this_cpu);
722 this_rq->nr_running++;
723 enqueue_task(p, this_rq->active);
725 * Note that idle threads have a prio of MAX_PRIO, for this test
726 * to be always true for them.
728 if (p->prio < this_rq->curr->prio)
729 set_need_resched();
733 * Current runqueue is empty, or rebalance tick: if there is an
734 * imbalance (current runqueue is too short) then pull from
735 * busiest runqueue(s).
737 * We call this with the current runqueue locked,
738 * irqs disabled.
740 static void load_balance(runqueue_t *this_rq, int idle)
742 int imbalance, idx, this_cpu = smp_processor_id();
743 runqueue_t *busiest;
744 prio_array_t *array;
745 struct list_head *head, *curr;
746 task_t *tmp;
748 busiest = find_busiest_queue(this_rq, this_cpu, idle, &imbalance);
749 if (!busiest)
750 goto out;
753 * We first consider expired tasks. Those will likely not be
754 * executed in the near future, and they are most likely to
755 * be cache-cold, thus switching CPUs has the least effect
756 * on them.
758 if (busiest->expired->nr_active)
759 array = busiest->expired;
760 else
761 array = busiest->active;
763 new_array:
764 /* Start searching at priority 0: */
765 idx = 0;
766 skip_bitmap:
767 if (!idx)
768 idx = sched_find_first_bit(array->bitmap);
769 else
770 idx = find_next_bit(array->bitmap, MAX_PRIO, idx);
771 if (idx == MAX_PRIO) {
772 if (array == busiest->expired) {
773 array = busiest->active;
774 goto new_array;
776 goto out_unlock;
779 head = array->queue + idx;
780 curr = head->prev;
781 skip_queue:
782 tmp = list_entry(curr, task_t, run_list);
785 * We do not migrate tasks that are:
786 * 1) running (obviously), or
787 * 2) cannot be migrated to this CPU due to cpus_allowed, or
788 * 3) are cache-hot on their current CPU.
791 #define CAN_MIGRATE_TASK(p,rq,this_cpu) \
792 ((jiffies - (p)->sleep_timestamp > cache_decay_ticks) && \
793 !task_running(rq, p) && \
794 ((p)->cpus_allowed & (1UL << (this_cpu))))
796 curr = curr->prev;
798 if (!CAN_MIGRATE_TASK(tmp, busiest, this_cpu)) {
799 if (curr != head)
800 goto skip_queue;
801 idx++;
802 goto skip_bitmap;
804 pull_task(busiest, array, tmp, this_rq, this_cpu);
805 if (!idle && --imbalance) {
806 if (curr != head)
807 goto skip_queue;
808 idx++;
809 goto skip_bitmap;
811 out_unlock:
812 spin_unlock(&busiest->lock);
813 out:
818 * One of the idle_cpu_tick() and busy_cpu_tick() functions will
819 * get called every timer tick, on every CPU. Our balancing action
820 * frequency and balancing agressivity depends on whether the CPU is
821 * idle or not.
823 * busy-rebalance every 250 msecs. idle-rebalance every 1 msec. (or on
824 * systems with HZ=100, every 10 msecs.)
826 #define BUSY_REBALANCE_TICK (HZ/4 ?: 1)
827 #define IDLE_REBALANCE_TICK (HZ/1000 ?: 1)
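/*
 * For illustration: with HZ == 1000 these evaluate to 250 and 1 ticks;
 * with HZ == 100, HZ/1000 is 0 and the GNU "?:" fallback yields 1 tick,
 * i.e. the 10 msec idle-rebalance case mentioned above.
 */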
829 static inline void idle_tick(runqueue_t *rq)
831 if (jiffies % IDLE_REBALANCE_TICK)
832 return;
833 spin_lock(&rq->lock);
834 load_balance(rq, 1);
835 spin_unlock(&rq->lock);
838 #endif
841 * We place interactive tasks back into the active array, if possible.
843 * To guarantee that this does not starve expired tasks we ignore the
844 * interactivity of a task if the first expired task had to wait more
845 * than a 'reasonable' amount of time. This deadline timeout is
846 * load-dependent, as the frequency of array switches decreases with
847 * increasing number of running tasks:
849 #define EXPIRED_STARVING(rq) \
850 ((rq)->expired_timestamp && \
851 (jiffies - (rq)->expired_timestamp >= \
852 STARVATION_LIMIT * ((rq)->nr_running) + 1))
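/*
 * Worked example, assuming HZ == 1000 (STARVATION_LIMIT == 2000 ticks):
 * with 3 runnable tasks the first expired task waits at most
 * 2000*3 + 1 == 6001 ticks (~6 seconds) before interactivity is ignored
 * and the arrays get switched.
 */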
855 * This function gets called by the timer code, with HZ frequency.
856 * We call it with interrupts disabled.
858 void scheduler_tick(int user_ticks, int sys_ticks)
860 int cpu = smp_processor_id();
861 runqueue_t *rq = this_rq();
862 task_t *p = current;
864 run_local_timers();
865 if (p == rq->idle) {
866 /* note: this timer irq context must be accounted for as well */
867 if (irq_count() - HARDIRQ_OFFSET >= SOFTIRQ_OFFSET)
868 kstat.per_cpu_system[cpu] += sys_ticks;
869 #if CONFIG_SMP
870 idle_tick(rq);
871 #endif
872 return;
874 if (TASK_NICE(p) > 0)
875 kstat.per_cpu_nice[cpu] += user_ticks;
876 else
877 kstat.per_cpu_user[cpu] += user_ticks;
878 kstat.per_cpu_system[cpu] += sys_ticks;
880 /* Task might have expired already, but not scheduled off yet */
881 if (p->array != rq->active) {
882 set_tsk_need_resched(p);
883 return;
885 spin_lock(&rq->lock);
886 if (unlikely(rt_task(p))) {
888 * RR tasks need a special form of timeslice management.
889 * FIFO tasks have no timeslices.
891 if ((p->policy == SCHED_RR) && !--p->time_slice) {
892 p->time_slice = task_timeslice(p);
893 p->first_time_slice = 0;
894 set_tsk_need_resched(p);
896 /* put it at the end of the queue: */
897 dequeue_task(p, rq->active);
898 enqueue_task(p, rq->active);
900 goto out;
903 * The task was running during this tick - update the
904 * time slice counter and the sleep average. Note: we
905 * do not update a thread's priority until it either
906 * goes to sleep or uses up its timeslice. This makes
907 * it possible for interactive tasks to use up their
908 * timeslices at their highest priority levels.
910 if (p->sleep_avg)
911 p->sleep_avg--;
912 if (!--p->time_slice) {
913 dequeue_task(p, rq->active);
914 set_tsk_need_resched(p);
915 p->prio = effective_prio(p);
916 p->time_slice = task_timeslice(p);
917 p->first_time_slice = 0;
919 if (!TASK_INTERACTIVE(p) || EXPIRED_STARVING(rq)) {
920 if (!rq->expired_timestamp)
921 rq->expired_timestamp = jiffies;
922 enqueue_task(p, rq->expired);
923 } else
924 enqueue_task(p, rq->active);
926 out:
927 #if CONFIG_SMP
928 if (!(jiffies % BUSY_REBALANCE_TICK))
929 load_balance(rq, 0);
930 #endif
931 spin_unlock(&rq->lock);
934 void scheduling_functions_start_here(void) { }
937 * schedule() is the main scheduler function.
939 asmlinkage void schedule(void)
941 task_t *prev, *next;
942 runqueue_t *rq;
943 prio_array_t *array;
944 struct list_head *queue;
945 int idx;
948 * Test if we are atomic. Since do_exit() needs to call into
949 * schedule() atomically, we ignore that path for now.
950 * Otherwise, whine if we are scheduling when we should not be.
952 if (likely(current->state != TASK_ZOMBIE)) {
953 if (unlikely(in_atomic())) {
954 printk(KERN_ERR "bad: scheduling while atomic!\n");
955 dump_stack();
959 #if CONFIG_DEBUG_HIGHMEM
960 check_highmem_ptes();
961 #endif
962 need_resched:
963 preempt_disable();
964 prev = current;
965 rq = this_rq();
967 release_kernel_lock(prev);
968 prev->sleep_timestamp = jiffies;
969 spin_lock_irq(&rq->lock);
972 * if entering off of a kernel preemption go straight
973 * to picking the next task.
975 if (unlikely(preempt_count() & PREEMPT_ACTIVE))
976 goto pick_next_task;
978 switch (prev->state) {
979 case TASK_INTERRUPTIBLE:
980 if (unlikely(signal_pending(prev))) {
981 prev->state = TASK_RUNNING;
982 break;
984 default:
985 deactivate_task(prev, rq);
986 case TASK_RUNNING:
989 pick_next_task:
990 if (unlikely(!rq->nr_running)) {
991 #if CONFIG_SMP
992 load_balance(rq, 1);
993 if (rq->nr_running)
994 goto pick_next_task;
995 #endif
996 next = rq->idle;
997 rq->expired_timestamp = 0;
998 goto switch_tasks;
1001 array = rq->active;
1002 if (unlikely(!array->nr_active)) {
1004 * Switch the active and expired arrays.
1006 rq->active = rq->expired;
1007 rq->expired = array;
1008 array = rq->active;
1009 rq->expired_timestamp = 0;
1012 idx = sched_find_first_bit(array->bitmap);
1013 queue = array->queue + idx;
1014 next = list_entry(queue->next, task_t, run_list);
1016 switch_tasks:
1017 prefetch(next);
1018 clear_tsk_need_resched(prev);
1020 if (likely(prev != next)) {
1021 rq->nr_switches++;
1022 rq->curr = next;
1024 prepare_arch_switch(rq, next);
1025 prev = context_switch(prev, next);
1026 barrier();
1027 rq = this_rq();
1028 finish_arch_switch(rq, prev);
1029 } else
1030 spin_unlock_irq(&rq->lock);
1032 reacquire_kernel_lock(current);
1033 preempt_enable_no_resched();
1034 if (test_thread_flag(TIF_NEED_RESCHED))
1035 goto need_resched;
1038 #ifdef CONFIG_PREEMPT
1040 * this is the entry point to schedule() from in-kernel preemption
1041 * off of preempt_enable. Kernel preemptions off return from interrupt
1042 * occur there and call schedule directly.
1044 asmlinkage void preempt_schedule(void)
1046 struct thread_info *ti = current_thread_info();
1049 * If there is a non-zero preempt_count or interrupts are disabled,
1050 * we do not want to preempt the current task. Just return..
1052 if (unlikely(ti->preempt_count || irqs_disabled()))
1053 return;
1055 need_resched:
1056 ti->preempt_count = PREEMPT_ACTIVE;
1057 schedule();
1058 ti->preempt_count = 0;
1060 /* we could miss a preemption opportunity between schedule and now */
1061 barrier();
1062 if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
1063 goto need_resched;
1065 #endif /* CONFIG_PREEMPT */
1067 int default_wake_function(wait_queue_t *curr, unsigned mode, int sync)
1069 task_t *p = curr->task;
1070 return ((p->state & mode) && try_to_wake_up(p, sync));
1074 * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
1075 * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
1076 * number) then we wake all the non-exclusive tasks and one exclusive task.
1078 * There are circumstances in which we can try to wake a task which has already
1079 * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
1080 * zero in this (rare) case, and we handle it by continuing to scan the queue.
1082 static void __wake_up_common(wait_queue_head_t *q, unsigned int mode, int nr_exclusive, int sync)
1084 struct list_head *tmp, *next;
1086 list_for_each_safe(tmp, next, &q->task_list) {
1087 wait_queue_t *curr;
1088 unsigned flags;
1089 curr = list_entry(tmp, wait_queue_t, task_list);
1090 flags = curr->flags;
1091 if (curr->func(curr, mode, sync) &&
1092 (flags & WQ_FLAG_EXCLUSIVE) &&
1093 !--nr_exclusive)
1094 break;
1099 * __wake_up - wake up threads blocked on a waitqueue.
1100 * @q: the waitqueue
1101 * @mode: which threads
1102 * @nr_exclusive: how many wake-one or wake-many threads to wake up
1104 void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
1106 unsigned long flags;
1108 if (unlikely(!q))
1109 return;
1111 spin_lock_irqsave(&q->lock, flags);
1112 __wake_up_common(q, mode, nr_exclusive, 0);
1113 spin_unlock_irqrestore(&q->lock, flags);
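/*
 * Usage sketch: callers normally reach this through the wake_up() family
 * of macros in <linux/wait.h>, which expand (roughly) as follows:
 *
 *	DECLARE_WAIT_QUEUE_HEAD(my_wq);
 *	...
 *	wake_up(&my_wq);
 *		== __wake_up(&my_wq, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1);
 *	wake_up_interruptible(&my_wq);
 *		== __wake_up(&my_wq, TASK_INTERRUPTIBLE, 1);
 */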
1117 * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
1119 void __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
1121 __wake_up_common(q, mode, 1, 0);
1124 #if CONFIG_SMP
1127 * __wake_up_sync - wake up threads blocked on a waitqueue.
1128 * @q: the waitqueue
1129 * @mode: which threads
1130 * @nr_exclusive: how many wake-one or wake-many threads to wake up
1132 * The sync wakeup differs in that the waker knows that it will schedule
1133 * away soon, so while the target thread will be woken up, it will not
1134 * be migrated to another CPU - ie. the two threads are 'synchronized'
1135 * with each other. This can prevent needless bouncing between CPUs.
1137 void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
1139 unsigned long flags;
1141 if (unlikely(!q))
1142 return;
1144 spin_lock_irqsave(&q->lock, flags);
1145 if (likely(nr_exclusive))
1146 __wake_up_common(q, mode, nr_exclusive, 1);
1147 else
1148 __wake_up_common(q, mode, nr_exclusive, 0);
1149 spin_unlock_irqrestore(&q->lock, flags);
1152 #endif
1154 void complete(struct completion *x)
1156 unsigned long flags;
1158 spin_lock_irqsave(&x->wait.lock, flags);
1159 x->done++;
1160 __wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1, 0);
1161 spin_unlock_irqrestore(&x->wait.lock, flags);
1164 void wait_for_completion(struct completion *x)
1166 might_sleep();
1167 spin_lock_irq(&x->wait.lock);
1168 if (!x->done) {
1169 DECLARE_WAITQUEUE(wait, current);
1171 wait.flags |= WQ_FLAG_EXCLUSIVE;
1172 __add_wait_queue_tail(&x->wait, &wait);
1173 do {
1174 __set_current_state(TASK_UNINTERRUPTIBLE);
1175 spin_unlock_irq(&x->wait.lock);
1176 schedule();
1177 spin_lock_irq(&x->wait.lock);
1178 } while (!x->done);
1179 __remove_wait_queue(&x->wait, &wait);
1181 x->done--;
1182 spin_unlock_irq(&x->wait.lock);
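/*
 * Usage sketch (hypothetical caller) - this is the pattern that
 * set_cpus_allowed() below relies on:
 *
 *	struct completion done;
 *
 *	init_completion(&done);
 *	... hand &done to another context, which calls complete(&done)
 *	    when the work is finished ...
 *	wait_for_completion(&done);
 *
 * complete() may be called from any context that can take a spinlock with
 * local interrupts disabled; wait_for_completion() may sleep.
 */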
1185 #define SLEEP_ON_VAR \
1186 unsigned long flags; \
1187 wait_queue_t wait; \
1188 init_waitqueue_entry(&wait, current);
1190 #define SLEEP_ON_HEAD \
1191 spin_lock_irqsave(&q->lock,flags); \
1192 __add_wait_queue(q, &wait); \
1193 spin_unlock(&q->lock);
1195 #define SLEEP_ON_TAIL \
1196 spin_lock_irq(&q->lock); \
1197 __remove_wait_queue(q, &wait); \
1198 spin_unlock_irqrestore(&q->lock, flags);
1200 void interruptible_sleep_on(wait_queue_head_t *q)
1202 SLEEP_ON_VAR
1204 current->state = TASK_INTERRUPTIBLE;
1206 SLEEP_ON_HEAD
1207 schedule();
1208 SLEEP_ON_TAIL
1211 long interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
1213 SLEEP_ON_VAR
1215 current->state = TASK_INTERRUPTIBLE;
1217 SLEEP_ON_HEAD
1218 timeout = schedule_timeout(timeout);
1219 SLEEP_ON_TAIL
1221 return timeout;
1224 void sleep_on(wait_queue_head_t *q)
1226 SLEEP_ON_VAR
1228 current->state = TASK_UNINTERRUPTIBLE;
1230 SLEEP_ON_HEAD
1231 schedule();
1232 SLEEP_ON_TAIL
1235 long sleep_on_timeout(wait_queue_head_t *q, long timeout)
1237 SLEEP_ON_VAR
1239 current->state = TASK_UNINTERRUPTIBLE;
1241 SLEEP_ON_HEAD
1242 timeout = schedule_timeout(timeout);
1243 SLEEP_ON_TAIL
1245 return timeout;
1248 void scheduling_functions_end_here(void) { }
1250 void set_user_nice(task_t *p, long nice)
1252 unsigned long flags;
1253 prio_array_t *array;
1254 runqueue_t *rq;
1256 if (TASK_NICE(p) == nice || nice < -20 || nice > 19)
1257 return;
1259 * We have to be careful, if called from sys_setpriority(),
1260 * the task might be in the middle of scheduling on another CPU.
1262 rq = task_rq_lock(p, &flags);
1263 if (rt_task(p)) {
1264 p->static_prio = NICE_TO_PRIO(nice);
1265 goto out_unlock;
1267 array = p->array;
1268 if (array)
1269 dequeue_task(p, array);
1270 p->static_prio = NICE_TO_PRIO(nice);
1271 p->prio = NICE_TO_PRIO(nice);
1272 if (array) {
1273 enqueue_task(p, array);
1275 * If the task is running and lowered its priority,
1276 * or increased its priority then reschedule its CPU:
1278 if ((NICE_TO_PRIO(nice) < p->static_prio) ||
1279 task_running(rq, p))
1280 resched_task(rq->curr);
1282 out_unlock:
1283 task_rq_unlock(rq, &flags);
1286 #ifndef __alpha__
1289 * sys_nice - change the priority of the current process.
1290 * @increment: priority increment
1292 * sys_setpriority is a more generic, but much slower function that
1293 * does similar things.
1295 asmlinkage long sys_nice(int increment)
1297 int retval;
1298 long nice;
1301 * Setpriority might change our priority at the same moment.
1302 * We don't have to worry. Conceptually one call occurs first
1303 * and we have a single winner.
1305 if (increment < 0) {
1306 if (!capable(CAP_SYS_NICE))
1307 return -EPERM;
1308 if (increment < -40)
1309 increment = -40;
1311 if (increment > 40)
1312 increment = 40;
1314 nice = PRIO_TO_NICE(current->static_prio) + increment;
1315 if (nice < -20)
1316 nice = -20;
1317 if (nice > 19)
1318 nice = 19;
1320 retval = security_ops->task_setnice(current, nice);
1321 if (retval)
1322 return retval;
1324 set_user_nice(current, nice);
1325 return 0;
1328 #endif
1331 * task_prio - return the priority value of a given task.
1332 * @p: the task in question.
1334 * This is the priority value as seen by users in /proc.
1335 * RT tasks are offset by -MAX_USER_RT_PRIO and thus show up negative;
1336 * normal tasks range from 0 to MAX_USER_PRIO-1, with nice 0 around 20.
1338 int task_prio(task_t *p)
1340 return p->prio - MAX_USER_RT_PRIO;
1344 * task_nice - return the nice value of a given task.
1345 * @p: the task in question.
1347 int task_nice(task_t *p)
1349 return TASK_NICE(p);
1353 * task_curr - is this task currently executing on a CPU?
1354 * @p: the task in question.
1356 int task_curr(task_t *p)
1358 return cpu_curr(task_cpu(p)) == p;
1362 * idle_cpu - is a given cpu idle currently?
1363 * @cpu: the processor in question.
1365 int idle_cpu(int cpu)
1367 return cpu_curr(cpu) == cpu_rq(cpu)->idle;
1371 * find_process_by_pid - find a process with a matching PID value.
1372 * @pid: the pid in question.
1374 static inline task_t *find_process_by_pid(pid_t pid)
1376 return pid ? find_task_by_pid(pid) : current;
1380 * setscheduler - change the scheduling policy and/or RT priority of a thread.
1382 static int setscheduler(pid_t pid, int policy, struct sched_param *param)
1384 struct sched_param lp;
1385 int retval = -EINVAL;
1386 prio_array_t *array;
1387 unsigned long flags;
1388 runqueue_t *rq;
1389 task_t *p;
1391 if (!param || pid < 0)
1392 goto out_nounlock;
1394 retval = -EFAULT;
1395 if (copy_from_user(&lp, param, sizeof(struct sched_param)))
1396 goto out_nounlock;
1399 * We play safe to avoid deadlocks.
1401 read_lock_irq(&tasklist_lock);
1403 p = find_process_by_pid(pid);
1405 retval = -ESRCH;
1406 if (!p)
1407 goto out_unlock_tasklist;
1410 * To be able to change p->policy safely, the appropriate
1411 * runqueue lock must be held.
1413 rq = task_rq_lock(p, &flags);
1415 if (policy < 0)
1416 policy = p->policy;
1417 else {
1418 retval = -EINVAL;
1419 if (policy != SCHED_FIFO && policy != SCHED_RR &&
1420 policy != SCHED_NORMAL)
1421 goto out_unlock;
1425 * Valid priorities for SCHED_FIFO and SCHED_RR are
1426 * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL is 0.
1428 retval = -EINVAL;
1429 if (lp.sched_priority < 0 || lp.sched_priority > MAX_USER_RT_PRIO-1)
1430 goto out_unlock;
1431 if ((policy == SCHED_NORMAL) != (lp.sched_priority == 0))
1432 goto out_unlock;
1434 retval = -EPERM;
1435 if ((policy == SCHED_FIFO || policy == SCHED_RR) &&
1436 !capable(CAP_SYS_NICE))
1437 goto out_unlock;
1438 if ((current->euid != p->euid) && (current->euid != p->uid) &&
1439 !capable(CAP_SYS_NICE))
1440 goto out_unlock;
1442 retval = security_ops->task_setscheduler(p, policy, &lp);
1443 if (retval)
1444 goto out_unlock;
1446 array = p->array;
1447 if (array)
1448 deactivate_task(p, task_rq(p));
1449 retval = 0;
1450 p->policy = policy;
1451 p->rt_priority = lp.sched_priority;
1452 if (policy != SCHED_NORMAL)
1453 p->prio = MAX_USER_RT_PRIO-1 - p->rt_priority;
1454 else
1455 p->prio = p->static_prio;
1456 if (array)
1457 activate_task(p, task_rq(p));
1459 out_unlock:
1460 task_rq_unlock(rq, &flags);
1461 out_unlock_tasklist:
1462 read_unlock_irq(&tasklist_lock);
1464 out_nounlock:
1465 return retval;
1469 * sys_sched_setscheduler - set/change the scheduler policy and RT priority
1470 * @pid: the pid in question.
1471 * @policy: new policy
1472 * @param: structure containing the new RT priority.
1474 asmlinkage long sys_sched_setscheduler(pid_t pid, int policy,
1475 struct sched_param *param)
1477 return setscheduler(pid, policy, param);
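/*
 * Usage sketch (hypothetical userspace caller, via the C library wrapper
 * for the syscall above):
 *
 *	struct sched_param sp = { .sched_priority = 50 };
 *
 *	if (sched_setscheduler(pid, SCHED_FIFO, &sp) < 0)
 *		perror("sched_setscheduler");
 *
 * Per the checks in setscheduler(), this needs CAP_SYS_NICE and a priority
 * in 1..MAX_USER_RT_PRIO-1 for SCHED_FIFO/SCHED_RR, or priority 0 for
 * SCHED_NORMAL.
 */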
1481 * sys_sched_setparam - set/change the RT priority of a thread
1482 * @pid: the pid in question.
1483 * @param: structure containing the new RT priority.
1485 asmlinkage long sys_sched_setparam(pid_t pid, struct sched_param *param)
1487 return setscheduler(pid, -1, param);
1491 * sys_sched_getscheduler - get the policy (scheduling class) of a thread
1492 * @pid: the pid in question.
1494 asmlinkage long sys_sched_getscheduler(pid_t pid)
1496 int retval = -EINVAL;
1497 task_t *p;
1499 if (pid < 0)
1500 goto out_nounlock;
1502 retval = -ESRCH;
1503 read_lock(&tasklist_lock);
1504 p = find_process_by_pid(pid);
1505 if (p) {
1506 retval = security_ops->task_getscheduler(p);
1507 if (!retval)
1508 retval = p->policy;
1510 read_unlock(&tasklist_lock);
1512 out_nounlock:
1513 return retval;
1517 * sys_sched_getparam - get the RT priority of a thread
1518 * @pid: the pid in question.
1519 * @param: structure containing the RT priority.
1521 asmlinkage long sys_sched_getparam(pid_t pid, struct sched_param *param)
1523 struct sched_param lp;
1524 int retval = -EINVAL;
1525 task_t *p;
1527 if (!param || pid < 0)
1528 goto out_nounlock;
1530 read_lock(&tasklist_lock);
1531 p = find_process_by_pid(pid);
1532 retval = -ESRCH;
1533 if (!p)
1534 goto out_unlock;
1536 retval = security_ops->task_getscheduler(p);
1537 if (retval)
1538 goto out_unlock;
1540 lp.sched_priority = p->rt_priority;
1541 read_unlock(&tasklist_lock);
1544 * This one might sleep, we cannot do it with a spinlock held ...
1546 retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
1548 out_nounlock:
1549 return retval;
1551 out_unlock:
1552 read_unlock(&tasklist_lock);
1553 return retval;
1557 * sys_sched_setaffinity - set the cpu affinity of a process
1558 * @pid: pid of the process
1559 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
1560 * @user_mask_ptr: user-space pointer to the new cpu mask
1562 asmlinkage int sys_sched_setaffinity(pid_t pid, unsigned int len,
1563 unsigned long *user_mask_ptr)
1565 unsigned long new_mask;
1566 int retval;
1567 task_t *p;
1569 if (len < sizeof(new_mask))
1570 return -EINVAL;
1572 if (copy_from_user(&new_mask, user_mask_ptr, sizeof(new_mask)))
1573 return -EFAULT;
1575 new_mask &= cpu_online_map;
1576 if (!new_mask)
1577 return -EINVAL;
1579 read_lock(&tasklist_lock);
1581 p = find_process_by_pid(pid);
1582 if (!p) {
1583 read_unlock(&tasklist_lock);
1584 return -ESRCH;
1588 * It is not safe to call set_cpus_allowed with the
1589 * tasklist_lock held. We will bump the task_struct's
1590 * usage count and then drop tasklist_lock.
1592 get_task_struct(p);
1593 read_unlock(&tasklist_lock);
1595 retval = -EPERM;
1596 if ((current->euid != p->euid) && (current->euid != p->uid) &&
1597 !capable(CAP_SYS_NICE))
1598 goto out_unlock;
1600 retval = 0;
1601 set_cpus_allowed(p, new_mask);
1603 out_unlock:
1604 put_task_struct(p);
1605 return retval;
1609 * sys_sched_getaffinity - get the cpu affinity of a process
1610 * @pid: pid of the process
1611 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
1612 * @user_mask_ptr: user-space pointer to hold the current cpu mask
1614 asmlinkage int sys_sched_getaffinity(pid_t pid, unsigned int len,
1615 unsigned long *user_mask_ptr)
1617 unsigned int real_len;
1618 unsigned long mask;
1619 int retval;
1620 task_t *p;
1622 real_len = sizeof(mask);
1623 if (len < real_len)
1624 return -EINVAL;
1626 read_lock(&tasklist_lock);
1628 retval = -ESRCH;
1629 p = find_process_by_pid(pid);
1630 if (!p)
1631 goto out_unlock;
1633 retval = 0;
1634 mask = p->cpus_allowed & cpu_online_map;
1636 out_unlock:
1637 read_unlock(&tasklist_lock);
1638 if (retval)
1639 return retval;
1640 if (copy_to_user(user_mask_ptr, &mask, real_len))
1641 return -EFAULT;
1642 return real_len;
1646 * sys_sched_yield - yield the current processor to other threads.
1648 * this function yields the current CPU by moving the calling thread
1649 * to the expired array. If there are no other threads running on this
1650 * CPU then this function will return.
1652 asmlinkage long sys_sched_yield(void)
1654 runqueue_t *rq = this_rq_lock();
1655 prio_array_t *array = current->array;
1658 * We implement yielding by moving the task into the expired
1659 * queue.
1661 * (special rule: RT tasks will just roundrobin in the active
1662 * array.)
1664 if (likely(!rt_task(current))) {
1665 dequeue_task(current, array);
1666 enqueue_task(current, rq->expired);
1667 } else {
1668 list_del(&current->run_list);
1669 list_add_tail(&current->run_list, array->queue + current->prio);
1672 * Since we are going to call schedule() anyway, there's
1673 * no need to preempt:
1675 _raw_spin_unlock(&rq->lock);
1676 preempt_enable_no_resched();
1678 schedule();
1680 return 0;
1683 void __cond_resched(void)
1685 set_current_state(TASK_RUNNING);
1686 schedule();
1690 * yield - yield the current processor to other threads.
1692 * this is a shortcut for kernel-space yielding - it marks the
1693 * thread runnable and calls sys_sched_yield().
1695 void yield(void)
1697 set_current_state(TASK_RUNNING);
1698 sys_sched_yield();
1702 * sys_sched_get_priority_max - return maximum RT priority.
1703 * @policy: scheduling class.
1705 * this syscall returns the maximum rt_priority that can be used
1706 * by a given scheduling class.
1708 asmlinkage long sys_sched_get_priority_max(int policy)
1710 int ret = -EINVAL;
1712 switch (policy) {
1713 case SCHED_FIFO:
1714 case SCHED_RR:
1715 ret = MAX_USER_RT_PRIO-1;
1716 break;
1717 case SCHED_NORMAL:
1718 ret = 0;
1719 break;
1721 return ret;
1725 * sys_sched_get_priority_min - return minimum RT priority.
1726 * @policy: scheduling class.
1728 * this syscall returns the minimum rt_priority that can be used
1729 * by a given scheduling class.
1731 asmlinkage long sys_sched_get_priority_min(int policy)
1733 int ret = -EINVAL;
1735 switch (policy) {
1736 case SCHED_FIFO:
1737 case SCHED_RR:
1738 ret = 1;
1739 break;
1740 case SCHED_NORMAL:
1741 ret = 0;
1743 return ret;
1747 * sys_sched_rr_get_interval - return the default timeslice of a process.
1748 * @pid: pid of the process.
1749 * @interval: userspace pointer to the timeslice value.
1751 * this syscall writes the default timeslice value of a given process
1752 * into the user-space timespec buffer. A value of '0' means infinity.
1754 asmlinkage long sys_sched_rr_get_interval(pid_t pid, struct timespec *interval)
1756 int retval = -EINVAL;
1757 struct timespec t;
1758 task_t *p;
1760 if (pid < 0)
1761 goto out_nounlock;
1763 retval = -ESRCH;
1764 read_lock(&tasklist_lock);
1765 p = find_process_by_pid(pid);
1766 if (!p)
1767 goto out_unlock;
1769 retval = security_ops->task_getscheduler(p);
1770 if (retval)
1771 goto out_unlock;
1773 jiffies_to_timespec(p->policy & SCHED_FIFO ?
1774 0 : task_timeslice(p), &t);
1775 read_unlock(&tasklist_lock);
1776 retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
1777 out_nounlock:
1778 return retval;
1779 out_unlock:
1780 read_unlock(&tasklist_lock);
1781 return retval;
1784 static void show_task(task_t * p)
1786 unsigned long free = 0;
1787 task_t *relative;
1788 int state;
1789 static const char * stat_nam[] = { "R", "S", "D", "Z", "T", "W" };
1791 printk("%-13.13s ", p->comm);
1792 state = p->state ? __ffs(p->state) + 1 : 0;
1793 if (((unsigned) state) < sizeof(stat_nam)/sizeof(char *))
1794 printk(stat_nam[state]);
1795 else
1796 printk(" ");
1797 #if (BITS_PER_LONG == 32)
1798 if (p == current)
1799 printk(" current ");
1800 else
1801 printk(" %08lX ", thread_saved_pc(p));
1802 #else
1803 if (p == current)
1804 printk(" current task ");
1805 else
1806 printk(" %016lx ", thread_saved_pc(p));
1807 #endif
1809 unsigned long * n = (unsigned long *) (p+1);
1810 while (!*n)
1811 n++;
1812 free = (unsigned long) n - (unsigned long)(p+1);
1814 printk("%5lu %5d %6d ", free, p->pid, p->parent->pid);
1815 if ((relative = eldest_child(p)))
1816 printk("%5d ", relative->pid);
1817 else
1818 printk(" ");
1819 if ((relative = younger_sibling(p)))
1820 printk("%7d", relative->pid);
1821 else
1822 printk(" ");
1823 if ((relative = older_sibling(p)))
1824 printk(" %5d", relative->pid);
1825 else
1826 printk(" ");
1827 if (!p->mm)
1828 printk(" (L-TLB)\n");
1829 else
1830 printk(" (NOTLB)\n");
1833 extern void show_trace_task(task_t *tsk);
1834 show_trace_task(p);
1838 char * render_sigset_t(sigset_t *set, char *buffer)
1840 int i = _NSIG, x;
1841 do {
1842 i -= 4, x = 0;
1843 if (sigismember(set, i+1)) x |= 1;
1844 if (sigismember(set, i+2)) x |= 2;
1845 if (sigismember(set, i+3)) x |= 4;
1846 if (sigismember(set, i+4)) x |= 8;
1847 *buffer++ = (x < 10 ? '0' : 'a' - 10) + x;
1848 } while (i >= 4);
1849 *buffer = 0;
1850 return buffer;
1853 void show_state(void)
1855 task_t *g, *p;
1857 #if (BITS_PER_LONG == 32)
1858 printk("\n"
1859 " free sibling\n");
1860 printk(" task PC stack pid father child younger older\n");
1861 #else
1862 printk("\n"
1863 " free sibling\n");
1864 printk(" task PC stack pid father child younger older\n");
1865 #endif
1866 read_lock(&tasklist_lock);
1867 do_each_thread(g, p) {
1869 * reset the NMI-timeout, listing all files on a slow
1870 * console might take a lot of time:
1872 touch_nmi_watchdog();
1873 show_task(p);
1874 } while_each_thread(g, p);
1876 read_unlock(&tasklist_lock);
1879 void __init init_idle(task_t *idle, int cpu)
1881 runqueue_t *idle_rq = cpu_rq(cpu), *rq = cpu_rq(task_cpu(idle));
1882 unsigned long flags;
1884 local_irq_save(flags);
1885 double_rq_lock(idle_rq, rq);
1887 idle_rq->curr = idle_rq->idle = idle;
1888 deactivate_task(idle, rq);
1889 idle->array = NULL;
1890 idle->prio = MAX_PRIO;
1891 idle->state = TASK_RUNNING;
1892 set_task_cpu(idle, cpu);
1893 double_rq_unlock(idle_rq, rq);
1894 set_tsk_need_resched(idle);
1895 local_irq_restore(flags);
1897 /* Set the preempt count _outside_ the spinlocks! */
1898 #if CONFIG_PREEMPT
1899 idle->thread_info->preempt_count = (idle->lock_depth >= 0);
1900 #else
1901 idle->thread_info->preempt_count = 0;
1902 #endif
1905 #if CONFIG_SMP
1907 * This is how migration works:
1909 * 1) we queue a migration_req_t structure in the source CPU's
1910 * runqueue and wake up that CPU's migration thread.
1911 * 2) we down() the locked semaphore => thread blocks.
1912 * 3) migration thread wakes up (implicitly it forces the migrated
1913 * thread off the CPU)
1914 * 4) it gets the migration request and checks whether the migrated
1915 * task is still in the wrong runqueue.
1916 * 5) if it's in the wrong runqueue then the migration thread removes
1917 * it and puts it into the right queue.
1918 * 6) migration thread up()s the semaphore.
1919 * 7) we wake up and the migration is done.
1922 typedef struct {
1923 struct list_head list;
1924 task_t *task;
1925 struct completion done;
1926 } migration_req_t;
1929 * Change a given task's CPU affinity. Migrate the thread to a
1930 * proper CPU and schedule it away if the CPU it's executing on
1931 * is removed from the allowed bitmask.
1933 * NOTE: the caller must have a valid reference to the task, the
1934 * task must not exit() & deallocate itself prematurely. The
1935 * call is not atomic; no spinlocks may be held.
1937 void set_cpus_allowed(task_t *p, unsigned long new_mask)
1939 unsigned long flags;
1940 migration_req_t req;
1941 runqueue_t *rq;
1943 #if 0 /* FIXME: Grab cpu_lock, return error on this case. --RR */
1944 new_mask &= cpu_online_map;
1945 if (!new_mask)
1946 BUG();
1947 #endif
1949 preempt_disable();
1950 rq = task_rq_lock(p, &flags);
1951 p->cpus_allowed = new_mask;
1953 * Can the task run on the task's current CPU? If not then
1954 * migrate the thread off to a proper CPU.
1956 if (new_mask & (1UL << task_cpu(p))) {
1957 task_rq_unlock(rq, &flags);
1958 goto out;
1961 * If the task is not on a runqueue (and not running), then
1962 * it is sufficient to simply update the task's cpu field.
1964 if (!p->array && !task_running(rq, p)) {
1965 set_task_cpu(p, __ffs(p->cpus_allowed));
1966 task_rq_unlock(rq, &flags);
1967 goto out;
1969 init_completion(&req.done);
1970 req.task = p;
1971 list_add(&req.list, &rq->migration_queue);
1972 task_rq_unlock(rq, &flags);
1973 wake_up_process(rq->migration_thread);
1975 wait_for_completion(&req.done);
1976 out:
1977 preempt_enable();
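/*
 * Usage sketch (hypothetical caller) - pinning the current task to CPU 0,
 * the same pattern migration_thread() uses below for its own CPU:
 *
 *	set_cpus_allowed(current, 1UL << 0);
 *
 * If the task actually has to move, the call sleeps in
 * wait_for_completion() until the migration thread has pulled it over,
 * which is why no spinlocks may be held (see the NOTE above).
 */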
1981 * migration_thread - this is a highprio system thread that performs
1982 * thread migration by 'pulling' threads into the target runqueue.
1984 static int migration_thread(void * data)
1986 struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
1987 int cpu = (long) data;
1988 runqueue_t *rq;
1989 int ret;
1991 daemonize();
1992 sigfillset(&current->blocked);
1993 set_fs(KERNEL_DS);
1995 set_cpus_allowed(current, 1UL << cpu);
1998 * Migration can happen without a migration thread on the
1999 * target CPU because here we remove the thread from the
2000 * runqueue and the helper thread then moves this thread
2001 * to the target CPU - we'll wake up there.
2003 if (smp_processor_id() != cpu)
2004 printk("migration_task %d on cpu=%d\n", cpu, smp_processor_id());
2005 ret = setscheduler(0, SCHED_FIFO, &param);
2007 rq = this_rq();
2008 rq->migration_thread = current;
2010 sprintf(current->comm, "migration_CPU%d", smp_processor_id());
2012 for (;;) {
2013 runqueue_t *rq_src, *rq_dest;
2014 struct list_head *head;
2015 int cpu_src, cpu_dest;
2016 migration_req_t *req;
2017 unsigned long flags;
2018 task_t *p;
2020 spin_lock_irqsave(&rq->lock, flags);
2021 head = &rq->migration_queue;
2022 current->state = TASK_INTERRUPTIBLE;
2023 if (list_empty(head)) {
2024 spin_unlock_irqrestore(&rq->lock, flags);
2025 schedule();
2026 continue;
2028 req = list_entry(head->next, migration_req_t, list);
2029 list_del_init(head->next);
2030 spin_unlock_irqrestore(&rq->lock, flags);
2032 p = req->task;
2033 cpu_dest = __ffs(p->cpus_allowed);
2034 rq_dest = cpu_rq(cpu_dest);
2035 repeat:
2036 cpu_src = task_cpu(p);
2037 rq_src = cpu_rq(cpu_src);
2039 local_irq_save(flags);
2040 double_rq_lock(rq_src, rq_dest);
2041 if (task_cpu(p) != cpu_src) {
2042 double_rq_unlock(rq_src, rq_dest);
2043 local_irq_restore(flags);
2044 goto repeat;
2046 if (rq_src == rq) {
2047 set_task_cpu(p, cpu_dest);
2048 if (p->array) {
2049 deactivate_task(p, rq_src);
2050 activate_task(p, rq_dest);
2053 double_rq_unlock(rq_src, rq_dest);
2054 local_irq_restore(flags);
2056 complete(&req->done);
2061 * migration_call - callback that gets triggered when a CPU is added.
2062 * Here we can start up the necessary migration thread for the new CPU.
2064 static int migration_call(struct notifier_block *nfb,
2065 unsigned long action,
2066 void *hcpu)
2068 switch (action) {
2069 case CPU_ONLINE:
2070 printk("Starting migration thread for cpu %li\n",
2071 (long)hcpu);
2072 kernel_thread(migration_thread, hcpu, CLONE_KERNEL);
2073 while (!cpu_rq((long)hcpu)->migration_thread)
2074 yield();
2075 break;
2077 return NOTIFY_OK;
2080 static struct notifier_block migration_notifier = { &migration_call, NULL, 0 };
2082 __init int migration_init(void)
2084 /* Start one for boot CPU. */
2085 migration_call(&migration_notifier, CPU_ONLINE,
2086 (void *)(long)smp_processor_id());
2087 register_cpu_notifier(&migration_notifier);
2088 return 0;
2091 #endif
2093 #if CONFIG_SMP || CONFIG_PREEMPT
2095 * The 'big kernel lock'
2097 * This spinlock is taken and released recursively by lock_kernel()
2098 * and unlock_kernel(). It is transparently dropped and reacquired
2099 * over schedule(). It is used to protect legacy code that hasn't
2100 * been migrated to a proper locking design yet.
2102 * Don't use in new code.
2104 spinlock_t kernel_flag __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
2105 #endif
2107 extern void init_timers(void);
2109 void __init sched_init(void)
2111 runqueue_t *rq;
2112 int i, j, k;
2114 for (i = 0; i < NR_CPUS; i++) {
2115 prio_array_t *array;
2117 rq = cpu_rq(i);
2118 rq->active = rq->arrays;
2119 rq->expired = rq->arrays + 1;
2120 spin_lock_init(&rq->lock);
2121 INIT_LIST_HEAD(&rq->migration_queue);
2123 for (j = 0; j < 2; j++) {
2124 array = rq->arrays + j;
2125 for (k = 0; k < MAX_PRIO; k++) {
2126 INIT_LIST_HEAD(array->queue + k);
2127 __clear_bit(k, array->bitmap);
2129 // delimiter for bitsearch
2130 __set_bit(MAX_PRIO, array->bitmap);
2134 * We have to do a little magic to get the first
2135 * thread right in SMP mode.
2137 rq = this_rq();
2138 rq->curr = current;
2139 rq->idle = current;
2140 set_task_cpu(current, smp_processor_id());
2141 wake_up_process(current);
2143 init_timers();
2146 * The boot idle thread does lazy MMU switching as well:
2148 atomic_inc(&init_mm.mm_count);
2149 enter_lazy_tlb(&init_mm, current, smp_processor_id());
2152 #ifdef CONFIG_DEBUG_KERNEL
2153 void __might_sleep(char *file, int line)
2155 #if defined(in_atomic)
2156 static unsigned long prev_jiffy; /* ratelimiting */
2158 if (in_atomic()) {
2159 if (time_before(jiffies, prev_jiffy + HZ))
2160 return;
2161 prev_jiffy = jiffies;
2162 printk(KERN_ERR "Debug: sleeping function called from illegal"
2163 " context at %s:%d\n", file, line);
2164 dump_stack();
2166 #endif
2168 #endif