/*
 *  Kernel scheduler and related syscalls
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1996-12-23  Modified by Dave Grothe to fix bugs in semaphores and
 *              make semaphores SMP safe
 *  1998-11-19  Implemented schedule_timeout() and related stuff
 *  1998-12-28  Implemented better SMP scheduling by Ingo Molnar
 */

/*
 * 'sched.c' is the main kernel file. It contains scheduling primitives
 * (sleep_on, wakeup, schedule etc) as well as a number of simple system
 * call functions (type getpid()), which just extract a field from
 * current-task
 */
#include <linux/config.h>
#include <linux/init.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>

#include <asm/uaccess.h>
#include <asm/mmu_context.h>
extern void timer_bh(void);
extern void tqueue_bh(void);
extern void immediate_bh(void);
unsigned securebits = SECUREBITS_DEFAULT; /* systemwide security settings */

extern void mem_use(void);
/*
 * NOTE! The unix "nice" value influences how long a process
 * gets. The nice value ranges from -20 to +19, where a -20
 * is a "high-priority" task, and a "+10" is a low-priority
 * task.
 *
 * We want the time-slice to be around 50ms or so, so this
 * calculation depends on the value of HZ.
 */
#if HZ < 200
#define TICK_SCALE(x)	((x) >> 2)
#elif HZ < 400
#define TICK_SCALE(x)	((x) >> 1)
#elif HZ < 800
#define TICK_SCALE(x)	(x)
#elif HZ < 1600
#define TICK_SCALE(x)	((x) << 1)
#else
#define TICK_SCALE(x)	((x) << 2)
#endif

#define NICE_TO_TICKS(nice)	(TICK_SCALE(20-(nice))+1)
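/*
 * A worked example of the macros above (a sketch, assuming HZ == 100 so
 * that TICK_SCALE(x) is ((x) >> 2)):
 *
 *	NICE_TO_TICKS(-20) == (40 >> 2) + 1 == 11 ticks (~110ms slice)
 *	NICE_TO_TICKS(0)   == (20 >> 2) + 1 ==  6 ticks (~60ms slice)
 *	NICE_TO_TICKS(19)  == ( 1 >> 2) + 1 ==  1 tick  (~10ms slice)
 *
 * The helper below is purely illustrative and not part of the scheduler.
 */
static inline int example_nice_to_ticks(int nice)
{
	/* same arithmetic the scheduler uses when refilling p->counter */
	return NICE_TO_TICKS(nice);
}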
/*
 * Init task must be ok at boot for the ix86 as we will check its signals
 * via the SMP irq return path.
 */
struct task_struct * init_tasks[NR_CPUS] = {&init_task, };
/*
 * The tasklist_lock protects the linked list of processes.
 *
 * The scheduler lock protects against multiple entry
 * into the scheduling code, and doesn't need to worry
 * about interrupts (because interrupts cannot call the
 * scheduler).
 *
 * The run-queue lock locks the parts that actually access
 * and change the run-queues, and have to be interrupt-safe.
 */
spinlock_t runqueue_lock __cacheline_aligned = SPIN_LOCK_UNLOCKED;  /* second */
rwlock_t tasklist_lock __cacheline_aligned = RW_LOCK_UNLOCKED;	/* third */
static LIST_HEAD(runqueue_head);
/*
 * We align per-CPU scheduling data on cacheline boundaries,
 * to prevent cacheline ping-pong.
 */
static union {
	struct schedule_data {
		struct task_struct * curr;
		cycles_t last_schedule;
	} schedule_data;
	char __pad [SMP_CACHE_BYTES];
} aligned_data [NR_CPUS] __cacheline_aligned = { {{&init_task,0}}};
#define cpu_curr(cpu)		aligned_data[(cpu)].schedule_data.curr
#define last_schedule(cpu)	aligned_data[(cpu)].schedule_data.last_schedule

struct kernel_stat kstat;
#ifdef CONFIG_SMP

#define idle_task(cpu) (init_tasks[cpu_number_map(cpu)])
#define can_schedule(p,cpu) ((!(p)->has_cpu) && \
				((p)->cpus_allowed & (1 << cpu)))

#else

#define idle_task(cpu) (&init_task)
#define can_schedule(p,cpu) (1)

#endif
void scheduling_functions_start_here(void) { }
/*
 * This is the function that decides how desirable a process is..
 * You can weigh different processes against each other depending
 * on what CPU they've run on lately etc to try to handle cache
 * and TLB miss penalties.
 *
 * Return values:
 *	 -1000: never select this
 *	     0: out of time, recalculate counters (but it might still be
 *		selected)
 *	   +ve: "goodness" value (the larger, the better)
 *	 +1000: realtime process, select this.
 */
static inline int goodness(struct task_struct * p, int this_cpu, struct mm_struct *this_mm)
{
	int weight;

	/*
	 * select the current process after every other
	 * runnable process, but before the idle thread.
	 * Also, dont trigger a counter recalculation.
	 */
	weight = -1;
	if (p->policy & SCHED_YIELD)
		goto out;

	/*
	 * Non-RT process - normal case first.
	 */
	if (p->policy == SCHED_OTHER) {
		/*
		 * Give the process a first-approximation goodness value
		 * according to the number of clock-ticks it has left.
		 *
		 * Don't do any other calculations if the time slice is
		 * over..
		 */
		weight = p->counter;
		if (!weight)
			goto out;

#ifdef CONFIG_SMP
		/* Give a largish advantage to the same processor... */
		/* (this is equivalent to penalizing other processors) */
		if (p->processor == this_cpu)
			weight += PROC_CHANGE_PENALTY;
#endif

		/* .. and a slight advantage to the current MM */
		if (p->mm == this_mm || !p->mm)
			weight += 1;
		weight += 20 - p->nice;
		goto out;
	}

	/*
	 * Realtime process, select the first one on the
	 * runqueue (taking priorities within processes
	 * into account).
	 */
	weight = 1000 + p->rt_priority;
out:
	return weight;
}
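/*
 * A rough worked example of the weighting above, illustrative only (the
 * PROC_CHANGE_PENALTY bonus is architecture-defined; 15 is a typical
 * value): a SCHED_OTHER task with 6 ticks left, nice 0, last run on this
 * CPU and sharing the current MM scores 6 + 15 + 1 + 20 = 42, while any
 * realtime task scores at least 1000 and therefore always wins.
 */
static inline int example_goodness_other(struct task_struct * p, int this_cpu,
					 struct mm_struct *this_mm)
{
	int weight = p->counter;		/* ticks left in this slice */

	if (!weight)
		return 0;			/* slice used up - recalculate */
#ifdef CONFIG_SMP
	if (p->processor == this_cpu)
		weight += PROC_CHANGE_PENALTY;	/* cache/TLB affinity bonus */
#endif
	if (p->mm == this_mm || !p->mm)
		weight += 1;			/* same address space bonus */
	return weight + 20 - p->nice;		/* static priority bonus */
}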
/*
 * the 'goodness value' of replacing a process on a given CPU.
 * positive value means 'replace', zero or negative means 'dont'.
 */
static inline int preemption_goodness(struct task_struct * prev, struct task_struct * p, int cpu)
{
	return goodness(p, cpu, prev->active_mm) - goodness(prev, cpu, prev->active_mm);
}
/*
 * This is ugly, but reschedule_idle() is very timing-critical.
 * We enter with the runqueue spinlock held, but we might end
 * up unlocking it early, so the caller must not unlock the
 * runqueue, it's always done by reschedule_idle().
 *
 * This function must be inline as anything that saves and restores
 * flags has to do so within the same register window on sparc (Anton)
 */
static FASTCALL(void reschedule_idle(struct task_struct * p));

static void reschedule_idle(struct task_struct * p)
{
#ifdef CONFIG_SMP
	int this_cpu = smp_processor_id();
	struct task_struct *tsk, *target_tsk;
	int cpu, best_cpu, i, max_prio;
	cycles_t oldest_idle;

	/*
	 * shortcut if the woken up task's last CPU is
	 * idle now.
	 */
	best_cpu = p->processor;
	if (can_schedule(p, best_cpu)) {
		tsk = idle_task(best_cpu);
		if (cpu_curr(best_cpu) == tsk) {
			int need_resched;
send_now_idle:
			/*
			 * If need_resched == -1 then we can skip sending
			 * the IPI altogether, tsk->need_resched is
			 * actively watched by the idle thread.
			 */
			need_resched = tsk->need_resched;
			tsk->need_resched = 1;
			if ((best_cpu != this_cpu) && !need_resched)
				smp_send_reschedule(best_cpu);
			return;
		}
	}

	/*
	 * We know that the preferred CPU has a cache-affine current
	 * process, lets try to find a new idle CPU for the woken-up
	 * process. Select the least recently active idle CPU. (that
	 * one will have the least active cache context.) Also find
	 * the executing process which has the least priority.
	 */
	oldest_idle = (cycles_t) -1;
	target_tsk = NULL;
	max_prio = 1;

	for (i = 0; i < smp_num_cpus; i++) {
		cpu = cpu_logical_map(i);
		if (!can_schedule(p, cpu))
			continue;
		tsk = cpu_curr(cpu);
		/*
		 * We use the first available idle CPU. This creates
		 * a priority list between idle CPUs, but this is not
		 * a problem.
		 */
		if (tsk == idle_task(cpu)) {
			if (last_schedule(cpu) < oldest_idle) {
				oldest_idle = last_schedule(cpu);
				target_tsk = tsk;
			}
		} else {
			if (oldest_idle == -1ULL) {
				int prio = preemption_goodness(tsk, p, cpu);

				if (prio > max_prio) {
					max_prio = prio;
					target_tsk = tsk;
				}
			}
		}
	}
	tsk = target_tsk;
	if (tsk) {
		if (oldest_idle != -1ULL)
			goto send_now_idle;
		tsk->need_resched = 1;
		if (tsk->processor != this_cpu)
			smp_send_reschedule(tsk->processor);
	}
	return;
#else /* UP */
	int this_cpu = smp_processor_id();
	struct task_struct *tsk;

	tsk = cpu_curr(this_cpu);
	if (preemption_goodness(tsk, p, this_cpu) > 1)
		tsk->need_resched = 1;
#endif
}
/*
 * This has to add the process to the _beginning_ of the
 * run-queue, not the end. See the comment about "This is
 * subtle" in the scheduler proper..
 */
static inline void add_to_runqueue(struct task_struct * p)
{
	list_add(&p->run_list, &runqueue_head);
	nr_running++;
}
static inline void move_last_runqueue(struct task_struct * p)
{
	list_del(&p->run_list);
	list_add_tail(&p->run_list, &runqueue_head);
}
static inline void move_first_runqueue(struct task_struct * p)
{
	list_del(&p->run_list);
	list_add(&p->run_list, &runqueue_head);
}
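/*
 * A minimal sketch (not a helper the scheduler itself provides) of how the
 * run-queue maintained by the helpers above is walked: every runnable task
 * is linked into runqueue_head through its p->run_list member, so a scan
 * uses list_for_each() plus list_entry(), exactly as schedule() does below.
 * A real caller would hold runqueue_lock while walking the list.
 */
static inline int example_count_runnable(void)
{
	struct list_head *tmp;
	int nr = 0;

	list_for_each(tmp, &runqueue_head) {
		struct task_struct *p = list_entry(tmp, struct task_struct, run_list);

		if (p->state == TASK_RUNNING)	/* queued tasks are normally runnable */
			nr++;
	}
	return nr;
}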
/*
 * Wake up a process. Put it on the run-queue if it's not
 * already there.  The "current" process is always on the
 * run-queue (except when the actual re-schedule is in
 * progress), and as such you're allowed to do the simpler
 * "current->state = TASK_RUNNING" to mark yourself runnable
 * without the overhead of this.
 */
inline void wake_up_process(struct task_struct * p)
{
	unsigned long flags;

	/*
	 * We want the common case fall through straight, thus the goto.
	 */
	spin_lock_irqsave(&runqueue_lock, flags);
	p->state = TASK_RUNNING;
	if (task_on_runqueue(p))
		goto out;
	add_to_runqueue(p);
	reschedule_idle(p);
out:
	spin_unlock_irqrestore(&runqueue_lock, flags);
}
static inline void wake_up_process_synchronous(struct task_struct * p)
{
	unsigned long flags;

	/*
	 * We want the common case fall through straight, thus the goto.
	 */
	spin_lock_irqsave(&runqueue_lock, flags);
	p->state = TASK_RUNNING;
	if (task_on_runqueue(p))
		goto out;
	add_to_runqueue(p);
out:
	spin_unlock_irqrestore(&runqueue_lock, flags);
}
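/*
 * A minimal sketch of the usual pairing with wake_up_process(): a sleeper
 * records itself somewhere the waker can find it, marks itself sleeping and
 * calls schedule(); the waker simply passes that task_struct pointer to
 * wake_up_process().  The names used here are illustrative only.
 */
static struct task_struct *example_waiter;

static void example_sleep_until_woken(void)
{
	example_waiter = current;
	current->state = TASK_INTERRUPTIBLE;	/* must be set before schedule() */
	schedule();				/* sleeps until someone wakes us */
}

static void example_wake_the_waiter(void)
{
	if (example_waiter)
		wake_up_process(example_waiter);	/* puts it back on the run-queue */
}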
static void process_timeout(unsigned long __data)
{
	struct task_struct * p = (struct task_struct *) __data;

	wake_up_process(p);
}
signed long schedule_timeout(signed long timeout)
{
	struct timer_list timer;
	unsigned long expire;

	switch (timeout)
	{
	case MAX_SCHEDULE_TIMEOUT:
		/*
		 * These two special cases are useful to be comfortable
		 * in the caller. Nothing more. We could take
		 * MAX_SCHEDULE_TIMEOUT from one of the negative values
		 * but I'd like to return a valid offset (>=0) to allow
		 * the caller to do everything it wants with the retval.
		 */
		schedule();
		goto out;
	default:
		/*
		 * Another bit of PARANOID. Note that the retval will be
		 * 0 since no piece of kernel is supposed to do a check
		 * for a negative retval of schedule_timeout() (since it
		 * should never happen anyway). You just have the printk()
		 * that will tell you if something has gone wrong and where.
		 */
		if (timeout < 0)
		{
			printk(KERN_ERR "schedule_timeout: wrong timeout "
			       "value %lx from %p\n", timeout,
			       __builtin_return_address(0));
			current->state = TASK_RUNNING;
			goto out;
		}
	}

	expire = timeout + jiffies;

	init_timer(&timer);
	timer.expires = expire;
	timer.data = (unsigned long) current;
	timer.function = process_timeout;

	add_timer(&timer);
	schedule();
	del_timer_sync(&timer);

	timeout = expire - jiffies;

 out:
	return timeout < 0 ? 0 : timeout;
}
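/*
 * Typical use of schedule_timeout(), sketched for illustration: the caller
 * sets its state first, then sleeps for a bounded number of jiffies.  The
 * return value is how much of the timeout was left if we were woken early,
 * or 0 if the full interval elapsed.
 */
static signed long example_sleep_about_one_second(void)
{
	signed long remaining;

	current->state = TASK_INTERRUPTIBLE;	/* interruptible by signals/wakeups */
	remaining = schedule_timeout(HZ);	/* HZ jiffies == roughly one second */
	return remaining;
}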
/*
 * schedule_tail() is getting called from the fork return path. This
 * cleans up all remaining scheduler things, without impacting the
 * common case.
 */
static inline void __schedule_tail(struct task_struct *prev)
{
#ifdef CONFIG_SMP
	int policy;

	/*
	 * fast path falls through. We have to clear has_cpu before
	 * checking prev->state to avoid a wakeup race - thus we
	 * also have to protect against the task exiting early.
	 */
	task_lock(prev);
	policy = prev->policy;
	prev->policy = policy & ~SCHED_YIELD;
	prev->has_cpu = 0;
	wmb();
	if (prev->state == TASK_RUNNING)
		goto needs_resched;

out_unlock:
	task_unlock(prev);
	return;

	/*
	 * Slow path - we 'push' the previous process and
	 * reschedule_idle() will attempt to find a new
	 * processor for it. (but it might preempt the
	 * current process as well.) We must take the runqueue
	 * lock and re-check prev->state to be correct. It might
	 * still happen that this process has a preemption
	 * 'in progress' already - but this is not a problem and
	 * might happen in other circumstances as well.
	 */
needs_resched:
	{
		unsigned long flags;

		/*
		 * Avoid taking the runqueue lock in cases where
		 * no preemption-check is necessary:
		 */
		if ((prev == idle_task(smp_processor_id())) ||
						(policy & SCHED_YIELD))
			goto out_unlock;

		spin_lock_irqsave(&runqueue_lock, flags);
		if (prev->state == TASK_RUNNING)
			reschedule_idle(prev);
		spin_unlock_irqrestore(&runqueue_lock, flags);
		goto out_unlock;
	}
#else
	prev->policy &= ~SCHED_YIELD;
#endif /* CONFIG_SMP */
}
void schedule_tail(struct task_struct *prev)
{
	__schedule_tail(prev);
}
/*
 *  'schedule()' is the scheduler function. It's a very simple and nice
 * scheduler: it's not perfect, but certainly works for most things.
 *
 * The goto is "interesting".
 *
 *   NOTE!!  Task 0 is the 'idle' task, which gets called when no other
 * tasks can run. It can not be killed, and it cannot sleep. The 'state'
 * information in task[0] is never used.
 */
asmlinkage void schedule(void)
{
	struct schedule_data * sched_data;
	struct task_struct *prev, *next, *p;
	struct list_head *tmp;
	int this_cpu, c;

	if (!current->active_mm) BUG();
	if (tq_scheduler)
		goto handle_tq_scheduler;
tq_scheduler_back:

	prev = current;
	this_cpu = prev->processor;

	if (in_interrupt())
		goto scheduling_in_interrupt;

	release_kernel_lock(prev, this_cpu);

	/* Do "administrative" work here while we don't hold any locks */
	if (softirq_active(this_cpu) & softirq_mask(this_cpu))
		goto handle_softirq;
handle_softirq_back:

	/*
	 * 'sched_data' is protected by the fact that we can run
	 * only one process per CPU.
	 */
	sched_data = & aligned_data[this_cpu].schedule_data;

	spin_lock_irq(&runqueue_lock);

	/* move an exhausted RR process to be last.. */
	if (prev->policy == SCHED_RR)
		goto move_rr_last;
move_rr_back:

	switch (prev->state & ~TASK_EXCLUSIVE) {
		case TASK_INTERRUPTIBLE:
			if (signal_pending(prev)) {
				prev->state = TASK_RUNNING;
				break;
			}
		default:
			del_from_runqueue(prev);
		case TASK_RUNNING:
	}
	prev->need_resched = 0;

	/*
	 * this is the scheduler proper:
	 */

repeat_schedule:
	/*
	 * Default process to select..
	 */
	next = idle_task(this_cpu);
	c = -1000;
	if (prev->state == TASK_RUNNING)
		goto still_running;

still_running_back:
	list_for_each(tmp, &runqueue_head) {
		p = list_entry(tmp, struct task_struct, run_list);
		if (can_schedule(p, this_cpu)) {
			int weight = goodness(p, this_cpu, prev->active_mm);
			if (weight > c)
				c = weight, next = p;
		}
	}

	/* Do we need to re-calculate counters? */
	if (!c)
		goto recalculate;
	/*
	 * from this point on nothing can prevent us from
	 * switching to the next task, save this fact in
	 * sched_data.
	 */
	sched_data->curr = next;
#ifdef CONFIG_SMP
	next->has_cpu = 1;
	next->processor = this_cpu;
#endif
	spin_unlock_irq(&runqueue_lock);

	if (prev == next)
		goto same_process;

#ifdef CONFIG_SMP
	/*
	 * maintain the per-process 'last schedule' value.
	 * (this has to be recalculated even if we reschedule to
	 * the same process) Currently this is only used on SMP,
	 * and it's approximate, so we do not have to maintain
	 * it while holding the runqueue spinlock.
	 */
	sched_data->last_schedule = get_cycles();

	/*
	 * We drop the scheduler lock early (it's a global spinlock),
	 * thus we have to lock the previous process from getting
	 * rescheduled during switch_to().
	 */

#endif /* CONFIG_SMP */

	kstat.context_swtch++;
	/*
	 * there are 3 processes which are affected by a context switch:
	 *
	 * prev == .... ==> (last => next)
	 *
	 * It's the 'much more previous' 'prev' that is on next's stack,
	 * but prev is set to (the just run) 'last' process by switch_to().
	 * This might sound slightly confusing but makes tons of sense.
	 */
	prepare_to_switch();
	{
		struct mm_struct *mm = next->mm;
		struct mm_struct *oldmm = prev->active_mm;
		if (!mm) {
			if (next->active_mm) BUG();
			next->active_mm = oldmm;
			atomic_inc(&oldmm->mm_count);
			enter_lazy_tlb(oldmm, next, this_cpu);
		} else {
			if (next->active_mm != mm) BUG();
			switch_mm(oldmm, mm, next, this_cpu);
		}

		if (!prev->mm) {
			prev->active_mm = NULL;
			mmdrop(oldmm);
		}
	}

	/*
	 * This just switches the register state and the
	 * stack.
	 */
	switch_to(prev, next, prev);
	__schedule_tail(prev);

same_process:
	reacquire_kernel_lock(current);
	if (current->need_resched)
		goto tq_scheduler_back;
	return;

recalculate:
	{
		struct task_struct *p;
		spin_unlock_irq(&runqueue_lock);
		read_lock(&tasklist_lock);
		for_each_task(p)
			p->counter = (p->counter >> 1) + NICE_TO_TICKS(p->nice);
		read_unlock(&tasklist_lock);
		spin_lock_irq(&runqueue_lock);
	}
	goto repeat_schedule;

still_running:
	c = goodness(prev, this_cpu, prev->active_mm);
	next = prev;
	goto still_running_back;

handle_softirq:
	do_softirq();
	goto handle_softirq_back;

handle_tq_scheduler:
	/*
	 * do not run the task queue with disabled interrupts,
	 * cli() wouldn't work on SMP
	 */
	sti();
	run_task_queue(&tq_scheduler);
	goto tq_scheduler_back;

move_rr_last:
	if (!prev->counter) {
		prev->counter = NICE_TO_TICKS(prev->nice);
		move_last_runqueue(prev);
	}
	goto move_rr_back;

scheduling_in_interrupt:
	printk("Scheduling in interrupt\n");
	BUG();
	return;
}
static inline void __wake_up_common (wait_queue_head_t *q, unsigned int mode,
				     const int sync)
{
	struct list_head *tmp, *head;
	struct task_struct *p, *best_exclusive;
	unsigned long flags;
	int best_cpu, irq;

	if (!q || !waitqueue_active(q))
		goto out;

	best_cpu = smp_processor_id();
	irq = in_interrupt();
	best_exclusive = NULL;
	wq_write_lock_irqsave(&q->lock, flags);

#if WAITQUEUE_DEBUG
	CHECK_MAGIC_WQHEAD(q);
#endif

	head = &q->task_list;
#if WAITQUEUE_DEBUG
	if (!head->next || !head->prev)
		WQ_BUG();
#endif
	tmp = head->next;
	while (tmp != head) {
		unsigned int state;
		wait_queue_t *curr = list_entry(tmp, wait_queue_t, task_list);

		tmp = tmp->next;

#if WAITQUEUE_DEBUG
		CHECK_MAGIC(curr->__magic);
#endif
		p = curr->task;
		state = p->state;
		if (state & (mode & ~TASK_EXCLUSIVE)) {
#if WAITQUEUE_DEBUG
			curr->__waker = (long)__builtin_return_address(0);
#endif
			/*
			 * If waking up from an interrupt context then
			 * prefer processes which are affine to this
			 * CPU.
			 */
			if (irq && (state & mode & TASK_EXCLUSIVE)) {
				if (!best_exclusive)
					best_exclusive = p;
				if (p->processor == best_cpu) {
					best_exclusive = p;
					break;
				}
			} else {
				if (sync)
					wake_up_process_synchronous(p);
				else
					wake_up_process(p);
				if (state & mode & TASK_EXCLUSIVE)
					break;
			}
		}
	}
	if (best_exclusive) {
		if (sync)
			wake_up_process_synchronous(best_exclusive);
		else
			wake_up_process(best_exclusive);
	}
	wq_write_unlock_irqrestore(&q->lock, flags);
out:
	return;
}
void __wake_up(wait_queue_head_t *q, unsigned int mode)
{
	__wake_up_common(q, mode, 0);
}

void __wake_up_sync(wait_queue_head_t *q, unsigned int mode)
{
	__wake_up_common(q, mode, 1);
}
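/*
 * Sketch of a "wake one" waiter, illustrative only: __wake_up_common()
 * above wakes every sleeper whose state matches the mode, but stops after
 * the first one that has TASK_EXCLUSIVE set in its state.  A waiter that
 * wants wake-one semantics therefore queues itself normally and ORs
 * TASK_EXCLUSIVE into its sleep state before calling schedule().
 */
static void example_exclusive_wait(wait_queue_head_t *q)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(q, &wait);
	current->state = TASK_UNINTERRUPTIBLE | TASK_EXCLUSIVE;
	schedule();	/* woken when __wake_up() runs on q with a mode that includes TASK_EXCLUSIVE */
	remove_wait_queue(q, &wait);
}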
#define	SLEEP_ON_VAR				\
	unsigned long flags;			\
	wait_queue_t wait;			\
	init_waitqueue_entry(&wait, current);

#define	SLEEP_ON_HEAD					\
	wq_write_lock_irqsave(&q->lock,flags);		\
	__add_wait_queue(q, &wait);			\
	wq_write_unlock(&q->lock);

#define	SLEEP_ON_TAIL						\
	wq_write_lock_irq(&q->lock);				\
	__remove_wait_queue(q, &wait);				\
	wq_write_unlock_irqrestore(&q->lock,flags);
void interruptible_sleep_on(wait_queue_head_t *q)
{
	SLEEP_ON_VAR

	current->state = TASK_INTERRUPTIBLE;

	SLEEP_ON_HEAD
	schedule();
	SLEEP_ON_TAIL
}

long interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
{
	SLEEP_ON_VAR

	current->state = TASK_INTERRUPTIBLE;

	SLEEP_ON_HEAD
	timeout = schedule_timeout(timeout);
	SLEEP_ON_TAIL

	return timeout;
}

void sleep_on(wait_queue_head_t *q)
{
	SLEEP_ON_VAR

	current->state = TASK_UNINTERRUPTIBLE;

	SLEEP_ON_HEAD
	schedule();
	SLEEP_ON_TAIL
}

long sleep_on_timeout(wait_queue_head_t *q, long timeout)
{
	SLEEP_ON_VAR

	current->state = TASK_UNINTERRUPTIBLE;

	SLEEP_ON_HEAD
	timeout = schedule_timeout(timeout);
	SLEEP_ON_TAIL

	return timeout;
}
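/*
 * Illustrative caller of the helpers above (a sketch; the wait-queue head
 * and the wake-up site are assumed to exist elsewhere): a driver typically
 * pairs interruptible_sleep_on_timeout() with a wake_up() issued when the
 * event it is waiting for arrives.
 */
static wait_queue_head_t example_readers_wq;	/* initialised elsewhere with init_waitqueue_head() */

static long example_wait_for_data(void)
{
	/* sleep until woken or until about half a second has passed */
	return interruptible_sleep_on_timeout(&example_readers_wq, HZ / 2);
}

static void example_data_arrived(void)
{
	wake_up(&example_readers_wq);	/* wakes sleepers queued on the head above */
}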
void scheduling_functions_end_here(void) { }
/*
 * This has been replaced by sys_setpriority. Maybe it should be
 * moved into the arch dependent tree for those ports that require
 * it for backward compatibility?
 */

asmlinkage long sys_nice(int increment)
{
	long newprio;

	/*
	 *	Setpriority might change our priority at the same moment.
	 *	We don't have to worry. Conceptually one call occurs first
	 *	and we have a single winner.
	 */
	if (increment < 0) {
		if (!capable(CAP_SYS_NICE))
			return -EPERM;
		if (increment < -40)
			increment = -40;
	}
	if (increment > 40)
		increment = 40;

	newprio = current->nice + increment;
	if (newprio < -20)
		newprio = -20;
	if (newprio > 19)
		newprio = 19;
	current->nice = newprio;
	return 0;
}
static inline struct task_struct *find_process_by_pid(pid_t pid)
{
	struct task_struct *tsk = current;

	if (pid)
		tsk = find_task_by_pid(pid);
	return tsk;
}
static int setscheduler(pid_t pid, int policy,
			struct sched_param *param)
{
	struct sched_param lp;
	struct task_struct *p;
	int retval;

	retval = -EINVAL;
	if (!param || pid < 0)
		goto out_nounlock;

	retval = -EFAULT;
	if (copy_from_user(&lp, param, sizeof(struct sched_param)))
		goto out_nounlock;

	/*
	 * We play safe to avoid deadlocks.
	 */
	spin_lock_irq(&runqueue_lock);
	read_lock(&tasklist_lock);

	p = find_process_by_pid(pid);

	retval = -ESRCH;
	if (!p)
		goto out_unlock;

	if (policy < 0)
		policy = p->policy;
	else {
		retval = -EINVAL;
		if (policy != SCHED_FIFO && policy != SCHED_RR &&
				policy != SCHED_OTHER)
			goto out_unlock;
	}

	/*
	 * Valid priorities for SCHED_FIFO and SCHED_RR are 1..99, valid
	 * priority for SCHED_OTHER is 0.
	 */
	retval = -EINVAL;
	if (lp.sched_priority < 0 || lp.sched_priority > 99)
		goto out_unlock;
	if ((policy == SCHED_OTHER) != (lp.sched_priority == 0))
		goto out_unlock;

	retval = -EPERM;
	if ((policy == SCHED_FIFO || policy == SCHED_RR) &&
	    !capable(CAP_SYS_NICE))
		goto out_unlock;
	if ((current->euid != p->euid) && (current->euid != p->uid) &&
	    !capable(CAP_SYS_NICE))
		goto out_unlock;

	retval = 0;
	p->policy = policy;
	p->rt_priority = lp.sched_priority;
	if (task_on_runqueue(p))
		move_first_runqueue(p);

	current->need_resched = 1;

out_unlock:
	read_unlock(&tasklist_lock);
	spin_unlock_irq(&runqueue_lock);

out_nounlock:
	return retval;
}
asmlinkage long sys_sched_setscheduler(pid_t pid, int policy,
				       struct sched_param *param)
{
	return setscheduler(pid, policy, param);
}

asmlinkage long sys_sched_setparam(pid_t pid, struct sched_param *param)
{
	return setscheduler(pid, -1, param);
}
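/*
 * How the two syscalls above are reached from user space, sketched for
 * illustration (user-level code, not kernel code; error handling omitted):
 * sched_setscheduler(2) with priority 50 ends up in setscheduler() with
 * policy SCHED_FIFO, while sched_setparam(2) passes policy -1, meaning
 * "keep the current policy, only change the priority".
 */
#if 0	/* user-space usage sketch */
#include <sched.h>

int make_me_fifo(void)
{
	struct sched_param sp = { .sched_priority = 50 };

	return sched_setscheduler(0, SCHED_FIFO, &sp);	/* pid 0 == calling process */
}
#endif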
asmlinkage long sys_sched_getscheduler(pid_t pid)
{
	struct task_struct *p;
	int retval;

	retval = -EINVAL;
	if (pid < 0)
		goto out_nounlock;

	retval = -ESRCH;
	read_lock(&tasklist_lock);
	p = find_process_by_pid(pid);
	if (p)
		retval = p->policy & ~SCHED_YIELD;
	read_unlock(&tasklist_lock);

out_nounlock:
	return retval;
}
asmlinkage long sys_sched_getparam(pid_t pid, struct sched_param *param)
{
	struct task_struct *p;
	struct sched_param lp;
	int retval;

	retval = -EINVAL;
	if (!param || pid < 0)
		goto out_nounlock;

	read_lock(&tasklist_lock);
	p = find_process_by_pid(pid);
	retval = -ESRCH;
	if (!p)
		goto out_unlock;
	lp.sched_priority = p->rt_priority;
	read_unlock(&tasklist_lock);

	/*
	 * This one might sleep, we cannot do it with a spinlock held ...
	 */
	retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;

out_nounlock:
	return retval;

out_unlock:
	read_unlock(&tasklist_lock);
	return retval;
}
asmlinkage long sys_sched_yield(void)
{
	/*
	 * Trick. sched_yield() first counts the number of truly
	 * 'pending' runnable processes, then returns if it's
	 * only the current process. (This test does not have
	 * to be atomic.) In threaded applications this optimization
	 * gets triggered quite often.
	 */
	int nr_pending = nr_running;

#ifdef CONFIG_SMP
	int i;

	// Subtract non-idle processes running on other CPUs.
	for (i = 0; i < smp_num_cpus; i++)
		if (aligned_data[i].schedule_data.curr != idle_task(i))
			nr_pending--;
#else
	// on UP this process is on the runqueue as well
	nr_pending--;
#endif
	if (nr_pending) {
		/*
		 * This process can only be rescheduled by us,
		 * so this is safe without any locking.
		 */
		if (current->policy == SCHED_OTHER)
			current->policy |= SCHED_YIELD;
		current->need_resched = 1;
	}
	return 0;
}
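/*
 * User-space view of the syscall above, sketched for illustration (not
 * kernel code): a thread that loses a race for a resource can give up the
 * rest of its slice instead of spinning at full speed.
 */
#if 0	/* user-space usage sketch */
#include <sched.h>

void spin_until(volatile int *ready)
{
	while (!*ready)
		sched_yield();	/* sets SCHED_YIELD + need_resched in the kernel */
}
#endif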
asmlinkage long sys_sched_get_priority_max(int policy)
{
	/* SCHED_FIFO/SCHED_RR priorities run 1..99, SCHED_OTHER is always 0 */
	return (policy == SCHED_FIFO || policy == SCHED_RR) ? 99 :
	       (policy == SCHED_OTHER) ? 0 : -EINVAL;
}

asmlinkage long sys_sched_get_priority_min(int policy)
{
	return (policy == SCHED_FIFO || policy == SCHED_RR) ? 1 :
	       (policy == SCHED_OTHER) ? 0 : -EINVAL;
}
asmlinkage long sys_sched_rr_get_interval(pid_t pid, struct timespec *interval)
{
	struct task_struct *p;
	int retval = -EINVAL;
	struct timespec t;

	if (pid < 0)
		goto out_nounlock;

	retval = -ESRCH;
	read_lock(&tasklist_lock);
	p = find_process_by_pid(pid);
	if (p)
		jiffies_to_timespec(p->policy & SCHED_FIFO ? 0 : NICE_TO_TICKS(p->nice),
				    &t);
	read_unlock(&tasklist_lock);
	if (p)
		retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;

out_nounlock:
	return retval;
}
static void show_task(struct task_struct * p)
{
	unsigned long free = 0;
	int state;
	static const char * stat_nam[] = { "R", "S", "D", "Z", "T", "W" };

	printk("%-8s ", p->comm);
	state = p->state ? ffz(~p->state) + 1 : 0;
	if (((unsigned) state) < sizeof(stat_nam)/sizeof(char *))
		printk(stat_nam[state]);
	else
		printk(" ");
#if (BITS_PER_LONG == 32)
	if (p == current)
		printk(" current  ");
	else
		printk(" %08lX ", thread_saved_pc(&p->thread));
#else
	if (p == current)
		printk("   current task   ");
	else
		printk(" %016lx ", thread_saved_pc(&p->thread));
#endif
	{
		unsigned long * n = (unsigned long *) (p+1);
		while (!*n)
			n++;
		free = (unsigned long) n - (unsigned long)(p+1);
	}
	printk("%5lu %5d %6d ", free, p->pid, p->p_pptr->pid);
	if (p->p_cptr)
		printk("%5d ", p->p_cptr->pid);
	else
		printk("      ");
	if (!p->mm)
		printk(" (L-TLB) ");
	else
		printk(" (NOTLB) ");
	if (p->p_ysptr)
		printk("%7d", p->p_ysptr->pid);
	else
		printk("       ");
	if (p->p_osptr)
		printk(" %5d\n", p->p_osptr->pid);
	else
		printk("\n");

	{
		struct sigqueue *q;
		char s[sizeof(sigset_t)*2+1], b[sizeof(sigset_t)*2+1];

		render_sigset_t(&p->pending.signal, s);
		render_sigset_t(&p->blocked, b);
		printk("   sig: %d %s %s :", signal_pending(p), s, b);
		for (q = p->pending.head; q ; q = q->next)
			printk(" %d", q->info.si_signo);
		printk("\n");
	}
}
char * render_sigset_t(sigset_t *set, char *buffer)
{
	int i = _NSIG, x;

	do {
		i -= 4, x = 0;
		if (sigismember(set, i+1)) x |= 1;
		if (sigismember(set, i+2)) x |= 2;
		if (sigismember(set, i+3)) x |= 4;
		if (sigismember(set, i+4)) x |= 8;
		*buffer++ = (x < 10 ? '0' : 'a' - 10) + x;
	} while (i >= 4);
	*buffer = 0;
	return buffer;
}
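/*
 * A worked example of the encoding above (a sketch, assuming _NSIG == 64):
 * the loop emits one hex digit per group of four signals, highest group
 * first.  If only SIGHUP (1) and SIGQUIT (3) are pending, the final digit
 * covers signals 1-4, x becomes 1 | 4 == 5, and the rendered string is
 * fifteen '0' characters followed by '5'.
 */
static void example_render(sigset_t *set, char buf[sizeof(sigset_t)*2 + 1])
{
	render_sigset_t(set, buf);	/* buf now holds 16 hex digits plus '\0' */
}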
void show_state(void)
{
	struct task_struct *p;

#if (BITS_PER_LONG == 32)
	printk("  task             PC    stack   pid father child younger older\n");
#else
	printk("  task                 PC        stack   pid father child younger older\n");
#endif
	read_lock(&tasklist_lock);
	for_each_task(p)
		show_task(p);
	read_unlock(&tasklist_lock);
}
/*
 * Put all the gunge required to become a kernel thread without
 * attached user resources in one place where it belongs.
 */
void daemonize(void)
{
	struct fs_struct *fs;

	/*
	 * If we were started as result of loading a module, close all of the
	 * user space pages.  We don't need them, and if we didn't close them
	 * they would be locked into memory.
	 */
	exit_mm(current);

	current->session = 1;
	current->pgrp = 1;

	/* Become as one with the init task */

	exit_fs(current);	/* current->fs->count--; */
	fs = init_task.fs;
	current->fs = fs;
	atomic_inc(&fs->count);
	exit_files(current);
	current->files = init_task.files;
	atomic_inc(&current->files->count);
}
void __init init_idle(void)
{
	struct schedule_data * sched_data;
	sched_data = &aligned_data[smp_processor_id()].schedule_data;

	if (current != &init_task && task_on_runqueue(current)) {
		printk("UGH! (%d:%d) was on the runqueue, removing.\n",
			smp_processor_id(), current->pid);
		del_from_runqueue(current);
	}
	sched_data->curr = current;
	sched_data->last_schedule = get_cycles();
}
extern void init_timervecs (void);

void __init sched_init(void)
{
	/*
	 * We have to do a little magic to get the first
	 * process right in SMP mode.
	 */
	int cpu = smp_processor_id();
	int nr;

	init_task.processor = cpu;

	for(nr = 0; nr < PIDHASH_SZ; nr++)
		pidhash[nr] = NULL;

	init_timervecs();

	init_bh(TIMER_BH, timer_bh);
	init_bh(TQUEUE_BH, tqueue_bh);
	init_bh(IMMEDIATE_BH, immediate_bh);

	/*
	 * The boot idle thread does lazy MMU switching as well:
	 */
	atomic_inc(&init_mm.mm_count);
	enter_lazy_tlb(&init_mm, current, cpu);
}
);