/*
 *  Kernel scheduler and related syscalls
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1996-12-23  Modified by Dave Grothe to fix bugs in semaphores and
 *              make semaphores SMP safe
 *  1998-11-19  Implemented schedule_timeout() and related stuff
 *  1998-12-28  Implemented better SMP scheduling by Ingo Molnar
 */

/*
 * 'sched.c' is the main kernel file. It contains scheduling primitives
 * (sleep_on, wakeup, schedule etc) as well as a number of simple system
 * call functions (type getpid()), which just extract a field from
 * the current task.
 */
#include <linux/init.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>

#include <asm/uaccess.h>
#include <asm/mmu_context.h>
extern void timer_bh(void);
extern void tqueue_bh(void);
extern void immediate_bh(void);
unsigned securebits = SECUREBITS_DEFAULT; /* systemwide security settings */

extern void mem_use(void);
/*
 * Init task must be ok at boot for the ix86 as we will check its signals
 * via the SMP irq return path.
 */
struct task_struct * init_tasks[NR_CPUS] = {&init_task, };
/*
 * The tasklist_lock protects the linked list of processes.
 *
 * The scheduler lock protects against multiple entry
 * into the scheduling code, and doesn't need to worry
 * about interrupts (because interrupts cannot call the
 * scheduler).
 *
 * The run-queue lock locks the parts that actually access
 * and change the run-queues, and has to be interrupt-safe.
 */
spinlock_t runqueue_lock = SPIN_LOCK_UNLOCKED;  /* second */
rwlock_t tasklist_lock = RW_LOCK_UNLOCKED;      /* third */

static LIST_HEAD(runqueue_head);
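
/*
 * A minimal sketch (not from the original file) of how the two locks
 * above nest when both are needed: runqueue_lock is taken before
 * tasklist_lock and released after it, the order setscheduler() below
 * uses. The helper name is hypothetical.
 */
static inline void example_lock_nesting(void)
{
        spin_lock_irq(&runqueue_lock);          /* outer lock, disables irqs */
        read_lock(&tasklist_lock);              /* inner lock */
        /* ... look up and re-queue a task here ... */
        read_unlock(&tasklist_lock);
        spin_unlock_irq(&runqueue_lock);
}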
/*
 * We align per-CPU scheduling data on cacheline boundaries,
 * to prevent cacheline ping-pong.
 */
static union {
        struct schedule_data {
                struct task_struct * curr;
                cycles_t last_schedule;
        } schedule_data;
        char __pad [SMP_CACHE_BYTES];
} aligned_data [NR_CPUS] __cacheline_aligned = { {{&init_task,0}}};

#define cpu_curr(cpu) aligned_data[(cpu)].schedule_data.curr
struct kernel_stat kstat = { 0 };

#ifdef __SMP__

#define idle_task(cpu) (init_tasks[cpu_number_map(cpu)])
#define can_schedule(p) (!(p)->has_cpu)

#else

#define idle_task(cpu) (&init_task)
#define can_schedule(p) (1)

#endif

void scheduling_functions_start_here(void) { }
/*
 * This is the function that decides how desirable a process is..
 * You can weigh different processes against each other depending
 * on what CPU they've run on lately etc to try to handle cache
 * and TLB miss penalties.
 *
 * Return values:
 *       -1000: never select this
 *           0: out of time, recalculate counters (but it might still be
 *              selected)
 *         +ve: "goodness" value (the larger, the better)
 *       +1000: realtime process, select this.
 */
static inline int goodness(struct task_struct * p, int this_cpu, struct mm_struct *this_mm)
{
        int weight;

        /*
         * Realtime process, select the first one on the
         * runqueue (taking priorities within processes
         * into account).
         */
        if (p->policy != SCHED_OTHER) {
                weight = 1000 + p->rt_priority;
                goto out;
        }

        /*
         * Give the process a first-approximation goodness value
         * according to the number of clock-ticks it has left.
         *
         * Don't do any other calculations if the time slice is
         * over..
         */
        weight = p->counter;
        if (!weight)
                goto out;

#ifdef __SMP__
        /* Give a largish advantage to the same processor...   */
        /* (this is equivalent to penalizing other processors) */
        if (p->processor == this_cpu)
                weight += PROC_CHANGE_PENALTY;
#endif

        /* .. and a slight advantage to the current MM */
        if (p->mm == this_mm || !p->mm)
                weight += 1;
        weight += p->priority;

out:
        return weight;
}
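
/*
 * Worked example (illustrative, assuming HZ=100 so DEF_PRIORITY is 20):
 * a SCHED_OTHER task with a fresh timeslice (counter = 20) and default
 * priority 20, last run on this CPU and sharing this_mm, scores
 * 20 + PROC_CHANGE_PENALTY + 1 + 20; a realtime task with rt_priority 50
 * always scores 1050 and beats any SCHED_OTHER task.
 */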
/*
 * subtle. We want to discard a yielded process only if it's being
 * considered for a reschedule. Wakeup-time 'queries' of the scheduling
 * state do not count. Another optimization we do: sched_yield()-ed
 * processes are runnable (and thus will be considered for scheduling)
 * right when they are calling schedule(). So the only place we need
 * to care about SCHED_YIELD is when we calculate the previous process'
 * goodness.
 */
static inline int prev_goodness(struct task_struct * p, int this_cpu, struct mm_struct *this_mm)
{
        if (p->policy & SCHED_YIELD) {
                p->policy &= ~SCHED_YIELD;
                return 0;
        }
        return goodness(p, this_cpu, this_mm);
}
/*
 * the 'goodness value' of replacing a process on a given CPU.
 * positive value means 'replace', zero or negative means 'don't'.
 */
static inline int preemption_goodness(struct task_struct * prev, struct task_struct * p, int cpu)
{
        return goodness(p, cpu, prev->active_mm) - goodness(prev, cpu, prev->active_mm);
}
/*
 * This is ugly, but reschedule_idle() is very timing-critical.
 * We enter with the runqueue spinlock held, but we might end
 * up unlocking it early, so the caller must not unlock the
 * runqueue, it's always done by reschedule_idle().
 */
static inline void reschedule_idle(struct task_struct * p, unsigned long flags)
{
#ifdef __SMP__
        int this_cpu = smp_processor_id(), target_cpu;
        struct task_struct *tsk;
        int cpu, best_cpu, i;

        /*
         * shortcut if the woken up task's last CPU is
         * idle now.
         */
        best_cpu = p->processor;
        tsk = idle_task(best_cpu);
        if (cpu_curr(best_cpu) == tsk)
                goto send_now;

        /*
         * We know that the preferred CPU has a cache-affine current
         * process, let's try to find a new idle CPU for the woken-up
         * process:
         */
        for (i = smp_num_cpus - 1; i >= 0; i--) {
                cpu = cpu_logical_map(i);
                if (cpu == best_cpu)
                        continue;
                tsk = cpu_curr(cpu);
                /*
                 * We use the last available idle CPU. This creates
                 * a priority list between idle CPUs, but this is not
                 * a problem.
                 */
                if (tsk == idle_task(cpu))
                        goto send_now;
        }

        /*
         * No CPU is idle, but maybe this process has enough priority
         * to preempt its preferred CPU.
         */
        tsk = cpu_curr(best_cpu);
        if (preemption_goodness(tsk, p, best_cpu) > 0)
                goto send_now;

        /*
         * We will get here often - or in the high CPU contention
         * case. No CPU is idle and this process is either lowprio or
         * the preferred CPU is highprio. Try to preempt some other CPU
         * only if it's RT or if it's interactive and the preferred
         * cpu won't reschedule shortly.
         */
        if (p->avg_slice < cacheflush_time || (p->policy & ~SCHED_YIELD) != SCHED_OTHER) {
                for (i = smp_num_cpus - 1; i >= 0; i--) {
                        cpu = cpu_logical_map(i);
                        if (cpu == best_cpu)
                                continue;
                        tsk = cpu_curr(cpu);
                        if (preemption_goodness(tsk, p, cpu) > 0)
                                goto send_now;
                }
        }

        spin_unlock_irqrestore(&runqueue_lock, flags);
        return;

send_now:
        target_cpu = tsk->processor;
        tsk->need_resched = 1;
        spin_unlock_irqrestore(&runqueue_lock, flags);
        /*
         * the APIC stuff can go outside of the lock because
         * it uses no task information, only CPU#.
         */
        if (target_cpu != this_cpu)
                smp_send_reschedule(target_cpu);
#else
        int this_cpu = smp_processor_id();
        struct task_struct *tsk;

        tsk = cpu_curr(this_cpu);
        if (preemption_goodness(tsk, p, this_cpu) > 0)
                tsk->need_resched = 1;
        spin_unlock_irqrestore(&runqueue_lock, flags);
#endif
}
/*
 * This has to add the process to the _beginning_ of the
 * run-queue, not the end. See the comment about "This is
 * subtle" in the scheduler proper..
 */
static inline void add_to_runqueue(struct task_struct * p)
{
        list_add(&p->run_list, &runqueue_head);
        nr_running++;
}

static inline void move_last_runqueue(struct task_struct * p)
{
        list_del(&p->run_list);
        list_add_tail(&p->run_list, &runqueue_head);
}

static inline void move_first_runqueue(struct task_struct * p)
{
        list_del(&p->run_list);
        list_add(&p->run_list, &runqueue_head);
}
/*
 * Wake up a process. Put it on the run-queue if it's not
 * already there. The "current" process is always on the
 * run-queue (except when the actual re-schedule is in
 * progress), and as such you're allowed to do the simpler
 * "current->state = TASK_RUNNING" to mark yourself runnable
 * without the overhead of this.
 */
inline void wake_up_process(struct task_struct * p)
{
        unsigned long flags;

        /*
         * We want the common case to fall straight through, thus the goto.
         */
        spin_lock_irqsave(&runqueue_lock, flags);
        p->state = TASK_RUNNING;
        if (task_on_runqueue(p))
                goto out;
        add_to_runqueue(p);
        reschedule_idle(p, flags); // spin_unlocks runqueue
        return;
out:
        spin_unlock_irqrestore(&runqueue_lock, flags);
}
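
/*
 * A minimal sketch (not from the original file) of the two wakeup idioms
 * the comment above describes; the helper name is hypothetical:
 */
static inline void example_wakeups(struct task_struct *other)
{
        wake_up_process(other);         /* queue 'other' and maybe preempt a CPU */
        current->state = TASK_RUNNING;  /* 'current' is already on the run-queue */
}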
static inline void wake_up_process_synchronous(struct task_struct * p)
{
        unsigned long flags;

        /*
         * We want the common case to fall straight through, thus the goto.
         */
        spin_lock_irqsave(&runqueue_lock, flags);
        p->state = TASK_RUNNING;
        if (task_on_runqueue(p))
                goto out;
        add_to_runqueue(p);
out:
        spin_unlock_irqrestore(&runqueue_lock, flags);
}
static void process_timeout(unsigned long __data)
{
        struct task_struct * p = (struct task_struct *) __data;

        wake_up_process(p);
}
signed long schedule_timeout(signed long timeout)
{
        struct timer_list timer;
        unsigned long expire;

        switch (timeout)
        {
        case MAX_SCHEDULE_TIMEOUT:
                /*
                 * These two special cases are useful to be comfortable
                 * in the caller. Nothing more. We could take
                 * MAX_SCHEDULE_TIMEOUT from one of the negative values
                 * but I'd like to return a valid offset (>=0) to allow
                 * the caller to do everything it wants with the retval.
                 */
                schedule();
                goto out;
        default:
                /*
                 * Another bit of PARANOID. Note that the retval will be
                 * 0 since no piece of kernel is supposed to do a check
                 * for a negative retval of schedule_timeout() (since it
                 * should never happen anyway). You just have the printk()
                 * that will tell you if something has gone wrong and where.
                 */
                if (timeout < 0)
                {
                        printk(KERN_ERR "schedule_timeout: wrong timeout "
                               "value %lx from %p\n", timeout,
                               __builtin_return_address(0));
                        current->state = TASK_RUNNING;
                        goto out;
                }
        }

        expire = timeout + jiffies;

        init_timer(&timer);
        timer.expires = expire;
        timer.data = (unsigned long) current;
        timer.function = process_timeout;

        add_timer(&timer);
        schedule();
        del_timer(&timer);
        /* RED-PEN. Timer may be running now on another cpu.
         * Pray that the process will not exit too quickly.
         */

        timeout = expire - jiffies;

out:
        return timeout < 0 ? 0 : timeout;
}
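
/*
 * A minimal usage sketch for schedule_timeout() (not from the original
 * file; the helper name is hypothetical), mirroring what
 * interruptible_sleep_on_timeout() below does: the task state must be
 * set first, and the return value is the time left if woken early.
 */
static inline signed long example_sleep_about_one_second(void)
{
        current->state = TASK_INTERRUPTIBLE;
        return schedule_timeout(HZ);    /* ~1 second; 0 means it fully expired */
}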
/*
 * schedule_tail() is getting called from the fork return path. This
 * cleans up all remaining scheduler things, without impacting the
 * common case.
 */
static inline void __schedule_tail(struct task_struct *prev)
{
#ifdef __SMP__
        if ((prev->state == TASK_RUNNING) &&
                        (prev != idle_task(smp_processor_id()))) {
                unsigned long flags;

                spin_lock_irqsave(&runqueue_lock, flags);
                reschedule_idle(prev, flags); // spin_unlocks runqueue
        }
        wmb();
        prev->has_cpu = 0;
#endif /* __SMP__ */
}

void schedule_tail(struct task_struct *prev)
{
        __schedule_tail(prev);
}
/*
 * 'schedule()' is the scheduler function. It's a very simple and nice
 * scheduler: it's not perfect, but certainly works for most things.
 *
 * The goto is "interesting".
 *
 *   NOTE!!  Task 0 is the 'idle' task, which gets called when no other
 * tasks can run. It cannot be killed, and it cannot sleep. The 'state'
 * information in task[0] is never used.
 */
asmlinkage void schedule(void)
{
        struct schedule_data * sched_data;
        struct task_struct *prev, *next, *p;
        struct list_head *tmp;
        int this_cpu, c;

        if (!current->active_mm) BUG();
        if (tq_scheduler)
                goto handle_tq_scheduler;
tq_scheduler_back:

        prev = current;
        this_cpu = prev->processor;

        if (in_interrupt())
                goto scheduling_in_interrupt;

        release_kernel_lock(prev, this_cpu);

        /* Do "administrative" work here while we don't hold any locks */
        if (softirq_state[this_cpu].active & softirq_state[this_cpu].mask)
                goto handle_softirq;
handle_softirq_back:

        /*
         * 'sched_data' is protected by the fact that we can run
         * only one process per CPU.
         */
        sched_data = & aligned_data[this_cpu].schedule_data;

        spin_lock_irq(&runqueue_lock);

        /* move an exhausted RR process to be last.. */
        if (prev->policy == SCHED_RR)
                goto move_rr_last;
move_rr_back:

        switch (prev->state & ~TASK_EXCLUSIVE) {
                case TASK_INTERRUPTIBLE:
                        if (signal_pending(prev)) {
                                prev->state = TASK_RUNNING;
                                break;
                        }
                default:
                        del_from_runqueue(prev);
                case TASK_RUNNING:
        }
        prev->need_resched = 0;
        /*
         * this is the scheduler proper:
         */

repeat_schedule:
        /*
         * Default process to select..
         */
        next = idle_task(this_cpu);
        c = -1000;
        if (prev->state == TASK_RUNNING)
                goto still_running;

still_running_back:
        list_for_each(tmp, &runqueue_head) {
                p = list_entry(tmp, struct task_struct, run_list);
                if (can_schedule(p)) {
                        int weight = goodness(p, this_cpu, prev->active_mm);
                        if (weight > c)
                                c = weight, next = p;
                }
        }

        /* Do we need to re-calculate counters? */
        if (!c)
                goto recalculate;
        /*
         * from this point on nothing can prevent us from
         * switching to the next task, save this fact in
         * sched_data.
         */
        sched_data->curr = next;
#ifdef __SMP__
        next->has_cpu = 1;
        next->processor = this_cpu;
#endif
        spin_unlock_irq(&runqueue_lock);

        if (prev == next)
                goto same_process;
#ifdef __SMP__
        /*
         * maintain the per-process 'average timeslice' value.
         * (this has to be recalculated even if we reschedule to
         * the same process) Currently this is only used on SMP,
         * and it's approximate, so we do not have to maintain
         * it while holding the runqueue spinlock.
         */
        {
                cycles_t t, this_slice;

                t = get_cycles();
                this_slice = t - sched_data->last_schedule;
                sched_data->last_schedule = t;

                /*
                 * Exponentially fading average calculation, with
                 * some weight so it doesn't get fooled easily by
                 * smaller irregularities.
                 */
                prev->avg_slice = (this_slice*1 + prev->avg_slice*1)/2;
        }
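
        /*
         * Illustrative note: with the 1:1 weights above this is
         * avg = (slice + avg)/2, an exponentially fading average in
         * which the slice from n reschedules ago carries weight 2^-n
         * (the most recent slice counts 1/2, the one before it 1/4).
         */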
        /*
         * We drop the scheduler lock early (it's a global spinlock),
         * thus we have to lock the previous process from getting
         * rescheduled during switch_to().
         */

#endif /* __SMP__ */

        kstat.context_swtch++;
        /*
         * there are 3 processes which are affected by a context switch:
         *
         * prev == .... ==> (last => next)
         *
         * It's the 'much more previous' 'prev' that is on next's stack,
         * but prev is set to (the just run) 'last' process by switch_to().
         * This might sound slightly confusing but makes tons of sense.
         */
        prepare_to_switch();
        {
                struct mm_struct *mm = next->mm;
                struct mm_struct *oldmm = prev->active_mm;
                if (!mm) {
                        if (next->active_mm) BUG();
                        next->active_mm = oldmm;
                        atomic_inc(&oldmm->mm_count);
                        enter_lazy_tlb(oldmm, next, this_cpu);
                } else {
                        if (next->active_mm != mm) BUG();
                        switch_mm(oldmm, mm, next, this_cpu);
                }

                if (!prev->mm) {
                        prev->active_mm = NULL;
                        mmdrop(oldmm);
                }
        }

        /*
         * This just switches the register state and the
         * stack.
         */
        switch_to(prev, next, prev);
        __schedule_tail(prev);
same_process:
        reacquire_kernel_lock(current);
        return;

recalculate:
        {
                struct task_struct *p;
                spin_unlock_irq(&runqueue_lock);
                read_lock(&tasklist_lock);
                for_each_task(p)
                        p->counter = (p->counter >> 1) + p->priority;
                read_unlock(&tasklist_lock);
                spin_lock_irq(&runqueue_lock);
        }
        goto repeat_schedule;

still_running:
        c = prev_goodness(prev, this_cpu, prev->active_mm);
        next = prev;
        goto still_running_back;

handle_softirq:
        do_softirq();
        goto handle_softirq_back;

handle_tq_scheduler:
        run_task_queue(&tq_scheduler);
        goto tq_scheduler_back;

move_rr_last:
        if (!prev->counter) {
                prev->counter = prev->priority;
                move_last_runqueue(prev);
        }
        goto move_rr_back;

scheduling_in_interrupt:
        printk("Scheduling in interrupt\n");
        BUG();
        return;
}
static inline void __wake_up_common(wait_queue_head_t *q, unsigned int mode, const int sync)
{
        struct list_head *tmp, *head;
        struct task_struct *p;
        unsigned long flags;

        if (!q)
                goto out;

        wq_write_lock_irqsave(&q->lock, flags);

#if WAITQUEUE_DEBUG
        CHECK_MAGIC_WQHEAD(q);
#endif

        head = &q->task_list;
#if WAITQUEUE_DEBUG
        if (!head->next || !head->prev)
                WQ_BUG();
#endif
        list_for_each(tmp, head) {
                unsigned int state;
                wait_queue_t *curr = list_entry(tmp, wait_queue_t, task_list);

#if WAITQUEUE_DEBUG
                CHECK_MAGIC(curr->__magic);
#endif
                p = curr->task;
                state = p->state;
                if (state & (mode & ~TASK_EXCLUSIVE)) {
#if WAITQUEUE_DEBUG
                        curr->__waker = (long)__builtin_return_address(0);
#endif
                        if (sync)
                                wake_up_process_synchronous(p);
                        else
                                wake_up_process(p);
                        if (state & mode & TASK_EXCLUSIVE)
                                break;
                }
        }
        wq_write_unlock_irqrestore(&q->lock, flags);
out:
        return;
}
void __wake_up(wait_queue_head_t *q, unsigned int mode)
{
        __wake_up_common(q, mode, 0);
}

void __wake_up_sync(wait_queue_head_t *q, unsigned int mode)
{
        __wake_up_common(q, mode, 1);
}
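
/*
 * A minimal sketch (not from the original file; the helper name is
 * hypothetical) of waking sleepers directly through __wake_up(): 'mode'
 * selects which task states are eligible, and a sleeper whose state also
 * has TASK_EXCLUSIVE set stops the scan after it is woken.
 */
static inline void example_wake_interruptible(wait_queue_head_t *q)
{
        __wake_up(q, TASK_INTERRUPTIBLE);
}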
#define SLEEP_ON_VAR                            \
        unsigned long flags;                    \
        wait_queue_t wait;                      \
        init_waitqueue_entry(&wait, current);

#define SLEEP_ON_HEAD                           \
        wq_write_lock_irqsave(&q->lock,flags);  \
        __add_wait_queue(q, &wait);             \
        wq_write_unlock(&q->lock);

#define SLEEP_ON_TAIL                           \
        wq_write_lock_irq(&q->lock);            \
        __remove_wait_queue(q, &wait);          \
        wq_write_unlock_irqrestore(&q->lock,flags);
void interruptible_sleep_on(wait_queue_head_t *q)
{
        SLEEP_ON_VAR

        current->state = TASK_INTERRUPTIBLE;

        SLEEP_ON_HEAD
        schedule();
        SLEEP_ON_TAIL
}

long interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
{
        SLEEP_ON_VAR

        current->state = TASK_INTERRUPTIBLE;

        SLEEP_ON_HEAD
        timeout = schedule_timeout(timeout);
        SLEEP_ON_TAIL

        return timeout;
}

void sleep_on(wait_queue_head_t *q)
{
        SLEEP_ON_VAR

        current->state = TASK_UNINTERRUPTIBLE;

        SLEEP_ON_HEAD
        schedule();
        SLEEP_ON_TAIL
}

long sleep_on_timeout(wait_queue_head_t *q, long timeout)
{
        SLEEP_ON_VAR

        current->state = TASK_UNINTERRUPTIBLE;

        SLEEP_ON_HEAD
        timeout = schedule_timeout(timeout);
        SLEEP_ON_TAIL

        return timeout;
}
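
/*
 * A minimal sleep_on()/wake_up() pairing sketch (not from the original
 * file; names are hypothetical). Note that sleep_on() is only safe when
 * the condition test and the wakeup are serialized externally, e.g.
 * under the big kernel lock.
 */
static inline void example_wait_for_flag(wait_queue_head_t *q, volatile int *flag)
{
        while (!*flag)
                sleep_on(q);    /* the waker sets *flag, then calls wake_up(q) */
}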
void scheduling_functions_end_here(void) { }
/*
 * This has been replaced by sys_setpriority. Maybe it should be
 * moved into the arch dependent tree for those ports that require
 * it for backward compatibility?
 */

asmlinkage long sys_nice(int increment)
{
        unsigned long newprio;
        int increase = 0;

        /*
         * Setpriority might change our priority at the same moment.
         * We don't have to worry. Conceptually one call occurs first
         * and we have a single winner.
         */

        newprio = increment;
        if (increment < 0) {
                if (!capable(CAP_SYS_NICE))
                        return -EPERM;
                newprio = -increment;
                increase = 1;
        }

        if (newprio > 40)
                newprio = 40;
        /*
         * do a "normalization" of the priority (traditionally
         * Unix nice values are -20 to 20; Linux doesn't really
         * use that kind of thing, but uses the length of the
         * timeslice instead (default 200 ms). The rounding is
         * why we want to avoid negative values.
         */
        newprio = (newprio * DEF_PRIORITY + 10) / 20;
        increment = newprio;
        if (increase)
                increment = -increment;
        /*
         * Current->priority can change between this point
         * and the assignment. We are assigning not doing add/subs
         * so that's ok. Conceptually a process might just instantaneously
         * read the value we stomp over. I don't think that is an issue
         * unless posix makes it one. If so we can loop on changes
         * to current->priority.
         */
        newprio = current->priority - increment;
        if ((signed) newprio < 1)
                newprio = 1;
        if (newprio > DEF_PRIORITY*2)
                newprio = DEF_PRIORITY*2;
        current->priority = newprio;
        return 0;
}
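
/*
 * Worked example (illustrative, assuming DEF_PRIORITY = 20): nice(10)
 * gives newprio = (10*20 + 10)/20 = 10, so increment becomes 10 and a
 * task at the default priority 20 drops to 20 - 10 = 10, i.e. half the
 * timeslice. Negative increments require CAP_SYS_NICE.
 */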
static inline struct task_struct *find_process_by_pid(pid_t pid)
{
        struct task_struct *tsk = current;

        if (pid)
                tsk = find_task_by_pid(pid);
        return tsk;
}
static int setscheduler(pid_t pid, int policy,
                        struct sched_param *param)
{
        struct sched_param lp;
        struct task_struct *p;
        int retval;

        retval = -EINVAL;
        if (!param || pid < 0)
                goto out_nounlock;

        retval = -EFAULT;
        if (copy_from_user(&lp, param, sizeof(struct sched_param)))
                goto out_nounlock;

        /*
         * We play safe to avoid deadlocks.
         */
        spin_lock_irq(&runqueue_lock);
        read_lock(&tasklist_lock);

        p = find_process_by_pid(pid);

        retval = -ESRCH;
        if (!p)
                goto out_unlock;

        if (policy < 0)
                policy = p->policy;
        else {
                retval = -EINVAL;
                if (policy != SCHED_FIFO && policy != SCHED_RR &&
                                policy != SCHED_OTHER)
                        goto out_unlock;
        }

        /*
         * Valid priorities for SCHED_FIFO and SCHED_RR are 1..99, valid
         * priority for SCHED_OTHER is 0.
         */
        retval = -EINVAL;
        if (lp.sched_priority < 0 || lp.sched_priority > 99)
                goto out_unlock;
        if ((policy == SCHED_OTHER) != (lp.sched_priority == 0))
                goto out_unlock;

        retval = -EPERM;
        if ((policy == SCHED_FIFO || policy == SCHED_RR) &&
            !capable(CAP_SYS_NICE))
                goto out_unlock;
        if ((current->euid != p->euid) && (current->euid != p->uid) &&
            !capable(CAP_SYS_NICE))
                goto out_unlock;

        retval = 0;
        p->policy = policy;
        p->rt_priority = lp.sched_priority;
        if (task_on_runqueue(p))
                move_first_runqueue(p);

        current->need_resched = 1;

out_unlock:
        read_unlock(&tasklist_lock);
        spin_unlock_irq(&runqueue_lock);

out_nounlock:
        return retval;
}
asmlinkage long sys_sched_setscheduler(pid_t pid, int policy,
                                       struct sched_param *param)
{
        return setscheduler(pid, policy, param);
}

asmlinkage long sys_sched_setparam(pid_t pid, struct sched_param *param)
{
        return setscheduler(pid, -1, param);
}
asmlinkage long sys_sched_getscheduler(pid_t pid)
{
        struct task_struct *p;
        int retval;

        retval = -EINVAL;
        if (pid < 0)
                goto out_nounlock;

        read_lock(&tasklist_lock);

        retval = -ESRCH;
        p = find_process_by_pid(pid);
        if (p)
                retval = p->policy & ~SCHED_YIELD;

        read_unlock(&tasklist_lock);

out_nounlock:
        return retval;
}
asmlinkage long sys_sched_getparam(pid_t pid, struct sched_param *param)
{
        struct task_struct *p;
        struct sched_param lp;
        int retval;

        retval = -EINVAL;
        if (!param || pid < 0)
                goto out_nounlock;

        read_lock(&tasklist_lock);
        p = find_process_by_pid(pid);
        retval = -ESRCH;
        if (!p)
                goto out_unlock;
        lp.sched_priority = p->rt_priority;
        read_unlock(&tasklist_lock);

        /*
         * This one might sleep, we cannot do it with a spinlock held ...
         */
        retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;

out_nounlock:
        return retval;

out_unlock:
        read_unlock(&tasklist_lock);
        return retval;
}
asmlinkage long sys_sched_yield(void)
{
        spin_lock_irq(&runqueue_lock);
        if (current->policy == SCHED_OTHER)
                current->policy |= SCHED_YIELD;
        current->need_resched = 1;
        move_last_runqueue(current);
        spin_unlock_irq(&runqueue_lock);
        return 0;
}

asmlinkage long sys_sched_get_priority_max(int policy)
{
        /* 1..99 for SCHED_FIFO/SCHED_RR, 0 for SCHED_OTHER (see above) */
        switch (policy) {
        case SCHED_FIFO:
        case SCHED_RR:
                return 99;
        case SCHED_OTHER:
                return 0;
        }
        return -EINVAL;
}

asmlinkage long sys_sched_get_priority_min(int policy)
{
        switch (policy) {
        case SCHED_FIFO:
        case SCHED_RR:
                return 1;
        case SCHED_OTHER:
                return 0;
        }
        return -EINVAL;
}
asmlinkage long sys_sched_rr_get_interval(pid_t pid, struct timespec *interval)
{
        struct timespec t;

        t.tv_sec = 0;
        t.tv_nsec = 150000;
        if (copy_to_user(interval, &t, sizeof(struct timespec)))
                return -EFAULT;
        return 0;
}
static void show_task(struct task_struct * p)
{
        unsigned long free = 0;
        int state;
        static const char * stat_nam[] = { "R", "S", "D", "Z", "T", "W" };

        printk("%-8s ", p->comm);
        state = p->state ? ffz(~p->state) + 1 : 0;
        if (((unsigned) state) < sizeof(stat_nam)/sizeof(char *))
                printk(stat_nam[state]);
        else
                printk(" ");
#if (BITS_PER_LONG == 32)
        if (p == current)
                printk(" current  ");
        else
                printk(" %08lX ", thread_saved_pc(&p->thread));
#else
        if (p == current)
                printk("   current task   ");
        else
                printk(" %016lx ", thread_saved_pc(&p->thread));
#endif
        {
                unsigned long * n = (unsigned long *) (p+1);
                while (!*n)
                        n++;
                free = (unsigned long) n - (unsigned long)(p+1);
        }
        printk("%5lu %5d %6d ", free, p->pid, p->p_pptr->pid);
        if (p->p_cptr)
                printk("%5d ", p->p_cptr->pid);
        else
                printk("      ");
        if (!p->mm)
                printk(" (L-TLB) ");
        else
                printk(" (NOTLB) ");
        if (p->p_ysptr)
                printk("%7d", p->p_ysptr->pid);
        else
                printk("       ");
        if (p->p_osptr)
                printk(" %5d\n", p->p_osptr->pid);
        else
                printk("\n");

        {
                struct signal_queue *q;
                char s[sizeof(sigset_t)*2+1], b[sizeof(sigset_t)*2+1];

                render_sigset_t(&p->signal, s);
                render_sigset_t(&p->blocked, b);
                printk("   sig: %d %s %s :", signal_pending(p), s, b);
                for (q = p->sigqueue; q ; q = q->next)
                        printk(" %d", q->info.si_signo);
                printk("\n");
        }
}
char * render_sigset_t(sigset_t *set, char *buffer)
{
        int i = _NSIG, x;
        do {
                i -= 4, x = 0;
                if (sigismember(set, i+1)) x |= 1;
                if (sigismember(set, i+2)) x |= 2;
                if (sigismember(set, i+3)) x |= 4;
                if (sigismember(set, i+4)) x |= 8;
                *buffer++ = (x < 10 ? '0' : 'a' - 10) + x;
        } while (i >= 4);
        *buffer = 0;
        return buffer;
}
void show_state(void)
{
        struct task_struct *p;

#if (BITS_PER_LONG == 32)
        printk("\n"
               "                         free                        sibling\n");
        printk("  task             PC    stack   pid father child younger older\n");
#else
        printk("\n"
               "                                 free                        sibling\n");
        printk("  task                 PC        stack   pid father child younger older\n");
#endif
        read_lock(&tasklist_lock);
        for_each_task(p)
                show_task(p);
        read_unlock(&tasklist_lock);
}
/*
 * Put all the gunge required to become a kernel thread without
 * attached user resources in one place where it belongs.
 */

void daemonize(void)
{
        struct fs_struct *fs;

        /*
         * If we were started as result of loading a module, close all of the
         * user space pages. We don't need them, and if we didn't close them
         * they would be locked into memory.
         */
        exit_mm(current);

        current->session = 1;
        current->pgrp = 1;

        /* Become as one with the init task */

        exit_fs(current);       /* current->fs->count--; */
        fs = init_task.fs;
        current->fs = fs;
        atomic_inc(&fs->count);
        exit_files(current);
        current->files = init_task.files;
        atomic_inc(&current->files->count);
}
void __init init_idle(void)
{
        struct schedule_data * sched_data;
        sched_data = &aligned_data[smp_processor_id()].schedule_data;

        if (current != &init_task && task_on_runqueue(current)) {
                printk("UGH! (%d:%d) was on the runqueue, removing.\n",
                        smp_processor_id(), current->pid);
                del_from_runqueue(current);
        }
        sched_data->curr = current;
        sched_data->last_schedule = get_cycles();
}
void __init sched_init(void)
{
        /*
         * We have to do a little magic to get the first
         * process right in SMP mode.
         */
        int cpu = smp_processor_id();
        int nr;

        init_task.processor = cpu;

        for(nr = 0; nr < PIDHASH_SZ; nr++)
                pidhash[nr] = NULL;

        init_bh(TIMER_BH, timer_bh);
        init_bh(TQUEUE_BH, tqueue_bh);
        init_bh(IMMEDIATE_BH, immediate_bh);

        /*
         * The boot idle thread does lazy MMU switching as well:
         */
        atomic_inc(&init_mm.mm_count);
        enter_lazy_tlb(&init_mm, current, cpu);
}