/*
 * Kernel scheduler and related syscalls
 *
 * Copyright (C) 1991, 1992  Linus Torvalds
 *
 * 1996-12-23  Modified by Dave Grothe to fix bugs in semaphores and
 *             make semaphores SMP safe
 * 1998-11-19  Implemented schedule_timeout() and related stuff
 * 1998-12-28  Implemented better SMP scheduling by Ingo Molnar
 */
/*
 * 'sched.c' is the main kernel file. It contains scheduling primitives
 * (sleep_on, wakeup, schedule etc.) as well as a number of simple system
 * call functions (of the getpid() type), which just extract a field from
 * the current task.
 */
#include <linux/config.h>
#include <linux/init.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>

#include <asm/uaccess.h>
#include <asm/mmu_context.h>
extern void timer_bh(void);
extern void tqueue_bh(void);
extern void immediate_bh(void);

unsigned securebits = SECUREBITS_DEFAULT; /* systemwide security settings */

extern void mem_use(void);
/*
 * Init task must be ok at boot for the ix86 as we will check its signals
 * via the SMP irq return path.
 */
struct task_struct * init_tasks[NR_CPUS] = {&init_task, };
/*
 * The tasklist_lock protects the linked list of processes.
 *
 * The scheduler lock is protecting against multiple entry
 * into the scheduling code, and doesn't need to worry
 * about interrupts (because interrupts cannot call the
 * scheduler).
 *
 * The run-queue lock locks the parts that actually access
 * and change the run-queues, and have to be interrupt-safe.
 */
__cacheline_aligned spinlock_t runqueue_lock = SPIN_LOCK_UNLOCKED;	/* second */
__cacheline_aligned rwlock_t tasklist_lock = RW_LOCK_UNLOCKED;		/* third */

static LIST_HEAD(runqueue_head);
/*
 * We align per-CPU scheduling data on cacheline boundaries,
 * to prevent cacheline ping-pong.
 */
static union {
	struct schedule_data {
		struct task_struct * curr;
		cycles_t last_schedule;
	} schedule_data;
	char __pad [SMP_CACHE_BYTES];
} aligned_data [NR_CPUS] __cacheline_aligned = { {{&init_task,0}}};
#define cpu_curr(cpu) aligned_data[(cpu)].schedule_data.curr

struct kernel_stat kstat = { 0 };
#ifdef CONFIG_SMP
#define idle_task(cpu) (init_tasks[cpu_number_map(cpu)])
#define can_schedule(p) (!(p)->has_cpu)
#else
#define idle_task(cpu) (&init_task)
#define can_schedule(p) (1)
#endif
void scheduling_functions_start_here(void) { }

/*
 * This is the function that decides how desirable a process is..
 * You can weigh different processes against each other depending
 * on what CPU they've run on lately etc to try to handle cache
 * and TLB miss penalties.
 *
 * Return values:
 *	 -1000: never select this
 *	     0: out of time, recalculate counters (but it might still be
 *		selected)
 *	   +ve: "goodness" value (the larger, the better)
 *	 +1000: realtime process, select this.
 */
static inline int goodness(struct task_struct * p, int this_cpu, struct mm_struct *this_mm)
{
	int weight;

	/*
	 * Realtime process, select the first one on the
	 * runqueue (taking priorities within processes
	 * into account).
	 */
	if (p->policy != SCHED_OTHER) {
		weight = 1000 + p->rt_priority;
		goto out;
	}
	/*
	 * Give the process a first-approximation goodness value
	 * according to the number of clock-ticks it has left.
	 *
	 * Don't do any other calculations if the time slice is
	 * over..
	 */
	weight = p->counter;
	if (!weight)
		goto out;

	/* Give a largish advantage to the same processor... */
	/* (this is equivalent to penalizing other processors) */
	if (p->processor == this_cpu)
		weight += PROC_CHANGE_PENALTY;
	/* .. and a slight advantage to the current MM */
	if (p->mm == this_mm || !p->mm)
		weight += 1;
	weight += p->priority;

out:
	return weight;
}
/*
 * subtle. We want to discard a yielded process only if it's being
 * considered for a reschedule. Wakeup-time 'queries' of the scheduling
 * state do not count. Another optimization we do: sched_yield()-ed
 * processes are runnable (and thus will be considered for scheduling)
 * right when they are calling schedule(). So the only place we need
 * to care about SCHED_YIELD is when we calculate the previous process'
 * goodness.
 */
static inline int prev_goodness(struct task_struct * p, int this_cpu, struct mm_struct *this_mm)
{
	if (p->policy & SCHED_YIELD) {
		p->policy &= ~SCHED_YIELD;
		return 0;
	}
	return goodness(p, this_cpu, this_mm);
}
/*
 * the 'goodness value' of replacing a process on a given CPU.
 * positive value means 'replace', zero or negative means 'don't'.
 */
static inline int preemption_goodness(struct task_struct * prev, struct task_struct * p, int cpu)
{
	return goodness(p, cpu, prev->active_mm) - goodness(prev, cpu, prev->active_mm);
}
/*
 * This is ugly, but reschedule_idle() is very timing-critical.
 * We enter with the runqueue spinlock held, but we might end
 * up unlocking it early, so the caller must not unlock the
 * runqueue, it's always done by reschedule_idle().
 */
static inline void reschedule_idle(struct task_struct * p, unsigned long flags)
{
#ifdef CONFIG_SMP
	int this_cpu = smp_processor_id(), target_cpu;
	struct task_struct *tsk;
	int cpu, best_cpu, i;
	/*
	 * shortcut if the woken up task's last CPU is
	 * idle now.
	 */
	best_cpu = p->processor;
	tsk = idle_task(best_cpu);
	if (cpu_curr(best_cpu) == tsk)
		goto send_now;
	/*
	 * We know that the preferred CPU has a cache-affine current
	 * process, let's try to find a new idle CPU for the woken-up
	 * process:
	 */
	for (i = smp_num_cpus - 1; i >= 0; i--) {
		cpu = cpu_logical_map(i);
		if (cpu == best_cpu)
			continue;
		tsk = cpu_curr(cpu);
		/*
		 * We use the last available idle CPU. This creates
		 * a priority list between idle CPUs, but this is not
		 * a problem.
		 */
		if (tsk == idle_task(cpu))
			goto send_now;
	}
	/*
	 * No CPU is idle, but maybe this process has enough priority
	 * to preempt its preferred CPU.
	 */
	tsk = cpu_curr(best_cpu);
	if (preemption_goodness(tsk, p, best_cpu) > 0)
		goto send_now;
	/*
	 * We will get here often - or in the high CPU contention
	 * case. No CPU is idle and this process is either lowprio or
	 * the preferred CPU is highprio. Try to preempt some other CPU
	 * only if it's RT or if it's interactive and the preferred
	 * cpu won't reschedule shortly.
	 */
	if (p->avg_slice < cacheflush_time || (p->policy & ~SCHED_YIELD) != SCHED_OTHER) {
		for (i = smp_num_cpus - 1; i >= 0; i--) {
			cpu = cpu_logical_map(i);
			if (cpu == best_cpu)
				continue;
			tsk = cpu_curr(cpu);
			if (preemption_goodness(tsk, p, cpu) > 0)
				goto send_now;
		}
	}

	spin_unlock_irqrestore(&runqueue_lock, flags);
	return;
send_now:
	target_cpu = tsk->processor;
	tsk->need_resched = 1;
	spin_unlock_irqrestore(&runqueue_lock, flags);
	/*
	 * the APIC stuff can go outside of the lock because
	 * it uses no task information, only CPU#.
	 */
	if (target_cpu != this_cpu)
		smp_send_reschedule(target_cpu);
	return;
#else /* UP */
	int this_cpu = smp_processor_id();
	struct task_struct *tsk;

	tsk = cpu_curr(this_cpu);
	if (preemption_goodness(tsk, p, this_cpu) > 0)
		tsk->need_resched = 1;
	spin_unlock_irqrestore(&runqueue_lock, flags);
#endif
}
/*
 * This has to add the process to the _beginning_ of the
 * run-queue, not the end. See the comment about "This is
 * subtle" in the scheduler proper..
 */
static inline void add_to_runqueue(struct task_struct * p)
{
	list_add(&p->run_list, &runqueue_head);
}
static inline void move_last_runqueue(struct task_struct * p)
{
	list_del(&p->run_list);
	list_add_tail(&p->run_list, &runqueue_head);
}

static inline void move_first_runqueue(struct task_struct * p)
{
	list_del(&p->run_list);
	list_add(&p->run_list, &runqueue_head);
}
/*
 * Wake up a process. Put it on the run-queue if it's not
 * already there.  The "current" process is always on the
 * run-queue (except when the actual re-schedule is in
 * progress), and as such you're allowed to do the simpler
 * "current->state = TASK_RUNNING" to mark yourself runnable
 * without the overhead of this.
 */
inline void wake_up_process(struct task_struct * p)
{
	unsigned long flags;

	/*
	 * We want the common case to fall through straight, thus the goto.
	 */
	spin_lock_irqsave(&runqueue_lock, flags);
	p->state = TASK_RUNNING;
	if (task_on_runqueue(p))
		goto out;
	add_to_runqueue(p);
	reschedule_idle(p, flags); // spin_unlocks runqueue
	return;
out:
	spin_unlock_irqrestore(&runqueue_lock, flags);
}
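
/*
 * Illustrative sketch (not part of the original file): the usual pairing
 * for wake_up_process().  One side records its task pointer and sleeps by
 * setting its state before re-checking the condition; the other side sets
 * the condition and wakes the recorded task.  The names 'worker_task' and
 * 'work_ready' are made up for this example.
 */
#if 0	/* example only, not compiled */
static struct task_struct *worker_task;
static volatile int work_ready;

static void worker_wait_example(void)
{
	worker_task = current;
	for (;;) {
		current->state = TASK_INTERRUPTIBLE;
		if (work_ready)
			break;
		schedule();
	}
	/* the cheap path described in the comment above wake_up_process() */
	current->state = TASK_RUNNING;
	work_ready = 0;
}

static void producer_example(void)
{
	work_ready = 1;
	wake_up_process(worker_task);	/* put the worker back on the run-queue */
}
#endif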
static inline void wake_up_process_synchronous(struct task_struct * p)
{
	unsigned long flags;

	/*
	 * We want the common case to fall through straight, thus the goto.
	 */
	spin_lock_irqsave(&runqueue_lock, flags);
	p->state = TASK_RUNNING;
	if (task_on_runqueue(p))
		goto out;
	add_to_runqueue(p);
out:
	spin_unlock_irqrestore(&runqueue_lock, flags);
}
static void process_timeout(unsigned long __data)
{
	struct task_struct * p = (struct task_struct *) __data;

	wake_up_process(p);
}
signed long schedule_timeout(signed long timeout)
{
	struct timer_list timer;
	unsigned long expire;

	switch (timeout)
	{
	case MAX_SCHEDULE_TIMEOUT:
		/*
		 * These two special cases are useful to be comfortable
		 * in the caller. Nothing more. We could take
		 * MAX_SCHEDULE_TIMEOUT from one of the negative values
		 * but I'd like to return a valid offset (>=0) to allow
		 * the caller to do everything it wants with the retval.
		 */
		schedule();
		goto out;
	default:
		/*
		 * Another bit of PARANOID. Note that the retval will be
		 * 0 since no piece of the kernel is supposed to do a check
		 * for a negative retval of schedule_timeout() (since it
		 * should never happen anyway). You just have the printk()
		 * that will tell you if something has gone wrong and where.
		 */
		if (timeout < 0)
		{
			printk(KERN_ERR "schedule_timeout: wrong timeout "
				"value %lx from %p\n", timeout,
				__builtin_return_address(0));
			current->state = TASK_RUNNING;
			goto out;
		}
	}
	expire = timeout + jiffies;

	init_timer(&timer);
	timer.expires = expire;
	timer.data = (unsigned long) current;
	timer.function = process_timeout;

	add_timer(&timer);
	schedule();
	del_timer(&timer);
	/* RED-PEN. The timer may be running right now on another CPU.
	 * Pray that the process will not exit too quickly. */

	timeout = expire - jiffies;

 out:
	return timeout < 0 ? 0 : timeout;
}
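
/*
 * Illustrative sketch (not part of the original file): how a driver might
 * combine a wait queue with schedule_timeout() to wait up to one second
 * for a condition.  'wq' and 'done' are hypothetical names supplied by the
 * caller; an interrupt handler would set *done and then wake up 'wq'.
 */
#if 0	/* example only, not compiled */
static int wait_for_done_example(wait_queue_head_t *wq, volatile int *done)
{
	signed long remaining = HZ;	/* at most one second */
	wait_queue_t wait;

	init_waitqueue_entry(&wait, current);
	add_wait_queue(wq, &wait);
	for (;;) {
		current->state = TASK_INTERRUPTIBLE;
		if (*done || !remaining)
			break;
		remaining = schedule_timeout(remaining);
		if (signal_pending(current))
			break;
	}
	current->state = TASK_RUNNING;
	remove_wait_queue(wq, &wait);

	if (*done)
		return 0;
	return signal_pending(current) ? -ERESTARTSYS : -ETIMEDOUT;
}
#endif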
/*
 * schedule_tail() is getting called from the fork return path. This
 * cleans up all remaining scheduler things, without impacting the
 * common case.
 */
static inline void __schedule_tail(struct task_struct *prev)
{
	current->need_resched |= prev->need_resched;
#ifdef CONFIG_SMP
	if ((prev->state == TASK_RUNNING) &&
			(prev != idle_task(smp_processor_id()))) {
		unsigned long flags;

		spin_lock_irqsave(&runqueue_lock, flags);
		reschedule_idle(prev, flags); // spin_unlocks runqueue
	}
#endif /* CONFIG_SMP */
}
void schedule_tail(struct task_struct *prev)
{
	__schedule_tail(prev);
}
/*
 * 'schedule()' is the scheduler function. It's a very simple and nice
 * scheduler: it's not perfect, but certainly works for most things.
 *
 * The goto is "interesting".
 *
 * NOTE!! Task 0 is the 'idle' task, which gets called when no other
 * tasks can run. It cannot be killed, and it cannot sleep. The 'state'
 * information in task[0] is never used.
 */
asmlinkage void schedule(void)
{
	struct schedule_data * sched_data;
	struct task_struct *prev, *next, *p;
	struct list_head *tmp;
	int this_cpu, c;
	if (!current->active_mm) BUG();
	if (tq_scheduler)
		goto handle_tq_scheduler;
tq_scheduler_back:

	prev = current;
	this_cpu = prev->processor;

	if (in_interrupt())
		goto scheduling_in_interrupt;
	release_kernel_lock(prev, this_cpu);

	/* Do "administrative" work here while we don't hold any locks */
	if (softirq_state[this_cpu].active & softirq_state[this_cpu].mask)
		goto handle_softirq;
handle_softirq_back:
	/*
	 * 'sched_data' is protected by the fact that we can run
	 * only one process per CPU.
	 */
	sched_data = & aligned_data[this_cpu].schedule_data;

	spin_lock_irq(&runqueue_lock);
	/* move an exhausted RR process to be last.. */
	if (prev->policy == SCHED_RR)
		goto move_rr_last;
move_rr_back:
	switch (prev->state & ~TASK_EXCLUSIVE) {
		case TASK_INTERRUPTIBLE:
			if (signal_pending(prev)) {
				prev->state = TASK_RUNNING;
				break;
			}
		default:
			del_from_runqueue(prev);
		case TASK_RUNNING:
	}
	prev->need_resched = 0;
	/*
	 * this is the scheduler proper:
	 */

repeat_schedule:
	/*
	 * Default process to select..
	 */
	next = idle_task(this_cpu);
	c = -1000;
	if (prev->state == TASK_RUNNING)
		goto still_running;
still_running_back:
	list_for_each(tmp, &runqueue_head) {
		p = list_entry(tmp, struct task_struct, run_list);
		if (can_schedule(p)) {
			int weight = goodness(p, this_cpu, prev->active_mm);
			if (weight > c)
				c = weight, next = p;
		}
	}

	/* Do we need to re-calculate counters? */
	if (!c)
		goto recalculate;
	/*
	 * from this point on nothing can prevent us from
	 * switching to the next task, save this fact in
	 * sched_data.
	 */
	sched_data->curr = next;
	next->processor = this_cpu;
	spin_unlock_irq(&runqueue_lock);
#ifdef CONFIG_SMP
	/*
	 * maintain the per-process 'average timeslice' value.
	 * (this has to be recalculated even if we reschedule to
	 * the same process) Currently this is only used on SMP,
	 * and it's approximate, so we do not have to maintain
	 * it while holding the runqueue spinlock.
	 */
	{
		cycles_t t, this_slice;

		t = get_cycles();
		this_slice = t - sched_data->last_schedule;
		sched_data->last_schedule = t;
		/*
		 * Exponentially fading average calculation, with
		 * some weight so it doesn't get fooled easily by
		 * smaller irregularities.
		 */
		prev->avg_slice = (this_slice*1 + prev->avg_slice*1)/2;
	}
	/*
	 * We drop the scheduler lock early (it's a global spinlock),
	 * thus we have to lock the previous process from getting
	 * rescheduled during switch_to().
	 */
#endif /* CONFIG_SMP */

	kstat.context_swtch++;
	/*
	 * there are 3 processes which are affected by a context switch:
	 *
	 * prev == .... ==> (last => next)
	 *
	 * It's the 'much more previous' 'prev' that is on next's stack,
	 * but prev is set to (the just run) 'last' process by switch_to().
	 * This might sound slightly confusing but makes tons of sense.
	 */
	{
		struct mm_struct *mm = next->mm;
		struct mm_struct *oldmm = prev->active_mm;
		if (!mm) {
			if (next->active_mm) BUG();
			next->active_mm = oldmm;
			atomic_inc(&oldmm->mm_count);
			enter_lazy_tlb(oldmm, next, this_cpu);
		} else {
			if (next->active_mm != mm) BUG();
			switch_mm(oldmm, mm, next, this_cpu);
		}

		if (!prev->mm) {
			prev->active_mm = NULL;
			mmdrop(oldmm);
		}
	}
	/*
	 * This just switches the register state and the
	 * stack.
	 */
	switch_to(prev, next, prev);
	__schedule_tail(prev);
	reacquire_kernel_lock(current);
	return;
recalculate:
	{
		struct task_struct *p;
		spin_unlock_irq(&runqueue_lock);
		read_lock(&tasklist_lock);
		for_each_task(p)
			p->counter = (p->counter >> 1) + p->priority;
		read_unlock(&tasklist_lock);
		spin_lock_irq(&runqueue_lock);
	}
	goto repeat_schedule;
still_running:
	c = prev_goodness(prev, this_cpu, prev->active_mm);
	next = prev;
	goto still_running_back;

handle_softirq:
	do_softirq();
	goto handle_softirq_back;
handle_tq_scheduler:
	/*
	 * do not run the task queue with disabled interrupts,
	 * cli() wouldn't work on SMP
	 */
	sti();
	run_task_queue(&tq_scheduler);
	goto tq_scheduler_back;
move_rr_last:
	if (!prev->counter) {
		prev->counter = prev->priority;
		move_last_runqueue(prev);
	}
	goto move_rr_back;

scheduling_in_interrupt:
	printk("Scheduling in interrupt\n");
	return;
}
static inline void __wake_up_common(wait_queue_head_t *q, unsigned int mode, const int sync)
{
	struct list_head *tmp, *head;
	struct task_struct *p;
	unsigned long flags;

	wq_write_lock_irqsave(&q->lock, flags);
	CHECK_MAGIC_WQHEAD(q);

	head = &q->task_list;
	if (!head->next || !head->prev)
		WQ_BUG();

	list_for_each(tmp, head) {
		unsigned int state;
		wait_queue_t *curr = list_entry(tmp, wait_queue_t, task_list);

		CHECK_MAGIC(curr->__magic);

		p = curr->task;
		state = p->state;
		if (state & (mode & ~TASK_EXCLUSIVE)) {
			curr->__waker = (long)__builtin_return_address(0);
			if (sync)
				wake_up_process_synchronous(p);
			else
				wake_up_process(p);
			if (state & mode & TASK_EXCLUSIVE)
				break;
		}
	}
	wq_write_unlock_irqrestore(&q->lock, flags);
}
void __wake_up(wait_queue_head_t *q, unsigned int mode)
{
	__wake_up_common(q, mode, 0);
}

void __wake_up_sync(wait_queue_head_t *q, unsigned int mode)
{
	__wake_up_common(q, mode, 1);
}
#define	SLEEP_ON_VAR				\
	unsigned long flags;			\
	wait_queue_t wait;			\
	init_waitqueue_entry(&wait, current);

#define	SLEEP_ON_HEAD					\
	wq_write_lock_irqsave(&q->lock,flags);		\
	__add_wait_queue(q, &wait);			\
	wq_write_unlock(&q->lock);

#define	SLEEP_ON_TAIL						\
	wq_write_lock_irq(&q->lock);				\
	__remove_wait_queue(q, &wait);				\
	wq_write_unlock_irqrestore(&q->lock,flags);
void interruptible_sleep_on(wait_queue_head_t *q)
{
	SLEEP_ON_VAR

	current->state = TASK_INTERRUPTIBLE;

	SLEEP_ON_HEAD
	schedule();
	SLEEP_ON_TAIL
}
long interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
{
	SLEEP_ON_VAR

	current->state = TASK_INTERRUPTIBLE;

	SLEEP_ON_HEAD
	timeout = schedule_timeout(timeout);
	SLEEP_ON_TAIL

	return timeout;
}
void sleep_on(wait_queue_head_t *q)
{
	SLEEP_ON_VAR

	current->state = TASK_UNINTERRUPTIBLE;

	SLEEP_ON_HEAD
	schedule();
	SLEEP_ON_TAIL
}
long sleep_on_timeout(wait_queue_head_t *q, long timeout)
{
	SLEEP_ON_VAR

	current->state = TASK_UNINTERRUPTIBLE;

	SLEEP_ON_HEAD
	timeout = schedule_timeout(timeout);
	SLEEP_ON_TAIL

	return timeout;
}
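
/*
 * Illustrative sketch (not part of the original file): the legacy sleep_on()
 * style interface from a driver's point of view.  'button_wq' and
 * 'button_pressed' are hypothetical names.  Note that the window between
 * checking the condition and going to sleep is the classic weakness of this
 * interface, which is why the caller re-checks the condition in a loop.
 */
#if 0	/* example only, not compiled */
static DECLARE_WAIT_QUEUE_HEAD(button_wq);
static volatile int button_pressed;

static void button_irq_example(void)
{
	button_pressed = 1;
	wake_up_interruptible(&button_wq);	/* wakes TASK_INTERRUPTIBLE sleepers */
}

static int button_read_example(void)
{
	while (!button_pressed) {
		interruptible_sleep_on(&button_wq);
		if (signal_pending(current))
			return -ERESTARTSYS;
	}
	button_pressed = 0;
	return 0;
}
#endif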
void scheduling_functions_end_here(void) { }

/*
 * This has been replaced by sys_setpriority. Maybe it should be
 * moved into the arch dependent tree for those ports that require
 * it for backward compatibility?
 */
asmlinkage long sys_nice(int increment)
{
	unsigned long newprio;
	int increase = 0;

	/*
	 * Setpriority might change our priority at the same moment.
	 * We don't have to worry. Conceptually one call occurs first
	 * and we have a single winner.
	 */
	newprio = increment;
	if (increment < 0) {
		if (!capable(CAP_SYS_NICE))
			return -EPERM;
		newprio = -increment;
		increase = 1;
	}
	/*
	 * do a "normalization" of the priority (traditionally
	 * Unix nice values are -20 to 20; Linux doesn't really
	 * use that kind of thing, but uses the length of the
	 * timeslice instead (default 200 ms). The rounding is
	 * why we want to avoid negative values.
	 */
	newprio = (newprio * DEF_PRIORITY + 10) / 20;
	increment = newprio;
	if (increase)
		increment = -increment;
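	/*
	 * Worked example (not in the original source; assumes DEF_PRIORITY
	 * is 20, i.e. HZ == 100): nice -20 gives newprio = (20*20 + 10)/20
	 * = 20, a full DEF_PRIORITY worth of extra timeslice, while nice +1
	 * gives (1*20 + 10)/20 = 1 tick.
	 */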
	/*
	 * Current->priority can change between this point
	 * and the assignment. We are assigning, not doing add/subs,
	 * so that's ok. Conceptually a process might just instantaneously
	 * read the value we stomp over. I don't think that is an issue
	 * unless POSIX makes it one. If so we can loop on changes
	 * to current->priority.
	 */
	newprio = current->priority - increment;
	if ((signed) newprio < 1)
		newprio = 1;
	if (newprio > DEF_PRIORITY*2)
		newprio = DEF_PRIORITY*2;
	current->priority = newprio;
	return 0;
}
static inline struct task_struct *find_process_by_pid(pid_t pid)
{
	struct task_struct *tsk = current;

	if (pid)
		tsk = find_task_by_pid(pid);
	return tsk;
}
static int setscheduler(pid_t pid, int policy,
			struct sched_param *param)
{
	struct sched_param lp;
	struct task_struct *p;
	int retval;
	retval = -EINVAL;
	if (!param || pid < 0)
		goto out_nounlock;

	retval = -EFAULT;
	if (copy_from_user(&lp, param, sizeof(struct sched_param)))
		goto out_nounlock;

	/*
	 * We play safe to avoid deadlocks.
	 */
	spin_lock_irq(&runqueue_lock);
	read_lock(&tasklist_lock);

	p = find_process_by_pid(pid);
	retval = -ESRCH;
	if (!p)
		goto out_unlock;
	retval = -EINVAL;
	if (policy != SCHED_FIFO && policy != SCHED_RR &&
			policy != SCHED_OTHER)
		goto out_unlock;

	/*
	 * Valid priorities for SCHED_FIFO and SCHED_RR are 1..99, valid
	 * priority for SCHED_OTHER is 0.
	 */
	if (lp.sched_priority < 0 || lp.sched_priority > 99)
		goto out_unlock;
	if ((policy == SCHED_OTHER) != (lp.sched_priority == 0))
		goto out_unlock;
	retval = -EPERM;
	if ((policy == SCHED_FIFO || policy == SCHED_RR) &&
	    !capable(CAP_SYS_NICE))
		goto out_unlock;
	if ((current->euid != p->euid) && (current->euid != p->uid) &&
	    !capable(CAP_SYS_NICE))
		goto out_unlock;
	retval = 0;
	p->policy = policy;
	p->rt_priority = lp.sched_priority;
	if (task_on_runqueue(p))
		move_first_runqueue(p);

	current->need_resched = 1;

out_unlock:
	read_unlock(&tasklist_lock);
	spin_unlock_irq(&runqueue_lock);

out_nounlock:
	return retval;
}
asmlinkage long sys_sched_setscheduler(pid_t pid, int policy,
				       struct sched_param *param)
{
	return setscheduler(pid, policy, param);
}
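
/*
 * Illustrative sketch (not part of the original file): how the syscall above
 * is reached from user space through the C library wrapper.  The priority
 * value 50 is arbitrary; per the checks in setscheduler(), it must be 1..99
 * for SCHED_FIFO/SCHED_RR and the caller needs CAP_SYS_NICE.
 */
#if 0	/* user-space example, not kernel code */
#include <sched.h>
#include <stdio.h>

int make_self_fifo(void)
{
	struct sched_param sp;

	sp.sched_priority = 50;
	if (sched_setscheduler(0, SCHED_FIFO, &sp) == -1) {	/* pid 0 == calling process */
		perror("sched_setscheduler");
		return -1;
	}
	return 0;
}
#endif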
asmlinkage long sys_sched_setparam(pid_t pid, struct sched_param *param)
{
	return setscheduler(pid, -1, param);
}
asmlinkage long sys_sched_getscheduler(pid_t pid)
{
	struct task_struct *p;
	int retval;

	retval = -ESRCH;
	read_lock(&tasklist_lock);
	p = find_process_by_pid(pid);
	if (p)
		retval = p->policy & ~SCHED_YIELD;
	read_unlock(&tasklist_lock);

	return retval;
}
asmlinkage long sys_sched_getparam(pid_t pid, struct sched_param *param)
{
	struct task_struct *p;
	struct sched_param lp;
	int retval;

	retval = -EINVAL;
	if (!param || pid < 0)
		goto out_nounlock;

	read_lock(&tasklist_lock);
	p = find_process_by_pid(pid);
	retval = -ESRCH;
	if (!p)
		goto out_unlock;
	lp.sched_priority = p->rt_priority;
	read_unlock(&tasklist_lock);

	/*
	 * This one might sleep, we cannot do it with a spinlock held ...
	 */
	retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;

out_nounlock:
	return retval;

out_unlock:
	read_unlock(&tasklist_lock);
	return retval;
}
asmlinkage long sys_sched_yield(void)
{
	spin_lock_irq(&runqueue_lock);
	if (current->policy == SCHED_OTHER)
		current->policy |= SCHED_YIELD;
	current->need_resched = 1;
	move_last_runqueue(current);
	spin_unlock_irq(&runqueue_lock);
	return 0;
}
asmlinkage long sys_sched_get_priority_max(int policy)

asmlinkage long sys_sched_get_priority_min(int policy)
asmlinkage long sys_sched_rr_get_interval(pid_t pid, struct timespec *interval)
{
	struct timespec t;

	if (copy_to_user(interval, &t, sizeof(struct timespec)))
		return -EFAULT;
	return 0;
}
static void show_task(struct task_struct * p)
{
	unsigned long free = 0;
	int state;
	static const char * stat_nam[] = { "R", "S", "D", "Z", "T", "W" };
	printk("%-8s ", p->comm);
	state = p->state ? ffz(~p->state) + 1 : 0;
	if (((unsigned) state) < sizeof(stat_nam)/sizeof(char *))
		printk(stat_nam[state]);
#if (BITS_PER_LONG == 32)
	if (p == current)
		printk(" current ");
	else
		printk(" %08lX ", thread_saved_pc(&p->thread));
#else
	if (p == current)
		printk(" current task ");
	else
		printk(" %016lx ", thread_saved_pc(&p->thread));
#endif
	{
		unsigned long * n = (unsigned long *) (p+1);
		while (!*n)
			n++;
		free = (unsigned long) n - (unsigned long)(p+1);
	}
	printk("%5lu %5d %6d ", free, p->pid, p->p_pptr->pid);
	printk("%5d ", p->p_cptr->pid);
	printk(" (L-TLB) ");
	printk(" (NOTLB) ");
	printk("%7d", p->p_ysptr->pid);
	printk(" %5d\n", p->p_osptr->pid);
	{
		struct signal_queue *q;
		char s[sizeof(sigset_t)*2+1], b[sizeof(sigset_t)*2+1];

		render_sigset_t(&p->signal, s);
		render_sigset_t(&p->blocked, b);
		printk("   sig: %d %s %s :", signal_pending(p), s, b);
		for (q = p->sigqueue; q ; q = q->next)
			printk(" %d", q->info.si_signo);
	}
}
char * render_sigset_t(sigset_t *set, char *buffer)
{
	int i = _NSIG, x;
	do {
		i -= 4, x = 0;
		if (sigismember(set, i+1)) x |= 1;
		if (sigismember(set, i+2)) x |= 2;
		if (sigismember(set, i+3)) x |= 4;
		if (sigismember(set, i+4)) x |= 8;
		*buffer++ = (x < 10 ? '0' : 'a' - 10) + x;
	} while (i >= 4);
	*buffer = 0;
	return buffer;
}
void show_state(void)
{
	struct task_struct *p;

#if (BITS_PER_LONG == 32)
	printk(" task PC stack pid father child younger older\n");
#else
	printk(" task PC stack pid father child younger older\n");
#endif
	read_lock(&tasklist_lock);
	for_each_task(p)
		show_task(p);
	read_unlock(&tasklist_lock);
}
/*
 * Put all the gunge required to become a kernel thread without
 * attached user resources in one place where it belongs.
 */
void daemonize(void)
{
	struct fs_struct *fs;

	/*
	 * If we were started as a result of loading a module, close all of the
	 * user space pages.  We don't need them, and if we didn't close them
	 * they would be locked into memory.
	 */
	current->session = 1;

	/* Become as one with the init task */

	exit_fs(current);	/* current->fs->count--; */
	fs = init_task.fs;
	current->fs = fs;
	atomic_inc(&fs->count);
}
void __init init_idle(void)
{
	struct schedule_data * sched_data;
	sched_data = &aligned_data[smp_processor_id()].schedule_data;
	if (current != &init_task && task_on_runqueue(current)) {
		printk("UGH! (%d:%d) was on the runqueue, removing.\n",
			smp_processor_id(), current->pid);
		del_from_runqueue(current);
	}
	sched_data->curr = current;
	sched_data->last_schedule = get_cycles();
}
extern void init_timervecs (void);

void __init sched_init(void)
{
	/*
	 * We have to do a little magic to get the first
	 * process right in SMP mode.
	 */
	int cpu = smp_processor_id();
	int nr;

	init_task.processor = cpu;

	for(nr = 0; nr < PIDHASH_SZ; nr++)
		pidhash[nr] = NULL;
, timer_bh
);
1174 init_bh(TQUEUE_BH
, tqueue_bh
);
1175 init_bh(IMMEDIATE_BH
, immediate_bh
);
1178 * The boot idle thread does lazy MMU switching as well:
	atomic_inc(&init_mm.mm_count);
	enter_lazy_tlb(&init_mm, current, cpu);
}